From a5df2ea00de95ec8b09459d38c11d53ecb894897 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Sun, 15 Jan 2023 12:12:52 -0800 Subject: [PATCH 01/80] make deadlock feature optional --- web3_proxy/src/bin/web3_proxy.rs | 47 ++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 20 deletions(-) diff --git a/web3_proxy/src/bin/web3_proxy.rs b/web3_proxy/src/bin/web3_proxy.rs index c61b5476..5a951c8f 100644 --- a/web3_proxy/src/bin/web3_proxy.rs +++ b/web3_proxy/src/bin/web3_proxy.rs @@ -12,18 +12,22 @@ use anyhow::Context; use futures::StreamExt; use log::{debug, error, info, warn}; use num::Zero; -use parking_lot::deadlock; use std::fs; use std::path::Path; use std::sync::atomic::{self, AtomicUsize}; -use std::thread; use tokio::runtime; use tokio::sync::broadcast; -use tokio::time::Duration; use web3_proxy::app::{flatten_handle, flatten_handles, Web3ProxyApp}; use web3_proxy::config::{CliConfig, TopConfig}; use web3_proxy::{frontend, metrics_frontend}; +#[cfg(feature = "deadlock")] +use parking_lot::deadlock; +#[cfg(feature = "deadlock")] +use std::thread; +#[cfg(feature = "deadlock")] +use tokio::time::Duration; + fn run( shutdown_sender: broadcast::Sender<()>, cli_config: CliConfig, @@ -34,24 +38,26 @@ fn run( let mut shutdown_receiver = shutdown_sender.subscribe(); - // spawn a thread for deadlock detection - // TODO: disable this feature during release mode and things should go faster - thread::spawn(move || loop { - thread::sleep(Duration::from_secs(10)); - let deadlocks = deadlock::check_deadlock(); - if deadlocks.is_empty() { - continue; - } - - println!("{} deadlocks detected", deadlocks.len()); - for (i, threads) in deadlocks.iter().enumerate() { - println!("Deadlock #{}", i); - for t in threads { - println!("Thread Id {:#?}", t.thread_id()); - println!("{:#?}", t.backtrace()); + #[cfg(feature = "deadlock")] + { + // spawn a thread for deadlock detection + thread::spawn(move || loop { + thread::sleep(Duration::from_secs(10)); + let deadlocks = deadlock::check_deadlock(); + if deadlocks.is_empty() { + continue; } - } - }); + + println!("{} deadlocks detected", deadlocks.len()); + for (i, threads) in deadlocks.iter().enumerate() { + println!("Deadlock #{}", i); + for t in threads { + println!("Thread Id {:#?}", t.thread_id()); + println!("{:#?}", t.backtrace()); + } + } + }); + } // set up tokio's async runtime let mut rt_builder = runtime::Builder::new_multi_thread(); @@ -248,6 +254,7 @@ mod tests { }; use hashbrown::HashMap; use std::env; + use std::thread; use web3_proxy::{ config::{AppConfig, Web3ConnectionConfig}, From 81b178e71c5b62987e8aed3ca64b2f28a87b5646 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Sun, 15 Jan 2023 12:54:08 -0800 Subject: [PATCH 02/80] exit with errors better --- web3_proxy/src/bin/web3_proxy.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/web3_proxy/src/bin/web3_proxy.rs b/web3_proxy/src/bin/web3_proxy.rs index 5a951c8f..fe4b92df 100644 --- a/web3_proxy/src/bin/web3_proxy.rs +++ b/web3_proxy/src/bin/web3_proxy.rs @@ -92,10 +92,8 @@ fn run( let frontend_handle = tokio::spawn(frontend::serve(app_frontend_port, spawned_app.app.clone())); - let prometheus_handle = tokio::spawn(metrics_frontend::serve( - spawned_app.app, - app_prometheus_port, - )); + // TODO: should we put this in a dedicated thread? + let prometheus_handle = tokio::spawn(metrics_frontend::serve(app_prometheus_port)); // if everything is working, these should both run forever tokio::select! 
{ @@ -165,12 +163,11 @@ fn run( if background_errors.is_zero() { info!("finished"); + Ok(()) } else { // TODO: collect instead? - error!("finished with errors!") + Err(anyhow::anyhow!("finished with errors!")) } - - Ok(()) }) } From c831609978a424d55d5b4ac22f1063288b867a20 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Sun, 15 Jan 2023 12:56:00 -0800 Subject: [PATCH 03/80] disable default features (deadlock detection) in docker --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 02bfdfa0..c1487de7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,7 +8,7 @@ COPY . . RUN --mount=type=cache,target=/usr/local/cargo/registry \ --mount=type=cache,target=/usr/src/web3_proxy/target \ cargo test &&\ - cargo install --locked --root /opt/bin --path ./web3_proxy + cargo install --locked --no-default-features --root /opt/bin --path ./web3_proxy FROM debian:bullseye-slim From 5c03249bdafdd7a37fdd8cc49c7b2b674add7128 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Mon, 16 Jan 2023 16:56:40 -0800 Subject: [PATCH 04/80] fix accidental merge. moved to a branch --- web3_proxy/src/bin/web3_proxy.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/web3_proxy/src/bin/web3_proxy.rs b/web3_proxy/src/bin/web3_proxy.rs index fe4b92df..656fdce4 100644 --- a/web3_proxy/src/bin/web3_proxy.rs +++ b/web3_proxy/src/bin/web3_proxy.rs @@ -93,7 +93,10 @@ fn run( tokio::spawn(frontend::serve(app_frontend_port, spawned_app.app.clone())); // TODO: should we put this in a dedicated thread? - let prometheus_handle = tokio::spawn(metrics_frontend::serve(app_prometheus_port)); + let prometheus_handle = tokio::spawn(metrics_frontend::serve( + spawned_app.app.clone(), + app_prometheus_port, + )); // if everything is working, these should both run forever tokio::select! 
{ From b21b5699dbaa18f6483e0d72a62cd4aff3a85c2d Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Sun, 15 Jan 2023 21:13:02 -0800 Subject: [PATCH 05/80] print result as json --- web3_proxy/src/bin/web3_proxy_cli/rpc_accounting.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/web3_proxy/src/bin/web3_proxy_cli/rpc_accounting.rs b/web3_proxy/src/bin/web3_proxy_cli/rpc_accounting.rs index 9d9e7170..653ecb02 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/rpc_accounting.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/rpc_accounting.rs @@ -12,6 +12,8 @@ use migration::{ }, Condition, }; +use serde::Serialize; +use serde_json::json; /// count requests #[derive(FromArgs, PartialEq, Debug, Eq)] @@ -37,7 +39,7 @@ pub struct RpcAccountingSubCommand { impl RpcAccountingSubCommand { pub async fn main(self, db_conn: &DatabaseConnection) -> anyhow::Result<()> { - #[derive(Debug, FromQueryResult)] + #[derive(Serialize, FromQueryResult)] struct SelectResult { total_frontend_requests: Decimal, // pub total_backend_retries: Decimal, @@ -137,8 +139,9 @@ impl RpcAccountingSubCommand { .context("no query result")?; info!( - "query_response for chain {:?}: {:#?}", - self.chain_id, query_response + "query_response for chain {:?}: {:#}", + self.chain_id, + json!(query_response) ); // let query_seconds: Decimal = query_response From d7c75f843e5a232bc3d1a697e8d62dfad3607e51 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Mon, 16 Jan 2023 22:54:40 -0800 Subject: [PATCH 06/80] add stub try_proxy_connection --- web3_proxy/src/app/mod.rs | 55 ++++---- web3_proxy/src/frontend/mod.rs | 92 ++++++++++++-- web3_proxy/src/frontend/rpc_proxy_http.rs | 111 ++++++++++++++++- web3_proxy/src/frontend/rpc_proxy_ws.rs | 145 ++++++++++++++++++++-- web3_proxy/src/rpcs/blockchain.rs | 4 +- web3_proxy/src/rpcs/connections.rs | 54 +++++--- web3_proxy/src/rpcs/request.rs | 44 +++---- 7 files changed, 426 insertions(+), 79 deletions(-) diff --git a/web3_proxy/src/app/mod.rs b/web3_proxy/src/app/mod.rs index 5ec9d856..828dc6bb 100644 --- a/web3_proxy/src/app/mod.rs +++ b/web3_proxy/src/app/mod.rs @@ -6,6 +6,7 @@ use crate::block_number::{block_needed, BlockNeeded}; use crate::config::{AppConfig, TopConfig}; use crate::frontend::authorization::{Authorization, RequestMetadata}; use crate::frontend::errors::FrontendErrorResponse; +use crate::frontend::rpc_proxy_ws::ProxyMode; use crate::jsonrpc::{ JsonRpcForwardedResponse, JsonRpcForwardedResponseEnum, JsonRpcRequest, JsonRpcRequestEnum, }; @@ -907,10 +908,10 @@ impl Web3ProxyApp { self: &Arc, authorization: Arc, request: JsonRpcRequestEnum, + proxy_mode: ProxyMode, ) -> Result<(JsonRpcForwardedResponseEnum, Vec>), FrontendErrorResponse> { - // TODO: this should probably be trace level - // // trace!(?request, "proxy_web3_rpc"); + // trace!(?request, "proxy_web3_rpc"); // even though we have timeouts on the requests to our backend providers, // we need a timeout for the incoming request so that retries don't run forever @@ -921,7 +922,7 @@ impl Web3ProxyApp { JsonRpcRequestEnum::Single(request) => { let (response, rpcs) = timeout( max_time, - self.proxy_web3_rpc_request(&authorization, request), + self.proxy_cached_request(&authorization, request, proxy_mode), ) .await??; @@ -930,7 +931,7 @@ impl Web3ProxyApp { JsonRpcRequestEnum::Batch(requests) => { let (responses, rpcs) = timeout( max_time, - self.proxy_web3_rpc_requests(&authorization, requests), + self.proxy_web3_rpc_requests(&authorization, requests, proxy_mode), ) .await??; @@ -947,6 +948,7 @@ impl Web3ProxyApp { 
self: &Arc, authorization: &Arc, requests: Vec, + proxy_mode: ProxyMode, ) -> anyhow::Result<(Vec, Vec>)> { // TODO: we should probably change ethers-rs to support this directly. they pushed this off to v2 though let num_requests = requests.len(); @@ -956,7 +958,7 @@ impl Web3ProxyApp { let responses = join_all( requests .into_iter() - .map(|request| self.proxy_web3_rpc_request(authorization, request)) + .map(|request| self.proxy_cached_request(authorization, request, proxy_mode)) .collect::>(), ) .await; @@ -1000,10 +1002,11 @@ impl Web3ProxyApp { } #[measure([ErrorCount, HitCount, ResponseTime, Throughput])] - async fn proxy_web3_rpc_request( + async fn proxy_cached_request( self: &Arc, authorization: &Arc, mut request: JsonRpcRequest, + proxy_mode: ProxyMode, ) -> anyhow::Result<(JsonRpcForwardedResponse, Vec>)> { // trace!("Received request: {:?}", request); @@ -1172,22 +1175,32 @@ impl Web3ProxyApp { // TODO: eth_sendBundle (flashbots command) // broadcast transactions to all private rpcs at once "eth_sendRawTransaction" => { + // TODO: how should we handle private_mode here? + let default_num = match proxy_mode { + // TODO: how many balanced rpcs should we send to? configurable? percentage of total? + ProxyMode::Best => Some(2), + ProxyMode::Fastest(0) => None, + // TODO: how many balanced rpcs should we send to? configurable? percentage of total? + // TODO: what if we do 2 per tier? we want to blast the third party rpcs + // TODO: maybe having the third party rpcs in their own Web3Connections would be good for this + ProxyMode::Fastest(x) => Some(x * 2), + ProxyMode::Versus => None, + }; + let (private_rpcs, num) = if let Some(private_rpcs) = self.private_rpcs.as_ref() { if authorization.checks.private_txs { + // if we are sending the transaction privately, no matter the proxy_mode, we send to ALL private rpcs (private_rpcs, None) } else { - // TODO: how many balanced rpcs should we send to? configurable? percentage of total? - // TODO: what if we do 2 per tier? we want to blast the third party rpcs - // TODO: maybe having the third party rpcs would be good for this - (&self.balanced_rpcs, Some(2)) + (&self.balanced_rpcs, default_num) } } else { - (&self.balanced_rpcs, Some(2)) + (&self.balanced_rpcs, default_num) }; // try_send_all_upstream_servers puts the request id into the response. no need to do that ourselves here. let mut response = private_rpcs - .try_send_all_upstream_servers( + .try_send_all_synced_connections( authorization, &request, Some(request_metadata.clone()), @@ -1298,7 +1311,8 @@ impl Web3ProxyApp { json!(true) } "net_peerCount" => { - // emit stats + // no stats on this. its cheap + // TODO: do something with proxy_mode here? self.balanced_rpcs.num_synced_rpcs().into() } "web3_clientVersion" => { @@ -1404,10 +1418,12 @@ impl Web3ProxyApp { .try_get_with(cache_key, async move { // TODO: retry some failures automatically! // TODO: try private_rpcs if all the balanced_rpcs fail! - // TODO: put the hash here instead? + // TODO: put the hash here instead of the block number? its in the request already. + let mut response = self .balanced_rpcs - .try_send_best_upstream_server( + .try_proxy_connection( + proxy_mode, self.allowed_lag, &authorization, request, @@ -1433,18 +1449,15 @@ impl Web3ProxyApp { })? 
} else {
                self.balanced_rpcs
-                    .try_send_best_upstream_server(
+                    .try_proxy_connection(
+                        proxy_mode,
                         self.allowed_lag,
                         &authorization,
                         request,
                         Some(&request_metadata),
                         None,
                     )
-                    .await
-                    .map_err(|err| {
-                        // TODO: emit a stat for an error
-                        anyhow::anyhow!("error while forwarding response: {}", err)
-                    })?
+                    .await?
             }
         };

diff --git a/web3_proxy/src/frontend/mod.rs b/web3_proxy/src/frontend/mod.rs
index 4d94367c..1507a835 100644
--- a/web3_proxy/src/frontend/mod.rs
+++ b/web3_proxy/src/frontend/mod.rs
@@ -42,26 +42,98 @@ pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()> {
     // build our axum Router
     let app = Router::new()
-        // routes should be ordered most to least common
+        // TODO: i think these routes could be done a lot better
+        //
+        // HTTP RPC (POST)
+        //
+        // public
         .route("/", post(rpc_proxy_http::proxy_web3_rpc))
+        // authenticated with and without trailing slash
+        .route(
+            "/rpc/:rpc_key/",
+            post(rpc_proxy_http::proxy_web3_rpc_with_key),
+        )
+        .route(
+            "/rpc/:rpc_key",
+            post(rpc_proxy_http::proxy_web3_rpc_with_key),
+        )
+        // public fastest with and without trailing slash
+        .route("/fastest/", post(rpc_proxy_http::fastest_proxy_web3_rpc))
+        .route("/fastest", post(rpc_proxy_http::fastest_proxy_web3_rpc))
+        // authenticated fastest with and without trailing slash
+        .route(
+            "/fastest/:rpc_key/",
+            post(rpc_proxy_http::fastest_proxy_web3_rpc_with_key),
+        )
+        .route(
+            "/fastest/:rpc_key",
+            post(rpc_proxy_http::fastest_proxy_web3_rpc_with_key),
+        )
+        // public versus
+        .route("/versus/", post(rpc_proxy_http::versus_proxy_web3_rpc))
+        .route("/versus", post(rpc_proxy_http::versus_proxy_web3_rpc))
+        // authenticated versus with and without trailing slash
+        .route(
+            "/versus/:rpc_key/",
+            post(rpc_proxy_http::versus_proxy_web3_rpc_with_key),
+        )
+        .route(
+            "/versus/:rpc_key",
+            post(rpc_proxy_http::versus_proxy_web3_rpc_with_key),
+        )
+        //
+        // Websocket RPC (GET)
+        // If not an RPC, this will redirect to configurable urls
+        //
+        // public
         .route("/", get(rpc_proxy_ws::websocket_handler))
-        .route(
-            "/rpc/:rpc_key",
-            post(rpc_proxy_http::proxy_web3_rpc_with_key),
-        )
+        // authenticated with and without trailing slash
         .route(
             "/rpc/:rpc_key/",
-            post(rpc_proxy_http::proxy_web3_rpc_with_key),
+            get(rpc_proxy_ws::websocket_handler_with_key),
         )
         .route(
             "/rpc/:rpc_key",
             get(rpc_proxy_ws::websocket_handler_with_key),
         )
+        // public fastest with and without trailing slash
+        .route("/fastest/", get(rpc_proxy_ws::fastest_websocket_handler))
+        .route("/fastest", get(rpc_proxy_ws::fastest_websocket_handler))
+        // authenticated fastest with and without trailing slash
         .route(
-            "/rpc/:rpc_key/",
-            get(rpc_proxy_ws::websocket_handler_with_key),
+            "/fastest/:rpc_key/",
+            get(rpc_proxy_ws::fastest_websocket_handler_with_key),
         )
+        .route(
+            "/fastest/:rpc_key",
+            get(rpc_proxy_ws::fastest_websocket_handler_with_key),
+        )
+        // public versus
+        .route(
+            "/versus/",
+            get(rpc_proxy_ws::versus_websocket_handler),
+        )
+        .route(
+            "/versus",
+            get(rpc_proxy_ws::versus_websocket_handler),
+        )
+        // authenticated versus with and without trailing slash
+        .route(
+            "/versus/:rpc_key/",
+            get(rpc_proxy_ws::versus_websocket_handler_with_key),
+        )
+        .route(
+            "/versus/:rpc_key",
+            get(rpc_proxy_ws::versus_websocket_handler_with_key),
+        )
+        //
+        // System things
+        //
         .route("/health", get(status::health))
+        .route("/status", get(status::status))
+        //
+        // User stuff
+        //
         .route("/user/login/:user_address", get(users::user_login_get))
         .route(
"/user/login/:user_address/:message_eip", @@ -86,9 +158,11 @@ pub async fn serve(port: u16, proxy_app: Arc) -> anyhow::Result<() ) .route("/user/stats/detailed", get(users::user_stats_detailed_get)) .route("/user/logout", post(users::user_logout_post)) - .route("/status", get(status::status)) + // + // Axum layers // layers are ordered bottom up // the last layer is first for requests and last for responses + // // Mark the `Authorization` request header as sensitive so it doesn't show in logs .layer(SetSensitiveRequestHeadersLayer::new(once(AUTHORIZATION))) // handle cors diff --git a/web3_proxy/src/frontend/rpc_proxy_http.rs b/web3_proxy/src/frontend/rpc_proxy_http.rs index 72664812..067546db 100644 --- a/web3_proxy/src/frontend/rpc_proxy_http.rs +++ b/web3_proxy/src/frontend/rpc_proxy_http.rs @@ -2,6 +2,7 @@ use super::authorization::{ip_is_authorized, key_is_authorized}; use super::errors::FrontendResult; +use super::rpc_proxy_ws::ProxyMode; use crate::{app::Web3ProxyApp, jsonrpc::JsonRpcRequestEnum}; use axum::extract::Path; use axum::headers::{Origin, Referer, UserAgent}; @@ -18,9 +19,41 @@ use std::sync::Arc; #[debug_handler] pub async fn proxy_web3_rpc( Extension(app): Extension>, - ClientIp(ip): ClientIp, + ip: ClientIp, origin: Option>, Json(payload): Json, +) -> FrontendResult { + _proxy_web3_rpc(app, ip, origin, payload, ProxyMode::Best).await +} + +#[debug_handler] +pub async fn fastest_proxy_web3_rpc( + Extension(app): Extension>, + ip: ClientIp, + origin: Option>, + Json(payload): Json, +) -> FrontendResult { + // TODO: read the fastest number from params + // TODO: check that the app allows this without authentication + _proxy_web3_rpc(app, ip, origin, payload, ProxyMode::Fastest(0)).await +} + +#[debug_handler] +pub async fn versus_proxy_web3_rpc( + Extension(app): Extension>, + ip: ClientIp, + origin: Option>, + Json(payload): Json, +) -> FrontendResult { + _proxy_web3_rpc(app, ip, origin, payload, ProxyMode::Versus).await +} + +async fn _proxy_web3_rpc( + app: Arc, + ClientIp(ip): ClientIp, + origin: Option>, + payload: JsonRpcRequestEnum, + proxy_mode: ProxyMode, ) -> FrontendResult { // TODO: benchmark spawning this // TODO: do we care about keeping the TypedHeader wrapper? 
@@ -31,7 +64,7 @@ pub async fn proxy_web3_rpc( let authorization = Arc::new(authorization); let (response, rpcs, _semaphore) = app - .proxy_web3_rpc(authorization, payload) + .proxy_web3_rpc(authorization, payload, proxy_mode) .await .map(|(x, y)| (x, y, semaphore))?; @@ -58,12 +91,82 @@ pub async fn proxy_web3_rpc( #[debug_handler] pub async fn proxy_web3_rpc_with_key( Extension(app): Extension>, - ClientIp(ip): ClientIp, + ip: ClientIp, origin: Option>, referer: Option>, user_agent: Option>, Path(rpc_key): Path, Json(payload): Json, +) -> FrontendResult { + _proxy_web3_rpc_with_key( + app, + ip, + origin, + referer, + user_agent, + rpc_key, + payload, + ProxyMode::Best, + ) + .await +} + +#[debug_handler] +pub async fn fastest_proxy_web3_rpc_with_key( + Extension(app): Extension>, + ip: ClientIp, + origin: Option>, + referer: Option>, + user_agent: Option>, + Path(rpc_key): Path, + Json(payload): Json, +) -> FrontendResult { + _proxy_web3_rpc_with_key( + app, + ip, + origin, + referer, + user_agent, + rpc_key, + payload, + ProxyMode::Fastest(0), + ) + .await +} + +#[debug_handler] +pub async fn versus_proxy_web3_rpc_with_key( + Extension(app): Extension>, + ip: ClientIp, + origin: Option>, + referer: Option>, + user_agent: Option>, + Path(rpc_key): Path, + Json(payload): Json, +) -> FrontendResult { + _proxy_web3_rpc_with_key( + app, + ip, + origin, + referer, + user_agent, + rpc_key, + payload, + ProxyMode::Versus, + ) + .await +} + +#[allow(clippy::too_many_arguments)] +async fn _proxy_web3_rpc_with_key( + app: Arc, + ClientIp(ip): ClientIp, + origin: Option>, + referer: Option>, + user_agent: Option>, + rpc_key: String, + payload: JsonRpcRequestEnum, + proxy_mode: ProxyMode, ) -> FrontendResult { // TODO: DRY w/ proxy_web3_rpc // the request can take a while, so we spawn so that we can start serving another request @@ -82,7 +185,7 @@ pub async fn proxy_web3_rpc_with_key( let authorization = Arc::new(authorization); let (response, rpcs, _semaphore) = app - .proxy_web3_rpc(authorization, payload) + .proxy_web3_rpc(authorization, payload, proxy_mode) .await .map(|(x, y)| (x, y, semaphore))?; diff --git a/web3_proxy/src/frontend/rpc_proxy_ws.rs b/web3_proxy/src/frontend/rpc_proxy_ws.rs index b1f70e9f..23516738 100644 --- a/web3_proxy/src/frontend/rpc_proxy_ws.rs +++ b/web3_proxy/src/frontend/rpc_proxy_ws.rs @@ -33,10 +33,58 @@ use serde_json::value::to_raw_value; use std::sync::Arc; use std::{str::from_utf8_mut, sync::atomic::AtomicUsize}; +#[derive(Copy, Clone)] +pub enum ProxyMode { + /// send to the "best" synced server + Best, + /// send to all synced servers and return the fastest non-error response (reverts do not count as errors here) + Fastest(usize), + /// send to all servers for benchmarking. return the fastest non-error response + Versus, +} + /// Public entrypoint for WebSocket JSON-RPC requests. +/// Queries a single server at a time #[debug_handler] pub async fn websocket_handler( Extension(app): Extension>, + ip: ClientIp, + origin: Option>, + ws_upgrade: Option, +) -> FrontendResult { + _websocket_handler(ProxyMode::Fastest(1), app, ip, origin, ws_upgrade).await +} + +/// Public entrypoint for WebSocket JSON-RPC requests that uses all synced servers. +/// Queries all synced backends with every request! This might get expensive! 
+#[debug_handler] +pub async fn fastest_websocket_handler( + Extension(app): Extension>, + ip: ClientIp, + origin: Option>, + ws_upgrade: Option, +) -> FrontendResult { + // TODO: get the fastest number from the url params (default to 0/all) + // TODO: config to disable this + _websocket_handler(ProxyMode::Fastest(0), app, ip, origin, ws_upgrade).await +} + +/// Public entrypoint for WebSocket JSON-RPC requests that uses all synced servers. +/// Queries **all** backends with every request! This might get expensive! +#[debug_handler] +pub async fn versus_websocket_handler( + Extension(app): Extension>, + ip: ClientIp, + origin: Option>, + ws_upgrade: Option, +) -> FrontendResult { + // TODO: config to disable this + _websocket_handler(ProxyMode::Versus, app, ip, origin, ws_upgrade).await +} + +async fn _websocket_handler( + proxy_mode: ProxyMode, + app: Arc, ClientIp(ip): ClientIp, origin: Option>, ws_upgrade: Option, @@ -49,7 +97,7 @@ pub async fn websocket_handler( match ws_upgrade { Some(ws) => Ok(ws - .on_upgrade(|socket| proxy_web3_socket(app, authorization, socket)) + .on_upgrade(move |socket| proxy_web3_socket(app, authorization, socket, proxy_mode)) .into_response()), None => { if let Some(redirect) = &app.config.redirect_public_url { @@ -72,12 +120,84 @@ pub async fn websocket_handler( #[debug_handler] pub async fn websocket_handler_with_key( Extension(app): Extension>, - ClientIp(ip): ClientIp, + ip: ClientIp, Path(rpc_key): Path, origin: Option>, referer: Option>, user_agent: Option>, ws_upgrade: Option, +) -> FrontendResult { + // TODO: config instead of defaulting to fastest(1)? + _websocket_handler_with_key( + ProxyMode::Fastest(1), + app, + ip, + rpc_key, + origin, + referer, + user_agent, + ws_upgrade, + ) + .await +} + +#[debug_handler] +pub async fn fastest_websocket_handler_with_key( + Extension(app): Extension>, + ip: ClientIp, + Path(rpc_key): Path, + origin: Option>, + referer: Option>, + user_agent: Option>, + ws_upgrade: Option, +) -> FrontendResult { + // TODO: get the fastest number from the url params (default to 0/all) + _websocket_handler_with_key( + ProxyMode::Fastest(0), + app, + ip, + rpc_key, + origin, + referer, + user_agent, + ws_upgrade, + ) + .await +} + +#[debug_handler] +pub async fn versus_websocket_handler_with_key( + Extension(app): Extension>, + ip: ClientIp, + Path(rpc_key): Path, + origin: Option>, + referer: Option>, + user_agent: Option>, + ws_upgrade: Option, +) -> FrontendResult { + _websocket_handler_with_key( + ProxyMode::Versus, + app, + ip, + rpc_key, + origin, + referer, + user_agent, + ws_upgrade, + ) + .await +} + +#[allow(clippy::too_many_arguments)] +async fn _websocket_handler_with_key( + proxy_mode: ProxyMode, + app: Arc, + ClientIp(ip): ClientIp, + rpc_key: String, + origin: Option>, + referer: Option>, + user_agent: Option>, + ws_upgrade: Option, ) -> FrontendResult { let rpc_key = rpc_key.parse()?; @@ -96,9 +216,8 @@ pub async fn websocket_handler_with_key( let authorization = Arc::new(authorization); match ws_upgrade { - Some(ws_upgrade) => { - Ok(ws_upgrade.on_upgrade(move |socket| proxy_web3_socket(app, authorization, socket))) - } + Some(ws_upgrade) => Ok(ws_upgrade + .on_upgrade(move |socket| proxy_web3_socket(app, authorization, socket, proxy_mode))), None => { // if no websocket upgrade, this is probably a user loading the url with their browser @@ -154,6 +273,7 @@ async fn proxy_web3_socket( app: Arc, authorization: Arc, socket: WebSocket, + proxy_mode: ProxyMode, ) { // split the websocket so we can read and write 
concurrently let (ws_tx, ws_rx) = socket.split(); @@ -162,7 +282,13 @@ async fn proxy_web3_socket( let (response_sender, response_receiver) = flume::unbounded::(); tokio::spawn(write_web3_socket(response_receiver, ws_tx)); - tokio::spawn(read_web3_socket(app, authorization, ws_rx, response_sender)); + tokio::spawn(read_web3_socket( + app, + authorization, + ws_rx, + response_sender, + proxy_mode, + )); } /// websockets support a few more methods than http clients @@ -173,6 +299,7 @@ async fn handle_socket_payload( response_sender: &flume::Sender, subscription_count: &AtomicUsize, subscriptions: &mut HashMap, + proxy_mode: ProxyMode, ) -> Message { // TODO: do any clients send batches over websockets? let (id, response) = match serde_json::from_str::(payload) { @@ -183,6 +310,7 @@ async fn handle_socket_payload( [..] { "eth_subscribe" => { + // TODO: how can we subscribe with proxy_mode? match app .eth_subscribe( authorization.clone(), @@ -247,7 +375,7 @@ async fn handle_socket_payload( Ok(response.into()) } _ => app - .proxy_web3_rpc(authorization.clone(), json_request.into()) + .proxy_web3_rpc(authorization.clone(), json_request.into(), proxy_mode) .await .map_or_else( |err| match err { @@ -291,6 +419,7 @@ async fn read_web3_socket( authorization: Arc, mut ws_rx: SplitStream, response_sender: flume::Sender, + proxy_mode: ProxyMode, ) { let mut subscriptions = HashMap::new(); let subscription_count = AtomicUsize::new(1); @@ -307,6 +436,7 @@ async fn read_web3_socket( &response_sender, &subscription_count, &mut subscriptions, + proxy_mode, ) .await } @@ -333,6 +463,7 @@ async fn read_web3_socket( &response_sender, &subscription_count, &mut subscriptions, + proxy_mode, ) .await } diff --git a/web3_proxy/src/rpcs/blockchain.rs b/web3_proxy/src/rpcs/blockchain.rs index 456d21fd..e03cc6fd 100644 --- a/web3_proxy/src/rpcs/blockchain.rs +++ b/web3_proxy/src/rpcs/blockchain.rs @@ -165,7 +165,7 @@ impl Web3Connections { // TODO: request_metadata? maybe we should put it in the authorization? // TODO: don't hard code allowed lag let response = self - .try_send_best_upstream_server(60, authorization, request, None, None) + .try_send_best_consensus_head_connection(60, authorization, request, None, None) .await?; let block = response.result.context("failed fetching block")?; @@ -241,7 +241,7 @@ impl Web3Connections { // TODO: if error, retry? // TODO: request_metadata or authorization? 
let response = self - .try_send_best_upstream_server(60, authorization, request, None, Some(num)) + .try_send_best_consensus_head_connection(60, authorization, request, None, Some(num)) .await?; let raw_block = response.result.context("no block result")?; diff --git a/web3_proxy/src/rpcs/connections.rs b/web3_proxy/src/rpcs/connections.rs index fbd75b3f..82dcbbe7 100644 --- a/web3_proxy/src/rpcs/connections.rs +++ b/web3_proxy/src/rpcs/connections.rs @@ -2,12 +2,13 @@ use super::blockchain::{ArcBlock, BlockHashesCache}; use super::connection::Web3Connection; use super::request::{ - OpenRequestHandle, OpenRequestHandleMetrics, OpenRequestResult, RequestErrorHandler, + OpenRequestHandle, OpenRequestHandleMetrics, OpenRequestResult, RequestRevertHandler, }; use super::synced_connections::SyncedConnections; use crate::app::{flatten_handle, AnyhowJoinHandle}; use crate::config::{BlockAndRpc, TxHashAndRpc, Web3ConnectionConfig}; use crate::frontend::authorization::{Authorization, RequestMetadata}; +use crate::frontend::rpc_proxy_ws::ProxyMode; use crate::jsonrpc::{JsonRpcForwardedResponse, JsonRpcRequest}; use crate::rpcs::transactions::TxStatus; use arc_swap::ArcSwap; @@ -406,8 +407,8 @@ impl Web3Connections { unimplemented!("this shouldn't be possible") } - /// get the best available rpc server - pub async fn best_synced_backend_connection( + /// get the best available rpc server with the consensus head block. it might have blocks after the consensus head + pub async fn best_consensus_head_connection( &self, allowed_lag: u64, authorization: &Arc, @@ -662,7 +663,7 @@ impl Web3Connections { /// be sure there is a timeout on this or it might loop forever /// TODO: do not take allowed_lag here. have it be on the connections struct instead - pub async fn try_send_best_upstream_server( + pub async fn try_send_best_consensus_head_connection( &self, allowed_lag: u64, authorization: &Arc, @@ -679,7 +680,7 @@ impl Web3Connections { break; } match self - .best_synced_backend_connection( + .best_consensus_head_connection( allowed_lag, authorization, request_metadata, @@ -705,7 +706,7 @@ impl Web3Connections { .request( &request.method, &json!(request.params), - RequestErrorHandler::SaveReverts, + RequestRevertHandler::Save, ) .await; @@ -818,7 +819,7 @@ impl Web3Connections { } /// be sure there is a timeout on this or it might loop forever - pub async fn try_send_all_upstream_servers( + pub async fn try_send_all_synced_connections( &self, authorization: &Arc, request: &JsonRpcRequest, @@ -887,6 +888,31 @@ impl Web3Connections { } } } + + pub async fn try_proxy_connection( + &self, + proxy_mode: ProxyMode, + allowed_lag: u64, + authorization: &Arc, + request: JsonRpcRequest, + request_metadata: Option<&Arc>, + min_block_needed: Option<&U64>, + ) -> anyhow::Result { + match proxy_mode { + ProxyMode::Best => { + self.try_send_best_consensus_head_connection( + allowed_lag, + authorization, + request, + request_metadata, + min_block_needed, + ) + .await + } + ProxyMode::Fastest(x) => todo!("Fastest"), + ProxyMode::Versus => todo!("Versus"), + } + } } impl fmt::Debug for Web3Connections { @@ -1088,7 +1114,7 @@ mod tests { // best_synced_backend_connection requires servers to be synced with the head block // TODO: don't hard code allowed_lag let x = conns - .best_synced_backend_connection(60, &authorization, None, &[], None) + .best_consensus_head_connection(60, &authorization, None, &[], None) .await .unwrap(); @@ -1143,21 +1169,21 @@ mod tests { assert!(matches!( conns - .best_synced_backend_connection(60, 
&authorization, None, &[], None) + .best_consensus_head_connection(60, &authorization, None, &[], None) .await, Ok(OpenRequestResult::Handle(_)) )); assert!(matches!( conns - .best_synced_backend_connection(60, &authorization, None, &[], Some(&0.into())) + .best_consensus_head_connection(60, &authorization, None, &[], Some(&0.into())) .await, Ok(OpenRequestResult::Handle(_)) )); assert!(matches!( conns - .best_synced_backend_connection(60, &authorization, None, &[], Some(&1.into())) + .best_consensus_head_connection(60, &authorization, None, &[], Some(&1.into())) .await, Ok(OpenRequestResult::Handle(_)) )); @@ -1165,7 +1191,7 @@ mod tests { // future block should not get a handle assert!(matches!( conns - .best_synced_backend_connection(60, &authorization, None, &[], Some(&2.into())) + .best_consensus_head_connection(60, &authorization, None, &[], Some(&2.into())) .await, Ok(OpenRequestResult::NotReady) )); @@ -1298,7 +1324,7 @@ mod tests { // best_synced_backend_connection requires servers to be synced with the head block let best_head_server = conns - .best_synced_backend_connection( + .best_consensus_head_connection( 60, &authorization, None, @@ -1313,7 +1339,7 @@ mod tests { )); let best_archive_server = conns - .best_synced_backend_connection(60, &authorization, None, &[], Some(&1.into())) + .best_consensus_head_connection(60, &authorization, None, &[], Some(&1.into())) .await; match best_archive_server { diff --git a/web3_proxy/src/rpcs/request.rs b/web3_proxy/src/rpcs/request.rs index 7358982c..7db16fd5 100644 --- a/web3_proxy/src/rpcs/request.rs +++ b/web3_proxy/src/rpcs/request.rs @@ -42,7 +42,7 @@ pub struct OpenRequestHandle { } /// Depending on the context, RPC errors can require different handling. -pub enum RequestErrorHandler { +pub enum RequestRevertHandler { /// Log at the trace level. Use when errors are expected. TraceLevel, /// Log at the debug level. Use when errors are expected. @@ -52,7 +52,7 @@ pub enum RequestErrorHandler { /// Log at the warn level. Use when errors do not cause problems. WarnLevel, /// Potentially save the revert. Users can tune how often this happens - SaveReverts, + Save, } // TODO: second param could be skipped since we don't need it here @@ -65,13 +65,13 @@ struct EthCallFirstParams { data: Option, } -impl From for RequestErrorHandler { +impl From for RequestRevertHandler { fn from(level: Level) -> Self { match level { - Level::Trace => RequestErrorHandler::TraceLevel, - Level::Debug => RequestErrorHandler::DebugLevel, - Level::Error => RequestErrorHandler::ErrorLevel, - Level::Warn => RequestErrorHandler::WarnLevel, + Level::Trace => RequestRevertHandler::TraceLevel, + Level::Debug => RequestRevertHandler::DebugLevel, + Level::Error => RequestRevertHandler::ErrorLevel, + Level::Warn => RequestRevertHandler::WarnLevel, _ => unimplemented!("unexpected tracing Level"), } } @@ -213,7 +213,7 @@ impl OpenRequestHandle { &self, method: &str, params: &P, - error_handler: RequestErrorHandler, + revert_handler: RequestRevertHandler, ) -> Result where // TODO: not sure about this type. would be better to not need clones, but measure and spawns combine to need it @@ -252,36 +252,36 @@ impl OpenRequestHandle { if let Err(err) = &response { // only save reverts for some types of calls // TODO: do something special for eth_sendRawTransaction too - let error_handler = if let RequestErrorHandler::SaveReverts = error_handler { + let revert_handler = if let RequestRevertHandler::Save = revert_handler { // TODO: should all these be Trace or Debug or a mix? 
if !["eth_call", "eth_estimateGas"].contains(&method) {
                 // trace!(%method, "skipping save on revert");
-                RequestErrorHandler::TraceLevel
+                RequestRevertHandler::TraceLevel
             } else if self.authorization.db_conn.is_some() {
                 let log_revert_chance = self.authorization.checks.log_revert_chance;

                 if log_revert_chance == 0.0 {
                     // trace!(%method, "no chance. skipping save on revert");
-                    RequestErrorHandler::TraceLevel
+                    RequestRevertHandler::TraceLevel
                 } else if log_revert_chance == 1.0 {
                     // trace!(%method, "guaranteed chance. SAVING on revert");
-                    error_handler
+                    revert_handler
                 } else if thread_fast_rng::thread_fast_rng().gen_range(0.0f64..=1.0)
                     < log_revert_chance
                 {
                     // trace!(%method, "missed chance. skipping save on revert");
-                    RequestErrorHandler::TraceLevel
+                    RequestRevertHandler::TraceLevel
                 } else {
                     // trace!("Saving on revert");
                     // TODO: is always logging at debug level fine?
-                    error_handler
+                    revert_handler
                 }
             } else {
                 // trace!(%method, "no database. skipping save on revert");
-                RequestErrorHandler::TraceLevel
+                RequestRevertHandler::TraceLevel
             }
         } else {
-            error_handler
+            revert_handler
         };

         // check for "execution reverted" here
@@ -323,8 +323,8 @@ impl OpenRequestHandle {
         }

         // TODO: think more about the method and param logs. those can be sensitive information
-        match error_handler {
-            RequestErrorHandler::DebugLevel => {
+        match revert_handler {
+            RequestRevertHandler::DebugLevel => {
                 // TODO: think about this revert check more. sometimes we might want reverts logged so this needs a flag
                 if !is_revert {
                     debug!(
@@ -333,7 +333,7 @@ impl OpenRequestHandle {
                     );
                 }
             }
-            RequestErrorHandler::TraceLevel => {
+            RequestRevertHandler::TraceLevel => {
                 trace!(
                     "bad response from {}! method={} params={:?} err={:?}",
                     self.conn,
@@ -342,21 +342,21 @@ impl OpenRequestHandle {
                     err
                 );
             }
-            RequestErrorHandler::ErrorLevel => {
+            RequestRevertHandler::ErrorLevel => {
                 // TODO: include params if not running in release mode
                 error!(
                     "bad response from {}! method={} err={:?}",
                     self.conn, method, err
                 );
             }
-            RequestErrorHandler::WarnLevel => {
+            RequestRevertHandler::WarnLevel => {
                 // TODO: include params if not running in release mode
                 warn!(
                     "bad response from {}! method={} err={:?}",
                     self.conn, method, err
                 );
             }
-            RequestErrorHandler::SaveReverts => {
+            RequestRevertHandler::Save => {
                 trace!(
                     "bad response from {}!
method={} params={:?} err={:?}", self.conn, From 9ba4c288c6a7508c5057926ec444ddbfa6e3704d Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 17 Jan 2023 13:34:33 -0800 Subject: [PATCH 07/80] quick health check script that logs to sentry --- .../src/bin/web3_proxy_cli/health_compass.rs | 137 ------------- web3_proxy/src/bin/web3_proxy_cli/main.rs | 67 +++++-- .../src/bin/web3_proxy_cli/sentryd/compare.rs | 183 ++++++++++++++++++ .../src/bin/web3_proxy_cli/sentryd/mod.rs | 112 +++++++++++ .../src/bin/web3_proxy_cli/sentryd/simple.rs | 22 +++ web3_proxy/src/frontend/mod.rs | 2 + 6 files changed, 374 insertions(+), 149 deletions(-) delete mode 100644 web3_proxy/src/bin/web3_proxy_cli/health_compass.rs create mode 100644 web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs create mode 100644 web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs create mode 100644 web3_proxy/src/bin/web3_proxy_cli/sentryd/simple.rs diff --git a/web3_proxy/src/bin/web3_proxy_cli/health_compass.rs b/web3_proxy/src/bin/web3_proxy_cli/health_compass.rs deleted file mode 100644 index 4bdffbe9..00000000 --- a/web3_proxy/src/bin/web3_proxy_cli/health_compass.rs +++ /dev/null @@ -1,137 +0,0 @@ -use argh::FromArgs; -use ethers::types::{Block, TxHash, H256}; -use log::{error, info, warn}; -use serde::{Deserialize, Serialize}; -use serde_json::json; -use web3_proxy::jsonrpc::JsonRpcErrorData; - -#[derive(FromArgs, PartialEq, Debug, Eq)] -/// Never bring only 2 compasses to sea. -#[argh(subcommand, name = "health_compass")] -pub struct HealthCompassSubCommand { - #[argh(positional)] - /// first rpc - rpc_a: String, - - #[argh(positional)] - /// second rpc - rpc_b: String, - - #[argh(positional)] - /// third rpc - rpc_c: String, -} - -#[derive(Debug, Deserialize, Serialize)] -struct JsonRpcResponse { - // pub jsonrpc: String, - // pub id: Box, - #[serde(skip_serializing_if = "Option::is_none")] - pub result: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub error: Option, -} - -impl HealthCompassSubCommand { - pub async fn main(self) -> anyhow::Result<()> { - let client = reqwest::Client::new(); - - let block_by_number_request = json!({ - "jsonrpc": "2.0", - "id": "1", - "method": "eth_getBlockByNumber", - "params": ["latest", false], - }); - - let a = client - .post(&self.rpc_a) - .json(&block_by_number_request) - .send() - .await? - .json::>>() - .await? - .result - .unwrap(); - - // check the parent because b and c might not be as fast as a - let parent_hash = a.parent_hash; - - let a = check_rpc(&parent_hash, &client, &self.rpc_a).await; - let b = check_rpc(&parent_hash, &client, &self.rpc_b).await; - let c = check_rpc(&parent_hash, &client, &self.rpc_c).await; - - match (a, b, c) { - (Ok(Ok(a)), Ok(Ok(b)), Ok(Ok(c))) => { - if a != b { - error!("A: {:?}\n\nB: {:?}\n\nC: {:?}", a, b, c); - return Err(anyhow::anyhow!("difference detected!")); - } - - if b != c { - error!("\nA: {:?}\n\nB: {:?}\n\nC: {:?}", a, b, c); - return Err(anyhow::anyhow!("difference detected!")); - } - - // all three rpcs agree - } - (Ok(Ok(a)), Ok(Ok(b)), c) => { - // not all successes! but still enough to compare - warn!("C failed: {:?}", c); - - if a != b { - error!("\nA: {:?}\n\nB: {:?}", a, b); - return Err(anyhow::anyhow!("difference detected!")); - } - } - (Ok(Ok(a)), b, Ok(Ok(c))) => { - // not all successes! 
but still enough to compare
-                warn!("B failed: {:?}", b);
-
-                if a != c {
-                    error!("\nA: {:?}\n\nC: {:?}", a, c);
-                    return Err(anyhow::anyhow!("difference detected!"));
-                }
-            }
-            (a, b, c) => {
-                // not enough successes
-                error!("A: {:?}\n\nB: {:?}\n\nC: {:?}", a, b, c);
-                return Err(anyhow::anyhow!("All are failing!"));
-            }
-        }
-
-        info!("OK");
-
-        Ok(())
-    }
-}
-
-// i don't think we need a whole provider. a simple http request is easiest
-async fn check_rpc(
-    block_hash: &H256,
-    client: &reqwest::Client,
-    rpc: &str,
-) -> anyhow::Result<Result<Block<TxHash>, JsonRpcErrorData>> {
-    let block_by_hash_request = json!({
-        "jsonrpc": "2.0",
-        "id": "1",
-        "method": "eth_getBlockByHash",
-        "params": [block_hash, false],
-    });
-
-    // TODO: don't unwrap! don't use the try operator
-    let response: JsonRpcResponse<Block<TxHash>> = client
-        .post(rpc)
-        .json(&block_by_hash_request)
-        .send()
-        .await?
-        .json()
-        .await?;
-
-    if let Some(result) = response.result {
-        Ok(Ok(result))
-    } else if let Some(result) = response.error {
-        Ok(Err(result))
-    } else {
-        unimplemented!("{:?}", response)
-    }
-}
diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs
index c60b9446..e1241908 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/main.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs
@@ -6,14 +6,15 @@ mod check_config;
 mod count_users;
 mod create_user;
 mod drop_migration_lock;
-mod health_compass;
 mod list_user_tier;
 mod rpc_accounting;
+mod sentryd;
 mod transfer_key;
 mod user_export;
 mod user_import;

 use argh::FromArgs;
+use log::warn;
 use std::fs;
 use web3_proxy::{
     app::{get_db, get_migrated_db},
     config::TopConfig,
 };
@@ -27,13 +28,17 @@ pub struct CliConfig {
     #[argh(option)]
     pub config: Option<String>,

     /// if no config, what database the client should connect to. Defaults to dev db.
     #[argh(
         option,
         default = "\"mysql://root:dev_web3_proxy@127.0.0.1:13306/dev_web3_proxy\".to_string()"
     )]
     pub db_url: String,

+    /// if no config, what sentry url the client should connect to
+    #[argh(option)]
+    pub sentry_url: Option<String>,
+
     /// this one cli can do multiple things
     #[argh(subcommand)]
     sub_command: SubCommand,
@@ -50,8 +55,8 @@ enum SubCommand {
     CountUsers(count_users::CountUsersSubCommand),
     CreateUser(create_user::CreateUserSubCommand),
     DropMigrationLock(drop_migration_lock::DropMigrationLockSubCommand),
-    HealthCompass(health_compass::HealthCompassSubCommand),
     RpcAccounting(rpc_accounting::RpcAccountingSubCommand),
+    Sentryd(sentryd::SentrydSubCommand),
     TransferKey(transfer_key::TransferKeySubCommand),
     UserExport(user_export::UserExportSubCommand),
     UserImport(user_import::UserImportSubCommand),
@@ -64,12 +69,10 @@ enum SubCommand {
 async fn main() -> anyhow::Result<()> {
     // if RUST_LOG isn't set, configure a default
     // TODO: is there a better way to do this?
-    if std::env::var("RUST_LOG").is_err() {
-        // std::env::set_var("RUST_LOG", "info,web3_proxy=debug,web3_proxy_cli=debug");
-        std::env::set_var("RUST_LOG", "info,web3_proxy=debug,web3_proxy_cli=debug");
-    }
-
-    env_logger::init();
+    let rust_log = match std::env::var("RUST_LOG") {
+        Ok(x) => x,
+        Err(_) => "info,web3_proxy=debug,web3_proxy_cli=debug".to_string(),
+    };

     // this probably won't matter for us in docker, but better safe than sorry
     fdlimit::raise_fd_limit();
@@ -80,8 +83,12 @@ async fn main() -> anyhow::Result<()> {
         let top_config: String = fs::read_to_string(top_config_path)?;
         let top_config: TopConfig = toml::from_str(&top_config)?;

-        if let Some(top_config_db_url) = top_config.app.db_url.clone() {
-            cli_config.db_url = top_config_db_url;
+        if let Some(db_url) = top_config.app.db_url.clone() {
+            cli_config.db_url = db_url;
+        }
+
+        if let Some(sentry_url) = top_config.app.sentry_url.clone() {
+            cli_config.sentry_url = Some(sentry_url);
         }

         Some(top_config)
@@ -89,6 +96,36 @@ async fn main() -> anyhow::Result<()> {
         None
     };

+    let logger = env_logger::builder().parse_filters(&rust_log).build();
+
+    let max_level = logger.filter();
+
+    // connect to sentry for error reporting
+    // if no sentry, only log to stdout
+    let _sentry_guard = if let Some(sentry_url) = cli_config.sentry_url.clone() {
+        let logger = sentry::integrations::log::SentryLogger::with_dest(logger);
+
+        log::set_boxed_logger(Box::new(logger)).unwrap();
+
+        let guard = sentry::init((
+            sentry_url,
+            sentry::ClientOptions {
+                release: sentry::release_name!(),
+                // TODO: Set this to a lower value (from config) in production
+                traces_sample_rate: 1.0,
+                ..Default::default()
+            },
+        ));
+
+        Some(guard)
+    } else {
+        log::set_boxed_logger(Box::new(logger)).unwrap();
+
+        None
+    };
+
+    log::set_max_level(max_level);
+
     match cli_config.sub_command {
         SubCommand::ChangeUserAddress(x) => {
             let db_conn = get_db(cli_config.db_url, 1, 1).await?;
@@ -127,7 +164,13 @@ async fn main() -> anyhow::Result<()> {

             x.main(&db_conn).await
         }
-        SubCommand::HealthCompass(x) => x.main().await,
+        SubCommand::Sentryd(x) => {
+            if cli_config.sentry_url.is_none() {
+                warn!("sentry_url is not set!
Logs will only show in this console"); + } + + x.main().await + } SubCommand::RpcAccounting(x) => { let db_conn = get_migrated_db(cli_config.db_url, 1, 1).await?; diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs new file mode 100644 index 00000000..50fcea7a --- /dev/null +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs @@ -0,0 +1,183 @@ +use anyhow::{anyhow, Context}; +use chrono::{DateTime, Utc}; +use ethers::types::{Block, TxHash, H256, U64}; +use futures::{stream::FuturesUnordered, StreamExt}; +use log::{debug, warn}; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use web3_proxy::jsonrpc::JsonRpcErrorData; + +#[derive(Debug, Deserialize, Serialize)] +struct JsonRpcResponse { + // pub jsonrpc: String, + // pub id: Box, + #[serde(skip_serializing_if = "Option::is_none")] + pub result: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option, +} + +#[derive(Serialize, Ord, PartialEq, PartialOrd, Eq)] +struct AbbreviatedBlock { + pub num: u64, + pub time: DateTime, + pub hash: H256, +} + +impl From> for AbbreviatedBlock { + fn from(x: Block) -> Self { + Self { + num: x.number.unwrap().as_u64(), + hash: x.hash.unwrap(), + time: x.time().unwrap(), + } + } +} + +pub async fn main(rpc: String, others: Vec, max_lag: i64) -> anyhow::Result<()> { + let client = reqwest::Client::new(); + + let block_by_number_request = json!({ + "jsonrpc": "2.0", + "id": "1", + "method": "eth_getBlockByNumber", + "params": ["latest", false], + }); + + let a = client + .post(&rpc) + .json(&block_by_number_request) + .send() + .await? + .json::>>() + .await? + .result + .unwrap(); + + // check the parent because b and c might not be as fast as a + let parent_hash = a.parent_hash; + + let rpc_block = check_rpc(parent_hash, client.clone(), rpc.clone()) + .await + .context("Error while querying primary rpc")?; + + let fs = FuturesUnordered::new(); + for other in others.iter() { + let f = check_rpc(parent_hash, client.clone(), other.clone()); + + fs.push(tokio::spawn(f)); + } + let other_check: Vec<_> = fs.collect().await; + + if other_check.is_empty() { + return Err(anyhow::anyhow!("No other RPCs to check!")); + } + + // TODO: collect into a counter instead? + let mut newest_other = None; + for oc in other_check.iter() { + match oc { + Ok(Ok(x)) => newest_other = newest_other.max(Some(x)), + Ok(Err(err)) => warn!("failed checking other rpc: {:?}", err), + Err(err) => warn!("internal error checking other rpc: {:?}", err), + } + } + + if let Some(newest_other) = newest_other { + let duration_since = newest_other + .time + .signed_duration_since(rpc_block.time) + .num_seconds(); + + match duration_since.abs().cmp(&max_lag) { + std::cmp::Ordering::Less | std::cmp::Ordering::Equal => {} + std::cmp::Ordering::Greater => match duration_since.cmp(&0) { + std::cmp::Ordering::Equal => unimplemented!(), + std::cmp::Ordering::Less => { + return Err(anyhow::anyhow!( + "Our RPC is too far ahead ({} s)! 
Something might be wrong.\n{:#}\nvs\n{:#}",
+                        duration_since.abs(),
+                        json!(rpc_block),
+                        json!(newest_other),
+                    ));
+                }
+                std::cmp::Ordering::Greater => {
+                    return Err(anyhow::anyhow!(
+                        "Our RPC is too far behind ({} s)!\n{:#}\nvs\n{:#}",
+                        duration_since,
+                        json!(rpc_block),
+                        json!(newest_other),
+                    ));
+                }
+            },
+        }
+
+        let now = Utc::now();
+
+        let block_age = now
+            .signed_duration_since(newest_other.max(&rpc_block).time)
+            .num_seconds();
+
+        match block_age.abs().cmp(&max_lag) {
+            std::cmp::Ordering::Less | std::cmp::Ordering::Equal => {}
+            std::cmp::Ordering::Greater => match block_age.cmp(&0) {
+                std::cmp::Ordering::Equal => unimplemented!(),
+                std::cmp::Ordering::Less => {
+                    return Err(anyhow::anyhow!(
+                        "Our clock is too far behind ({} s)! Something might be wrong.\n{:#}\nvs\n{:#}",
+                        block_age.abs(),
+                        json!(now),
+                        json!(newest_other),
+                    ));
+                }
+                std::cmp::Ordering::Greater => {
+                    return Err(anyhow::anyhow!(
+                        "block is too old ({} s)!\n{:#}\nvs\n{:#}",
+                        block_age,
+                        json!(now),
+                        json!(newest_other),
+                    ));
+                }
+            },
+        }
+    } else {
+        return Err(anyhow::anyhow!("No other RPC times to check!"));
+    }
+
+    debug!("rpc comparison ok: {:#}", json!(rpc_block));
+
+    Ok(())
+}
+
+// i don't think we need a whole provider. a simple http request is easiest
+async fn check_rpc(
+    block_hash: H256,
+    client: reqwest::Client,
+    rpc: String,
+) -> anyhow::Result<AbbreviatedBlock> {
+    let block_by_hash_request = json!({
+        "jsonrpc": "2.0",
+        "id": "1",
+        "method": "eth_getBlockByHash",
+        "params": [block_hash, false],
+    });
+
+    // TODO: don't unwrap! don't use the try operator
+    let response: JsonRpcResponse<Block<TxHash>> = client
+        .post(rpc)
+        .json(&block_by_hash_request)
+        .send()
+        .await?
+        .json()
+        .await?;
+
+    if let Some(result) = response.result {
+        let abbreviated = AbbreviatedBlock::from(result);
+
+        Ok(abbreviated)
+    } else if let Some(result) = response.error {
+        Err(anyhow!("Failed parsing response as JSON: {:?}", result))
+    } else {
+        unimplemented!("{:?}", response)
+    }
+}
diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
new file mode 100644
index 00000000..22b6aab1
--- /dev/null
+++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
@@ -0,0 +1,112 @@
+mod compare;
+mod simple;
+
+use argh::FromArgs;
+use futures::{
+    stream::{FuturesUnordered, StreamExt},
+    Future,
+};
+use std::time::Duration;
+use tokio::time::{interval, MissedTickBehavior};
+
+#[derive(FromArgs, PartialEq, Debug, Eq)]
+/// Loop healthchecks and send pager duty alerts if any fail
+#[argh(subcommand, name = "sentryd")]
+pub struct SentrydSubCommand {
+    #[argh(positional)]
+    /// a descriptive name for this node (probably the hostname)
+    location: String,
+
+    #[argh(positional)]
+    /// the main (HTTP only) web3-proxy being checked.
+ web3_proxy: String, + + #[argh(option)] + /// warning threshold for seconds between the rpc and best other_rpc's head blocks + max_lag: i64, + + #[argh(option)] + /// other (HTTP only) rpcs to compare the main rpc to + other_rpc: Vec, + + #[argh(option)] + /// other (HTTP only) web3-proxies to compare the main rpc to + other_proxy: Vec, + + #[argh(option)] + /// how many seconds between running checks + seconds: Option, +} + +impl SentrydSubCommand { + pub async fn main(self) -> anyhow::Result<()> { + // sentry logging should already be configured + + let seconds = self.seconds.unwrap_or(60); + + let mut handles = FuturesUnordered::new(); + + // spawn a bunch of health check loops that do their checks on an interval + + // check the main rpc's /health endpoint + { + let url = format!("{}/health", self.web3_proxy); + + let loop_f = a_loop(seconds, log::Level::Error, move || { + simple::main(url.clone()) + }); + + handles.push(tokio::spawn(loop_f)); + } + // check any other web3-proxy /health endpoints + for other_web3_proxy in self.other_proxy.iter() { + let url = format!("{}/health", other_web3_proxy); + + let loop_f = a_loop(seconds, log::Level::Warn, move || simple::main(url.clone())); + + handles.push(tokio::spawn(loop_f)); + } + + // compare the main web3-proxy head block to all web3-proxies and rpcs + { + let max_lag = self.max_lag; + let rpc = self.web3_proxy.clone(); + + let mut others = self.other_proxy.clone(); + + others.extend(self.other_rpc.clone()); + + let loop_f = a_loop(seconds, log::Level::Error, move || { + compare::main(rpc.clone(), others.clone(), max_lag) + }); + + handles.push(tokio::spawn(loop_f)); + } + + // wait for any returned values (if everything is working, they will all run forever) + while let Some(x) = handles.next().await { + // any errors that make it here will end the program + x??; + } + + Ok(()) + } +} + +async fn a_loop(seconds: u64, error_level: log::Level, f: impl Fn() -> T) -> anyhow::Result<()> +where + T: Future> + Send + 'static, +{ + let mut interval = interval(Duration::from_secs(seconds)); + + // TODO: should we warn if there are delays? + interval.set_missed_tick_behavior(MissedTickBehavior::Delay); + + loop { + interval.tick().await; + + if let Err(err) = f().await { + log::log!(error_level, "check failed: {:?}", err); + }; + } +} diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/simple.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/simple.rs new file mode 100644 index 00000000..1904553d --- /dev/null +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/simple.rs @@ -0,0 +1,22 @@ +use anyhow::Context; +use log::{debug, trace}; + +/// GET the url and return an error if it wasn't a success +pub async fn main(url: String) -> anyhow::Result<()> { + let r = reqwest::get(&url) + .await + .context(format!("Failed GET {}", url))?; + + if r.status().is_success() { + // warn if latency is high? + debug!("{} is healthy", url); + trace!("Successful {:#?}", r); + return Ok(()); + } + + let debug_str = format!("{:#?}", r); + + let body = r.text().await?; + + Err(anyhow::anyhow!("{}: {}", debug_str, body)) +} diff --git a/web3_proxy/src/frontend/mod.rs b/web3_proxy/src/frontend/mod.rs index 1507a835..f4005963 100644 --- a/web3_proxy/src/frontend/mod.rs +++ b/web3_proxy/src/frontend/mod.rs @@ -40,6 +40,8 @@ pub async fn serve(port: u16, proxy_app: Arc) -> anyhow::Result<() .time_to_live(Duration::from_secs(1)) .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default()); + // TODO: read config for if fastest/versus should be available publicly. 
default off
+
     // build our axum Router
     let app = Router::new()
         // TODO: i think these routes could be done a lot better

From 9fe63652839d99e4736e637d166eaa473c91262a Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Tue, 17 Jan 2023 15:52:31 -0800
Subject: [PATCH 08/80] separate max_age and max_lag

---
 web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs | 11 ++++++++---
 web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs     |  7 ++++++-
 2 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
index 50fcea7a..ada4681e 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
@@ -1,6 +1,6 @@
 use anyhow::{anyhow, Context};
 use chrono::{DateTime, Utc};
-use ethers::types::{Block, TxHash, H256, U64};
+use ethers::types::{Block, TxHash, H256};
 use futures::{stream::FuturesUnordered, StreamExt};
 use log::{debug, warn};
 use serde::{Deserialize, Serialize};
 use serde_json::json;
 use web3_proxy::jsonrpc::JsonRpcErrorData;
@@ -34,7 +34,12 @@ impl From<Block<TxHash>> for AbbreviatedBlock {
     }
 }

-pub async fn main(rpc: String, others: Vec<String>, max_lag: i64) -> anyhow::Result<()> {
+pub async fn main(
+    rpc: String,
+    others: Vec<String>,
+    max_age: i64,
+    max_lag: i64,
+) -> anyhow::Result<()> {
     let client = reqwest::Client::new();

     let block_by_number_request = json!({
@@ -118,7 +123,7 @@ pub async fn main(rpc: String, others: Vec<String>, max_lag: i64) -> anyhow::Result<()> {
             .signed_duration_since(newest_other.max(&rpc_block).time)
             .num_seconds();

-        match block_age.abs().cmp(&max_lag) {
+        match block_age.abs().cmp(&max_age) {
             std::cmp::Ordering::Less | std::cmp::Ordering::Equal => {}
             std::cmp::Ordering::Greater => match block_age.cmp(&0) {
                 std::cmp::Ordering::Equal => unimplemented!(),
diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
index 22b6aab1..51d0df7a 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
@@ -21,6 +21,10 @@ pub struct SentrydSubCommand {
     /// the main (HTTP only) web3-proxy being checked.
     web3_proxy: String,

+    #[argh(option)]
+    /// warning threshold for age of the best known head block
+    max_age: i64,
+
     #[argh(option)]
     /// warning threshold for seconds between the rpc and best other_rpc's head blocks
     max_lag: i64,
@@ -69,6 +73,7 @@ impl SentrydSubCommand {
         // compare the main web3-proxy head block to all web3-proxies and rpcs
         {
+            let max_age = self.max_age;
             let max_lag = self.max_lag;
             let rpc = self.web3_proxy.clone();

             let mut others = self.other_proxy.clone();

             others.extend(self.other_rpc.clone());

             let loop_f = a_loop(seconds, log::Level::Error, move || {
-                compare::main(rpc.clone(), others.clone(), max_lag)
+                compare::main(rpc.clone(), others.clone(), max_age, max_lag)
             });

             handles.push(tokio::spawn(loop_f));

From e4a223732a7805128436bd9ad2b017934ac73e36 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Tue, 17 Jan 2023 20:18:18 -0800
Subject: [PATCH 09/80] add APP_USER_AGENT to the status page

---
 web3_proxy/src/app/mod.rs                 | 7 ++++---
 web3_proxy/src/bin/web3_proxy.rs          | 4 +++-
 web3_proxy/src/bin/web3_proxy_cli/main.rs | 6 ++++--
 web3_proxy/src/frontend/status.rs         | 3 ++-
 4 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/web3_proxy/src/app/mod.rs b/web3_proxy/src/app/mod.rs
index 828dc6bb..44df90af 100644
--- a/web3_proxy/src/app/mod.rs
+++ b/web3_proxy/src/app/mod.rs
@@ -56,11 +56,12 @@ use tokio::time::{sleep, timeout};
 use ulid::Ulid;

 // TODO: make this customizable?
+// TODO: include GIT_REF in here. i had trouble getting https://docs.rs/vergen/latest/vergen/ to work with a workspace. also .git is in .dockerignore pub static APP_USER_AGENT: &str = concat!( - "satoshiandkin/", + "llamanodes_", env!("CARGO_PKG_NAME"), - "/", - env!("CARGO_PKG_VERSION"), + "/v", + env!("CARGO_PKG_VERSION") ); /// TODO: allow customizing the request period? diff --git a/web3_proxy/src/bin/web3_proxy.rs b/web3_proxy/src/bin/web3_proxy.rs index 656fdce4..f1461a22 100644 --- a/web3_proxy/src/bin/web3_proxy.rs +++ b/web3_proxy/src/bin/web3_proxy.rs @@ -17,7 +17,7 @@ use std::path::Path; use std::sync::atomic::{self, AtomicUsize}; use tokio::runtime; use tokio::sync::broadcast; -use web3_proxy::app::{flatten_handle, flatten_handles, Web3ProxyApp}; +use web3_proxy::app::{flatten_handle, flatten_handles, Web3ProxyApp, APP_USER_AGENT}; use web3_proxy::config::{CliConfig, TopConfig}; use web3_proxy::{frontend, metrics_frontend}; @@ -235,6 +235,8 @@ fn main() -> anyhow::Result<()> { log::set_max_level(max_level); + info!("{}", APP_USER_AGENT); + // we used to do this earlier, but now we attach sentry debug!("CLI config @ {:#?}", cli_config.config); diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs index e1241908..85dd901b 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/main.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs @@ -14,10 +14,10 @@ mod user_export; mod user_import; use argh::FromArgs; -use log::warn; +use log::{info, warn}; use std::fs; use web3_proxy::{ - app::{get_db, get_migrated_db}, + app::{get_db, get_migrated_db, APP_USER_AGENT}, config::TopConfig, }; @@ -126,6 +126,8 @@ async fn main() -> anyhow::Result<()> { log::set_max_level(max_level); + info!("{}", APP_USER_AGENT); + match cli_config.sub_command { SubCommand::ChangeUserAddress(x) => { let db_conn = get_db(cli_config.db_url, 1, 1).await?; diff --git a/web3_proxy/src/frontend/status.rs b/web3_proxy/src/frontend/status.rs index 2e4a8198..df7f8bc9 100644 --- a/web3_proxy/src/frontend/status.rs +++ b/web3_proxy/src/frontend/status.rs @@ -4,7 +4,7 @@ //! They will eventually move to another port. use super::{FrontendResponseCache, FrontendResponseCaches}; -use crate::app::Web3ProxyApp; +use crate::app::{Web3ProxyApp, APP_USER_AGENT}; use axum::{http::StatusCode, response::IntoResponse, Extension, Json}; use axum_macros::debug_handler; use serde_json::json; @@ -33,6 +33,7 @@ pub async fn status( .get_with(FrontendResponseCaches::Status, async { // TODO: what else should we include? 
uptime, cache hit rates, cpu load, memory used
         let body = json!({
+            "version": APP_USER_AGENT,
             "chain_id": app.config.chain_id,
             "balanced_rpcs": app.balanced_rpcs,
             "private_rpcs": app.private_rpcs,

From 053947de408fc5ecdd32d8f0e00c45a06b2431b5 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Tue, 17 Jan 2023 21:26:10 -0800
Subject: [PATCH 10/80] one bin for everything

---
 Dockerfile                                  |   4 +-
 README.md                                   |   4 +-
 TODO.md                                     |   7 +-
 web3_proxy/Cargo.toml                       |   2 +-
 web3_proxy/src/bin/web3_proxy.rs            | 408 --------------------
 web3_proxy/src/bin/web3_proxy_cli/daemon.rs | 305 +++++++++++++++
 web3_proxy/src/bin/web3_proxy_cli/main.rs   | 278 +++++++++----
 7 files changed, 516 insertions(+), 492 deletions(-)
 delete mode 100644 web3_proxy/src/bin/web3_proxy.rs
 create mode 100644 web3_proxy/src/bin/web3_proxy_cli/daemon.rs

diff --git a/Dockerfile b/Dockerfile
index c1487de7..2f35c3bf 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -13,7 +13,9 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
 FROM debian:bullseye-slim
 
 COPY --from=builder /opt/bin/* /usr/local/bin/
-ENTRYPOINT ["web3_proxy"]
+
+# TODO: be careful changing this to just web3_proxy_cli. if you don't do it correctly, there will be a production outage!
+ENTRYPOINT ["web3_proxy_cli", "proxyd"]
 
 # TODO: lower log level when done with prototyping
 ENV RUST_LOG "web3_proxy=debug"
diff --git a/README.md b/README.md
index d16a3c66..6f2e67c0 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,7 @@ Options:
 Start the server with the defaults (listen on `http://localhost:8544` and use `./config/development.toml` which uses the database and cache running under docker and proxies to a bunch of public nodes:
 
 ```
-cargo run --release
+cargo run --release -- proxyd
 ```
 
 ## Common commands
@@ -45,7 +45,7 @@ cargo run --release
 Create a user:
 
 ```
-cargo run --bin web3_proxy_cli -- --db-url "$YOUR_DB_URL" create_user --address "$USER_ADDRESS_0x"
+cargo run -- --db-url "$YOUR_DB_URL" create_user --address "$USER_ADDRESS_0x"
 ```
 
 Check that the proxy is working:
diff --git a/TODO.md b/TODO.md
index 5b10936b..081844ac 100644
--- a/TODO.md
+++ b/TODO.md
@@ -300,6 +300,12 @@ These are not yet ordered. There might be duplicates. We might not actually need
 - [x] if private txs are disabled, only send transactions to some of our servers. we were DOSing ourselves with transactions and slowing down sync
 - [x] retry if we get "the method X is not available"
 - [x] remove weight. we don't use it anymore. tiers are what we use now
+- [x] make deadlock feature optional
+- [x] standalone healthcheck daemon (sentryd)
+- [x] status page should show version
+- [x] combine the proxy and cli into one bin
+- [-] proxy mode for benchmarking all backends
+- [-] proxy mode for sending to multiple backends
 - [-] let users choose a % of reverts to log (or maybe x/second). someone like curve logging all reverts will be a BIG database very quickly
   - this must be opt-in and spawned in the background since it will slow things down and will make their calls less private
 - [ ] automatic pruning of old revert logs once too many are collected
@@ -578,7 +584,6 @@ in another repo: event subscriber
 - [ ] sentry profiling
 - [ ] support alchemy_minedTransactions
 - [ ] debug print of user::Model's address is a big vec of numbers. make that hex somehow
-- [ ] should we combine the proxy and cli into one bin?
 - [ ] make it so you can put a string like "LN arbitrum" into the create_user script, and have it automatically turn it into 0x4c4e20617262697472756d000000000000000000.
- [ ] if --address not given, use the --description - [ ] if it is too long, (the last 4 bytes must be zero), give an error so descriptions like this stand out diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml index 9da390b1..e0c64087 100644 --- a/web3_proxy/Cargo.toml +++ b/web3_proxy/Cargo.toml @@ -2,7 +2,7 @@ name = "web3_proxy" version = "0.12.0" edition = "2021" -default-run = "web3_proxy" +default-run = "web3_proxy_cli" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/web3_proxy/src/bin/web3_proxy.rs b/web3_proxy/src/bin/web3_proxy.rs deleted file mode 100644 index f1461a22..00000000 --- a/web3_proxy/src/bin/web3_proxy.rs +++ /dev/null @@ -1,408 +0,0 @@ -//! Web3_proxy is a fast caching and load balancing proxy for web3 (Ethereum or similar) JsonRPC servers. -//! -//! Signed transactions (eth_sendRawTransaction) are sent in parallel to the configured private RPCs (eden, ethermine, flashbots, etc.). -//! -//! All other requests are sent to an RPC server on the latest block (alchemy, moralis, rivet, your own node, or one of many other providers). -//! If multiple servers are in sync, the fastest server is prioritized. Since the fastest server is most likely to serve requests, slow servers are unlikely to ever get any requests. - -//#![warn(missing_docs)] -#![forbid(unsafe_code)] - -use anyhow::Context; -use futures::StreamExt; -use log::{debug, error, info, warn}; -use num::Zero; -use std::fs; -use std::path::Path; -use std::sync::atomic::{self, AtomicUsize}; -use tokio::runtime; -use tokio::sync::broadcast; -use web3_proxy::app::{flatten_handle, flatten_handles, Web3ProxyApp, APP_USER_AGENT}; -use web3_proxy::config::{CliConfig, TopConfig}; -use web3_proxy::{frontend, metrics_frontend}; - -#[cfg(feature = "deadlock")] -use parking_lot::deadlock; -#[cfg(feature = "deadlock")] -use std::thread; -#[cfg(feature = "deadlock")] -use tokio::time::Duration; - -fn run( - shutdown_sender: broadcast::Sender<()>, - cli_config: CliConfig, - top_config: TopConfig, -) -> anyhow::Result<()> { - debug!("{:?}", cli_config); - debug!("{:?}", top_config); - - let mut shutdown_receiver = shutdown_sender.subscribe(); - - #[cfg(feature = "deadlock")] - { - // spawn a thread for deadlock detection - thread::spawn(move || loop { - thread::sleep(Duration::from_secs(10)); - let deadlocks = deadlock::check_deadlock(); - if deadlocks.is_empty() { - continue; - } - - println!("{} deadlocks detected", deadlocks.len()); - for (i, threads) in deadlocks.iter().enumerate() { - println!("Deadlock #{}", i); - for t in threads { - println!("Thread Id {:#?}", t.thread_id()); - println!("{:#?}", t.backtrace()); - } - } - }); - } - - // set up tokio's async runtime - let mut rt_builder = runtime::Builder::new_multi_thread(); - - let chain_id = top_config.app.chain_id; - rt_builder.enable_all().thread_name_fn(move || { - static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0); - // TODO: what ordering? 
i think we want seqcst so that these all happen in order, but that might be stricter than we really need - let worker_id = ATOMIC_ID.fetch_add(1, atomic::Ordering::SeqCst); - // TODO: i think these max at 15 characters - format!("web3-{}-{}", chain_id, worker_id) - }); - - if cli_config.workers > 0 { - rt_builder.worker_threads(cli_config.workers); - } - - // start tokio's async runtime - let rt = rt_builder.build()?; - - let num_workers = rt.metrics().num_workers(); - info!("num_workers: {}", num_workers); - - rt.block_on(async { - let app_frontend_port = cli_config.port; - let app_prometheus_port = cli_config.prometheus_port; - - // start the main app - let mut spawned_app = - Web3ProxyApp::spawn(top_config, num_workers, shutdown_sender.subscribe()).await?; - - let frontend_handle = - tokio::spawn(frontend::serve(app_frontend_port, spawned_app.app.clone())); - - // TODO: should we put this in a dedicated thread? - let prometheus_handle = tokio::spawn(metrics_frontend::serve( - spawned_app.app.clone(), - app_prometheus_port, - )); - - // if everything is working, these should both run forever - tokio::select! { - x = flatten_handles(spawned_app.app_handles) => { - match x { - Ok(_) => info!("app_handle exited"), - Err(e) => { - return Err(e); - } - } - } - x = flatten_handle(frontend_handle) => { - match x { - Ok(_) => info!("frontend exited"), - Err(e) => { - return Err(e); - } - } - } - x = flatten_handle(prometheus_handle) => { - match x { - Ok(_) => info!("prometheus exited"), - Err(e) => { - return Err(e); - } - } - } - x = tokio::signal::ctrl_c() => { - match x { - Ok(_) => info!("quiting from ctrl-c"), - Err(e) => { - return Err(e.into()); - } - } - } - x = shutdown_receiver.recv() => { - match x { - Ok(_) => info!("quiting from shutdown receiver"), - Err(e) => { - return Err(e.into()); - } - } - } - }; - - // one of the handles stopped. send a value so the others know to shut down - if let Err(err) = shutdown_sender.send(()) { - warn!("shutdown sender err={:?}", err); - }; - - // wait for things like saving stats to the database to complete - info!("waiting on important background tasks"); - let mut background_errors = 0; - while let Some(x) = spawned_app.background_handles.next().await { - match x { - Err(e) => { - error!("{:?}", e); - background_errors += 1; - } - Ok(Err(e)) => { - error!("{:?}", e); - background_errors += 1; - } - Ok(Ok(_)) => continue, - } - } - - if background_errors.is_zero() { - info!("finished"); - Ok(()) - } else { - // TODO: collect instead? 
-            Err(anyhow::anyhow!("finished with errors!"))
-        }
-    })
-}
-
-fn main() -> anyhow::Result<()> {
-    // if RUST_LOG isn't set, configure a default
-    let rust_log = match std::env::var("RUST_LOG") {
-        Ok(x) => x,
-        Err(_) => "info,ethers=debug,redis_rate_limit=debug,web3_proxy=debug".to_string(),
-    };
-
-    // this probably won't matter for us in docker, but better safe than sorry
-    fdlimit::raise_fd_limit();
-
-    // initial configuration from flags
-    let cli_config: CliConfig = argh::from_env();
-
-    // convert to absolute path so error logging is most helpful
-    let config_path = Path::new(&cli_config.config)
-        .canonicalize()
-        .context(format!(
-            "checking full path of {} and {}",
-            ".", // TODO: get cwd somehow
-            cli_config.config
-        ))?;
-
-    // advanced configuration is on disk
-    let top_config: String = fs::read_to_string(config_path.clone())
-        .context(format!("reading config at {}", config_path.display()))?;
-    let top_config: TopConfig = toml::from_str(&top_config)
-        .context(format!("parsing config at {}", config_path.display()))?;
-
-    // TODO: this doesn't seem to do anything
-    proctitle::set_title(format!("web3_proxy-{}", top_config.app.chain_id));
-
-    let logger = env_logger::builder().parse_filters(&rust_log).build();
-
-    let max_level = logger.filter();
-
-    // connect to sentry for error reporting
-    // if no sentry, only log to stdout
-    let _sentry_guard = if let Some(sentry_url) = top_config.app.sentry_url.clone() {
-        let logger = sentry::integrations::log::SentryLogger::with_dest(logger);
-
-        log::set_boxed_logger(Box::new(logger)).unwrap();
-
-        let guard = sentry::init((
-            sentry_url,
-            sentry::ClientOptions {
-                release: sentry::release_name!(),
-                // TODO: Set this a to lower value (from config) in production
-                traces_sample_rate: 1.0,
-                ..Default::default()
-            },
-        ));
-
-        Some(guard)
-    } else {
-        log::set_boxed_logger(Box::new(logger)).unwrap();
-
-        None
-    };
-
-    log::set_max_level(max_level);
-
-    info!("{}", APP_USER_AGENT);
-
-    // we used to do this earlier, but now we attach sentry
-    debug!("CLI config @ {:#?}", cli_config.config);
-
-    // tokio has code for catching ctrl+c so we use that
-    // this shutdown sender is currently only used in tests, but we might make a /shutdown endpoint or something
-    // we do not need this receiver. new receivers are made by `shutdown_sender.subscribe()`
-    let (shutdown_sender, _) = broadcast::channel(1);
-
-    run(shutdown_sender, cli_config, top_config)
-}
-
-#[cfg(test)]
-mod tests {
-    use ethers::{
-        prelude::{Http, Provider, U256},
-        utils::Anvil,
-    };
-    use hashbrown::HashMap;
-    use std::env;
-    use std::thread;
-
-    use web3_proxy::{
-        config::{AppConfig, Web3ConnectionConfig},
-        rpcs::blockchain::ArcBlock,
-    };
-
-    use super::*;
-
-    #[tokio::test]
-    async fn it_works() {
-        // TODO: move basic setup into a test fixture
-        let path = env::var("PATH").unwrap();
-
-        println!("path: {}", path);
-
-        // TODO: how should we handle logs in this?
-        // TODO: option for super verbose logs
-        std::env::set_var("RUST_LOG", "info,web3_proxy=debug");
-
-        let _ = env_logger::builder().is_test(true).try_init();
-
-        let anvil = Anvil::new().spawn();
-
-        println!("Anvil running at `{}`", anvil.endpoint());
-
-        let anvil_provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
-
-        // mine a block because my code doesn't like being on block 0
-        // TODO: make block 0 okay? is it okay now?
- let _: U256 = anvil_provider - .request("evm_mine", None::<()>) - .await - .unwrap(); - - // make a test CliConfig - let cli_config = CliConfig { - port: 0, - prometheus_port: 0, - workers: 4, - config: "./does/not/exist/test.toml".to_string(), - cookie_key_filename: "./does/not/exist/development_cookie_key".to_string(), - }; - - // make a test TopConfig - // TODO: load TopConfig from a file? CliConfig could have `cli_config.load_top_config`. would need to inject our endpoint ports - let top_config = TopConfig { - app: AppConfig { - chain_id: 31337, - default_user_max_requests_per_period: Some(6_000_000), - min_sum_soft_limit: 1, - min_synced_rpcs: 1, - public_requests_per_period: Some(1_000_000), - response_cache_max_bytes: 10_usize.pow(7), - redirect_public_url: Some("example.com/".to_string()), - redirect_rpc_key_url: Some("example.com/{{rpc_key_id}}".to_string()), - ..Default::default() - }, - balanced_rpcs: HashMap::from([ - ( - "anvil".to_string(), - Web3ConnectionConfig { - disabled: false, - display_name: None, - url: anvil.endpoint(), - block_data_limit: None, - soft_limit: 100, - hard_limit: None, - tier: 0, - subscribe_txs: Some(false), - extra: Default::default(), - }, - ), - ( - "anvil_ws".to_string(), - Web3ConnectionConfig { - disabled: false, - display_name: None, - url: anvil.ws_endpoint(), - block_data_limit: None, - soft_limit: 100, - hard_limit: None, - tier: 0, - subscribe_txs: Some(false), - extra: Default::default(), - }, - ), - ]), - private_rpcs: None, - extra: Default::default(), - }; - - let (shutdown_sender, _) = broadcast::channel(1); - - // spawn another thread for running the app - // TODO: allow launching into the local tokio runtime instead of creating a new one? - let handle = { - let shutdown_sender = shutdown_sender.clone(); - - thread::spawn(move || run(shutdown_sender, cli_config, top_config)) - }; - - // TODO: do something to the node. 
query latest block, mine another block, query again
-        let proxy_provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
-
-        let anvil_result = anvil_provider
-            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
-            .await
-            .unwrap()
-            .unwrap();
-        let proxy_result = proxy_provider
-            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
-            .await
-            .unwrap()
-            .unwrap();
-
-        assert_eq!(anvil_result, proxy_result);
-
-        let first_block_num = anvil_result.number.unwrap();
-
-        let _: U256 = anvil_provider
-            .request("evm_mine", None::<()>)
-            .await
-            .unwrap();
-
-        let anvil_result = anvil_provider
-            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
-            .await
-            .unwrap()
-            .unwrap();
-        let proxy_result = proxy_provider
-            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
-            .await
-            .unwrap()
-            .unwrap();
-
-        assert_eq!(anvil_result, proxy_result);
-
-        let second_block_num = anvil_result.number.unwrap();
-
-        assert_eq!(first_block_num, second_block_num - 1);
-
-        // tell the test app to shut down
-        shutdown_sender.send(()).unwrap();
-
-        println!("waiting for shutdown...");
-        // TODO: panic if a timeout is reached
-        handle.join().unwrap().unwrap();
-    }
-}
diff --git a/web3_proxy/src/bin/web3_proxy_cli/daemon.rs b/web3_proxy/src/bin/web3_proxy_cli/daemon.rs
new file mode 100644
index 00000000..09998ea4
--- /dev/null
+++ b/web3_proxy/src/bin/web3_proxy_cli/daemon.rs
@@ -0,0 +1,305 @@
+#![forbid(unsafe_code)]
+
+use argh::FromArgs;
+use futures::StreamExt;
+use log::{error, info, warn};
+use num::Zero;
+use tokio::sync::broadcast;
+use web3_proxy::app::{flatten_handle, flatten_handles, Web3ProxyApp};
+use web3_proxy::config::TopConfig;
+use web3_proxy::{frontend, metrics_frontend};
+
+/// start the web3_proxy server
+#[derive(FromArgs, PartialEq, Debug, Eq)]
+#[argh(subcommand, name = "proxyd")]
+pub struct ProxydSubCommand {
+    /// path to a toml of rpc servers
+    /// what port the proxy should listen on
+    #[argh(option, default = "8544")]
+    pub port: u16,
+
+    /// what port the proxy should expose prometheus stats on
+    #[argh(option, default = "8543")]
+    pub prometheus_port: u16,
+}
+
+impl ProxydSubCommand {
+    pub async fn main(self, top_config: TopConfig, num_workers: usize) -> anyhow::Result<()> {
+        let (shutdown_sender, _) = broadcast::channel(1);
+
+        run(
+            top_config,
+            self.port,
+            self.prometheus_port,
+            num_workers,
+            shutdown_sender,
+        )
+        .await
+    }
+}
+
+async fn run(
+    top_config: TopConfig,
+    frontend_port: u16,
+    prometheus_port: u16,
+    num_workers: usize,
+    shutdown_sender: broadcast::Sender<()>,
+) -> anyhow::Result<()> {
+    // tokio has code for catching ctrl+c so we use that
+    // this shutdown sender is currently only used in tests, but we might make a /shutdown endpoint or something
+    // we do not need this receiver. new receivers are made by `shutdown_sender.subscribe()`
+
+    let app_frontend_port = frontend_port;
+    let app_prometheus_port = prometheus_port;
+
+    // start the main app
+    let mut spawned_app =
+        Web3ProxyApp::spawn(top_config, num_workers, shutdown_sender.subscribe()).await?;
+
+    let frontend_handle = tokio::spawn(frontend::serve(app_frontend_port, spawned_app.app.clone()));
+
+    // TODO: should we put this in a dedicated thread?
+    let prometheus_handle = tokio::spawn(metrics_frontend::serve(
+        spawned_app.app.clone(),
+        app_prometheus_port,
+    ));
+
+    let mut shutdown_receiver = shutdown_sender.subscribe();
+
+    // if everything is working, these should both run forever
+    tokio::select! {
+        x = flatten_handles(spawned_app.app_handles) => {
+            match x {
+                Ok(_) => info!("app_handle exited"),
+                Err(e) => {
+                    return Err(e);
+                }
+            }
+        }
+        x = flatten_handle(frontend_handle) => {
+            match x {
+                Ok(_) => info!("frontend exited"),
+                Err(e) => {
+                    return Err(e);
+                }
+            }
+        }
+        x = flatten_handle(prometheus_handle) => {
+            match x {
+                Ok(_) => info!("prometheus exited"),
+                Err(e) => {
+                    return Err(e);
+                }
+            }
+        }
+        x = tokio::signal::ctrl_c() => {
+            match x {
+                Ok(_) => info!("quitting from ctrl-c"),
+                Err(e) => {
+                    return Err(e.into());
+                }
+            }
+        }
+        x = shutdown_receiver.recv() => {
+            match x {
+                Ok(_) => info!("quitting from shutdown receiver"),
+                Err(e) => {
+                    return Err(e.into());
+                }
+            }
+        }
+    };
+
+    // one of the handles stopped. send a value so the others know to shut down
+    if let Err(err) = shutdown_sender.send(()) {
+        warn!("shutdown sender err={:?}", err);
+    };
+
+    // wait for things like saving stats to the database to complete
+    info!("waiting on important background tasks");
+    let mut background_errors = 0;
+    while let Some(x) = spawned_app.background_handles.next().await {
+        match x {
+            Err(e) => {
+                error!("{:?}", e);
+                background_errors += 1;
+            }
+            Ok(Err(e)) => {
+                error!("{:?}", e);
+                background_errors += 1;
+            }
+            Ok(Ok(_)) => continue,
+        }
+    }
+
+    if background_errors.is_zero() {
+        info!("finished");
+        Ok(())
+    } else {
+        // TODO: collect instead?
+        Err(anyhow::anyhow!("finished with errors!"))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use ethers::{
+        prelude::{Http, Provider, U256},
+        utils::Anvil,
+    };
+    use hashbrown::HashMap;
+    use std::env;
+
+    use web3_proxy::{
+        config::{AppConfig, Web3ConnectionConfig},
+        rpcs::blockchain::ArcBlock,
+    };
+
+    use super::*;
+
+    #[tokio::test]
+    async fn it_works() {
+        // TODO: move basic setup into a test fixture
+        let path = env::var("PATH").unwrap();
+
+        println!("path: {}", path);
+
+        // TODO: how should we handle logs in this?
+        // TODO: option for super verbose logs
+        std::env::set_var("RUST_LOG", "info,web3_proxy=debug");
+
+        let _ = env_logger::builder().is_test(true).try_init();
+
+        let anvil = Anvil::new().spawn();
+
+        println!("Anvil running at `{}`", anvil.endpoint());
+
+        let anvil_provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
+
+        // mine a block because my code doesn't like being on block 0
+        // TODO: make block 0 okay? is it okay now?
+        let _: U256 = anvil_provider
+            .request("evm_mine", None::<()>)
+            .await
+            .unwrap();
+
+        // make a test TopConfig
+        // TODO: load TopConfig from a file? CliConfig could have `cli_config.load_top_config`. would need to inject our endpoint ports
+        let top_config = TopConfig {
+            app: AppConfig {
+                chain_id: 31337,
+                default_user_max_requests_per_period: Some(6_000_000),
+                min_sum_soft_limit: 1,
+                min_synced_rpcs: 1,
+                public_requests_per_period: Some(1_000_000),
+                response_cache_max_bytes: 10_usize.pow(7),
+                redirect_public_url: Some("example.com/".to_string()),
+                redirect_rpc_key_url: Some("example.com/{{rpc_key_id}}".to_string()),
+                ..Default::default()
+            },
+            balanced_rpcs: HashMap::from([
+                (
+                    "anvil".to_string(),
+                    Web3ConnectionConfig {
+                        disabled: false,
+                        display_name: None,
+                        url: anvil.endpoint(),
+                        backup: None,
+                        block_data_limit: None,
+                        soft_limit: 100,
+                        hard_limit: None,
+                        tier: 0,
+                        subscribe_txs: Some(false),
+                        extra: Default::default(),
+                    },
+                ),
+                (
+                    "anvil_ws".to_string(),
+                    Web3ConnectionConfig {
+                        disabled: false,
+                        display_name: None,
+                        url: anvil.ws_endpoint(),
+                        backup: None,
+                        block_data_limit: None,
+                        soft_limit: 100,
+                        hard_limit: None,
+                        tier: 0,
+                        subscribe_txs: Some(false),
+                        extra: Default::default(),
+                    },
+                ),
+            ]),
+            private_rpcs: None,
+            extra: Default::default(),
+        };
+
+        let (shutdown_sender, _) = broadcast::channel(1);
+
+        // spawn another thread for running the app
+        // TODO: allow launching into the local tokio runtime instead of creating a new one?
+        let handle = {
+            let shutdown_sender = shutdown_sender.clone();
+
+            let frontend_port = 0;
+            let prometheus_port = 0;
+
+            tokio::spawn(async move {
+                run(
+                    top_config,
+                    frontend_port,
+                    prometheus_port,
+                    2,
+                    shutdown_sender,
+                )
+                .await
+            })
+        };
+
+        // TODO: do something to the node. query latest block, mine another block, query again
+        let proxy_provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
+
+        let anvil_result = anvil_provider
+            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
+            .await
+            .unwrap()
+            .unwrap();
+        let proxy_result = proxy_provider
+            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
+            .await
+            .unwrap()
+            .unwrap();
+
+        assert_eq!(anvil_result, proxy_result);
+
+        let first_block_num = anvil_result.number.unwrap();
+
+        let _: U256 = anvil_provider
+            .request("evm_mine", None::<()>)
+            .await
+            .unwrap();
+
+        let anvil_result = anvil_provider
+            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
+            .await
+            .unwrap()
+            .unwrap();
+        let proxy_result = proxy_provider
+            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
+            .await
+            .unwrap()
+            .unwrap();
+
+        assert_eq!(anvil_result, proxy_result);
+
+        let second_block_num = anvil_result.number.unwrap();
+
+        assert_eq!(first_block_num, second_block_num - 1);
+
+        // tell the test app to shut down
+        shutdown_sender.send(()).unwrap();
+
+        println!("waiting for shutdown...");
+        // TODO: panic if a timeout is reached
+        handle.await.unwrap().unwrap();
+    }
+}
diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs
index 85dd901b..a99ea54d 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/main.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs
@@ -5,6 +5,7 @@ mod change_user_tier_by_key;
 mod check_config;
 mod count_users;
 mod create_user;
+mod daemon;
 mod drop_migration_lock;
 mod list_user_tier;
 mod rpc_accounting;
@@ -13,27 +14,41 @@ mod transfer_key;
 mod user_export;
 mod user_import;
 
+use anyhow::Context;
 use argh::FromArgs;
 use log::{info, warn};
-use std::fs;
+use std::{
+    fs,
+    path::Path,
+    sync::atomic::{self, AtomicUsize},
+};
+use tokio::runtime;
 use web3_proxy::{
     app::{get_db, get_migrated_db, APP_USER_AGENT},
     config::TopConfig,
 };
 
+#[cfg(feature = "deadlock")]
+use parking_lot::deadlock;
+#[cfg(feature = "deadlock")]
+use std::thread;
+#[cfg(feature = "deadlock")]
+use tokio::time::Duration;
+
 #[derive(Debug, FromArgs)]
 /// Command line interface for admins to interact with web3_proxy
-pub struct CliConfig {
-    /// path to the application config (optional).
+pub struct Web3ProxyCli {
+    /// path to the application config (only required for some commands; defaults to dev config).
     #[argh(option)]
     pub config: Option<String>,
 
-    /// if no config, what database the client should connect to. Defaults to dev db
-    #[argh(
-        option,
-        default = "\"mysql://root:dev_web3_proxy@127.0.0.1:13306/dev_web3_proxy\".to_string()"
-    )]
-    pub db_url: String,
+    /// number of worker threads. Defaults to the number of logical processors
+    #[argh(option, default = "0")]
+    pub workers: usize,
+
+    /// if no config, what database the client should connect to (only required for some commands; defaults to dev db)
+    #[argh(option)]
+    pub db_url: Option<String>,
 
     /// if no config, what sentry url the client should connect to
     #[argh(option)]
@@ -55,6 +70,7 @@ enum SubCommand {
     CountUsers(count_users::CountUsersSubCommand),
     CreateUser(create_user::CreateUserSubCommand),
     DropMigrationLock(drop_migration_lock::DropMigrationLockSubCommand),
+    Proxyd(daemon::ProxydSubCommand),
     RpcAccounting(rpc_accounting::RpcAccountingSubCommand),
     Sentryd(sentryd::SentrydSubCommand),
     TransferKey(transfer_key::TransferKeySubCommand),
@@ -65,32 +81,65 @@ enum SubCommand {
     // TODO: sub command to change a user's tier
 }
 
-#[tokio::main]
-async fn main() -> anyhow::Result<()> {
+fn main() -> anyhow::Result<()> {
+    #[cfg(feature = "deadlock")]
+    {
+        // spawn a thread for deadlock detection
+        thread::spawn(move || loop {
+            thread::sleep(Duration::from_secs(10));
+            let deadlocks = deadlock::check_deadlock();
+            if deadlocks.is_empty() {
+                continue;
+            }
+
+            println!("{} deadlocks detected", deadlocks.len());
+            for (i, threads) in deadlocks.iter().enumerate() {
+                println!("Deadlock #{}", i);
+                for t in threads {
+                    println!("Thread Id {:#?}", t.thread_id());
+                    println!("{:#?}", t.backtrace());
+                }
+            }
+        });
+    }
+
     // if RUST_LOG isn't set, configure a default
     // TODO: is there a better way to do this?
let rust_log = match std::env::var("RUST_LOG") { Ok(x) => x, - Err(_) => "info,web3_proxy=debug,web3_proxy_cli=debug".to_string(), + Err(_) => "info,ethers=debug,redis_rate_limit=debug,web3_proxy=debug,web3_proxy_cli=debug" + .to_string(), }; // this probably won't matter for us in docker, but better safe than sorry fdlimit::raise_fd_limit(); - let mut cli_config: CliConfig = argh::from_env(); + let mut cli_config: Web3ProxyCli = argh::from_env(); + + if cli_config.config.is_none() && cli_config.db_url.is_none() { + info!("defaulting to development config"); + cli_config.config = Some("./config/development.toml".to_string()); + } + + let top_config = if let Some(top_config_path) = cli_config.config.clone() { + let top_config_path = Path::new(&top_config_path) + .canonicalize() + .context(format!("checking for config at {}", top_config_path))?; - let _top_config = if let Some(top_config_path) = cli_config.config.clone() { let top_config: String = fs::read_to_string(top_config_path)?; let top_config: TopConfig = toml::from_str(&top_config)?; - if let Some(db_url) = top_config.app.db_url.clone() { - cli_config.db_url = db_url; + if cli_config.db_url.is_none() { + cli_config.db_url = top_config.app.db_url.clone(); } if let Some(sentry_url) = top_config.app.sentry_url.clone() { cli_config.sentry_url = Some(sentry_url); } + // TODO: this doesn't seem to do anything + proctitle::set_title(format!("web3_proxy-{}", top_config.app.chain_id)); + Some(top_config) } else { None @@ -128,70 +177,141 @@ async fn main() -> anyhow::Result<()> { info!("{}", APP_USER_AGENT); - match cli_config.sub_command { - SubCommand::ChangeUserAddress(x) => { - let db_conn = get_db(cli_config.db_url, 1, 1).await?; + // set up tokio's async runtime + let mut rt_builder = runtime::Builder::new_multi_thread(); - x.main(&db_conn).await - } - SubCommand::ChangeUserTier(x) => { - let db_conn = get_db(cli_config.db_url, 1, 1).await?; + if let Some(top_config) = top_config.as_ref() { + let chain_id = top_config.app.chain_id; - x.main(&db_conn).await - } - SubCommand::ChangeUserTierByAddress(x) => { - let db_conn = get_db(cli_config.db_url, 1, 1).await?; - - x.main(&db_conn).await - } - SubCommand::ChangeUserTierByKey(x) => { - let db_conn = get_db(cli_config.db_url, 1, 1).await?; - - x.main(&db_conn).await - } - SubCommand::CheckConfig(x) => x.main().await, - SubCommand::CreateUser(x) => { - let db_conn = get_migrated_db(cli_config.db_url, 1, 1).await?; - - x.main(&db_conn).await - } - SubCommand::CountUsers(x) => { - let db_conn = get_db(cli_config.db_url, 1, 1).await?; - - x.main(&db_conn).await - } - SubCommand::DropMigrationLock(x) => { - // very intentionally, do NOT run migrations here - let db_conn = get_db(cli_config.db_url, 1, 1).await?; - - x.main(&db_conn).await - } - SubCommand::Sentryd(x) => { - if cli_config.sentry_url.is_none() { - warn!("sentry_url is not set! 
Logs will only show in this console"); - } - - x.main().await - } - SubCommand::RpcAccounting(x) => { - let db_conn = get_migrated_db(cli_config.db_url, 1, 1).await?; - - x.main(&db_conn).await - } - SubCommand::TransferKey(x) => { - let db_conn = get_db(cli_config.db_url, 1, 1).await?; - - x.main(&db_conn).await - } - SubCommand::UserExport(x) => { - let db_conn = get_migrated_db(cli_config.db_url, 1, 1).await?; - - x.main(&db_conn).await - } - SubCommand::UserImport(x) => { - let db_conn = get_migrated_db(cli_config.db_url, 1, 1).await?; - - x.main(&db_conn).await - } + rt_builder.enable_all().thread_name_fn(move || { + static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0); + // TODO: what ordering? i think we want seqcst so that these all happen in order, but that might be stricter than we really need + let worker_id = ATOMIC_ID.fetch_add(1, atomic::Ordering::SeqCst); + // TODO: i think these max at 15 characters + format!("web3-{}-{}", chain_id, worker_id) + }); } + + // start tokio's async runtime + let rt = rt_builder.build()?; + + let num_workers = rt.metrics().num_workers(); + info!("num_workers: {}", num_workers); + + rt.block_on(async { + match cli_config.sub_command { + SubCommand::ChangeUserAddress(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::ChangeUserTier(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::ChangeUserTierByAddress(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::ChangeUserTierByKey(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::CheckConfig(x) => x.main().await, + SubCommand::CreateUser(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_migrated_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::CountUsers(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::Proxyd(x) => { + let top_config = top_config.expect("--config is required to run proxyd"); + + x.main(top_config, num_workers).await + } + SubCommand::DropMigrationLock(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + // very intentionally, do NOT run migrations here + let db_conn = get_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::Sentryd(x) => { + if cli_config.sentry_url.is_none() { + warn!("sentry_url is not set! 
Logs will only show in this console"); + } + + x.main().await + } + SubCommand::RpcAccounting(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_migrated_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::TransferKey(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + let db_conn = get_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::UserExport(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_migrated_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::UserImport(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_migrated_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + } + }) } From 90d3371eee892dc7d56b92034e326bf3d733e290 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Wed, 18 Jan 2023 16:17:43 -0800 Subject: [PATCH 11/80] improved rate limiting on websockets --- TODO.md | 4 +- web3_proxy/src/app/mod.rs | 8 +- web3_proxy/src/app_stats.rs | 2 +- web3_proxy/src/bin/web3_proxy_cli/daemon.rs | 2 - web3_proxy/src/frontend/authorization.rs | 35 +++- web3_proxy/src/frontend/errors.rs | 27 +-- web3_proxy/src/frontend/rpc_proxy_ws.rs | 174 +++++++++++++------- web3_proxy/src/rpcs/request.rs | 18 +- 8 files changed, 175 insertions(+), 95 deletions(-) diff --git a/TODO.md b/TODO.md index 081844ac..2f4b8754 100644 --- a/TODO.md +++ b/TODO.md @@ -304,6 +304,7 @@ These are not yet ordered. There might be duplicates. We might not actually need - [x] standalone healthcheck daemon (sentryd) - [x] status page should show version - [x] combine the proxy and cli into one bin +- [x] improve rate limiting on websockets - [-] proxy mode for benchmarking all backends - [-] proxy mode for sending to multiple backends - [-] let users choose a % of reverts to log (or maybe x/second). someone like curve logging all reverts will be a BIG database very quickly @@ -514,7 +515,8 @@ in another repo: event subscriber - [ ] if the call is something simple like "symbol" or "decimals", cache that too. though i think this could bite us. - [ ] add a subscription that returns the head block number and hash but nothing else - [ ] if chain split detected, what should we do? don't send transactions? -- [ ] archive check works well for local servers, but public nodes (especially on other chains) seem to give unreliable results. likely because of load balancers. maybe have a "max block data limit" +- [ ] archive check works well for local servers, but public nodes (especially on other chains) seem to give unreliable results. likely because of load balancers. 
+  - [x] configurable block data limit until better checks
 - [ ] https://docs.rs/derive_builder/latest/derive_builder/
 - [ ] Detect orphaned transactions
 - [ ] https://crates.io/crates/reqwest-middleware easy retry with exponential back off
diff --git a/web3_proxy/src/app/mod.rs b/web3_proxy/src/app/mod.rs
index 44df90af..1e3a327a 100644
--- a/web3_proxy/src/app/mod.rs
+++ b/web3_proxy/src/app/mod.rs
@@ -4,7 +4,7 @@ mod ws;
 use crate::app_stats::{ProxyResponseStat, StatEmitter, Web3ProxyStat};
 use crate::block_number::{block_needed, BlockNeeded};
 use crate::config::{AppConfig, TopConfig};
-use crate::frontend::authorization::{Authorization, RequestMetadata};
+use crate::frontend::authorization::{Authorization, RequestMetadata, RpcSecretKey};
 use crate::frontend::errors::FrontendErrorResponse;
 use crate::frontend::rpc_proxy_ws::ProxyMode;
 use crate::jsonrpc::{
@@ -136,12 +136,14 @@ pub type AnyhowJoinHandle<T> = JoinHandle<anyhow::Result<T>>;
 
 #[derive(Clone, Debug, Default, From)]
 pub struct AuthorizationChecks {
-    /// database id of the primary user.
+    /// database id of the primary user. 0 if anon
     /// TODO: do we need this? its on the authorization so probably not
     pub user_id: u64,
+    /// the key used (if any)
+    pub rpc_secret_key: Option<RpcSecretKey>,
     /// database id of the rpc key
     /// if this is None, then this request is being rate limited by ip
-    pub rpc_key_id: Option<NonZeroU64>,
+    pub rpc_secret_key_id: Option<NonZeroU64>,
     /// if None, allow unlimited queries. inherited from the user_tier
     pub max_requests_per_period: Option<u64>,
     // if None, allow unlimited concurrent requests. inherited from the user_tier
diff --git a/web3_proxy/src/app_stats.rs b/web3_proxy/src/app_stats.rs
index 204effd5..681dfcea 100644
--- a/web3_proxy/src/app_stats.rs
+++ b/web3_proxy/src/app_stats.rs
@@ -36,7 +36,7 @@ impl ProxyResponseStat {
     fn key(&self) -> ProxyResponseAggregateKey {
         // include either the rpc_key_id or the origin
         let (mut rpc_key_id, origin) = match (
-            self.authorization.checks.rpc_key_id,
+            self.authorization.checks.rpc_secret_key_id,
             &self.authorization.origin,
         ) {
             (Some(rpc_key_id), _) => {
diff --git a/web3_proxy/src/bin/web3_proxy_cli/daemon.rs b/web3_proxy/src/bin/web3_proxy_cli/daemon.rs
index 09998ea4..69d0e2c7 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/daemon.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/daemon.rs
@@ -204,7 +204,6 @@ mod tests {
                         disabled: false,
                         display_name: None,
                         url: anvil.endpoint(),
-                        backup: None,
                         block_data_limit: None,
                         soft_limit: 100,
                         hard_limit: None,
@@ -219,7 +218,6 @@ mod tests {
                         disabled: false,
                         display_name: None,
                         url: anvil.ws_endpoint(),
-                        backup: None,
                         block_data_limit: None,
                         soft_limit: 100,
                         hard_limit: None,
diff --git a/web3_proxy/src/frontend/authorization.rs b/web3_proxy/src/frontend/authorization.rs
index f98cf7d0..c04ba8c2 100644
--- a/web3_proxy/src/frontend/authorization.rs
+++ b/web3_proxy/src/frontend/authorization.rs
@@ -660,13 +660,11 @@ impl Web3ProxyApp {
 
         let db_replica = self.db_replica().context("Getting database connection")?;
 
-        let rpc_secret_key: Uuid = rpc_secret_key.into();
-
         // TODO: join the user table to this to return the User? we don't always need it
         // TODO: join on secondary users
         // TODO: join on user tier
         match rpc_key::Entity::find()
-            .filter(rpc_key::Column::SecretKey.eq(rpc_secret_key))
+            .filter(rpc_key::Column::SecretKey.eq(<Uuid>::from(rpc_secret_key)))
             .filter(rpc_key::Column::Active.eq(true))
             .one(db_replica.conn())
             .await?
@@ -741,7 +739,8 @@ impl Web3ProxyApp {
 
         Ok(AuthorizationChecks {
             user_id: rpc_key_model.user_id,
-            rpc_key_id,
+            rpc_secret_key: Some(rpc_secret_key),
+            rpc_secret_key_id: rpc_key_id,
             allowed_ips,
             allowed_origins,
             allowed_referers,
@@ -774,7 +773,7 @@ impl Web3ProxyApp {
         let authorization_checks = self.authorization_checks(rpc_key).await?;
 
         // if no rpc_key_id matching the given rpc was found, then we can't rate limit by key
-        if authorization_checks.rpc_key_id.is_none() {
+        if authorization_checks.rpc_secret_key_id.is_none() {
             return Ok(RateLimitResult::UnknownKey);
         }
@@ -845,3 +844,29 @@ impl Web3ProxyApp {
         }
     }
 }
+
+impl Authorization {
+    pub async fn check_again(
+        &self,
+        app: &Arc<Web3ProxyApp>,
+    ) -> Result<(Arc<Self>, Option<OwnedSemaphorePermit>), FrontendErrorResponse> {
+        // TODO: we could probably do this without clones. but this is easy
+        let (a, s) = if let Some(rpc_secret_key) = self.checks.rpc_secret_key {
+            key_is_authorized(
+                app,
+                rpc_secret_key,
+                self.ip,
+                self.origin.clone(),
+                self.referer.clone(),
+                self.user_agent.clone(),
+            )
+            .await?
+        } else {
+            ip_is_authorized(app, self.ip, self.origin.clone()).await?
+        };
+
+        let a = Arc::new(a);
+
+        Ok((a, s))
+    }
+}
diff --git a/web3_proxy/src/frontend/errors.rs b/web3_proxy/src/frontend/errors.rs
index 30ee053f..22f048ee 100644
--- a/web3_proxy/src/frontend/errors.rs
+++ b/web3_proxy/src/frontend/errors.rs
@@ -35,7 +35,6 @@ pub enum FrontendErrorResponse {
     NotFound,
     RateLimited(Authorization, Option<Instant>),
     Redis(RedisError),
-    Response(Response),
     /// simple way to return an error message to the user and an anyhow to our logs
     StatusCode(StatusCode, String, Option<anyhow::Error>),
     /// TODO: what should be attached to the timeout?
@@ -44,11 +43,9 @@ pub enum FrontendErrorResponse {
     UnknownKey,
 }
 
-impl IntoResponse for FrontendErrorResponse {
-    fn into_response(self) -> Response {
-        // TODO: include the request id in these so that users can give us something that will point to logs
-        // TODO: status code is in the jsonrpc response and is also the first item in the tuple. DRY
-        let (status_code, response) = match self {
+impl FrontendErrorResponse {
+    pub fn into_response_parts(self) -> (StatusCode, JsonRpcForwardedResponse) {
+        match self {
             Self::AccessDenied => {
                 // TODO: attach something to this trace. probably don't include much in the message though. don't want to leak creds by accident
                 trace!("access denied");
@@ -174,12 +171,12 @@ impl IntoResponse for FrontendErrorResponse {
             };
 
             // create a string with either the IP or the rpc_key_id
-            let msg = if authorization.checks.rpc_key_id.is_none() {
+            let msg = if authorization.checks.rpc_secret_key_id.is_none() {
                 format!("too many requests from {}.{}", authorization.ip, retry_msg)
             } else {
                 format!(
                     "too many requests from rpc key #{}.{}",
-                    authorization.checks.rpc_key_id.unwrap(),
+                    authorization.checks.rpc_secret_key_id.unwrap(),
                     retry_msg
                 )
             };
@@ -204,10 +201,6 @@ impl IntoResponse for FrontendErrorResponse {
                     ),
                 )
             }
-            Self::Response(r) => {
-                debug_assert_ne!(r.status(), StatusCode::OK);
-                return r;
-            }
             Self::SemaphoreAcquireError(err) => {
                 warn!("semaphore acquire err={:?}", err);
                 (
@@ -274,7 +267,15 @@ impl IntoResponse for FrontendErrorResponse {
                     None,
                 ),
             ),
-        };
+        }
+    }
+}
+
+impl IntoResponse for FrontendErrorResponse {
+    fn into_response(self) -> Response {
+        // TODO: include the request id in these so that users can give us something that will point to logs
+        // TODO: status code is in the jsonrpc response and is also the first item in the tuple. DRY
+        let (status_code, response) = self.into_response_parts();
 
         (status_code, Json(response)).into_response()
     }
diff --git a/web3_proxy/src/frontend/rpc_proxy_ws.rs b/web3_proxy/src/frontend/rpc_proxy_ws.rs
index 23516738..ae6b700b 100644
--- a/web3_proxy/src/frontend/rpc_proxy_ws.rs
+++ b/web3_proxy/src/frontend/rpc_proxy_ws.rs
@@ -32,6 +32,7 @@ use serde_json::json;
 use serde_json::value::to_raw_value;
 use std::sync::Arc;
 use std::{str::from_utf8_mut, sync::atomic::AtomicUsize};
+use tokio::sync::{broadcast, OwnedSemaphorePermit, RwLock};
 
 #[derive(Copy, Clone)]
 pub enum ProxyMode {
@@ -52,7 +53,7 @@ pub async fn websocket_handler(
     origin: Option<TypedHeader<Origin>>,
     ws_upgrade: Option<WebSocketUpgrade>,
 ) -> FrontendResult {
-    _websocket_handler(ProxyMode::Fastest(1), app, ip, origin, ws_upgrade).await
+    _websocket_handler(ProxyMode::Best, app, ip, origin, ws_upgrade).await
 }
 
 /// Public entrypoint for WebSocket JSON-RPC requests that uses all synced servers.
@@ -226,7 +227,7 @@ async fn _websocket_handler_with_key(
             match (
                 &app.config.redirect_public_url,
                 &app.config.redirect_rpc_key_url,
-                authorization.checks.rpc_key_id,
+                authorization.checks.rpc_secret_key_id,
             ) {
                 (None, None, _) => Err(FrontendErrorResponse::StatusCode(
                     StatusCode::BAD_REQUEST,
@@ -239,7 +240,7 @@ async fn _websocket_handler_with_key(
                 (_, Some(redirect_rpc_key_url), rpc_key_id) => {
                     let reg = Handlebars::new();
 
-                    if authorization.checks.rpc_key_id.is_none() {
+                    if authorization.checks.rpc_secret_key_id.is_none() {
                         // i don't think this is possible
                         Err(FrontendErrorResponse::StatusCode(
                             StatusCode::UNAUTHORIZED,
@@ -298,9 +299,20 @@ async fn handle_socket_payload(
     payload: &str,
     response_sender: &flume::Sender<Message>,
     subscription_count: &AtomicUsize,
-    subscriptions: &mut HashMap<String, AbortHandle>,
+    subscriptions: Arc<RwLock<HashMap<String, AbortHandle>>>,
     proxy_mode: ProxyMode,
-) -> Message {
+) -> (Message, Option<OwnedSemaphorePermit>) {
+    let (authorization, semaphore) = match authorization.check_again(&app).await {
+        Ok((a, s)) => (a, s),
+        Err(err) => {
+            let (_, err) = err.into_response_parts();
+
+            let err = serde_json::to_string(&err).expect("to_string should always work here");
+
+            return (Message::Text(err), None);
+        }
+    };
+
     // TODO: do any clients send batches over websockets?
     let (id, response) = match serde_json::from_str::<JsonRpcRequest>(payload) {
         Ok(json_request) => {
             {
                 Ok((handle, response)) => {
                     // TODO: better key
-                    subscriptions.insert(
+                    let mut x = subscriptions.write().await;
+
+                    x.insert(
                         response
                             .result
                             .as_ref()
                         handle,
                     );
 
             let subscription_id = json_request.params.unwrap().to_string();
 
+            let mut x = subscriptions.write().await;
+
             // TODO: is this the right response?
-            let partial_response = match subscriptions.remove(&subscription_id) {
+            let partial_response = match x.remove(&subscription_id) {
                 None => false,
                 Some(handle) => {
                     handle.abort();
                 }
             };
 
+            drop(x);
+
             let response =
                 JsonRpcForwardedResponse::from_value(json!(partial_response), id.clone());
 
         }
     };
 
-    // TODO: what error should this be?
-
-    Message::Text(response_str)
+    (Message::Text(response_str), semaphore)
 }
 
 async fn read_web3_socket(
     response_sender: flume::Sender<Message>,
     proxy_mode: ProxyMode,
 ) {
-    let mut subscriptions = HashMap::new();
-    let subscription_count = AtomicUsize::new(1);
+    // TODO: need a concurrent hashmap
+    let subscriptions = Arc::new(RwLock::new(HashMap::new()));
+    let subscription_count = Arc::new(AtomicUsize::new(1));
 
-    while let Some(Ok(msg)) = ws_rx.next().await {
-        // TODO: spawn this?
-        // new message from our client. forward to a backend and then send it through response_tx
-        let response_msg = match msg {
-            Message::Text(payload) => {
-                handle_socket_payload(
-                    app.clone(),
-                    &authorization,
-                    &payload,
-                    &response_sender,
-                    &subscription_count,
-                    &mut subscriptions,
-                    proxy_mode,
-                )
-                .await
+    let (close_sender, mut close_receiver) = broadcast::channel(1);
+
+    loop {
+        tokio::select! {
+            msg = ws_rx.next() => {
+                if let Some(Ok(msg)) = msg {
+                    // spawn so that we can serve responses from this loop even faster
+                    // TODO: only do these clones if the msg is text/binary?
+                    let close_sender = close_sender.clone();
+                    let app = app.clone();
+                    let authorization = authorization.clone();
+                    let response_sender = response_sender.clone();
+                    let subscriptions = subscriptions.clone();
+                    let subscription_count = subscription_count.clone();
+
+                    let f = async move {
+                        let mut _semaphore = None;
+
+                        // new message from our client. forward to a backend and then send it through response_tx
+                        let response_msg = match msg {
+                            Message::Text(payload) => {
+                                let (msg, s) = handle_socket_payload(
+                                    app.clone(),
+                                    &authorization,
+                                    &payload,
+                                    &response_sender,
+                                    &subscription_count,
+                                    subscriptions,
+                                    proxy_mode,
+                                )
+                                .await;
+
+                                _semaphore = s;
+
+                                msg
+                            }
+                            Message::Ping(x) => {
+                                trace!("ping: {:?}", x);
+                                Message::Pong(x)
+                            }
+                            Message::Pong(x) => {
+                                trace!("pong: {:?}", x);
+                                return;
+                            }
+                            Message::Close(_) => {
+                                info!("closing websocket connection");
+                                // TODO: do something to close subscriptions?
+ let _ = close_sender.send(true); + return; + } + Message::Binary(mut payload) => { + let payload = from_utf8_mut(&mut payload).unwrap(); + + let (msg, s) = handle_socket_payload( + app.clone(), + &authorization, + payload, + &response_sender, + &subscription_count, + subscriptions, + proxy_mode, + ) + .await; + + _semaphore = s; + + msg + } + }; + + if response_sender.send_async(response_msg).await.is_err() { + let _ = close_sender.send(true); + return; + }; + + _semaphore = None; + }; + + tokio::spawn(f); + } else { + break; + } } - Message::Ping(x) => { - trace!("ping: {:?}", x); - Message::Pong(x) - } - Message::Pong(x) => { - trace!("pong: {:?}", x); - continue; - } - Message::Close(_) => { - info!("closing websocket connection"); + _ = close_receiver.recv() => { break; } - Message::Binary(mut payload) => { - // TODO: poke rate limit for the user/ip - let payload = from_utf8_mut(&mut payload).unwrap(); - - handle_socket_payload( - app.clone(), - &authorization, - payload, - &response_sender, - &subscription_count, - &mut subscriptions, - proxy_mode, - ) - .await - } - }; - - match response_sender.send_async(response_msg).await { - Ok(_) => {} - Err(err) => { - error!("{}", err); - break; - } - }; + } } } diff --git a/web3_proxy/src/rpcs/request.rs b/web3_proxy/src/rpcs/request.rs index 7db16fd5..8cf22bbf 100644 --- a/web3_proxy/src/rpcs/request.rs +++ b/web3_proxy/src/rpcs/request.rs @@ -84,7 +84,7 @@ impl Authorization { method: Method, params: EthCallFirstParams, ) -> anyhow::Result<()> { - let rpc_key_id = match self.checks.rpc_key_id { + let rpc_key_id = match self.checks.rpc_secret_key_id { Some(rpc_key_id) => rpc_key_id.into(), None => { // // trace!(?self, "cannot save revert without rpc_key_id"); @@ -240,14 +240,14 @@ impl OpenRequestHandle { Web3Provider::Ws(provider) => provider.request(method, params).await, }; - // TODO: i think ethers already has trace logging (and does it much more fancy) - trace!( - "response from {} for {} {:?}: {:?}", - self.conn, - method, - params, - response, - ); + // // TODO: i think ethers already has trace logging (and does it much more fancy) + // trace!( + // "response from {} for {} {:?}: {:?}", + // self.conn, + // method, + // params, + // response, + // ); if let Err(err) = &response { // only save reverts for some types of calls From e4b0d4b76d94fbb4bd01dbeb1ff556f965b47762 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Wed, 18 Jan 2023 16:21:15 -0800 Subject: [PATCH 12/80] todo comment --- web3_proxy/src/app/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/web3_proxy/src/app/mod.rs b/web3_proxy/src/app/mod.rs index 1e3a327a..8c227695 100644 --- a/web3_proxy/src/app/mod.rs +++ b/web3_proxy/src/app/mod.rs @@ -1134,6 +1134,7 @@ impl Web3ProxyApp { } /* // erigon was giving bad estimates. but now it doesn't need it + // TODO: got reports of some gas estimate issue on polygon with their erc20s. maybe we do want it "eth_estimateGas" => { // TODO: eth_estimateGas using anvil? // TODO: modify the block requested? 
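The websocket rework in patch 11 above is easiest to read with its concurrency pattern in mind: every incoming message is re-authorized and handled in its own spawned task, and the task holds an owned semaphore permit until its response has been sent, so per-key concurrency stays bounded even when a client floods the socket. A minimal, self-contained sketch of that pattern using tokio's `Semaphore` (the `handle_one` name and the hard-coded limit are illustrative, not from the patch):

```rust
use std::sync::Arc;
use tokio::sync::Semaphore;
use tokio::time::{sleep, Duration};

// simulate handling one websocket message while holding a concurrency permit
async fn handle_one(permits: Arc<Semaphore>, id: u32) {
    // acquire_owned returns a permit that can move into this task and is
    // released automatically when dropped (like `_semaphore` in the patch)
    let _permit = permits
        .acquire_owned()
        .await
        .expect("semaphore is never closed here");

    println!("handling message {}", id);
    sleep(Duration::from_millis(50)).await;
    // _permit dropped here; the next queued message may proceed
}

#[tokio::main]
async fn main() {
    // illustrative limit; the real value would come from the user's tier
    let permits = Arc::new(Semaphore::new(2));

    let mut tasks = Vec::new();
    for id in 0..5u32 {
        // one spawned task per message, like the select! loop in read_web3_socket
        tasks.push(tokio::spawn(handle_one(permits.clone(), id)));
    }

    for t in tasks {
        t.await.unwrap();
    }
}
```

Because `acquire_owned` returns a permit that is not tied to a borrow, it can move into the spawned task and lives as long as the task does, which is why `handle_socket_payload` now returns `(Message, Option<OwnedSemaphorePermit>)` instead of a bare `Message`.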
From 0c05b5bdee897f6e3245ecf4251c7edcbbcb62e7 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Thu, 19 Jan 2023 02:13:00 -0800
Subject: [PATCH 13/80] major refactor to only use backup servers when absolutely necessary

---
 TODO.md                                     |   2 +
 web3_proxy/src/app/mod.rs                   |   4 +
 web3_proxy/src/bin/web3_proxy_cli/daemon.rs |  15 +-
 web3_proxy/src/config.rs                    |   5 +
 web3_proxy/src/rpcs/blockchain.rs           | 882 +++++++++++++------
 web3_proxy/src/rpcs/connection.rs           |  10 +-
 web3_proxy/src/rpcs/connections.rs          |  47 +-
 web3_proxy/src/rpcs/synced_connections.rs   |  20 +-
 8 files changed, 656 insertions(+), 329 deletions(-)

diff --git a/TODO.md b/TODO.md
index 2f4b8754..afe04f5d 100644
--- a/TODO.md
+++ b/TODO.md
@@ -305,6 +305,8 @@ These are not yet ordered. There might be duplicates. We might not actually need
 - [x] status page should show version
 - [x] combine the proxy and cli into one bin
 - [x] improve rate limiting on websockets
+- [x] retry another server if we get a jsonrpc response error about rate limits
+- [x] major refactor to only use backup servers when absolutely necessary
 - [-] proxy mode for benchmarking all backends
 - [-] proxy mode for sending to multiple backends
 - [-] let users choose a % of reverts to log (or maybe x/second). someone like curve logging all reverts will be a BIG database very quickly
diff --git a/web3_proxy/src/app/mod.rs b/web3_proxy/src/app/mod.rs
index 8c227695..055694f3 100644
--- a/web3_proxy/src/app/mod.rs
+++ b/web3_proxy/src/app/mod.rs
@@ -727,6 +727,10 @@ impl Web3ProxyApp {
         Ok((app, cancellable_handles, important_background_handles).into())
     }
 
+    pub fn head_block_receiver(&self) -> watch::Receiver<ArcBlock> {
+        self.head_block_receiver.clone()
+    }
+
     pub async fn prometheus_metrics(&self) -> String {
         let globals = HashMap::new();
         // TODO: what globals? should this be the hostname or what?
diff --git a/web3_proxy/src/bin/web3_proxy_cli/daemon.rs b/web3_proxy/src/bin/web3_proxy_cli/daemon.rs
index 69d0e2c7..000b8b51 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/daemon.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/daemon.rs
@@ -51,22 +51,25 @@ async fn run(
 
     let app_frontend_port = frontend_port;
     let app_prometheus_port = prometheus_port;
+    let mut shutdown_receiver = shutdown_sender.subscribe();
 
     // start the main app
     let mut spawned_app =
         Web3ProxyApp::spawn(top_config, num_workers, shutdown_sender.subscribe()).await?;
 
-    let frontend_handle = tokio::spawn(frontend::serve(app_frontend_port, spawned_app.app.clone()));
-
-    // TODO: should we put this in a dedicated thread?
+    // start the prometheus metrics port
     let prometheus_handle = tokio::spawn(metrics_frontend::serve(
         spawned_app.app.clone(),
         app_prometheus_port,
     ));
 
-    let mut shutdown_receiver = shutdown_sender.subscribe();
+    // wait until the app has seen its first consensus head block
+    let _ = spawned_app.app.head_block_receiver().changed().await;
 
-    // if everything is working, these should both run forever
+    // start the frontend port
+    let frontend_handle = tokio::spawn(frontend::serve(app_frontend_port, spawned_app.app.clone()));
+
+    // if everything is working, these should all run forever
     tokio::select! {
        x = flatten_handles(spawned_app.app_handles) => {
            match x {
                Ok(_) => info!("app_handle exited"),
                Err(e) => {
                    return Err(e);
                }
            }
        }
@@ -204,6 +207,7 @@ mod tests {
                         disabled: false,
                         display_name: None,
                         url: anvil.endpoint(),
+                        backup: Some(false),
                         block_data_limit: None,
                         soft_limit: 100,
                         hard_limit: None,
@@ -218,6 +222,7 @@ mod tests {
                         disabled: false,
                         display_name: None,
                         url: anvil.ws_endpoint(),
+                        backup: Some(false),
                         block_data_limit: None,
                         soft_limit: 100,
                         hard_limit: None,
diff --git a/web3_proxy/src/config.rs b/web3_proxy/src/config.rs
index 9bb125e3..20aabee3 100644
--- a/web3_proxy/src/config.rs
+++ b/web3_proxy/src/config.rs
@@ -198,6 +198,8 @@ pub struct Web3ConnectionConfig {
     pub soft_limit: u32,
     /// the requests per second at which the server throws errors (rate limit or otherwise)
     pub hard_limit: Option<u64>,
+    /// only use this rpc if everything else is lagging too far. this allows us to ignore fast but very low limit rpcs
+    pub backup: Option<bool>,
     /// All else equal, a server with a lower tier receives all requests
     #[serde(default = "default_tier")]
     pub tier: u64,
@@ -256,6 +258,8 @@ impl Web3ConnectionConfig {
             None
         };
 
+        let backup = self.backup.unwrap_or(false);
+
         Web3Connection::spawn(
             name,
             allowed_lag,
@@ -267,6 +271,7 @@ impl Web3ConnectionConfig {
             http_interval_sender,
             hard_limit,
             self.soft_limit,
+            backup,
             self.block_data_limit,
             block_map,
             block_sender,
diff --git a/web3_proxy/src/rpcs/blockchain.rs b/web3_proxy/src/rpcs/blockchain.rs
index e03cc6fd..e3505c97 100644
--- a/web3_proxy/src/rpcs/blockchain.rs
+++ b/web3_proxy/src/rpcs/blockchain.rs
@@ -4,13 +4,13 @@ use super::connections::Web3Connections;
 use super::transactions::TxStatus;
 use crate::frontend::authorization::Authorization;
 use crate::{
-    config::BlockAndRpc, jsonrpc::JsonRpcRequest, rpcs::synced_connections::SyncedConnections,
+    config::BlockAndRpc, jsonrpc::JsonRpcRequest, rpcs::synced_connections::ConsensusConnections,
 };
 use anyhow::Context;
 use derive_more::From;
 use ethers::prelude::{Block, TxHash, H256, U64};
 use hashbrown::{HashMap, HashSet};
-use log::{debug, warn, Level};
+use log::{debug, error, warn, Level};
 use moka::future::Cache;
 use serde::Serialize;
 use serde_json::json;
@@ -24,7 +24,7 @@
 pub type ArcBlock = Arc<Block<TxHash>>;
 
 pub type BlockHashesCache = Cache<H256, ArcBlock, hashbrown::hash_map::DefaultHashBuilder>;
 
-/// A block's hash and number.
+/// A block and its age.
 #[derive(Clone, Debug, Default, From, Serialize)]
 pub struct SavedBlock {
     pub block: ArcBlock,
@@ -99,14 +99,18 @@ impl Display for SavedBlock {
 
 impl Web3Connections {
     /// add a block to our mappings and track the heaviest chain
-    pub async fn save_block(&self, block: &ArcBlock, heaviest_chain: bool) -> anyhow::Result<()> {
+    pub async fn save_block(
+        &self,
+        block: ArcBlock,
+        heaviest_chain: bool,
+    ) -> anyhow::Result<ArcBlock> {
         // TODO: i think we can rearrange this function to make it faster on the hot path
         let block_hash = block.hash.as_ref().context("no block hash")?;
 
         // skip Block::default()
         if block_hash.is_zero() {
             debug!("Skipping block without hash!");
-            return Ok(());
+            return Ok(block);
         }
 
         let block_num = block.number.as_ref().context("no block num")?;
@@ -121,15 +125,17 @@ impl Web3Connections {
 
         // this block is very likely already in block_hashes
         // TODO: use their get_with
-        self.block_hashes
+        let block = self
+            .block_hashes
             .get_with(*block_hash, async move { block.clone() })
             .await;
 
-        Ok(())
+        Ok(block)
     }
 
     /// Get a block from caches with fallback.
     /// Will query a specific node or the best available.
+    /// TODO: return anyhow::Result<Option<ArcBlock>>?
pub async fn block( &self, authorization: &Arc, @@ -138,6 +144,7 @@ impl Web3Connections { ) -> anyhow::Result { // first, try to get the hash from our cache // the cache is set last, so if its here, its everywhere + // TODO: use try_get_with if let Some(block) = self.block_hashes.get(hash) { return Ok(block); } @@ -178,7 +185,7 @@ impl Web3Connections { // the block was fetched using eth_getBlockByHash, so it should have all fields // TODO: fill in heaviest_chain! if the block is old enough, is this definitely true? - self.save_block(&block, false).await?; + let block = self.save_block(block, false).await?; Ok(block) } @@ -249,7 +256,7 @@ impl Web3Connections { let block: ArcBlock = serde_json::from_str(raw_block.get())?; // the block was fetched using eth_getBlockByNumber, so it should have all fields and be on the heaviest chain - self.save_block(&block, true).await?; + let block = self.save_block(block, true).await?; Ok((block, archive_needed)) } @@ -265,7 +272,7 @@ impl Web3Connections { ) -> anyhow::Result<()> { // TODO: indexmap or hashmap? what hasher? with_capacity? // TODO: this will grow unbounded. prune old heads on this at the same time we prune the graph? - let mut connection_heads = HashMap::new(); + let mut connection_heads = ConsensusFinder::default(); while let Ok((new_block, rpc)) = block_receiver.recv_async().await { let new_block = new_block.map(Into::into); @@ -287,7 +294,7 @@ impl Web3Connections { } } - // TODO: if there was an error, we should return it + // TODO: if there was an error, should we return it instead of an Ok? warn!("block_receiver exited!"); Ok(()) @@ -299,327 +306,590 @@ impl Web3Connections { pub(crate) async fn process_block_from_rpc( &self, authorization: &Arc, - connection_heads: &mut HashMap, + consensus_finder: &mut ConsensusFinder, rpc_head_block: Option, rpc: Arc, head_block_sender: &watch::Sender, pending_tx_sender: &Option>, ) -> anyhow::Result<()> { - // add the rpc's block to connection_heads, or remove the rpc from connection_heads - let rpc_head_block = match rpc_head_block { - Some(rpc_head_block) => { - // we don't know if its on the heaviest chain yet - self.save_block(&rpc_head_block.block, false).await?; - - // TODO: don't default to 60. different chains are differen - if rpc_head_block.syncing(60) { - if connection_heads.remove(&rpc.name).is_some() { - warn!("{} is behind by {} seconds", &rpc.name, rpc_head_block.age); - } else { - // we didn't remove anything and this block is old. exit early - return Ok(()); - }; - - None - } else { - let rpc_head_hash = rpc_head_block.hash(); - - if let Some(prev_hash) = - connection_heads.insert(rpc.name.to_owned(), rpc_head_hash) - { - if prev_hash == rpc_head_hash { - // this block was already sent by this node. return early - return Ok(()); - } - } - - // TODO: should we just keep the ArcBlock here? - Some(rpc_head_block) - } - } - None => { - // // trace!(%rpc, "Block without number or hash!"); - - if connection_heads.remove(&rpc.name).is_none() { - // this connection was already removed. - // return early. 
no need to process synced connections - return Ok(()); - } - - None - } - }; - - // iterate the known heads to find the highest_work_block - let mut checked_heads = HashSet::new(); - let mut highest_num_block: Option = None; - for (conn_name, connection_head_hash) in connection_heads.iter() { - if checked_heads.contains(connection_head_hash) { - // we already checked this head from another rpc - continue; - } - // don't check the same hash multiple times - checked_heads.insert(connection_head_hash); - - let conn_head_block = if let Some(x) = self.block_hashes.get(connection_head_hash) { - x - } else { - // TODO: why does this happen?!?! seems to only happen with uncled blocks - // TODO: maybe we should do get_with? - // TODO: maybe we should just continue. this only seems to happen when an older block is received - warn!("Missing connection_head_block in block_hashes. Fetching now. hash={}. other={}. rpc={}", connection_head_hash, conn_name, rpc); - - // this option should always be populated - let conn_rpc = self.conns.get(conn_name); - - match self - .block(authorization, connection_head_hash, conn_rpc) - .await - { - Ok(block) => block, - Err(err) => { - warn!("Processing {}. Failed fetching connection_head_block for block_hashes. {} head hash={}. err={:?}", rpc, conn_name, connection_head_hash, err); - continue; - } - } - }; - - match &conn_head_block.number { - None => { - panic!("block is missing number. this is a bug"); - } - Some(conn_head_num) => { - // if this is the first block we've tried - // or if this rpc's newest block has a higher number - // we used to check total difficulty, but that isn't a thing anymore - if highest_num_block.is_none() - || conn_head_num - > highest_num_block - .as_ref() - .expect("there should always be a block here") - .number - .as_ref() - .expect("there should always be number here") - { - highest_num_block = Some(conn_head_block); - } - } - } + // TODO: how should we handle an error here? + if !consensus_finder + .update_rpc(rpc_head_block.clone(), rpc.clone(), self) + .await? + { + // nothing changed. no need + return Ok(()); } - if let Some(mut maybe_head_block) = highest_num_block { - // track rpcs on this heaviest chain so we can build a new SyncedConnections - let mut highest_rpcs = HashSet::<&String>::new(); - // a running total of the soft limits covered by the rpcs that agree on the head block - let mut highest_rpcs_sum_soft_limit: u32 = 0; - // TODO: also track highest_rpcs_sum_hard_limit? llama doesn't need this, so it can wait + let new_synced_connections = consensus_finder + .best_consensus_connections(authorization, self) + .await; - // check the highest work block for a set of rpcs that can serve our request load - // if it doesn't have enough rpcs for our request load, check the parent block - // TODO: loop for how many parent blocks? we don't want to serve blocks that are too far behind. probably different per chain - // TODO: this loop is pretty long. any way to clean up this code? 
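Both the removed search that continues below and the ConsensusFinder refactor that replaces it gate a candidate head on the same two thresholds. A minimal sketch of that test, with freestanding arguments standing in for the min_head_rpcs and min_sum_soft_limit fields on Web3Connections:

    /// a candidate head only wins consensus when enough rpcs agree on it AND
    /// those rpcs have enough combined soft limit to serve our request load
    fn has_quorum(
        agreeing_rpcs: usize,
        sum_soft_limit: u32,
        min_head_rpcs: usize,
        min_sum_soft_limit: u32,
    ) -> bool {
        agreeing_rpcs >= min_head_rpcs && sum_soft_limit >= min_sum_soft_limit
    }

Requiring capacity as well as agreement is why the search keeps walking to parent blocks: an rpc that voted for a child block still counts for every ancestor, so older blocks can only gain voters.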
- for _ in 0..3 { - let maybe_head_hash = maybe_head_block - .hash - .as_ref() - .expect("blocks here always need hashes"); + let includes_backups = new_synced_connections.includes_backups; + let consensus_head_block = new_synced_connections.head_block.clone(); + let num_consensus_rpcs = new_synced_connections.num_conns(); + let num_checked_rpcs = new_synced_connections.num_checked_conns; + let num_active_rpcs = consensus_finder.all.rpc_name_to_hash.len(); + let total_rpcs = self.conns.len(); - // find all rpcs with maybe_head_block as their current head - for (conn_name, conn_head_hash) in connection_heads.iter() { - if conn_head_hash != maybe_head_hash { - // connection is not on the desired block - continue; - } - if highest_rpcs.contains(conn_name) { - // connection is on a child block - continue; + let old_synced_connections = self + .synced_connections + .swap(Arc::new(new_synced_connections)); + + if let Some(consensus_saved_block) = consensus_head_block { + match &old_synced_connections.head_block { + None => { + debug!( + "first {}/{}/{}/{} block={}, rpc={}", + num_consensus_rpcs, + num_checked_rpcs, + num_active_rpcs, + total_rpcs, + consensus_saved_block, + rpc + ); + + if includes_backups { + // TODO: what else should be in this error? + warn!("Backup RPCs are in use!"); } - if let Some(rpc) = self.conns.get(conn_name) { - highest_rpcs.insert(conn_name); - highest_rpcs_sum_soft_limit += rpc.soft_limit; - } else { - warn!("connection missing") - } + let consensus_head_block = + self.save_block(consensus_saved_block.block, true).await?; + + head_block_sender + .send(consensus_head_block) + .context("head_block_sender sending consensus_head_block")?; } + Some(old_head_block) => { + // TODO: do this log item better + let rpc_head_str = rpc_head_block + .map(|x| x.to_string()) + .unwrap_or_else(|| "None".to_string()); - if highest_rpcs_sum_soft_limit < self.min_sum_soft_limit - || highest_rpcs.len() < self.min_head_rpcs - { - // not enough rpcs yet. check the parent - if let Some(parent_block) = self.block_hashes.get(&maybe_head_block.parent_hash) - { - // // trace!( - // child=%maybe_head_hash, parent=%parent_block.hash.unwrap(), "avoiding thundering herd", - // ); - - maybe_head_block = parent_block; - continue; - } else { - // TODO: this message - warn!( - "soft limit {}/{} from {}/{} rpcs: {}%", - highest_rpcs_sum_soft_limit, - self.min_sum_soft_limit, - highest_rpcs.len(), - self.min_head_rpcs, - highest_rpcs_sum_soft_limit * 100 / self.min_sum_soft_limit - ); - break; - } - } - } - - // TODO: if consensus_head_rpcs.is_empty, try another method of finding the head block - - let num_connection_heads = connection_heads.len(); - let total_conns = self.conns.len(); - - // we've done all the searching for the heaviest block that we can - if highest_rpcs.is_empty() { - // if we get here, something is wrong. clear synced connections - let empty_synced_connections = SyncedConnections::default(); - - let _ = self - .synced_connections - .swap(Arc::new(empty_synced_connections)); - - // TODO: log different things depending on old_synced_connections - warn!( - "Processing {}. no consensus head! {}/{}/{}", - rpc, 0, num_connection_heads, total_conns - ); - } else { - // // trace!(?highest_rpcs); - - // TODO: if maybe_head_block.time() is old, ignore it - - // success! 
this block has enough soft limit and nodes on it (or on later blocks) - let conns: Vec> = highest_rpcs - .into_iter() - .filter_map(|conn_name| self.conns.get(conn_name).cloned()) - .collect(); - - // TODO: DEBUG only check - let _ = maybe_head_block - .hash - .expect("head blocks always have hashes"); - let _ = maybe_head_block - .number - .expect("head blocks always have numbers"); - - let num_consensus_rpcs = conns.len(); - - let consensus_head_block: SavedBlock = maybe_head_block.into(); - - let new_synced_connections = SyncedConnections { - head_block: Some(consensus_head_block.clone()), - conns, - }; - - let old_synced_connections = self - .synced_connections - .swap(Arc::new(new_synced_connections)); - - // TODO: if the rpc_head_block != consensus_head_block, log something? - match &old_synced_connections.head_block { - None => { - debug!( - "first {}/{}/{} block={}, rpc={}", - num_consensus_rpcs, - num_connection_heads, - total_conns, - consensus_head_block, - rpc - ); - - self.save_block(&consensus_head_block.block, true).await?; - - head_block_sender - .send(consensus_head_block.block) - .context("head_block_sender sending consensus_head_block")?; - } - Some(old_head_block) => { - // TODO: do this log item better - let rpc_head_str = rpc_head_block - .map(|x| x.to_string()) - .unwrap_or_else(|| "None".to_string()); - - match consensus_head_block.number().cmp(&old_head_block.number()) { - Ordering::Equal => { - // TODO: if rpc_block_id != consensus_head_block, do a different log? - - // multiple blocks with the same fork! - if consensus_head_block.hash() == old_head_block.hash() { - // no change in hash. no need to use head_block_sender - debug!( - "con {}/{}/{} con_head={} rpc_head={} rpc={}", - num_consensus_rpcs, - num_connection_heads, - total_conns, - consensus_head_block, - rpc_head_str, - rpc, - ) - } else { - // hash changed - debug!( - "unc {}/{}/{} con_head={} old={} rpc_head={} rpc={}", - num_consensus_rpcs, - num_connection_heads, - total_conns, - consensus_head_block, - old_head_block, - rpc_head_str, - rpc, - ); - - self.save_block(&consensus_head_block.block, true) - .await - .context("save consensus_head_block as heaviest chain")?; - - head_block_sender.send(consensus_head_block.block).context( - "head_block_sender sending consensus_head_block", - )?; - } - } - Ordering::Less => { - // this is unlikely but possible - // TODO: better log - warn!("chain rolled back {}/{}/{} con_head={} old_head={} rpc_head={} rpc={}", num_consensus_rpcs, num_connection_heads, total_conns, consensus_head_block, old_head_block, rpc_head_str, rpc); - - // TODO: tell save_block to remove any higher block numbers from the cache. not needed because we have other checks on requested blocks being > head, but still seems slike a good idea - self.save_block(&consensus_head_block.block, true) - .await - .context( - "save_block sending consensus_head_block as heaviest chain", - )?; - - head_block_sender - .send(consensus_head_block.block) - .context("head_block_sender sending consensus_head_block")?; - } - Ordering::Greater => { + match consensus_saved_block.number().cmp(&old_head_block.number()) { + Ordering::Equal => { + // multiple blocks with the same fork! + if consensus_saved_block.hash() == old_head_block.hash() { + // no change in hash. 
no need to use head_block_sender debug!( - "new {}/{}/{} con_head={} rpc_head={} rpc={}", + "con {}/{}/{}/{} con={} rpc={}@{}", num_consensus_rpcs, - num_connection_heads, - total_conns, - consensus_head_block, + num_checked_rpcs, + num_active_rpcs, + total_rpcs, + consensus_saved_block, + rpc, + rpc_head_str, + ) + } else { + // hash changed + + if includes_backups { + // TODO: what else should be in this error? + warn!("Backup RPCs are in use!"); + } + + debug!( + "unc {}/{}/{}/{} con_head={} old={} rpc={}@{}", + num_consensus_rpcs, + num_checked_rpcs, + num_active_rpcs, + total_rpcs, + consensus_saved_block, + old_head_block, + rpc, rpc_head_str, - rpc ); - self.save_block(&consensus_head_block.block, true).await?; + let consensus_head_block = self + .save_block(consensus_saved_block.block, true) + .await + .context("save consensus_head_block as heaviest chain")?; - head_block_sender.send(consensus_head_block.block)?; + head_block_sender + .send(consensus_head_block) + .context("head_block_sender sending consensus_head_block")?; } } + Ordering::Less => { + // this is unlikely but possible + // TODO: better log + warn!( + "chain rolled back {}/{}/{}/{} con={} old={} rpc={}@{}", + num_consensus_rpcs, + num_checked_rpcs, + num_active_rpcs, + total_rpcs, + consensus_saved_block, + old_head_block, + rpc, + rpc_head_str, + ); + + if includes_backups { + // TODO: what else should be in this error? + warn!("Backup RPCs are in use!"); + } + + // TODO: tell save_block to remove any higher block numbers from the cache. not needed because we have other checks on requested blocks being > head, but still seems like a good idea + let consensus_head_block = self + .save_block(consensus_saved_block.block, true) + .await + .context( + "save_block sending consensus_head_block as heaviest chain", + )?; + + head_block_sender + .send(consensus_head_block) + .context("head_block_sender sending consensus_head_block")?; + } + Ordering::Greater => { + debug!( + "new {}/{}/{}/{} con={} rpc={}@{}", + num_consensus_rpcs, + num_checked_rpcs, + num_active_rpcs, + total_rpcs, + consensus_saved_block, + rpc, + rpc_head_str, + ); + + if includes_backups { + // TODO: what else should be in this error? 
+ warn!("Backup RPCs are in use!"); + } + + let consensus_head_block = + self.save_block(consensus_saved_block.block, true).await?; + + head_block_sender.send(consensus_head_block)?; + } } } } + } else { + // TODO: do this log item better + let rpc_head_str = rpc_head_block + .map(|x| x.to_string()) + .unwrap_or_else(|| "None".to_string()); + + if num_checked_rpcs >= self.min_head_rpcs { + error!( + "non {}/{}/{}/{} rpc={}@{}", + num_consensus_rpcs, + num_checked_rpcs, + num_active_rpcs, + total_rpcs, + rpc, + rpc_head_str, + ); + } else { + debug!( + "non {}/{}/{}/{} rpc={}@{}", + num_consensus_rpcs, + num_checked_rpcs, + num_active_rpcs, + total_rpcs, + rpc, + rpc_head_str, + ); + } } Ok(()) } } + +struct ConnectionsGroup { + includes_backups: bool, + rpc_name_to_hash: HashMap, +} + +impl ConnectionsGroup { + fn new(with_backups: bool) -> Self { + Self { + includes_backups: with_backups, + rpc_name_to_hash: Default::default(), + } + } + + fn without_backups() -> Self { + Self::new(false) + } + + fn with_backups() -> Self { + Self::new(true) + } + + fn remove(&mut self, rpc: &Web3Connection) -> Option { + self.rpc_name_to_hash.remove(rpc.name.as_str()) + } + + fn insert(&mut self, rpc: &Web3Connection, block_hash: H256) -> Option { + self.rpc_name_to_hash.insert(rpc.name.clone(), block_hash) + } + + // TODO: i don't love having this here. move to web3_connections? + async fn get_block_from_rpc( + &self, + rpc_name: &str, + hash: &H256, + authorization: &Arc, + web3_connections: &Web3Connections, + ) -> anyhow::Result { + // // TODO: why does this happen?!?! seems to only happen with uncled blocks + // // TODO: maybe we should do try_get_with? + // // TODO: maybe we should just continue. this only seems to happen when an older block is received + // warn!( + // "Missing connection_head_block in block_hashes. Fetching now. hash={}. other={}", + // connection_head_hash, conn_name + // ); + + // this option should almost always be populated. if the connection reconnects at a bad time it might not be available though + let rpc = web3_connections.conns.get(rpc_name); + + web3_connections.block(authorization, hash, rpc).await + } + + // TODO: do this during insert/remove? + pub(self) async fn highest_block( + &self, + authorization: &Arc, + web3_connections: &Web3Connections, + ) -> Option { + let mut checked_heads = HashSet::with_capacity(self.rpc_name_to_hash.len()); + let mut highest_block = None::; + + for (rpc_name, rpc_head_hash) in self.rpc_name_to_hash.iter() { + // don't waste time checking the same hash multiple times + if checked_heads.contains(rpc_head_hash) { + continue; + } + + let rpc_block = match self + .get_block_from_rpc(rpc_name, rpc_head_hash, authorization, web3_connections) + .await + { + Ok(x) => x, + Err(err) => { + warn!( + "failed getting block {} from {} while finding highest block number: {:?}", + rpc_head_hash, rpc_name, err, + ); + continue; + } + }; + + checked_heads.insert(rpc_head_hash); + + // if this is the first block we've tried + // or if this rpc's newest block has a higher number + // we used to check total difficulty, but that isn't a thing anymore on ETH + // TODO: we still need total difficulty on some other PoW chains. whats annoying is it isn't considered part of the "block header" just the block. 
so websockets don't return it
+                let highest_num = highest_block
+                    .as_ref()
+                    .map(|x| x.number.expect("blocks here should always have a number"));
+                let rpc_num = rpc_block.as_ref().number;
+
+                if rpc_num > highest_num {
+                    highest_block = Some(rpc_block);
+                }
+            }
+
+            highest_block
+        }
+
+        pub(self) async fn consensus_head_connections(
+            &self,
+            authorization: &Arc<Authorization>,
+            web3_connections: &Web3Connections,
+        ) -> anyhow::Result<ConsensusConnections> {
+            let mut maybe_head_block = match self.highest_block(authorization, web3_connections).await {
+                None => return Err(anyhow::anyhow!("No blocks known")),
+                Some(x) => x,
+            };
+
+            let num_known = self.rpc_name_to_hash.len();
+
+            // track rpcs on this heaviest chain so we can build a new ConsensusConnections
+            let mut highest_rpcs = HashSet::<&str>::new();
+            // a running total of the soft limits covered by the rpcs that agree on the head block
+            let mut highest_rpcs_sum_soft_limit: u32 = 0;
+            // TODO: also track highest_rpcs_sum_hard_limit? llama doesn't need this, so it can wait
+
+            // check the highest work block for a set of rpcs that can serve our request load
+            // if it doesn't have enough rpcs for our request load, check the parent block
+            // TODO: loop for how many parent blocks? we don't want to serve blocks that are too far behind. probably different per chain
+            // TODO: this loop is pretty long. any way to clean up this code?
+            for _ in 0..6 {
+                let maybe_head_hash = maybe_head_block
+                    .hash
+                    .as_ref()
+                    .expect("blocks here always need hashes");
+
+                // find all rpcs with maybe_head_block as their current head
+                for (rpc_name, rpc_head_hash) in self.rpc_name_to_hash.iter() {
+                    if rpc_head_hash != maybe_head_hash {
+                        // connection is not on the desired block
+                        continue;
+                    }
+                    if highest_rpcs.contains(rpc_name.as_str()) {
+                        // connection is on a child block
+                        continue;
+                    }
+
+                    if let Some(rpc) = web3_connections.conns.get(rpc_name.as_str()) {
+                        highest_rpcs.insert(rpc_name);
+                        highest_rpcs_sum_soft_limit += rpc.soft_limit;
+                    } else {
+                        // i don't think this is an error. i think its just if a reconnect is currently happening
+                        warn!("connection missing: {}", rpc_name);
+                    }
+                }
+
+                if highest_rpcs_sum_soft_limit >= web3_connections.min_sum_soft_limit
+                    && highest_rpcs.len() >= web3_connections.min_head_rpcs
+                {
+                    // we have enough servers with enough requests
+                    break;
+                }
+
+                // not enough rpcs yet. check the parent block
+                if let Some(parent_block) = web3_connections
+                    .block_hashes
+                    .get(&maybe_head_block.parent_hash)
+                {
+                    // trace!(
+                    //     child=%maybe_head_hash, parent=%parent_block.hash.unwrap(), "avoiding thundering herd",
+                    // );
+
+                    maybe_head_block = parent_block;
+                    continue;
+                } else {
+                    if num_known < web3_connections.min_head_rpcs {
+                        return Err(anyhow::anyhow!(
+                            "not enough rpcs connected: {}/{}/{}",
+                            highest_rpcs.len(),
+                            num_known,
+                            web3_connections.min_head_rpcs,
+                        ));
+                    } else {
+                        let soft_limit_percent = (highest_rpcs_sum_soft_limit as f32
+                            / web3_connections.min_sum_soft_limit as f32)
+                            * 100.0;
+
+                        return Err(anyhow::anyhow!(
+                            "ran out of parents to check. rpcs {}/{}/{}. soft limit: {}/{} ({:.2}%)",
+                            highest_rpcs.len(),
+                            num_known,
+                            web3_connections.min_head_rpcs,
+                            highest_rpcs_sum_soft_limit,
+                            web3_connections.min_sum_soft_limit,
+                            soft_limit_percent,
+                        ));
+                    }
+                }
+            }
+
+            // TODO: if consensus_head_rpcs.is_empty, try another method of finding the head block. will need to change the return Err above into breaks.
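The parent-walk above is dense. Here is a simplified, self-contained sketch of the same idea, using plain structs in place of ArcBlock and the moka cache, and direct per-block votes in place of the accumulating highest_rpcs set:

    use std::collections::HashMap;

    struct Block {
        hash: u64,
        parent_hash: u64,
    }

    // walk from the highest advertised block toward its ancestors until some
    // block has enough votes, giving up after max_depth uncached or unpopular parents
    fn walk_to_consensus<'a>(
        mut candidate: &'a Block,
        blocks_by_hash: &'a HashMap<u64, Block>,
        votes_by_hash: &HashMap<u64, usize>,
        min_votes: usize,
        max_depth: usize,
    ) -> Option<&'a Block> {
        for _ in 0..max_depth {
            if votes_by_hash.get(&candidate.hash).copied().unwrap_or(0) >= min_votes {
                return Some(candidate);
            }
            // no quorum here. try the parent, if the cache still has it
            candidate = blocks_by_hash.get(&candidate.parent_hash)?;
        }
        None
    }

The real loop differs in one important way: an rpc that voted for a child block keeps counting for every ancestor it descends to, so the vote total only grows as the walk moves backward.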
+
+        // we've done all the searching for the heaviest block that we can
+        if highest_rpcs.len() < web3_connections.min_head_rpcs
+            || highest_rpcs_sum_soft_limit < web3_connections.min_sum_soft_limit
+        {
+            // if we get here, not enough servers are synced. return an error
+            let soft_limit_percent = (highest_rpcs_sum_soft_limit as f32
+                / web3_connections.min_sum_soft_limit as f32)
+                * 100.0;
+
+            return Err(anyhow::anyhow!(
+                "Not enough resources. rpcs {}/{}/{}. soft limit: {}/{} ({:.2}%)",
+                highest_rpcs.len(),
+                num_known,
+                web3_connections.min_head_rpcs,
+                highest_rpcs_sum_soft_limit,
+                web3_connections.min_sum_soft_limit,
+                soft_limit_percent,
+            ));
+        }
+
+        // success! this block has enough soft limit and nodes on it (or on later blocks)
+        let conns: Vec<Arc<Web3Connection>> = highest_rpcs
+            .into_iter()
+            .filter_map(|conn_name| web3_connections.conns.get(conn_name).cloned())
+            .collect();
+
+        // TODO: DEBUG only check
+        let _ = maybe_head_block
+            .hash
+            .expect("head blocks always have hashes");
+        let _ = maybe_head_block
+            .number
+            .expect("head blocks always have numbers");
+
+        let consensus_head_block: SavedBlock = maybe_head_block.into();
+
+        Ok(ConsensusConnections {
+            head_block: Some(consensus_head_block),
+            conns,
+            num_checked_conns: self.rpc_name_to_hash.len(),
+            includes_backups: self.includes_backups,
+        })
+    }
+}
+
+/// A ConsensusConnections builder that tracks all connection heads across multiple groups of servers
+pub struct ConsensusFinder {
+    /// only main servers
+    main: ConnectionsGroup,
+    /// main and backup servers
+    all: ConnectionsGroup,
+}
+
+impl Default for ConsensusFinder {
+    fn default() -> Self {
+        Self {
+            main: ConnectionsGroup::without_backups(),
+            all: ConnectionsGroup::with_backups(),
+        }
+    }
+}
+
+impl ConsensusFinder {
+    fn remove(&mut self, rpc: &Web3Connection) -> Option<H256> {
+        // TODO: should we have multiple backup tiers? (remote datacenters vs third party)
+        if !rpc.backup {
+            self.main.remove(rpc);
+        }
+        self.all.remove(rpc)
+    }
+
+    fn insert(&mut self, rpc: &Web3Connection, new_hash: H256) -> Option<H256> {
+        // TODO: should we have multiple backup tiers? (remote datacenters vs third party)
+        if !rpc.backup {
+            self.main.insert(rpc, new_hash);
+        }
+        self.all.insert(rpc, new_hash)
+    }
+
+    /// Update our tracking of the rpc and return true if something changed
+    async fn update_rpc(
+        &mut self,
+        rpc_head_block: Option<SavedBlock>,
+        rpc: Arc<Web3Connection>,
+        // we need this so we can save the block to caches. i don't like it though. maybe we should use a lazy_static Cache wrapper that has a "save_block" method? i generally dislike globals but i also dislike all the types having to pass each other around
+        web3_connections: &Web3Connections,
+    ) -> anyhow::Result<bool> {
+        // add the rpc's block to connection_heads, or remove the rpc from connection_heads
+        let changed = match rpc_head_block {
+            Some(mut rpc_head_block) => {
+                // we don't know if its on the heaviest chain yet
+                rpc_head_block.block = web3_connections
+                    .save_block(rpc_head_block.block, false)
+                    .await?;
+
+                // we used to remove here if the block was too far behind. but it just made things more complicated
+
+                let rpc_head_hash = rpc_head_block.hash();
+
+                if let Some(prev_hash) = self.insert(&rpc, rpc_head_hash) {
+                    if prev_hash == rpc_head_hash {
+                        // this block was already sent by this rpc.
return early + false + } else { + // new block for this rpc + true + } + } else { + // first block for this rpc + true + } + } + None => { + if self.remove(&rpc).is_none() { + // this rpc was already removed + false + } else { + // rpc head changed from being synced to not + true + } + } + }; + + Ok(changed) + } + + // TODO: this could definitely be cleaner. i don't like the error handling/unwrapping + async fn best_consensus_connections( + &mut self, + authorization: &Arc, + web3_connections: &Web3Connections, + ) -> ConsensusConnections { + let highest_block_num = match self + .all + .highest_block(authorization, web3_connections) + .await + { + None => { + return ConsensusConnections::default(); + } + Some(x) => x.number.expect("blocks here should always have a number"), + }; + + let min_block_num = highest_block_num.saturating_sub(U64::from(5)); + + // TODO: pass `min_block_num` to consensus_head_connections? + let consensus_head_for_main = self + .main + .consensus_head_connections(authorization, web3_connections) + .await + .map_err(|err| err.context("cannot use main group")); + + let consensus_num_for_main = consensus_head_for_main + .as_ref() + .ok() + .map(|x| x.head_block.as_ref().unwrap().number()); + + if let Some(consensus_num_for_main) = consensus_num_for_main { + if consensus_num_for_main >= min_block_num { + return consensus_head_for_main.unwrap(); + } + } + + // TODO: pass `min_block_num` to consensus_head_connections? + let consensus_connections_for_all = match self + .all + .consensus_head_connections(authorization, web3_connections) + .await + { + Err(err) => { + warn!("Unable to find any consensus head: {}", err); + return ConsensusConnections::default(); + } + Ok(x) => x, + }; + + let consensus_num_for_all = consensus_connections_for_all + .head_block + .as_ref() + .map(|x| x.number()); + + if consensus_num_for_all > consensus_num_for_main { + if consensus_num_for_all < Some(min_block_num) { + // TODO: this should have an alarm in sentry + error!("CONSENSUS HEAD w/ BACKUP NODES IS VERY OLD!"); + } + consensus_connections_for_all + } else { + if let Ok(x) = consensus_head_for_main { + error!("CONSENSUS HEAD IS VERY OLD! Backup RPCs did not improve this situation"); + x + } else { + error!("NO CONSENSUS HEAD!"); + ConsensusConnections::default() + } + } + } +} diff --git a/web3_proxy/src/rpcs/connection.rs b/web3_proxy/src/rpcs/connection.rs index a4e83a76..7b650316 100644 --- a/web3_proxy/src/rpcs/connection.rs +++ b/web3_proxy/src/rpcs/connection.rs @@ -84,6 +84,8 @@ pub struct Web3Connection { pub(super) soft_limit: u32, /// use web3 queries to find the block data limit for archive/pruned nodes pub(super) automatic_block_limit: bool, + /// only use this rpc if everything else is lagging too far. this allows us to ignore fast but very low limit rpcs + pub(super) backup: bool, /// TODO: have an enum for this so that "no limit" prints pretty? pub(super) block_data_limit: AtomicU64, /// Lower tiers are higher priority when sending requests @@ -111,6 +113,7 @@ impl Web3Connection { hard_limit: Option<(u64, RedisPool)>, // TODO: think more about this type soft_limit: u32, + backup: bool, block_data_limit: Option, block_map: BlockHashesCache, block_sender: Option>, @@ -149,6 +152,7 @@ impl Web3Connection { hard_limit, soft_limit, automatic_block_limit, + backup, block_data_limit, head_block: RwLock::new(Default::default()), tier, @@ -304,6 +308,7 @@ impl Web3Connection { None => return false, Some(x) => { // TODO: this 60 second limit is causing our polygons to fall behind. 
change this to number of blocks? + // TODO: sometimes blocks might actually just take longer than 60 seconds if x.syncing(60) { // skip syncing nodes. even though they might be able to serve a query, // latency will be poor and it will get in the way of them syncing further @@ -648,7 +653,7 @@ impl Web3Connection { // if this block is too old, return an error so we reconnect let current_lag = x.lag(); if current_lag > allowed_lag { - let level = if warned == 0 { + let level = if warned == 0 && !conn.backup { log::Level::Warn } else if warned % 100 == 0 { log::Level::Debug @@ -1225,6 +1230,7 @@ mod tests { hard_limit: None, soft_limit: 1_000, automatic_block_limit: false, + backup: false, block_data_limit: block_data_limit.into(), tier: 0, head_block: RwLock::new(Some(head_block.clone())), @@ -1273,6 +1279,7 @@ mod tests { hard_limit: None, soft_limit: 1_000, automatic_block_limit: false, + backup: false, block_data_limit: block_data_limit.into(), tier: 0, head_block: RwLock::new(Some(head_block.clone())), @@ -1325,6 +1332,7 @@ mod tests { hard_limit: None, soft_limit: 1_000, automatic_block_limit: false, + backup: false, block_data_limit: block_data_limit.into(), tier: 0, head_block: RwLock::new(Some(head_block.clone())), diff --git a/web3_proxy/src/rpcs/connections.rs b/web3_proxy/src/rpcs/connections.rs index 82dcbbe7..93493716 100644 --- a/web3_proxy/src/rpcs/connections.rs +++ b/web3_proxy/src/rpcs/connections.rs @@ -4,7 +4,7 @@ use super::connection::Web3Connection; use super::request::{ OpenRequestHandle, OpenRequestHandleMetrics, OpenRequestResult, RequestRevertHandler, }; -use super::synced_connections::SyncedConnections; +use super::synced_connections::ConsensusConnections; use crate::app::{flatten_handle, AnyhowJoinHandle}; use crate::config::{BlockAndRpc, TxHashAndRpc, Web3ConnectionConfig}; use crate::frontend::authorization::{Authorization, RequestMetadata}; @@ -40,7 +40,7 @@ use tokio::time::{interval, sleep, sleep_until, Duration, Instant, MissedTickBeh pub struct Web3Connections { pub(crate) conns: HashMap>, /// any requests will be forwarded to one (or more) of these connections - pub(super) synced_connections: ArcSwap, + pub(super) synced_connections: ArcSwap, pub(super) pending_transactions: Cache, /// TODO: this map is going to grow forever unless we do some sort of pruning. maybe store pruned in redis? @@ -196,7 +196,7 @@ impl Web3Connections { } } - let synced_connections = SyncedConnections::default(); + let synced_connections = ConsensusConnections::default(); // TODO: max_capacity and time_to_idle from config // all block hashes are the same size, so no need for weigher @@ -329,6 +329,7 @@ impl Web3Connections { } /// Send the same request to all the handles. Returning the most common success or most common error. + /// TODO: option to return the fastest response and handles for all the others instead? pub async fn try_send_parallel_requests( &self, active_request_handles: Vec, @@ -501,7 +502,7 @@ impl Web3Connections { .collect(); trace!("minimum available requests: {}", minimum); - trace!("maximum available requests: {}", minimum); + trace!("maximum available requests: {}", maximum); if maximum < 0.0 { // TODO: if maximum < 0 and there are other tiers on the same block, we should include them now @@ -725,10 +726,20 @@ impl Web3Connections { } // some errors should be retried on other nodes + let error_msg = error.message.as_str(); + + // different providers do different codes. 
check all of them + // TODO: there's probably more strings to add here + let rate_limit_substrings = ["limit", "exceeded"]; + for rate_limit_substr in rate_limit_substrings { + if error_msg.contains(rate_limit_substr) { + warn!("rate limited by {:?}", skip_rpcs.last()); + continue; + } + } + match error.code { -32000 => { - let error_msg = error.message.as_str(); - // TODO: regex? let retry_prefixes = [ "header not found", @@ -866,7 +877,7 @@ impl Web3Connections { // TODO: return a 502? if it does? // return Err(anyhow::anyhow!("no available rpcs!")); // TODO: sleep how long? - // TODO: subscribe to something in SyncedConnections instead + // TODO: subscribe to something in ConsensusConnections instead sleep(Duration::from_millis(200)).await; continue; @@ -951,7 +962,11 @@ mod tests { // TODO: why is this allow needed? does tokio::test get in the way somehow? #![allow(unused_imports)] use super::*; - use crate::rpcs::{blockchain::SavedBlock, connection::ProviderState, provider::Web3Provider}; + use crate::rpcs::{ + blockchain::{ConsensusFinder, SavedBlock}, + connection::ProviderState, + provider::Web3Provider, + }; use ethers::types::{Block, U256}; use log::{trace, LevelFilter}; use parking_lot::RwLock; @@ -992,8 +1007,8 @@ mod tests { let head_block = Arc::new(head_block); // TODO: write a impl From for Block -> BlockId? - let lagged_block: SavedBlock = lagged_block.into(); - let head_block: SavedBlock = head_block.into(); + let mut lagged_block: SavedBlock = lagged_block.into(); + let mut head_block: SavedBlock = head_block.into(); let block_data_limit = u64::MAX; @@ -1012,6 +1027,7 @@ mod tests { hard_limit: None, soft_limit: 1_000, automatic_block_limit: true, + backup: false, block_data_limit: block_data_limit.into(), tier: 0, head_block: RwLock::new(Some(head_block.clone())), @@ -1032,6 +1048,7 @@ mod tests { hard_limit: None, soft_limit: 1_000, automatic_block_limit: false, + backup: false, block_data_limit: block_data_limit.into(), tier: 0, head_block: RwLock::new(Some(lagged_block.clone())), @@ -1072,7 +1089,7 @@ mod tests { let (head_block_sender, _head_block_receiver) = watch::channel::(Default::default()); - let mut connection_heads = HashMap::new(); + let mut connection_heads = ConsensusFinder::default(); // process None so that conns @@ -1123,7 +1140,7 @@ mod tests { assert!(matches!(x, OpenRequestResult::NotReady)); // add lagged blocks to the conns. both servers should be allowed - conns.save_block(&lagged_block.block, true).await.unwrap(); + lagged_block.block = conns.save_block(lagged_block.block, true).await.unwrap(); conns .process_block_from_rpc( @@ -1151,7 +1168,7 @@ mod tests { assert_eq!(conns.num_synced_rpcs(), 2); // add head block to the conns. 
lagged_rpc should not be available - conns.save_block(&head_block.block, true).await.unwrap(); + head_block.block = conns.save_block(head_block.block, true).await.unwrap(); conns .process_block_from_rpc( @@ -1236,6 +1253,7 @@ mod tests { hard_limit: None, soft_limit: 3_000, automatic_block_limit: false, + backup: false, block_data_limit: 64.into(), tier: 1, head_block: RwLock::new(Some(head_block.clone())), @@ -1256,6 +1274,7 @@ mod tests { hard_limit: None, soft_limit: 1_000, automatic_block_limit: false, + backup: false, block_data_limit: u64::MAX.into(), tier: 2, head_block: RwLock::new(Some(head_block.clone())), @@ -1295,7 +1314,7 @@ mod tests { let (head_block_sender, _head_block_receiver) = watch::channel::(Default::default()); - let mut connection_heads = HashMap::new(); + let mut connection_heads = ConsensusFinder::default(); conns .process_block_from_rpc( diff --git a/web3_proxy/src/rpcs/synced_connections.rs b/web3_proxy/src/rpcs/synced_connections.rs index f6a5e288..824857ce 100644 --- a/web3_proxy/src/rpcs/synced_connections.rs +++ b/web3_proxy/src/rpcs/synced_connections.rs @@ -9,19 +9,33 @@ use std::sync::Arc; /// A collection of Web3Connections that are on the same block. /// Serialize is so we can print it on our debug endpoint #[derive(Clone, Default, Serialize)] -pub struct SyncedConnections { +pub struct ConsensusConnections { // TODO: store ArcBlock instead? pub(super) head_block: Option, // TODO: this should be able to serialize, but it isn't #[serde(skip_serializing)] pub(super) conns: Vec>, + pub(super) num_checked_conns: usize, + pub(super) includes_backups: bool, } -impl fmt::Debug for SyncedConnections { +impl ConsensusConnections { + pub fn num_conns(&self) -> usize { + self.conns.len() + } + + pub fn sum_soft_limit(&self) -> u32 { + self.conns.iter().fold(0, |sum, rpc| sum + rpc.soft_limit) + } + + // TODO: sum_hard_limit? +} + +impl fmt::Debug for ConsensusConnections { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // TODO: the default formatter takes forever to write. this is too quiet though // TODO: print the actual conns? 
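One way to satisfy the "print the actual conns" TODO just above without the slow default formatter, assuming only the pub name field that Web3Connection is shown to have elsewhere in this patch:

    // list just the connection names instead of whole Web3Connection values
    .field("conns", &self.conns.iter().map(|c| &c.name).collect::<Vec<_>>())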
- f.debug_struct("SyncedConnections") + f.debug_struct("ConsensusConnections") .field("head_block", &self.head_block) .field("num_conns", &self.conns.len()) .finish_non_exhaustive() From 76e51e3d1120835a977e7f3c304b2d6643749d28 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 19 Jan 2023 02:21:39 -0800 Subject: [PATCH 14/80] better log level --- web3_proxy/src/rpcs/connection.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/web3_proxy/src/rpcs/connection.rs b/web3_proxy/src/rpcs/connection.rs index 7b650316..070c88b8 100644 --- a/web3_proxy/src/rpcs/connection.rs +++ b/web3_proxy/src/rpcs/connection.rs @@ -653,8 +653,12 @@ impl Web3Connection { // if this block is too old, return an error so we reconnect let current_lag = x.lag(); if current_lag > allowed_lag { - let level = if warned == 0 && !conn.backup { - log::Level::Warn + let level = if warned == 0 { + if conn.backup { + log::Level::Info + } else { + log::Level::Warn + } } else if warned % 100 == 0 { log::Level::Debug } else { From 274778cd125d70cb5e6e6f1ed3a3cafb285f5b33 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 19 Jan 2023 02:21:45 -0800 Subject: [PATCH 15/80] cargo upgrade --workspace --- Cargo.lock | 20 ++++++++++---------- deferred-rate-limiter/Cargo.toml | 2 +- entities/Cargo.toml | 2 +- migration/Cargo.toml | 4 ++-- redis-rate-limiter/Cargo.toml | 2 +- web3_proxy/Cargo.toml | 2 +- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9f4bf45c..faf872a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3982,9 +3982,9 @@ dependencies = [ [[package]] name = "sea-orm" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc2db217f2061ab2bbb1bd22323a533ace0617f97690919f3ed3894e1b3ba170" +checksum = "88694d01b528a94f90ad87f8d2f546d060d070eee180315c67d158cb69476034" dependencies = [ "async-stream", "async-trait", @@ -4010,9 +4010,9 @@ dependencies = [ [[package]] name = "sea-orm-cli" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebcce92f0f804acd10b4378a3c8b0e5fb28f3a9ae9337006bd651baa3a95632c" +checksum = "0ebe1f820fe8949cf6a57272ba9ebd0be766e47c9b85c04b3cabea40ab9459b3" dependencies = [ "chrono", "clap", @@ -4026,9 +4026,9 @@ dependencies = [ [[package]] name = "sea-orm-macros" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38066057ef1fa17ddc6ce1458cf269862b8f1df919497d110ea127b549a90fbd" +checksum = "7216195de9c6b2474fd0efab486173dccd0eff21f28cc54aa4c0205d52fb3af0" dependencies = [ "bae", "heck 0.3.3", @@ -4039,9 +4039,9 @@ dependencies = [ [[package]] name = "sea-orm-migration" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ada716f9825e4190a0a8ebaecbf7171ce0ed6f218ea2e70086bdc72ccfc1d03c" +checksum = "0ed3cdfa669e4c385922f902b9a58e0c2128782a4d0fe79c6c34f3b927565e5b" dependencies = [ "async-trait", "clap", @@ -4988,9 +4988,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.24.1" +version = "1.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d9f76183f91ecfb55e1d7d5602bd1d979e38a3a522fe900241cf195624d67ae" +checksum = "597a12a59981d9e3c38d216785b0c37399f6e415e8d0712047620f189371b0bb" dependencies = [ "autocfg", "bytes", diff --git a/deferred-rate-limiter/Cargo.toml 
b/deferred-rate-limiter/Cargo.toml index fe1909e3..14602245 100644 --- a/deferred-rate-limiter/Cargo.toml +++ b/deferred-rate-limiter/Cargo.toml @@ -11,4 +11,4 @@ anyhow = "1.0.68" hashbrown = "0.13.2" log = "0.4.17" moka = { version = "0.9.6", default-features = false, features = ["future"] } -tokio = "1.24.1" +tokio = "1.24.2" diff --git a/entities/Cargo.toml b/entities/Cargo.toml index 64d052a3..16e3ac8e 100644 --- a/entities/Cargo.toml +++ b/entities/Cargo.toml @@ -10,7 +10,7 @@ path = "src/mod.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -sea-orm = "0.10.6" +sea-orm = "0.10.7" serde = "1.0.152" uuid = "1.2.2" ethers = "1.0.2" diff --git a/migration/Cargo.toml b/migration/Cargo.toml index fd1e4a12..d1791630 100644 --- a/migration/Cargo.toml +++ b/migration/Cargo.toml @@ -9,10 +9,10 @@ name = "migration" path = "src/lib.rs" [dependencies] -tokio = { version = "1.24.1", features = ["full", "tracing"] } +tokio = { version = "1.24.2", features = ["full", "tracing"] } [dependencies.sea-orm-migration] -version = "0.10.6" +version = "0.10.7" features = [ # Enable at least one `ASYNC_RUNTIME` and `DATABASE_DRIVER` feature if you want to run migration via CLI. # View the list of supported features at https://www.sea-ql.org/SeaORM/docs/install-and-config/database-and-async-runtime. diff --git a/redis-rate-limiter/Cargo.toml b/redis-rate-limiter/Cargo.toml index fcc05372..c4af3503 100644 --- a/redis-rate-limiter/Cargo.toml +++ b/redis-rate-limiter/Cargo.toml @@ -7,4 +7,4 @@ edition = "2021" [dependencies] anyhow = "1.0.68" deadpool-redis = { version = "0.11.1", features = ["rt_tokio_1", "serde"] } -tokio = "1.24.1" +tokio = "1.24.2" diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml index e0c64087..f1fc8e33 100644 --- a/web3_proxy/Cargo.toml +++ b/web3_proxy/Cargo.toml @@ -60,7 +60,7 @@ serde_json = { version = "1.0.91", default-features = false, features = ["alloc" serde_prometheus = "0.1.6" # TODO: make sure this time version matches siwe. PR to put this in their prelude time = "0.3.17" -tokio = { version = "1.24.1", features = ["full"] } +tokio = { version = "1.24.2", features = ["full"] } # TODO: make sure this uuid version matches sea-orm. PR to put this in their prelude tokio-stream = { version = "0.1.11", features = ["sync"] } toml = "0.5.10" From 2cb6dde052235f57070050550436197476b25030 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 19 Jan 2023 02:26:54 -0800 Subject: [PATCH 16/80] more log improvements --- web3_proxy/src/rpcs/blockchain.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/web3_proxy/src/rpcs/blockchain.rs b/web3_proxy/src/rpcs/blockchain.rs index e3505c97..f611c7d9 100644 --- a/web3_proxy/src/rpcs/blockchain.rs +++ b/web3_proxy/src/rpcs/blockchain.rs @@ -865,7 +865,9 @@ impl ConsensusFinder { .await { Err(err) => { - warn!("Unable to find any consensus head: {}", err); + if self.all.rpc_name_to_hash.len() < web3_connections.min_head_rpcs { + debug!("No consensus head yet: {}", err); + } return ConsensusConnections::default(); } Ok(x) => x, @@ -887,6 +889,7 @@ impl ConsensusFinder { error!("CONSENSUS HEAD IS VERY OLD! Backup RPCs did not improve this situation"); x } else { + // TODO: i don't think we need this error. 
and i doubt we'll ever even get here
                error!("NO CONSENSUS HEAD!");
                ConsensusConnections::default()
            }
        }
    }
}

From f6811d3c05babfaa422bc2304033942e8a93c4ba Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Thu, 19 Jan 2023 02:30:58 -0800
Subject: [PATCH 17/80] with how we do --config, we need to change entrypoint now

---
 Dockerfile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 2f35c3bf..bb922d3d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -14,8 +14,8 @@ FROM debian:bullseye-slim

 COPY --from=builder /opt/bin/* /usr/local/bin/

-# TODO: be careful changing this to just web3_proxy_cli. if you don't do it correctly, there will be a production outage!
-ENTRYPOINT ["web3_proxy_cli", "proxyd"]
+ENTRYPOINT ["web3_proxy_cli"]
+CMD [ "--config", "/web3-proxy.toml", "daemon" ]

 # TODO: lower log level when done with prototyping
 ENV RUST_LOG "web3_proxy=debug"

From 2d5d115d6fbf6bac94f3dc69b07380d0dba0e891 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Thu, 19 Jan 2023 02:32:24 -0800
Subject: [PATCH 18/80] proxyd, not daemon

---
 Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index bb922d3d..2614085b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -15,7 +15,7 @@ FROM debian:bullseye-slim
 COPY --from=builder /opt/bin/* /usr/local/bin/

 ENTRYPOINT ["web3_proxy_cli"]
-CMD [ "--config", "/web3-proxy.toml", "daemon" ]
+CMD [ "--config", "/web3-proxy.toml", "proxyd" ]

 # TODO: lower log level when done with prototyping
 ENV RUST_LOG "web3_proxy=debug"

From 52a9ba604c1155265e225fcee7b4ce1cb3e7a9c3 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Thu, 19 Jan 2023 03:05:39 -0800
Subject: [PATCH 19/80] remove allowed lag

---
 TODO.md                            |   1 +
 web3_proxy/src/app/mod.rs          |  15 ----
 web3_proxy/src/config.rs           |   2 -
 web3_proxy/src/rpcs/blockchain.rs  |   9 +--
 web3_proxy/src/rpcs/connection.rs  |  86 ++---------------------
 web3_proxy/src/rpcs/connections.rs | 108 +++++++++++++++++------------
 6 files changed, 74 insertions(+), 147 deletions(-)

diff --git a/TODO.md b/TODO.md
index afe04f5d..693b2179 100644
--- a/TODO.md
+++ b/TODO.md
@@ -307,6 +307,7 @@ These are not yet ordered. There might be duplicates. We might not actually need
 - [x] improve rate limiting on websockets
 - [x] retry another server if we get a jsonrpc response error about rate limits
 - [x] major refactor to only use backup servers when absolutely necessary
+- [x] remove allowed lag
 - [-] proxy mode for benchmarking all backends
 - [-] proxy mode for sending to multiple backends
 - [-] let users choose a % of reverts to log (or maybe x/second).
someone like curve logging all reverts will be a BIG database very quickly diff --git a/web3_proxy/src/app/mod.rs b/web3_proxy/src/app/mod.rs index 055694f3..81968a6b 100644 --- a/web3_proxy/src/app/mod.rs +++ b/web3_proxy/src/app/mod.rs @@ -190,7 +190,6 @@ pub struct Web3ProxyApp { head_block_receiver: watch::Receiver, pending_tx_sender: broadcast::Sender, pub config: AppConfig, - pub allowed_lag: u64, pub db_conn: Option, pub db_replica: Option, /// prometheus metrics @@ -687,20 +686,8 @@ impl Web3ProxyApp { .time_to_idle(Duration::from_secs(120)) .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default()); - // TODO: get this out of the toml instead - let allowed_lag = match top_config.app.chain_id { - 1 => 60, - 137 => 10, - 250 => 10, - _ => { - warn!("defaulting allowed lag to 60"); - 60 - } - }; - let app = Self { config: top_config.app, - allowed_lag, balanced_rpcs, private_rpcs, response_cache, @@ -1432,7 +1419,6 @@ impl Web3ProxyApp { .balanced_rpcs .try_proxy_connection( proxy_mode, - self.allowed_lag, &authorization, request, Some(&request_metadata), @@ -1459,7 +1445,6 @@ impl Web3ProxyApp { self.balanced_rpcs .try_proxy_connection( proxy_mode, - self.allowed_lag, &authorization, request, Some(&request_metadata), diff --git a/web3_proxy/src/config.rs b/web3_proxy/src/config.rs index 20aabee3..397a9c60 100644 --- a/web3_proxy/src/config.rs +++ b/web3_proxy/src/config.rs @@ -223,7 +223,6 @@ impl Web3ConnectionConfig { pub async fn spawn( self, name: String, - allowed_lag: u64, db_conn: Option, redis_pool: Option, chain_id: u64, @@ -262,7 +261,6 @@ impl Web3ConnectionConfig { Web3Connection::spawn( name, - allowed_lag, self.display_name, chain_id, db_conn, diff --git a/web3_proxy/src/rpcs/blockchain.rs b/web3_proxy/src/rpcs/blockchain.rs index f611c7d9..56bbf045 100644 --- a/web3_proxy/src/rpcs/blockchain.rs +++ b/web3_proxy/src/rpcs/blockchain.rs @@ -78,11 +78,6 @@ impl SavedBlock { pub fn number(&self) -> U64 { self.block.number.expect("saved blocks must have a number") } - - /// When the block was received, this node was still syncing - pub fn syncing(&self, allowed_lag: u64) -> bool { - self.age > allowed_lag - } } impl From for SavedBlock { @@ -172,7 +167,7 @@ impl Web3Connections { // TODO: request_metadata? maybe we should put it in the authorization? // TODO: don't hard code allowed lag let response = self - .try_send_best_consensus_head_connection(60, authorization, request, None, None) + .try_send_best_consensus_head_connection(authorization, request, None, None) .await?; let block = response.result.context("failed fetching block")?; @@ -248,7 +243,7 @@ impl Web3Connections { // TODO: if error, retry? // TODO: request_metadata or authorization? let response = self - .try_send_best_consensus_head_connection(60, authorization, request, None, Some(num)) + .try_send_best_consensus_head_connection(authorization, request, None, Some(num)) .await?; let raw_block = response.result.context("no block result")?; diff --git a/web3_proxy/src/rpcs/connection.rs b/web3_proxy/src/rpcs/connection.rs index 070c88b8..ea3f9c67 100644 --- a/web3_proxy/src/rpcs/connection.rs +++ b/web3_proxy/src/rpcs/connection.rs @@ -63,7 +63,6 @@ pub struct Web3Connection { pub name: String, pub display_name: Option, pub db_conn: Option, - pub(super) allowed_lag: u64, /// TODO: can we get this from the provider? do we even need it? pub(super) url: String, /// Some connections use an http_client. 
we keep a clone for reconnecting @@ -101,7 +100,6 @@ impl Web3Connection { #[allow(clippy::too_many_arguments)] pub async fn spawn( name: String, - allowed_lag: u64, display_name: Option, chain_id: u64, db_conn: Option, @@ -140,7 +138,6 @@ impl Web3Connection { let new_connection = Self { name, - allowed_lag, db_conn: db_conn.clone(), display_name, http_client, @@ -195,25 +192,7 @@ impl Web3Connection { return Ok(None); } - // check if we are synced - let head_block: ArcBlock = self - .wait_for_request_handle(authorization, Duration::from_secs(30), true) - .await? - .request::<_, Option<_>>( - "eth_getBlockByNumber", - &json!(("latest", false)), - // error here are expected, so keep the level low - Level::Warn.into(), - ) - .await? - .context("no block during check_block_data_limit!")?; - - if SavedBlock::from(head_block).syncing(60) { - // if the node is syncing, we can't check its block data limit - return Ok(None); - } - - // TODO: add SavedBlock to self? probably best not to. we might not get marked Ready + // TODO: check eth_syncing. if it is not false, return Ok(None) let mut limit = None; @@ -296,27 +275,10 @@ impl Web3Connection { self.block_data_limit.load(atomic::Ordering::Acquire).into() } - pub fn syncing(&self, allowed_lag: u64) -> bool { - match self.head_block.read().clone() { - None => true, - Some(x) => x.syncing(allowed_lag), - } - } - pub fn has_block_data(&self, needed_block_num: &U64) -> bool { let head_block_num = match self.head_block.read().clone() { None => return false, - Some(x) => { - // TODO: this 60 second limit is causing our polygons to fall behind. change this to number of blocks? - // TODO: sometimes blocks might actually just take longer than 60 seconds - if x.syncing(60) { - // skip syncing nodes. even though they might be able to serve a query, - // latency will be poor and it will get in the way of them syncing further - return false; - } - - x.number() - } + Some(x) => x.number(), }; // this rpc doesn't have that block yet. still syncing @@ -548,7 +510,7 @@ impl Web3Connection { let _ = head_block.insert(new_head_block.clone().into()); } - if self.block_data_limit() == U64::zero() && !self.syncing(1) { + if self.block_data_limit() == U64::zero() { let authorization = Arc::new(Authorization::internal(self.db_conn.clone())?); if let Err(err) = self.check_block_data_limit(&authorization).await { warn!( @@ -596,8 +558,6 @@ impl Web3Connection { reconnect: bool, tx_id_sender: Option)>>, ) -> anyhow::Result<()> { - let allowed_lag = self.allowed_lag; - loop { let http_interval_receiver = http_interval_sender.as_ref().map(|x| x.subscribe()); @@ -629,8 +589,6 @@ impl Web3Connection { let health_sleep_seconds = 10; sleep(Duration::from_secs(health_sleep_seconds)).await; - let mut warned = 0; - loop { // TODO: what if we just happened to have this check line up with another restart? // TODO: think more about this @@ -649,38 +607,6 @@ impl Web3Connection { } // trace!("health check on {}. 
unlocked", conn); - if let Some(x) = &*conn.head_block.read() { - // if this block is too old, return an error so we reconnect - let current_lag = x.lag(); - if current_lag > allowed_lag { - let level = if warned == 0 { - if conn.backup { - log::Level::Info - } else { - log::Level::Warn - } - } else if warned % 100 == 0 { - log::Level::Debug - } else { - log::Level::Trace - }; - - log::log!( - level, - "{} is lagged {} secs: {} {}", - conn, - current_lag, - x.number(), - x.hash(), - ); - - warned += 1; - } else { - // reset warnings now that we are connected - warned = 0; - } - } - sleep(Duration::from_secs(health_sleep_seconds)).await; } }; @@ -1222,7 +1148,6 @@ mod tests { let x = Web3Connection { name: "name".to_string(), - allowed_lag: 10, db_conn: None, display_name: None, url: "ws://example.com".to_string(), @@ -1271,7 +1196,6 @@ mod tests { // TODO: this is getting long. have a `impl Default` let x = Web3Connection { name: "name".to_string(), - allowed_lag: 10, db_conn: None, display_name: None, url: "ws://example.com".to_string(), @@ -1299,6 +1223,8 @@ mod tests { assert!(!x.has_block_data(&(head_block.number() + 1000))); } + /* + // TODO: think about how to bring the concept of a "lagged" node back #[test] fn test_lagged_node_not_has_block_data() { let now: U256 = SystemTime::now() @@ -1324,7 +1250,6 @@ mod tests { let x = Web3Connection { name: "name".to_string(), - allowed_lag: 10, db_conn: None, display_name: None, url: "ws://example.com".to_string(), @@ -1349,4 +1274,5 @@ mod tests { assert!(!x.has_block_data(&(head_block.number() + 1))); assert!(!x.has_block_data(&(head_block.number() + 1000))); } + */ } diff --git a/web3_proxy/src/rpcs/connections.rs b/web3_proxy/src/rpcs/connections.rs index 93493716..99eb61a8 100644 --- a/web3_proxy/src/rpcs/connections.rs +++ b/web3_proxy/src/rpcs/connections.rs @@ -89,9 +89,6 @@ impl Web3Connections { } }; - // TODO: this might be too aggressive. think about this more - let allowed_lag = ((expected_block_time_ms * 3) as f64 / 1000.0).round() as u64; - let http_interval_sender = if http_client.is_some() { let (sender, receiver) = broadcast::channel(1); @@ -155,7 +152,6 @@ impl Web3Connections { server_config .spawn( server_name, - allowed_lag, db_conn, redis_pool, chain_id, @@ -408,10 +404,40 @@ impl Web3Connections { unimplemented!("this shouldn't be possible") } - /// get the best available rpc server with the consensus head block. it might have blocks after the consensus head pub async fn best_consensus_head_connection( &self, - allowed_lag: u64, + authorization: &Arc, + request_metadata: Option<&Arc>, + skip: &[Arc], + min_block_needed: Option<&U64>, + ) -> anyhow::Result { + if let Ok(without_backups) = self + ._best_consensus_head_connection( + false, + authorization, + request_metadata, + skip, + min_block_needed, + ) + .await + { + return Ok(without_backups); + } + + self._best_consensus_head_connection( + true, + authorization, + request_metadata, + skip, + min_block_needed, + ) + .await + } + + /// get the best available rpc server with the consensus head block. it might have blocks after the consensus head + async fn _best_consensus_head_connection( + &self, + allow_backups: bool, authorization: &Arc, request_metadata: Option<&Arc>, skip: &[Arc], @@ -421,12 +447,13 @@ impl Web3Connections { (Option, u64), Vec>, > = if let Some(min_block_needed) = min_block_needed { - // need a potentially old block. check all the rpcs + // need a potentially old block. check all the rpcs. 
prefer the most synced let mut m = BTreeMap::new(); for x in self .conns .values() + .filter(|x| if allow_backups { true } else { !x.backup }) .filter(|x| !skip.contains(x)) .filter(|x| x.has_block_data(min_block_needed)) .cloned() @@ -448,15 +475,7 @@ impl Web3Connections { // need latest. filter the synced rpcs let synced_connections = self.synced_connections.load(); - let head_block = match synced_connections.head_block.as_ref() { - None => return Ok(OpenRequestResult::NotReady), - Some(x) => x, - }; - - // TODO: self.allowed_lag instead of taking as an arg - if head_block.syncing(allowed_lag) { - return Ok(OpenRequestResult::NotReady); - } + // TODO: if head_block is super old. emit an error! let mut m = BTreeMap::new(); @@ -575,7 +594,7 @@ impl Web3Connections { None => { // none of the servers gave us a time to retry at - // TODO: bring this back? + // TODO: bring this back? need to think about how to do this with `allow_backups` // we could return an error here, but maybe waiting a second will fix the problem // TODO: configurable max wait? the whole max request time, or just some portion? // let handle = sorted_rpcs @@ -605,6 +624,24 @@ impl Web3Connections { authorization: &Arc, block_needed: Option<&U64>, max_count: Option, + ) -> Result, Option> { + if let Ok(without_backups) = self + ._all_synced_connections(false, authorization, block_needed, max_count) + .await + { + return Ok(without_backups); + } + + self._all_synced_connections(true, authorization, block_needed, max_count) + .await + } + + async fn _all_synced_connections( + &self, + allow_backups: bool, + authorization: &Arc, + block_needed: Option<&U64>, + max_count: Option, ) -> Result, Option> { let mut earliest_retry_at = None; // TODO: with capacity? @@ -621,12 +658,14 @@ impl Web3Connections { break; } + if !allow_backups && connection.backup { + continue; + } + if let Some(block_needed) = block_needed { if !connection.has_block_data(block_needed) { continue; } - } else if connection.syncing(30) { - continue; } // check rate limits and increment our connection counter @@ -663,10 +702,8 @@ impl Web3Connections { } /// be sure there is a timeout on this or it might loop forever - /// TODO: do not take allowed_lag here. have it be on the connections struct instead pub async fn try_send_best_consensus_head_connection( &self, - allowed_lag: u64, authorization: &Arc, request: JsonRpcRequest, request_metadata: Option<&Arc>, @@ -682,7 +719,6 @@ impl Web3Connections { } match self .best_consensus_head_connection( - allowed_lag, authorization, request_metadata, &skip_rpcs, @@ -903,7 +939,6 @@ impl Web3Connections { pub async fn try_proxy_connection( &self, proxy_mode: ProxyMode, - allowed_lag: u64, authorization: &Arc, request: JsonRpcRequest, request_metadata: Option<&Arc>, @@ -912,7 +947,6 @@ impl Web3Connections { match proxy_mode { ProxyMode::Best => { self.try_send_best_consensus_head_connection( - allowed_lag, authorization, request, request_metadata, @@ -1014,8 +1048,6 @@ mod tests { let head_rpc = Web3Connection { name: "synced".to_string(), - // TODO: what should this be? 
- allowed_lag: 10, db_conn: None, display_name: None, url: "ws://example.com/synced".to_string(), @@ -1036,7 +1068,6 @@ mod tests { let lagged_rpc = Web3Connection { name: "lagged".to_string(), - allowed_lag: 10, db_conn: None, display_name: None, url: "ws://example.com/lagged".to_string(), @@ -1129,9 +1160,8 @@ mod tests { ); // best_synced_backend_connection requires servers to be synced with the head block - // TODO: don't hard code allowed_lag let x = conns - .best_consensus_head_connection(60, &authorization, None, &[], None) + .best_consensus_head_connection(&authorization, None, &[], None) .await .unwrap(); @@ -1186,21 +1216,21 @@ mod tests { assert!(matches!( conns - .best_consensus_head_connection(60, &authorization, None, &[], None) + .best_consensus_head_connection(&authorization, None, &[], None) .await, Ok(OpenRequestResult::Handle(_)) )); assert!(matches!( conns - .best_consensus_head_connection(60, &authorization, None, &[], Some(&0.into())) + .best_consensus_head_connection(&authorization, None, &[], Some(&0.into())) .await, Ok(OpenRequestResult::Handle(_)) )); assert!(matches!( conns - .best_consensus_head_connection(60, &authorization, None, &[], Some(&1.into())) + .best_consensus_head_connection(&authorization, None, &[], Some(&1.into())) .await, Ok(OpenRequestResult::Handle(_)) )); @@ -1208,7 +1238,7 @@ mod tests { // future block should not get a handle assert!(matches!( conns - .best_consensus_head_connection(60, &authorization, None, &[], Some(&2.into())) + .best_consensus_head_connection(&authorization, None, &[], Some(&2.into())) .await, Ok(OpenRequestResult::NotReady) )); @@ -1241,7 +1271,6 @@ mod tests { let pruned_rpc = Web3Connection { name: "pruned".to_string(), - allowed_lag: 10, db_conn: None, display_name: None, url: "ws://example.com/pruned".to_string(), @@ -1262,7 +1291,6 @@ mod tests { let archive_rpc = Web3Connection { name: "archive".to_string(), - allowed_lag: 10, db_conn: None, display_name: None, url: "ws://example.com/archive".to_string(), @@ -1343,13 +1371,7 @@ mod tests { // best_synced_backend_connection requires servers to be synced with the head block let best_head_server = conns - .best_consensus_head_connection( - 60, - &authorization, - None, - &[], - Some(&head_block.number()), - ) + .best_consensus_head_connection(&authorization, None, &[], Some(&head_block.number())) .await; assert!(matches!( @@ -1358,7 +1380,7 @@ mod tests { )); let best_archive_server = conns - .best_consensus_head_connection(60, &authorization, None, &[], Some(&1.into())) + .best_consensus_head_connection(&authorization, None, &[], Some(&1.into())) .await; match best_archive_server { From aa6476ce2a23e9ef8b85d02efb157e3606ae16d3 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 19 Jan 2023 03:13:24 -0800 Subject: [PATCH 20/80] include proxyd in the command now --- docker-compose.common.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.common.yml b/docker-compose.common.yml index 26e24c3b..e6164994 100644 --- a/docker-compose.common.yml +++ b/docker-compose.common.yml @@ -4,7 +4,7 @@ services: build: . 
init: true restart: unless-stopped - command: --config /config.toml --workers 16 + command: --config /config.toml --workers 16 proxyd # rust's tokio crate expects a SIGINT https://tokio.rs/tokio/topics/shutdown stop_signal: SIGINT environment: From 4b048e59cd41c1cee2204995c1e073c8fba4668c Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 19 Jan 2023 03:27:44 -0800 Subject: [PATCH 21/80] add proxyd to eth too --- docker-compose.prod.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml index 2fb3bd45..5c6bf809 100644 --- a/docker-compose.prod.yml +++ b/docker-compose.prod.yml @@ -68,7 +68,7 @@ services: extends: file: docker-compose.common.yml service: web3-proxy - command: --config /config.toml --workers 48 + command: --config /config.toml --workers 48 proxyd volumes: - ./config/production-eth.toml:/config.toml - ./data/scratch:/scratch From ad169eda0ef4ea780cd378d23b20cd4e035cf002 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 19 Jan 2023 17:57:36 -0800 Subject: [PATCH 22/80] log cleanup --- web3_proxy/src/rpcs/connections.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web3_proxy/src/rpcs/connections.rs b/web3_proxy/src/rpcs/connections.rs index 99eb61a8..0953cde6 100644 --- a/web3_proxy/src/rpcs/connections.rs +++ b/web3_proxy/src/rpcs/connections.rs @@ -769,7 +769,7 @@ impl Web3Connections { let rate_limit_substrings = ["limit", "exceeded"]; for rate_limit_substr in rate_limit_substrings { if error_msg.contains(rate_limit_substr) { - warn!("rate limited by {:?}", skip_rpcs.last()); + warn!("rate limited by {}", skip_rpcs.last().unwrap()); continue; } } From 2f8920085ba1266dfa64dd3b703da0453db07922 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 19 Jan 2023 18:08:53 -0800 Subject: [PATCH 23/80] configurable gas buffer --- TODO.md | 1 + web3_proxy/src/app/mod.rs | 36 ++++++++++++----------- web3_proxy/src/bin/web3_proxy_cli/main.rs | 17 +++++++++-- web3_proxy/src/config.rs | 7 +++++ 4 files changed, 41 insertions(+), 20 deletions(-) diff --git a/TODO.md b/TODO.md index 693b2179..8574253a 100644 --- a/TODO.md +++ b/TODO.md @@ -308,6 +308,7 @@ These are not yet ordered. There might be duplicates. We might not actually need - [x] retry another server if we get a jsonrpc response error about rate limits - [x] major refactor to only use backup servers when absolutely necessary - [x] remove allowed lag +- [x] configurable gas buffer. default to the larger of 25k or 25% on polygon to work around erigon bug - [-] proxy mode for benchmarking all backends - [-] proxy mode for sending to multiple backends - [-] let users choose a % of reverts to log (or maybe x/second). 
someone like curve logging all reverts will be a BIG database very quickly diff --git a/web3_proxy/src/app/mod.rs b/web3_proxy/src/app/mod.rs index 81968a6b..2db6fe52 100644 --- a/web3_proxy/src/app/mod.rs +++ b/web3_proxy/src/app/mod.rs @@ -25,6 +25,7 @@ use entities::sea_orm_active_enums::LogLevel; use entities::user; use ethers::core::utils::keccak256; use ethers::prelude::{Address, Block, Bytes, Transaction, TxHash, H256, U64}; +use ethers::types::U256; use ethers::utils::rlp::{Decodable, Rlp}; use futures::future::join_all; use futures::stream::{FuturesUnordered, StreamExt}; @@ -1119,19 +1120,14 @@ impl Web3ProxyApp { // TODO: eth_sendPrivateTransaction (https://docs.flashbots.net/flashbots-auction/searchers/advanced/rpc-endpoint#eth_sendprivatetransaction) "eth_coinbase" => { // no need for serving coinbase - // we could return a per-user payment address here, but then we might leak that to dapps // no stats on this. its cheap json!(Address::zero()) } - /* - // erigon was giving bad estimates. but now it doesn't need it - // TODO: got reports of some gas estimate issue on polygon with their erc20s. maybe we do want it "eth_estimateGas" => { - // TODO: eth_estimateGas using anvil? - // TODO: modify the block requested? let mut response = self .balanced_rpcs - .try_send_best_upstream_server( + .try_proxy_connection( + proxy_mode, authorization, request, Some(&request_metadata), @@ -1139,11 +1135,9 @@ impl Web3ProxyApp { ) .await?; - let parsed_gas_estimate = if let Some(gas_estimate) = response.result.take() { - let parsed_gas_estimate: U256 = serde_json::from_str(gas_estimate.get()) - .context("gas estimate result is not an U256")?; - - parsed_gas_estimate + let mut gas_estimate: U256 = if let Some(gas_estimate) = response.result.take() { + serde_json::from_str(gas_estimate.get()) + .context("gas estimate result is not an U256")? } else { // i think this is always an error response let rpcs = request_metadata.backend_requests.lock().clone(); @@ -1151,13 +1145,21 @@ impl Web3ProxyApp { return Ok((response, rpcs)); }; - // increase by 1.01% - let parsed_gas_estimate = - parsed_gas_estimate * U256::from(101_010) / U256::from(100_000); + let gas_increase = + if let Some(gas_increase_percent) = self.config.gas_increase_percent { + let gas_increase = gas_estimate * gas_increase_percent / U256::from(100); - json!(parsed_gas_estimate) + let min_gas_increase = self.config.gas_increase_min.unwrap_or_default(); + + gas_increase.max(min_gas_increase) + } else { + self.config.gas_increase_min.unwrap_or_default() + }; + + gas_estimate += gas_increase; + + json!(gas_estimate) } - */ // TODO: eth_gasPrice that does awesome magic to predict the future "eth_hashrate" => { // no stats on this. 
it's cheap
diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs
index a99ea54d..e542fd32 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/main.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs
@@ -16,6 +16,7 @@ mod user_import;
 
 use anyhow::Context;
 use argh::FromArgs;
+use ethers::types::U256;
 use log::{info, warn};
 use std::{
     fs,
@@ -127,7 +128,10 @@ fn main() -> anyhow::Result<()> {
             .context(format!("checking for config at {}", top_config_path))?;
 
         let top_config: String = fs::read_to_string(top_config_path)?;
-        let top_config: TopConfig = toml::from_str(&top_config)?;
+        let mut top_config: TopConfig = toml::from_str(&top_config)?;
+
+        // TODO: this doesn't seem to do anything
+        proctitle::set_title(format!("web3_proxy-{}", top_config.app.chain_id));
 
         if cli_config.db_url.is_none() {
             cli_config.db_url = top_config.app.db_url.clone();
@@ -137,8 +141,15 @@ fn main() -> anyhow::Result<()> {
             cli_config.sentry_url = Some(sentry_url);
         }
 
-        // TODO: this doesn't seem to do anything
-        proctitle::set_title(format!("web3_proxy-{}", top_config.app.chain_id));
+        if top_config.app.chain_id == 137 {
+            if top_config.app.gas_increase_min.is_none() {
+                top_config.app.gas_increase_min = Some(U256::from(25_000));
+            }
+
+            if top_config.app.gas_increase_percent.is_none() {
+                top_config.app.gas_increase_percent = Some(U256::from(25));
+            }
+        }
 
         Some(top_config)
     } else {
diff --git a/web3_proxy/src/config.rs b/web3_proxy/src/config.rs
index 397a9c60..67f48870 100644
--- a/web3_proxy/src/config.rs
+++ b/web3_proxy/src/config.rs
@@ -4,6 +4,7 @@ use crate::rpcs::request::OpenRequestHandleMetrics;
 use crate::{app::AnyhowJoinHandle, rpcs::blockchain::ArcBlock};
 use argh::FromArgs;
 use ethers::prelude::TxHash;
+use ethers::types::U256;
 use hashbrown::HashMap;
 use log::warn;
 use migration::sea_orm::DatabaseConnection;
@@ -90,6 +91,12 @@ pub struct AppConfig {
     /// None = allow all requests
     pub default_user_max_requests_per_period: Option<u64>,
 
+    /// minimum amount to increase eth_estimateGas results
+    pub gas_increase_min: Option<U256>,
+
+    /// percentage to increase eth_estimateGas results. 100 == 100%
+    pub gas_increase_percent: Option<U256>,
+
     /// Restrict user registration.
/// None = no code needed pub invite_code: Option, From c3b53eb5f2007892f27c20afca8aebe46c31e015 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 19 Jan 2023 18:14:47 -0800 Subject: [PATCH 24/80] add backup indicator to more logs --- web3_proxy/src/bin/web3_proxy_cli/daemon.rs | 1 + web3_proxy/src/rpcs/blockchain.rs | 26 ++++++++++++++------- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/web3_proxy/src/bin/web3_proxy_cli/daemon.rs b/web3_proxy/src/bin/web3_proxy_cli/daemon.rs index 000b8b51..1beeeb86 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/daemon.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/daemon.rs @@ -64,6 +64,7 @@ async fn run( )); // wait until the app has seen its first consensus head block + // TODO: if backups were included, wait a little longer let _ = spawned_app.app.head_block_receiver().changed().await; // start the frontend port diff --git a/web3_proxy/src/rpcs/blockchain.rs b/web3_proxy/src/rpcs/blockchain.rs index 56bbf045..2be784a6 100644 --- a/web3_proxy/src/rpcs/blockchain.rs +++ b/web3_proxy/src/rpcs/blockchain.rs @@ -331,17 +331,20 @@ impl Web3Connections { .synced_connections .swap(Arc::new(new_synced_connections)); + let includes_backups_str = if includes_backups { "B" } else { "" }; + if let Some(consensus_saved_block) = consensus_head_block { match &old_synced_connections.head_block { None => { debug!( - "first {}/{}/{}/{} block={}, rpc={}", + "first {}/{}/{}/{} block={}, rpc={} {}", num_consensus_rpcs, num_checked_rpcs, num_active_rpcs, total_rpcs, consensus_saved_block, - rpc + rpc, + includes_backups_str, ); if includes_backups { @@ -368,7 +371,7 @@ impl Web3Connections { if consensus_saved_block.hash() == old_head_block.hash() { // no change in hash. no need to use head_block_sender debug!( - "con {}/{}/{}/{} con={} rpc={}@{}", + "con {}/{}/{}/{} con={} rpc={}@{} {}", num_consensus_rpcs, num_checked_rpcs, num_active_rpcs, @@ -376,6 +379,7 @@ impl Web3Connections { consensus_saved_block, rpc, rpc_head_str, + includes_backups_str, ) } else { // hash changed @@ -386,7 +390,7 @@ impl Web3Connections { } debug!( - "unc {}/{}/{}/{} con_head={} old={} rpc={}@{}", + "unc {}/{}/{}/{} con_head={} old={} rpc={}@{} {}", num_consensus_rpcs, num_checked_rpcs, num_active_rpcs, @@ -395,6 +399,7 @@ impl Web3Connections { old_head_block, rpc, rpc_head_str, + includes_backups_str, ); let consensus_head_block = self @@ -411,7 +416,7 @@ impl Web3Connections { // this is unlikely but possible // TODO: better log warn!( - "chain rolled back {}/{}/{}/{} con={} old={} rpc={}@{}", + "chain rolled back {}/{}/{}/{} con={} old={} rpc={}@{} {}", num_consensus_rpcs, num_checked_rpcs, num_active_rpcs, @@ -420,6 +425,7 @@ impl Web3Connections { old_head_block, rpc, rpc_head_str, + includes_backups_str, ); if includes_backups { @@ -441,7 +447,7 @@ impl Web3Connections { } Ordering::Greater => { debug!( - "new {}/{}/{}/{} con={} rpc={}@{}", + "new {}/{}/{}/{} con={} rpc={}@{} {}", num_consensus_rpcs, num_checked_rpcs, num_active_rpcs, @@ -449,6 +455,7 @@ impl Web3Connections { consensus_saved_block, rpc, rpc_head_str, + includes_backups_str, ); if includes_backups { @@ -472,23 +479,25 @@ impl Web3Connections { if num_checked_rpcs >= self.min_head_rpcs { error!( - "non {}/{}/{}/{} rpc={}@{}", + "non {}/{}/{}/{} rpc={}@{} {}", num_consensus_rpcs, num_checked_rpcs, num_active_rpcs, total_rpcs, rpc, rpc_head_str, + includes_backups_str, ); } else { debug!( - "non {}/{}/{}/{} rpc={}@{}", + "non {}/{}/{}/{} rpc={}@{} {}", num_consensus_rpcs, num_checked_rpcs, num_active_rpcs, 
total_rpcs, rpc, rpc_head_str, + includes_backups_str, ); } } @@ -498,6 +507,7 @@ impl Web3Connections { } struct ConnectionsGroup { + /// TODO: this group might not actually include backups, but they were at leastchecked includes_backups: bool, rpc_name_to_hash: HashMap, } From e53030e0532b3215b75447905a91b2064e20466b Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 19 Jan 2023 18:30:30 -0800 Subject: [PATCH 25/80] move backup indicator --- web3_proxy/src/rpcs/blockchain.rs | 30 +++++------ web3_proxy/src/rpcs/connections.rs | 82 +++++++++++++++++------------- 2 files changed, 61 insertions(+), 51 deletions(-) diff --git a/web3_proxy/src/rpcs/blockchain.rs b/web3_proxy/src/rpcs/blockchain.rs index 2be784a6..8b8cbce7 100644 --- a/web3_proxy/src/rpcs/blockchain.rs +++ b/web3_proxy/src/rpcs/blockchain.rs @@ -331,20 +331,20 @@ impl Web3Connections { .synced_connections .swap(Arc::new(new_synced_connections)); - let includes_backups_str = if includes_backups { "B" } else { "" }; + let includes_backups_str = if includes_backups { "B " } else { "" }; if let Some(consensus_saved_block) = consensus_head_block { match &old_synced_connections.head_block { None => { debug!( - "first {}/{}/{}/{} block={}, rpc={} {}", + "first {}{}/{}/{}/{} block={}, rpc={}", + includes_backups_str, num_consensus_rpcs, num_checked_rpcs, num_active_rpcs, total_rpcs, consensus_saved_block, rpc, - includes_backups_str, ); if includes_backups { @@ -371,7 +371,8 @@ impl Web3Connections { if consensus_saved_block.hash() == old_head_block.hash() { // no change in hash. no need to use head_block_sender debug!( - "con {}/{}/{}/{} con={} rpc={}@{} {}", + "con {}{}/{}/{}/{} con={} rpc={}@{}", + includes_backups_str, num_consensus_rpcs, num_checked_rpcs, num_active_rpcs, @@ -379,7 +380,6 @@ impl Web3Connections { consensus_saved_block, rpc, rpc_head_str, - includes_backups_str, ) } else { // hash changed @@ -390,7 +390,8 @@ impl Web3Connections { } debug!( - "unc {}/{}/{}/{} con_head={} old={} rpc={}@{} {}", + "unc {}{}/{}/{}/{} con_head={} old={} rpc={}@{}", + includes_backups_str, num_consensus_rpcs, num_checked_rpcs, num_active_rpcs, @@ -399,7 +400,6 @@ impl Web3Connections { old_head_block, rpc, rpc_head_str, - includes_backups_str, ); let consensus_head_block = self @@ -416,7 +416,8 @@ impl Web3Connections { // this is unlikely but possible // TODO: better log warn!( - "chain rolled back {}/{}/{}/{} con={} old={} rpc={}@{} {}", + "chain rolled back {}{}/{}/{}/{} con={} old={} rpc={}@{}", + includes_backups_str, num_consensus_rpcs, num_checked_rpcs, num_active_rpcs, @@ -425,7 +426,6 @@ impl Web3Connections { old_head_block, rpc, rpc_head_str, - includes_backups_str, ); if includes_backups { @@ -447,7 +447,8 @@ impl Web3Connections { } Ordering::Greater => { debug!( - "new {}/{}/{}/{} con={} rpc={}@{} {}", + "new {}{}/{}/{}/{} con={} rpc={}@{}", + includes_backups_str, num_consensus_rpcs, num_checked_rpcs, num_active_rpcs, @@ -455,7 +456,6 @@ impl Web3Connections { consensus_saved_block, rpc, rpc_head_str, - includes_backups_str, ); if includes_backups { @@ -479,25 +479,25 @@ impl Web3Connections { if num_checked_rpcs >= self.min_head_rpcs { error!( - "non {}/{}/{}/{} rpc={}@{} {}", + "non {}{}/{}/{}/{} rpc={}@{}", + includes_backups_str, num_consensus_rpcs, num_checked_rpcs, num_active_rpcs, total_rpcs, rpc, rpc_head_str, - includes_backups_str, ); } else { debug!( - "non {}/{}/{}/{} rpc={}@{} {}", + "non {}{}/{}/{}/{} rpc={}@{}", + includes_backups_str, num_consensus_rpcs, num_checked_rpcs, num_active_rpcs, total_rpcs, 
rpc, rpc_head_str, - includes_backups_str, ); } } diff --git a/web3_proxy/src/rpcs/connections.rs b/web3_proxy/src/rpcs/connections.rs index 0953cde6..9618d92d 100644 --- a/web3_proxy/src/rpcs/connections.rs +++ b/web3_proxy/src/rpcs/connections.rs @@ -27,9 +27,9 @@ use serde::Serialize; use serde_json::json; use serde_json::value::RawValue; use std::collections::BTreeMap; -use std::fmt; use std::sync::atomic::Ordering; use std::sync::Arc; +use std::{cmp, fmt}; use thread_fast_rng::rand::seq::SliceRandom; use tokio::sync::{broadcast, watch}; use tokio::task; @@ -446,47 +446,57 @@ impl Web3Connections { let usable_rpcs_by_head_num_and_weight: BTreeMap< (Option, u64), Vec>, - > = if let Some(min_block_needed) = min_block_needed { - // need a potentially old block. check all the rpcs. prefer the most synced - let mut m = BTreeMap::new(); - - for x in self - .conns - .values() - .filter(|x| if allow_backups { true } else { !x.backup }) - .filter(|x| !skip.contains(x)) - .filter(|x| x.has_block_data(min_block_needed)) - .cloned() - { - let x_head_block = x.head_block.read().clone(); - - match x_head_block { - None => continue, - Some(x_head) => { - let key = (Some(x_head.number()), u64::MAX - x.tier); - - m.entry(key).or_insert_with(Vec::new).push(x); - } - } - } - - m - } else { - // need latest. filter the synced rpcs + > = { let synced_connections = self.synced_connections.load(); - // TODO: if head_block is super old. emit an error! + let head_block_num = if let Some(head_block) = synced_connections.head_block.as_ref() { + head_block.number() + } else { + return Ok(OpenRequestResult::NotReady); + }; + + let min_block_needed = min_block_needed.unwrap_or(&head_block_num); let mut m = BTreeMap::new(); - for x in synced_connections - .conns - .iter() - .filter(|x| !skip.contains(x)) - { - let key = (None, u64::MAX - x.tier); + match min_block_needed.cmp(&head_block_num) { + cmp::Ordering::Less => { + // need an old block. check all the rpcs. prefer the most synced + for x in self + .conns + .values() + .filter(|x| if allow_backups { true } else { !x.backup }) + .filter(|x| !skip.contains(x)) + .filter(|x| x.has_block_data(min_block_needed)) + .cloned() + { + let x_head_block = x.head_block.read().clone(); - m.entry(key).or_insert_with(Vec::new).push(x.clone()); + match x_head_block { + None => continue, + Some(x_head) => { + let key = (Some(x_head.number()), u64::MAX - x.tier); + + m.entry(key).or_insert_with(Vec::new).push(x); + } + } + } + } + cmp::Ordering::Equal => { + // need the consensus head block. filter the synced rpcs + for x in synced_connections + .conns + .iter() + .filter(|x| !skip.contains(x)) + { + let key = (None, u64::MAX - x.tier); + + m.entry(key).or_insert_with(Vec::new).push(x.clone()); + } + } + cmp::Ordering::Greater => { + return Ok(OpenRequestResult::NotReady); + } } m From 0731d92dec4dc4644bf996630b4f843fb0a06ae3 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 19 Jan 2023 18:32:31 -0800 Subject: [PATCH 26/80] use best, not fastest with websocket --- web3_proxy/src/frontend/rpc_proxy_ws.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/web3_proxy/src/frontend/rpc_proxy_ws.rs b/web3_proxy/src/frontend/rpc_proxy_ws.rs index ae6b700b..f031aaf6 100644 --- a/web3_proxy/src/frontend/rpc_proxy_ws.rs +++ b/web3_proxy/src/frontend/rpc_proxy_ws.rs @@ -128,9 +128,8 @@ pub async fn websocket_handler_with_key( user_agent: Option>, ws_upgrade: Option, ) -> FrontendResult { - // TODO: config instead of defaulting to fastest(1)? 
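+    // assumed semantics: Best picks a server at the consensus head, while Fastest(1) only optimized for latency and could serve from a lagged node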
_websocket_handler_with_key( - ProxyMode::Fastest(1), + ProxyMode::Best, app, ip, rpc_key, From 17d8ea0b7fe6abb7151c3b4e6f823592b696f0e6 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 19 Jan 2023 21:28:33 -0800 Subject: [PATCH 27/80] make free tier even better and improve migration locking --- Cargo.lock | 6 +-- README.md | 2 +- entities/Cargo.toml | 2 +- migration/Cargo.toml | 2 +- migration/README.md | 2 +- migration/src/lib.rs | 2 + web3_proxy/Cargo.toml | 2 +- web3_proxy/src/app/mod.rs | 46 ++++++++++++------- .../bin/web3_proxy_cli/drop_migration_lock.rs | 15 ++++-- 9 files changed, 52 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index faf872a6..13f8115b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1355,7 +1355,7 @@ dependencies = [ [[package]] name = "entities" -version = "0.12.0" +version = "0.13.0" dependencies = [ "ethers", "sea-orm", @@ -2746,7 +2746,7 @@ dependencies = [ [[package]] name = "migration" -version = "0.12.0" +version = "0.13.0" dependencies = [ "sea-orm-migration", "tokio", @@ -5565,7 +5565,7 @@ dependencies = [ [[package]] name = "web3_proxy" -version = "0.12.0" +version = "0.13.0" dependencies = [ "anyhow", "arc-swap", diff --git a/README.md b/README.md index 6f2e67c0..9a0ade50 100644 --- a/README.md +++ b/README.md @@ -104,7 +104,7 @@ web3_proxy_cli --config ... change_user_tier_by_key "$RPC_ULID_KEY_FROM_PREV_COM Health check 3 servers and error if the first one doesn't match the others. ``` -web3_proxy_cli https://eth.llamarpc.com/ https://rpc.ankr.com/eth https://cloudflare-eth.com +web3_proxy_cli health_compass https://eth.llamarpc.com/ https://rpc.ankr.com/eth https://cloudflare-eth.com ``` ## Adding new database tables diff --git a/entities/Cargo.toml b/entities/Cargo.toml index 16e3ac8e..606a2f39 100644 --- a/entities/Cargo.toml +++ b/entities/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "entities" -version = "0.12.0" +version = "0.13.0" edition = "2021" [lib] diff --git a/migration/Cargo.toml b/migration/Cargo.toml index d1791630..61d25f6d 100644 --- a/migration/Cargo.toml +++ b/migration/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "migration" -version = "0.12.0" +version = "0.13.0" edition = "2021" publish = false diff --git a/migration/README.md b/migration/README.md index b3ea53eb..3b438d89 100644 --- a/migration/README.md +++ b/migration/README.md @@ -2,7 +2,7 @@ - Generate a new migration file ```sh - cargo run -- migrate generate MIGRATION_NAME + cargo run -- generate MIGRATION_NAME ``` - Apply all pending migrations ```sh diff --git a/migration/src/lib.rs b/migration/src/lib.rs index 0f221af2..ca074f18 100644 --- a/migration/src/lib.rs +++ b/migration/src/lib.rs @@ -12,6 +12,7 @@ mod m20221101_222349_archive_request; mod m20221108_200345_save_anon_stats; mod m20221211_124002_request_method_privacy; mod m20221213_134158_move_login_into_database; +mod m20230119_204135_better_free_tier; pub struct Migrator; @@ -31,6 +32,7 @@ impl MigratorTrait for Migrator { Box::new(m20221108_200345_save_anon_stats::Migration), Box::new(m20221211_124002_request_method_privacy::Migration), Box::new(m20221213_134158_move_login_into_database::Migration), + Box::new(m20230119_204135_better_free_tier::Migration), ] } } diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml index f1fc8e33..a77e3858 100644 --- a/web3_proxy/Cargo.toml +++ b/web3_proxy/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "web3_proxy" -version = "0.12.0" +version = "0.13.0" edition = "2021" default-run = "web3_proxy_cli" diff --git a/web3_proxy/src/app/mod.rs 
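+/// Hypothetical invocations (the bool below is an argh option, so it takes an explicit value):
+/// `web3_proxy_cli drop_migration_lock --and-migrate false` only drops the lock, while
+/// `web3_proxy_cli drop_migration_lock --and-migrate true` overrides the lock and re-runs migrations.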
b/web3_proxy/src/app/mod.rs index 2db6fe52..2e05ace5 100644 --- a/web3_proxy/src/app/mod.rs +++ b/web3_proxy/src/app/mod.rs @@ -273,18 +273,14 @@ pub async fn drop_migration_lock(db_conn: &DatabaseConnection) -> Result<(), DbE Ok(()) } -/// Connect to the database and run migrations -pub async fn get_migrated_db( - db_url: String, - min_connections: u32, - max_connections: u32, -) -> anyhow::Result { - // TODO: this seems to fail silently - let db_conn = get_db(db_url, min_connections, max_connections).await?; - +/// Be super careful with override_existing_lock! It is very important that only one process is running the migrations at a time! +pub async fn migrate_db( + db_conn: &DatabaseConnection, + override_existing_lock: bool, +) -> Result<(), DbErr> { let db_backend = db_conn.get_database_backend(); - // TODO: put the timestamp into this? + // TODO: put the timestamp and hostname into this as columns? let create_lock_statment = db_backend.build( Table::create() .table(Alias::new("migration_lock")) @@ -294,18 +290,24 @@ pub async fn get_migrated_db( loop { if Migrator::get_pending_migrations(&db_conn).await?.is_empty() { info!("no migrations to apply"); - return Ok(db_conn); + return Ok(()); } // there are migrations to apply // acquire a lock if let Err(err) = db_conn.execute(create_lock_statment.clone()).await { - debug!("Unable to acquire lock. err={:?}", err); + if override_existing_lock { + warn!("OVERRIDING EXISTING LOCK in 10 seconds! ctrl+c now if other migrations are actually running!"); - // TODO: exponential backoff with jitter - sleep(Duration::from_secs(1)).await; + sleep(Duration::from_secs(10)).await + } else { + debug!("Unable to acquire lock. if you are positive no migration is running, run \"web3_proxy_cli drop_migration_lock\". err={:?}", err); - continue; + // TODO: exponential backoff with jitter? + sleep(Duration::from_secs(1)).await; + + continue; + } } debug!("migration lock acquired"); @@ -318,7 +320,19 @@ pub async fn get_migrated_db( drop_migration_lock(&db_conn).await?; // return if migrations erred - migration_result?; + migration_result +} + +/// Connect to the database and run migrations +pub async fn get_migrated_db( + db_url: String, + min_connections: u32, + max_connections: u32, +) -> Result { + // TODO: this seems to fail silently + let db_conn = get_db(db_url, min_connections, max_connections).await?; + + migrate_db(&db_conn, false).await?; Ok(db_conn) } diff --git a/web3_proxy/src/bin/web3_proxy_cli/drop_migration_lock.rs b/web3_proxy/src/bin/web3_proxy_cli/drop_migration_lock.rs index 633a0610..ace59c65 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/drop_migration_lock.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/drop_migration_lock.rs @@ -1,15 +1,24 @@ use argh::FromArgs; use migration::sea_orm::DatabaseConnection; -use web3_proxy::app::drop_migration_lock; +use web3_proxy::app::{drop_migration_lock, migrate_db}; #[derive(FromArgs, PartialEq, Debug, Eq)] /// In case of emergency, break glass. 
#[argh(subcommand, name = "drop_migration_lock")] -pub struct DropMigrationLockSubCommand {} +pub struct DropMigrationLockSubCommand { + #[argh(option)] + /// run migrations after dropping the lock + and_migrate: bool, +} impl DropMigrationLockSubCommand { pub async fn main(&self, db_conn: &DatabaseConnection) -> anyhow::Result<()> { - drop_migration_lock(db_conn).await?; + if self.and_migrate { + migrate_db(db_conn, true).await?; + } else { + // just drop the lock + drop_migration_lock(db_conn).await?; + } Ok(()) } From 10573f5d1977ab3cb8b73bc5d9e3b6e248ac0df7 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 19 Jan 2023 21:29:33 -0800 Subject: [PATCH 28/80] actually add the migration. derp --- .../src/m20230119_204135_better_free_tier.rs | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 migration/src/m20230119_204135_better_free_tier.rs diff --git a/migration/src/m20230119_204135_better_free_tier.rs b/migration/src/m20230119_204135_better_free_tier.rs new file mode 100644 index 00000000..aef9e5f8 --- /dev/null +++ b/migration/src/m20230119_204135_better_free_tier.rs @@ -0,0 +1,39 @@ +//! Increase requests per minute for the free tier to be better than our public tier (which has 3900/min) +use sea_orm_migration::{prelude::*, sea_orm::ConnectionTrait}; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + let db_conn = manager.get_connection(); + let db_backend = manager.get_database_backend(); + + let update_free = Query::update() + .table(UserTier::Table) + .value(UserTier::MaxRequestsPerPeriod, 6000) + .and_where(Expr::col(UserTier::Title).eq("Free")) + .limit(1) + .to_owned(); + + let x = db_backend.build(&update_free); + + let rows_affected = db_conn.execute(x).await?.rows_affected(); + + assert_eq!(rows_affected, 1, "unable to update free tier"); + + Ok(()) + } + + async fn down(&self, _manager: &SchemaManager) -> Result<(), DbErr> { + todo!(); + } +} + +#[derive(Iden)] +enum UserTier { + Table, + Title, + MaxRequestsPerPeriod, +} From 501cb4a1b9ffa58643361df3fe4ad0c35a7f7f84 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 19 Jan 2023 21:30:24 -0800 Subject: [PATCH 29/80] cargo upgrade --workspace for some perf improvements --- Cargo.lock | 16 +++++++++++----- web3_proxy/Cargo.toml | 4 ++-- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 13f8115b..6fc27a98 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -484,6 +484,12 @@ version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" +[[package]] +name = "base64" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" + [[package]] name = "base64ct" version = "1.0.1" @@ -3697,11 +3703,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.13" +version = "0.11.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" +checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" dependencies = [ - "base64 0.13.0", + "base64 0.21.0", "bytes", "encoding_rs", "futures-core", @@ -5085,9 +5091,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.10" +version = "0.5.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml index a77e3858..dafac505 100644 --- a/web3_proxy/Cargo.toml +++ b/web3_proxy/Cargo.toml @@ -50,7 +50,7 @@ parking_lot = { version = "0.12.1", features = ["arc_lock"] } proctitle = "0.1.1" # TODO: regex has several "perf" features that we might want to use regex = "1.7.1" -reqwest = { version = "0.11.13", default-features = false, features = ["json", "tokio-rustls"] } +reqwest = { version = "0.11.14", default-features = false, features = ["json", "tokio-rustls"] } handlebars = "4.3.6" rustc-hash = "1.1.0" siwe = "0.5.0" @@ -63,7 +63,7 @@ time = "0.3.17" tokio = { version = "1.24.2", features = ["full"] } # TODO: make sure this uuid version matches sea-orm. PR to put this in their prelude tokio-stream = { version = "0.1.11", features = ["sync"] } -toml = "0.5.10" +toml = "0.5.11" tower = "0.4.13" tower-http = { version = "0.3.5", features = ["cors", "sensitive-headers"] } ulid = { version = "1.0.0", features = ["serde"] } From a820b559872eed28174bb7eeb2401f340ef790bb Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 19 Jan 2023 21:30:48 -0800 Subject: [PATCH 30/80] clean up todos --- TODO.md | 1 + 1 file changed, 1 insertion(+) diff --git a/TODO.md b/TODO.md index 8574253a..27c35a8f 100644 --- a/TODO.md +++ b/TODO.md @@ -309,6 +309,7 @@ These are not yet ordered. There might be duplicates. We might not actually need - [x] major refactor to only use backup servers when absolutely necessary - [x] remove allowed lag - [x] configurable gas buffer. default to the larger of 25k or 25% on polygon to work around erigon bug +- [x] public is 3900, but free is 360. free should be at least 3900 but probably more - [-] proxy mode for benchmarking all backends - [-] proxy mode for sending to multiple backends - [-] let users choose a % of reverts to log (or maybe x/second). someone like curve logging all reverts will be a BIG database very quickly From b9a12756b99d7706b23eef012bf483f895d2fa02 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 19 Jan 2023 21:46:47 -0800 Subject: [PATCH 31/80] collect request_metadata.response_from_backup_rpc --- web3_proxy/src/frontend/authorization.rs | 2 ++ web3_proxy/src/rpcs/connections.rs | 32 ++++++++++++++++++------ 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/web3_proxy/src/frontend/authorization.rs b/web3_proxy/src/frontend/authorization.rs index c04ba8c2..8c9380da 100644 --- a/web3_proxy/src/frontend/authorization.rs +++ b/web3_proxy/src/frontend/authorization.rs @@ -85,6 +85,7 @@ pub struct RequestMetadata { pub error_response: AtomicBool, pub response_bytes: AtomicU64, pub response_millis: AtomicU64, + pub response_from_backup_rpc: AtomicBool, } impl RequestMetadata { @@ -103,6 +104,7 @@ impl RequestMetadata { error_response: false.into(), response_bytes: 0.into(), response_millis: 0.into(), + response_from_backup_rpc: false.into(), }; Ok(new) diff --git a/web3_proxy/src/rpcs/connections.rs b/web3_proxy/src/rpcs/connections.rs index 9618d92d..4db3c308 100644 --- a/web3_proxy/src/rpcs/connections.rs +++ b/web3_proxy/src/rpcs/connections.rs @@ -723,6 +723,7 @@ impl Web3Connections { // TODO: maximum retries? right now its the total number of servers loop { + // TODO: is self.conns still right now that we split main and backup servers? 
if skip_rpcs.len() == self.conns.len() {
                // no servers to try
                break;
            }
@@ -738,14 +739,17 @@
            {
                OpenRequestResult::Handle(active_request_handle) => {
                    // save the rpc in case we get an error and want to retry on another server
+                    // TODO: look at backend_requests instead
                    skip_rpcs.push(active_request_handle.clone_connection());

                    if let Some(request_metadata) = request_metadata {
-                        // TODO: request_metadata.backend_requests instead of skip_rpcs
+                        let rpc = active_request_handle.clone_connection();
+
                        request_metadata
-                            .backend_requests
-                            .lock()
-                            .push(active_request_handle.clone_connection());
+                            .response_from_backup_rpc
+                            .store(rpc.backup, Ordering::Release);
+
+                        request_metadata.backend_requests.lock().push(rpc);
                    }

                    // TODO: get the log percent from the user data
@@ -896,10 +900,24 @@

        // TODO: simplify
        if let Some(request_metadata) = request_metadata {
+            let mut backup_used = false;
+
+            request_metadata.backend_requests.lock().extend(
+                active_request_handles.iter().map(|x| {
+                    let rpc = x.clone_connection();
+
+                    if rpc.backup {
+                        // TODO: it's possible we serve from a synced connection though. think about this more
+                        backup_used = true;
+                    }
+
+                    rpc
+                }),
+            );
+
            request_metadata
-                .backend_requests
-                .lock()
-                .extend(active_request_handles.iter().map(|x| x.clone_connection()));
+                .response_from_backup_rpc
+                .store(backup_used, Ordering::Release);
        }

        return self
From f392ff68ffec13b41f54c3ed727eb1a0a0cd7e4e Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Thu, 19 Jan 2023 21:52:43 -0800
Subject: [PATCH 32/80] add influxdb to dev containers

---
 docker-compose.yml | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/docker-compose.yml b/docker-compose.yml
index 88d69521..df6fe4b6 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -23,6 +23,22 @@ services:
     volumes:
       - ./data/dev_mysql:/var/lib/mysql

+  # influxdb for stats
+  dev-influxdb:
+    image: influxdb:2.6.1-alpine
+    environment:
+      DOCKER_INFLUXDB_INIT_MODE: setup
+      DOCKER_INFLUXDB_INIT_USERNAME: dev_web3_proxy
+      DOCKER_INFLUXDB_INIT_PASSWORD: dev_web3_proxy
+      DOCKER_INFLUXDB_INIT_ORG: dev_org
+      DOCKER_INFLUXDB_INIT_BUCKET: dev_web3_proxy
+      DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: dev_web3_proxy_auth_token
+    ports:
+      - 127.0.0.1:8086:8086
+    volumes:
+      - ./data/dev_influxdb/data:/var/lib/influxdb2
+      - ./data/dev_influxdb/config:/etc/influxdb2
+
   # volatile redis for storing rate limits
   dev-vredis:
     extends:

From 412733d505cd1436ca82a71c85bd7e5d863c9574 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Fri, 20 Jan 2023 15:43:16 -0800
Subject: [PATCH 33/80] improve wait_for_sync

---
 TODO.md                             |   2 +
 web3_proxy/src/bin/wait_for_sync.rs | 159 ++++++++++++++++++++++------
 2 files changed, 130 insertions(+), 31 deletions(-)

diff --git a/TODO.md b/TODO.md
index 27c35a8f..b28791d7 100644
--- a/TODO.md
+++ b/TODO.md
@@ -310,6 +310,8 @@ These are not yet ordered. There might be duplicates. We might not actually need
 - [x] remove allowed lag
 - [x] configurable gas buffer. default to the larger of 25k or 25% on polygon to work around erigon bug
 - [x] public is 3900, but free is 360. free should be at least 3900 but probably more
+- [x] add --max-wait to wait_for_sync
+- [x] add automatic compare urls to wait_for_sync
 - [-] proxy mode for benchmarking all backends
 - [-] proxy mode for sending to multiple backends
 - [-] let users choose a % of reverts to log (or maybe x/second).
someone like curve logging all reverts will be a BIG database very quickly diff --git a/web3_proxy/src/bin/wait_for_sync.rs b/web3_proxy/src/bin/wait_for_sync.rs index a214d67b..51cf4ddf 100644 --- a/web3_proxy/src/bin/wait_for_sync.rs +++ b/web3_proxy/src/bin/wait_for_sync.rs @@ -1,27 +1,39 @@ -// TODO: websockets instead of http +// TODO: support websockets use anyhow::Context; use argh::FromArgs; use chrono::Utc; +use ethers::types::U64; use ethers::types::{Block, TxHash}; use log::info; use log::warn; use reqwest::Client; use serde::Deserialize; use serde_json::json; +use std::sync::atomic::{AtomicU32, Ordering}; use tokio::time::sleep; use tokio::time::Duration; #[derive(Debug, FromArgs)] /// Command line interface for admins to interact with web3_proxy pub struct CliConfig { - /// the RPC to check + /// the HTTP RPC to check #[argh(option, default = "\"http://localhost:8545\".to_string()")] pub check_url: String, - /// the RPC to compare to - #[argh(option, default = "\"https://eth.llamarpc.com\".to_string()")] - pub compare_url: String, + /// the HTTP RPC to compare against. defaults to LlamaNodes public RPC + #[argh(option)] + pub compare_url: Option, + + /// how many seconds to wait for sync. + /// Defaults to waiting forever. + /// if the wait is exceeded, will exit with code 2 + #[argh(option)] + pub max_wait: Option, + + /// require a specific chain id (for extra safety) + #[argh(option)] + pub chain_id: Option, } #[tokio::main] @@ -38,26 +50,73 @@ async fn main() -> anyhow::Result<()> { let cli_config: CliConfig = argh::from_env(); - let json_request = json!({ - "id": "1", - "jsonrpc": "2.0", - "method": "eth_getBlockByNumber", - "params": [ - "latest", - false, - ], - }); - let client = reqwest::Client::new(); - // TODO: make sure the chain ids match - // TODO: automatic compare_url based on the chain id + let check_url = cli_config.check_url; + + // make sure the chain ids match + let check_id = get_chain_id(&check_url, &client) + .await + .context("unknown chain id for check_url")?; + + if let Some(chain_id) = cli_config.chain_id { + if chain_id != check_id { + return Err(anyhow::anyhow!( + "chain_id of check_url is wrong! Need {}. Found {}", + chain_id, + check_id, + )); + } + } + + let compare_url: String = match cli_config.compare_url { + Some(x) => x, + None => match check_id { + 1 => "https://eth.llamarpc.com", + 137 => "https://polygon.llamarpc.com", + _ => { + return Err(anyhow::anyhow!( + "--compare-url required for chain {}", + check_id + )) + } + } + .to_string(), + }; + + info!( + "comparing {} to {} (chain {})", + check_url, compare_url, check_id + ); + + let compare_id = get_chain_id(&compare_url, &client) + .await + .context("unknown chain id for compare_url")?; + + if check_id != compare_id { + return Err(anyhow::anyhow!( + "chain_id does not match! Need {}. 
Found {}", + check_id, + compare_id, + )); + } + + // start ids at 2 because id 1 was checking the chain id + let counter = AtomicU32::new(2); + let start = tokio::time::Instant::now(); loop { - match main_loop(&cli_config, &client, &json_request).await { + match main_loop(&check_url, &compare_url, &client, &counter).await { Ok(()) => break, Err(err) => { warn!("{:?}", err); + + if let Some(max_wait) = cli_config.max_wait { + if max_wait == 0 || start.elapsed().as_secs() > max_wait { + std::process::exit(2); + } + } + sleep(Duration::from_secs(10)).await; } } @@ -66,38 +125,76 @@ async fn main() -> anyhow::Result<()> { Ok(()) } +#[derive(Deserialize)] +struct JsonRpcChainIdResult { + result: U64, +} + +async fn get_chain_id(rpc: &str, client: &reqwest::Client) -> anyhow::Result { + let get_chain_id_request = json!({ + "id": "1", + "jsonrpc": "2.0", + "method": "eth_chainId", + }); + + let check_result = client + .post(rpc) + .json(&get_chain_id_request) + .send() + .await + .context("failed querying chain id")? + .json::() + .await + .context("failed parsing chain id")? + .result + .as_u64(); + + Ok(check_result) +} + #[derive(Deserialize)] struct JsonRpcBlockResult { result: Block, } async fn main_loop( - cli_config: &CliConfig, + check_url: &str, + compare_url: &str, client: &Client, - json_request: &serde_json::Value, + counter: &AtomicU32, ) -> anyhow::Result<()> { - let check_result = client - .post(&cli_config.check_url) - .json(json_request) + // TODO: have a real id here that increments every call? + let get_block_number_request = json!({ + "id": counter.fetch_add(1, Ordering::SeqCst), + "jsonrpc": "2.0", + "method": "eth_getBlockByNumber", + "params": [ + "latest", + false, + ], + }); + + let check_block = client + .post(check_url) + .json(&get_block_number_request) .send() .await .context("querying check block")? .json::() .await - .context("parsing check block")?; + .context("parsing check block")? + .result; - let compare_result = client - .post(&cli_config.compare_url) - .json(json_request) + let compare_block = client + .post(compare_url) + .json(&get_block_number_request) .send() .await .context("querying compare block")? .json::() .await - .context("parsing compare block")?; - - let check_block = check_result.result; - let compare_block = compare_result.result; + .context("parsing compare block")? 
+ .result; let check_number = check_block.number.context("no check block number")?; let compare_number = compare_block.number.context("no compare block number")?; From 6ffdcd42e3fd8f26a920cd2c9645b09efe8bc022 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Fri, 20 Jan 2023 15:50:39 -0800 Subject: [PATCH 34/80] add todo --- web3_proxy/src/bin/wait_for_sync.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/web3_proxy/src/bin/wait_for_sync.rs b/web3_proxy/src/bin/wait_for_sync.rs index 51cf4ddf..c13d5fe5 100644 --- a/web3_proxy/src/bin/wait_for_sync.rs +++ b/web3_proxy/src/bin/wait_for_sync.rs @@ -137,6 +137,7 @@ async fn get_chain_id(rpc: &str, client: &reqwest::Client) -> anyhow::Result Date: Fri, 20 Jan 2023 15:51:19 -0800 Subject: [PATCH 35/80] cargo upgrade --workspace --- Cargo.lock | 8 ++++---- web3_proxy/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6fc27a98..8f1a8ee9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -342,9 +342,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1304eab461cf02bd70b083ed8273388f9724c549b316ba3d1e213ce0e9e7fb7e" +checksum = "678c5130a507ae3a7c797f9a17393c14849300b8440eac47cdb90a5bdcb3a543" dependencies = [ "async-trait", "axum-core", @@ -389,9 +389,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f487e40dc9daee24d8a1779df88522f159a54a980f99cfbe43db0be0bd3444a8" +checksum = "1cae3e661676ffbacb30f1a824089a8c9150e71017f7e1e38f2aa32009188d34" dependencies = [ "async-trait", "bytes", diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml index dafac505..32cfe92c 100644 --- a/web3_proxy/Cargo.toml +++ b/web3_proxy/Cargo.toml @@ -22,7 +22,7 @@ thread-fast-rng = { path = "../thread-fast-rng" } anyhow = { version = "1.0.68", features = ["backtrace"] } arc-swap = "1.6.0" argh = "0.1.10" -axum = { version = "0.6.2", features = ["headers", "ws"] } +axum = { version = "0.6.3", features = ["headers", "ws"] } axum-client-ip = "0.3.1" axum-macros = "0.3.1" # TODO: import chrono from sea-orm so we always have the same version From 51a9beaf6fbb453efd49873d5b2ca9ea3f1cb89c Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Sun, 22 Jan 2023 17:19:31 -0800 Subject: [PATCH 36/80] allow no config --- web3_proxy/src/bin/web3_proxy_cli/main.rs | 50 +++++++++++++---------- 1 file changed, 28 insertions(+), 22 deletions(-) diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs index e542fd32..8ed0726b 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/main.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs @@ -123,35 +123,39 @@ fn main() -> anyhow::Result<()> { } let top_config = if let Some(top_config_path) = cli_config.config.clone() { - let top_config_path = Path::new(&top_config_path) - .canonicalize() - .context(format!("checking for config at {}", top_config_path))?; + if top_config_path.is_empty() { + None + } else { + let top_config_path = Path::new(&top_config_path) + .canonicalize() + .context(format!("checking for config at {}", top_config_path))?; - let top_config: String = fs::read_to_string(top_config_path)?; - let mut top_config: TopConfig = toml::from_str(&top_config)?; + let top_config: String = fs::read_to_string(top_config_path)?; + let mut top_config: TopConfig = 
toml::from_str(&top_config)?; - // TODO: this doesn't seem to do anything - proctitle::set_title(format!("web3_proxy-{}", top_config.app.chain_id)); + // TODO: this doesn't seem to do anything + proctitle::set_title(format!("web3_proxy-{}", top_config.app.chain_id)); - if cli_config.db_url.is_none() { - cli_config.db_url = top_config.app.db_url.clone(); - } - - if let Some(sentry_url) = top_config.app.sentry_url.clone() { - cli_config.sentry_url = Some(sentry_url); - } - - if top_config.app.chain_id == 137 { - if top_config.app.gas_increase_min.is_none() { - top_config.app.gas_increase_min = Some(U256::from(25_000)); + if cli_config.db_url.is_none() { + cli_config.db_url = top_config.app.db_url.clone(); } - if top_config.app.gas_increase_percent.is_none() { - top_config.app.gas_increase_percent = Some(U256::from(25)); + if let Some(sentry_url) = top_config.app.sentry_url.clone() { + cli_config.sentry_url = Some(sentry_url); } - } - Some(top_config) + if top_config.app.chain_id == 137 { + if top_config.app.gas_increase_min.is_none() { + top_config.app.gas_increase_min = Some(U256::from(25_000)); + } + + if top_config.app.gas_increase_percent.is_none() { + top_config.app.gas_increase_percent = Some(U256::from(25)); + } + } + + Some(top_config) + } } else { None }; @@ -201,6 +205,8 @@ fn main() -> anyhow::Result<()> { // TODO: i think these max at 15 characters format!("web3-{}-{}", chain_id, worker_id) }); + } else { + rt_builder.enable_all(); } // start tokio's async runtime From ec5c28b64b6a81d42a46f382b3b2b653ad37b7a8 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Sun, 22 Jan 2023 17:48:33 -0800 Subject: [PATCH 37/80] better way to allow no config --- web3_proxy/src/bin/web3_proxy_cli/main.rs | 58 +++++++++++------------ 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs index 8ed0726b..54027093 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/main.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs @@ -117,45 +117,43 @@ fn main() -> anyhow::Result<()> { let mut cli_config: Web3ProxyCli = argh::from_env(); - if cli_config.config.is_none() && cli_config.db_url.is_none() { + if cli_config.config.is_none() && cli_config.db_url.is_none() && cli_config.sentry_url.is_none() + { + // TODO: default to example.toml if development.toml doesn't exist info!("defaulting to development config"); cli_config.config = Some("./config/development.toml".to_string()); } let top_config = if let Some(top_config_path) = cli_config.config.clone() { - if top_config_path.is_empty() { - None - } else { - let top_config_path = Path::new(&top_config_path) - .canonicalize() - .context(format!("checking for config at {}", top_config_path))?; + let top_config_path = Path::new(&top_config_path) + .canonicalize() + .context(format!("checking for config at {}", top_config_path))?; - let top_config: String = fs::read_to_string(top_config_path)?; - let mut top_config: TopConfig = toml::from_str(&top_config)?; + let top_config: String = fs::read_to_string(top_config_path)?; + let mut top_config: TopConfig = toml::from_str(&top_config)?; - // TODO: this doesn't seem to do anything - proctitle::set_title(format!("web3_proxy-{}", top_config.app.chain_id)); + // TODO: this doesn't seem to do anything + proctitle::set_title(format!("web3_proxy-{}", top_config.app.chain_id)); - if cli_config.db_url.is_none() { - cli_config.db_url = top_config.app.db_url.clone(); - } - - if let Some(sentry_url) = top_config.app.sentry_url.clone() { - 
cli_config.sentry_url = Some(sentry_url); - } - - if top_config.app.chain_id == 137 { - if top_config.app.gas_increase_min.is_none() { - top_config.app.gas_increase_min = Some(U256::from(25_000)); - } - - if top_config.app.gas_increase_percent.is_none() { - top_config.app.gas_increase_percent = Some(U256::from(25)); - } - } - - Some(top_config) + if cli_config.db_url.is_none() { + cli_config.db_url = top_config.app.db_url.clone(); } + + if let Some(sentry_url) = top_config.app.sentry_url.clone() { + cli_config.sentry_url = Some(sentry_url); + } + + if top_config.app.chain_id == 137 { + if top_config.app.gas_increase_min.is_none() { + top_config.app.gas_increase_min = Some(U256::from(25_000)); + } + + if top_config.app.gas_increase_percent.is_none() { + top_config.app.gas_increase_percent = Some(U256::from(25)); + } + } + + Some(top_config) } else { None }; From 86e3f2991f2fd8282dd53f43075126d1929d43d8 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Sun, 22 Jan 2023 22:02:08 -0800 Subject: [PATCH 38/80] use watch instead of arcswap --- Cargo.lock | 7 -- web3_proxy/Cargo.toml | 1 - web3_proxy/src/app/mod.rs | 21 +++--- web3_proxy/src/app/ws.rs | 2 +- web3_proxy/src/rpcs/blockchain.rs | 65 +++++++++++----- web3_proxy/src/rpcs/connections.rs | 90 +++++++++++++++++------ web3_proxy/src/rpcs/synced_connections.rs | 28 ++++--- 7 files changed, 140 insertions(+), 74 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8f1a8ee9..b131edf9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -105,12 +105,6 @@ dependencies = [ "backtrace", ] -[[package]] -name = "arc-swap" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" - [[package]] name = "argh" version = "0.1.10" @@ -5574,7 +5568,6 @@ name = "web3_proxy" version = "0.13.0" dependencies = [ "anyhow", - "arc-swap", "argh", "axum", "axum-client-ip", diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml index 32cfe92c..ecd7015d 100644 --- a/web3_proxy/Cargo.toml +++ b/web3_proxy/Cargo.toml @@ -20,7 +20,6 @@ redis-rate-limiter = { path = "../redis-rate-limiter" } thread-fast-rng = { path = "../thread-fast-rng" } anyhow = { version = "1.0.68", features = ["backtrace"] } -arc-swap = "1.6.0" argh = "0.1.10" axum = { version = "0.6.3", features = ["headers", "ws"] } axum-client-ip = "0.3.1" diff --git a/web3_proxy/src/app/mod.rs b/web3_proxy/src/app/mod.rs index 2e05ace5..0c60ce81 100644 --- a/web3_proxy/src/app/mod.rs +++ b/web3_proxy/src/app/mod.rs @@ -188,7 +188,7 @@ pub struct Web3ProxyApp { response_cache: ResponseCache, // don't drop this or the sender will stop working // TODO: broadcast channel instead? - head_block_receiver: watch::Receiver, + watch_consensus_head_receiver: watch::Receiver, pending_tx_sender: broadcast::Sender, pub config: AppConfig, pub db_conn: Option, @@ -533,7 +533,8 @@ impl Web3ProxyApp { }; // TODO: i don't like doing Block::default here! Change this to "None"? - let (head_block_sender, head_block_receiver) = watch::channel(Arc::new(Block::default())); + let (watch_consensus_head_sender, watch_consensus_head_receiver) = + watch::channel(Arc::new(Block::default())); // TODO: will one receiver lagging be okay? how big should this be? 
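+        // tokio broadcast: a receiver that falls more than 256 messages behind gets RecvError::Lagged and is skipped ahead; the sender never blocks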
let (pending_tx_sender, pending_tx_receiver) = broadcast::channel(256); @@ -570,7 +571,7 @@ impl Web3ProxyApp { http_client.clone(), vredis_pool.clone(), block_map.clone(), - Some(head_block_sender), + Some(watch_consensus_head_sender), top_config.app.min_sum_soft_limit, top_config.app.min_synced_rpcs, Some(pending_tx_sender.clone()), @@ -598,6 +599,8 @@ impl Web3ProxyApp { vredis_pool.clone(), block_map, // subscribing to new heads here won't work well. if they are fast, they might be ahead of balanced_rpcs + // they also often have low rate limits + // however, they are well connected to miners/validators. so maybe using them as a safety check would be good None, 0, 0, @@ -706,7 +709,7 @@ impl Web3ProxyApp { balanced_rpcs, private_rpcs, response_cache, - head_block_receiver, + watch_consensus_head_receiver, pending_tx_sender, pending_transactions, frontend_ip_rate_limiter, @@ -730,7 +733,7 @@ impl Web3ProxyApp { } pub fn head_block_receiver(&self) -> watch::Receiver { - self.head_block_receiver.clone() + self.watch_consensus_head_receiver.clone() } pub async fn prometheus_metrics(&self) -> String { @@ -1362,10 +1365,10 @@ impl Web3ProxyApp { method => { // emit stats - // TODO: if no servers synced, wait for them to be synced? - let head_block = self + // TODO: if no servers synced, wait for them to be synced? probably better to error and let haproxy retry another server + let head_block_num = self .balanced_rpcs - .head_block() + .head_block_num() .context("no servers synced")?; // we do this check before checking caches because it might modify the request params @@ -1375,7 +1378,7 @@ impl Web3ProxyApp { authorization, method, request.params.as_mut(), - head_block.number(), + head_block_num, &self.balanced_rpcs, ) .await? diff --git a/web3_proxy/src/app/ws.rs b/web3_proxy/src/app/ws.rs index e6ac30c0..582ea814 100644 --- a/web3_proxy/src/app/ws.rs +++ b/web3_proxy/src/app/ws.rs @@ -50,7 +50,7 @@ impl Web3ProxyApp { match request_json.params.as_ref() { Some(x) if x == &json!(["newHeads"]) => { let authorization = authorization.clone(); - let head_block_receiver = self.head_block_receiver.clone(); + let head_block_receiver = self.watch_consensus_head_receiver.clone(); let stat_sender = self.stat_sender.clone(); trace!("newHeads subscription {:?}", subscription_id); diff --git a/web3_proxy/src/rpcs/blockchain.rs b/web3_proxy/src/rpcs/blockchain.rs index 8b8cbce7..199fb65b 100644 --- a/web3_proxy/src/rpcs/blockchain.rs +++ b/web3_proxy/src/rpcs/blockchain.rs @@ -165,9 +165,15 @@ impl Web3Connections { let request: JsonRpcRequest = serde_json::from_value(request)?; // TODO: request_metadata? maybe we should put it in the authorization? - // TODO: don't hard code allowed lag + // TODO: think more about this wait_for_sync let response = self - .try_send_best_consensus_head_connection(authorization, request, None, None) + .try_send_best_consensus_head_connection( + authorization, + request, + None, + None, + true, + ) .await?; let block = response.result.context("failed fetching block")?; @@ -199,6 +205,7 @@ impl Web3Connections { } /// Get the heaviest chain's block from cache or backend rpc + /// Caution! If a future block is requested, this might wait forever. Be sure to have a timeout outside of this! 
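+    /// A sketch of a guarded call (hypothetical caller code, names assumed):
+    /// `tokio::time::timeout(Duration::from_secs(12), rpcs.cannonical_block(&authorization, &num)).await??`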
pub async fn cannonical_block( &self, authorization: &Arc, @@ -208,23 +215,33 @@ impl Web3Connections { // maybe save them during save_block in a blocks_by_number Cache> // if theres multiple, use petgraph to find the one on the main chain (and remove the others if they have enough confirmations) + let mut consensus_head_receiver = self + .watch_consensus_head_receiver + .as_ref() + .context("need new head subscriptions to fetch cannonical_block")? + .clone(); + // be sure the requested block num exists - let head_block_num = self.head_block_num().context("no servers in sync")?; + let mut head_block_num = consensus_head_receiver.borrow_and_update().number; + + loop { + if let Some(head_block_num) = head_block_num { + if num <= &head_block_num { + break; + } + } + + consensus_head_receiver.changed().await?; + + head_block_num = consensus_head_receiver.borrow_and_update().number; + } + + let head_block_num = + head_block_num.expect("we should only get here if we have a head block"); // TODO: geth does 64, erigon does 90k. sometimes we run a mix let archive_needed = num < &(head_block_num - U64::from(64)); - if num > &head_block_num { - // TODO: i'm seeing this a lot when using ethspam. i dont know why though. i thought we delayed publishing - // TODO: instead of error, maybe just sleep and try again? - // TODO: this should be a 401, not a 500 - return Err(anyhow::anyhow!( - "Head block is #{}, but #{} was requested", - head_block_num, - num - )); - } - // try to get the hash from our cache // deref to not keep the lock open if let Some(block_hash) = self.block_numbers.get(num) { @@ -243,7 +260,7 @@ impl Web3Connections { // TODO: if error, retry? // TODO: request_metadata or authorization? let response = self - .try_send_best_consensus_head_connection(authorization, request, None, Some(num)) + .try_send_best_consensus_head_connection(authorization, request, None, Some(num), true) .await?; let raw_block = response.result.context("no block result")?; @@ -320,6 +337,8 @@ impl Web3Connections { .best_consensus_connections(authorization, self) .await; + // TODO: what should we do if the block number of new_synced_connections is < old_synced_connections? wait? + let includes_backups = new_synced_connections.includes_backups; let consensus_head_block = new_synced_connections.head_block.clone(); let num_consensus_rpcs = new_synced_connections.num_conns(); @@ -327,14 +346,14 @@ impl Web3Connections { let num_active_rpcs = consensus_finder.all.rpc_name_to_hash.len(); let total_rpcs = self.conns.len(); - let old_synced_connections = self - .synced_connections - .swap(Arc::new(new_synced_connections)); + let old_consensus_head_connections = self + .watch_consensus_connections_sender + .send_replace(Arc::new(new_synced_connections)); let includes_backups_str = if includes_backups { "B " } else { "" }; if let Some(consensus_saved_block) = consensus_head_block { - match &old_synced_connections.head_block { + match &old_consensus_head_connections.head_block { None => { debug!( "first {}{}/{}/{}/{} block={}, rpc={}", @@ -843,7 +862,13 @@ impl ConsensusFinder { Some(x) => x.number.expect("blocks here should always have a number"), }; - let min_block_num = highest_block_num.saturating_sub(U64::from(5)); + // TODO: also needs to be not less than our current head + let mut min_block_num = highest_block_num.saturating_sub(U64::from(5)); + + // we also want to be sure we don't ever go backwards! 
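A worked example of the clamp the added lines below perform, with hypothetical numbers: if the highest block any server reports is 105 but 103 was already announced as the consensus head, a plain 5-block lookback would let the head slide back to 100.

    // hypothetical numbers mirroring saturating_sub(5) followed by max(head)
    let highest_block_num: u64 = 105;
    let current_consensus_head: u64 = 103;

    let mut min_block_num = highest_block_num.saturating_sub(5);
    assert_eq!(min_block_num, 100); // the lookback alone could regress

    min_block_num = min_block_num.max(current_consensus_head);
    assert_eq!(min_block_num, 103); // clamped: never behind the old head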
+ if let Some(current_consensus_head_num) = web3_connections.head_block_num() { + min_block_num = min_block_num.max(current_consensus_head_num); + } // TODO: pass `min_block_num` to consensus_head_connections? let consensus_head_for_main = self diff --git a/web3_proxy/src/rpcs/connections.rs b/web3_proxy/src/rpcs/connections.rs index 4db3c308..d0c28e85 100644 --- a/web3_proxy/src/rpcs/connections.rs +++ b/web3_proxy/src/rpcs/connections.rs @@ -11,7 +11,6 @@ use crate::frontend::authorization::{Authorization, RequestMetadata}; use crate::frontend::rpc_proxy_ws::ProxyMode; use crate::jsonrpc::{JsonRpcForwardedResponse, JsonRpcRequest}; use crate::rpcs::transactions::TxStatus; -use arc_swap::ArcSwap; use counter::Counter; use derive_more::From; use ethers::prelude::{ProviderError, TxHash, H256, U64}; @@ -38,9 +37,12 @@ use tokio::time::{interval, sleep, sleep_until, Duration, Instant, MissedTickBeh /// A collection of web3 connections. Sends requests either the current best server or all servers. #[derive(From)] pub struct Web3Connections { - pub(crate) conns: HashMap>, /// any requests will be forwarded to one (or more) of these connections - pub(super) synced_connections: ArcSwap, + pub(crate) conns: HashMap>, + /// all providers with the same consensus head block. won't update if there is no `self.watch_consensus_head_sender` + pub(super) watch_consensus_connections_sender: watch::Sender>, + /// this head receiver makes it easy to wait until there is a new block + pub(super) watch_consensus_head_receiver: Option>, pub(super) pending_transactions: Cache, /// TODO: this map is going to grow forever unless we do some sort of pruning. maybe store pruned in redis? @@ -62,7 +64,7 @@ impl Web3Connections { http_client: Option, redis_pool: Option, block_map: BlockHashesCache, - head_block_sender: Option>, + watch_consensus_head_sender: Option>, min_sum_soft_limit: u32, min_head_rpcs: usize, pending_tx_sender: Option>, @@ -138,7 +140,7 @@ impl Web3Connections { let redis_pool = redis_pool.clone(); let http_interval_sender = http_interval_sender.clone(); - let block_sender = if head_block_sender.is_some() { + let block_sender = if watch_consensus_head_sender.is_some() { Some(block_sender.clone()) } else { None @@ -192,8 +194,6 @@ impl Web3Connections { } } - let synced_connections = ConsensusConnections::default(); - // TODO: max_capacity and time_to_idle from config // all block hashes are the same size, so no need for weigher let block_hashes = Cache::builder() @@ -206,9 +206,15 @@ impl Web3Connections { .max_capacity(10_000) .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default()); + let (watch_consensus_connections_sender, _) = watch::channel(Default::default()); + + let watch_consensus_head_receiver = + watch_consensus_head_sender.as_ref().map(|x| x.subscribe()); + let connections = Arc::new(Self { conns: connections, - synced_connections: ArcSwap::new(Arc::new(synced_connections)), + watch_consensus_connections_sender, + watch_consensus_head_receiver, pending_transactions, block_hashes, block_numbers, @@ -228,7 +234,7 @@ impl Web3Connections { authorization, pending_tx_id_receiver, block_receiver, - head_block_sender, + watch_consensus_head_sender, pending_tx_sender, ) .await @@ -447,11 +453,12 @@ impl Web3Connections { (Option, u64), Vec>, > = { - let synced_connections = self.synced_connections.load(); + let synced_connections = self.watch_consensus_connections_sender.borrow().clone(); let head_block_num = if let Some(head_block) = synced_connections.head_block.as_ref() { 
head_block.number() } else { + // TODO: optionally wait for a head block >= min_block_needed return Ok(OpenRequestResult::NotReady); }; @@ -495,6 +502,7 @@ impl Web3Connections { } } cmp::Ordering::Greater => { + // TODO? if the blocks is close and wait_for_sync and allow_backups, wait for change on a watch_consensus_connections_receiver().subscribe() return Ok(OpenRequestResult::NotReady); } } @@ -712,18 +720,27 @@ impl Web3Connections { } /// be sure there is a timeout on this or it might loop forever + /// TODO: think more about wait_for_sync pub async fn try_send_best_consensus_head_connection( &self, authorization: &Arc, request: JsonRpcRequest, request_metadata: Option<&Arc>, min_block_needed: Option<&U64>, + wait_for_sync: bool, ) -> anyhow::Result { let mut skip_rpcs = vec![]; + let mut watch_consensus_connections = if wait_for_sync { + Some(self.watch_consensus_connections_sender.subscribe()) + } else { + None + }; + // TODO: maximum retries? right now its the total number of servers loop { // TODO: is self.conns still right now that we split main and backup servers? + // TODO: if a new block arrives, we probably want to reset the skip list if skip_rpcs.len() == self.conns.len() { // no servers to try break; @@ -833,9 +850,6 @@ impl Web3Connections { rpc, err ); - // TODO: sleep how long? until synced_connections changes or rate limits are available - // sleep(Duration::from_millis(100)).await; - continue; } } @@ -851,16 +865,38 @@ impl Web3Connections { request_metadata.no_servers.fetch_add(1, Ordering::Release); } - sleep_until(retry_at).await; - - continue; + if let Some(watch_consensus_connections) = watch_consensus_connections.as_mut() + { + // TODO: if there are other servers in synced_connections, we should continue now + // wait until retry_at OR synced_connections changes + tokio::select! { + _ = sleep_until(retry_at) => { + skip_rpcs.pop(); + } + _ = watch_consensus_connections.changed() => { + // TODO: would be nice to save this retry_at so we don't keep hitting limits + let _ = watch_consensus_connections.borrow_and_update(); + } + } + continue; + } else { + break; + } } OpenRequestResult::NotReady => { if let Some(request_metadata) = request_metadata { request_metadata.no_servers.fetch_add(1, Ordering::Release); } - break; + if wait_for_sync { + // TODO: race here. there might have been a change while we were waiting on the previous server + self.watch_consensus_connections_sender + .subscribe() + .changed() + .await?; + } else { + break; + } } } } @@ -979,6 +1015,7 @@ impl Web3Connections { request, request_metadata, min_block_needed, + true, ) .await } @@ -1007,8 +1044,11 @@ impl Serialize for Web3Connections { let conns: Vec<&Web3Connection> = self.conns.values().map(|x| x.as_ref()).collect(); state.serialize_field("conns", &conns)?; - let synced_connections = &**self.synced_connections.load(); - state.serialize_field("synced_connections", synced_connections)?; + { + let consensus_connections = self.watch_consensus_connections_sender.borrow().clone(); + // TODO: rename synced_connections to consensus_connections? 
+ state.serialize_field("synced_connections", &consensus_connections)?; + } self.block_hashes.sync(); self.block_numbers.sync(); @@ -1128,9 +1168,13 @@ mod tests { (lagged_rpc.name.clone(), lagged_rpc.clone()), ]); + let (watch_consensus_connections_sender, _) = watch::channel(Default::default()); + + // TODO: make a Web3Connections::new let conns = Web3Connections { conns, - synced_connections: Default::default(), + watch_consensus_head_receiver: None, + watch_consensus_connections_sender, pending_transactions: Cache::builder() .max_capacity(10_000) .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default()), @@ -1350,9 +1394,13 @@ mod tests { (archive_rpc.name.clone(), archive_rpc.clone()), ]); + let (watch_consensus_connections_sender, _) = watch::channel(Default::default()); + + // TODO: make a Web3Connections::new let conns = Web3Connections { conns, - synced_connections: Default::default(), + watch_consensus_head_receiver: None, + watch_consensus_connections_sender, pending_transactions: Cache::builder() .max_capacity(10) .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default()), diff --git a/web3_proxy/src/rpcs/synced_connections.rs b/web3_proxy/src/rpcs/synced_connections.rs index 824857ce..224381df 100644 --- a/web3_proxy/src/rpcs/synced_connections.rs +++ b/web3_proxy/src/rpcs/synced_connections.rs @@ -1,4 +1,4 @@ -use super::blockchain::SavedBlock; +use super::blockchain::{ArcBlock, SavedBlock}; use super::connection::Web3Connection; use super::connections::Web3Connections; use ethers::prelude::{H256, U64}; @@ -43,31 +43,29 @@ impl fmt::Debug for ConsensusConnections { } impl Web3Connections { - pub fn head_block(&self) -> Option { - self.synced_connections.load().head_block.clone() + pub fn head_block(&self) -> Option { + self.watch_consensus_head_receiver + .as_ref() + .map(|x| x.borrow().clone()) } pub fn head_block_hash(&self) -> Option { - self.synced_connections - .load() - .head_block - .as_ref() - .map(|head_block| head_block.hash()) + self.head_block().and_then(|x| x.hash) } pub fn head_block_num(&self) -> Option { - self.synced_connections - .load() - .head_block - .as_ref() - .map(|head_block| head_block.number()) + self.head_block().and_then(|x| x.number) } pub fn synced(&self) -> bool { - !self.synced_connections.load().conns.is_empty() + !self + .watch_consensus_connections_sender + .borrow() + .conns + .is_empty() } pub fn num_synced_rpcs(&self) -> usize { - self.synced_connections.load().conns.len() + self.watch_consensus_connections_sender.borrow().conns.len() } } From 56fcd68be7d30b1c393aaf2b24c189e19fab8c0d Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Mon, 23 Jan 2023 12:32:59 -0800 Subject: [PATCH 39/80] increase gas estimation on polygon even more --- web3_proxy/src/bin/web3_proxy_cli/main.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs index 54027093..539c04c2 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/main.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs @@ -144,12 +144,13 @@ fn main() -> anyhow::Result<()> { } if top_config.app.chain_id == 137 { + // TODO: these numbers are arbitrary. 
i think the maticnetwork/erigon fork has a bug
         if top_config.app.gas_increase_min.is_none() {
-            top_config.app.gas_increase_min = Some(U256::from(25_000));
+            top_config.app.gas_increase_min = Some(U256::from(40_000));
         }
 
         if top_config.app.gas_increase_percent.is_none() {
-            top_config.app.gas_increase_percent = Some(U256::from(25));
+            top_config.app.gas_increase_percent = Some(U256::from(40));
         }
     }
 

From cfa840a140b95bfb6e8d40d8cff5e8682fb8f650 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Mon, 23 Jan 2023 21:08:24 -0800
Subject: [PATCH 40/80] DRY and fix comment

---
 web3_proxy/src/bin/web3_proxy_cli/daemon.rs | 2 +-
 web3_proxy/src/bin/web3_proxy_cli/main.rs   | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/web3_proxy/src/bin/web3_proxy_cli/daemon.rs b/web3_proxy/src/bin/web3_proxy_cli/daemon.rs
index 1beeeb86..9019592a 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/daemon.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/daemon.rs
@@ -9,7 +9,7 @@ use web3_proxy::app::{flatten_handle, flatten_handles, Web3ProxyApp};
 use web3_proxy::config::TopConfig;
 use web3_proxy::{frontend, metrics_frontend};
 
-/// count requests
+/// start the main proxy daemon
 #[derive(FromArgs, PartialEq, Debug, Eq)]
 #[argh(subcommand, name = "proxyd")]
 pub struct ProxydSubCommand {
diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs
index 539c04c2..08f80ff5 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/main.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs
@@ -194,18 +194,18 @@ fn main() -> anyhow::Result<()> {
     // set up tokio's async runtime
     let mut rt_builder = runtime::Builder::new_multi_thread();
 
+    rt_builder.enable_all();
+
     if let Some(top_config) = top_config.as_ref() {
         let chain_id = top_config.app.chain_id;
 
-        rt_builder.enable_all().thread_name_fn(move || {
+        rt_builder.thread_name_fn(move || {
             static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
             // TODO: what ordering? i think we want seqcst so that these all happen in order, but that might be stricter than we really need
             let worker_id = ATOMIC_ID.fetch_add(1, atomic::Ordering::SeqCst);
             // TODO: i think these max at 15 characters
             format!("web3-{}-{}", chain_id, worker_id)
         });
-    } else {
-        rt_builder.enable_all();
     }
 
     // start tokio's async runtime

From 47daab3b67c9bc0a23870ad071bf8b12a531bb64 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Mon, 23 Jan 2023 21:37:23 -0800
Subject: [PATCH 41/80] drop log level for backup servers

---
 web3_proxy/src/app/mod.rs         |  2 ++
 web3_proxy/src/rpcs/connection.rs | 10 +++++++++-
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/web3_proxy/src/app/mod.rs b/web3_proxy/src/app/mod.rs
index 0c60ce81..accd2d60 100644
--- a/web3_proxy/src/app/mod.rs
+++ b/web3_proxy/src/app/mod.rs
@@ -1099,6 +1099,7 @@ impl Web3ProxyApp {
                 | "shh_version") => {
                     // TODO: client error stat
                     // TODO: proper error code
+                    // TODO: right now this sends a warn level log. that's too verbose
                     return Err(anyhow::anyhow!("method unsupported: {}", method));
                 }
                 // TODO: implement these commands
@@ -1109,6 +1110,7 @@ impl Web3ProxyApp {
                 | "eth_newPendingTransactionFilter"
                 | "eth_uninstallFilter") => {
                     // TODO: unsupported command stat
+                    // TODO: right now this sends a warn level log. that's too verbose
                     return Err(anyhow::anyhow!("not yet implemented: {}", method));
                 }
                 // some commands can use local data or caches
diff --git a/web3_proxy/src/rpcs/connection.rs b/web3_proxy/src/rpcs/connection.rs
index ea3f9c67..bfb8a9a3 100644
--- a/web3_proxy/src/rpcs/connection.rs
+++ b/web3_proxy/src/rpcs/connection.rs
@@ -337,7 +337,15 @@ impl Web3Connection {
                     );
 
                     let retry_in = Duration::from_millis(sleep_ms);
-                    info!(
+
+                    let error_level = if self.backup {
+                        log::Level::Debug
+                    } else {
+                        log::Level::Info
+                    };
+
+                    log::log!(
+                        error_level,
                         "Failed reconnect to {}! Retry in {}ms. err={:?}",
                         self,
                         retry_in.as_millis(),

From 5df3f9e28b3f47fa0a0b208c1e0faa761f2dd91f Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Mon, 23 Jan 2023 21:39:42 -0800
Subject: [PATCH 42/80] enable lto on release

---
 Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Cargo.toml b/Cargo.toml
index 75a15ab9..be3c4406 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,6 +12,6 @@ members = [
 # we leave debug = true on so that sentry can give us line numbers
 debug = true
 # TODO: enable lto (and maybe other things proven with benchmarks) once rapid development is done
-#lto = true
+lto = true
 
 # TODO: we can't do panic = abort because the websockets disconnect by panicking sometimes

From c1e81089b155601e9894ce36bd436537d5a1d231 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Mon, 23 Jan 2023 21:50:59 -0800
Subject: [PATCH 43/80] sort dependencies

---
 Cargo.toml            |  1 +
 web3_proxy/Cargo.toml | 24 ++++++++++++++----------
 2 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index be3c4406..7b5d68aa 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -15,3 +15,4 @@ debug = true
 lto = true
 
 # TODO: we can't do panic = abort because the websockets disconnect by panicking sometimes
+# TODO: i want to have a panic handler that sends things to pagerduty when we panic, but that will be too verbose too
diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml
index ecd7015d..df8b73c6 100644
--- a/web3_proxy/Cargo.toml
+++ b/web3_proxy/Cargo.toml
@@ -19,48 +19,51 @@ migration = { path = "../migration" }
 redis-rate-limiter = { path = "../redis-rate-limiter" }
 thread-fast-rng = { path = "../thread-fast-rng" }
 
+# TODO: regex has several "perf" features that we might want to use
+# TODO: make sure this uuid version matches sea-orm. PR to put this in their prelude
+# TODO: import num_traits from sea-orm so we always have the same version
+# TODO: import chrono from sea-orm so we always have the same version
+# TODO: make sure this time version matches siwe.
PR to put this in their prelude + anyhow = { version = "1.0.68", features = ["backtrace"] } argh = "0.1.10" axum = { version = "0.6.3", features = ["headers", "ws"] } axum-client-ip = "0.3.1" axum-macros = "0.3.1" -# TODO: import chrono from sea-orm so we always have the same version chrono = "0.4.23" counter = "0.5.7" derive_more = "0.99.17" dotenv = "0.15.0" -ethers = { version = "1.0.2", default-features = false, features = ["rustls", "ws"] } env_logger = "0.10.0" +ethers = { version = "1.0.2", default-features = false, features = ["rustls", "ws"] } fdlimit = "0.2.1" flume = "0.10.14" futures = { version = "0.3.25", features = ["thread-pool"] } +glob = "0.3.1" +handlebars = "4.3.6" hashbrown = { version = "0.13.2", features = ["serde"] } hdrhistogram = "7.5.2" http = "0.2.8" ipnet = "2.7.1" +itertools = "0.10.5" log = "0.4.17" metered = { version = "0.9.0", features = ["serialize"] } moka = { version = "0.9.6", default-features = false, features = ["future"] } notify = "5.0.0" num = "0.4.0" -# TODO: import num_traits from sea-orm so we always have the same version num-traits = "0.2.15" parking_lot = { version = "0.12.1", features = ["arc_lock"] } proctitle = "0.1.1" -# TODO: regex has several "perf" features that we might want to use regex = "1.7.1" reqwest = { version = "0.11.14", default-features = false, features = ["json", "tokio-rustls"] } -handlebars = "4.3.6" rustc-hash = "1.1.0" -siwe = "0.5.0" sentry = { version = "0.29.1", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] } serde = { version = "1.0.152", features = [] } serde_json = { version = "1.0.91", default-features = false, features = ["alloc", "raw_value"] } serde_prometheus = "0.1.6" -# TODO: make sure this time version matches siwe. PR to put this in their prelude +siwe = "0.5.0" time = "0.3.17" tokio = { version = "1.24.2", features = ["full"] } -# TODO: make sure this uuid version matches sea-orm. 
PR to put this in their prelude tokio-stream = { version = "0.1.11", features = ["sync"] } toml = "0.5.11" tower = "0.4.13" @@ -68,5 +71,6 @@ tower-http = { version = "0.3.5", features = ["cors", "sensitive-headers"] } ulid = { version = "1.0.0", features = ["serde"] } url = "2.3.1" uuid = "1.2.2" -itertools = "0.10.5" -glob = "0.3.1" + +# # TODO: i'd like to add this, but websockets with ethers often disconnect with a panic +# pagerduty_panic = "0.1.1" From 776bcd149f10d5c6775cca54197dd16a633e2c2f Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 24 Jan 2023 00:05:31 -0800 Subject: [PATCH 44/80] send panics to pagerduty --- Cargo.lock | 191 ++++++++++++++++++ web3_proxy/Cargo.toml | 2 + web3_proxy/src/bin/web3_proxy_cli/main.rs | 81 +++++++- .../src/bin/web3_proxy_cli/sentryd/mod.rs | 4 - 4 files changed, 272 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b131edf9..47dab722 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -948,6 +948,16 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "core-foundation" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.3" @@ -1834,6 +1844,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.1.0" @@ -2045,6 +2070,16 @@ dependencies = [ "version_check", ] +[[package]] +name = "gethostname" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a329e22866dd78b35d2c639a4a23d7b950aeae300dfd79f4fb19f74055c2404" +dependencies = [ + "libc", + "windows", +] + [[package]] name = "getrandom" version = "0.2.6" @@ -2344,6 +2379,19 @@ dependencies = [ "tokio-rustls", ] +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-native-tls", +] + [[package]] name = "iana-time-zone" version = "0.1.46" @@ -2819,6 +2867,24 @@ dependencies = [ "getrandom", ] +[[package]] +name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + [[package]] name = "new_debug_unreachable" version = "1.0.4" @@ -3031,6 +3097,51 @@ dependencies = [ "syn", ] +[[package]] +name = "openssl" +version = "0.10.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" 
+dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "os_info" version = "3.5.1" @@ -3078,6 +3189,18 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "pagerduty-rs" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd10bab2b6df910bbe6c4987d76aa4221235103d9a9c000cfabcee6a6abc8f7a" +dependencies = [ + "reqwest", + "serde", + "time 0.3.17", + "url", +] + [[package]] name = "parity-scale-codec" version = "3.1.2" @@ -3711,10 +3834,12 @@ dependencies = [ "http-body", "hyper", "hyper-rustls", + "hyper-tls", "ipnet", "js-sys", "log", "mime", + "native-tls", "once_cell", "percent-encoding", "pin-project-lite", @@ -3724,6 +3849,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "tokio", + "tokio-native-tls", "tokio-rustls", "tower-service", "url", @@ -3943,6 +4069,15 @@ dependencies = [ "syn", ] +[[package]] +name = "schannel" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" +dependencies = [ + "windows-sys 0.42.0", +] + [[package]] name = "scheduled-thread-pool" version = "0.2.6" @@ -4154,6 +4289,29 @@ dependencies = [ "zeroize", ] +[[package]] +name = "security-framework" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645926f31b250a2dca3c232496c2d898d91036e45ca0e97e0e2390c54e11be36" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "semver" version = "1.0.14" @@ -5018,6 +5176,16 @@ dependencies = [ "syn", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.23.4" @@ -5427,6 +5595,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "version_check" version = "0.9.4" @@ -5583,6 +5757,7 @@ dependencies = [ "fdlimit", "flume", "futures", + "gethostname", "glob", "handlebars", "hashbrown 0.13.2", @@ -5597,6 +5772,7 @@ dependencies = [ "notify", "num", "num-traits", + "pagerduty-rs", "parking_lot 0.12.1", "proctitle", "redis-rate-limiter", @@ -5679,6 +5855,21 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.43.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04662ed0e3e5630dfa9b26e4cb823b817f1a9addda855d973a9458c236556244" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.0", + "windows_i686_gnu 0.42.0", + "windows_i686_msvc 0.42.0", + "windows_x86_64_gnu 0.42.0", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.0", +] + [[package]] name = "windows-sys" version = "0.36.1" diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml index df8b73c6..2f1089b9 100644 --- a/web3_proxy/Cargo.toml +++ b/web3_proxy/Cargo.toml @@ -39,6 +39,7 @@ ethers = { version = "1.0.2", default-features = false, features = ["rustls", "w fdlimit = "0.2.1" flume = "0.10.14" futures = { version = "0.3.25", features = ["thread-pool"] } +gethostname = "0.4.1" glob = "0.3.1" handlebars = "4.3.6" hashbrown = { version = "0.13.2", features = ["serde"] } @@ -52,6 +53,7 @@ moka = { version = "0.9.6", default-features = false, features = ["future"] } notify = "5.0.0" num = "0.4.0" num-traits = "0.2.15" +pagerduty-rs = { version = "0.1.6", features = ["async", "sync"] } parking_lot = { version = "0.12.1", features = ["arc_lock"] } proctitle = "0.1.1" regex = "1.7.1" diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs index 08f80ff5..7a3a3ee9 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/main.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs @@ -8,6 +8,7 @@ mod create_user; mod daemon; mod drop_migration_lock; mod list_user_tier; +mod pagerduty; mod rpc_accounting; mod sentryd; mod transfer_key; @@ -17,9 +18,13 @@ mod user_import; use anyhow::Context; use argh::FromArgs; use ethers::types::U256; -use log::{info, warn}; +use gethostname::gethostname; +use log::{error, info, warn}; +use pagerduty_rs::eventsv2sync::EventsV2 as PagerdutySyncEventsV2; +use pagerduty_rs::types::{AlertTrigger, AlertTriggerPayload}; +use pagerduty_rs::{eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, types::Event}; use std::{ - fs, + fs, panic, path::Path, sync::atomic::{self, AtomicUsize}, }; @@ -71,6 +76,7 @@ enum SubCommand { CountUsers(count_users::CountUsersSubCommand), CreateUser(create_user::CreateUserSubCommand), DropMigrationLock(drop_migration_lock::DropMigrationLockSubCommand), + Pagerduty(pagerduty::PagerdutySubCommand), Proxyd(daemon::ProxydSubCommand), RpcAccounting(rpc_accounting::RpcAccountingSubCommand), Sentryd(sentryd::SentrydSubCommand), @@ -191,6 +197,70 @@ fn main() -> anyhow::Result<()> { info!("{}", APP_USER_AGENT); + // optionally connect to pagerduty + // TODO: fix this nested result + let (pagerduty_async, pagerduty_sync) = if let Ok(pagerduty_key) = + std::env::var("PAGERDUTY_INTEGRATION_KEY") + { + let pagerduty_async = + PagerdutyAsyncEventsV2::new(pagerduty_key.clone(), Some(APP_USER_AGENT.to_string()))?; + let pagerduty_sync = + PagerdutySyncEventsV2::new(pagerduty_key, 
Some(APP_USER_AGENT.to_string()))?; + + (Some(pagerduty_async), Some(pagerduty_sync)) + } else { + info!("No PAGERDUTY_INTEGRATION_KEY"); + + (None, None) + }; + + // panic handler that sends to pagerduty + // TODO: there is a `pagerduty_panic` module that looks like it would work with minor tweaks, but ethers-rs panics when a websocket exit and that would fire too many alerts + + if let Some(pagerduty_sync) = pagerduty_sync { + let client = top_config + .as_ref() + .map(|top_config| format!("web3-proxy chain #{}", top_config.app.chain_id)) + .unwrap_or_else(|| format!("web3-proxy w/o chain")); + + let client_url = top_config + .as_ref() + .and_then(|x| x.app.redirect_public_url.clone()); + + panic::set_hook(Box::new(move |x| { + let hostname = gethostname().into_string().unwrap_or("unknown".to_string()); + let panic_msg = format!("{} {:?}", x, x); + + error!("sending panic to pagerduty: {}", panic_msg); + + let payload = AlertTriggerPayload { + severity: pagerduty_rs::types::Severity::Error, + summary: panic_msg.clone(), + source: hostname, + timestamp: None, + component: None, + group: Some("web3-proxy".to_string()), + class: Some("panic".to_string()), + custom_details: None::<()>, + }; + + let event = Event::AlertTrigger(AlertTrigger { + payload, + dedup_key: None, + images: None, + links: None, + client: Some(client.clone()), + client_url: client_url.clone(), + }); + + if let Err(err) = pagerduty_sync.event(event) { + error!("Failed sending panic to pagerduty: {}", err); + } + })); + } else { + info!("No pagerduty key. Using default panic handler"); + } + // set up tokio's async runtime let mut rt_builder = runtime::Builder::new_multi_thread(); @@ -286,6 +356,13 @@ fn main() -> anyhow::Result<()> { x.main(&db_conn).await } + SubCommand::Pagerduty(x) => { + if cli_config.sentry_url.is_none() { + warn!("sentry_url is not set! Logs will only show in this console"); + } + + x.main(pagerduty_async, top_config).await + } SubCommand::Sentryd(x) => { if cli_config.sentry_url.is_none() { warn!("sentry_url is not set! Logs will only show in this console"); diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs index 51d0df7a..8199bfc3 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs @@ -13,10 +13,6 @@ use tokio::time::{interval, MissedTickBehavior}; /// Loop healthchecks and send pager duty alerts if any fail #[argh(subcommand, name = "sentryd")] pub struct SentrydSubCommand { - #[argh(positional)] - /// a descriptive name for this node (probably the hostname) - location: String, - #[argh(positional)] /// the main (HTTP only) web3-proxy being checked. 
web3_proxy: String, From 7ec4c69fd763749a7c510bb5ee50121473933ab4 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 24 Jan 2023 00:05:41 -0800 Subject: [PATCH 45/80] actually add the new file --- .../src/bin/web3_proxy_cli/pagerduty.rs | 89 +++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs diff --git a/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs b/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs new file mode 100644 index 00000000..2097068d --- /dev/null +++ b/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs @@ -0,0 +1,89 @@ +use argh::FromArgs; +use gethostname::gethostname; +use log::{error, info}; +use pagerduty_rs::{ + eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, + types::{AlertTrigger, AlertTriggerPayload, Event}, +}; +use web3_proxy::config::TopConfig; + +#[derive(FromArgs, PartialEq, Debug, Eq)] +/// Quickly create a pagerduty alert +#[argh(subcommand, name = "pagerduty")] +pub struct PagerdutySubCommand { + #[argh(positional)] + /// short description of the alert + summary: String, + + #[argh(option)] + /// the class/type of the event + class: Option, + + #[argh(option)] + /// deduplicate alerts based on this key. + /// If there are no open incidents with this key, a new incident will be created. + /// If there is an open incident with a matching key, the new event will be appended to that incident's Alerts log as an additional Trigger log entry. + dedup_key: Option, + + #[argh(option, default = "\"web3-proxy\".to_string()")] + /// a cluster or grouping of sources. + /// For example, sources "ethereum-proxy" and "polygon-proxy" might both be part of "web3-proxy". + group: String, +} + +impl PagerdutySubCommand { + pub async fn main( + &self, + pagerduty_async: Option, + top_config: Option, + ) -> anyhow::Result<()> { + let client = top_config + .as_ref() + .map(|top_config| format!("web3-proxy chain #{}", top_config.app.chain_id)) + .unwrap_or_else(|| format!("web3-proxy w/o chain")); + + let client_url = top_config + .as_ref() + .and_then(|x| x.app.redirect_public_url.clone()); + + let hostname = gethostname().into_string().unwrap_or("unknown".to_string()); + + let payload = AlertTriggerPayload { + severity: pagerduty_rs::types::Severity::Error, + summary: self.summary.clone(), + source: hostname, + timestamp: None, + component: None, + group: Some(self.group.clone()), + class: self.class.clone(), + custom_details: None::<()>, + }; + + let event = AlertTrigger { + payload, + dedup_key: None, + images: None, + links: None, + client: Some(client), + client_url: client_url, + }; + + if let Some(pagerduty_async) = pagerduty_async { + info!( + "sending to pagerduty: {}", + serde_json::to_string_pretty(&event)? + ); + + if let Err(err) = pagerduty_async.event(Event::AlertTrigger(event)).await { + error!("Failed sending to pagerduty: {}", err); + } + } else { + info!( + "would send to pagerduty if PAGERDUTY_INTEGRATION_KEY were set: {}", + serde_json::to_string_pretty(&event)? + ); + } + + Ok(()) + } +} From a242244a353060a6ad5c47bdd9d80b0fdcab15a8 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 24 Jan 2023 01:58:31 -0800 Subject: [PATCH 46/80] broadcast transactions to more servers --- TODO.md | 6 +++++ web3_proxy/src/app/mod.rs | 4 +-- web3_proxy/src/rpcs/connections.rs | 43 +++++++++++++++++++++--------- 3 files changed, 39 insertions(+), 14 deletions(-) diff --git a/TODO.md b/TODO.md index b28791d7..10393a3e 100644 --- a/TODO.md +++ b/TODO.md @@ -312,6 +312,12 @@ These are not yet ordered. 
There might be duplicates. We might not actually need - [x] public is 3900, but free is 360. free should be at least 3900 but probably more - [x] add --max-wait to wait_for_sync - [x] add automatic compare urls to wait_for_sync +- [x] send panics to pagerduty +- [x] enable lto on release builds +- [x] less logs for backup servers +- [x] use channels instead of arcswap + - this will let us easily wait for a new head or a new synced connection +- [x] broadcast transactions to more servers - [-] proxy mode for benchmarking all backends - [-] proxy mode for sending to multiple backends - [-] let users choose a % of reverts to log (or maybe x/second). someone like curve logging all reverts will be a BIG database very quickly diff --git a/web3_proxy/src/app/mod.rs b/web3_proxy/src/app/mod.rs index accd2d60..2234382f 100644 --- a/web3_proxy/src/app/mod.rs +++ b/web3_proxy/src/app/mod.rs @@ -1194,12 +1194,12 @@ impl Web3ProxyApp { // TODO: how should we handle private_mode here? let default_num = match proxy_mode { // TODO: how many balanced rpcs should we send to? configurable? percentage of total? - ProxyMode::Best => Some(2), + ProxyMode::Best => Some(4), ProxyMode::Fastest(0) => None, // TODO: how many balanced rpcs should we send to? configurable? percentage of total? // TODO: what if we do 2 per tier? we want to blast the third party rpcs // TODO: maybe having the third party rpcs in their own Web3Connections would be good for this - ProxyMode::Fastest(x) => Some(x * 2), + ProxyMode::Fastest(x) => Some(x * 4), ProxyMode::Versus => None, }; diff --git a/web3_proxy/src/rpcs/connections.rs b/web3_proxy/src/rpcs/connections.rs index d0c28e85..edd94dc3 100644 --- a/web3_proxy/src/rpcs/connections.rs +++ b/web3_proxy/src/rpcs/connections.rs @@ -17,7 +17,7 @@ use ethers::prelude::{ProviderError, TxHash, H256, U64}; use futures::future::{join_all, try_join_all}; use futures::stream::FuturesUnordered; use futures::StreamExt; -use hashbrown::HashMap; +use hashbrown::{HashMap, HashSet}; use log::{debug, error, info, trace, warn, Level}; use migration::sea_orm::DatabaseConnection; use moka::future::{Cache, ConcurrentCacheExt}; @@ -635,26 +635,27 @@ impl Web3Connections { } /// get all rpc servers that are not rate limited - /// returns servers even if they aren't fully in sync. This is useful for broadcasting signed transactions + /// this prefers synced servers, but it will return servers even if they aren't fully in sync. + /// This is useful for broadcasting signed transactions. // TODO: better type on this that can return an anyhow::Result - pub async fn all_synced_connections( + pub async fn all_connections( &self, authorization: &Arc, block_needed: Option<&U64>, max_count: Option, ) -> Result, Option> { if let Ok(without_backups) = self - ._all_synced_connections(false, authorization, block_needed, max_count) + ._all_connections(false, authorization, block_needed, max_count) .await { return Ok(without_backups); } - self._all_synced_connections(true, authorization, block_needed, max_count) + self._all_connections(true, authorization, block_needed, max_count) .await } - async fn _all_synced_connections( + async fn _all_connections( &self, allow_backups: bool, authorization: &Arc, @@ -665,17 +666,35 @@ impl Web3Connections { // TODO: with capacity? 
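One possible answer to the "with capacity?" TODO above, since the cap is knowable before the loop runs: resolve `max_count` first and size the Vec once. This sketch reorders the `max_count` resolution that follows and is not part of the patch:

    // resolve the cap up front, then allocate exactly once so the Vec
    // never reallocates while request handles are being selected
    let max_count = max_count.unwrap_or_else(|| self.conns.len());
    let mut selected_rpcs = Vec::with_capacity(max_count);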
let mut selected_rpcs = vec![]; - let mut max_count = if max_count.is_none() { - self.conns.len() + let mut max_count = if let Some(max_count) = max_count { + max_count } else { - self.conns.len().min(max_count.unwrap()) + self.conns.len() }; - for connection in self.conns.values() { + let mut tried = HashSet::new(); + + let conns_to_try = itertools::chain( + // TODO: sort by tier + self.watch_consensus_connections_sender + .borrow() + .conns + .clone(), + // TODO: sort by tier + self.conns.values().cloned(), + ); + + for connection in conns_to_try { if max_count == 0 { break; } + if tried.contains(&connection.name) { + continue; + } + + tried.insert(connection.name.clone()); + if !allow_backups && connection.backup { continue; } @@ -927,7 +946,7 @@ impl Web3Connections { ) -> anyhow::Result { loop { match self - .all_synced_connections(authorization, block_needed, max_count) + .all_connections(authorization, block_needed, max_count) .await { Ok(active_request_handles) => { @@ -1224,7 +1243,7 @@ mod tests { // all_backend_connections gives everything regardless of sync status assert_eq!( conns - .all_synced_connections(&authorization, None, None) + .all_connections(&authorization, None, None) .await .unwrap() .len(), From 54d190acfc0ce82f70e4e209abc9ba2203dbc70b Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 24 Jan 2023 02:45:48 -0800 Subject: [PATCH 47/80] dryer pagerduty code --- .../src/bin/web3_proxy_cli/pagerduty.rs | 71 +++++++++---------- .../src/bin/web3_proxy_cli/sentryd/mod.rs | 40 +++++++++-- web3_proxy/src/lib.rs | 1 + 3 files changed, 71 insertions(+), 41 deletions(-) diff --git a/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs b/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs index 2097068d..34cd5586 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs @@ -1,11 +1,10 @@ use argh::FromArgs; -use gethostname::gethostname; use log::{error, info}; -use pagerduty_rs::{ - eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, - types::{AlertTrigger, AlertTriggerPayload, Event}, +use pagerduty_rs::{eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, types::Event}; +use web3_proxy::{ + config::TopConfig, + pagerduty::{pagerduty_event_for_config, trigger_pagerduty_alert}, }; -use web3_proxy::config::TopConfig; #[derive(FromArgs, PartialEq, Debug, Eq)] /// Quickly create a pagerduty alert @@ -19,6 +18,10 @@ pub struct PagerdutySubCommand { /// the class/type of the event class: Option, + #[argh(option)] + /// the component of the event + component: Option, + #[argh(option)] /// deduplicate alerts based on this key. /// If there are no open incidents with this key, a new incident will be created. 
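The dedup_key contract described above suggests a pattern the panic and sentryd alerts in this series could reuse: derive the key from the alert's own text so repeated failures attach to a single open incident instead of paging once per occurrence. A sketch against the pagerduty_rs types used here, where the hashing scheme is an assumption for illustration and is only stable within one build:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    use pagerduty_rs::types::{AlertTrigger, AlertTriggerPayload, Severity};

    // same summary => same key => one PagerDuty incident
    fn dedup_key_for(summary: &str) -> String {
        let mut hasher = DefaultHasher::new();
        summary.hash(&mut hasher);
        format!("web3-proxy-{:016x}", hasher.finish())
    }

    fn alert_with_dedup(summary: String, source: String) -> AlertTrigger<()> {
        AlertTrigger {
            payload: AlertTriggerPayload {
                severity: Severity::Error,
                summary: summary.clone(),
                source,
                timestamp: None,
                component: None,
                group: Some("web3-proxy".to_string()),
                class: None,
                custom_details: None,
            },
            dedup_key: Some(dedup_key_for(&summary)),
            images: None,
            links: None,
            client: None,
            client_url: None,
        }
    }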
@@ -33,40 +36,36 @@ pub struct PagerdutySubCommand { impl PagerdutySubCommand { pub async fn main( - &self, + self, pagerduty_async: Option, top_config: Option, ) -> anyhow::Result<()> { - let client = top_config - .as_ref() - .map(|top_config| format!("web3-proxy chain #{}", top_config.app.chain_id)) - .unwrap_or_else(|| format!("web3-proxy w/o chain")); - - let client_url = top_config - .as_ref() - .and_then(|x| x.app.redirect_public_url.clone()); - - let hostname = gethostname().into_string().unwrap_or("unknown".to_string()); - - let payload = AlertTriggerPayload { - severity: pagerduty_rs::types::Severity::Error, - summary: self.summary.clone(), - source: hostname, - timestamp: None, - component: None, - group: Some(self.group.clone()), - class: self.class.clone(), - custom_details: None::<()>, - }; - - let event = AlertTrigger { - payload, - dedup_key: None, - images: None, - links: None, - client: Some(client), - client_url: client_url, - }; + let event = top_config + .map(|top_config| { + pagerduty_event_for_config( + top_config, + self.class.clone(), + self.component.clone(), + Some(self.group.clone()), + self.summary.clone(), + None, + None::<()>, + ) + }) + .unwrap_or_else(|| { + trigger_pagerduty_alert( + "web3-proxy".to_string(), + None, + self.class, + None, + self.component, + Some(self.group), + None, + self.summary, + None, + None::<()>, + ) + }); if let Some(pagerduty_async) = pagerduty_async { info!( diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs index 8199bfc3..f9e46cd7 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs @@ -7,6 +7,7 @@ use futures::{ Future, }; use std::time::Duration; +use tokio::sync::mpsc; use tokio::time::{interval, MissedTickBehavior}; #[derive(FromArgs, PartialEq, Debug, Eq)] @@ -46,13 +47,33 @@ impl SentrydSubCommand { let mut handles = FuturesUnordered::new(); + // channels and a task for sending errors to logs/pagerduty + let (error_sender, mut error_receiver) = mpsc::channel::<(log::Level, anyhow::Error)>(10); + + { + let error_handler_f = async move { + while let Some((error_level, err)) = error_receiver.recv().await { + log::log!(error_level, "check failed: {:?}", err); + + if matches!(error_level, log::Level::Error) { + todo!("send to pager duty if pager duty exists"); + } + } + + Ok(()) + }; + + handles.push(tokio::spawn(error_handler_f)); + } + // spawn a bunch of health check loops that do their checks on an interval // check the main rpc's /health endpoint { let url = format!("{}/health", self.web3_proxy); + let error_sender = error_sender.clone(); - let loop_f = a_loop(seconds, log::Level::Error, move || { + let loop_f = a_loop(seconds, log::Level::Error, error_sender, move || { simple::main(url.clone()) }); @@ -61,8 +82,11 @@ impl SentrydSubCommand { // check any other web3-proxy /health endpoints for other_web3_proxy in self.other_proxy.iter() { let url = format!("{}/health", other_web3_proxy); + let error_sender = error_sender.clone(); - let loop_f = a_loop(seconds, log::Level::Warn, move || simple::main(url.clone())); + let loop_f = a_loop(seconds, log::Level::Warn, error_sender, move || { + simple::main(url.clone()) + }); handles.push(tokio::spawn(loop_f)); } @@ -72,12 +96,13 @@ impl SentrydSubCommand { let max_age = self.max_age; let max_lag = self.max_lag; let rpc = self.web3_proxy.clone(); + let error_sender = error_sender.clone(); let mut others = self.other_proxy.clone(); 
others.extend(self.other_rpc.clone()); - let loop_f = a_loop(seconds, log::Level::Error, move || { + let loop_f = a_loop(seconds, log::Level::Error, error_sender, move || { compare::main(rpc.clone(), others.clone(), max_age, max_lag) }); @@ -94,7 +119,12 @@ impl SentrydSubCommand { } } -async fn a_loop(seconds: u64, error_level: log::Level, f: impl Fn() -> T) -> anyhow::Result<()> +async fn a_loop( + seconds: u64, + error_level: log::Level, + error_sender: mpsc::Sender<(log::Level, anyhow::Error)>, + f: impl Fn() -> T, +) -> anyhow::Result<()> where T: Future> + Send + 'static, { @@ -107,7 +137,7 @@ where interval.tick().await; if let Err(err) = f().await { - log::log!(error_level, "check failed: {:?}", err); + error_sender.send((error_level, err)).await?; }; } } diff --git a/web3_proxy/src/lib.rs b/web3_proxy/src/lib.rs index 0ae97055..571e245f 100644 --- a/web3_proxy/src/lib.rs +++ b/web3_proxy/src/lib.rs @@ -6,6 +6,7 @@ pub mod frontend; pub mod jsonrpc; pub mod metered; pub mod metrics_frontend; +pub mod pagerduty; pub mod rpcs; pub mod user_queries; pub mod user_token; From 36d64489d8200e539da6ac74aa7f13f5b873f278 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 24 Jan 2023 03:12:23 -0800 Subject: [PATCH 48/80] sentryd to pagerduty --- TODO.md | 1 + web3_proxy/src/bin/web3_proxy_cli/main.rs | 2 +- .../src/bin/web3_proxy_cli/pagerduty.rs | 15 +-- .../src/bin/web3_proxy_cli/sentryd/mod.rs | 92 +++++++++++++++---- 4 files changed, 86 insertions(+), 24 deletions(-) diff --git a/TODO.md b/TODO.md index 10393a3e..15ba2239 100644 --- a/TODO.md +++ b/TODO.md @@ -318,6 +318,7 @@ These are not yet ordered. There might be duplicates. We might not actually need - [x] use channels instead of arcswap - this will let us easily wait for a new head or a new synced connection - [x] broadcast transactions to more servers +- [x] send sentryd errors to pagerduty - [-] proxy mode for benchmarking all backends - [-] proxy mode for sending to multiple backends - [-] let users choose a % of reverts to log (or maybe x/second). someone like curve logging all reverts will be a BIG database very quickly diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs index 7a3a3ee9..a0aebbaa 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/main.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs @@ -368,7 +368,7 @@ fn main() -> anyhow::Result<()> { warn!("sentry_url is not set! 
Logs will only show in this console"); } - x.main().await + x.main(pagerduty_async).await } SubCommand::RpcAccounting(x) => { let db_url = cli_config diff --git a/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs b/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs index 34cd5586..4fadf11a 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs @@ -3,7 +3,7 @@ use log::{error, info}; use pagerduty_rs::{eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, types::Event}; use web3_proxy::{ config::TopConfig, - pagerduty::{pagerduty_event_for_config, trigger_pagerduty_alert}, + pagerduty::{pagerduty_alert, pagerduty_event_for_config}, }; #[derive(FromArgs, PartialEq, Debug, Eq)] @@ -40,30 +40,33 @@ impl PagerdutySubCommand { pagerduty_async: Option, top_config: Option, ) -> anyhow::Result<()> { + // TODO: allow customizing severity let event = top_config .map(|top_config| { pagerduty_event_for_config( - top_config, self.class.clone(), self.component.clone(), + None::<()>, Some(self.group.clone()), + pagerduty_rs::types::Severity::Error, self.summary.clone(), None, - None::<()>, + top_config, ) }) .unwrap_or_else(|| { - trigger_pagerduty_alert( - "web3-proxy".to_string(), + pagerduty_alert( None, self.class, + "web3-proxy".to_string(), None, self.component, + None::<()>, Some(self.group), + pagerduty_rs::types::Severity::Error, None, self.summary, None, - None::<()>, ) }); diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs index f9e46cd7..d7f03da9 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs @@ -6,9 +6,12 @@ use futures::{ stream::{FuturesUnordered, StreamExt}, Future, }; +use log::{error, info}; +use pagerduty_rs::{eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, types::Event}; use std::time::Duration; use tokio::sync::mpsc; use tokio::time::{interval, MissedTickBehavior}; +use web3_proxy::pagerduty::pagerduty_alert; #[derive(FromArgs, PartialEq, Debug, Eq)] /// Loop healthchecks and send pager duty alerts if any fail @@ -39,8 +42,15 @@ pub struct SentrydSubCommand { seconds: Option, } +#[derive(Debug)] +struct Error { + class: String, + level: log::Level, + anyhow: anyhow::Error, +} + impl SentrydSubCommand { - pub async fn main(self) -> anyhow::Result<()> { + pub async fn main(self, pagerduty_async: Option) -> anyhow::Result<()> { // sentry logging should already be configured let seconds = self.seconds.unwrap_or(60); @@ -48,15 +58,44 @@ impl SentrydSubCommand { let mut handles = FuturesUnordered::new(); // channels and a task for sending errors to logs/pagerduty - let (error_sender, mut error_receiver) = mpsc::channel::<(log::Level, anyhow::Error)>(10); + let (error_sender, mut error_receiver) = mpsc::channel::(10); { let error_handler_f = async move { - while let Some((error_level, err)) = error_receiver.recv().await { - log::log!(error_level, "check failed: {:?}", err); + if pagerduty_async.is_none() { + info!("set PAGERDUTY_INTEGRATION_KEY to send create alerts for errors"); + } - if matches!(error_level, log::Level::Error) { - todo!("send to pager duty if pager duty exists"); + while let Some(err) = error_receiver.recv().await { + log::log!(err.level, "check failed: {:?}", err); + + if matches!(err.level, log::Level::Error) { + let alert = pagerduty_alert( + None, + Some(err.class), + "web3-proxy-sentry".to_string(), + None, + None, + None::<()>, + Some("web3-proxy-sentry".to_string()), + 
pagerduty_rs::types::Severity::Error, + None, + format!("{}", err.anyhow), + None, + ); + + if let Some(pagerduty_async) = pagerduty_async.as_ref() { + info!( + "sending to pagerduty: {}", + serde_json::to_string_pretty(&alert)? + ); + + if let Err(err) = + pagerduty_async.event(Event::AlertTrigger(alert)).await + { + error!("Failed sending to pagerduty: {}", err); + } + } } } @@ -73,9 +112,13 @@ impl SentrydSubCommand { let url = format!("{}/health", self.web3_proxy); let error_sender = error_sender.clone(); - let loop_f = a_loop(seconds, log::Level::Error, error_sender, move || { - simple::main(url.clone()) - }); + let loop_f = a_loop( + "main /health", + seconds, + log::Level::Error, + error_sender, + move || simple::main(url.clone()), + ); handles.push(tokio::spawn(loop_f)); } @@ -84,9 +127,13 @@ impl SentrydSubCommand { let url = format!("{}/health", other_web3_proxy); let error_sender = error_sender.clone(); - let loop_f = a_loop(seconds, log::Level::Warn, error_sender, move || { - simple::main(url.clone()) - }); + let loop_f = a_loop( + "other /health", + seconds, + log::Level::Warn, + error_sender, + move || simple::main(url.clone()), + ); handles.push(tokio::spawn(loop_f)); } @@ -102,9 +149,13 @@ impl SentrydSubCommand { others.extend(self.other_rpc.clone()); - let loop_f = a_loop(seconds, log::Level::Error, error_sender, move || { - compare::main(rpc.clone(), others.clone(), max_age, max_lag) - }); + let loop_f = a_loop( + "head block comparison", + seconds, + log::Level::Error, + error_sender, + move || compare::main(rpc.clone(), others.clone(), max_age, max_lag), + ); handles.push(tokio::spawn(loop_f)); } @@ -120,9 +171,10 @@ impl SentrydSubCommand { } async fn a_loop( + class: &str, seconds: u64, error_level: log::Level, - error_sender: mpsc::Sender<(log::Level, anyhow::Error)>, + error_sender: mpsc::Sender, f: impl Fn() -> T, ) -> anyhow::Result<()> where @@ -137,7 +189,13 @@ where interval.tick().await; if let Err(err) = f().await { - error_sender.send((error_level, err)).await?; + let err = Error { + class: class.to_string(), + level: error_level, + anyhow: err, + }; + + error_sender.send(err).await?; }; } } From 7b046451be8ddf107a01cdbd63dbb43b189ecbdb Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 24 Jan 2023 03:12:36 -0800 Subject: [PATCH 49/80] actually add the file --- web3_proxy/src/pagerduty.rs | 75 +++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 web3_proxy/src/pagerduty.rs diff --git a/web3_proxy/src/pagerduty.rs b/web3_proxy/src/pagerduty.rs new file mode 100644 index 00000000..de777c4d --- /dev/null +++ b/web3_proxy/src/pagerduty.rs @@ -0,0 +1,75 @@ +use crate::config::TopConfig; +use gethostname::gethostname; +use pagerduty_rs::types::{AlertTrigger, AlertTriggerPayload}; +use serde::Serialize; +use time::OffsetDateTime; + +pub fn pagerduty_event_for_config( + class: Option, + component: Option, + custom_details: Option, + group: Option, + severity: pagerduty_rs::types::Severity, + summary: String, + timestamp: Option, + top_config: TopConfig, +) -> AlertTrigger { + let chain_id = top_config.app.chain_id; + + let client_url = top_config.app.redirect_public_url.clone(); + + pagerduty_alert( + Some(chain_id), + class, + "web3-proxy".to_string(), + client_url, + component, + custom_details, + group, + severity, + None, + summary, + timestamp, + ) +} + +pub fn pagerduty_alert( + chain_id: Option, + class: Option, + client: String, + client_url: Option, + component: Option, + custom_details: Option, + group: Option, + 
+    severity: pagerduty_rs::types::Severity,
+    source: Option<String>,
+    summary: String,
+    timestamp: Option<OffsetDateTime>,
+) -> AlertTrigger<T> {
+    let client = chain_id
+        .map(|x| format!("{} chain #{}", client, x))
+        .unwrap_or_else(|| format!("{} w/o chain", client));
+
+    let source =
+        source.unwrap_or_else(|| gethostname().into_string().unwrap_or("unknown".to_string()));
+
+    let payload = AlertTriggerPayload {
+        severity,
+        summary,
+        source,
+        timestamp,
+        component,
+        group,
+        class,
+        custom_details,
+    };
+
+    AlertTrigger {
+        payload,
+        dedup_key: None,
+        images: None,
+        links: None,
+        client: Some(client),
+        client_url,
+    }
+}

From 17c446b68c479b399f814a567d7b29781372c319 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Tue, 24 Jan 2023 03:25:12 -0800
Subject: [PATCH 50/80] why did cargo upgrade miss all these?

---
 Cargo.lock            | 1322 ++++++++++++++++++++++++-----------------
 web3_proxy/Cargo.toml |    2 +-
 2 files changed, 762 insertions(+), 562 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 47dab722..d1dd7adb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -14,9 +14,9 @@ dependencies = [

 [[package]]
 name = "addr2line"
-version = "0.17.0"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b"
+checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97"
 dependencies = [
  "gimli",
 ]

 [[package]]
 name = "aes"
-version = "0.8.1"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfe0133578c0986e1fe3dfcd4af1cc5b2dd6c3dbf534d69916ce16a2701d40ba"
+checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241"
 dependencies = [
  "cfg-if",
  "cipher 0.4.3",

 [[package]]
 name = "aho-corasick"
-version = "0.7.18"
+version = "0.7.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
+checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac"
 dependencies = [
  "memchr",
 ]

 [[package]]
 name = "aliasable"
 version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd"

 [[package]]
 name = "android_system_properties"
-version = "0.1.4"
+version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7ed72e1635e121ca3e79420540282af22da58be50de153d36f81ddc6b83aa9e"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
 dependencies = [
  "libc",
 ]

 [[package]]
 name = "async-io"
-version = "1.7.0"
+version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5e18f61464ae81cde0a23e713ae8fd299580c54d697a35820cfd0625b8b0e07"
+checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794"
 dependencies = [
+ "async-lock",
+ "autocfg",
  "concurrent-queue",
  "futures-lite",
  "libc",
  "log",
- "once_cell",
  "parking",
  "polling",
  "slab",
  "socket2",
  "waker-fn",
- "winapi",
+ "windows-sys",
 ]

 [[package]]
 name = "async-lock"
-version = "2.5.0"
+version = "2.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e97a171d191782fba31bb902b14ad94e24a68145032b7eedf871ab0bc0d077b6"
+checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685"
 dependencies = [
  "event-listener",
+ "futures-lite",
 ]

 [[package]]
 name = "async-trait"
-version = 
"0.1.57" +version = "0.1.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f" +checksum = "eff18d764974428cf3a9328e23fc5c986f5fbed46e6cd4cdf42544df5d297ec1" dependencies = [ "proc-macro2", "quote", @@ -350,7 +352,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa 1.0.2", + "itoa 1.0.5", "matchit", "memchr", "mime", @@ -412,9 +414,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.65" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61" +checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" dependencies = [ "addr2line", "cc", @@ -468,9 +470,9 @@ checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" @@ -486,9 +488,9 @@ checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "base64ct" -version = "1.0.1" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a32fd6af2b5827bce66c29053ba0e7c42b9dcab01835835058558c10851a46b" +checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" [[package]] name = "bech32" @@ -507,9 +509,9 @@ dependencies = [ [[package]] name = "bit-set" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e11e16035ea35e4e5997b393eacbf6f63983188f7a2ad25bfb13465f5ad59de" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" dependencies = [ "bit-vec", ] @@ -538,9 +540,9 @@ dependencies = [ [[package]] name = "bitvec" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1489fcb93a5bb47da0462ca93ad252ad6af2145cce58d10d46a83931ba9f016b" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium 0.7.0", @@ -550,11 +552,11 @@ dependencies = [ [[package]] name = "blake2" -version = "0.10.4" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -580,9 +582,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" dependencies = [ "generic-array 0.14.6", ] @@ -596,6 +598,51 @@ dependencies = [ "byte-tools", ] +[[package]] +name = "borsh" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" +dependencies = [ + "borsh-derive", + "hashbrown 0.11.2", +] + +[[package]] +name = "borsh-derive" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" +dependencies = [ + "borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate 0.1.5", + "proc-macro2", + "syn", +] + +[[package]] +name = "borsh-derive-internal" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "borsh-schema-derive-internal" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "bs58" version = "0.4.0" @@ -604,15 +651,15 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bumpalo" -version = "3.9.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" +checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" [[package]] name = "byte-slice-cast" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87c5fdd0166095e1d463fc6cc01aa8ce547ad77a4e84d42eb6762b084e28067e" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "byte-tools" @@ -620,6 +667,27 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" +[[package]] +name = "bytecheck" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d11cac2c12b5adc6570dad2ee1b87eff4955dac476fe12d81e5fdd352e52406f" +dependencies = [ + "bytecheck_derive", + "ptr_meta", +] + +[[package]] +name = "bytecheck_derive" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "bytecount" version = "0.6.3" @@ -634,18 +702,18 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" +checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" dependencies = [ "serde", ] [[package]] name = "bzip2" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6afcd980b5f3a45017c57e57a2fcccbb351cc43a356ce117ef760ef8052b89b0" +checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8" dependencies = [ "bzip2-sys", "libc", @@ -662,17 +730,11 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "cache-padded" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" - [[package]] name = "camino" -version = "1.0.9" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "869119e97797867fd90f5e22af7d0bd274bd4635ebb9eb68c04f3f513ae6c412" +checksum = 
"c77df041dc383319cc661b428b6961a005db4d6808d5e12536931b1ca9556055" dependencies = [ "serde", ] @@ -701,22 +763,23 @@ dependencies = [ [[package]] name = "cargo_metadata" -version = "0.15.0" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3abb7553d5b9b8421c6de7cb02606ff15e0c6eea7d8eadd75ef013fd636bec36" +checksum = "982a0cf6a99c350d7246035613882e376d58cebe571785abc5da4f648d53ac0a" dependencies = [ "camino", "cargo-platform", "semver", "serde", "serde_json", + "thiserror", ] [[package]] name = "cc" -version = "1.0.73" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" dependencies = [ "jobserver", ] @@ -738,17 +801,11 @@ dependencies = [ "num-integer", "num-traits", "serde", - "time 0.1.43", + "time 0.1.45", "wasm-bindgen", "winapi", ] -[[package]] -name = "chunked_transfer" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff857943da45f546682664a79488be82e69e43c1a7a2307679ab9afb3a66d2e" - [[package]] name = "cipher" version = "0.3.0" @@ -770,9 +827,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.15" +version = "3.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44bbe24bbd31a185bc2c4f7c2abe80bea13a20d57ee4e55be70ac512bdc76417" +checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" dependencies = [ "atty", "bitflags", @@ -787,9 +844,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "3.2.15" +version = "3.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba52acd3b0a5c33aeada5cdaa3267cdc7c594a98731d4268cdc1532f4264cb4" +checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65" dependencies = [ "heck 0.4.0", "proc-macro-error", @@ -807,6 +864,16 @@ dependencies = [ "os_str_bytes", ] +[[package]] +name = "codespan-reporting" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +dependencies = [ + "termcolor", + "unicode-width", +] + [[package]] name = "coins-bip32" version = "0.7.0" @@ -816,7 +883,7 @@ dependencies = [ "bincode", "bs58", "coins-core", - "digest 0.10.5", + "digest 0.10.6", "getrandom", "hmac", "k256", @@ -837,7 +904,7 @@ dependencies = [ "getrandom", "hex", "hmac", - "pbkdf2 0.11.0", + "pbkdf2", "rand", "sha2 0.10.6", "thiserror", @@ -853,7 +920,7 @@ dependencies = [ "base64 0.12.3", "bech32", "blake2", - "digest 0.10.5", + "digest 0.10.6", "generic-array 0.14.6", "hex", "ripemd", @@ -866,9 +933,9 @@ dependencies = [ [[package]] name = "combine" -version = "4.6.4" +version = "4.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a604e93b79d1808327a6fca85a6f2d69de66461e7620f5a4cbf5fb4d1d7c948" +checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4" dependencies = [ "bytes", "futures-core", @@ -880,11 +947,11 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "1.2.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" +checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" dependencies = [ - "cache-padded", + "crossbeam-utils", ] [[package]] 
@@ -904,15 +971,14 @@ dependencies = [ [[package]] name = "console" -version = "0.15.0" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28b32d32ca44b70c3e4acd7db1babf555fa026e385fb95f18028f88848b3c31" +checksum = "c3d79fbe8970a77e3e34151cc13d3b3e248aa0faaecb9f6091fa07ebefe5ad60" dependencies = [ "encode_unicode", + "lazy_static", "libc", - "once_cell", - "terminal_size", - "winapi", + "windows-sys", ] [[package]] @@ -923,9 +989,9 @@ checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" [[package]] name = "const-oid" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "722e23542a15cea1f65d4a1419c4cfd7a26706c70871a13a04238ca3f40f1661" +checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" [[package]] name = "constant_time_eq" @@ -975,9 +1041,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.2" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] @@ -993,9 +1059,9 @@ dependencies = [ [[package]] name = "crc-catalog" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d0165d2900ae6778e36e80bbc4da3b5eefccee9ba939761f9c2882a5d9af3ff" +checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" [[package]] name = "crc32fast" @@ -1018,9 +1084,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -1029,23 +1095,22 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.10" +version = "0.9.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1" +checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", "memoffset", - "once_cell", "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.3.6" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd42583b04998a5363558e5f9291ee5a5ff6b49944332103f251e7479a82aa7" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ "cfg-if", "crossbeam-utils", @@ -1053,12 +1118,11 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.8" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" +checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" dependencies = [ "cfg-if", - "lazy_static", ] [[package]] @@ -1079,9 +1143,9 @@ dependencies = [ [[package]] name = "crypto-bigint" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f2b443d17d49dad5ef0ede301c3179cc923b8822f3393b4d2c28c269dd4a122" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array 
0.14.6", "rand_core", @@ -1091,9 +1155,9 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array 0.14.6", "typenum", @@ -1108,6 +1172,50 @@ dependencies = [ "cipher 0.4.3", ] +[[package]] +name = "cxx" +version = "1.0.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b61a7545f753a88bcbe0a70de1fcc0221e10bfc752f576754fa91e663db1622e" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f464457d494b5ed6905c63b0c4704842aba319084a0a3561cdc1359536b53200" +dependencies = [ + "cc", + "codespan-reporting", + "once_cell", + "proc-macro2", + "quote", + "scratch", + "syn", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43c7119ce3a3701ed81aca8410b9acf6fc399d2629d057b87e2efa4e63a3aaea" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65e07508b90551e610910fa648a1878991d367064997a596135b86df30daf07e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "deadpool" version = "0.9.5" @@ -1177,11 +1285,11 @@ dependencies = [ [[package]] name = "der" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dd2ae565c0a381dde7fade45fce95984c568bdcb4700a4fdbe3175e0380b2f" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ - "const-oid 0.9.0", + "const-oid 0.9.1", "zeroize", ] @@ -1223,9 +1331,9 @@ dependencies = [ [[package]] name = "diff" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" [[package]] name = "digest" @@ -1247,11 +1355,11 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.2", + "block-buffer 0.10.3", "crypto-common", "subtle", ] @@ -1303,11 +1411,11 @@ checksum = "0bd4b30a6560bbd9b4620f4de34c3f14f60848e58a9b7216801afcb4c7b31c3c" [[package]] name = "ecdsa" -version = "0.14.3" +version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bd46e0c364655e5baf2f5e99b603e7a09905da9966d7928d7470af393b28670" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ - "der 0.6.0", + "der 0.6.1", "elliptic-curve", "rfc6979", "signature", @@ -1315,9 +1423,9 @@ dependencies = [ [[package]] name = "either" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = 
"90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" [[package]] name = "elliptic-curve" @@ -1326,9 +1434,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ "base16ct", - "crypto-bigint 0.4.8", - "der 0.6.0", - "digest 0.10.5", + "crypto-bigint 0.4.9", + "der 0.6.1", + "digest 0.10.6", "ff", "generic-array 0.14.6", "group", @@ -1423,12 +1531,12 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" dependencies = [ - "aes 0.8.1", + "aes 0.8.2", "ctr", - "digest 0.10.5", + "digest 0.10.6", "hex", "hmac", - "pbkdf2 0.11.0", + "pbkdf2", "rand", "scrypt", "serde", @@ -1441,9 +1549,9 @@ dependencies = [ [[package]] name = "ethabi" -version = "17.2.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4966fba78396ff92db3b817ee71143eccd98acf0f876b8d600e585a670c5d1b" +checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" dependencies = [ "ethereum-types", "hex", @@ -1458,9 +1566,9 @@ dependencies = [ [[package]] name = "ethbloom" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11da94e443c60508eb62cf256243a64da87304c2802ac2528847f79d750007ef" +checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" dependencies = [ "crunchy", "fixed-hash", @@ -1473,9 +1581,9 @@ dependencies = [ [[package]] name = "ethereum-types" -version = "0.13.1" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2827b94c556145446fcce834ca86b7abf0c39a805883fe20e72c5bfdb5a0dc6" +checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" dependencies = [ "ethbloom", "fixed-hash", @@ -1505,9 +1613,9 @@ dependencies = [ [[package]] name = "ethers-addressbook" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b8c9da375d178d59a50f9a5d31ede4475a0f60cd5184c3db00f172b25f7e11" +checksum = "fe4be54dd2260945d784e06ccdeb5ad573e8f1541838cee13a1ab885485eaa0b" dependencies = [ "ethers-core", "once_cell", @@ -1517,9 +1625,9 @@ dependencies = [ [[package]] name = "ethers-contract" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "002a0d58a7d921b496f5f19b5b9508d01d25fbe25078286b1fcb6f4e7562acf7" +checksum = "e9c3c3e119a89f0a9a1e539e7faecea815f74ddcf7c90d0b00d1f524db2fdc9c" dependencies = [ "ethers-contract-abigen", "ethers-contract-derive", @@ -1536,9 +1644,9 @@ dependencies = [ [[package]] name = "ethers-contract-abigen" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "486f389525de61c1c4807fddc804a151ca3c5a5b6f2dc759689424777b7ba617" +checksum = "3d4e5ad46aede34901f71afdb7bb555710ed9613d88d644245c657dc371aa228" dependencies = [ "Inflector", "cfg-if", @@ -1554,6 +1662,7 @@ dependencies = [ "serde", "serde_json", "syn", + "toml 0.5.11", "url", "walkdir", ] @@ -1575,13 +1684,13 @@ dependencies = [ [[package]] name = "ethers-core" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06338c311c6a0a7ed04877d0fb0f0d627ed390aaa3429b4e041b8d17348a506d" +checksum = "ade3e9c97727343984e1ceada4fdab11142d2ee3472d2c67027d56b1251d4f15" 
dependencies = [ "arrayvec", "bytes", - "cargo_metadata 0.15.0", + "cargo_metadata 0.15.2", "chrono", "convert_case 0.6.0", "elliptic-curve", @@ -1595,7 +1704,6 @@ dependencies = [ "rand", "rlp", "rlp-derive", - "rust_decimal", "serde", "serde_json", "strum", @@ -1607,9 +1715,9 @@ dependencies = [ [[package]] name = "ethers-etherscan" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c3acd2c48d240ae13a4ed3ac88dc15b31bc1ba9513a072e080d4a32fda1637b" +checksum = "a9713f525348e5dde025d09b0a4217429f8074e8ff22c886263cc191e87d8216" dependencies = [ "ethers-core", "getrandom", @@ -1624,9 +1732,9 @@ dependencies = [ [[package]] name = "ethers-middleware" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51bc2555522673e8a890b79615e04dd9ef40f0ab0a73e745024fdda15710d69" +checksum = "e71df7391b0a9a51208ffb5c7f2d068900e99d6b3128d3a4849d138f194778b7" dependencies = [ "async-trait", "auto_impl 0.5.0", @@ -1650,13 +1758,13 @@ dependencies = [ [[package]] name = "ethers-providers" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cc65f79e2168aac5ca4a659bb6639c78164a6a5b18c954cc7699b6ce5ac6275" +checksum = "a1a9e0597aa6b2fdc810ff58bc95e4eeaa2c219b3e615ed025106ecb027407d8" dependencies = [ "async-trait", "auto_impl 1.0.1", - "base64 0.13.0", + "base64 0.13.1", "ethers-core", "futures-channel", "futures-core", @@ -1687,9 +1795,9 @@ dependencies = [ [[package]] name = "ethers-signers" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f97da069cd77dd91a0a7f0c979f063a4bf9d2533b277ff5ccb19b7ac348376" +checksum = "3f41ced186867f64773db2e55ffdd92959e094072a1d09a5e5e831d443204f98" dependencies = [ "async-trait", "coins-bip32", @@ -1705,9 +1813,9 @@ dependencies = [ [[package]] name = "ethers-solc" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ed856e3e0d07a0ffbc79157e3dd0ed10b45b6736eff6a878d40a1e57f224988" +checksum = "cbe9c0a6d296c57191e5f8a613a3b5e816812c28f4a28d6178a17c21db903d77" dependencies = [ "cfg-if", "dunce", @@ -1737,9 +1845,9 @@ dependencies = [ [[package]] name = "event-listener" -version = "2.5.2" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77f3309417938f28bf8228fcff79a4a37103981e3e186d2ccd19c74b38f4eb71" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "eyre" @@ -1759,9 +1867,9 @@ checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" [[package]] name = "fastrand" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" dependencies = [ "instant", ] @@ -1777,9 +1885,9 @@ dependencies = [ [[package]] name = "ff" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df689201f395c6b90dfe87127685f8dbfc083a5e779e613575d8bd7314300c3e" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ "rand_core", "subtle", @@ -1787,21 +1895,21 @@ dependencies = [ [[package]] name = "filetime" -version = "0.2.17" +version = "0.2.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94a7bbaa59354bc20dd75b67f23e2797b4490e9d6928203fb105c79e448c86c" +checksum = "4e884668cd0c7480504233e951174ddc3b382f7c2666e3b7310b5c4e7b0c37f9" dependencies = [ "cfg-if", "libc", "redox_syscall", - "windows-sys 0.36.1", + "windows-sys", ] [[package]] name = "fixed-hash" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", "rand", @@ -1811,15 +1919,15 @@ dependencies = [ [[package]] name = "fixedbitset" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "279fb028e20b3c4c320317955b77c5e0c9701f05a1d309905d6fc702cdc5053e" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" +checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" dependencies = [ "crc32fast", "miniz_oxide", @@ -1835,7 +1943,7 @@ dependencies = [ "futures-sink", "nanorand", "pin-project", - "spin 0.9.3", + "spin 0.9.4", ] [[package]] @@ -1954,9 +2062,9 @@ dependencies = [ [[package]] name = "futures-intrusive" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62007592ac46aa7c2b6416f7deb9a8a8f63a01e0f1d6e1787d5630170db2b63e" +checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5" dependencies = [ "futures-core", "lock_api", @@ -1986,13 +2094,12 @@ dependencies = [ [[package]] name = "futures-locks" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eb42d4fb72227be5778429f9ef5240a38a358925a49f05b5cf702ce7c7e558a" +checksum = "45ec6fe3675af967e67c5536c0b9d44e34e6c52f86bedc4ea49c5317b8e94d06" dependencies = [ "futures-channel", "futures-task", - "tokio", ] [[package]] @@ -2082,22 +2189,22 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] [[package]] name = "gimli" -version = "0.26.1" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" +checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec" [[package]] name = "glob" @@ -2107,9 +2214,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "group" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7391856def869c1c81063a03457c676fbcd419709c3dfb33d8d319de484b154d" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff", "rand_core", @@ -2118,9 +2225,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.13" +version = 
"0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" +checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" dependencies = [ "bytes", "fnv", @@ -2149,6 +2256,15 @@ dependencies = [ "thiserror", ] +[[package]] +name = "hashbrown" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +dependencies = [ + "ahash 0.7.6", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -2179,9 +2295,9 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452c155cb93fecdfb02a73dd57b5d8e442c2063bd7aac72f1bc5e4263a43086" +checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" dependencies = [ "hashbrown 0.12.3", ] @@ -2192,7 +2308,7 @@ version = "7.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f19b9f54f7c7f55e31401bb647626ce0cf0f67b0004982ce815b3ee72a02aa8" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "byteorder", "crossbeam-channel", "flate2", @@ -2202,18 +2318,18 @@ dependencies = [ [[package]] name = "headers" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d" +checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "bitflags", "bytes", "headers-core", "http", "httpdate", "mime", - "sha-1", + "sha1", ] [[package]] @@ -2273,7 +2389,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -2304,7 +2420,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 1.0.2", + "itoa 1.0.5", ] [[package]] @@ -2326,9 +2442,9 @@ checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" [[package]] name = "httparse" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -2344,9 +2460,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.19" +version = "0.14.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" +checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" dependencies = [ "bytes", "futures-channel", @@ -2357,7 +2473,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.2", + "itoa 1.0.5", "pin-project-lite", "socket2", "tokio", @@ -2368,9 +2484,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", 
@@ -2394,17 +2510,28 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.46" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad2bfd338099682614d3ee3fe0cd72e0b6a41ca6a87f6a74a3bd593c91650501" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" dependencies = [ "android_system_properties", "core-foundation-sys", + "iana-time-zone-haiku", "js-sys", "wasm-bindgen", "winapi", ] +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +dependencies = [ + "cxx", + "cxx-build", +] + [[package]] name = "idna" version = "0.3.0" @@ -2435,9 +2562,9 @@ dependencies = [ [[package]] name = "impl-serde" -version = "0.3.2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" +checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" dependencies = [ "serde", ] @@ -2461,9 +2588,9 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "indexmap" -version = "1.9.1" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg", "hashbrown 0.12.3", @@ -2476,7 +2603,7 @@ version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d207dc617c7a380ab07ff572a6e52fa202a2a8f355860ac9c38e23f8196be1b" dependencies = [ - "console 0.15.0", + "console 0.15.5", "lazy_static", "number_prefix", "regex", @@ -2525,12 +2652,12 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7d367024b3f3414d8e01f437f704f41a9f64ab36f9067fa73e526ad4c763c87" +checksum = "e7d6c6f8c91b4b9ed43484ad1a938e393caf35960fce7f82a040497207bd8e9e" dependencies = [ "libc", - "windows-sys 0.42.0", + "windows-sys", ] [[package]] @@ -2551,14 +2678,14 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aae5bc6e2eb41c9def29a3e0f1306382807764b9b53112030eff57435667352d" +checksum = "28dfb6c8100ccc63462345b67d1bbc3679177c75ee4bf59bf29c8b1d110b8189" dependencies = [ "hermit-abi 0.2.6", "io-lifetimes", "rustix", - "windows-sys 0.42.0", + "windows-sys", ] [[package]] @@ -2578,33 +2705,33 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.2" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" +checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" [[package]] name = "jobserver" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" +checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.59" +version = "0.3.60" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "258451ab10b34f8af53416d1fdab72c22e805f0c92a1136d59470ec0b11138b2" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" dependencies = [ "wasm-bindgen", ] [[package]] name = "k256" -version = "0.11.3" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c8a5a96d92d849c4499d99461da81c9cdc1467418a8ed2aaeb407e8d85940ed" +checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" dependencies = [ "cfg-if", "ecdsa", @@ -2615,15 +2742,18 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838" +checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768" +dependencies = [ + "cpufeatures", +] [[package]] name = "kqueue" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d6112e8f37b59803ac47a42d14f1f3a59bbf72fc6857ffc5be455e28a691f8e" +checksum = "2c8fc60ba15bf51257aa9807a48a61013db043fcf3a78cb0d916e8e396dcad98" dependencies = [ "kqueue-sys", "libc", @@ -2682,27 +2812,36 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.137" +version = "0.2.139" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" +checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" [[package]] name = "libm" -version = "0.2.2" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33a33a362ce288760ec6a508b94caaec573ae7d3bbbd91b87aa0bad4456839db" +checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" + +[[package]] +name = "link-cplusplus" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" +dependencies = [ + "cc", +] [[package]] name = "linux-raw-sys" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f9f08d8963a6c613f4b1a78f4f4a4dbfadf8e6545b2d72861731e4858b8b47f" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "lock_api" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f80bf5aacaf25cbfc8210d1cfb718f2bf3b11c4c54e5afe36c236853a8ec390" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ "autocfg", "scopeguard", @@ -2744,7 +2883,7 @@ version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -2755,9 +2894,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.6.5" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" dependencies = [ "autocfg", ] @@ -2814,23 +2953,23 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = 
"miniz_oxide" -version = "0.5.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b29bd4bc3f33391105ebee3589c19197c4271e3e5a9ec9bfe8127eeff8f082" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" +checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.36.1", + "windows-sys", ] [[package]] @@ -2893,14 +3032,23 @@ checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54" [[package]] name = "nom" -version = "7.1.1" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", ] +[[package]] +name = "nom8" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8" +dependencies = [ + "memchr", +] + [[package]] name = "nonempty" version = "0.7.0" @@ -2962,9 +3110,9 @@ dependencies = [ [[package]] name = "num-bigint-dig" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "566d173b2f9406afbc5510a90925d5a2cd80cae4605631f1212303df265de011" +checksum = "2399c9463abc5f909349d8aa9ba080e0b88b3ce2885389b60b993f39b1a56905" dependencies = [ "byteorder", "lazy_static", @@ -2979,9 +3127,9 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19" +checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" dependencies = [ "num-traits", ] @@ -3031,11 +3179,11 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.1" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" dependencies = [ - "hermit-abi 0.1.19", + "hermit-abi 0.2.6", "libc", ] @@ -3047,18 +3195,18 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.28.4" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.15.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" +checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" [[package]] name = "opaque-debug" @@ -3074,9 +3222,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "open-fastrlp" -version = "0.1.2" +version = "0.1.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "131de184f045153e72c537ef4f1d57babddf2a897ca19e67bdff697aebba7f3d" +checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" dependencies = [ "arrayvec", "auto_impl 1.0.1", @@ -3155,26 +3303,25 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.1.0" +version = "6.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" +checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" [[package]] name = "ouroboros" -version = "0.15.0" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f31a3b678685b150cba82b702dcdc5e155893f63610cf388d30cd988d4ca2bf" +checksum = "dfbb50b356159620db6ac971c6d5c9ab788c9cc38a6f49619fca2a27acb062ca" dependencies = [ "aliasable", "ouroboros_macro", - "stable_deref_trait", ] [[package]] name = "ouroboros_macro" -version = "0.15.0" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084fd65d5dd8b3772edccb5ffd1e4b7eba43897ecd0f9401e330e8c542959408" +checksum = "4a0d9d1a6191c4f391f87219d1ea42b23f09ee84d64763cd05ee6ea88d9f384d" dependencies = [ "Inflector", "proc-macro-error", @@ -3203,12 +3350,12 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.1.2" +version = "3.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8b44461635bbb1a0300f100a841e571e7d919c81c73075ef5d152ffdb521066" +checksum = "e7ab01d0f889e957861bc65888d5ccbe82c158d0270136ba46820d43837cdf72" dependencies = [ "arrayvec", - "bitvec 1.0.0", + "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", "parity-scale-codec-derive", @@ -3217,11 +3364,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.1.2" +version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c45ed1f39709f5a89338fab50e59816b2e8815f5bb58276e7ddf9afd495f73f8" +checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.3.0", "proc-macro2", "quote", "syn", @@ -3241,7 +3388,7 @@ checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", - "parking_lot_core 0.8.5", + "parking_lot_core 0.8.6", ] [[package]] @@ -3251,14 +3398,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.3", + "parking_lot_core 0.9.6", ] [[package]] name = "parking_lot_core" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ "cfg-if", "instant", @@ -3270,9 +3417,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.3" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" +checksum = "ba1ef8814b5c993410bb3adfad7a5ed269563e4a2f90c41f5d85be7fb47133bf" dependencies = [ "backtrace", "cfg-if", @@ -3281,18 +3428,7 @@ dependencies = [ "redox_syscall", "smallvec", "thread-id", - "windows-sys 0.36.1", -] - -[[package]] -name = 
"password-hash" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d791538a6dcc1e7cb7fe6f6b58aca40e7f79403c45b2bc274008b5e647af1d8" -dependencies = [ - "base64ct", - "rand_core", - "subtle", + "windows-sys", ] [[package]] @@ -3308,9 +3444,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.7" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" +checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" [[package]] name = "path-slash" @@ -3318,27 +3454,15 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e91099d4268b0e11973f036e885d652fb0b21fedcf69738c627f94db6a44f42" -[[package]] -name = "pbkdf2" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" -dependencies = [ - "digest 0.10.5", - "hmac", - "password-hash 0.3.2", - "sha2 0.10.6", -] - [[package]] name = "pbkdf2" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", "hmac", - "password-hash 0.4.2", + "password-hash", "sha2 0.10.6", ] @@ -3359,9 +3483,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.2.1" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69486e2b8c2d2aeb9762db7b4e00b0331156393555cff467f4163ff06821eef8" +checksum = "4257b4a04d91f7e9e6290be5d3da4804dd5784fafde3a497d73eb2b4a158c30a" dependencies = [ "thiserror", "ucd-trie", @@ -3369,9 +3493,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.2.1" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13570633aff33c6d22ce47dd566b10a3b9122c2fe9d8e7501895905be532b91" +checksum = "241cda393b0cdd65e62e07e12454f1f25d57017dcc514b1514cd3c4645e3a0a6" dependencies = [ "pest", "pest_generator", @@ -3379,9 +3503,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.2.1" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3c567e5702efdc79fb18859ea74c3eb36e14c43da7b8c1f098a4ed6514ec7a0" +checksum = "46b53634d8c8196302953c74d5352f33d0c512a9499bd2ce468fc9f4128fa27c" dependencies = [ "pest", "pest_meta", @@ -3392,13 +3516,13 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.2.1" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eb32be5ee3bbdafa8c7a18b0a8a8d962b66cfa2ceee4037f49267a50ee821fe" +checksum = "0ef4f1332a8d4678b41966bb4cc1d0676880e84183a1ecc3f4b69f03e99c7a51" dependencies = [ "once_cell", "pest", - "sha-1", + "sha2 0.10.6", ] [[package]] @@ -3473,18 +3597,18 @@ checksum = "db8bcd96cb740d03149cbad5518db9fd87126a10ab519c011893b1754134c468" [[package]] name = "pin-project" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.11" +version = "1.0.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", @@ -3531,34 +3655,35 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ - "der 0.6.0", + "der 0.6.1", "spki 0.6.0", ] [[package]] name = "pkg-config" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "polling" -version = "2.2.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685404d509889fade3e86fe3a5803bca2ec09b0c0778d5ada6ec8bf7a8de5259" +checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" dependencies = [ + "autocfg", "cfg-if", "libc", "log", "wepoll-ffi", - "winapi", + "windows-sys", ] [[package]] name = "ppv-lite86" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "precomputed-hash" @@ -3568,9 +3693,9 @@ checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] name = "primitive-types" -version = "0.11.1" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a" +checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" dependencies = [ "fixed-hash", "impl-codec", @@ -3582,12 +3707,21 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" dependencies = [ - "thiserror", - "toml", + "toml 0.5.11", +] + +[[package]] +name = "proc-macro-crate" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66618389e4ec1c7afe67d51a9bf34ff9236480f8d51e7489b7d5ab0303c13f34" +dependencies = [ + "once_cell", + "toml_edit", ] [[package]] @@ -3616,15 +3750,15 @@ dependencies = [ [[package]] name = "proc-macro-hack" -version = "0.5.19" +version = "0.5.20+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" +checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.47" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +checksum = "6ef7d57beacfaf2d8aee5937dab7b7f28de3cb8b1828479bb5de2a7106f2bae2" dependencies = [ "unicode-ident", ] @@ -3640,6 +3774,26 @@ dependencies = [ "winapi", ] +[[package]] +name = "ptr_meta" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" +dependencies = [ + "ptr_meta_derive", +] + +[[package]] +name = "ptr_meta_derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "pulldown-cmark" version = "0.9.2" @@ -3653,9 +3807,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.18" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" +checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" dependencies = [ "proc-macro2", ] @@ -3695,9 +3849,9 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] @@ -3713,21 +3867,19 @@ dependencies = [ [[package]] name = "rayon" -version = "1.5.3" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" +checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" dependencies = [ - "autocfg", - "crossbeam-deque", "either", "rayon-core", ] [[package]] name = "rayon-core" -version = "1.9.3" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" +checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -3737,15 +3889,15 @@ dependencies = [ [[package]] name = "redis" -version = "0.22.1" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "513b3649f1a111c17954296e4a3b9eecb108b766c803e2b99f179ebe27005985" +checksum = "aa8455fa3621f6b41c514946de66ea0531f57ca017b2e6c7cc368035ea5b46df" dependencies = [ "async-trait", "bytes", "combine", "futures-util", - "itoa 1.0.2", + "itoa 1.0.5", "percent-encoding", "pin-project-lite", "ryu", @@ -3765,9 +3917,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] @@ -3805,9 +3957,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.27" +version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" [[package]] name = "remove_dir_all" @@ -3818,6 +3970,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "rend" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79af64b4b6362ffba04eef3a4e10829718a4896dac19daa741851c86781edf95" +dependencies = [ + "bytecheck", +] + [[package]] name = "reqwest" version = "0.11.14" @@ -3868,11 +4029,11 @@ checksum = 
"4389f1d5789befaf6029ebd9f7dac4af7f7e3d61b69d4f30e2ac02b57e7712b0" [[package]] name = "rfc6979" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88c86280f057430a52f4861551b092a01b419b8eacefc7c995eacb9dc132fe32" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ - "crypto-bigint 0.4.8", + "crypto-bigint 0.4.9", "hmac", "zeroize", ] @@ -3894,18 +4055,43 @@ dependencies = [ [[package]] name = "ripemd" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1facec54cb5e0dc08553501fa740091086d0259ad0067e0d4103448e4cb22ed3" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", +] + +[[package]] +name = "rkyv" +version = "0.7.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15" +dependencies = [ + "bytecheck", + "hashbrown 0.12.3", + "ptr_meta", + "rend", + "rkyv_derive", + "seahash", +] + +[[package]] +name = "rkyv_derive" +version = "0.7.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eaedadc88b53e36dd32d940ed21ae4d850d5916f2581526921f553a72ac34c4" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] name = "rlp" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "999508abb0ae792aabed2460c45b89106d97fe4adac593bdaef433c2605847b5" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" dependencies = [ "bytes", "rustc-hex", @@ -3929,7 +4115,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b" dependencies = [ "byteorder", - "digest 0.10.5", + "digest 0.10.6", "num-bigint-dig", "num-integer", "num-iter", @@ -3944,13 +4130,20 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.26.1" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee9164faf726e4f3ece4978b25ca877ddc6802fa77f38cdccb32c7f805ecd70c" +checksum = "7fe32e8c89834541077a5c5bbe5691aa69324361e27e6aeb3552a737db4a70c8" dependencies = [ "arrayvec", + "borsh", + "bytecheck", + "byteorder", + "bytes", "num-traits", + "rand", + "rkyv", "serde", + "serde_json", ] [[package]] @@ -3982,23 +4175,23 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.3" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b1fbb4dfc4eb1d390c02df47760bb19a84bb80b301ecc947ab5406394d8223e" +checksum = "d4fdebc4b395b7fbb9ab11e462e20ed9051e7b16e42d24042c776eca0ac81b03" dependencies = [ "bitflags", "errno", "io-lifetimes", "libc", "linux-raw-sys", - "windows-sys 0.42.0", + "windows-sys", ] [[package]] name = "rustls" -version = "0.20.6" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", @@ -4008,24 +4201,24 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" +checksum = 
"d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64 0.13.0", + "base64 0.21.0", ] [[package]] name = "rustversion" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8" +checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" [[package]] name = "ryu" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" +checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" [[package]] name = "salsa20" @@ -4047,9 +4240,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.2.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "333af15b02563b8182cd863f925bd31ef8fa86a0e095d30c091956057d436153" +checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" dependencies = [ "cfg-if", "derive_more", @@ -4059,11 +4252,11 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.2.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53f56acbd0743d29ffa08f911ab5397def774ad01bab3786804cf6ee057fb5e1" +checksum = "303959cf613a6f6efd19ed4b4ad5bf79966a13352716299ad532cfb115f4205c" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.3.0", "proc-macro2", "quote", "syn", @@ -4075,7 +4268,7 @@ version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "windows-sys 0.42.0", + "windows-sys", ] [[package]] @@ -4093,6 +4286,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "scratch" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" + [[package]] name = "scrypt" version = "0.10.0" @@ -4100,7 +4299,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f9e24d2b632954ded8ab2ef9fea0a0c769ea56ea98bddbafbad22caeeadf45d" dependencies = [ "hmac", - "pbkdf2 0.11.0", + "pbkdf2", "salsa20", "sha2 0.10.6", ] @@ -4190,9 +4389,9 @@ dependencies = [ [[package]] name = "sea-query" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3497a83851c4be4d1fdc8cbc7215105b828a2a944abb64dd2e0ba233f2ce187f" +checksum = "a4f0fc4d8e44e1d51c739a68d336252a18bc59553778075d5e32649be6ec92ed" dependencies = [ "chrono", "rust_decimal", @@ -4204,9 +4403,9 @@ dependencies = [ [[package]] name = "sea-query-binder" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dddc5c3889bbc63b7e3374d3a7494551eb801c45f2f9cb460bfa4921653563d" +checksum = "9c2585b89c985cfacfe0ec9fc9e7bb055b776c1a2581c4e3c6185af2b8bf8865" dependencies = [ "chrono", "rust_decimal", @@ -4232,9 +4431,9 @@ dependencies = [ [[package]] name = "sea-schema" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a2e6fd7fb2c4adc28f1b8fb29944fa5e6a77968df57f32b7146c9ae10fb2f2b" +checksum = 
"38d5fda574d980e9352b6c7abd6fc75697436fe0078cac2b548559b52643ad3b" dependencies = [ "futures", "sea-query", @@ -4275,6 +4474,12 @@ dependencies = [ "syn", ] +[[package]] +name = "seahash" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" + [[package]] name = "sec1" version = "0.3.0" @@ -4282,7 +4487,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ "base16ct", - "der 0.6.0", + "der 0.6.1", "generic-array 0.14.6", "pkcs8 0.9.0", "subtle", @@ -4314,9 +4519,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" +checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" dependencies = [ "serde", ] @@ -4445,9 +4650,9 @@ dependencies = [ [[package]] name = "serde-aux" -version = "4.0.0" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c79c1a5a310c28bf9f7a4b9bd848553051120d80a5952f993c7eb62f6ed6e4c5" +checksum = "c599b3fd89a75e0c18d6d2be693ddb12cccaf771db4ff9e39097104808a014c0" dependencies = [ "serde", "serde_json", @@ -4470,16 +4675,16 @@ version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" dependencies = [ - "itoa 1.0.2", + "itoa 1.0.5", "ryu", "serde", ] [[package]] name = "serde_path_to_error" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "184c643044780f7ceb59104cef98a5a6f12cb2288a7bc701ab93a362b49fd47d" +checksum = "26b04f22b563c91331a10074bda3dd5492e3cc39d56bd557e91c0af42b6c7341" dependencies = [ "serde", ] @@ -4499,6 +4704,15 @@ dependencies = [ "snafu", ] +[[package]] +name = "serde_spanned" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c68e921cef53841b8925c2abadd27c9b891d9613bdc43d6b823062866df38e8" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -4506,31 +4720,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.2", + "itoa 1.0.5", "ryu", "serde", ] [[package]] name = "sha-1" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] name = "sha1" -version = "0.10.1" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77f4e7f65455545c2153c1253d25056825e77ee2533f0e41deb65a93a34852f" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -4566,16 +4780,16 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] name = "sha3" -version = 
"0.10.4" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaedf34ed289ea47c2b741bb72e5357a209512d67bcd4bda44359e5bf0470f56" +checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", "keccak", ] @@ -4599,11 +4813,11 @@ dependencies = [ [[package]] name = "signature" -version = "1.5.0" +version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f054c6c1a6e95179d6f23ed974060dcefb2d9388bb7256900badad682c499de4" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", "rand_core", ] @@ -4682,9 +4896,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", @@ -4711,9 +4925,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spin" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c530c2b0d0bf8b69304b39fe2001993e267461948b890cd037d8ad4293fa1a0d" +checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09" dependencies = [ "lock_api", ] @@ -4735,14 +4949,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", - "der 0.6.0", + "der 0.6.1", ] [[package]] name = "sqlformat" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f87e292b4291f154971a43c3774364e2cbcaec599d3f5bf6fa9d122885dbc38a" +checksum = "0c12bc9199d1db8234678b7051747c07f517cdcf019262d1847b94ec8b1aee3e" dependencies = [ "itertools", "nom", @@ -4773,7 +4987,7 @@ dependencies = [ "chrono", "crc", "crossbeam-queue", - "digest 0.10.5", + "digest 0.10.6", "dotenvy", "either", "event-listener", @@ -4785,7 +4999,7 @@ dependencies = [ "hashlink", "hex", "indexmap", - "itoa 1.0.2", + "itoa 1.0.5", "libc", "log", "memchr", @@ -4845,12 +5059,6 @@ dependencies = [ "tokio-rustls", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "static_assertions" version = "1.1.0" @@ -4888,18 +5096,18 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strum" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96acfc1b70604b8b2f1ffa4c57e59176c7dbb05d556c71ecd2f5498a1dee7f8" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" dependencies = [ "strum_macros", ] [[package]] name = "strum_macros" -version = "0.24.0" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6878079b17446e4d3eba6192bb0a2950d5b14f0ed8424b852310e5a94345d0ef" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.0", "proc-macro2", @@ -4916,9 +5124,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = 
"svm-rs" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4cdcf91153dc0e4e0637f26f042ada32a3b552bc8115935c7bf96f80132b0a" +checksum = "e18bbb2b229a2cc0d8ba58603adb0e460ad49a3451b1540fd6f7a5d37fd03b80" dependencies = [ "anyhow", "cfg-if", @@ -4947,9 +5155,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.105" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" +checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" dependencies = [ "proc-macro2", "quote", @@ -5012,9 +5220,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] @@ -5031,24 +5239,24 @@ dependencies = [ [[package]] name = "textwrap" -version = "0.15.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" +checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" dependencies = [ "proc-macro2", "quote", @@ -5085,11 +5293,12 @@ dependencies = [ [[package]] name = "time" -version = "0.1.43" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" dependencies = [ "libc", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi", ] @@ -5099,7 +5308,7 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ - "itoa 1.0.2", + "itoa 1.0.5", "serde", "time-core", "time-macros", @@ -5162,14 +5371,14 @@ dependencies = [ "socket2", "tokio-macros", "tracing", - "windows-sys 0.42.0", + "windows-sys", ] [[package]] name = "tokio-macros" -version = "1.7.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", @@ -5239,9 +5448,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", 
"futures-core", @@ -5260,6 +5469,40 @@ dependencies = [ "serde", ] +[[package]] +name = "toml" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb9d890e4dc9298b70f740f615f2e05b9db37dce531f6b24fb77ac993f9f217" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "729bfd096e40da9c001f778f5cdecbd2957929a24e10e5883d9392220a751581" +dependencies = [ + "indexmap", + "nom8", + "serde", + "serde_spanned", + "toml_datetime", +] + [[package]] name = "tower" version = "0.4.13" @@ -5303,9 +5546,9 @@ checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" @@ -5382,15 +5625,15 @@ dependencies = [ [[package]] name = "triomphe" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe1b3800b35f9b936c28dc59dbda91b195371269396784d931fe2a5a2be3d2f" +checksum = "f1ee9bd9239c339d714d657fac840c6d2a4f9c45f4f9ec7b0975113458be78db" [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "tungstenite" @@ -5398,7 +5641,7 @@ version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "byteorder", "bytes", "http", @@ -5419,7 +5662,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "byteorder", "bytes", "http", @@ -5434,21 +5677,21 @@ dependencies = [ [[package]] name = "typenum" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "ucd-trie" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89570599c4fe5585de2b388aab47e99f7fa4e9238a1399f707a02e356058141c" +checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" [[package]] name = "uint" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f03af7ccf01dd611cc450a0d10dbc9b745770d096473e2faf0ca6e2d66d1e0" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" dependencies = [ "byteorder", "crunchy", @@ -5486,36 +5729,36 @@ dependencies = [ [[package]] name = 
"unicode-bidi" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" [[package]] name = "unicode-ident" -version = "1.0.0" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" +checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99" +checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a" [[package]] name = "unicode-width" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "unicode-xid" @@ -5537,12 +5780,11 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "ureq" -version = "2.5.0" +version = "2.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97acb4c28a254fd7a4aeec976c46a7fa404eac4d7c134b30c75144846d7cb8f" +checksum = "338b31dd1314f68f3aabf3ed57ab922df95ffcd902476ca7ba3c4ce7b908c46d" dependencies = [ - "base64 0.13.0", - "chunked_transfer", + "base64 0.13.1", "log", "once_cell", "rustls", @@ -5636,9 +5878,9 @@ dependencies = [ [[package]] name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasi" @@ -5648,9 +5890,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7652e3f6c4706c8d9cd54832c4a4ccb9b5336e2c3bd154d5cccfbf1c1f5f7d" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -5658,9 +5900,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "662cd44805586bd52971b9586b1df85cdbbd9112e4ef4d8f41559c334dc6ac3f" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" dependencies = [ "bumpalo", "log", @@ -5673,9 +5915,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.30" +version = "0.4.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f741de44b75e14c35df886aff5f1eb73aa114fa5d4d00dcd37b5e01259bf3b2" +checksum = 
"23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" dependencies = [ "cfg-if", "js-sys", @@ -5685,9 +5927,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b260f13d3012071dfb1512849c033b1925038373aea48ced3012c09df952c602" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5695,9 +5937,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" dependencies = [ "proc-macro2", "quote", @@ -5708,9 +5950,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6598dd0bd3c7d51095ff6531a5b23e02acdc81804e30d8f07afb77b7215a140a" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "wasm-timer" @@ -5729,9 +5971,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.57" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" dependencies = [ "js-sys", "wasm-bindgen", @@ -5788,7 +6030,7 @@ dependencies = [ "time 0.3.17", "tokio", "tokio-stream", - "toml", + "toml 0.6.0", "tower", "tower-http", "ulid", @@ -5808,9 +6050,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.5" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368bfe657969fb01238bb756d351dcade285e0f6fcbd36dcb23359a5169975be" +checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ "webpki", ] @@ -5862,25 +6104,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04662ed0e3e5630dfa9b26e4cb823b817f1a9addda855d973a9458c236556244" dependencies = [ "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - "windows_x86_64_gnu 0.42.0", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", -] - -[[package]] -name = "windows-sys" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" -dependencies = [ - "windows_aarch64_msvc 0.36.1", - "windows_i686_gnu 0.36.1", - "windows_i686_msvc 0.36.1", - "windows_x86_64_gnu 0.36.1", - "windows_x86_64_msvc 0.36.1", + "windows_x86_64_msvc", ] [[package]] @@ -5890,85 +6119,55 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - "windows_x86_64_gnu 0.42.0", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", + 
"windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" [[package]] name = "windows_aarch64_msvc" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" [[package]] name = "windows_i686_gnu" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" - -[[package]] -name = "windows_i686_gnu" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" [[package]] name = "windows_i686_msvc" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" - -[[package]] -name = "windows_i686_msvc" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" [[package]] name = "windows_x86_64_gnu" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" +checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" [[package]] name = "windows_x86_64_msvc" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" [[package]] name = "winreg" @@ -5999,9 +6198,9 @@ dependencies = [ [[package]] name = "wyz" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b31594f29d27036c383b53b59ed3476874d518f0efb151b27a4c275141390e" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" 
dependencies = [ "tap", ] @@ -6020,9 +6219,9 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" [[package]] name = "zip" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf225bcf73bb52cbb496e70475c7bd7a3f769df699c0020f6c7bd9a96dcf0b8d" +checksum = "537ce7411d25e54e8ae21a7ce0b15840e7bfcff15b51d697ec3266cc76bdf080" dependencies = [ "aes 0.7.5", "byteorder", @@ -6032,7 +6231,7 @@ dependencies = [ "crossbeam-utils", "flate2", "hmac", - "pbkdf2 0.10.1", + "pbkdf2", "sha1", "time 0.3.17", "zstd", @@ -6040,18 +6239,18 @@ dependencies = [ [[package]] name = "zstd" -version = "0.10.2+zstd.1.5.2" +version = "0.11.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4a6bd64f22b5e3e94b4e238669ff9f10815c27a5180108b849d24174a83847" +checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "4.1.6+zstd.1.5.2" +version = "5.0.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b61c51bb270702d6167b8ce67340d2754b088d0c091b06e593aa772c3ee9bb" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" dependencies = [ "libc", "zstd-sys", @@ -6059,10 +6258,11 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.6.3+zstd.1.5.2" +version = "2.0.5+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc49afa5c8d634e75761feda8c592051e7eeb4683ba827211eb0d731d3402ea8" +checksum = "edc50ffce891ad571e9f9afe5039c4837bede781ac4bb13052ed7ae695518596" dependencies = [ "cc", "libc", + "pkg-config", ] diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml index 2f1089b9..e4633a19 100644 --- a/web3_proxy/Cargo.toml +++ b/web3_proxy/Cargo.toml @@ -67,7 +67,7 @@ siwe = "0.5.0" time = "0.3.17" tokio = { version = "1.24.2", features = ["full"] } tokio-stream = { version = "0.1.11", features = ["sync"] } -toml = "0.5.11" +toml = "0.6.0" tower = "0.4.13" tower-http = { version = "0.3.5", features = ["cors", "sensitive-headers"] } ulid = { version = "1.0.0", features = ["serde"] } From 953bb27adc63ed1600fa66300993bd161890078d Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 24 Jan 2023 04:17:39 -0800 Subject: [PATCH 51/80] add --chain-id to sentryd --- web3_proxy/src/bin/web3_proxy_cli/main.rs | 2 -- web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs | 6 +++++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs index a0aebbaa..da524b68 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/main.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs @@ -257,8 +257,6 @@ fn main() -> anyhow::Result<()> { error!("Failed sending panic to pagerduty: {}", err); } })); - } else { - info!("No pagerduty key. Using default panic handler"); } // set up tokio's async runtime diff --git a/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs b/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs index 4fadf11a..3c43daa3 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs @@ -14,6 +14,10 @@ pub struct PagerdutySubCommand { /// short description of the alert summary: String, + /// the chain id to require. Only used if not using --config. 
+    #[argh(option)]
+    chain_id: Option<u64>,
+
     #[argh(option)]
     /// the class/type of the event
     class: Option<String>,
@@ -56,7 +60,7 @@ impl PagerdutySubCommand {
             })
             .unwrap_or_else(|| {
                 pagerduty_alert(
-                    None,
+                    self.chain_id,
                     self.class,
                     "web3-proxy".to_string(),
                     None,

From 23f31c91294b86fe90b0f38664dee0bb8cc36009 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Tue, 24 Jan 2023 04:29:12 -0800
Subject: [PATCH 52/80] sanitize inputs and improve logs

---
 .../src/bin/web3_proxy_cli/sentryd/compare.rs | 14 ++++++++----
 .../src/bin/web3_proxy_cli/sentryd/mod.rs     | 22 +++++++++++++++----
 2 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
index ada4681e..f4b5c27f 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
@@ -169,19 +169,25 @@ async fn check_rpc(
     // TODO: don't unwrap! don't use the try operator
     let response: JsonRpcResponse<Option<ArcBlock>> = client
-        .post(rpc)
+        .post(rpc.clone())
         .json(&block_by_hash_request)
         .send()
-        .await?
+        .await
+        .context(format!("awaiting response from {}", rpc))?
         .json()
-        .await?;
+        .await
+        .context(format!("reading json on {}", rpc))?;
 
     if let Some(result) = response.result {
         let abbreviated = AbbreviatedBlock::from(result);
 
         Ok(abbreviated)
     } else if let Some(result) = response.error {
-        Err(anyhow!("Failed parsing response as JSON: {:?}", result))
+        Err(anyhow!(
+            "Failed parsing response from {} as JSON: {:?}",
+            rpc,
+            result
+        ))
     } else {
         unimplemented!("{:?}", response)
     }
diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
index d7f03da9..26dc43bd 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
@@ -53,6 +53,20 @@ impl SentrydSubCommand {
     pub async fn main(self, pagerduty_async: Option<PagerdutyAsyncEventsV2>) -> anyhow::Result<()> {
         // sentry logging should already be configured
 
+        let web3_proxy = self.web3_proxy.trim_end_matches("/").to_string();
+
+        let other_proxy: Vec<_> = self
+            .other_proxy
+            .into_iter()
+            .map(|x| x.trim_end_matches("/").to_string())
+            .collect();
+
+        let other_rpc: Vec<_> = self
+            .other_rpc
+            .into_iter()
+            .map(|x| x.trim_end_matches("/").to_string())
+            .collect();
+
         let seconds = self.seconds.unwrap_or(60);
 
         let mut handles = FuturesUnordered::new();
@@ -109,7 +123,7 @@ impl SentrydSubCommand {
 
         // check the main rpc's /health endpoint
         {
-            let url = format!("{}/health", self.web3_proxy);
+            let url = format!("{}/health", web3_proxy);
             let error_sender = error_sender.clone();
 
             let loop_f = a_loop(
@@ -123,7 +137,7 @@ impl SentrydSubCommand {
             handles.push(tokio::spawn(loop_f));
         }
         // check any other web3-proxy /health endpoints
-        for other_web3_proxy in self.other_proxy.iter() {
+        for other_web3_proxy in other_proxy.iter() {
             let url = format!("{}/health", other_web3_proxy);
 
             let error_sender = error_sender.clone();
@@ -145,9 +159,9 @@ impl SentrydSubCommand {
             let rpc = self.web3_proxy.clone();
             let error_sender = error_sender.clone();
 
-            let mut others = self.other_proxy.clone();
+            let mut others = other_proxy.clone();
 
-            others.extend(self.other_rpc.clone());
+            others.extend(other_rpc);
 
             let loop_f = a_loop(
                 "head block comparison",

From 4f9d0f6336ca9ea9450433e625877f17ac585993 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Tue, 24 Jan 2023 04:51:55 -0800
Subject: [PATCH 53/80] add --chain-id to sentryd too

---
 web3_proxy/src/bin/web3_proxy_cli/main.rs     |  2 +-
 .../src/bin/web3_proxy_cli/sentryd/mod.rs     | 20 ++++++++++++++++---
 2 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs
index da524b68..cea65a8f 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/main.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs
@@ -366,7 +366,7 @@ fn main() -> anyhow::Result<()> {
                     warn!("sentry_url is not set! Logs will only show in this console");
                 }
 
-                x.main(pagerduty_async).await
+                x.main(pagerduty_async, top_config).await
             }
             SubCommand::RpcAccounting(x) => {
                 let db_url = cli_config
diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
index 26dc43bd..0708f7a7 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
@@ -1,6 +1,7 @@
 mod compare;
 mod simple;
 
+use anyhow::Context;
 use argh::FromArgs;
 use futures::{
     stream::{FuturesUnordered, StreamExt},
@@ -11,7 +12,7 @@ use pagerduty_rs::{eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, types::Eve
 use std::time::Duration;
 use tokio::sync::mpsc;
 use tokio::time::{interval, MissedTickBehavior};
-use web3_proxy::pagerduty::pagerduty_alert;
+use web3_proxy::{config::TopConfig, pagerduty::pagerduty_alert};
 
 #[derive(FromArgs, PartialEq, Debug, Eq)]
 /// Loop healthchecks and send pager duty alerts if any fail
@@ -21,6 +22,10 @@ pub struct SentrydSubCommand {
     /// the main (HTTP only) web3-proxy being checked.
     web3_proxy: String,
 
+    /// the chain id to require. Only used if not using --config.
+    #[argh(option)]
+    chain_id: Option<u64>,
+
     #[argh(option)]
     /// warning threshold for age of the best known head block
     max_age: i64,
@@ -50,9 +55,18 @@ struct Error {
 }
 
 impl SentrydSubCommand {
-    pub async fn main(self, pagerduty_async: Option<PagerdutyAsyncEventsV2>) -> anyhow::Result<()> {
+    pub async fn main(
+        self,
+        pagerduty_async: Option<PagerdutyAsyncEventsV2>,
+        top_config: Option<TopConfig>,
+    ) -> anyhow::Result<()> {
         // sentry logging should already be configured
 
+        let chain_id = self
+            .chain_id
+            .or_else(|| top_config.map(|x| x.app.chain_id))
+            .context("--config or --chain-id required")?;
+
         let web3_proxy = self.web3_proxy.trim_end_matches("/").to_string();
 
         let other_proxy: Vec<_> = self
@@ -85,7 +99,7 @@ impl SentrydSubCommand {
 
                 if matches!(err.level, log::Level::Error) {
                     let alert = pagerduty_alert(
-                        None,
+                        Some(chain_id),
                         Some(err.class),
                         "web3-proxy-sentry".to_string(),
                         None,

From c9b8e5dfb9e1cf6251915b048d37b7f01c72991a Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Tue, 24 Jan 2023 05:27:07 -0800
Subject: [PATCH 54/80] wrote our own panic handler

---
 web3_proxy/Cargo.toml | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml
index e4633a19..09630499 100644
--- a/web3_proxy/Cargo.toml
+++ b/web3_proxy/Cargo.toml
@@ -73,6 +73,3 @@ tower-http = { version = "0.3.5", features = ["cors", "sensitive-headers"] }
 ulid = { version = "1.0.0", features = ["serde"] }
 url = "2.3.1"
 uuid = "1.2.2"
-
-# # TODO: i'd like to add this, but websockets with ethers often disconnect with a panic
-# pagerduty_panic = "0.1.1"

From 447cf90eed4a6ba67b0cf655c802dc86a9930e49 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Tue, 24 Jan 2023 08:07:10 -0800
Subject: [PATCH 55/80] jsonrpc instead of 500 errors

---
 web3_proxy/src/app/mod.rs   | 72 ++++++++++++++++++++++++++++++-------
 web3_proxy/src/pagerduty.rs |  2 +-
 2 files changed, 61 insertions(+), 13 deletions(-)

diff --git a/web3_proxy/src/app/mod.rs b/web3_proxy/src/app/mod.rs
index 
2234382f..7f92955e 100644 --- a/web3_proxy/src/app/mod.rs +++ b/web3_proxy/src/app/mod.rs @@ -1098,9 +1098,15 @@ impl Web3ProxyApp { | "shh_uninstallFilter" | "shh_version") => { // TODO: client error stat - // TODO: proper error code - // TODO: right now this sends a warn level log. thats too verbose - return Err(anyhow::anyhow!("method unsupported: {}", method)); + // TODO: what error code? + return Ok(( + JsonRpcForwardedResponse::from_string( + format!("method unsupported: {}", method), + None, + Some(request_id), + ), + vec![], + )); } // TODO: implement these commands method @ ("eth_getFilterChanges" @@ -1110,8 +1116,15 @@ impl Web3ProxyApp { | "eth_newPendingTransactionFilter" | "eth_uninstallFilter") => { // TODO: unsupported command stat - // TODO: right now this sends a warn level log. thats too verbose - return Err(anyhow::anyhow!("not yet implemented: {}", method)); + // TODO: what error code? + return Ok(( + JsonRpcForwardedResponse::from_string( + format!("not yet implemented: {}", method), + None, + Some(request_id), + ), + vec![], + )); } // some commands can use local data or caches "eth_accounts" => { @@ -1312,13 +1325,23 @@ impl Web3ProxyApp { json!(false) } "eth_subscribe" => { - return Err(anyhow::anyhow!( - "notifications not supported. eth_subscribe is only available over a websocket" + return Ok(( + JsonRpcForwardedResponse::from_string( + format!("notifications not supported. eth_subscribe is only available over a websocket"), + Some(-32601), + Some(request_id), + ), + vec![], )); } "eth_unsubscribe" => { - return Err(anyhow::anyhow!( - "notifications not supported. eth_unsubscribe is only available over a websocket" + return Ok(( + JsonRpcForwardedResponse::from_string( + format!("notifications not supported. eth_unsubscribe is only available over a websocket"), + Some(-32601), + Some(request_id), + ), + vec![], )); } "net_listening" => { @@ -1342,10 +1365,18 @@ impl Web3ProxyApp { Some(serde_json::Value::Array(params)) => { // TODO: make a struct and use serde conversion to clean this up if params.len() != 1 || !params[0].is_string() { - // TODO: this needs the correct error code in the response - return Err(anyhow::anyhow!("invalid request")); + // TODO: what error code? + return Ok(( + JsonRpcForwardedResponse::from_str( + "Invalid request", + Some(-32600), + Some(request_id), + ), + vec![], + )); } + // TODO: don't return with ? here. send a jsonrpc invalid request let param = Bytes::from_str( params[0] .as_str() @@ -1359,10 +1390,27 @@ impl Web3ProxyApp { _ => { // TODO: this needs the correct error code in the response // TODO: emit stat? 
- return Err(anyhow::anyhow!("invalid request")); + return Ok(( + JsonRpcForwardedResponse::from_str( + "invalid request", + None, + Some(request_id), + ), + vec![], + )); } } } + "test" => { + return Ok(( + JsonRpcForwardedResponse::from_str( + "The method test does not exist/is not available.", + Some(-32601), + Some(request_id), + ), + vec![], + )); + } // anything else gets sent to backend rpcs and cached method => { // emit stats diff --git a/web3_proxy/src/pagerduty.rs b/web3_proxy/src/pagerduty.rs index de777c4d..24f41089 100644 --- a/web3_proxy/src/pagerduty.rs +++ b/web3_proxy/src/pagerduty.rs @@ -47,7 +47,7 @@ pub fn pagerduty_alert( timestamp: Option, ) -> AlertTrigger { let client = chain_id - .map(|x| format!("{} chain #{}", x, client)) + .map(|x| format!("{} chain #{}", client, x)) .unwrap_or_else(|| format!("{} w/o chain", client)); let source = From 106dec294ffa695dad2b8056cdb67118d0370726 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 24 Jan 2023 09:36:07 -0800 Subject: [PATCH 56/80] better handling when method not available --- web3_proxy/src/rpcs/connections.rs | 42 +++++++++++++++++++++++++----- 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/web3_proxy/src/rpcs/connections.rs b/web3_proxy/src/rpcs/connections.rs index edd94dc3..2c573dd4 100644 --- a/web3_proxy/src/rpcs/connections.rs +++ b/web3_proxy/src/rpcs/connections.rs @@ -749,6 +749,7 @@ impl Web3Connections { wait_for_sync: bool, ) -> anyhow::Result { let mut skip_rpcs = vec![]; + let mut method_not_available_response = None; let mut watch_consensus_connections = if wait_for_sync { Some(self.watch_consensus_connections_sender.subscribe()) @@ -761,7 +762,6 @@ impl Web3Connections { // TODO: is self.conns still right now that we split main and backup servers? // TODO: if a new block arrives, we probably want to reset the skip list if skip_rpcs.len() == self.conns.len() { - // no servers to try break; } match self @@ -802,7 +802,7 @@ impl Web3Connections { request.id.clone(), ) { Ok(response) => { - if let Some(error) = &response.error { + if let Some(error) = &response.error.as_ref() { // trace!(?response, "rpc error"); if let Some(request_metadata) = request_metadata { @@ -843,9 +843,22 @@ impl Web3Connections { -32601 => { let error_msg = error.message.as_str(); + // sometimes a provider does not support all rpc methods + // we check other connections rather than returning the error + // but sometimes the method is something that is actually unsupported, + // so we save the response here to return it later + + // some providers look like this if error_msg.starts_with("the method") && error_msg.ends_with("is not available") { + method_not_available_response = Some(response); + continue; + } + + // others look like this + if error_msg == "Method not found" { + method_not_available_response = Some(response); continue; } } @@ -888,6 +901,7 @@ impl Web3Connections { { // TODO: if there are other servers in synced_connections, we should continue now // wait until retry_at OR synced_connections changes + trace!("waiting for change in synced servers or retry_at"); tokio::select! { _ = sleep_until(retry_at) => { skip_rpcs.pop(); @@ -899,7 +913,8 @@ impl Web3Connections { } continue; } else { - break; + sleep_until(retry_at).await; + continue; } } OpenRequestResult::NotReady => { @@ -908,18 +923,25 @@ impl Web3Connections { } if wait_for_sync { + trace!("waiting for change in synced servers"); // TODO: race here. 
there might have been a change while we were waiting on the previous server self.watch_consensus_connections_sender .subscribe() .changed() .await?; } else { - break; + // TODO: continue or break? + continue; } } } } + if let Some(r) = method_not_available_response { + // TODO: emit a stat for unsupported methods? + return Ok(r); + } + // TODO: do we need this here, or do we do it somewhere else? if let Some(request_metadata) = request_metadata { request_metadata @@ -929,9 +951,17 @@ impl Web3Connections { let num_conns = self.conns.len(); - error!("No servers synced ({} known)", num_conns); + if skip_rpcs.is_empty() { + error!("No servers synced ({} known)", num_conns); - Err(anyhow::anyhow!("No servers synced ({} known)", num_conns)) + Err(anyhow::anyhow!("No servers synced ({} known)", num_conns)) + } else { + Err(anyhow::anyhow!( + "{}/{} servers erred", + skip_rpcs.len(), + num_conns + )) + } } /// be sure there is a timeout on this or it might loop forever From 522678e394a132d054c9d2a4912054f1f2aa2ee0 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 24 Jan 2023 09:38:12 -0800 Subject: [PATCH 57/80] don't send pagerduty alerts for websocket panics --- TODO.md | 2 + web3_proxy/src/bin/web3_proxy_cli/main.rs | 46 ++++++++++++----------- 2 files changed, 27 insertions(+), 21 deletions(-) diff --git a/TODO.md b/TODO.md index 15ba2239..986c4816 100644 --- a/TODO.md +++ b/TODO.md @@ -319,6 +319,8 @@ These are not yet ordered. There might be duplicates. We might not actually need - this will let us easily wait for a new head or a new synced connection - [x] broadcast transactions to more servers - [x] send sentryd errors to pagerduty +- [x] improve handling of unknown methods +- [x] don't send pagerduty alerts for websocket panics - [-] proxy mode for benchmarking all backends - [-] proxy mode for sending to multiple backends - [-] let users choose a % of reverts to log (or maybe x/second). 
someone like curve logging all reverts will be a BIG database very quickly diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs index cea65a8f..f658b46e 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/main.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs @@ -231,30 +231,34 @@ fn main() -> anyhow::Result<()> { let hostname = gethostname().into_string().unwrap_or("unknown".to_string()); let panic_msg = format!("{} {:?}", x, x); - error!("sending panic to pagerduty: {}", panic_msg); + if panic_msg.starts_with("panicked at 'WS Server panic") { + info!("Underlying library {}", panic_msg); + } else { + error!("sending panic to pagerduty: {}", panic_msg); - let payload = AlertTriggerPayload { - severity: pagerduty_rs::types::Severity::Error, - summary: panic_msg.clone(), - source: hostname, - timestamp: None, - component: None, - group: Some("web3-proxy".to_string()), - class: Some("panic".to_string()), - custom_details: None::<()>, - }; + let payload = AlertTriggerPayload { + severity: pagerduty_rs::types::Severity::Error, + summary: panic_msg.clone(), + source: hostname, + timestamp: None, + component: None, + group: Some("web3-proxy".to_string()), + class: Some("panic".to_string()), + custom_details: None::<()>, + }; - let event = Event::AlertTrigger(AlertTrigger { - payload, - dedup_key: None, - images: None, - links: None, - client: Some(client.clone()), - client_url: client_url.clone(), - }); + let event = Event::AlertTrigger(AlertTrigger { + payload, + dedup_key: None, + images: None, + links: None, + client: Some(client.clone()), + client_url: client_url.clone(), + }); - if let Err(err) = pagerduty_sync.event(event) { - error!("Failed sending panic to pagerduty: {}", err); + if let Err(err) = pagerduty_sync.event(event) { + error!("Failed sending panic to pagerduty: {}", err); + } } })); } From 641d11a19b7427ce94cbc89a6e0aa33e9c0278ca Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 24 Jan 2023 10:09:12 -0800 Subject: [PATCH 58/80] remove excess continues --- web3_proxy/src/rpcs/connections.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/web3_proxy/src/rpcs/connections.rs b/web3_proxy/src/rpcs/connections.rs index 2c573dd4..24e4e856 100644 --- a/web3_proxy/src/rpcs/connections.rs +++ b/web3_proxy/src/rpcs/connections.rs @@ -897,9 +897,10 @@ impl Web3Connections { request_metadata.no_servers.fetch_add(1, Ordering::Release); } + // TODO: if there are other servers in synced_connections, we should continue now + if let Some(watch_consensus_connections) = watch_consensus_connections.as_mut() { - // TODO: if there are other servers in synced_connections, we should continue now // wait until retry_at OR synced_connections changes trace!("waiting for change in synced servers or retry_at"); tokio::select! { @@ -911,10 +912,8 @@ impl Web3Connections { let _ = watch_consensus_connections.borrow_and_update(); } } - continue; } else { sleep_until(retry_at).await; - continue; } } OpenRequestResult::NotReady => { @@ -929,9 +928,6 @@ impl Web3Connections { .subscribe() .changed() .await?; - } else { - // TODO: continue or break? 
- continue; } } } From 0ae240492aa61bcee9ce2ad4b2d819829e412b8e Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 24 Jan 2023 20:44:17 -0800 Subject: [PATCH 59/80] easy trace logging --- web3_proxy/src/bin/web3_proxy_cli/main.rs | 24 +++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs index f658b46e..113336b4 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/main.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs @@ -114,8 +114,28 @@ fn main() -> anyhow::Result<()> { // TODO: is there a better way to do this? let rust_log = match std::env::var("RUST_LOG") { Ok(x) => x, - Err(_) => "info,ethers=debug,redis_rate_limit=debug,web3_proxy=debug,web3_proxy_cli=debug" - .to_string(), + Err(_) => match std::env::var("WEB3_PROXY_TRACE").map(|x| x == "true") { + Ok(true) => { + vec![ + "info", + "ethers=debug", + "redis_rate_limit=debug", + "web3_proxy=trace", + "web3_proxy_cli=trace", + "web3_proxy::rpcs::blockchain=info", + ] + } + _ => { + vec![ + "info", + "ethers=debug", + "redis_rate_limit=debug", + "web3_proxy=debug", + "web3_proxy_cli=debug", + ] + } + } + .join(","), }; // this probably won't matter for us in docker, but better safe than sorry From 694e552b5db01dc2338ea367a4f2c36e7eedbb20 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 24 Jan 2023 20:44:50 -0800 Subject: [PATCH 60/80] improve waiting for sync when rate limited --- TODO.md | 1 + web3_proxy/src/rpcs/blockchain.rs | 10 +-- web3_proxy/src/rpcs/connection.rs | 68 ++++++++++++++++----- web3_proxy/src/rpcs/connections.rs | 97 ++++++++++++++++-------------- web3_proxy/src/rpcs/request.rs | 36 ++++++++--- 5 files changed, 138 insertions(+), 74 deletions(-) diff --git a/TODO.md b/TODO.md index 986c4816..e1b8711c 100644 --- a/TODO.md +++ b/TODO.md @@ -321,6 +321,7 @@ These are not yet ordered. There might be duplicates. We might not actually need - [x] send sentryd errors to pagerduty - [x] improve handling of unknown methods - [x] don't send pagerduty alerts for websocket panics +- [x] improve waiting for sync when rate limited - [-] proxy mode for benchmarking all backends - [-] proxy mode for sending to multiple backends - [-] let users choose a % of reverts to log (or maybe x/second). someone like curve logging all reverts will be a BIG database very quickly diff --git a/web3_proxy/src/rpcs/blockchain.rs b/web3_proxy/src/rpcs/blockchain.rs index 199fb65b..bcda8579 100644 --- a/web3_proxy/src/rpcs/blockchain.rs +++ b/web3_proxy/src/rpcs/blockchain.rs @@ -167,13 +167,7 @@ impl Web3Connections { // TODO: request_metadata? maybe we should put it in the authorization? // TODO: think more about this wait_for_sync let response = self - .try_send_best_consensus_head_connection( - authorization, - request, - None, - None, - true, - ) + .try_send_best_consensus_head_connection(authorization, request, None, None) .await?; let block = response.result.context("failed fetching block")?; @@ -260,7 +254,7 @@ impl Web3Connections { // TODO: if error, retry? // TODO: request_metadata or authorization? 
         let response = self
-            .try_send_best_consensus_head_connection(authorization, request, None, Some(num), true)
+            .try_send_best_consensus_head_connection(authorization, request, None, Some(num))
             .await?;
 
         let raw_block = response.result.context("no block result")?;
diff --git a/web3_proxy/src/rpcs/connection.rs b/web3_proxy/src/rpcs/connection.rs
index bfb8a9a3..e56a4448 100644
--- a/web3_proxy/src/rpcs/connection.rs
+++ b/web3_proxy/src/rpcs/connection.rs
@@ -24,22 +24,22 @@ use std::sync::atomic::{self, AtomicU32, AtomicU64};
 use std::{cmp::Ordering, sync::Arc};
 use thread_fast_rng::rand::Rng;
 use thread_fast_rng::thread_fast_rng;
-use tokio::sync::{broadcast, oneshot, RwLock as AsyncRwLock};
+use tokio::sync::{broadcast, oneshot, watch, RwLock as AsyncRwLock};
 use tokio::time::{interval, sleep, sleep_until, timeout, Duration, Instant, MissedTickBehavior};
 
 // TODO: maybe provider state should have the block data limit in it. but it is inside an async lock and we can't Serialize then
 #[derive(Clone, Debug)]
 pub enum ProviderState {
     None,
-    NotReady(Arc<Web3Provider>),
-    Ready(Arc<Web3Provider>),
+    Connecting(Arc<Web3Provider>),
+    Connected(Arc<Web3Provider>),
 }
 
 impl ProviderState {
     pub async fn provider(&self, allow_not_ready: bool) -> Option<&Arc<Web3Provider>> {
         match self {
             ProviderState::None => None,
-            ProviderState::NotReady(x) => {
+            ProviderState::Connecting(x) => {
                 if allow_not_ready {
                     Some(x)
                 } else {
@@ -47,7 +47,7 @@ impl ProviderState {
                     None
                 }
             }
-            ProviderState::Ready(x) => {
+            ProviderState::Connected(x) => {
                 if x.ready() {
                     Some(x)
                 } else {
@@ -76,6 +76,8 @@ pub struct Web3Connection {
     /// provider is in a RwLock so that we can replace it if re-connecting
     /// it is an async lock because we hold it open across awaits
     pub(super) provider_state: AsyncRwLock<ProviderState>,
+    /// keep track of hard limits
+    pub(super) hard_limit_until: Option<watch::Sender<Instant>>,
     /// rate limits are stored in a central redis so that multiple proxies can share their rate limits
     /// We do not use the deferred rate limiter because going over limits would cause errors
     pub(super) hard_limit: Option<RedisRateLimiter>,
@@ -136,6 +138,16 @@ impl Web3Connection {
         let automatic_block_limit =
             (block_data_limit.load(atomic::Ordering::Acquire) == 0) && block_sender.is_some();
 
+        // track hard limit until on backup servers (which might surprise us with rate limit changes)
+        // and track on servers that have a configured hard limit
+        let hard_limit_until = if backup || hard_limit.is_some() {
+            let (sender, _) = watch::channel(Instant::now());
+
+            Some(sender)
+        } else {
+            None
+        };
+
         let new_connection = Self {
             name,
             db_conn: db_conn.clone(),
@@ -147,6 +159,7 @@ impl Web3Connection {
             internal_requests: 0.into(),
             provider_state: AsyncRwLock::new(ProviderState::None),
             hard_limit,
+            hard_limit_until,
             soft_limit,
             automatic_block_limit,
             backup,
@@ -376,7 +389,7 @@ impl Web3Connection {
             ProviderState::None => {
                 info!("connecting to {}", self);
             }
-            ProviderState::NotReady(provider) | ProviderState::Ready(provider) => {
+            ProviderState::Connecting(provider) | ProviderState::Connected(provider) => {
                 // disconnect the current provider
                 if let Web3Provider::Mock = provider.as_ref() {
                     return Ok(());
@@ -410,7 +423,7 @@ impl Web3Connection {
         let new_provider = Web3Provider::from_str(&self.url, self.http_client.clone()).await?;
 
         // trace!("saving provider state as NotReady on {}", self);
-        *provider_state = ProviderState::NotReady(Arc::new(new_provider));
+        *provider_state = ProviderState::Connecting(Arc::new(new_provider));
 
         // drop the lock so that we can get a request handle
         // trace!("provider_state {} unlocked", self);
@@ -464,7 +477,7 @@ impl Web3Connection {
                 .context("provider missing")?
                 .clone();
 
-            *provider_state = ProviderState::Ready(ready_provider);
+            *provider_state = ProviderState::Connected(ready_provider);
 
             // trace!("unlocked for ready...");
         }
@@ -693,7 +706,7 @@ impl Web3Connection {
                     // trace!("unlocked on new heads");
 
                     // TODO: need a timeout
-                    if let ProviderState::Ready(provider) = provider_state {
+                    if let ProviderState::Connected(provider) = provider_state {
                         match provider.as_ref() {
                             Web3Provider::Mock => unimplemented!(),
                             Web3Provider::Http(_provider) => {
@@ -865,7 +878,7 @@ impl Web3Connection {
         authorization: Arc<Authorization>,
        tx_id_sender: flume::Sender<(TxHash, Arc<Self>)>,
     ) -> anyhow::Result<()> {
-        if let ProviderState::Ready(provider) = self
+        if let ProviderState::Connected(provider) = self
            .provider_state
            .try_read()
            .context("subscribe_pending_transactions")?
@@ -938,6 +951,7 @@ impl Web3Connection {
 
     /// be careful with this; it might wait forever!
     /// `allow_not_ready` is only for use by health checks while starting the provider
+    /// TODO: don't use anyhow. use specific error type
     pub async fn wait_for_request_handle(
         self: &Arc<Self>,
         authorization: &Arc<Authorization>,
@@ -954,21 +968,29 @@ impl Web3Connection {
             Ok(OpenRequestResult::Handle(handle)) => return Ok(handle),
             Ok(OpenRequestResult::RetryAt(retry_at)) => {
                 // TODO: emit a stat?
-                // // trace!(?retry_at);
+                trace!("{} waiting for request handle until {:?}", self, retry_at);
 
                 if retry_at > max_wait {
                     // break now since we will wait past our maximum wait time
                     // TODO: don't use anyhow. use specific error type
                     return Err(anyhow::anyhow!("timeout waiting for request handle"));
                 }
+
                 sleep_until(retry_at).await;
             }
             Ok(OpenRequestResult::NotReady) => {
                 // TODO: when can this happen? log? emit a stat?
-                // TODO: subscribe to the head block on this
+                trace!("{} has no handle ready", self);
+
+                let now = Instant::now();
+
+                if now > max_wait {
+                    return Err(anyhow::anyhow!("unable to retry for request handle"));
+                }
+
                 // TODO: sleep how long? maybe just error?
-                // TODO: don't use anyhow. use specific error type
-                return Err(anyhow::anyhow!("unable to retry for request handle"));
+                // TODO: instead of an arbitrary sleep, subscribe to the head block on this
+                sleep(Duration::from_millis(10)).await;
             }
             Err(err) => return Err(err),
         }
@@ -994,12 +1016,22 @@ impl Web3Connection {
             return Ok(OpenRequestResult::NotReady);
         }
 
+        if let Some(hard_limit_until) = self.hard_limit_until.as_ref() {
+            let hard_limit_ready = hard_limit_until.borrow().clone();
+
+            let now = Instant::now();
+
+            if now < hard_limit_ready {
+                return Ok(OpenRequestResult::RetryAt(hard_limit_ready));
+            }
+        }
+
         // check rate limits
         if let Some(ratelimiter) = self.hard_limit.as_ref() {
             // TODO: how should we know if we should set expire or not?
             match ratelimiter.throttle().await? {
                 RedisRateLimitResult::Allowed(_) => {
-                    // // trace!("rate limit succeeded")
+                    // trace!("rate limit succeeded")
                 }
                 RedisRateLimitResult::RetryAt(retry_at, _) => {
                     // rate limit failed
                     // TODO: save the amount of requests over the limit?
                     // TODO: i'm seeing "Exhausted rate limit on moralis: 0ns". How is it getting 0?
                     warn!("Exhausted rate limit on {}. Retry at {:?}", self, retry_at);
 
+                    if let Some(hard_limit_until) = self.hard_limit_until.as_ref() {
+                        hard_limit_until.send(retry_at.clone())?;
+                    }
+
                     return Ok(OpenRequestResult::RetryAt(retry_at));
                 }
                 RedisRateLimitResult::RetryNever => {
@@ -1165,6 +1201,7 @@ mod tests {
             internal_requests: 0.into(),
             provider_state: AsyncRwLock::new(ProviderState::None),
             hard_limit: None,
+            hard_limit_until: None,
             soft_limit: 1_000,
             automatic_block_limit: false,
             backup: false,
@@ -1213,6 +1250,7 @@ mod tests {
             internal_requests: 0.into(),
             provider_state: AsyncRwLock::new(ProviderState::None),
             hard_limit: None,
+            hard_limit_until: None,
             soft_limit: 1_000,
             automatic_block_limit: false,
             backup: false,
diff --git a/web3_proxy/src/rpcs/connections.rs b/web3_proxy/src/rpcs/connections.rs
index 24e4e856..5b97a49a 100644
--- a/web3_proxy/src/rpcs/connections.rs
+++ b/web3_proxy/src/rpcs/connections.rs
@@ -128,6 +128,7 @@ impl Web3Connections {
         // turn configs into connections (in parallel)
         // TODO: move this into a helper function. then we can use it when configs change (will need a remove function too)
+        // TODO: futures unordered?
         let spawn_handles: Vec<_> = server_configs
             .into_iter()
             .filter_map(|(server_name, server_config)| {
@@ -175,7 +176,7 @@ impl Web3Connections {
         let mut connections = HashMap::new();
         let mut handles = vec![];
 
-        // TODO: do we need to join this?
+        // TODO: futures unordered?
         for x in join_all(spawn_handles).await {
             // TODO: how should we handle errors here? one rpc being down shouldn't cause the program to exit
             match x {
@@ -529,7 +530,7 @@ impl Web3Connections {
 
                 let available_requests = soft_limit - active_requests;
 
-                trace!("available requests on {}: {}", rpc, available_requests);
+                // trace!("available requests on {}: {}", rpc, available_requests);
 
                 minimum = minimum.min(available_requests);
                 maximum = maximum.max(available_requests);
@@ -538,8 +539,8 @@ impl Web3Connections {
             })
             .collect();
 
-        trace!("minimum available requests: {}", minimum);
-        trace!("maximum available requests: {}", maximum);
+        // trace!("minimum available requests: {}", minimum);
+        // trace!("maximum available requests: {}", maximum);
 
         if maximum < 0.0 {
             // TODO: if maximum < 0 and there are other tiers on the same block, we should include them now
@@ -588,7 +589,7 @@ impl Web3Connections {
             .await
         {
             Ok(OpenRequestResult::Handle(handle)) => {
-                trace!("opened handle: {}", best_rpc);
+                // trace!("opened handle: {}", best_rpc);
                 return Ok(OpenRequestResult::Handle(handle));
             }
             Ok(OpenRequestResult::RetryAt(retry_at)) => {
@@ -746,24 +747,25 @@ impl Web3Connections {
         request: JsonRpcRequest,
         request_metadata: Option<&Arc<RequestMetadata>>,
         min_block_needed: Option<&U64>,
-        wait_for_sync: bool,
     ) -> anyhow::Result<JsonRpcForwardedResponse> {
         let mut skip_rpcs = vec![];
         let mut method_not_available_response = None;
 
-        let mut watch_consensus_connections = if wait_for_sync {
-            Some(self.watch_consensus_connections_sender.subscribe())
-        } else {
-            None
-        };
+        let mut watch_consensus_connections = self.watch_consensus_connections_sender.subscribe();
 
         // TODO: maximum retries? right now its the total number of servers
         loop {
-            // TODO: is self.conns still right now that we split main and backup servers?
-            // TODO: if a new block arrives, we probably want to reset the skip list
-            if skip_rpcs.len() == self.conns.len() {
-                break;
+            let num_skipped = skip_rpcs.len();
+
+            if num_skipped > 0 {
+                // trace!("skip_rpcs: {:?}", skip_rpcs);
+
+                // TODO: is self.conns still right now that we split main and backup servers?
+ if num_skipped == self.conns.len() { + break; + } } + match self .best_consensus_head_connection( authorization, @@ -890,30 +892,23 @@ impl Web3Connections { // TODO: move this to a helper function // sleep (TODO: with a lock?) until our rate limits should be available // TODO: if a server catches up sync while we are waiting, we could stop waiting - warn!("All rate limits exceeded. Sleeping until {:?}", retry_at); + warn!( + "All rate limits exceeded. waiting for change in synced servers or {:?}", + retry_at + ); // TODO: have a separate column for rate limited? if let Some(request_metadata) = request_metadata { request_metadata.no_servers.fetch_add(1, Ordering::Release); } - // TODO: if there are other servers in synced_connections, we should continue now - - if let Some(watch_consensus_connections) = watch_consensus_connections.as_mut() - { - // wait until retry_at OR synced_connections changes - trace!("waiting for change in synced servers or retry_at"); - tokio::select! { - _ = sleep_until(retry_at) => { - skip_rpcs.pop(); - } - _ = watch_consensus_connections.changed() => { - // TODO: would be nice to save this retry_at so we don't keep hitting limits - let _ = watch_consensus_connections.borrow_and_update(); - } + tokio::select! { + _ = sleep_until(retry_at) => { + skip_rpcs.pop(); + } + _ = watch_consensus_connections.changed() => { + watch_consensus_connections.borrow_and_update(); } - } else { - sleep_until(retry_at).await; } } OpenRequestResult::NotReady => { @@ -921,13 +916,16 @@ impl Web3Connections { request_metadata.no_servers.fetch_add(1, Ordering::Release); } - if wait_for_sync { - trace!("waiting for change in synced servers"); - // TODO: race here. there might have been a change while we were waiting on the previous server - self.watch_consensus_connections_sender - .subscribe() - .changed() - .await?; + trace!("No servers ready. Waiting up to 1 second for change in synced servers"); + + // TODO: exponential backoff? + tokio::select! 
{ + _ = sleep(Duration::from_secs(1)) => { + skip_rpcs.pop(); + } + _ = watch_consensus_connections.changed() => { + watch_consensus_connections.borrow_and_update(); + } } } } @@ -1060,7 +1058,6 @@ impl Web3Connections { request, request_metadata, min_block_needed, - true, ) .await } @@ -1168,8 +1165,11 @@ mod tests { active_requests: 0.into(), frontend_requests: 0.into(), internal_requests: 0.into(), - provider_state: AsyncRwLock::new(ProviderState::Ready(Arc::new(Web3Provider::Mock))), + provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new( + Web3Provider::Mock, + ))), hard_limit: None, + hard_limit_until: None, soft_limit: 1_000, automatic_block_limit: true, backup: false, @@ -1188,8 +1188,11 @@ mod tests { active_requests: 0.into(), frontend_requests: 0.into(), internal_requests: 0.into(), - provider_state: AsyncRwLock::new(ProviderState::Ready(Arc::new(Web3Provider::Mock))), + provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new( + Web3Provider::Mock, + ))), hard_limit: None, + hard_limit_until: None, soft_limit: 1_000, automatic_block_limit: false, backup: false, @@ -1395,8 +1398,11 @@ mod tests { active_requests: 0.into(), frontend_requests: 0.into(), internal_requests: 0.into(), - provider_state: AsyncRwLock::new(ProviderState::Ready(Arc::new(Web3Provider::Mock))), + provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new( + Web3Provider::Mock, + ))), hard_limit: None, + hard_limit_until: None, soft_limit: 3_000, automatic_block_limit: false, backup: false, @@ -1415,8 +1421,11 @@ mod tests { active_requests: 0.into(), frontend_requests: 0.into(), internal_requests: 0.into(), - provider_state: AsyncRwLock::new(ProviderState::Ready(Arc::new(Web3Provider::Mock))), + provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new( + Web3Provider::Mock, + ))), hard_limit: None, + hard_limit_until: None, soft_limit: 1_000, automatic_block_limit: false, backup: false, diff --git a/web3_proxy/src/rpcs/request.rs b/web3_proxy/src/rpcs/request.rs index 8cf22bbf..2c440d26 100644 --- a/web3_proxy/src/rpcs/request.rs +++ b/web3_proxy/src/rpcs/request.rs @@ -284,8 +284,14 @@ impl OpenRequestHandle { revert_handler }; + enum ResponseTypes { + Revert, + RateLimit, + Ok, + } + // check for "execution reverted" here - let is_revert = if let ProviderError::JsonRpcClientError(err) = err { + let response_type = if let ProviderError::JsonRpcClientError(err) = err { // Http and Ws errors are very similar, but different types let msg = match &*self.provider { Web3Provider::Mock => unimplemented!(), @@ -310,23 +316,39 @@ impl OpenRequestHandle { }; if let Some(msg) = msg { - msg.starts_with("execution reverted") + if msg.starts_with("execution reverted") { + trace!("revert from {}", self.conn); + ResponseTypes::Revert + } else if msg.contains("limit") || msg.contains("request") { + trace!("rate limit from {}", self.conn); + ResponseTypes::RateLimit + } else { + ResponseTypes::Ok + } } else { - false + ResponseTypes::Ok } } else { - false + ResponseTypes::Ok }; - if is_revert { - trace!("revert from {}", self.conn); + if matches!(response_type, ResponseTypes::RateLimit) { + if let Some(hard_limit_until) = self.conn.hard_limit_until.as_ref() { + let retry_at = Instant::now() + Duration::from_secs(1); + + trace!("retry {} at: {:?}", self.conn, retry_at); + + hard_limit_until + .send(retry_at) + .expect("sending hard limit retry times should always work"); + } } // TODO: think more about the method and param logs. 
those can be sensitive information
         match revert_handler {
             RequestRevertHandler::DebugLevel => {
                 // TODO: think about this revert check more. sometimes we might want reverts logged so this needs a flag
-                if !is_revert {
+                if matches!(response_type, ResponseTypes::Revert) {
                     debug!(
                         "bad response from {}! method={} params={:?} err={:?}",
                         self.conn, method, params, err

From cffc60e7f66b8107d431b135d2c173f30a258bb5 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Tue, 24 Jan 2023 22:45:20 -0800
Subject: [PATCH 61/80] improve responses when blocks are not available

---
 TODO.md | 3 ++-
 web3_proxy/src/app/mod.rs | 8 +--
 web3_proxy/src/bin/web3_proxy_cli/main.rs | 1 +
 web3_proxy/src/rpcs/blockchain.rs | 11 ++-
 web3_proxy/src/rpcs/connection.rs | 75 ++++++++++++--------
 web3_proxy/src/rpcs/connections.rs | 86 ++++++++++++++++++-----
 web3_proxy/src/rpcs/request.rs | 3 +-
 7 files changed, 132 insertions(+), 55 deletions(-)

diff --git a/TODO.md b/TODO.md
index e1b8711c..1ad82e88 100644
--- a/TODO.md
+++ b/TODO.md
@@ -347,7 +347,8 @@ These are not yet ordered. There might be duplicates. We might not actually need
 - [ ] `stat delay` script
     - query database for newest stat
 - [ ] period_datetime should always be :00. right now it depends on start time
-- [ ] two servers running will confuse rpc_accounting!
+- [ ] we have our hard rate limiter set up with a period of 60. but most providers have period of 1
+- [ ] two servers running will confuse rpc_accounting!
    - it won't happen with users often because they should be sticky to one proxy, but unauthenticated users will definitely hit this
    - one option: we need the insert to be an upsert, but how do we merge histograms?
 - [ ] don't use systemtime. use chrono
diff --git a/web3_proxy/src/app/mod.rs b/web3_proxy/src/app/mod.rs
index 7f92955e..f41c1210 100644
--- a/web3_proxy/src/app/mod.rs
+++ b/web3_proxy/src/app/mod.rs
@@ -1326,8 +1326,8 @@ impl Web3ProxyApp {
             }
             "eth_subscribe" => {
                 return Ok((
-                    JsonRpcForwardedResponse::from_string(
-                        format!("notifications not supported. eth_subscribe is only available over a websocket"),
+                    JsonRpcForwardedResponse::from_str(
+                        "notifications not supported. eth_subscribe is only available over a websocket",
                         Some(-32601),
                         Some(request_id),
                     ),
@@ -1336,8 +1336,8 @@
             }
             "eth_unsubscribe" => {
                 return Ok((
-                    JsonRpcForwardedResponse::from_string(
-                        format!("notifications not supported. eth_unsubscribe is only available over a websocket"),
+                    JsonRpcForwardedResponse::from_str(
+                        "notifications not supported. eth_unsubscribe is only available over a websocket",
                         Some(-32601),
                         Some(request_id),
                     ),
diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs
index 113336b4..da95dc13 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/main.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs
@@ -123,6 +123,7 @@ fn main() -> anyhow::Result<()> {
                     "web3_proxy=trace",
                     "web3_proxy_cli=trace",
                     "web3_proxy::rpcs::blockchain=info",
+                    "web3_proxy::rpcs::request=debug",
                 ]
             }
             _ => {
diff --git a/web3_proxy/src/rpcs/blockchain.rs b/web3_proxy/src/rpcs/blockchain.rs
index bcda8579..da1c2188 100644
--- a/web3_proxy/src/rpcs/blockchain.rs
+++ b/web3_proxy/src/rpcs/blockchain.rs
@@ -149,7 +149,7 @@ impl Web3Connections {
         // TODO: if error, retry?
         let block: ArcBlock = match rpc {
             Some(rpc) => rpc
-                .wait_for_request_handle(authorization, Duration::from_secs(30), false)
+                .wait_for_request_handle(authorization, Some(Duration::from_secs(30)), false)
                .await?
.request::<_, Option<_>>( "eth_getBlockByHash", @@ -253,11 +253,16 @@ impl Web3Connections { // TODO: if error, retry? // TODO: request_metadata or authorization? + // we don't actually set min_block_needed here because all nodes have all blocks let response = self - .try_send_best_consensus_head_connection(authorization, request, None, Some(num)) + .try_send_best_consensus_head_connection(authorization, request, None, None) .await?; - let raw_block = response.result.context("no block result")?; + if let Some(err) = response.error { + debug!("could not find canonical block {}: {:?}", num, err); + } + + let raw_block = response.result.context("no cannonical block result")?; let block: ArcBlock = serde_json::from_str(raw_block.get())?; diff --git a/web3_proxy/src/rpcs/connection.rs b/web3_proxy/src/rpcs/connection.rs index e56a4448..99fc3cd1 100644 --- a/web3_proxy/src/rpcs/connection.rs +++ b/web3_proxy/src/rpcs/connection.rs @@ -213,7 +213,7 @@ impl Web3Connection { // TODO: start at 0 or 1? for block_data_limit in [0, 32, 64, 128, 256, 512, 1024, 90_000, u64::MAX] { let handle = self - .wait_for_request_handle(authorization, Duration::from_secs(30), true) + .wait_for_request_handle(authorization, None, true) .await?; let head_block_num_future = handle.request::, U256>( @@ -239,7 +239,7 @@ impl Web3Connection { // TODO: wait for the handle BEFORE we check the current block number. it might be delayed too! // TODO: what should the request be? let handle = self - .wait_for_request_handle(authorization, Duration::from_secs(30), true) + .wait_for_request_handle(authorization, None, true) .await?; let archive_result: Result = handle @@ -436,7 +436,7 @@ impl Web3Connection { // TODO: what should the timeout be? should there be a request timeout? // trace!("waiting on chain id for {}", self); let found_chain_id: Result = self - .wait_for_request_handle(&authorization, Duration::from_secs(30), true) + .wait_for_request_handle(&authorization, None, true) .await? .request( "eth_chainId", @@ -720,7 +720,7 @@ impl Web3Connection { loop { // TODO: what should the max_wait be? match self - .wait_for_request_handle(&authorization, Duration::from_secs(30), false) + .wait_for_request_handle(&authorization, None, false) .await { Ok(active_request_handle) => { @@ -806,7 +806,7 @@ impl Web3Connection { Web3Provider::Ws(provider) => { // todo: move subscribe_blocks onto the request handle? let active_request_handle = self - .wait_for_request_handle(&authorization, Duration::from_secs(30), false) + .wait_for_request_handle(&authorization, None, false) .await; let mut stream = provider.subscribe_blocks().await?; drop(active_request_handle); @@ -816,7 +816,7 @@ impl Web3Connection { // all it does is print "new block" for the same block as current block // TODO: how does this get wrapped in an arc? does ethers handle that? let block: Result, _> = self - .wait_for_request_handle(&authorization, Duration::from_secs(30), false) + .wait_for_request_handle(&authorization, None, false) .await? 
.request( "eth_getBlockByNumber", @@ -917,8 +917,8 @@ impl Web3Connection { Web3Provider::Ws(provider) => { // TODO: maybe the subscribe_pending_txs function should be on the active_request_handle let active_request_handle = self - .wait_for_request_handle(&authorization, Duration::from_secs(30), false) - .await; + .wait_for_request_handle(&authorization, None, false) + .await?; let mut stream = provider.subscribe_pending_txs().await?; @@ -955,10 +955,10 @@ impl Web3Connection { pub async fn wait_for_request_handle( self: &Arc, authorization: &Arc, - max_wait: Duration, + max_wait: Option, allow_not_ready: bool, ) -> anyhow::Result { - let max_wait = Instant::now() + max_wait; + let max_wait = max_wait.map(|x| Instant::now() + x); loop { match self @@ -968,24 +968,34 @@ impl Web3Connection { Ok(OpenRequestResult::Handle(handle)) => return Ok(handle), Ok(OpenRequestResult::RetryAt(retry_at)) => { // TODO: emit a stat? - trace!("{} waiting for request handle until {:?}", self, retry_at); + let wait = retry_at.duration_since(Instant::now()); - if retry_at > max_wait { - // break now since we will wait past our maximum wait time - // TODO: don't use anyhow. use specific error type - return Err(anyhow::anyhow!("timeout waiting for request handle")); + trace!( + "waiting {} millis for request handle on {}", + wait.as_millis(), + self + ); + + if let Some(max_wait) = max_wait { + if retry_at > max_wait { + // break now since we will wait past our maximum wait time + // TODO: don't use anyhow. use specific error type + return Err(anyhow::anyhow!("timeout waiting for request handle")); + } } sleep_until(retry_at).await; } - Ok(OpenRequestResult::NotReady) => { + Ok(OpenRequestResult::NotReady(_)) => { // TODO: when can this happen? log? emit a stat? trace!("{} has no handle ready", self); - let now = Instant::now(); + if let Some(max_wait) = max_wait { + let now = Instant::now(); - if now > max_wait { - return Err(anyhow::anyhow!("unable to retry for request handle")); + if now > max_wait { + return Err(anyhow::anyhow!("unable to retry for request handle")); + } } // TODO: sleep how long? maybe just error? @@ -1013,7 +1023,8 @@ impl Web3Connection { .await .is_none() { - return Ok(OpenRequestResult::NotReady); + trace!("{} is not ready", self); + return Ok(OpenRequestResult::NotReady(self.backup)); } if let Some(hard_limit_until) = self.hard_limit_until.as_ref() { @@ -1029,25 +1040,33 @@ impl Web3Connection { // check rate limits if let Some(ratelimiter) = self.hard_limit.as_ref() { // TODO: how should we know if we should set expire or not? - match ratelimiter.throttle().await? { + match ratelimiter + .throttle() + .await + .context(format!("attempting to throttle {}", self))? + { RedisRateLimitResult::Allowed(_) => { // trace!("rate limit succeeded") } RedisRateLimitResult::RetryAt(retry_at, _) => { - // rate limit failed - // save the smallest retry_after. if nothing succeeds, return an Err with retry_after in it - // TODO: use tracing better - // TODO: i'm seeing "Exhausted rate limit on moralis: 0ns". How is it getting 0? - warn!("Exhausted rate limit on {}. Retry at {:?}", self, retry_at); + // rate limit gave us a wait time + if !self.backup { + let when = retry_at.duration_since(Instant::now()); + warn!( + "Exhausted rate limit on {}. 
Retry in {}ms", + self, + when.as_millis() + ); + } if let Some(hard_limit_until) = self.hard_limit_until.as_ref() { - hard_limit_until.send(retry_at.clone())?; + hard_limit_until.send_replace(retry_at.clone()); } return Ok(OpenRequestResult::RetryAt(retry_at)); } RedisRateLimitResult::RetryNever => { - return Ok(OpenRequestResult::NotReady); + return Ok(OpenRequestResult::NotReady(self.backup)); } } }; diff --git a/web3_proxy/src/rpcs/connections.rs b/web3_proxy/src/rpcs/connections.rs index 5b97a49a..9ecf3fd9 100644 --- a/web3_proxy/src/rpcs/connections.rs +++ b/web3_proxy/src/rpcs/connections.rs @@ -428,7 +428,10 @@ impl Web3Connections { ) .await { - return Ok(without_backups); + // TODO: this might use backups too eagerly. but even when we allow backups, we still prioritize our own + if matches!(without_backups, OpenRequestResult::Handle(_)) { + return Ok(without_backups); + } } self._best_consensus_head_connection( @@ -460,7 +463,7 @@ impl Web3Connections { head_block.number() } else { // TODO: optionally wait for a head block >= min_block_needed - return Ok(OpenRequestResult::NotReady); + return Ok(OpenRequestResult::NotReady(allow_backups)); }; let min_block_needed = min_block_needed.unwrap_or(&head_block_num); @@ -504,7 +507,7 @@ impl Web3Connections { } cmp::Ordering::Greater => { // TODO? if the blocks is close and wait_for_sync and allow_backups, wait for change on a watch_consensus_connections_receiver().subscribe() - return Ok(OpenRequestResult::NotReady); + return Ok(OpenRequestResult::NotReady(allow_backups)); } } @@ -595,7 +598,7 @@ impl Web3Connections { Ok(OpenRequestResult::RetryAt(retry_at)) => { earliest_retry_at = earliest_retry_at.min(Some(retry_at)); } - Ok(OpenRequestResult::NotReady) => { + Ok(OpenRequestResult::NotReady(_)) => { // TODO: log a warning? emit a stat? } Err(err) => { @@ -625,7 +628,7 @@ impl Web3Connections { // TODO: should we log here? - Ok(OpenRequestResult::NotReady) + Ok(OpenRequestResult::NotReady(allow_backups)) } Some(earliest_retry_at) => { warn!("no servers on {:?}! {:?}", self, earliest_retry_at); @@ -719,7 +722,7 @@ impl Web3Connections { max_count -= 1; selected_rpcs.push(handle) } - Ok(OpenRequestResult::NotReady) => { + Ok(OpenRequestResult::NotReady(_)) => { warn!("no request handle for {}", connection) } Err(err) => { @@ -911,17 +914,51 @@ impl Web3Connections { } } } - OpenRequestResult::NotReady => { + OpenRequestResult::NotReady(backups_included) => { if let Some(request_metadata) = request_metadata { request_metadata.no_servers.fetch_add(1, Ordering::Release); } - trace!("No servers ready. Waiting up to 1 second for change in synced servers"); + // todo!( + // "check if we are requesting an old block and no archive servers are synced" + // ); + + if let Some(min_block_needed) = min_block_needed { + let mut theres_a_chance = false; + + for potential_conn in self.conns.values() { + if skip_rpcs.contains(potential_conn) { + continue; + } + + // TODO: should we instead check if has_block_data but with the current head block? + if potential_conn.has_block_data(min_block_needed) { + trace!("chance for {} on {}", min_block_needed, potential_conn); + theres_a_chance = true; + break; + } + + skip_rpcs.push(potential_conn.clone()); + } + + if !theres_a_chance { + debug!("no chance of finding data in block #{}", min_block_needed); + break; + } + } + + if backups_included { + // if NotReady and we tried backups, there's no chance + warn!("No servers ready even after checking backups"); + break; + } + + debug!("No servers ready. 
Waiting up to 1 second for change in synced servers"); // TODO: exponential backoff? tokio::select! { _ = sleep(Duration::from_secs(1)) => { - skip_rpcs.pop(); + // do NOT pop the last rpc off skip here } _ = watch_consensus_connections.changed() => { watch_consensus_connections.borrow_and_update(); @@ -944,17 +981,30 @@ impl Web3Connections { } let num_conns = self.conns.len(); + let num_skipped = skip_rpcs.len(); - if skip_rpcs.is_empty() { + if num_skipped == 0 { error!("No servers synced ({} known)", num_conns); - Err(anyhow::anyhow!("No servers synced ({} known)", num_conns)) + return Ok(JsonRpcForwardedResponse::from_str( + "No servers synced", + Some(-32000), + Some(request.id), + )); } else { - Err(anyhow::anyhow!( - "{}/{} servers erred", - skip_rpcs.len(), - num_conns - )) + // TODO: warn? debug? trace? + warn!( + "Requested data was not available on {}/{} servers", + num_skipped, num_conns + ); + + // TODO: what error code? + // cloudflare gives {"jsonrpc":"2.0","error":{"code":-32043,"message":"Requested data cannot be older than 128 blocks."},"id":1} + return Ok(JsonRpcForwardedResponse::from_str( + "Requested data is not available", + Some(-32043), + Some(request.id), + )); } } @@ -1287,7 +1337,7 @@ mod tests { dbg!(&x); - assert!(matches!(x, OpenRequestResult::NotReady)); + assert!(matches!(x, OpenRequestResult::NotReady(true))); // add lagged blocks to the conns. both servers should be allowed lagged_block.block = conns.save_block(lagged_block.block, true).await.unwrap(); @@ -1360,7 +1410,7 @@ mod tests { conns .best_consensus_head_connection(&authorization, None, &[], Some(&2.into())) .await, - Ok(OpenRequestResult::NotReady) + Ok(OpenRequestResult::NotReady(true)) )); } diff --git a/web3_proxy/src/rpcs/request.rs b/web3_proxy/src/rpcs/request.rs index 2c440d26..87e79ba8 100644 --- a/web3_proxy/src/rpcs/request.rs +++ b/web3_proxy/src/rpcs/request.rs @@ -27,7 +27,8 @@ pub enum OpenRequestResult { /// Unable to start a request. Retry at the given time. RetryAt(Instant), /// Unable to start a request because the server is not synced - NotReady, + /// contains "true" if backup servers were attempted + NotReady(bool), } /// Make RPC requests through this handle and drop it when you are done. 
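
The user-visible effect of the patch above is worth spelling out: when every capable server is skipped, rate limited, or behind, the proxy now answers with a well-formed JSON-RPC error instead of bubbling up an opaque anyhow error. A minimal, self-contained sketch of that response shape, assuming only serde_json (the crate itself builds it with JsonRpcForwardedResponse::from_str, and the -32043 code is borrowed from the Cloudflare example quoted in the diff):

    use serde_json::{json, Value};

    // sketch only, not the crate's actual type: the error object a client sees
    // when the requested block is older than anything the synced servers hold
    fn data_not_available(request_id: Value) -> Value {
        json!({
            "jsonrpc": "2.0",
            "id": request_id,
            "error": {
                "code": -32043,
                "message": "Requested data is not available",
            },
        })
    }

A caller that exhausts skip_rpcs without finding a capable server returns this instead of an Err, so clients always get a parseable JSON-RPC failure.
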
From 07bac8618e3678db7d76023bcf4a7469e7878dbf Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 24 Jan 2023 22:46:58 -0800 Subject: [PATCH 62/80] cargo upgrade --workspace --- Cargo.lock | 8 ++++---- web3_proxy/Cargo.toml | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d1dd7adb..790c2b04 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -338,9 +338,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678c5130a507ae3a7c797f9a17393c14849300b8440eac47cdb90a5bdcb3a543" +checksum = "e5694b64066a2459918d8074c2ce0d5a88f409431994c2356617c8ae0c4721fc" dependencies = [ "async-trait", "axum-core", @@ -402,9 +402,9 @@ dependencies = [ [[package]] name = "axum-macros" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7d7c3e69f305217e317a28172aab29f275667f2e1c15b87451e134fe27c7b1" +checksum = "9dbcf61bed07d554bd5c225cd07bc41b793eab63e79c6f0ceac7e1aed2f1c670" dependencies = [ "heck 0.4.0", "proc-macro2", diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml index 09630499..962c4147 100644 --- a/web3_proxy/Cargo.toml +++ b/web3_proxy/Cargo.toml @@ -27,9 +27,9 @@ thread-fast-rng = { path = "../thread-fast-rng" } anyhow = { version = "1.0.68", features = ["backtrace"] } argh = "0.1.10" -axum = { version = "0.6.3", features = ["headers", "ws"] } +axum = { version = "0.6.4", features = ["headers", "ws"] } axum-client-ip = "0.3.1" -axum-macros = "0.3.1" +axum-macros = "0.3.2" chrono = "0.4.23" counter = "0.5.7" derive_more = "0.99.17" From 37656a596c38d5d85dd8a36dc7f18cf6eb0019ff Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 24 Jan 2023 23:21:39 -0800 Subject: [PATCH 63/80] add openssl to maybe fix coredumps --- Dockerfile | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 2614085b..ba7c30be 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,11 @@ FROM rust:1-bullseye as builder +# our app uses rust-tls, but the sentry crate only uses openssl +RUN set -eux; \ + apt-get update; \ + apt-get install -y libssl-dev; \ + rm -rf /var/lib/apt/lists/* + ENV PATH /root/.foundry/bin:$PATH RUN curl -L https://foundry.paradigm.xyz | bash && foundryup @@ -12,10 +18,16 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \ FROM debian:bullseye-slim +# our app uses rust-tls, but the sentry crate only uses openssl +RUN set -eux; \ + apt-get update; \ + apt-get install -y libssl-dev; \ + rm -rf /var/lib/apt/lists/* + COPY --from=builder /opt/bin/* /usr/local/bin/ ENTRYPOINT ["web3_proxy_cli"] CMD [ "--config", "/web3-proxy.toml", "proxyd" ] # TODO: lower log level when done with prototyping -ENV RUST_LOG "web3_proxy=debug" +ENV RUST_LOG "warn,web3_proxy=debug,web3_proxy_cli=debug" From 0c058614cef0f468e5afe9e387a424826c1dea6e Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Tue, 24 Jan 2023 23:21:50 -0800 Subject: [PATCH 64/80] fix not using the workers config option --- web3_proxy/src/bin/web3_proxy_cli/main.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs index da95dc13..d46e9c1d 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/main.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs @@ -289,6 +289,10 @@ fn main() -> anyhow::Result<()> { rt_builder.enable_all(); + if 
cli_config.workers > 0 { + rt_builder.worker_threads(cli_config.workers); + } + if let Some(top_config) = top_config.as_ref() { let chain_id = top_config.app.chain_id; From 7e5418a8e7b19ba6a7a5c2ca79bd2a2b42c093be Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Wed, 25 Jan 2023 00:19:35 -0800 Subject: [PATCH 65/80] handle empty result --- web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs | 10 +++++++--- web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs | 9 ++++++++- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs index f4b5c27f..c04f350d 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs @@ -184,11 +184,15 @@ async fn check_rpc( Ok(abbreviated) } else if let Some(result) = response.error { Err(anyhow!( - "Failed parsing response from {} as JSON: {:?}", + "jsonrpc error during check_rpc from {}: {:#}", rpc, - result + json!(result), )) } else { - unimplemented!("{:?}", response) + Err(anyhow!( + "empty result during check_rpc from {}: {:#}", + rpc, + json!(response) + )) } } diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs index 0708f7a7..cd14e351 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs @@ -152,7 +152,14 @@ impl SentrydSubCommand { } // check any other web3-proxy /health endpoints for other_web3_proxy in other_proxy.iter() { - let url = format!("{}/health", other_web3_proxy); + let url = if other_web3_proxy.contains("/rpc/") { + let x = other_web3_proxy.split("/rpc/").next().unwrap(); + + format!("{}/health", x) + } else { + format!("{}/health", other_web3_proxy) + }; + let error_sender = error_sender.clone(); let loop_f = a_loop( From 0c387af58912a82331fde44e279ee9039101a3b5 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Wed, 25 Jan 2023 00:27:07 -0800 Subject: [PATCH 66/80] dedup keys --- web3_proxy/src/bin/web3_proxy_cli/main.rs | 11 +++++++++-- web3_proxy/src/pagerduty.rs | 16 +++++++++++++++- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs index d46e9c1d..70cbf73c 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/main.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs @@ -23,6 +23,8 @@ use log::{error, info, warn}; use pagerduty_rs::eventsv2sync::EventsV2 as PagerdutySyncEventsV2; use pagerduty_rs::types::{AlertTrigger, AlertTriggerPayload}; use pagerduty_rs::{eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, types::Event}; +use std::collections::hash_map::DefaultHasher; +use std::hash::{Hash, Hasher}; use std::{ fs, panic, path::Path, @@ -257,9 +259,14 @@ fn main() -> anyhow::Result<()> { } else { error!("sending panic to pagerduty: {}", panic_msg); + let mut s = DefaultHasher::new(); + panic_msg.hash(&mut s); + panic_msg.hash(&mut s); + let dedup_key = s.finish().to_string(); + let payload = AlertTriggerPayload { severity: pagerduty_rs::types::Severity::Error, - summary: panic_msg.clone(), + summary: panic_msg, source: hostname, timestamp: None, component: None, @@ -270,7 +277,7 @@ fn main() -> anyhow::Result<()> { let event = Event::AlertTrigger(AlertTrigger { payload, - dedup_key: None, + dedup_key: Some(dedup_key), images: None, links: None, client: Some(client.clone()), diff --git a/web3_proxy/src/pagerduty.rs b/web3_proxy/src/pagerduty.rs index 
24f41089..b32f3f8a 100644 --- a/web3_proxy/src/pagerduty.rs +++ b/web3_proxy/src/pagerduty.rs @@ -1,3 +1,8 @@ +use std::{ + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, +}; + use crate::config::TopConfig; use gethostname::gethostname; use pagerduty_rs::types::{AlertTrigger, AlertTriggerPayload}; @@ -53,6 +58,15 @@ pub fn pagerduty_alert( let source = source.unwrap_or_else(|| gethostname().into_string().unwrap_or("unknown".to_string())); + let mut s = DefaultHasher::new(); + summary.hash(&mut s); + client.hash(&mut s); + client_url.hash(&mut s); + component.hash(&mut s); + group.hash(&mut s); + class.hash(&mut s); + let dedup_key = s.finish().to_string(); + let payload = AlertTriggerPayload { severity, summary, @@ -66,7 +80,7 @@ pub fn pagerduty_alert( AlertTrigger { payload, - dedup_key: None, + dedup_key: Some(dedup_key), images: None, links: None, client: Some(client), From 0c879c56137aadfead26201bdf1d0df76b6c6b4d Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Wed, 25 Jan 2023 00:45:49 -0800 Subject: [PATCH 67/80] one less unwrap --- web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs index c04f350d..ab670fa6 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs @@ -57,7 +57,7 @@ pub async fn main( .json::>>() .await? .result - .unwrap(); + .context(format!("error fetching block from {}", rpc))?; // check the parent because b and c might not be as fast as a let parent_hash = a.parent_hash; From e85965ec12d9c7bbf89b531d00376fc5d725d191 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Wed, 25 Jan 2023 01:00:28 -0800 Subject: [PATCH 68/80] more context on everything --- web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs | 8 +++++--- web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs | 8 ++++---- web3_proxy/src/pagerduty.rs | 1 + 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs index ab670fa6..1421afd3 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs @@ -53,11 +53,13 @@ pub async fn main( .post(&rpc) .json(&block_by_number_request) .send() - .await? + .await + .context(format!("error fetching block from {}", rpc))? .json::>>() - .await? + .await + .context(format!("error parsing block from {}", rpc))? 
.result - .context(format!("error fetching block from {}", rpc))?; + .context(format!("no block from {}", rpc))?; // check the parent because b and c might not be as fast as a let parent_hash = a.parent_hash; diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs index cd14e351..b9a77937 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs @@ -95,7 +95,7 @@ impl SentrydSubCommand { } while let Some(err) = error_receiver.recv().await { - log::log!(err.level, "check failed: {:?}", err); + log::log!(err.level, "check failed: {:#?}", err); if matches!(err.level, log::Level::Error) { let alert = pagerduty_alert( @@ -108,20 +108,20 @@ impl SentrydSubCommand { Some("web3-proxy-sentry".to_string()), pagerduty_rs::types::Severity::Error, None, - format!("{}", err.anyhow), + format!("{:#?}", err.anyhow), None, ); if let Some(pagerduty_async) = pagerduty_async.as_ref() { info!( - "sending to pagerduty: {}", + "sending to pagerduty: {:#}", serde_json::to_string_pretty(&alert)? ); if let Err(err) = pagerduty_async.event(Event::AlertTrigger(alert)).await { - error!("Failed sending to pagerduty: {}", err); + error!("Failed sending to pagerduty: {:#?}", err); } } } diff --git a/web3_proxy/src/pagerduty.rs b/web3_proxy/src/pagerduty.rs index b32f3f8a..75675ec8 100644 --- a/web3_proxy/src/pagerduty.rs +++ b/web3_proxy/src/pagerduty.rs @@ -59,6 +59,7 @@ pub fn pagerduty_alert( source.unwrap_or_else(|| gethostname().into_string().unwrap_or("unknown".to_string())); let mut s = DefaultHasher::new(); + // TODO: include severity here? summary.hash(&mut s); client.hash(&mut s); client_url.hash(&mut s); From f2260ecdba3cdbf5f990885d1f219b8832d117ab Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Wed, 25 Jan 2023 10:01:36 -0800 Subject: [PATCH 69/80] don't require hard limit send --- web3_proxy/src/rpcs/request.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/web3_proxy/src/rpcs/request.rs b/web3_proxy/src/rpcs/request.rs index 87e79ba8..d7f2aaf9 100644 --- a/web3_proxy/src/rpcs/request.rs +++ b/web3_proxy/src/rpcs/request.rs @@ -339,9 +339,7 @@ impl OpenRequestHandle { trace!("retry {} at: {:?}", self.conn, retry_at); - hard_limit_until - .send(retry_at) - .expect("sending hard limit retry times should always work"); + hard_limit_until.send_replace(retry_at); } } From d5f2d6eb18961c960d11460fd0539b0c8efa7f21 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Wed, 25 Jan 2023 14:04:06 -0800 Subject: [PATCH 70/80] improve pager duty errors for smarter deduping --- web3_proxy/src/bin/web3_proxy_cli/main.rs | 61 +-------- .../src/bin/web3_proxy_cli/pagerduty.rs | 13 +- .../src/bin/web3_proxy_cli/sentryd/compare.rs | 94 ++++++++----- .../src/bin/web3_proxy_cli/sentryd/mod.rs | 78 ++++++++--- .../src/bin/web3_proxy_cli/sentryd/simple.rs | 52 ++++++- web3_proxy/src/config.rs | 6 +- web3_proxy/src/pagerduty.rs | 129 ++++++++++++++++-- 7 files changed, 293 insertions(+), 140 deletions(-) diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs index 70cbf73c..852334f9 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/main.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs @@ -18,19 +18,16 @@ mod user_import; use anyhow::Context; use argh::FromArgs; use ethers::types::U256; -use gethostname::gethostname; -use log::{error, info, warn}; +use log::{info, warn}; +use pagerduty_rs::eventsv2async::EventsV2 as PagerdutyAsyncEventsV2; use 
pagerduty_rs::eventsv2sync::EventsV2 as PagerdutySyncEventsV2; -use pagerduty_rs::types::{AlertTrigger, AlertTriggerPayload}; -use pagerduty_rs::{eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, types::Event}; -use std::collections::hash_map::DefaultHasher; -use std::hash::{Hash, Hasher}; use std::{ fs, panic, path::Path, sync::atomic::{self, AtomicUsize}, }; use tokio::runtime; +use web3_proxy::pagerduty::panic_handler; use web3_proxy::{ app::{get_db, get_migrated_db, APP_USER_AGENT}, config::TopConfig, @@ -237,57 +234,13 @@ fn main() -> anyhow::Result<()> { (None, None) }; - // panic handler that sends to pagerduty - // TODO: there is a `pagerduty_panic` module that looks like it would work with minor tweaks, but ethers-rs panics when a websocket exit and that would fire too many alerts - + // panic handler that sends to pagerduty. + // TODO: use the sentry handler if no pager duty. use default if no sentry if let Some(pagerduty_sync) = pagerduty_sync { - let client = top_config - .as_ref() - .map(|top_config| format!("web3-proxy chain #{}", top_config.app.chain_id)) - .unwrap_or_else(|| format!("web3-proxy w/o chain")); - - let client_url = top_config - .as_ref() - .and_then(|x| x.app.redirect_public_url.clone()); + let top_config = top_config.clone(); panic::set_hook(Box::new(move |x| { - let hostname = gethostname().into_string().unwrap_or("unknown".to_string()); - let panic_msg = format!("{} {:?}", x, x); - - if panic_msg.starts_with("panicked at 'WS Server panic") { - info!("Underlying library {}", panic_msg); - } else { - error!("sending panic to pagerduty: {}", panic_msg); - - let mut s = DefaultHasher::new(); - panic_msg.hash(&mut s); - panic_msg.hash(&mut s); - let dedup_key = s.finish().to_string(); - - let payload = AlertTriggerPayload { - severity: pagerduty_rs::types::Severity::Error, - summary: panic_msg, - source: hostname, - timestamp: None, - component: None, - group: Some("web3-proxy".to_string()), - class: Some("panic".to_string()), - custom_details: None::<()>, - }; - - let event = Event::AlertTrigger(AlertTrigger { - payload, - dedup_key: Some(dedup_key), - images: None, - links: None, - client: Some(client.clone()), - client_url: client_url.clone(), - }); - - if let Err(err) = pagerduty_sync.event(event) { - error!("Failed sending panic to pagerduty: {}", err); - } - } + panic_handler(top_config.clone(), &pagerduty_sync, x); })); } diff --git a/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs b/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs index 3c43daa3..7e55103a 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs @@ -3,7 +3,7 @@ use log::{error, info}; use pagerduty_rs::{eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, types::Event}; use web3_proxy::{ config::TopConfig, - pagerduty::{pagerduty_alert, pagerduty_event_for_config}, + pagerduty::{pagerduty_alert, pagerduty_alert_for_config}, }; #[derive(FromArgs, PartialEq, Debug, Eq)] @@ -31,11 +31,6 @@ pub struct PagerdutySubCommand { /// If there are no open incidents with this key, a new incident will be created. /// If there is an open incident with a matching key, the new event will be appended to that incident's Alerts log as an additional Trigger log entry. dedup_key: Option, - - #[argh(option, default = "\"web3-proxy\".to_string()")] - /// a cluster or grouping of sources. - /// For example, sources "ethereum-proxy" and "polygon-proxy" might both be part of "web3-proxy". 
- group: String, } impl PagerdutySubCommand { @@ -47,11 +42,10 @@ impl PagerdutySubCommand { // TODO: allow customizing severity let event = top_config .map(|top_config| { - pagerduty_event_for_config( + pagerduty_alert_for_config( self.class.clone(), self.component.clone(), None::<()>, - Some(self.group.clone()), pagerduty_rs::types::Severity::Error, self.summary.clone(), None, @@ -62,11 +56,10 @@ impl PagerdutySubCommand { pagerduty_alert( self.chain_id, self.class, - "web3-proxy".to_string(), + None, None, self.component, None::<()>, - Some(self.group), pagerduty_rs::types::Severity::Error, None, self.summary, diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs index 1421afd3..5333c738 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs @@ -7,6 +7,8 @@ use serde::{Deserialize, Serialize}; use serde_json::json; use web3_proxy::jsonrpc::JsonRpcErrorData; +use super::{SentrydErrorBuilder, SentrydResult}; + #[derive(Debug, Deserialize, Serialize)] struct JsonRpcResponse { // pub jsonrpc: String, @@ -35,11 +37,12 @@ impl From> for AbbreviatedBlock { } pub async fn main( + error_builder: SentrydErrorBuilder, rpc: String, others: Vec, max_age: i64, max_lag: i64, -) -> anyhow::Result<()> { +) -> SentrydResult { let client = reqwest::Client::new(); let block_by_number_request = json!({ @@ -54,30 +57,48 @@ pub async fn main( .json(&block_by_number_request) .send() .await - .context(format!("error fetching block from {}", rpc))? + .context(format!("error querying block from {}", rpc)) + .map_err(|x| error_builder.build(x))?; + + // TODO: capture response headers now in case of error. store them in the extra data on the pager duty alert + let headers = format!("{:#?}", a.headers()); + + let a = a .json::>>() .await - .context(format!("error parsing block from {}", rpc))? - .result - .context(format!("no block from {}", rpc))?; + .context(format!("error parsing block from {}", rpc)) + .map_err(|x| error_builder.build(x))?; + + let a = if let Some(block) = a.result { + block + } else if let Some(err) = a.error { + return error_builder.result( + anyhow::anyhow!("headers: {:#?}. err: {:#?}", headers, err) + .context(format!("jsonrpc error from {}", rpc)), + ); + } else { + return error_builder + .result(anyhow!("{:#?}", a).context(format!("empty response from {}", rpc))); + }; // check the parent because b and c might not be as fast as a let parent_hash = a.parent_hash; - let rpc_block = check_rpc(parent_hash, client.clone(), rpc.clone()) + let rpc_block = check_rpc(parent_hash, client.clone(), rpc.to_string()) .await - .context("Error while querying primary rpc")?; + .context(format!("Error while querying primary rpc: {}", rpc)) + .map_err(|err| error_builder.build(err))?; let fs = FuturesUnordered::new(); for other in others.iter() { - let f = check_rpc(parent_hash, client.clone(), other.clone()); + let f = check_rpc(parent_hash, client.clone(), other.to_string()); fs.push(tokio::spawn(f)); } let other_check: Vec<_> = fs.collect().await; if other_check.is_empty() { - return Err(anyhow::anyhow!("No other RPCs to check!")); + return error_builder.result(anyhow::anyhow!("No other RPCs to check!")); } // TODO: collect into a counter instead? 
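
A pattern in the compare.rs hunks above deserves a note: reqwest's Response::json() consumes the response, so the headers are snapshotted first and attached to any later parse failure. A hypothetical standalone version of the same idea (names and error handling simplified; the real code routes errors through SentrydErrorBuilder):

    use anyhow::Context;

    async fn fetch_json(
        client: &reqwest::Client,
        rpc: &str,
        body: &serde_json::Value,
    ) -> anyhow::Result<serde_json::Value> {
        let response = client
            .post(rpc)
            .json(body)
            .send()
            .await
            .context(format!("error querying block from {}", rpc))?;

        // capture the headers before .json() consumes the response,
        // so error reports can include what the server actually sent
        let headers = format!("{:#?}", response.headers());

        response
            .json::<serde_json::Value>()
            .await
            .with_context(|| format!("error parsing block from {}. headers: {}", rpc, headers))
    }
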
@@ -99,22 +120,27 @@ pub async fn main( match duration_since.abs().cmp(&max_lag) { std::cmp::Ordering::Less | std::cmp::Ordering::Equal => {} std::cmp::Ordering::Greater => match duration_since.cmp(&0) { - std::cmp::Ordering::Equal => unimplemented!(), + std::cmp::Ordering::Equal => { + unimplemented!("we already checked that they are not equal") + } std::cmp::Ordering::Less => { - return Err(anyhow::anyhow!( + return error_builder.result(anyhow::anyhow!( "Our RPC is too far ahead ({} s)! Something might be wrong.\n{:#}\nvs\n{:#}", duration_since.abs(), json!(rpc_block), json!(newest_other), - )); + ).context(format!("{} is too far ahead", rpc))); } std::cmp::Ordering::Greater => { - return Err(anyhow::anyhow!( - "Our RPC is too far behind ({} s)!\n{:#}\nvs\n{:#}", - duration_since, - json!(rpc_block), - json!(newest_other), - )); + return error_builder.result( + anyhow::anyhow!( + "Behind {} s!\n{:#}\nvs\n{:#}", + duration_since, + json!(rpc_block), + json!(newest_other), + ) + .context(format!("{} is too far behind", rpc)), + ); } }, } @@ -130,25 +156,31 @@ pub async fn main( std::cmp::Ordering::Greater => match duration_since.cmp(&0) { std::cmp::Ordering::Equal => unimplemented!(), std::cmp::Ordering::Less => { - return Err(anyhow::anyhow!( - "Our clock is too far behind ({} s)! Something might be wrong.\n{:#}\nvs\n{:#}", - block_age.abs(), - json!(now), - json!(newest_other), - )); + return error_builder.result( + anyhow::anyhow!( + "Clock is behind {}s! Something might be wrong.\n{:#}\nvs\n{:#}", + block_age.abs(), + json!(now), + json!(newest_other), + ) + .context(format!("Clock is too far behind on {}!", rpc)), + ); } std::cmp::Ordering::Greater => { - return Err(anyhow::anyhow!( - "block is too old ({} s)!\n{:#}\nvs\n{:#}", - block_age, - json!(now), - json!(newest_other), - )); + return error_builder.result( + anyhow::anyhow!( + "block is too old ({}s)!\n{:#}\nvs\n{:#}", + block_age, + json!(now), + json!(newest_other), + ) + .context(format!("block is too old on {}!", rpc)), + ); } }, } } else { - return Err(anyhow::anyhow!("No other RPC times to check!")); + return error_builder.result(anyhow::anyhow!("No other RPC times to check!")); } debug!("rpc comparison ok: {:#}", json!(rpc_block)); diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs index b9a77937..3f32e68b 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs @@ -9,6 +9,7 @@ use futures::{ }; use log::{error, info}; use pagerduty_rs::{eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, types::Event}; +use serde_json::json; use std::time::Duration; use tokio::sync::mpsc; use tokio::time::{interval, MissedTickBehavior}; @@ -48,12 +49,41 @@ pub struct SentrydSubCommand { } #[derive(Debug)] -struct Error { +pub struct SentrydError { + /// The class/type of the event, for example ping failure or cpu load + class: String, + /// Errors will send a pagerduty alert. 
others just give log messages + level: log::Level, + /// A short summary that should be mostly static + summary: String, + /// Lots of detail about the error + extra: Option, +} + +/// helper for creating SentrydErrors +#[derive(Clone)] +pub struct SentrydErrorBuilder { class: String, level: log::Level, - anyhow: anyhow::Error, } +impl SentrydErrorBuilder { + fn build(&self, err: anyhow::Error) -> SentrydError { + SentrydError { + class: self.class.to_owned(), + level: self.level.to_owned(), + summary: format!("{}", err), + extra: Some(json!(format!("{:#?}", err))), + } + } + + fn result(&self, err: anyhow::Error) -> SentrydResult { + Err(self.build(err)) + } +} + +type SentrydResult = Result<(), SentrydError>; + impl SentrydSubCommand { pub async fn main( self, @@ -86,7 +116,7 @@ impl SentrydSubCommand { let mut handles = FuturesUnordered::new(); // channels and a task for sending errors to logs/pagerduty - let (error_sender, mut error_receiver) = mpsc::channel::(10); + let (error_sender, mut error_receiver) = mpsc::channel::(10); { let error_handler_f = async move { @@ -101,14 +131,13 @@ impl SentrydSubCommand { let alert = pagerduty_alert( Some(chain_id), Some(err.class), - "web3-proxy-sentry".to_string(), - None, - None, - None::<()>, Some("web3-proxy-sentry".to_string()), + None, + None, + err.extra, pagerduty_rs::types::Severity::Error, None, - format!("{:#?}", err.anyhow), + err.summary, None, ); @@ -140,12 +169,15 @@ impl SentrydSubCommand { let url = format!("{}/health", web3_proxy); let error_sender = error_sender.clone(); + // TODO: what timeout? + let timeout = Duration::from_secs(1); + let loop_f = a_loop( "main /health", seconds, log::Level::Error, error_sender, - move || simple::main(url.clone()), + move |error_builder| simple::main(error_builder, url.clone(), timeout), ); handles.push(tokio::spawn(loop_f)); @@ -162,12 +194,15 @@ impl SentrydSubCommand { let error_sender = error_sender.clone(); + // TODO: what timeout? + let timeout = Duration::from_secs(1); + let loop_f = a_loop( "other /health", seconds, log::Level::Warn, error_sender, - move || simple::main(url.clone()), + move |error_builder| simple::main(error_builder, url.clone(), timeout), ); handles.push(tokio::spawn(loop_f)); @@ -189,7 +224,9 @@ impl SentrydSubCommand { seconds, log::Level::Error, error_sender, - move || compare::main(rpc.clone(), others.clone(), max_age, max_lag), + move |error_builder| { + compare::main(error_builder, rpc.clone(), others.clone(), max_age, max_lag) + }, ); handles.push(tokio::spawn(loop_f)); @@ -209,12 +246,17 @@ async fn a_loop( class: &str, seconds: u64, error_level: log::Level, - error_sender: mpsc::Sender, - f: impl Fn() -> T, + error_sender: mpsc::Sender, + f: impl Fn(SentrydErrorBuilder) -> T, ) -> anyhow::Result<()> where - T: Future> + Send + 'static, + T: Future + Send + 'static, { + let error_builder = SentrydErrorBuilder { + class: class.to_owned(), + level: error_level, + }; + let mut interval = interval(Duration::from_secs(seconds)); // TODO: should we warn if there are delays? 
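
Taken together, the mod.rs hunks above converge on a small builder that stamps a class and log level onto every failed check. A condensed, self-contained sketch of that shape (CheckError stands in for SentrydError; the field names follow the diff):

    use serde_json::{json, Value};

    #[derive(Clone)]
    struct ErrorBuilder {
        class: String,
        level: log::Level,
    }

    struct CheckError {
        class: String,
        level: log::Level,
        summary: String,
        extra: Option<Value>,
    }

    impl ErrorBuilder {
        fn build(&self, err: anyhow::Error) -> CheckError {
            CheckError {
                class: self.class.clone(),
                level: self.level,
                // Display ({}) keeps the outermost context for the summary;
                // alternate Debug ({:#?}) keeps the whole context chain
                summary: format!("{}", err),
                extra: Some(json!(format!("{:#?}", err))),
            }
        }

        fn result(&self, err: anyhow::Error) -> Result<(), CheckError> {
            Err(self.build(err))
        }
    }

Because the builder is Clone and owns only a String and a Level, each check loop captures its own copy and every alert produced by that loop carries the same class and level.
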
@@ -223,13 +265,7 @@ where loop { interval.tick().await; - if let Err(err) = f().await { - let err = Error { - class: class.to_string(), - level: error_level, - anyhow: err, - }; - + if let Err(err) = f(error_builder.clone()).await { error_sender.send(err).await?; }; } diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/simple.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/simple.rs index 1904553d..54dffde4 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/simple.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/simple.rs @@ -1,22 +1,60 @@ +use std::time::Duration; + +use super::{SentrydErrorBuilder, SentrydResult}; use anyhow::Context; use log::{debug, trace}; +use tokio::time::Instant; /// GET the url and return an error if it wasn't a success -pub async fn main(url: String) -> anyhow::Result<()> { +pub async fn main( + error_builder: SentrydErrorBuilder, + url: String, + timeout: Duration, +) -> SentrydResult { + let start = Instant::now(); + let r = reqwest::get(&url) .await - .context(format!("Failed GET {}", url))?; + .context(format!("Failed GET {}", &url)) + .map_err(|x| error_builder.build(x))?; + + let elapsed = start.elapsed(); + + if elapsed > timeout { + return error_builder.result( + anyhow::anyhow!( + "query took longer than {}ms ({}ms): {:#?}", + timeout.as_millis(), + elapsed.as_millis(), + r + ) + .context(format!("fetching {} took too long", &url)), + ); + } + + // TODO: what should we do if we get rate limited here? if r.status().is_success() { - // warn if latency is high? - debug!("{} is healthy", url); + debug!("{} is healthy", &url); trace!("Successful {:#?}", r); return Ok(()); } - let debug_str = format!("{:#?}", r); + // TODO: capture headers? or is that already part of r? + let detail = format!("{:#?}", r); - let body = r.text().await?; + let summary = format!("{} is unhealthy: {}", &url, r.status()); - Err(anyhow::anyhow!("{}: {}", debug_str, body)) + let body = r + .text() + .await + .context(detail.clone()) + .context(summary.clone()) + .map_err(|x| error_builder.build(x))?; + + error_builder.result( + anyhow::anyhow!("body: {}", body) + .context(detail) + .context(summary), + ) } diff --git a/web3_proxy/src/config.rs b/web3_proxy/src/config.rs index 67f48870..9e40db5a 100644 --- a/web3_proxy/src/config.rs +++ b/web3_proxy/src/config.rs @@ -39,7 +39,7 @@ pub struct CliConfig { pub cookie_key_filename: String, } -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct TopConfig { pub app: AppConfig, pub balanced_rpcs: HashMap, @@ -52,7 +52,7 @@ pub struct TopConfig { /// shared configuration between Web3Connections // TODO: no String, only &str -#[derive(Debug, Default, Deserialize)] +#[derive(Clone, Debug, Default, Deserialize)] pub struct AppConfig { /// Request limit for allowed origins for anonymous users. /// These requests get rate limited by IP. 
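
The simple.rs change above folds latency into health: a 200 that arrives after the deadline is reported as a failure, since a sentry cares how fast the proxy answers, not just whether it answers. As a rough sketch, with the error type simplified to String (the real code returns SentrydResult through the builder):

    use std::time::Duration;
    use tokio::time::Instant;

    async fn health_check(url: &str, timeout: Duration) -> Result<(), String> {
        let start = Instant::now();

        let response = reqwest::get(url)
            .await
            .map_err(|err| format!("GET {} failed: {}", url, err))?;

        // a slow success is still a failure for a latency-sensitive sentry
        let elapsed = start.elapsed();
        if elapsed > timeout {
            return Err(format!(
                "{} answered in {}ms (limit {}ms)",
                url,
                elapsed.as_millis(),
                timeout.as_millis()
            ));
        }

        if response.status().is_success() {
            Ok(())
        } else {
            Err(format!("{} is unhealthy: {}", url, response.status()))
        }
    }
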
@@ -190,7 +190,7 @@ fn default_response_cache_max_bytes() -> usize { } /// Configuration for a backend web3 RPC server -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct Web3ConnectionConfig { /// simple way to disable a connection without deleting the row #[serde(default)] diff --git a/web3_proxy/src/pagerduty.rs b/web3_proxy/src/pagerduty.rs index 75675ec8..9eacbff9 100644 --- a/web3_proxy/src/pagerduty.rs +++ b/web3_proxy/src/pagerduty.rs @@ -1,19 +1,122 @@ +use crate::config::TopConfig; +use gethostname::gethostname; +use log::{debug, error}; +use pagerduty_rs::eventsv2sync::EventsV2 as PagerdutySyncEventsV2; +use pagerduty_rs::types::{AlertTrigger, AlertTriggerPayload, Event}; +use serde::Serialize; use std::{ collections::hash_map::DefaultHasher, hash::{Hash, Hasher}, + panic::PanicInfo, }; - -use crate::config::TopConfig; -use gethostname::gethostname; -use pagerduty_rs::types::{AlertTrigger, AlertTriggerPayload}; -use serde::Serialize; use time::OffsetDateTime; -pub fn pagerduty_event_for_config( +/* + + let client = top_config + .as_ref() + .map(|top_config| format!("web3-proxy chain #{}", top_config.app.chain_id)) + .unwrap_or_else(|| format!("web3-proxy w/o chain")); + + let client_url = top_config + .as_ref() + .and_then(|x| x.app.redirect_public_url.clone()); + + panic::set_hook(Box::new(move |x| { + let hostname = gethostname().into_string().unwrap_or("unknown".to_string()); + let panic_msg = format!("{} {:?}", x, x); + + if panic_msg.starts_with("panicked at 'WS Server panic") { + info!("Underlying library {}", panic_msg); + } else { + error!("sending panic to pagerduty: {}", panic_msg); + + let mut s = DefaultHasher::new(); + panic_msg.hash(&mut s); + panic_msg.hash(&mut s); + let dedup_key = s.finish().to_string(); + + let payload = AlertTriggerPayload { + severity: pagerduty_rs::types::Severity::Error, + summary: panic_msg, + source: hostname, + timestamp: None, + component: None, + group: Some("web3-proxy".to_string()), + class: Some("panic".to_string()), + custom_details: None::<()>, + }; + + let event = Event::AlertTrigger(AlertTrigger { + payload, + dedup_key: Some(dedup_key), + images: None, + links: None, + client: Some(client.clone()), + client_url: client_url.clone(), + }); + + if let Err(err) = pagerduty_sync.event(event) { + error!("Failed sending panic to pagerduty: {}", err); + } + } + })); + +*/ + +pub fn panic_handler( + top_config: Option, + pagerduty_sync: &PagerdutySyncEventsV2, + panic_info: &PanicInfo, +) { + let summary = format!("{}", panic_info); + + let details = format!("{:#?}", panic_info); + + if summary.starts_with("panicked at 'WS Server panic") { + // the ethers-rs library panics when websockets disconnect. this isn't a panic we care about reporting + debug!("Underlying library {}", details); + return; + } + + let class = Some("panic".to_string()); + + let alert = if let Some(top_config) = top_config { + pagerduty_alert_for_config( + class, + None, + Some(details), + pagerduty_rs::types::Severity::Critical, + summary, + None, + top_config, + ) + } else { + pagerduty_alert( + None, + class, + None, + None, + None, + Some(details), + pagerduty_rs::types::Severity::Critical, + None, + summary, + None, + ) + }; + + let event = Event::AlertTrigger(alert); + + if let Err(err) = pagerduty_sync.event(event) { + error!("Failed sending alert to pagerduty! 
From f80390c88a87306c3271c8c0c076177ae68eb5ff Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Wed, 25 Jan 2023 14:11:20 -0800
Subject: [PATCH 71/80] upgrade sentry and fix pagerduty features so we do not
 need openssl

---
 Cargo.lock            | 206 ++++++------------------------------------
 Dockerfile            |  12 ---
 TODO.md               |   1 +
 web3_proxy/Cargo.toml |   4 +-
 4 files changed, 30 insertions(+), 193 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 790c2b04..80281cfa 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -63,9 +63,9 @@ dependencies = [
 
 [[package]]
 name = "ahash"
-version = "0.8.2"
+version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf6ccdb167abbf410dcb915cabd428929d7f6a04980b54a11f26a39f1c7f7107"
+checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f"
 dependencies = [
  "cfg-if",
  "once_cell",
@@ -1014,16 +1014,6 @@ dependencies = [
  "unicode-segmentation",
 ]
 
-[[package]]
-name = "core-foundation"
-version = "0.9.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146"
-dependencies = [
- "core-foundation-sys",
- "libc",
-]
-
 [[package]]
 name = "core-foundation-sys"
 version = "0.8.3"
@@ -1952,21 +1942,6 @@
 version = "1.0.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
 
-[[package]]
-name = "foreign-types"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
-dependencies = [
- "foreign-types-shared",
-]
-
-[[package]]
-name = "foreign-types-shared"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
-
 [[package]]
 name = "form_urlencoded"
 version = "1.1.0"
@@ -2280,7 +2255,7 @@
 version = "0.13.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e"
 dependencies = [
- "ahash 0.8.2",
+ "ahash 0.8.3",
  "serde",
 ]
 
@@ -2495,19 +2470,6 @@ dependencies = [
  "tokio-rustls",
 ]
 
-[[package]]
-name = "hyper-tls"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = 
"d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper", - "native-tls", - "tokio", - "tokio-native-tls", -] - [[package]] name = "iana-time-zone" version = "0.1.53" @@ -3006,24 +2968,6 @@ dependencies = [ "getrandom", ] -[[package]] -name = "native-tls" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - [[package]] name = "new_debug_unreachable" version = "1.0.4" @@ -3245,51 +3189,6 @@ dependencies = [ "syn", ] -[[package]] -name = "openssl" -version = "0.10.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" -dependencies = [ - "bitflags", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - -[[package]] -name = "openssl-sys" -version = "0.9.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" -dependencies = [ - "autocfg", - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "os_info" version = "3.5.1" @@ -3483,9 +3382,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.3" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4257b4a04d91f7e9e6290be5d3da4804dd5784fafde3a497d73eb2b4a158c30a" +checksum = "4ab62d2fa33726dbe6321cc97ef96d8cde531e3eeaf858a058de53a8a6d40d8f" dependencies = [ "thiserror", "ucd-trie", @@ -3493,9 +3392,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.3" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "241cda393b0cdd65e62e07e12454f1f25d57017dcc514b1514cd3c4645e3a0a6" +checksum = "8bf026e2d0581559db66d837fe5242320f525d85c76283c61f4d51a1238d65ea" dependencies = [ "pest", "pest_generator", @@ -3503,9 +3402,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.3" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46b53634d8c8196302953c74d5352f33d0c512a9499bd2ce468fc9f4128fa27c" +checksum = "2b27bd18aa01d91c8ed2b61ea23406a676b42d82609c6e2581fba42f0c15f17f" dependencies = [ "pest", "pest_meta", @@ -3516,9 +3415,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.5.3" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef4f1332a8d4678b41966bb4cc1d0676880e84183a1ecc3f4b69f03e99c7a51" +checksum = "9f02b677c1859756359fc9983c2e56a0237f18624a3789528804406b7e915e5d" dependencies = [ "once_cell", "pest", @@ -3995,12 +3894,10 @@ dependencies = [ "http-body", "hyper", "hyper-rustls", - "hyper-tls", "ipnet", "js-sys", 
"log", "mime", - "native-tls", "once_cell", "percent-encoding", "pin-project-lite", @@ -4010,7 +3907,6 @@ dependencies = [ "serde_json", "serde_urlencoded", "tokio", - "tokio-native-tls", "tokio-rustls", "tower-service", "url", @@ -4262,15 +4158,6 @@ dependencies = [ "syn", ] -[[package]] -name = "schannel" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" -dependencies = [ - "windows-sys", -] - [[package]] name = "scheduled-thread-pool" version = "0.2.6" @@ -4494,29 +4381,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "security-framework" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645926f31b250a2dca3c232496c2d898d91036e45ca0e97e0e2390c54e11be36" -dependencies = [ - "bitflags", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "semver" version = "1.0.16" @@ -4534,9 +4398,9 @@ checksum = "930c0acf610d3fdb5e2ab6213019aaa04e227ebe9547b0649ba599b16d788bd7" [[package]] name = "sentry" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ad137b9df78294b98cab1a650bef237cc6c950e82e5ce164655e674d07c5cc" +checksum = "a6097dc270a9c4555c5d6222ed243eaa97ff38e29299ed7c5cb36099033c604e" dependencies = [ "httpdate", "reqwest", @@ -4554,9 +4418,9 @@ dependencies = [ [[package]] name = "sentry-anyhow" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55adcceaad4189af35d82c3c51613c0c372f15c25f42f70bf23c9c3ede223e1" +checksum = "45a52d909ea1f5107fe29aa86581da01b88bde811fbde875773237c1596fbab6" dependencies = [ "anyhow", "sentry-backtrace", @@ -4565,9 +4429,9 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afe4800806552aab314129761d5d3b3d422284eca3de2ab59e9fd133636cbd3d" +checksum = "9d92d1e4d591534ae4f872d6142f3b500f4ffc179a6aed8a3e86c7cc96d10a6a" dependencies = [ "backtrace", "once_cell", @@ -4577,9 +4441,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a42938426670f6e7974989cd1417837a96dd8bbb01567094f567d6acb360bf88" +checksum = "3afa877b1898ff67dd9878cf4bec4e53cef7d3be9f14b1fc9e4fcdf36f8e4259" dependencies = [ "hostname", "libc", @@ -4591,9 +4455,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4df9b9d8de2658a1ecd4e45f7b06c80c5dd97b891bfbc7c501186189b7e9bbdf" +checksum = "fc43eb7e4e3a444151a0fe8a0e9ce60eabd905dae33d66e257fa26f1b509c1bd" dependencies = [ "once_cell", "rand", @@ -4604,9 +4468,9 @@ dependencies = [ [[package]] name = "sentry-log" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7518096b31fa4075d1bbab79ad62da3258f6c67bafeb4a8b2b3f803695b9205e" +checksum = "598aefe14750bcec956adebc8992dd432f4e22c12cd524633963113864aa39b4" 
dependencies = [ "log", "sentry-core", @@ -4614,9 +4478,9 @@ dependencies = [ [[package]] name = "sentry-panic" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0af37b8500f273e511ebd6eb0d342ff7937d64ce3f134764b2b4653112d48cb4" +checksum = "ccab4fab11e3e63c45f4524bee2e75cde39cdf164cb0b0cbe6ccd1948ceddf66" dependencies = [ "sentry-backtrace", "sentry-core", @@ -4624,9 +4488,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccc95faa4078768a6bf8df45e2b894bbf372b3dbbfb364e9429c1c58ab7545c6" +checksum = "f63708ec450b6bdcb657af760c447416d69c38ce421f34e5e2e9ce8118410bc7" dependencies = [ "debugid", "getrandom", @@ -5385,16 +5249,6 @@ dependencies = [ "syn", ] -[[package]] -name = "tokio-native-tls" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" -dependencies = [ - "native-tls", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.23.4" @@ -5837,12 +5691,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - [[package]] name = "version_check" version = "0.9.4" diff --git a/Dockerfile b/Dockerfile index ba7c30be..eb74e040 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,5 @@ FROM rust:1-bullseye as builder -# our app uses rust-tls, but the sentry crate only uses openssl -RUN set -eux; \ - apt-get update; \ - apt-get install -y libssl-dev; \ - rm -rf /var/lib/apt/lists/* - ENV PATH /root/.foundry/bin:$PATH RUN curl -L https://foundry.paradigm.xyz | bash && foundryup @@ -18,12 +12,6 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \ FROM debian:bullseye-slim -# our app uses rust-tls, but the sentry crate only uses openssl -RUN set -eux; \ - apt-get update; \ - apt-get install -y libssl-dev; \ - rm -rf /var/lib/apt/lists/* - COPY --from=builder /opt/bin/* /usr/local/bin/ ENTRYPOINT ["web3_proxy_cli"] diff --git a/TODO.md b/TODO.md index 1ad82e88..231b7839 100644 --- a/TODO.md +++ b/TODO.md @@ -322,6 +322,7 @@ These are not yet ordered. There might be duplicates. We might not actually need - [x] improve handling of unknown methods - [x] don't send pagerduty alerts for websocket panics - [x] improve waiting for sync when rate limited +- [x] improve pager duty errors for smarter deduping - [-] proxy mode for benchmarking all backends - [-] proxy mode for sending to multiple backends - [-] let users choose a % of reverts to log (or maybe x/second). 
someone like curve logging all reverts will be a BIG database very quickly
diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml
index 962c4147..433715e3 100644
--- a/web3_proxy/Cargo.toml
+++ b/web3_proxy/Cargo.toml
@@ -53,13 +53,13 @@ moka = { version = "0.9.6", default-features = false, features = ["future"] }
 notify = "5.0.0"
 num = "0.4.0"
 num-traits = "0.2.15"
-pagerduty-rs = { version = "0.1.6", features = ["async", "sync"] }
+pagerduty-rs = { version = "0.1.6", default-features = false, features = ["async", "rustls", "sync"] }
 parking_lot = { version = "0.12.1", features = ["arc_lock"] }
 proctitle = "0.1.1"
 regex = "1.7.1"
 reqwest = { version = "0.11.14", default-features = false, features = ["json", "tokio-rustls"] }
 rustc-hash = "1.1.0"
-sentry = { version = "0.29.1", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] }
+sentry = { version = "0.29.2", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] }
 serde = { version = "1.0.152", features = [] }
 serde_json = { version = "1.0.91", default-features = false, features = ["alloc", "raw_value"] }
 serde_prometheus = "0.1.6"

From 43b5652ba8efaf560fc4d111c3cb5658160b611c Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Wed, 25 Jan 2023 14:24:38 -0800
Subject: [PATCH 72/80] better handling when rate limited

---
 .../src/bin/web3_proxy_cli/sentryd/compare.rs | 33 ++++++++++++++-----
 .../src/bin/web3_proxy_cli/sentryd/mod.rs     | 14 +++++---
 2 files changed, 34 insertions(+), 13 deletions(-)

diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
index 5333c738..3e6cb89f 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
@@ -201,22 +201,37 @@ async fn check_rpc(
         "params": [block_hash, false],
     });
 
-    // TODO: don't unwrap! don't use the try operator
-    let response: JsonRpcResponse<Option<ArcBlock>> = client
+    let response = client
         .post(rpc.clone())
         .json(&block_by_hash_request)
         .send()
         .await
-        .context(format!("awaiting response from {}", rpc))?
-        .json()
-        .await
-        .context(format!("reading json on {}", rpc))?;
+        .context(format!("awaiting response from {}", rpc))?;
+
+    if !response.status().is_success() {
+        return Err(anyhow::anyhow!(
+            "bad response from {}: {}",
+            rpc,
+            response.status(),
+        ));
+    }
+
+    let body = response
+        .text()
+        .await
+        .context(format!("failed parsing body from {}", rpc))?;
+
+    let response_json: JsonRpcResponse<Option<ArcBlock>> = serde_json::from_str(&body)
+        .context(format!("body: {}", body))
+        .context(format!("failed parsing json from {}", rpc))?;
 
-    if let Some(result) = response.result {
+    if let Some(result) = response_json.result {
         let abbreviated = AbbreviatedBlock::from(result);
 
+        debug!("{} has {:?}@{}", rpc, abbreviated.hash, abbreviated.num);
+
         Ok(abbreviated)
-    } else if let Some(result) = response.error {
+    } else if let Some(result) = response_json.error {
         Err(anyhow!(
             "jsonrpc error during check_rpc from {}: {:#}",
             rpc,
@@ -226,7 +241,7 @@ async fn check_rpc(
         Err(anyhow!(
             "empty result during check_rpc from {}: {:#}",
             rpc,
-            json!(response)
+            json!(response_json)
         ))
     }
 }
diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
index 3f32e68b..b6a6844d 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
@@ -97,7 +97,7 @@ impl SentrydSubCommand {
             .or_else(|| top_config.map(|x| x.app.chain_id))
             .context("--config or --chain-id required")?;
 
-        let web3_proxy = self.web3_proxy.trim_end_matches("/").to_string();
+        let primary_proxy = self.web3_proxy.trim_end_matches("/").to_string();
 
         let other_proxy: Vec<_> = self
             .other_proxy
@@ -166,7 +166,7 @@ impl SentrydSubCommand {
 
         // check the main rpc's /health endpoint
         {
-            let url = format!("{}/health", web3_proxy);
+            let url = format!("{}/health", primary_proxy);
             let error_sender = error_sender.clone();
 
             // TODO: what timeout?
@@ -212,7 +212,7 @@ impl SentrydSubCommand {
         {
             let max_age = self.max_age;
             let max_lag = self.max_lag;
-            let rpc = self.web3_proxy.clone();
+            let primary_proxy = primary_proxy.clone();
             let error_sender = error_sender.clone();
 
             let mut others = other_proxy.clone();
@@ -225,7 +225,13 @@ impl SentrydSubCommand {
                 log::Level::Error,
                 error_sender,
                 move |error_builder| {
-                    compare::main(error_builder, rpc.clone(), others.clone(), max_age, max_lag)
+                    compare::main(
+                        error_builder,
+                        primary_proxy.clone(),
+                        others.clone(),
+                        max_age,
+                        max_lag,
+                    )
                 },
             );
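The reworked `check_rpc` separates three failure modes that the old `.json()` call collapsed into one: a bad HTTP status, an unreadable body, and a body that is not JSON. The pattern in isolation (a sketch; the names here are illustrative, not from this series):

    use anyhow::Context;

    async fn get_json<T: serde::de::DeserializeOwned>(
        client: &reqwest::Client,
        url: &str,
    ) -> anyhow::Result<T> {
        let response = client.get(url).send().await.context("request failed")?;

        // fail fast on HTTP errors instead of trying to parse an error page
        if !response.status().is_success() {
            return Err(anyhow::anyhow!("bad response: {}", response.status()));
        }

        // read the body as text first so a non-JSON reply (an HTML rate-limit
        // page, say) ends up in the error instead of an opaque parse failure
        let body = response.text().await.context("reading body failed")?;

        serde_json::from_str(&body).context(format!("body was not json: {}", body))
    }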
From 315ac4838deec21859cc315c319ffb1f5b4e2393 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Wed, 25 Jan 2023 14:59:11 -0800
Subject: [PATCH 73/80] include code in the error message

---
 web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
index 3e6cb89f..30de22fa 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
@@ -60,6 +60,8 @@ pub async fn main(
         .context(format!("error querying block from {}", rpc))
         .map_err(|x| error_builder.build(x))?;
 
+    // TODO: if !a.status().is_success()
+
     // TODO: capture response headers now in case of error. store them in the extra data on the pager duty alert
     let headers = format!("{:#?}", a.headers());
 
@@ -74,7 +76,7 @@ pub async fn main(
     } else if let Some(err) = a.error {
         return error_builder.result(
            anyhow::anyhow!("headers: {:#?}. 
err: {:#?}", headers, err) - .context(format!("jsonrpc error from {}", rpc)), + .context(format!("jsonrpc error from {}: code {}", rpc, err.code)), ); } else { return error_builder From f756bc7390241b8462dffeb852c89d7d29ad3d28 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Wed, 25 Jan 2023 17:42:16 -0800 Subject: [PATCH 74/80] longer timeout --- web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs index b6a6844d..72511345 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs @@ -170,7 +170,7 @@ impl SentrydSubCommand { let error_sender = error_sender.clone(); // TODO: what timeout? - let timeout = Duration::from_secs(1); + let timeout = Duration::from_secs(5); let loop_f = a_loop( "main /health", @@ -195,7 +195,7 @@ impl SentrydSubCommand { let error_sender = error_sender.clone(); // TODO: what timeout? - let timeout = Duration::from_secs(1); + let timeout = Duration::from_secs(5); let loop_f = a_loop( "other /health", From c0ab400af680e4b791ae012502fa831813eae444 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Wed, 25 Jan 2023 17:58:10 -0800 Subject: [PATCH 75/80] add create_key cli command --- TODO.md | 1 + web3_proxy/src/bin/web3_proxy_cli/main.rs | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/TODO.md b/TODO.md index 231b7839..f115d336 100644 --- a/TODO.md +++ b/TODO.md @@ -323,6 +323,7 @@ These are not yet ordered. There might be duplicates. We might not actually need - [x] don't send pagerduty alerts for websocket panics - [x] improve waiting for sync when rate limited - [x] improve pager duty errors for smarter deduping +- [x] add create_key cli command - [-] proxy mode for benchmarking all backends - [-] proxy mode for sending to multiple backends - [-] let users choose a % of reverts to log (or maybe x/second). 
someone like curve logging all reverts will be a BIG database very quickly
diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs
index 852334f9..0b8dbe3a 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/main.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs
@@ -4,6 +4,7 @@ mod change_user_tier_by_address;
 mod change_user_tier_by_key;
 mod check_config;
 mod count_users;
+mod create_key;
 mod create_user;
 mod daemon;
 mod drop_migration_lock;
@@ -73,6 +74,7 @@ enum SubCommand {
     ChangeUserTierByKey(change_user_tier_by_key::ChangeUserTierByKeySubCommand),
     CheckConfig(check_config::CheckConfigSubCommand),
     CountUsers(count_users::CountUsersSubCommand),
+    CreateKey(create_key::CreateKeySubCommand),
     CreateUser(create_user::CreateUserSubCommand),
     DropMigrationLock(drop_migration_lock::DropMigrationLockSubCommand),
     Pagerduty(pagerduty::PagerdutySubCommand),
@@ -310,6 +312,15 @@ fn main() -> anyhow::Result<()> {
                 x.main(&db_conn).await
             }
             SubCommand::CheckConfig(x) => x.main().await,
+            SubCommand::CreateKey(x) => {
+                let db_url = cli_config
+                    .db_url
+                    .expect("'--config' (with a db) or '--db-url' is required to run create a key");
+
+                let db_conn = get_migrated_db(db_url, 1, 1).await?;
+
+                x.main(&db_conn).await
+            }
             SubCommand::CreateUser(x) => {
                 let db_url = cli_config
                     .db_url

From b808a72b355f50860292788d6e89731f98596faa Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Wed, 25 Jan 2023 17:59:08 -0800
Subject: [PATCH 76/80] actually add the script

---
 .../src/bin/web3_proxy_cli/create_key.rs      | 77 +++++++++++++++++++
 1 file changed, 77 insertions(+)
 create mode 100644 web3_proxy/src/bin/web3_proxy_cli/create_key.rs

diff --git a/web3_proxy/src/bin/web3_proxy_cli/create_key.rs b/web3_proxy/src/bin/web3_proxy_cli/create_key.rs
new file mode 100644
index 00000000..3afee516
--- /dev/null
+++ b/web3_proxy/src/bin/web3_proxy_cli/create_key.rs
@@ -0,0 +1,77 @@
+use anyhow::Context;
+use argh::FromArgs;
+use entities::{rpc_key, user};
+use ethers::prelude::Address;
+use log::info;
+use migration::sea_orm::{self, ActiveModelTrait, ColumnTrait, EntityTrait, QueryFilter};
+use ulid::Ulid;
+use uuid::Uuid;
+use web3_proxy::frontend::authorization::RpcSecretKey;
+
+#[derive(FromArgs, PartialEq, Debug, Eq)]
+/// Create a new user and api key
+#[argh(subcommand, name = "create_key")]
+pub struct CreateKeySubCommand {
+    /// the user's ethereum address or descriptive string.
+    /// If a string is given, it will be converted to hex and potentially truncated.
+    /// Users from strings are only for testing since they won't be able to log in.
+    #[argh(positional)]
+    address: String,
+
+    /// the user's api ULID or UUID key.
+    /// If none given, one will be created.
+    #[argh(option)]
+    rpc_secret_key: Option<RpcSecretKey>,
+
+    /// an optional short description of the key's purpose.
+    #[argh(option)]
+    description: Option<String>,
+}
+
+impl CreateKeySubCommand {
+    pub async fn main(self, db: &sea_orm::DatabaseConnection) -> anyhow::Result<()> {
+        // TODO: would be nice to use the fixed array instead of a Vec in the entities
+        // take a simple String. If it starts with 0x, parse as address. otherwise convert ascii to hex
+        let address: Vec<u8> = if self.address.starts_with("0x") {
+            let address = self.address.parse::<Address>()?;
+
+            address.to_fixed_bytes().into()
+        } else {
+            // TODO: allow ENS
+            // left pad and truncate the string
+            let address = &format!("{:\x00>20}", self.address)[0..20];
+
+            // convert the string to bytes
+            let bytes = address.as_bytes();
+
+            // convert the slice to a Vec<u8>
+            bytes.try_into().expect("Bytes can always be a Vec<u8>")
+        };
+
+        // TODO: get existing or create a new one
+        let u = user::Entity::find()
+            .filter(user::Column::Address.eq(address))
+            .one(db)
+            .await?
+            .context("No user found with that address")?;
+
+        info!("user #{}", u.id);
+
+        let rpc_secret_key = self.rpc_secret_key.unwrap_or_else(RpcSecretKey::new);
+
+        // create a key for the new user
+        let uk = rpc_key::ActiveModel {
+            user_id: sea_orm::Set(u.id),
+            secret_key: sea_orm::Set(rpc_secret_key.into()),
+            description: sea_orm::Set(self.description),
+            ..Default::default()
+        };
+
+        let _uk = uk.save(db).await.context("Failed saving new user key")?;
+
+        info!("user key as ULID: {}", Ulid::from(rpc_secret_key));
+        info!("user key as UUID: {}", Uuid::from(rpc_secret_key));
+
+        Ok(())
+    }
+}
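The string branch of the address parsing leans on a format-spec trick: NUL bytes as the fill character, right-aligned to width 20, then sliced to exactly 20 bytes. A standalone illustration of what gets stored for a test user:

    fn main() {
        // right-align "test" in a 20-character field, filling with NUL bytes
        let padded = &format!("{:\x00>20}", "test")[0..20];

        assert_eq!(padded.len(), 20);
        assert!(padded.ends_with("test"));

        // sixteen zero bytes followed by the ascii for "test"
        println!("{:?}", padded.as_bytes());
    }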
From f9d3eb1e0da1cd754cf3360567aa796de66c83f6 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Wed, 25 Jan 2023 19:05:11 -0800
Subject: [PATCH 77/80] cut /rpc/ off

---
 web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
index 72511345..90398f20 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs
@@ -166,7 +166,13 @@ impl SentrydSubCommand {
 
         // check the main rpc's /health endpoint
         {
-            let url = format!("{}/health", primary_proxy);
+            let url = if primary_proxy.contains("/rpc/") {
+                let x = primary_proxy.split("/rpc/").next().unwrap();
+
+                format!("{}/health", x)
+            } else {
+                format!("{}/health", primary_proxy)
+            };
             let error_sender = error_sender.clone();
 
             // TODO: what timeout?

From 2c05c63350affb360c6ffe36d9a7899da7e79e01 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Thu, 26 Jan 2023 19:07:27 -0800
Subject: [PATCH 78/80] error with body if not json

---
 .../src/bin/web3_proxy_cli/sentryd/compare.rs | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
index 30de22fa..8417061c 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
@@ -60,16 +60,25 @@ pub async fn main(
         .context(format!("error querying block from {}", rpc))
         .map_err(|x| error_builder.build(x))?;
 
-    // TODO: if !a.status().is_success()
+    if !a.status().is_success() {
+        return Err(anyhow::anyhow!(
+            "bad response from {}: {}",
+            rpc,
+            response.status(),
+        ));
+    }
 
     // TODO: capture response headers now in case of error. store them in the extra data on the pager duty alert
     let headers = format!("{:#?}", a.headers());
 
     let a = a
-        .json::<JsonRpcResponse<Option<ArcBlock>>>()
+        .text()
         .await
-        .context(format!("error parsing block from {}", rpc))
-        .map_err(|x| error_builder.build(x))?;
+        .context(format!("failed parsing body from {}", rpc))?;
+
+    let a: JsonRpcResponse<Option<ArcBlock>> = serde_json::from_str(&body)
+        .context(format!("body: {}", body))
+        .context(format!("failed parsing json from {}", rpc))?;
 
     let a = if let Some(block) = a.result {
         block
@@ -204,7 +213,7 @@ async fn check_rpc(
     });
 
     let response = client
-        .post(rpc.clone())
+        .post(&rpc)
         .json(&block_by_hash_request)
         .send()
         .await
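The stacked `.context(...)` calls matter because anyhow renders every layer when the error is formatted with `{:#}`, outermost context first. A standalone demo of how one of these parse failures reads (the values are made up):

    use anyhow::anyhow;

    fn main() {
        let err = anyhow!("expected value at line 1 column 1")
            .context("body: <html>rate limited</html>")
            .context("failed parsing json from http://example/rpc");

        // prints the whole chain, colon-separated:
        // failed parsing json from http://example/rpc: body: <html>rate limited</html>: expected value at line 1 column 1
        println!("{:#}", err);
    }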
From b3584639287457a36dd27fe9258ab39295ad3a9b Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Thu, 26 Jan 2023 19:18:58 -0800
Subject: [PATCH 79/80] use error_builder

---
 .../src/bin/web3_proxy_cli/sentryd/compare.rs | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
index 8417061c..e5225fbc 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs
@@ -61,24 +61,22 @@ pub async fn main(
         .map_err(|x| error_builder.build(x))?;
 
     if !a.status().is_success() {
-        return Err(anyhow::anyhow!(
-            "bad response from {}: {}",
-            rpc,
-            response.status(),
-        ));
+        return error_builder.result(anyhow!("bad response from {}: {}", rpc, a.status()));
     }
 
     // TODO: capture response headers now in case of error. store them in the extra data on the pager duty alert
     let headers = format!("{:#?}", a.headers());
 
-    let a = a
+    let body = a
         .text()
         .await
-        .context(format!("failed parsing body from {}", rpc))?;
+        .context(format!("failed parsing body from {}", rpc))
+        .map_err(|x| error_builder.build(x))?;
 
     let a: JsonRpcResponse<Option<ArcBlock>> = serde_json::from_str(&body)
         .context(format!("body: {}", body))
-        .context(format!("failed parsing json from {}", rpc))?;
+        .context(format!("failed parsing json from {}", rpc))
+        .map_err(|x| error_builder.build(x))?;

From 5628068888bc8360081a1342f5edec7e82747e89 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Thu, 26 Jan 2023 19:23:52 -0800
Subject: [PATCH 80/80] codegen-units = 1

---
 Cargo.toml | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 7b5d68aa..1d5c2238 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -9,10 +9,7 @@ members = [
 ]
 
 [profile.release]
-# we leave debug = true on so that sentry can give us line numbers
+# `debug = true` so that sentry can give us line numbers
 debug = true
-# TODO: enable lto (and maybe other things proven with benchmarks) once rapid development is done
-lto = true
-
-# TODO: we can't do panic = abort because the websockets disconnect by panicking sometimes
-# TODO: i want to have a panic handler than sends things to pagerduty when we panic, but that will be too verbose too
+# spend longer compiling for a slightly faster binary
+codegen-units = 1
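If the slower release builds become a drag during development, the same settings can live in a dedicated cargo profile so a plain `--release` build stays quick; a sketch, not part of the patch (the profile name is arbitrary):

    # build with: cargo build --profile production
    [profile.production]
    inherits = "release"
    debug = true
    codegen-units = 1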