From f82f5ea544cd3988dc32c47ec9d72e1193c83bc2 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 12 May 2022 19:44:31 +0000 Subject: [PATCH] one less clone --- config/example.toml | 6 +++--- web3-proxy/src/app.rs | 8 ++------ web3-proxy/src/connection.rs | 9 ++++----- web3-proxy/src/connections.rs | 22 +++------------------- 4 files changed, 12 insertions(+), 33 deletions(-) diff --git a/config/example.toml b/config/example.toml index 84177003..9dd84e92 100644 --- a/config/example.toml +++ b/config/example.toml @@ -9,9 +9,9 @@ chain_id = 1 url = "ws://127.0.0.1:8546" soft_limit = 200_000 - [balanced_rpc_tiers.0.ankr] - url = "https://rpc.ankr.com/eth" - soft_limit = 3_000 + #[balanced_rpc_tiers.0.ankr] + #url = "https://rpc.ankr.com/eth" + #soft_limit = 3_000 [private_rpcs] diff --git a/web3-proxy/src/app.rs b/web3-proxy/src/app.rs index cbba03e2..574dd550 100644 --- a/web3-proxy/src/app.rs +++ b/web3-proxy/src/app.rs @@ -278,12 +278,8 @@ impl Web3ProxyApp { // TODO: what allowed lag? 
match balanced_rpcs.next_upstream_server().await { Ok(active_request_handle) => { - let response = balanced_rpcs - .try_send_request( - active_request_handle, - &request.method, - &request.params, - ) + let response = active_request_handle + .request(&request.method, &request.params) .await; let response = match response { diff --git a/web3-proxy/src/connection.rs b/web3-proxy/src/connection.rs index b60402e1..4c1e7460 100644 --- a/web3-proxy/src/connection.rs +++ b/web3-proxy/src/connection.rs @@ -13,7 +13,7 @@ use std::sync::atomic::{self, AtomicU32, AtomicU64}; use std::time::Duration; use std::{cmp::Ordering, sync::Arc}; use tokio::time::{interval, sleep, MissedTickBehavior}; -use tracing::{info, trace, warn}; +use tracing::{debug, info, warn}; use crate::connections::Web3Connections; @@ -141,8 +141,7 @@ impl Web3Connection { )); } - // TODO: use anyhow - assert_eq!(chain_id, found_chain_id); + info!("Successful connection: {}", connection); Ok(connection) } @@ -326,7 +325,7 @@ impl ActiveRequestHandle { { // TODO: this should probably be trace level and use a span // TODO: it would be nice to have the request id on this - trace!("Sending {}({:?}) to {}", method, params, self.0); + debug!("Sending {}({:?}) to {}", method, params, self.0); let response = match &self.0.provider { Web3Provider::Http(provider) => provider.request(method, params).await, @@ -335,7 +334,7 @@ impl ActiveRequestHandle { // TODO: i think ethers already has trace logging (and does it much more fancy) // TODO: at least instrument this with more useful information - trace!("Response from {}: {:?}", self.0, response); + debug!("Response from {}: {:?}", self.0, response); response } diff --git a/web3-proxy/src/connections.rs b/web3-proxy/src/connections.rs index a32fa31b..b8cdfa18 100644 --- a/web3-proxy/src/connections.rs +++ b/web3-proxy/src/connections.rs @@ -114,19 +114,6 @@ impl Web3Connections { self.best_head_block_number.load(atomic::Ordering::Acquire) } - pub async fn 
try_send_request( - &self, - active_request_handle: ActiveRequestHandle, - method: &str, - params: &Option<Box<RawValue>>, - ) -> Result<Box<RawValue>, ethers::prelude::ProviderError> { - let response = active_request_handle.request(method, params).await; - - // TODO: if "no block with that header" or some other jsonrpc errors, skip this response? - - response - } - /// Send the same request to all the handles. Returning the fastest successful result. pub async fn try_send_parallel_requests( self: Arc<Self>, active_request_handles: Vec<ActiveRequestHandle>, method: &str, params: &Option<Box<RawValue>>, response_sender: flume::Sender<anyhow::Result<Box<RawValue>>>, ) -> anyhow::Result<()> { // TODO: if only 1 active_request_handles, do self.try_send_request let mut unordered_futures = FuturesUnordered::new(); - for connection in active_request_handles { + for active_request_handle in active_request_handles { // clone things so we can pass them to a future - let connections = self.clone(); let method = method.clone(); let params = params.clone(); let response_sender = response_sender.clone(); let handle = tokio::spawn(async move { - // get the client for this rpc server - let response = connections - .try_send_request(connection, &method, &params) - .await?; + let response: Box<RawValue> = + active_request_handle.request(&method, &params).await?; // send the first good response to a one shot channel. that way we respond quickly // drop the result because errors are expected after the first send