improve eth_sendRawTransaction and other timeouts

Bryan Stitt 2023-08-03 11:39:48 -07:00
parent 74224977b7
commit 690601643d
3 changed files with 86 additions and 64 deletions

View File

@@ -1106,9 +1106,9 @@ impl Web3ProxyApp {
                Some(request_metadata),
                None,
                None,
-               Some(Duration::from_secs(30)),
+               Some(Duration::from_secs(10)),
                Some(Level::TRACE.into()),
-               None,
+               Some(3),
            )
            .await;
@@ -1136,7 +1136,7 @@ impl Web3ProxyApp {
                Some(request_metadata),
                None,
                None,
-               Some(Duration::from_secs(30)),
+               Some(Duration::from_secs(10)),
                Some(Level::TRACE.into()),
                num_public_rpcs,
            )
@@ -1164,56 +1164,66 @@ impl Web3ProxyApp {
        // turn some of the Web3ProxyErrors into Ok results
        // TODO: move this into a helper function
-       let (code, response_data) = match self
-           ._proxy_request_with_caching(
-               &request.method,
-               &mut request.params,
-               head_block,
-               &request_metadata,
-           )
-           .await
-       {
-           Ok(response_data) => {
-               request_metadata
-                   .error_response
-                   .store(false, Ordering::Release);
-
-               (StatusCode::OK, response_data)
-           }
-           Err(err @ Web3ProxyError::NullJsonRpcResult) => {
-               request_metadata
-                   .error_response
-                   .store(false, Ordering::Release);
-
-               err.as_response_parts()
-           }
-           Err(Web3ProxyError::JsonRpcResponse(response_data)) => {
-               request_metadata
-                   .error_response
-                   .store(response_data.is_error(), Ordering::Release);
-
-               (StatusCode::OK, response_data)
-           }
-           Err(err) => {
-               request_metadata
-                   .error_response
-                   .store(true, Ordering::Release);
-
-               err.as_response_parts()
-           }
-       };
-
-       let response = JsonRpcForwardedResponse::from_response_data(response_data, response_id);
-
-       // TODO: this serializes twice :/
-       request_metadata.add_response(ResponseOrBytes::Response(&response));
-
-       let rpcs = request_metadata.backend_rpcs_used();
-
-       // there might be clones in the background, so this isn't a sure thing
-       let _ = request_metadata.try_send_arc_stat();
-
-       (code, response, rpcs)
+       let max_tries = 3;
+       let mut tries = 0;
+       loop {
+           let (code, response_data) = match self
+               ._proxy_request_with_caching(
+                   &request.method,
+                   &mut request.params,
+                   head_block,
+                   &request_metadata,
+               )
+               .await
+           {
+               Ok(response_data) => {
+                   request_metadata
+                       .error_response
+                       .store(false, Ordering::Release);
+
+                   (StatusCode::OK, response_data)
+               }
+               Err(err @ Web3ProxyError::NullJsonRpcResult) => {
+                   request_metadata
+                       .error_response
+                       .store(false, Ordering::Release);
+
+                   err.as_response_parts()
+               }
+               Err(Web3ProxyError::JsonRpcResponse(response_data)) => {
+                   request_metadata
+                       .error_response
+                       .store(response_data.is_error(), Ordering::Release);
+
+                   (StatusCode::OK, response_data)
+               }
+               Err(err) => {
+                   tries += 1;
+                   if tries < max_tries {
+                       // try again
+                       continue;
+                   }
+
+                   request_metadata
+                       .error_response
+                       .store(true, Ordering::Release);
+
+                   err.as_response_parts()
+               }
+           };
+
+           let response = JsonRpcForwardedResponse::from_response_data(response_data, response_id);
+
+           // TODO: this serializes twice :/
+           request_metadata.add_response(ResponseOrBytes::Response(&response));
+
+           let rpcs = request_metadata.backend_rpcs_used();
+
+           // there might be clones in the background, so this isn't a sure thing
+           let _ = request_metadata.try_send_arc_stat();
+
+           return (code, response, rpcs);
+       }
    }

    /// main logic for proxy_cached_request but in a dedicated function so the try operator is easy to use
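The retry wrapper added above is a plain bounded loop: count attempts, `continue` on a retryable error, and fall through to the error path once `max_tries` is reached. Only the final catch-all `Err(err)` arm retries; errors that already map to a JSON-RPC response are returned to the client immediately. A minimal standalone sketch of the same pattern (the `fetch` function and the string error type here are placeholders, not part of the web3-proxy codebase):

```rust
// Minimal sketch of a bounded retry loop around a fallible async call.
async fn call_with_retries() -> Result<String, String> {
    let max_tries = 3;
    let mut tries = 0;

    loop {
        match fetch().await {
            Ok(value) => return Ok(value),
            Err(err) => {
                tries += 1;
                if tries < max_tries {
                    // try again
                    continue;
                }
                // out of attempts: surface the last error
                return Err(err);
            }
        }
    }
}

// Placeholder for whatever fallible async work is being retried.
async fn fetch() -> Result<String, String> {
    Err("backend unavailable".to_string())
}
```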
@@ -1453,16 +1463,12 @@ impl Web3ProxyApp {
        // TODO: error if the chain_id is incorrect
-       let response = timeout(
-           Duration::from_secs(30),
-           self
-               .try_send_protected(
-                   method,
-                   params,
-                   request_metadata,
-               )
-       )
-       .await?;
+       let response = self
+           .try_send_protected(
+               method,
+               params,
+               request_metadata,
+           ).await;

        let mut response = response.try_into()?;

View File

@@ -1,6 +1,7 @@
#![feature(lazy_cell)]
#![feature(let_chains)]
#![feature(trait_alias)]
+#![feature(result_flattening)]
#![forbid(unsafe_code)]

pub mod admin_queries;
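The new `result_flattening` feature gate is what allows the `result.flatten()` call introduced in the third file below: on nightly it enables `Result::flatten`, which collapses a `Result<Result<T, E>, E>` into a `Result<T, E>`. A tiny nightly-only illustration:

```rust
#![feature(result_flattening)]

fn main() {
    // A nested result, e.g. an "outer" timeout result wrapping an "inner" request result.
    let nested: Result<Result<u32, String>, String> = Ok(Err("inner error".to_string()));

    // flatten() collapses Result<Result<T, E>, E> into Result<T, E>.
    let flat: Result<u32, String> = nested.flatten();

    assert_eq!(flat, Err("inner error".to_string()));
}
```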

View File

@@ -12,10 +12,10 @@ use crate::frontend::status::MokaCacheSerializer;
use crate::jsonrpc::{JsonRpcErrorData, JsonRpcParams, JsonRpcResultData};
use counter::Counter;
use derive_more::From;
-use ethers::prelude::{ProviderError, U64};
+use ethers::prelude::U64;
use futures::future::try_join_all;
use futures::stream::FuturesUnordered;
-use futures::StreamExt;
+use futures::{StreamExt, TryFutureExt};
use hashbrown::HashMap;
use itertools::Itertools;
use moka::future::CacheBuilder;
@@ -31,7 +31,7 @@ use std::sync::atomic::Ordering;
use std::sync::Arc;
use tokio::select;
use tokio::sync::{mpsc, watch};
-use tokio::time::{sleep, sleep_until, Duration, Instant};
+use tokio::time::{sleep, sleep_until, timeout, Duration, Instant};
use tracing::{debug, error, info, instrument, trace, warn};

/// A collection of web3 connections. Sends requests either the current best server or all servers.
@@ -379,20 +379,30 @@ impl Web3Rpcs {
        active_request_handles: Vec<OpenRequestHandle>,
        method: &str,
        params: &P,
-       // TODO: remove this box once i figure out how to do the options
-   ) -> Result<Box<RawValue>, ProviderError> {
+       max_wait: Option<Duration>,
+   ) -> Result<Box<RawValue>, Web3ProxyError> {
        // TODO: if only 1 active_request_handles, do self.try_send_request?

+       let max_wait = max_wait.unwrap_or_else(|| Duration::from_secs(300));
+
        // TODO: iter stream
        let responses = active_request_handles
            .into_iter()
            .map(|active_request_handle| async move {
-               let result: Result<Box<RawValue>, _> =
-                   active_request_handle.request(method, &json!(&params)).await;
-               result
+               let result: Result<Result<Box<RawValue>, Web3ProxyError>, Web3ProxyError> =
+                   timeout(
+                       max_wait,
+                       active_request_handle
+                           .request(method, &json!(&params))
+                           .map_err(Web3ProxyError::EthersProvider),
+                   )
+                   .await
+                   .map_err(Web3ProxyError::from);
+
+               result.flatten()
            })
            .collect::<FuturesUnordered<_>>()
-           .collect::<Vec<Result<Box<RawValue>, ProviderError>>>()
+           .collect::<Vec<Result<Box<RawValue>, Web3ProxyError>>>()
            .await;

        // TODO: Strings are not great keys, but we can't use RawValue or ProviderError as keys because they don't implement Hash or Eq
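The shape of the per-request timeout above is: map the inner future's error into the common error type with `futures::TryFutureExt::map_err`, wrap the whole thing in `tokio::time::timeout` (whose `Elapsed` error is converted via `From`), then `flatten()` the nested `Result`. A self-contained sketch of that shape, using a hypothetical `MyError` enum and `backend_request` function in place of `Web3ProxyError` and the real request handle:

```rust
#![feature(result_flattening)] // nightly, as enabled in the lib.rs hunk above

use futures::TryFutureExt;
use tokio::time::{timeout, Duration};

// Hypothetical error type standing in for Web3ProxyError.
#[derive(Debug)]
enum MyError {
    Timeout,
    Backend(String),
}

impl From<tokio::time::error::Elapsed> for MyError {
    fn from(_: tokio::time::error::Elapsed) -> Self {
        MyError::Timeout
    }
}

// Placeholder for a backend call that can fail with its own error type.
async fn backend_request() -> Result<String, String> {
    Ok("0x1".to_string())
}

async fn request_with_timeout(max_wait: Duration) -> Result<String, MyError> {
    // Outer Result: did we finish before the deadline? Inner Result: did the request succeed?
    let result: Result<Result<String, MyError>, MyError> = timeout(
        max_wait,
        backend_request().map_err(MyError::Backend),
    )
    .await
    .map_err(MyError::from);

    // Collapse Result<Result<T, E>, E> into Result<T, E>; on stable,
    // `result.and_then(|inner| inner)` does the same thing.
    result.flatten()
}
```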
@@ -1123,7 +1133,12 @@ impl Web3Rpcs {
        }

        let x = self
-           .try_send_parallel_requests(active_request_handles, method, params)
+           .try_send_parallel_requests(
+               active_request_handles,
+               method,
+               params,
+               max_wait,
+           )
            .await?;

        return Ok(x);