From 5f85a17b19d9d51c9d5d44bdcea2ad0e3efc6515 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Mon, 18 Sep 2023 23:08:16 -0700
Subject: [PATCH] put timeout around the moka stuff

we went a long time without seeing errors on this, but i just saw some again
---
 web3_proxy/src/app/mod.rs | 73 ++++++++++++++++++++++-----------------
 1 file changed, 42 insertions(+), 31 deletions(-)

diff --git a/web3_proxy/src/app/mod.rs b/web3_proxy/src/app/mod.rs
index a8eeda1f..76a72e97 100644
--- a/web3_proxy/src/app/mod.rs
+++ b/web3_proxy/src/app/mod.rs
@@ -53,7 +53,7 @@ use std::time::Duration;
 use tokio::select;
 use tokio::sync::{broadcast, mpsc, oneshot, watch, Semaphore};
 use tokio::task::{yield_now, JoinHandle};
-use tokio::time::{sleep, Instant};
+use tokio::time::{sleep, timeout, Instant};
 use tracing::{error, info, trace, warn, Level};

 // TODO: make this customizable?
@@ -1743,41 +1743,52 @@ impl Web3ProxyApp {
                 .jsonrpc_response_cache
                 .try_get_with::<_, Web3ProxyError>(cache_key.hash(), async {
                     // TODO: think more about this timeout. we should probably have a `request_expires_at` Duration on the request_metadata
-                    let response_data = self.balanced_rpcs
-                        .try_proxy_connection::<_, Arc<RawValue>>(
-                            method,
-                            params,
-                            Some(request_metadata),
-                            max_wait,
-                            from_block_num.as_ref(),
-                            to_block_num.as_ref(),
-                        )
-                        .await;
+                    let proxy_with_timeout = timeout(
+                        Duration::from_secs(300),
+                        self.balanced_rpcs
+                            .try_proxy_connection::<_, Arc<RawValue>>(
+                                method,
+                                params,
+                                Some(request_metadata),
+                                max_wait,
+                                from_block_num.as_ref(),
+                                to_block_num.as_ref(),
+                            )
+                    );

-                    if !cache_jsonrpc_errors && let Err(err) = response_data {
-                        // if we are not supposed to cache jsonrpc errors,
-                        // then we must not convert Provider errors into a JsonRpcResponseEnum
-                        // return all the errors now. moka will not cache Err results
-                        Err(err)
-                    } else {
-                        // convert jsonrpc errors into JsonRpcResponseEnum, but leave the rest as errors
-                        let response_data: JsonRpcResponseEnum<Arc<RawValue>> = response_data.try_into()?;
+                    match proxy_with_timeout.await {
+                        Ok(response_data) => {
+                            if !cache_jsonrpc_errors && let Err(err) = response_data {
+                                // if we are not supposed to cache jsonrpc errors,
+                                // then we must not convert Provider errors into a JsonRpcResponseEnum
+                                // return all the errors now. moka will not cache Err results
+                                Err(err)
+                            } else {
+                                // convert jsonrpc errors into JsonRpcResponseEnum, but leave the rest as errors
+                                let response_data: JsonRpcResponseEnum<Arc<RawValue>> = response_data.try_into()?;

-                        if response_data.is_null() {
-                            // don't ever cache "null" as a success. its too likely to be a problem
-                            Err(Web3ProxyError::NullJsonRpcResult)
-                        } else if response_data.num_bytes() > max_response_cache_bytes {
-                            // don't cache really large requests
-                            // TODO: emit a stat
-                            Err(Web3ProxyError::JsonRpcResponse(response_data))
-                        } else {
-                            // TODO: response data should maybe be Arc<JsonRpcResponseEnum<Arc<RawValue>>>, but that's more work
-                            Ok(response_data)
+                                if response_data.is_null() {
+                                    // don't ever cache "null" as a success. its too likely to be a problem
+                                    Err(Web3ProxyError::NullJsonRpcResult)
+                                } else if response_data.num_bytes() > max_response_cache_bytes {
+                                    // don't cache really large requests
+                                    // TODO: emit a stat
+                                    Err(Web3ProxyError::JsonRpcResponse(response_data))
+                                } else {
+                                    // TODO: response data should maybe be Arc<JsonRpcResponseEnum<Arc<RawValue>>>, but that's more work
+                                    Ok(response_data)
+                                }
+                            }
+                        }
+                        Err(err) => {
+                            Err(Web3ProxyError::from(err))
+                        }
                         }
                     }
                 }).await?
             } else {
-                let x = self.balanced_rpcs
+                let x = timeout(
+                    Duration::from_secs(300),
+                    self.balanced_rpcs
                     .try_proxy_connection::<_, Arc<RawValue>>(
                         method,
                         params,
@@ -1786,7 +1797,7 @@ impl Web3ProxyApp {
                         None,
                         None,
                     )
-                    .await?;
+                ).await??;

                 x.into()
             }
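
For review context, a minimal self-contained sketch of the pattern this patch applies: wrap the upstream future in tokio::time::timeout so a hung call cannot pin a moka `try_get_with` entry (or the uncached path) forever, and fold the Elapsed error into the app's error type. `ProxyError`, `slow_upstream_call`, and `call_with_timeout` are illustrative stand-ins, not web3_proxy items; the 300-second budget and the double `?` mirror the diff above, where the timeout error becomes a `Web3ProxyError` via `From`.

// requires tokio = { version = "1", features = ["full"] }
use std::time::Duration;
use tokio::time::timeout;

#[derive(Debug)]
#[allow(dead_code)]
enum ProxyError {
    // a timed-out upstream call; the patch maps tokio's Elapsed into Web3ProxyError the same way
    Timeout,
    Upstream(String),
}

impl From<tokio::time::error::Elapsed> for ProxyError {
    fn from(_: tokio::time::error::Elapsed) -> Self {
        ProxyError::Timeout
    }
}

// stand-in for `try_proxy_connection`; imagine this can hang on a bad backend
async fn slow_upstream_call() -> Result<String, ProxyError> {
    Ok("0x1".to_string())
}

async fn call_with_timeout() -> Result<String, ProxyError> {
    // timeout() wraps the inner Result in another Result<_, Elapsed>,
    // hence the double `?` -- the same reason the uncached path uses `).await??;`
    let response = timeout(Duration::from_secs(300), slow_upstream_call()).await??;
    Ok(response)
}

#[tokio::main]
async fn main() {
    match call_with_timeout().await {
        Ok(body) => println!("response: {body}"),
        Err(err) => eprintln!("proxy error: {err:?}"),
    }
}

On the cached path the patch matches on the timeout result instead of using `??`, because the Ok branch still has to decide whether a JSON-RPC error should be cached; the timeout arm returns Err(Web3ProxyError::from(err)), which moka will not cache (per the comment in the diff).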