refactor

parent 07fbd3c71d
commit d8c8e6591d
@@ -14,7 +14,7 @@ use std::sync::Arc;
 use std::time::Duration;
 use tokio::sync::watch;
 use tokio::task;
-use tokio::time::{sleep, timeout};
+use tokio::time::timeout;
 use tracing::{debug, info, instrument, trace, warn};
 
 static APP_USER_AGENT: &str = concat!(
@@ -239,7 +239,6 @@ impl Web3ProxyApp {
 
         // TODO: how much should we retry? probably with a timeout and not with a count like this
-        // TODO: think more about this loop.
         for _i in 0..10usize {
            // // TODO: add more to this span
            // let span = info_span!("i", ?i);
            // let _enter = span.enter(); // DO NOT ENTER! we can't use enter across awaits! (clippy lint soon)
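The commented-out span code above exists because a `Span::enter` guard must not be held across an `.await` (the clippy lint the comment anticipates). For reference, the await-safe pattern in `tracing` attaches the span to the future instead of entering it on the current thread; a minimal sketch, with `do_work` as a hypothetical stand-in for the loop body:

use tracing::{info_span, Instrument};

async fn do_work() { /* hypothetical stand-in for the loop body */ }

async fn traced_iteration(i: usize) {
    // The span is entered only while the future is being polled, so it is
    // safe across .await points, unlike a Span::enter guard.
    do_work().instrument(info_span!("i", ?i)).await;
}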
@@ -302,25 +301,10 @@ impl Web3ProxyApp {
             }
         }
 
-        // TODO: move this whole match to a function on self.balanced_rpcs. incoming requests checks makes it awkward
-        match self.balanced_rpcs.next_upstream_server().await {
-            Ok(active_request_handle) => {
-                let response = active_request_handle
-                    .request(&request.method, &request.params)
-                    .await;
-
-                let response = match response {
-                    Ok(partial_response) => {
-                        // TODO: trace here was really slow with millions of requests.
-                        // trace!("forwarding request from {}", upstream_server);
-
-                        let response = JsonRpcForwardedResponse {
-                            jsonrpc: "2.0".to_string(),
-                            id: request.id,
-                            // TODO: since we only use the result here, should that be all we return from try_send_request?
-                            result: Some(partial_response),
-                            error: None,
-                        };
+        let response = self
+            .balanced_rpcs
+            .try_send_best_upstream_server(request)
+            .await?;
 
         // TODO: small race condition here. parallel requests with the same query will both be saved to the cache
         let mut response_cache = response_cache.write();
@@ -334,63 +318,10 @@ impl Web3ProxyApp {
 
         drop(response_cache);
 
         // TODO: needing to remove manually here makes me think we should do this differently
        let _ = self.incoming_requests.remove(&cache_key);
        let _ = incoming_tx.send(false);
 
-                        response
-                    }
-                    Err(e) => {
-                        // send now since we aren't going to cache an error response
-                        let _ = incoming_tx.send(false);
-
-                        JsonRpcForwardedResponse::from_ethers_error(e, request.id)
-                    }
-                };
-
-                // TODO: needing to remove manually here makes me think we should do this differently
-                let _ = self.incoming_requests.remove(&cache_key);
-
-                if response.error.is_some() {
-                    trace!("Sending error reply: {:?}", response);
-
-                    // errors already sent false to the in_flight_tx
-                } else {
-                    trace!("Sending reply: {:?}", response);
-
-                    let _ = incoming_tx.send(false);
-                }
-
-                return Ok(response);
-            }
-            Err(None) => {
-                // TODO: this is too verbose. if there are other servers in other tiers, we use those!
-                warn!("No servers in sync!");
-
-                // TODO: needing to remove manually here makes me think we should do this differently
-                let _ = self.incoming_requests.remove(&cache_key);
-                let _ = incoming_tx.send(false);
-
-                return Err(anyhow::anyhow!("no servers in sync"));
-            }
-            Err(Some(retry_after)) => {
-                // TODO: move this to a helper function
-                // sleep (TODO: with a lock?) until our rate limits should be available
-                // TODO: if a server catches up sync while we are waiting, we could stop waiting
-                warn!("All rate limits exceeded. Sleeping");
-
-                sleep(retry_after).await;
-
-                // TODO: needing to remove manually here makes me think we should do this differently
-                let _ = self.incoming_requests.remove(&cache_key);
-                let _ = incoming_tx.send(false);
-
-                continue;
-            }
-        }
-    }
-
-        Err(anyhow::anyhow!("internal error"))
+        Ok(response)
     }
 }
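Judging by the match arms above and in the new function below, `next_upstream_server` signals three outcomes through its return type: a handle on success, `Err(None)` when no server is in sync, and `Err(Some(retry_after))` when every server is rate limited. A sketch of that inferred shape (not quoted from the source):

use std::time::Duration;

struct ActiveRequestHandle; // hypothetical stand-in for the real handle type

// Inferred signature, not the actual declaration:
//   Ok(handle)             -> a server is ready to take the request
//   Err(None)              -> no servers in sync
//   Err(Some(retry_after)) -> all rate limits hit; sleep, then retry
async fn next_upstream_server() -> Result<ActiveRequestHandle, Option<Duration>> {
    todo!()
}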
@@ -434,6 +434,49 @@ impl Web3Connections {
         Err(earliest_retry_after)
     }
 
+    /// be sure there is a timeout on this or it might loop forever
+    pub async fn try_send_best_upstream_server(
+        &self,
+        request: JsonRpcRequest,
+    ) -> anyhow::Result<JsonRpcForwardedResponse> {
+        loop {
+            match self.next_upstream_server().await {
+                Ok(active_request_handle) => {
+                    let response_result = active_request_handle
+                        .request(&request.method, &request.params)
+                        .await;
+
+                    let response =
+                        JsonRpcForwardedResponse::from_response_result(response_result, request.id);
+
+                    if response.error.is_some() {
+                        trace!(?response, "Sending error reply",);
+                        // errors already sent false to the in_flight_tx
+                    } else {
+                        trace!(?response, "Sending reply");
+                    }
+
+                    return Ok(response);
+                }
+                Err(None) => {
+                    warn!(?self, "No servers in sync!");
+
+                    return Err(anyhow::anyhow!("no servers in sync"));
+                }
+                Err(Some(retry_after)) => {
+                    // TODO: move this to a helper function
+                    // sleep (TODO: with a lock?) until our rate limits should be available
+                    // TODO: if a server catches up sync while we are waiting, we could stop waiting
+                    warn!(?retry_after, "All rate limits exceeded. Sleeping");
+
+                    sleep(retry_after).await;
+
+                    continue;
+                }
+            }
+        }
+    }
+
     pub async fn try_send_all_upstream_servers(
         &self,
         request: JsonRpcRequest,
@@ -463,6 +506,8 @@ impl Web3Connections {
                 return Ok(response);
             }
             Err(None) => {
                 warn!(?self, "No servers in sync!");
 
+                // TODO: return a 502?
+                // TODO: i don't think this will ever happen
                 return Err(anyhow::anyhow!("no available rpcs!"));
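The doc comment on `try_send_best_upstream_server` warns that it can loop forever, since the `Err(Some(retry_after))` arm just sleeps and retries. Callers are therefore expected to bound it themselves; a hedged sketch of such a call site, reusing the `timeout` import from the first hunk (the one-minute budget is an assumption, not part of this commit):

// Inside some async fn returning anyhow::Result<_>, with `connections`
// and `request` assumed in scope. The outer `?` handles the Elapsed error
// from timeout; the inner `?` handles the anyhow::Result from the retry loop.
let response = timeout(
    Duration::from_secs(60), // assumed overall budget
    connections.try_send_best_upstream_server(request),
)
.await??;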
@@ -162,7 +162,27 @@ impl fmt::Debug for JsonRpcForwardedResponse {
 }
 
 impl JsonRpcForwardedResponse {
-    pub fn from_ethers_error(e: ProviderError, id: Box<serde_json::value::RawValue>) -> Self {
+    pub fn from_response_result(
+        result: Result<Box<RawValue>, ProviderError>,
+        id: Box<RawValue>,
+    ) -> Self {
+        match result {
+            Ok(response) => Self::from_response(response, id),
+            Err(e) => Self::from_ethers_error(e, id),
+        }
+    }
+
+    pub fn from_response(partial_response: Box<RawValue>, id: Box<RawValue>) -> Self {
+        JsonRpcForwardedResponse {
+            jsonrpc: "2.0".to_string(),
+            id,
+            // TODO: since we only use the result here, should that be all we return from try_send_request?
+            result: Some(partial_response),
+            error: None,
+        }
+    }
+
+    pub fn from_ethers_error(e: ProviderError, id: Box<RawValue>) -> Self {
         // TODO: move turning ClientError into json to a helper function?
         let code;
         let message: String;
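Taken together, the new constructors let a call site forward a provider result in one step instead of matching on it by hand, which is exactly the shape the old `Web3ProxyApp` code above had. A small sketch of the pattern at a call site (names as they appear in the diff; the surrounding function is assumed):

// Before: match on the result, build the struct in the Ok arm, and call
// from_ethers_error in the Err arm. After: forward the Result as-is.
let response_result: Result<Box<RawValue>, ProviderError> = active_request_handle
    .request(&request.method, &request.params)
    .await;

let response = JsonRpcForwardedResponse::from_response_result(response_result, request.id);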