// web3_proxy/src/rpcs/request.rs
use super::connection::Web3Connection;
use super::provider::Web3Provider;
// use metered::{measure, ErrorCount, HitCount, InFlight, ResponseTime, Throughput};
use std::fmt;
use std::sync::atomic;
use std::sync::Arc;
use tokio::time::{sleep, Duration, Instant};
use tracing::warn;
use tracing::{instrument, trace};
2022-08-30 23:01:42 +03:00
#[derive(Debug)]
2022-08-24 03:14:49 +03:00
pub enum OpenRequestResult {
2022-08-24 03:59:05 +03:00
Handle(OpenRequestHandle),
/// Unable to start a request. Retry at the given time.
2022-08-24 03:11:49 +03:00
RetryAt(Instant),
2022-08-24 03:59:05 +03:00
/// Unable to start a request. Retrying will not succeed.
2022-08-30 23:01:42 +03:00
RetryNever,
2022-08-24 03:11:49 +03:00
}
2022-08-24 03:59:05 +03:00
/// Make RPC requests through this handle and drop it when you are done.
2022-08-30 23:01:42 +03:00
#[derive(Debug)]
2022-08-24 03:14:49 +03:00
pub struct OpenRequestHandle(Arc<Web3Connection>);
2022-08-24 03:11:49 +03:00
2022-08-24 03:14:49 +03:00
impl OpenRequestHandle {
2022-08-24 03:11:49 +03:00
pub fn new(connection: Arc<Web3Connection>) -> Self {
// TODO: attach a unique id to this? customer requests have one, but not internal queries
2022-08-24 03:11:49 +03:00
// TODO: what ordering?!
connection
.active_requests
.fetch_add(1, atomic::Ordering::AcqRel);
// TODO: handle overflows?
// TODO: what ordering?
connection
.total_requests
.fetch_add(1, atomic::Ordering::Relaxed);
2022-08-24 03:11:49 +03:00
Self(connection)
}
pub fn clone_connection(&self) -> Arc<Web3Connection> {
self.0.clone()
}
/// Send a web3 request
/// By having the request method here, we ensure that the rate limiter was called and connection counts were properly incremented
/// By taking self here, we ensure that this is dropped after the request is complete
2022-09-09 00:01:36 +03:00
// #[measure([ErrorCount, HitCount, InFlight, ResponseTime, Throughput])]
2022-08-24 03:11:49 +03:00
#[instrument(skip_all)]
pub async fn request<T, R>(
&self,
method: &str,
params: T,
) -> Result<R, ethers::prelude::ProviderError>
where
T: fmt::Debug + serde::Serialize + Send + Sync,
R: serde::Serialize + serde::de::DeserializeOwned + fmt::Debug,
{
// TODO: use tracing spans properly
2022-08-30 23:01:42 +03:00
// TODO: requests from customers have request ids, but we should add
2022-08-24 03:11:49 +03:00
// TODO: including params in this is way too verbose
2022-09-09 00:01:36 +03:00
trace!(rpc=%self.0, %method, "request");
2022-08-24 03:11:49 +03:00
let mut provider = None;
while provider.is_none() {
match self.0.provider.read().await.as_ref() {
2022-08-30 23:01:42 +03:00
None => {
2022-09-05 19:25:21 +03:00
warn!(rpc=%self.0, "no provider!");
2022-08-30 23:01:42 +03:00
// TODO: how should this work? a reconnect should be in progress. but maybe force one now?
// TODO: sleep how long? subscribe to something instead?
sleep(Duration::from_millis(100)).await
}
2022-08-24 03:11:49 +03:00
Some(found_provider) => provider = Some(found_provider.clone()),
}
}
let response = match &*provider.unwrap() {
Web3Provider::Http(provider) => provider.request(method, params).await,
Web3Provider::Ws(provider) => provider.request(method, params).await,
};
// TODO: i think ethers already has trace logging (and does it much more fancy)
// TODO: at least instrument this with more useful information
2022-09-09 00:01:36 +03:00
// trace!(rpc=%self.0, %method, ?response);
trace!(rpc=%self.0, %method, "response");
2022-08-24 03:11:49 +03:00
response
}
}
2022-08-24 03:14:49 +03:00
impl Drop for OpenRequestHandle {
2022-08-24 03:11:49 +03:00
fn drop(&mut self) {
self.0
.active_requests
.fetch_sub(1, atomic::Ordering::AcqRel);
}
}