use super::connection::Web3Connection;
use super::provider::Web3Provider;
use crate::metered::{JsonRpcErrorCount, ProviderErrorCount};
use ethers::providers::{HttpClientError, ProviderError, WsClientError};
use metered::metered;
use metered::HitCount;
use metered::ResponseTime;
use metered::Throughput;
use parking_lot::Mutex;
use std::fmt;
use std::sync::atomic;
use std::sync::Arc;
use tokio::time::{sleep, Duration, Instant};
use tracing::Level;
use tracing::{debug, error, trace, warn};

/// The result of trying to open a request handle on a connection.
#[derive(Debug)]
pub enum OpenRequestResult {
    /// The connection is ready. Make the request through this handle.
    Handle(OpenRequestHandle),

    /// Unable to start a request. Retry at the given time.
    RetryAt(Instant),

    /// Unable to start a request. Retrying will not succeed.
    RetryNever,
}
/// Make RPC requests through this handle and drop it when you are done.
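///
/// A minimal usage sketch (hypothetical, assuming a `conn: Arc<Web3Connection>`
/// is already in scope):
///
/// ```ignore
/// let handle = OpenRequestHandle::new(conn);
/// let block: serde_json::Value = handle
///     .request("eth_getBlockByNumber", &("latest", false), Level::ERROR.into())
///     .await?;
/// // `request` took the inner connection, so dropping the handle afterward is a no-op
/// ```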
#[derive(Debug)]
pub struct OpenRequestHandle {
    /// Stored in an `Option` so that `request` can take the connection out;
    /// `Drop` then only decrements `active_requests` if `request` was never called.
    conn: Mutex<Option<Arc<Web3Connection>>>,
    // TODO: this is the same metrics on the conn. use a reference?
    metrics: Arc<OpenRequestHandleMetrics>,
}

/// Depending on how a request is used, its errors should be handled differently.
pub enum RequestErrorHandler {
    /// Log the revert (with the given chance) so it can be saved for later inspection.
    SaveReverts(f32),
    /// Log the error at the debug level.
    DebugLevel,
    /// Log the error at the error level.
    ErrorLevel,
    /// Log the error at the warn level.
    WarnLevel,
}

impl From<Level> for RequestErrorHandler {
    fn from(level: Level) -> Self {
        match level {
            Level::DEBUG => RequestErrorHandler::DebugLevel,
            Level::ERROR => RequestErrorHandler::ErrorLevel,
            Level::WARN => RequestErrorHandler::WarnLevel,
            // TRACE and INFO have no matching handler
            _ => unimplemented!(),
        }
    }
}

#[metered(registry = OpenRequestHandleMetrics, visibility = pub)]
impl OpenRequestHandle {
    pub fn new(conn: Arc<Web3Connection>) -> Self {
        // TODO: take request_id as an argument?
        // TODO: attach a unique id to this? customer requests have one, but not internal queries
        // TODO: what ordering?!
        // TODO: should we be using metered, or not? i think not because we want stats for each handle
        // TODO: these should maybe be sent to an influxdb instance?
        conn.active_requests.fetch_add(1, atomic::Ordering::Relaxed);

        // TODO: handle overflows?
        // TODO: what ordering?
        conn.total_requests.fetch_add(1, atomic::Ordering::Relaxed);

        let metrics = conn.open_request_handle_metrics.clone();

        let conn = Mutex::new(Some(conn));

        Self { conn, metrics }
    }

    pub fn clone_connection(&self) -> Arc<Web3Connection> {
        if let Some(conn) = self.conn.lock().as_ref() {
            conn.clone()
        } else {
            // the connection is only ever taken out by `request`
            unimplemented!("this shouldn't happen")
        }
    }

    /// Send a web3 request.
    /// By having the request method here, we ensure that the rate limiter was called and connection counts were properly incremented.
    /// TODO: we no longer take `self` by value because metered doesn't like that
    /// TODO: ErrorCount includes too many types of errors, such as transaction reverts
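    ///
    /// A hedged call sketch (hypothetical; `tx` stands in for eth_call params
    /// and `0.01` for the revert-save chance):
    ///
    /// ```ignore
    /// let result: serde_json::Value = handle
    ///     .request("eth_call", &(tx, "latest"), RequestErrorHandler::SaveReverts(0.01))
    ///     .await?;
    /// ```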
    #[measure([JsonRpcErrorCount, HitCount, ProviderErrorCount, ResponseTime, Throughput])]
    pub async fn request<T, R>(
        &self,
        method: &str,
        params: &T,
        error_handler: RequestErrorHandler,
    ) -> Result<R, ProviderError>
    where
        T: fmt::Debug + serde::Serialize + Send + Sync,
        R: serde::Serialize + serde::de::DeserializeOwned + fmt::Debug,
    {
        // take the connection so that a later `Drop` does not decrement `active_requests` a second time
        let conn = self
            .conn
            .lock()
            .take()
            .expect("cannot use request multiple times");

        // TODO: use tracing spans properly
        // TODO: requests from customers have request ids, but we should add ids to internal requests too
        // TODO: including params in this is way too verbose
        trace!(rpc=%conn, %method, "request");

        let mut provider = None;

        while provider.is_none() {
            match conn.provider.read().await.as_ref() {
                None => {
                    warn!(rpc=%conn, "no provider!");
                    // TODO: how should this work? a reconnect should be in progress. but maybe force one now?
                    // TODO: maybe use a watch handle?
                    // TODO: sleep how long? subscribe to something instead?
                    // TODO: this is going to be very verbose!
                    sleep(Duration::from_millis(100)).await
                }
                Some(found_provider) => provider = Some(found_provider.clone()),
            }
        }
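
        // the provider is behind an Arc, so a reconnect can swap `conn.provider`
        // without invalidating this in-flight clone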
        let provider = &*provider.expect("provider was checked already");

        let response = match provider {
            Web3Provider::Http(provider) => provider.request(method, params).await,
            Web3Provider::Ws(provider) => provider.request(method, params).await,
        };

        // the request is done, so it no longer counts as active
        conn.active_requests.fetch_sub(1, atomic::Ordering::AcqRel);

        if let Err(err) = &response {
            match error_handler {
                RequestErrorHandler::ErrorLevel => {
                    error!(?err, %method, rpc=%conn, "bad response!");
                }
                RequestErrorHandler::DebugLevel => {
                    debug!(?err, %method, rpc=%conn, "bad response!");
                }
                RequestErrorHandler::WarnLevel => {
                    warn!(?err, %method, rpc=%conn, "bad response!");
                }
                RequestErrorHandler::SaveReverts(_chance) => {
                    // TODO: only set SaveReverts if this is an eth_call or eth_estimateGas? we'll need eth_sendRawTransaction somewhere else
                    // TODO: logging every one is going to flood the database
                    // TODO: have a percent chance to do this. or maybe a "logged reverts per second"
                    if let ProviderError::JsonRpcClientError(err) = err {
                        // the http and ws transports wrap json-rpc errors differently
                        match provider {
                            Web3Provider::Http(_) => {
                                if let Some(HttpClientError::JsonRpcError(err)) =
                                    err.downcast_ref::<HttpClientError>()
                                {
                                    if err.message.starts_with("execution reverted") {
                                        debug!(%method, ?params, "TODO: save the request");
                                        // TODO: don't do this on the hot path. spawn it
                                    } else {
                                        debug!(?err, %method, rpc=%conn, "bad response!");
                                    }
                                }
                            }
                            Web3Provider::Ws(_) => {
                                if let Some(WsClientError::JsonRpcError(err)) =
                                    err.downcast_ref::<WsClientError>()
                                {
                                    if err.message.starts_with("execution reverted") {
                                        debug!(%method, ?params, "TODO: save the request");
                                        // TODO: don't do this on the hot path. spawn it
                                    } else {
                                        debug!(?err, %method, rpc=%conn, "bad response!");
                                    }
                                }
                            }
                        }
                    }
                }
            }
        } else {
            // TODO: i think ethers already has trace logging (and does it much more fancy)
            // TODO: opt-in response inspection to log reverts with their request. put into redis or what?
            // trace!(rpc=%self.0, %method, ?response);
            trace!(%method, rpc=%conn, "response");
        }

        response
    }
}

impl Drop for OpenRequestHandle {
    fn drop(&mut self) {
        // if `request` was called, it already took the connection and decremented the counter
        if let Some(conn) = self.conn.lock().take() {
            conn.active_requests.fetch_sub(1, atomic::Ordering::AcqRel);
        }
    }
}