// web3-proxy/web3_proxy/src/rpcs/request.rs

use super::connection::Web3Connection;
use super::provider::Web3Provider;
use crate::frontend::authorization::AuthorizedRequest;
use crate::metered::{JsonRpcErrorCount, ProviderErrorCount};
use anyhow::Context;
use ethers::providers::{HttpClientError, ProviderError, WsClientError};
use metered::metered;
use metered::HitCount;
use metered::ResponseTime;
use metered::Throughput;
use rand::Rng;
use std::fmt;
use std::sync::atomic::{self, AtomicBool, Ordering};
use std::sync::Arc;
use tokio::time::{sleep, Duration, Instant};
use tracing::Level;
use tracing::{debug, error, trace, warn};

#[derive(Debug)]
pub enum OpenRequestResult {
    Handle(OpenRequestHandle),
    /// Unable to start a request. Retry at the given time.
    RetryAt(Instant),
    /// Unable to start a request. Retrying will not succeed.
    RetryNever,
}
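
// Illustrative sketch (not part of the upstream file): one way a caller might react to each
// OpenRequestResult variant. The real callers live elsewhere in this crate; this free function
// only demonstrates the intended retry semantics.
#[allow(dead_code)]
async fn example_consume_open_request(result: OpenRequestResult) -> Option<OpenRequestHandle> {
    match result {
        // a handle was reserved; use it, then drop it to release the connection slot
        OpenRequestResult::Handle(handle) => Some(handle),
        OpenRequestResult::RetryAt(retry_at) => {
            // rate limited (or similar); wait until the given instant and let the caller try again
            tokio::time::sleep_until(retry_at).await;
            None
        }
        // retrying on this connection will not succeed, so give up
        OpenRequestResult::RetryNever => None,
    }
}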
2022-08-24 03:59:05 +03:00
/// Make RPC requests through this handle and drop it when you are done.
2022-08-30 23:01:42 +03:00
#[derive(Debug)]
2022-09-09 06:53:16 +03:00
pub struct OpenRequestHandle {
2022-09-23 00:51:52 +03:00
authorization: Arc<AuthorizedRequest>,
2022-09-23 00:03:37 +03:00
conn: Arc<Web3Connection>,
// TODO: this is the same metrics on the conn. use a reference?
2022-09-09 06:53:16 +03:00
metrics: Arc<OpenRequestHandleMetrics>,
2022-09-23 00:03:37 +03:00
used: AtomicBool,
2022-09-09 06:53:16 +03:00
}
2022-08-24 03:11:49 +03:00
2022-09-23 01:34:43 +03:00
/// Depending on the context, RPC errors can require different handling.
pub enum RequestErrorHandler {
    /// Save the revert to the database. The value is the chance (0.0 through 1.0) that any given revert is saved.
    SaveReverts(f32),
    /// Log at the debug level. Use when errors are expected.
    DebugLevel,
    /// Log at the error level. Use when errors are bad.
    ErrorLevel,
    /// Log at the warn level. Use when errors do not cause problems.
    WarnLevel,
}

impl From<Level> for RequestErrorHandler {
    fn from(level: Level) -> Self {
        match level {
            Level::DEBUG => RequestErrorHandler::DebugLevel,
            Level::ERROR => RequestErrorHandler::ErrorLevel,
            Level::WARN => RequestErrorHandler::WarnLevel,
            _ => unimplemented!("unexpected tracing Level"),
        }
    }
}
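
// Illustrative sketch (not part of the upstream file): how an error handler might be chosen.
// A tracing Level converts into the matching log-based handler, while SaveReverts is built
// directly with a probability; the values and the helper name here are made up for the example.
#[allow(dead_code)]
fn example_pick_error_handler(save_revert_chance: Option<f32>) -> RequestErrorHandler {
    match save_revert_chance {
        // save roughly this fraction of reverting eth_call/eth_estimateGas requests
        Some(chance) => RequestErrorHandler::SaveReverts(chance),
        // otherwise just log unexpected errors at the warn level
        None => Level::WARN.into(),
    }
}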

impl AuthorizedRequest {
    /// Save an RPC call that returned "execution reverted" to the database.
    async fn save_revert<T>(self: Arc<Self>, method: String, params: T) -> anyhow::Result<()>
    where
        T: Clone + fmt::Debug + serde::Serialize + Send + Sync + 'static,
    {
        let db_conn = self.db_conn().context("db_conn needed to save reverts")?;

        todo!("save the revert to the database");
    }
}

#[metered(registry = OpenRequestHandleMetrics, visibility = pub)]
impl OpenRequestHandle {
    pub fn new(conn: Arc<Web3Connection>, authorization: Option<Arc<AuthorizedRequest>>) -> Self {
        // TODO: take request_id as an argument?
        // TODO: attach a unique id to this? customer requests have one, but not internal queries
        // TODO: what ordering?!
        // TODO: should we be using metered, or not? i think not because we want stats for each handle
        // TODO: these should maybe be sent to an influxdb instance?
        conn.active_requests.fetch_add(1, atomic::Ordering::Relaxed);

        // TODO: handle overflows?
        // TODO: what ordering?
        conn.total_requests.fetch_add(1, atomic::Ordering::Relaxed);

        let metrics = conn.open_request_handle_metrics.clone();

        let used = false.into();

        let authorization = authorization.unwrap_or_else(|| {
            let db_conn = conn.db_conn.clone();
            Arc::new(AuthorizedRequest::Internal(db_conn))
        });

        Self {
            authorization,
            conn,
            metrics,
            used,
        }
    }

    #[inline]
    pub fn clone_connection(&self) -> Arc<Web3Connection> {
        self.conn.clone()
    }

    /// Send a web3 request.
    /// By having the request method here, we ensure that the rate limiter was called and connection counts were properly incremented.
    /// TODO: we no longer take `self` by value because metered doesn't like that
    /// TODO: ErrorCount includes too many types of errors, such as transaction reverts
    #[measure([JsonRpcErrorCount, HitCount, ProviderErrorCount, ResponseTime, Throughput])]
    pub async fn request<T, R>(
        &self,
        method: &str,
        params: &T,
        error_handler: RequestErrorHandler,
    ) -> Result<R, ProviderError>
    where
        // TODO: not sure about this type. would be better to not need clones, but measure and spawns combine to need it
        T: Clone + fmt::Debug + serde::Serialize + Send + Sync + 'static,
        R: serde::Serialize + serde::de::DeserializeOwned + fmt::Debug,
    {
        // ensure this function only runs once
        if self.used.swap(true, Ordering::Release) {
            unimplemented!("a request handle should only be used once");
        }

        // TODO: use tracing spans
        // TODO: requests from customers have request ids, but we should add our own ids to internal requests, too
        // TODO: including params in this is way too verbose
        // the authorization field is already on a parent span
        trace!(rpc=%self.conn, %method, "request");

        let mut provider = None;

        while provider.is_none() {
            match self.conn.provider.read().await.clone() {
                None => {
                    warn!(rpc=%self.conn, "no provider!");

                    // TODO: how should this work? a reconnect should be in progress. but maybe force one now?
                    // TODO: sleep how long? subscribe to something instead? maybe use a watch handle?
                    // TODO: this is going to be way too verbose!
                    sleep(Duration::from_millis(100)).await
                }
                Some(found_provider) => provider = Some(found_provider),
            }
        }

        let provider = &*provider.expect("provider was checked already");

        // TODO: really sucks that we have to clone here
        let response = match provider {
            Web3Provider::Http(provider) => provider.request(method, params).await,
            Web3Provider::Ws(provider) => provider.request(method, params).await,
        };

        if let Err(err) = &response {
            // only save reverts for some types of calls
            // TODO: do something special for eth_sendRawTransaction too
            let error_handler = if let RequestErrorHandler::SaveReverts(save_chance) = error_handler
            {
                if ["eth_call", "eth_estimateGas"].contains(&method)
                    && self.authorization.db_conn().is_some()
                    && save_chance != 0.0
                    && (save_chance == 1.0
                        || rand::thread_rng().gen_range(0.0..=1.0) <= save_chance)
                {
                    error_handler
                } else {
                    // TODO: is always logging at debug level fine?
                    RequestErrorHandler::DebugLevel
                }
            } else {
                error_handler
            };

            match error_handler {
                RequestErrorHandler::DebugLevel => {
                    debug!(?err, %method, rpc=%self.conn, "bad response!");
                }
                RequestErrorHandler::ErrorLevel => {
                    error!(?err, %method, rpc=%self.conn, "bad response!");
                }
                RequestErrorHandler::WarnLevel => {
                    warn!(?err, %method, rpc=%self.conn, "bad response!");
                }
                RequestErrorHandler::SaveReverts(_) => {
                    // TODO: logging every one is going to flood the database
                    // TODO: have a percent chance to do this. or maybe a "logged reverts per second"
                    if let ProviderError::JsonRpcClientError(err) = err {
                        let msg = match provider {
                            Web3Provider::Http(_) => {
                                if let Some(HttpClientError::JsonRpcError(err)) =
                                    err.downcast_ref::<HttpClientError>()
                                {
                                    Some(&err.message)
                                } else {
                                    None
                                }
                            }
                            Web3Provider::Ws(_) => {
                                if let Some(WsClientError::JsonRpcError(err)) =
                                    err.downcast_ref::<WsClientError>()
                                {
                                    Some(&err.message)
                                } else {
                                    None
                                }
                            }
                        };

                        if let Some(msg) = msg {
                            if msg.starts_with("execution reverted") {
                                // spawn saving to the database so we don't slow down the request (or error if no db)
                                let f = self
                                    .authorization
                                    .clone()
                                    .save_revert(method.to_string(), params.clone());

                                tokio::spawn(async move { f.await });
                            } else {
                                debug!(?err, %method, rpc=%self.conn, "bad response!");
                            }
                        }
                    }
                }
            }
        } else {
            // TODO: i think ethers already has trace logging (and does it much more fancy)
            // TODO: opt-in response inspection to log reverts with their request. put into redis or what?
            // trace!(rpc=%self.conn, %method, ?response);
            trace!(%method, rpc=%self.conn, "response");
        }

        response
    }
}

impl Drop for OpenRequestHandle {
    fn drop(&mut self) {
        self.conn
            .active_requests
            .fetch_sub(1, atomic::Ordering::AcqRel);
    }
}
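
// Illustrative sketch (not part of the upstream file): issuing a single request through a handle.
// Normally a handle comes back inside OpenRequestResult::Handle after rate limiting; constructing
// one directly here only demonstrates the shape of the request() call. Using serde_json::Value as
// the response type is an assumption for this example.
#[allow(dead_code)]
async fn example_request(conn: Arc<Web3Connection>) -> Result<serde_json::Value, ProviderError> {
    // passing None makes this an internal (non-customer) request
    let handle = OpenRequestHandle::new(conn, None);

    // log unexpected errors at the warn level; reverts are only saved for eth_call/eth_estimateGas
    handle
        .request("eth_getBlockByNumber", &("latest", false), Level::WARN.into())
        .await
}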