use super::one::Web3Rpc;
use super::provider::Web3Provider;
use crate::frontend::authorization::Authorization;
use anyhow::Context;
use chrono::Utc;
use entities::revert_log;
use entities::sea_orm_active_enums::Method;
use ethers::providers::ProviderError;
use ethers::types::{Address, Bytes};
use log::{debug, error, trace, warn, Level};
use migration::sea_orm::{self, ActiveEnum, ActiveModelTrait};
use serde_json::json;
use std::fmt;
use std::sync::atomic;
use std::sync::Arc;
use thread_fast_rng::rand::Rng;
use tokio::time::{sleep, Duration, Instant};

#[derive(Debug)]
pub enum OpenRequestResult {
    Handle(OpenRequestHandle),
    /// Unable to start a request. Retry at the given time.
    RetryAt(Instant),
    /// Unable to start a request because no servers are synced.
    NotReady,
}

/// Make RPC requests through this handle and drop it when you are done.
/// Opening this handle checks rate limits, so try to keep opening a handle and using it as close together in time as possible.
#[derive(Debug)]
pub struct OpenRequestHandle {
    authorization: Arc<Authorization>,
    rpc: Arc<Web3Rpc>,
}
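
// A minimal caller-side usage sketch (for illustration only; `try_request_handle` is an
// assumed upstream method that yields an `OpenRequestResult`, the real call site may differ):
//
//     match rpc.try_request_handle(&authorization).await? {
//         OpenRequestResult::Handle(handle) => {
//             let result: serde_json::Value = handle
//                 .request("eth_blockNumber", &(), RequestErrorHandler::ErrorLevel, None)
//                 .await?;
//             // `request` consumes the handle, and dropping it releases the active request slot
//         }
//         OpenRequestResult::RetryAt(retry_at) => tokio::time::sleep_until(retry_at).await,
//         OpenRequestResult::NotReady => { /* try a different server */ }
//     }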

/// Depending on the context, RPC errors require different handling.
#[derive(Copy, Clone)]
pub enum RequestErrorHandler {
    /// Log at the trace level. Use when errors are expected.
    TraceLevel,
    /// Log at the debug level. Use when errors are expected.
    DebugLevel,
    /// Log at the error level. Use when errors are bad.
    ErrorLevel,
    /// Log at the warn level. Use when errors do not cause problems.
    WarnLevel,
    /// Potentially save the revert. Users can tune how often this happens.
    Save,
}

// TODO: second param could be skipped since we don't need it here
#[derive(serde::Deserialize, serde::Serialize)]
struct EthCallParams((EthCallFirstParams, Option<serde_json::Value>));

#[derive(serde::Deserialize, serde::Serialize)]
struct EthCallFirstParams {
    to: Address,
    data: Option<Bytes>,
}
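
// For reference, `EthCallParams` mirrors the positional JSON-RPC params of an `eth_call`,
// e.g. `[{"to": "0x6b17...", "data": "0x70a08231..."}, "latest"]`: the first element is the
// call object we care about and the optional second element is the block tag, which we ignore.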

impl From<Level> for RequestErrorHandler {
    fn from(level: Level) -> Self {
        match level {
            Level::Trace => RequestErrorHandler::TraceLevel,
            Level::Debug => RequestErrorHandler::DebugLevel,
            Level::Error => RequestErrorHandler::ErrorLevel,
            Level::Warn => RequestErrorHandler::WarnLevel,
            _ => unimplemented!("unexpected tracing Level"),
        }
    }
}
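
// A small usage sketch: callers that already think in `log::Level` can convert directly,
// e.g. `let handler: RequestErrorHandler = Level::Warn.into();`. Note that levels without a
// mapping here (such as `Level::Info`) hit the `unimplemented!` above.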

impl Authorization {
    /// Save an RPC call that returned "execution reverted" to the database.
    async fn save_revert(
        self: Arc<Self>,
        method: Method,
        params: EthCallFirstParams,
    ) -> anyhow::Result<()> {
        let rpc_key_id = match self.checks.rpc_secret_key_id {
            Some(rpc_key_id) => rpc_key_id.into(),
            None => {
                // trace!(?self, "cannot save revert without rpc_key_id");
                return Ok(());
            }
        };

        let db_conn = self.db_conn.as_ref().context("no database connection")?;

        // TODO: should the database set the timestamp?
        // we intentionally use "now" and not the time the request started
        // why? because we aggregate stats and setting one in the past could cause confusion
        let timestamp = Utc::now();
        let to: Vec<u8> = params
            .to
            .as_bytes()
            .try_into()
            .expect("address should always convert to a Vec<u8>");
        let call_data = params.data.map(|x| format!("{}", x));

        let rl = revert_log::ActiveModel {
            rpc_key_id: sea_orm::Set(rpc_key_id),
            method: sea_orm::Set(method),
            to: sea_orm::Set(to),
            call_data: sea_orm::Set(call_data),
            timestamp: sea_orm::Set(timestamp),
            ..Default::default()
        };

        let rl = rl
            .save(db_conn)
            .await
            .context("Failed saving new revert log")?;

        // TODO: what log level?
        // TODO: better format
        trace!("revert_log: {:?}", rl);

        // TODO: return something useful
        Ok(())
    }
}

impl Drop for OpenRequestHandle {
    fn drop(&mut self) {
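        // note: this pairs with the `fetch_add` in `OpenRequestHandle::new`, so dropping the
        // handle always releases the active request slot it claimed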
        self.rpc
            .active_requests
            .fetch_sub(1, atomic::Ordering::AcqRel);
    }
}

impl OpenRequestHandle {
    pub async fn new(authorization: Arc<Authorization>, rpc: Arc<Web3Rpc>) -> Self {
        // TODO: take request_id as an argument?
        // TODO: attach a unique id to this? customer requests have one, but not internal queries
        // TODO: what ordering?!
        rpc.active_requests
            .fetch_add(1, std::sync::atomic::Ordering::AcqRel);

        Self { authorization, rpc }
    }

    pub fn connection_name(&self) -> String {
        self.rpc.name.clone()
    }

    #[inline]
    pub fn clone_connection(&self) -> Arc<Web3Rpc> {
        self.rpc.clone()
    }

    /// Send a web3 request.
    /// By having the request method here, we ensure that the rate limiter was called and connection counts were properly incremented.
    /// Depending on how things are locked, you might need to pass the provider in.
    /// We take `self` to ensure this function only runs once per handle.
    pub async fn request<P, R>(
        self,
        method: &str,
        params: &P,
        mut error_handler: RequestErrorHandler,
        unlocked_provider: Option<Arc<Web3Provider>>,
    ) -> Result<R, ProviderError>
    where
        // TODO: not sure about this type. would be better to not need clones, but measure and spawns combine to need it
        P: Clone + fmt::Debug + serde::Serialize + Send + Sync + 'static,
        R: serde::Serialize + serde::de::DeserializeOwned + fmt::Debug + Send,
    {
        // TODO: use tracing spans
        // TODO: including params in this log is way too verbose
        // trace!(rpc=%self.rpc, %method, "request");
        trace!("requesting from {}", self.rpc);

        let mut provider = if unlocked_provider.is_some() {
            unlocked_provider
        } else {
            self.rpc.provider.read().await.clone()
        };

        let mut logged = false;
        // TODO: instead of a lock, i guess it should be a watch?
        while provider.is_none() {
            // trace!("waiting on provider: locking...");
            // TODO: i dont like this. subscribing to a channel could be better
            sleep(Duration::from_millis(100)).await;

            if !logged {
                debug!("no provider for open handle on {}", self.rpc);
                logged = true;
            }

            provider = self.rpc.provider.read().await.clone();
        }

        let provider = provider.expect("provider was checked already");
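
        // One possible shape for the TODOs above (a hedged sketch, not wired in here): keep the
        // provider in a `tokio::sync::watch` channel so waiters are notified instead of polling:
        //
        //     let mut provider_rx: watch::Receiver<Option<Arc<Web3Provider>>> = ...;
        //     while provider_rx.borrow().is_none() {
        //         provider_rx.changed().await.expect("provider sender dropped");
        //     }
        //     let provider = provider_rx.borrow().clone().expect("just checked");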

        self.rpc
            .total_requests
            .fetch_add(1, std::sync::atomic::Ordering::AcqRel);

        // we used to fetch_add the active_request count here, but sometimes a request is made without going through this function (like with subscriptions)

        let start = Instant::now();

        // TODO: replace ethers-rs providers with our own that supports streaming the responses
        let response = match provider.as_ref() {
            #[cfg(test)]
            Web3Provider::Mock => {
                return Err(ProviderError::CustomError(
                    "mock provider can't respond".to_string(),
                ))
            }
            Web3Provider::Ws(p) => p.request(method, params).await,
            Web3Provider::Http(p) | Web3Provider::Both(p, _) => {
                // TODO: i keep hearing that http is faster. but ws has always been better for me. investigate more with actual benchmarks
                p.request(method, params).await
            }
        };

        // note: we intentionally do not record this latency now. we do NOT want to measure errors
        let latency = start.elapsed();

        // we used to fetch_sub the active_request count here, but sometimes the handle is dropped without request being called!

        trace!(
            "response from {} for {} {:?}: {:?}",
            self.rpc,
            method,
            params,
            response,
        );

        if let Err(err) = &response {
            // only save reverts for some types of calls
            // TODO: do something special for eth_sendRawTransaction too
            error_handler = if let RequestErrorHandler::Save = error_handler {
                // TODO: should all these be Trace or Debug or a mix?
                if !["eth_call", "eth_estimateGas"].contains(&method) {
                    // trace!(%method, "skipping save on revert");
                    RequestErrorHandler::TraceLevel
                } else if self.authorization.db_conn.is_some() {
                    let log_revert_chance = self.authorization.checks.log_revert_chance;

                    if log_revert_chance == 0.0 {
                        // trace!(%method, "no chance. skipping save on revert");
                        RequestErrorHandler::TraceLevel
                    } else if log_revert_chance == 1.0 {
                        // trace!(%method, "guaranteed chance. SAVING on revert");
                        error_handler
                    } else if thread_fast_rng::thread_fast_rng().gen_range(0.0f64..=1.0)
                        > log_revert_chance
                    {
                        // a draw above log_revert_chance means this revert is skipped
                        // trace!(%method, "missed chance. skipping save on revert");
                        RequestErrorHandler::TraceLevel
                    } else {
                        // trace!("Saving on revert");
                        // TODO: is always logging at debug level fine?
                        error_handler
                    }
                } else {
                    // trace!(%method, "no database. skipping save on revert");
                    RequestErrorHandler::TraceLevel
                }
            } else {
                error_handler
            };

            // TODO: simple enum -> string derive?
            // TODO: if ProviderError::UnsupportedRpc, we should retry on another server
            #[derive(Debug)]
            enum ResponseTypes {
                Revert,
                RateLimit,
                Error,
            }

            // check for "execution reverted" here
            // TODO: move this into a function on ResponseTypes
            let response_type = if let ProviderError::JsonRpcClientError(err) = err {
                // Http and Ws errors are very similar, but different types
                let msg = match &*provider {
                    #[cfg(test)]
                    Web3Provider::Mock => unimplemented!(),
                    _ => err.as_error_response().map(|x| x.message.clone()),
                };

                trace!("error message: {:?}", msg);

                if let Some(msg) = msg {
                    if msg.starts_with("execution reverted") {
                        trace!("revert from {}", self.rpc);
                        ResponseTypes::Revert
                    } else if msg.contains("limit") || msg.contains("request") {
                        // TODO: too verbose
                        if self.rpc.backup {
                            trace!("rate limit from {}", self.rpc);
                        } else {
                            warn!("rate limit from {}", self.rpc);
                        }
                        ResponseTypes::RateLimit
                    } else {
                        ResponseTypes::Error
                    }
                } else {
                    ResponseTypes::Error
                }
            } else {
                ResponseTypes::Error
            };

            if matches!(response_type, ResponseTypes::RateLimit) {
                if let Some(hard_limit_until) = self.rpc.hard_limit_until.as_ref() {
                    // TODO: how long should we actually wait? different providers have different times
                    // TODO: if rate_limit_period_seconds is set, use that
                    // TODO: check response headers for rate limits too
                    // TODO: warn if production, debug if backup
                    if self.rpc.backup {
                        debug!("unexpected rate limit on {}!", self.rpc);
                    } else {
                        warn!("unexpected rate limit on {}!", self.rpc);
                    }

                    let retry_at = Instant::now() + Duration::from_secs(1);

                    trace!("retry {} at: {:?}", self.rpc, retry_at);

                    hard_limit_until.send_replace(retry_at);
                }
            }

            // TODO: think more about the method and param logs. those can be sensitive information
            match error_handler {
                RequestErrorHandler::DebugLevel => {
                    // TODO: think about this revert check more. sometimes we might want reverts logged so this needs a flag
                    if matches!(response_type, ResponseTypes::Revert) {
                        debug!(
                            "bad response from {}! method={} params={:?} err={:?}",
                            self.rpc, method, params, err
                        );
                    }
                }
                RequestErrorHandler::TraceLevel => {
                    trace!(
                        "bad response from {}! method={} params={:?} err={:?}",
                        self.rpc,
                        method,
                        params,
                        err
                    );
                }
                RequestErrorHandler::ErrorLevel => {
                    // TODO: include params if not running in release mode
                    error!(
                        "bad response from {}! method={} err={:?}",
                        self.rpc, method, err
                    );
                }
                RequestErrorHandler::WarnLevel => {
                    // TODO: include params if not running in release mode
                    warn!(
                        "bad response from {}! method={} err={:?}",
                        self.rpc, method, err
                    );
                }
                RequestErrorHandler::Save => {
                    trace!(
                        "bad response from {}! method={} params={:?} err={:?}",
                        self.rpc,
                        method,
                        params,
                        err
                    );

                    // TODO: do not unwrap! (doesn't matter much since we check method as a string above)
                    let method: Method = Method::try_from_value(&method.to_string()).unwrap();

                    match serde_json::from_value::<EthCallParams>(json!(params)) {
                        Ok(params) => {
                            // spawn saving to the database so we don't slow down the request
                            let f = self.authorization.clone().save_revert(method, params.0 .0);

                            tokio::spawn(f);
                        }
                        Err(err) => {
                            warn!(
                                "failed parsing eth_call params. unable to save revert. {}",
                                err
                            );
                        }
                    }
                }
            }
        } else if let Some(peak_latency) = &self.rpc.peak_latency {
            // trace!("updating peak_latency: {}", latency.as_secs_f64());
            // peak_latency.report(latency);
            trace!("peak latency disabled for now");
        } else {
            unreachable!("peak_latency not initialized");
        }

        response
    }
}