let the frontend handle its own cookies

Bryan Stitt 2022-09-24 02:47:44 +00:00
parent 8035ee5a0c
commit 8459dcd1f1
17 changed files with 138 additions and 198 deletions
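
With cookies gone from the proxy, the frontend keeps the bearer token itself and sends it back in the `Authorization` header (see `get_logout` below, which now reads `TypedHeader<Authorization<Bearer>>`). A minimal sketch of a client call under that scheme; the route, port, token value, and the `reqwest`/`tokio`/`anyhow` scaffolding are assumptions, not part of this commit:

```rust
use reqwest::Client;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // the frontend stores this however it likes, e.g. in a cookie it manages itself
    let bearer_token = "01GDT4PKY4YBT3B7X8AR7M2CAP"; // hypothetical Ulid

    // the token travels as `Authorization: Bearer <token>`
    let resp = Client::new()
        .get("http://localhost:8544/user/logout") // assumed route and port
        .bearer_auth(bearer_token)
        .send()
        .await?;

    println!("{}", resp.text().await?);
    Ok(())
}
```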

101
Cargo.lock generated
View File

@@ -27,15 +27,6 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "aead"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877"
dependencies = [
"generic-array 0.14.5",
]
[[package]]
name = "aes"
version = "0.7.5"
@@ -48,20 +39,6 @@ dependencies = [
"opaque-debug 0.3.0",
]
[[package]]
name = "aes-gcm"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6"
dependencies = [
"aead",
"aes",
"cipher",
"ctr",
"ghash",
"subtle",
]
[[package]]
name = "ahash"
version = "0.7.6"
@@ -1060,24 +1037,6 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb4a24b1aaf0fd0ce8b45161144d6f42cd91677fd5940fd431183eb023b3a2b8"
[[package]]
name = "cookie"
version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94d4706de1b0fa5b132270cddffa8585166037822e260a944fe161acd137ca05"
dependencies = [
"aes-gcm",
"base64 0.13.0",
"hkdf",
"hmac",
"percent-encoding",
"rand 0.8.5",
"sha2 0.10.2",
"subtle",
"time 0.3.14",
"version_check",
]
[[package]]
name = "core-foundation-sys"
version = "0.8.3"
@@ -2180,16 +2139,6 @@ dependencies = [
"wasm-bindgen",
]
[[package]]
name = "ghash"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99"
dependencies = [
"opaque-debug 0.3.0",
"polyval",
]
[[package]]
name = "gimli"
version = "0.26.1"
@@ -2358,15 +2307,6 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "hkdf"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437"
dependencies = [
"hmac",
]
[[package]]
name = "hmac"
version = "0.12.1"
@@ -3471,18 +3411,6 @@ dependencies = [
"winapi",
]
[[package]]
name = "polyval"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1"
dependencies = [
"cfg-if",
"cpufeatures",
"opaque-debug 0.3.0",
"universal-hash",
]
[[package]]
name = "postgres-protocol"
version = "0.6.4"
@@ -5143,23 +5071,6 @@ dependencies = [
"tracing",
]
[[package]]
name = "tower-cookies"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19833e336396f3953e5ab1513d72b5e5ea51d5ad39b78d306766a05740b48b97"
dependencies = [
"async-trait",
"axum-core",
"cookie",
"futures-util",
"http",
"parking_lot 0.12.1",
"pin-project-lite",
"tower-layer",
"tower-service",
]
[[package]]
name = "tower-http"
version = "0.3.4"
@@ -5410,16 +5321,6 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e"
[[package]]
name = "universal-hash"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05"
dependencies = [
"generic-array 0.14.5",
"subtle",
]
[[package]]
name = "untrusted"
version = "0.7.1"
@@ -5625,6 +5526,7 @@ dependencies = [
"axum",
"axum-client-ip",
"axum-macros",
"chrono",
"counter",
"dashmap",
"deferred-rate-limiter",
@@ -5662,7 +5564,6 @@ dependencies = [
"tokio-stream",
"toml",
"tower",
"tower-cookies",
"tower-http",
"tower-request-id",
"tracing",

20
TODO.md
View File

@@ -157,19 +157,19 @@ These are roughly in order of completion
- [x] change user creation script to have an "unlimited requests per minute" flag that sets it to u64::MAX (18446744073709551615)
- [x] in /status, block hashes has a lower count than block numbers. how is that possible?
- we weren't calling sync. now we are
- [-] opt-in debug mode that inspects responses for reverts and saves the request to the database for the user.
- [-] let them choose a % to log (or maybe x/second). someone like curve logging all reverts will make the database BIG very quickly
- this must be opt-in or spawned since it will slow things down and will make their calls less private
- [x] opt-in debug mode that inspects responses for reverts and saves the request to the database for the user.
- [x] Api keys need option to lock to IP, cors header, referer, user agent, etc
- [ ] endpoint for creating/modifying api keys and their advanced security features
- [x] /user/logout to clear bearer token and jwt
- [x] bearer tokens should expire
- [-] user login should return the bearer token, the user keys, and a jwt (jsonwebtoken rust crate should make it easy)
- [-] let users choose a % to log (or maybe x/second). someone like curve logging all reverts will make the database BIG very quickly
- this must be opt-in or spawned since it will slow things down and will make their calls less private
- [ ] we currently default to 0.0 and don't expose a way to edit it. we have a database row, but we don't use it
- [-] add configurable size limits to all the Caches
- [ ] active requests per second per api key
- [ ] distribution of methods per api key (eth_call, eth_getLogs, etc.)
- [-] add configurable size limits to all the Caches
- [ ] /user/logout to clear bearer token and jwt
- [ ] endpoint for creating/modifying api keys and their advanced security features
- [ ] BUG: i think if all backend servers stop, the server doesn't properly reconnect. It appears to stop listening on 8854, but not shut down.
- [ ] bearer tokens should expire
- [-] signed cookie jar
- [ ] user login should return both the bearer token and a jwt (jsonwebtoken rust crate should make it easy)
- [ ] revert logs should have a maximum age and a maximum count to keep the database from being huge
- [ ] Ulid instead of Uuid for user keys
- <https://discord.com/channels/873880840487206962/900758376164757555/1012942974608474142>
@@ -177,6 +177,7 @@ These are roughly in order of completion
- [ ] Ulid instead of Uuid for database ids
- might have to use Uuid in sea-orm and then convert to Ulid on display
- [ ] option to rotate api key
- [ ] read the cookie key from a file. easy to re-use and no giant blob of hex in our app config
## V1
@@ -380,3 +381,4 @@ in another repo: event subscriber
- if we get a connection refused, we should remove the server's block info so it is taken out of rotation
- [ ] web3_proxy_cli command should read database settings from config
- [ ] how should we handle reverting transactions? they won't confirm for a while after we send them
- [ ] allow configuration of the expiration time of bearer tokens

View File

@@ -17,7 +17,7 @@ pub struct Model {
pub active: bool,
pub requests_per_minute: Option<u64>,
#[sea_orm(column_type = "Decimal(Some((5, 4)))")]
pub log_reverts: Decimal,
pub log_revert_chance: Decimal,
#[sea_orm(column_type = "Text", nullable)]
pub allowed_ips: Option<String>,
#[sea_orm(column_type = "Text", nullable)]
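
For reference, `log_revert_chance` is a `Decimal(5, 4)` column, i.e. a probability between 0.0000 and 1.0000. A minimal sketch of how such a per-key chance might be sampled on each reverting call; the helper is an assumption, this diff only renames the column:

```rust
use rand::Rng;

/// true when this request's revert should be logged, given a chance
/// in [0.0, 1.0] (stored as Decimal(5, 4) in the database)
fn should_log_revert(log_revert_chance: f64) -> bool {
    // gen::<f64>() samples uniformly from [0, 1)
    rand::thread_rng().gen::<f64>() < log_revert_chance
}
```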

View File

@@ -20,7 +20,7 @@ impl MigrationTrait for Migration {
)
// add a column for logging reverts in the RevertLogs table
.add_column(
ColumnDef::new(UserKeys::LogReverts)
ColumnDef::new(UserKeys::LogRevertChance)
.decimal_len(5, 4)
.not_null()
.default("0.0"),
@@ -90,7 +90,7 @@ impl MigrationTrait for Migration {
.unsigned()
.not_null(),
)
.drop_column(UserKeys::LogReverts)
.drop_column(UserKeys::LogRevertChance)
.to_owned(),
)
.await
@@ -109,7 +109,7 @@ pub enum UserKeys {
// PrivateTxs,
// Active,
RequestsPerMinute,
LogReverts,
LogRevertChance,
AllowedIps,
AllowedOrigins,
AllowedReferers,

View File

@@ -25,6 +25,8 @@ argh = "0.1.9"
axum = { version = "0.5.16", features = ["headers", "serde_json", "tokio-tungstenite", "ws"] }
axum-client-ip = "0.2.0"
axum-macros = "0.2.3"
# TODO: import this from sea-orm so we always have the same version
chrono = "0.4.22"
counter = "0.5.6"
dashmap = "5.4.0"
derive_more = "0.99.17"
@@ -59,7 +61,6 @@ time = "0.3.14"
tokio = { version = "1.21.1", features = ["full", "tracing"] }
# TODO: make sure this uuid version matches sea-orm. PR to put this in their prelude
tokio-stream = { version = "0.1.10", features = ["sync"] }
tower-cookies = { version = "0.7.0", features = ["private"] }
toml = "0.5.9"
tower = "0.4.13"
tower-request-id = "0.2.0"

View File

@@ -29,6 +29,7 @@ use metered::{metered, ErrorCount, HitCount, ResponseTime, Throughput};
use migration::{Migrator, MigratorTrait};
use moka::future::Cache;
use redis_rate_limiter::{DeadpoolRuntime, RedisConfig, RedisPool, RedisRateLimiter};
use sea_orm::prelude::Decimal;
use sea_orm::DatabaseConnection;
use serde::Serialize;
use serde_json::json;
@@ -44,7 +45,6 @@ use tokio::sync::{broadcast, watch};
use tokio::task::JoinHandle;
use tokio::time::timeout;
use tokio_stream::wrappers::{BroadcastStream, WatchStream};
use tower_cookies::Key;
use tracing::{error, info, trace, warn};
use uuid::Uuid;
@@ -64,7 +64,7 @@ type ResponseCache =
pub type AnyhowJoinHandle<T> = JoinHandle<anyhow::Result<T>>;
#[derive(Clone, Debug, From)]
#[derive(Clone, Debug, Default, From)]
/// TODO: rename this?
pub struct UserKeyData {
pub user_key_id: u64,
@@ -78,6 +78,8 @@ pub struct UserKeyData {
pub allowed_user_agents: Option<Vec<UserAgent>>,
/// if None, allow any IP Address
pub allowed_ips: Option<Vec<IpNet>>,
/// Chance to save reverting eth_call, eth_estimateGas, and eth_sendRawTransaction to the database.
pub log_revert_chance: Decimal,
}
/// The application
@@ -88,8 +90,6 @@ pub struct Web3ProxyApp {
pub balanced_rpcs: Arc<Web3Connections>,
/// Send private requests (like eth_sendRawTransaction) to all these servers
pub private_rpcs: Option<Arc<Web3Connections>>,
// TODO: this lifetime is definitely wrong
pub cookie_key: Key,
response_cache: ResponseCache,
// don't drop this or the sender will stop working
// TODO: broadcast channel instead?
@@ -371,12 +371,8 @@ impl Web3ProxyApp {
.time_to_live(Duration::from_secs(60))
.build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::new());
// TODO: get this from the app's config
let cookie_key = Key::from(&[0; 64]);
let app = Self {
config: top_config.app,
cookie_key,
balanced_rpcs,
private_rpcs,
response_cache,

View File

@@ -204,6 +204,7 @@ mod tests {
prometheus_port: 0,
workers: 4,
config: "./does/not/exist/test.toml".to_string(),
cookie_key_filename: "./does/not/exist/development_cookie_key".to_string(),
};
// make a test AppConfig

View File

@@ -36,6 +36,7 @@ impl CreateUserSubCommand {
// TODO: would be nice to use the fixed array instead of a Vec in the entities
// TODO: how can we use custom types with
// TODO: take a simple String. If it starts with 0x, parse as address. otherwise convert ascii to hex
let address = self.address.to_fixed_bytes().into();
let u = user::ActiveModel {

View File

@@ -32,6 +32,10 @@ pub struct CliConfig {
/// number of worker threads. Defaults to the number of logical processors
#[argh(option, default = "0")]
pub workers: usize,
/// path to a binary file used to encrypt cookies. Should be at least 64 bytes.
#[argh(option, default = "\"./data/development_cookie_key\".to_string()")]
pub cookie_key_filename: String,
}
#[derive(Debug, Deserialize)]
@@ -46,6 +50,8 @@ pub struct TopConfig {
pub struct AppConfig {
// TODO: better type for chain_id? max of `u64::MAX / 2 - 36` https://github.com/ethereum/EIPs/issues/2294
pub chain_id: u64,
pub cookie_domain: Option<String>,
pub cookie_secure: Option<bool>,
pub db_url: Option<String>,
/// minimum size of the connection pool for the database
/// If none, the number of workers is used
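
The new `cookie_key_filename` flag points at raw key bytes rather than hex in the config (per TODO.md above: "no giant blob of hex in our app config"). A sketch of generating a suitable key file; the generator itself is an assumption, only the flag and its default path come from this commit:

```rust
use rand::RngCore;
use std::fs;

fn main() -> std::io::Result<()> {
    // 64 random bytes, matching the "at least 64 bytes" requirement above
    let mut key = [0u8; 64];
    rand::thread_rng().fill_bytes(&mut key);

    // the default path from CliConfig above; assumes ./data exists
    fs::write("./data/development_cookie_key", key)?;
    Ok(())
}
```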

View File

@@ -5,7 +5,7 @@ use axum::headers::{Origin, Referer, UserAgent};
use deferred_rate_limiter::DeferredRateLimitResult;
use entities::user_keys;
use ipnet::IpNet;
use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};
use sea_orm::{prelude::Decimal, ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};
use serde::Serialize;
use std::{net::IpAddr, sync::Arc};
use tokio::time::Instant;
@@ -28,9 +28,10 @@ pub enum RateLimitResult {
#[derive(Debug, Serialize)]
pub struct AuthorizedKey {
ip: IpAddr,
origin: Option<String>,
user_key_id: u64,
pub ip: IpAddr,
pub origin: Option<String>,
pub user_key_id: u64,
pub log_revert_chance: Decimal,
// TODO: what else?
}
@@ -96,6 +97,7 @@ impl AuthorizedKey {
ip,
origin,
user_key_id: user_data.user_key_id,
log_revert_chance: user_data.log_revert_chance,
})
}
}
@@ -269,16 +271,10 @@ impl Web3ProxyApp {
allowed_origins,
allowed_referers,
allowed_user_agents,
log_revert_chance: user_key_model.log_revert_chance,
})
}
None => Ok(UserKeyData {
user_key_id: 0,
user_max_requests_per_period: Some(0),
allowed_ips: None,
allowed_origins: None,
allowed_referers: None,
allowed_user_agents: None,
}),
None => Ok(UserKeyData::default()),
}
})
.await;
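
With `Default` now derived on `UserKeyData` (the `#[derive(Clone, Debug, Default, From)]` hunk above), the hand-written arm for unknown keys collapses to `UserKeyData::default()`. A sketch of what that default expands to; the field list comes from this diff, and note that `user_max_requests_per_period` goes from `Some(0)` to `None` while `Decimal::default()` is zero:

```rust
use sea_orm::prelude::Decimal;

fn default_user_key_data() -> UserKeyData {
    UserKeyData {
        user_key_id: 0,                     // u64::default()
        user_max_requests_per_period: None, // was Some(0) before this commit
        allowed_ips: None,                  // None means allow any IP
        allowed_origins: None,
        allowed_referers: None,
        allowed_user_agents: None,
        log_revert_chance: Decimal::ZERO,   // Decimal::default()
    }
}
```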

View File

@@ -81,7 +81,7 @@ impl IntoResponse for FrontendErrorResponse {
),
)
}
Self::RateLimitedIp(ip, retry_at) => {
Self::RateLimitedIp(ip, _retry_at) => {
// TODO: emit a stat
// TODO: include retry_at in the error
// TODO: if retry_at is None, give an unauthorized status code?
@@ -95,7 +95,7 @@
)
}
// TODO: this should actually be the id of the key. multiple users might control one key
Self::RateLimitedUser(user_data, retry_at) => {
Self::RateLimitedUser(user_data, _retry_at) => {
// TODO: emit a stat
// TODO: include retry_at in the error
(

View File

@@ -15,7 +15,6 @@ use axum::{
};
use std::net::SocketAddr;
use std::sync::Arc;
use tower_cookies::CookieManagerLayer;
use tower_http::trace::TraceLayer;
use tower_request_id::{RequestId, RequestIdLayer};
use tracing::{error_span, info};
@@ -70,8 +69,6 @@ pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()
.layer(request_tracing_layer)
// create a unique id for each request
.layer(RequestIdLayer)
// signed cookies
.layer(CookieManagerLayer::new())
// 404 for any unknown routes
.fallback(errors::handler_404.into_service());

View File

@@ -30,7 +30,6 @@ use siwe::Message;
use std::ops::Add;
use std::sync::Arc;
use time::{Duration, OffsetDateTime};
use tower_cookies::Cookies;
use ulid::Ulid;
use uuid::Uuid;
@@ -130,15 +129,16 @@ pub struct PostLogin {
// signer: String,
}
/// TODO: what information should we return?
#[derive(Serialize)]
pub struct PostLoginResponse {
bearer_token: Ulid,
// TODO: change this to a Ulid
api_key: Uuid,
api_keys: Vec<Uuid>,
}
#[debug_handler]
/// Post to the user endpoint to register or login.
/// It is recommended to save the returned bearer token in a cookie and send it back.
#[debug_handler]
pub async fn post_login(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ClientIp(ip): ClientIp,
@@ -183,7 +183,7 @@ pub async fn post_login(
.await
.unwrap();
let (u, uk, response) = match u {
let (u, _uks, response) = match u {
None => {
let txn = db.begin().await?;
@@ -209,71 +209,65 @@
.await
.context("Failed saving new user key")?;
let uks = vec![uk];
txn.commit().await?;
let response_json = PostLoginResponse {
bearer_token,
api_key: uk.api_key,
api_keys: uks.iter().map(|uk| uk.api_key).collect(),
};
let response = (StatusCode::CREATED, Json(response_json)).into_response();
(u, uk, response)
(u, uks, response)
}
Some(u) => {
// the user is already registered
// TODO: what if the user has multiple keys?
let uk = user_keys::Entity::find()
let uks = user_keys::Entity::find()
.filter(user_keys::Column::UserId.eq(u.id))
.one(db)
.all(db)
.await
.context("failed loading user's key")?
.unwrap();
.context("failed loading user's key")?;
let response_json = PostLoginResponse {
bearer_token,
api_key: uk.api_key,
api_keys: uks.iter().map(|uk| uk.api_key).collect(),
};
let response = (StatusCode::OK, Json(response_json)).into_response();
(u, uk, response)
(u, uks, response)
}
};
// TODO: set a session cookie with the bearer token?
// save the bearer token in redis with a long (7 or 30 day?) expiry. or in database?
// add bearer to redis
let mut redis_conn = app.redis_conn().await?;
// TODO: move this into a struct so this is less fragile
let bearer_key = format!("bearer:{}", bearer_token);
let bearer_redis_key = format!("bearer:{}", bearer_token);
redis_conn.set(bearer_key, u.id.to_string()).await?;
// TODO: save user_data. we already have uk, so this could be more efficient. it works for now
// expire in 4 weeks
// TODO: get expiration time from app config
// TODO: do we use this?
redis_conn
.set_ex(bearer_redis_key, u.id.to_string(), 2_419_200)
.await?;
Ok(response)
}
/// Log out the user connected to the given Authorization header.
#[debug_handler]
pub async fn get_logout(
cookies: Cookies,
Extension(app): Extension<Arc<Web3ProxyApp>>,
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
) -> FrontendResult {
// delete the cookie if it exists
let private_cookies = cookies.private(&app.cookie_key);
// TODO: i don't like this. move this to a helper function so it is less fragile
let bearer_cache_key = format!("bearer:{}", bearer.token());
if let Some(c) = private_cookies.get("bearer") {
let bearer_cache_key = format!("bearer:{}", c.value());
let mut redis_conn = app.redis_conn().await?;
// TODO: should deleting the cookie be last? redis being down shouldn't block the user
private_cookies.remove(c);
let mut redis_conn = app.redis_conn().await?;
redis_conn.del(bearer_cache_key).await?;
}
redis_conn.del(bearer_cache_key).await?;
// TODO: what should the response be? probably json something
Ok("goodbye".into_response())
@@ -348,8 +342,6 @@ impl ProtectedAction {
// TODO: is this type correct?
let u_id: Option<u64> = redis_conn.get(bearer_cache_key).await?;
// TODO: if not in redis, check the db?
// TODO: if auth_address == primary_address, allow
// TODO: if auth_address != primary_address, only allow if they are a secondary user with the correct role
todo!("verify token for the given user");
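
One note on the login path above: `2_419_200` seconds is exactly four weeks. A sketch of naming that constant; the TODO already suggests reading it from app config instead:

```rust
// 4 weeks in seconds: 4 * 7 * 24 * 60 * 60 == 2_419_200
const BEARER_TOKEN_TTL_SECONDS: usize = 4 * 7 * 24 * 60 * 60;

// redis_conn
//     .set_ex(bearer_redis_key, u.id.to_string(), BEARER_TOKEN_TTL_SECONDS)
//     .await?;
```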

View File

@@ -108,7 +108,11 @@ impl Web3Connections {
Some(rpc) => {
rpc.wait_for_request_handle(authorization, Duration::from_secs(30))
.await?
.request("eth_getBlockByHash", &get_block_params, Level::ERROR.into())
.request(
"eth_getBlockByHash",
&json!(get_block_params),
Level::ERROR.into(),
)
.await?
}
None => {

View File

@@ -15,6 +15,7 @@ use redis_rate_limiter::{RedisPool, RedisRateLimitResult, RedisRateLimiter};
use sea_orm::DatabaseConnection;
use serde::ser::{SerializeStruct, Serializer};
use serde::Serialize;
use serde_json::json;
use std::cmp::min;
use std::fmt;
use std::hash::{Hash, Hasher};
@@ -121,7 +122,11 @@ impl Web3Connection {
let found_chain_id: Result<U64, _> = new_connection
.wait_for_request_handle(None, Duration::from_secs(30))
.await?
.request("eth_chainId", &Option::None::<()>, Level::ERROR.into())
.request(
"eth_chainId",
&json!(Option::None::<()>),
Level::ERROR.into(),
)
.await;
match found_chain_id {
@@ -209,10 +214,10 @@
.await?
.request(
"eth_getCode",
&(
&json!((
"0xdead00000000000000000000000000000000beef",
maybe_archive_block,
),
)),
// errors here are expected, so keep the level low
tracing::Level::DEBUG.into(),
)
@@ -543,7 +548,7 @@ impl Web3Connection {
let block: Result<Block<TxHash>, _> = active_request_handle
.request(
"eth_getBlockByNumber",
&("latest", false),
&json!(("latest", false)),
tracing::Level::ERROR.into(),
)
.await;
@@ -619,7 +624,7 @@
.await?
.request(
"eth_getBlockByNumber",
&("latest", false),
&json!(("latest", false)),
tracing::Level::ERROR.into(),
)
.await
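
The `json!` wrapping at these call sites gives every request the same concrete params type, `serde_json::Value`; the revert-logging path later re-parses those params with `serde_json::from_value`. A small sketch of the two conversions the calls above rely on:

```rust
use serde_json::{json, Value};

fn main() {
    // a tuple serializes as a positional-params array
    assert_eq!(json!(("latest", false)).to_string(), r#"["latest",false]"#);

    // None::<()> serializes as JSON null, i.e. "no params"
    assert_eq!(json!(Option::None::<()>), Value::Null);
}
```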

View File

@@ -23,6 +23,7 @@ use petgraph::graphmap::DiGraphMap;
use sea_orm::DatabaseConnection;
use serde::ser::{SerializeStruct, Serializer};
use serde::Serialize;
use serde_json::json;
use serde_json::value::RawValue;
use std::cmp;
use std::cmp::Reverse;
@@ -320,7 +321,11 @@
.into_iter()
.map(|active_request_handle| async move {
let result: Result<Box<RawValue>, _> = active_request_handle
.request(method, &params.cloned(), tracing::Level::ERROR.into())
.request(
method,
&json!(params.cloned()),
tracing::Level::ERROR.into(),
)
.await;
result
})
@@ -517,12 +522,12 @@
// save the rpc in case we get an error and want to retry on another server
skip_rpcs.push(active_request_handle.clone_connection());
// TODO: get the log percent from the user data?
// TODO: get the log percent from the user data
let response_result = active_request_handle
.request(
&request.method,
&request.params,
RequestErrorHandler::SaveReverts(100.0),
&json!(request.params),
RequestErrorHandler::SaveReverts(0.0),
)
.await;

View File

@@ -3,12 +3,17 @@ use super::provider::Web3Provider;
use crate::frontend::authorization::AuthorizedRequest;
use crate::metered::{JsonRpcErrorCount, ProviderErrorCount};
use anyhow::Context;
use chrono::Utc;
use entities::revert_logs;
use entities::sea_orm_active_enums::Method;
use ethers::providers::{HttpClientError, ProviderError, WsClientError};
use metered::metered;
use metered::HitCount;
use metered::ResponseTime;
use metered::Throughput;
use rand::Rng;
use sea_orm::ActiveModelTrait;
use serde_json::json;
use std::fmt;
use std::sync::atomic::{self, AtomicBool, Ordering};
use std::sync::Arc;
@@ -47,6 +52,15 @@ pub enum RequestErrorHandler {
WarnLevel,
}
#[derive(serde::Deserialize, serde::Serialize)]
struct EthCallParams {
method: Method,
// TODO: do this as Address instead
to: Vec<u8>,
// TODO: do this as a Bytes instead
data: String,
}
impl From<Level> for RequestErrorHandler {
fn from(level: Level) -> Self {
match level {
@@ -60,13 +74,31 @@ impl From<Level> for RequestErrorHandler {
impl AuthorizedRequest {
/// Save an RPC call that returned "execution reverted" to the database.
async fn save_revert<T>(self: Arc<Self>, method: String, params: T) -> anyhow::Result<()>
where
T: Clone + fmt::Debug + serde::Serialize + Send + Sync + 'static,
{
let db_conn = self.db_conn().context("db_conn needed to save reverts")?;
async fn save_revert(self: Arc<Self>, params: EthCallParams) -> anyhow::Result<()> {
if let Self::User(Some(db_conn), authorized_request) = &*self {
// TODO: do this on the database side?
let timestamp = Utc::now();
todo!("save the revert to the database");
let rl = revert_logs::ActiveModel {
user_key_id: sea_orm::Set(authorized_request.user_key_id),
method: sea_orm::Set(params.method),
to: sea_orm::Set(params.to),
call_data: sea_orm::Set(params.data),
timestamp: sea_orm::Set(timestamp),
..Default::default()
};
let rl = rl
.save(db_conn)
.await
.context("Failed saving new revert log")?;
// TODO: what log level?
trace!(?rl);
}
// TODO: return something useful
Ok(())
}
}
@@ -110,15 +142,15 @@ impl OpenRequestHandle {
/// TODO: we no longer take self because metered doesn't like that
/// TODO: ErrorCount includes too many types of errors, such as transaction reverts
#[measure([JsonRpcErrorCount, HitCount, ProviderErrorCount, ResponseTime, Throughput])]
pub async fn request<T, R>(
pub async fn request<P, R>(
&self,
method: &str,
params: &T,
params: &P,
error_handler: RequestErrorHandler,
) -> Result<R, ProviderError>
where
// TODO: not sure about this type. would be better to not need clones, but measure and spawns combine to need it
T: Clone + fmt::Debug + serde::Serialize + Send + Sync + 'static,
P: Clone + fmt::Debug + serde::Serialize + Send + Sync + 'static,
R: serde::Serialize + serde::de::DeserializeOwned + fmt::Debug,
{
// ensure this function only runs once
@@ -212,11 +244,12 @@
if let Some(msg) = msg {
if msg.starts_with("execution reverted") {
// TODO: is there a more efficient way to do this?
let params: EthCallParams = serde_json::from_value(json!(params))
.expect("parsing eth_call");
// spawn saving to the database so we don't slow down the request (or error if no db)
let f = self
.authorization
.clone()
.save_revert(method.to_string(), params.clone());
let f = self.authorization.clone().save_revert(params);
tokio::spawn(async move { f.await });
} else {