// TODO: don't use RwLock<HashMap>. i think we need a concurrent hashmap or we will hit all sorts of deadlocks

mod block_watcher;
mod provider;

use futures::future;
use governor::clock::{Clock, QuantaClock, QuantaInstant};
use governor::middleware::NoOpMiddleware;
use governor::state::{InMemoryState, NotKeyed};
use governor::NotUntil;
use governor::RateLimiter;
use std::collections::HashMap;
use std::num::NonZeroU32;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::{mpsc, RwLock};
use tokio::time::sleep;
use tracing::log::warn;
use warp::Filter;

// use crate::types::{BlockMap, ConnectionsMap, RpcRateLimiterMap};
use crate::block_watcher::{BlockWatcher, BlockWatcherSender};
use crate::provider::Web3Connection;

static APP_USER_AGENT: &str = concat!(
    "satoshiandkin/",
    env!("CARGO_PKG_NAME"),
    "/",
    env!("CARGO_PKG_VERSION"),
);
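
// For reference, the concat! above expands at compile time to something like
// "satoshiandkin/<package name>/<package version>", with both values read from Cargo.toml via env!.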

type RpcRateLimiter =
    RateLimiter<NotKeyed, InMemoryState, QuantaClock, NoOpMiddleware<QuantaInstant>>;

type RpcRateLimiterMap = RwLock<HashMap<String, RpcRateLimiter>>;
type ConnectionsMap = RwLock<HashMap<String, Web3Connection>>;

/// Load balance to the rpc
/// TODO: i'm not sure about having 3 locks here. can we share them?
struct RpcTier {
    /// RPC urls sorted by active requests
    /// TODO: what type for the rpc?
    rpcs: RwLock<Vec<String>>,
    connections: Arc<ConnectionsMap>,
    ratelimits: RpcRateLimiterMap,
}

impl RpcTier {
    async fn try_new(
        servers: Vec<(&str, u32)>,
        http_client: Option<reqwest::Client>,
        block_watcher_sender: BlockWatcherSender,
        clock: &QuantaClock,
    ) -> anyhow::Result<RpcTier> {
        let mut rpcs: Vec<String> = vec![];
        let mut connections = HashMap::new();
        let mut ratelimits = HashMap::new();

        for (s, limit) in servers.into_iter() {
            rpcs.push(s.to_string());

            let connection = Web3Connection::try_new(
                s.to_string(),
                http_client.clone(),
                block_watcher_sender.clone(),
            )
            .await?;

            connections.insert(s.to_string(), connection);

            if limit > 0 {
                let quota = governor::Quota::per_second(NonZeroU32::new(limit).unwrap());

                let rate_limiter = governor::RateLimiter::direct_with_clock(quota, clock);

                ratelimits.insert(s.to_string(), rate_limiter);
            }
        }

        Ok(RpcTier {
            rpcs: RwLock::new(rpcs),
            connections: Arc::new(RwLock::new(connections)),
            ratelimits: RwLock::new(ratelimits),
        })
    }

    /// get the best available rpc server
    async fn next_upstream_server(&self) -> Result<String, NotUntil<QuantaInstant>> {
        let mut balanced_rpcs = self.rpcs.write().await;

        // sort rpcs by their active connections
        let connections = self.connections.read().await;

        balanced_rpcs
            .sort_unstable_by(|a, b| connections.get(a).unwrap().cmp(connections.get(b).unwrap()));

        // release the read guard now. we take a write lock on the same RwLock below and holding both would deadlock
        drop(connections);

        let mut earliest_not_until = None;

        for selected_rpc in balanced_rpcs.iter() {
            // TODO: check current block number. if behind, make our own NotUntil here
            let ratelimits = self.ratelimits.write().await;

            // check rate limits
            match ratelimits.get(selected_rpc).unwrap().check() {
                Ok(_) => {
                    // rate limit succeeded
                }
                Err(not_until) => {
                    // rate limit failed
                    // save the smallest not_until. if nothing succeeds, return an Err with not_until in it
                    if earliest_not_until.is_none() {
                        earliest_not_until = Some(not_until);
                    } else {
                        let earliest_possible =
                            earliest_not_until.as_ref().unwrap().earliest_possible();
                        let new_earliest_possible = not_until.earliest_possible();

                        if earliest_possible > new_earliest_possible {
                            earliest_not_until = Some(not_until);
                        }
                    }
                    continue;
                }
            };

            // increment our connection counter
            self.connections
                .write()
                .await
                .get_mut(selected_rpc)
                .unwrap()
                .inc_active_requests();

            // return the selected RPC
            return Ok(selected_rpc.clone());
        }

        // return the smallest not_until
        if let Some(not_until) = earliest_not_until {
            Err(not_until)
        } else {
            unimplemented!();
        }
    }

    /// get all available rpc servers
    async fn get_upstream_servers(&self) -> Result<Vec<String>, NotUntil<QuantaInstant>> {
        let mut earliest_not_until = None;

        let mut selected_rpcs = vec![];

        for selected_rpc in self.rpcs.read().await.iter() {
            // check rate limits
            match self
                .ratelimits
                .write()
                .await
                .get(selected_rpc)
                .unwrap()
                .check()
            {
                Ok(_) => {
                    // rate limit succeeded
                }
                Err(not_until) => {
                    // rate limit failed
                    // save the smallest not_until. if nothing succeeds, return an Err with not_until in it
                    if earliest_not_until.is_none() {
                        earliest_not_until = Some(not_until);
                    } else {
                        let earliest_possible =
                            earliest_not_until.as_ref().unwrap().earliest_possible();
                        let new_earliest_possible = not_until.earliest_possible();

                        if earliest_possible > new_earliest_possible {
                            earliest_not_until = Some(not_until);
                        }
                    }
                    continue;
                }
            };

            // increment our connection counter
            self.connections
                .write()
                .await
                .get_mut(selected_rpc)
                .unwrap()
                .inc_active_requests();

            // this rpc should work
            selected_rpcs.push(selected_rpc.clone());
        }

        if !selected_rpcs.is_empty() {
            return Ok(selected_rpcs);
        }

        // return the earliest not_until
        if let Some(not_until) = earliest_not_until {
            Err(not_until)
        } else {
            // TODO: is this right?
            Ok(vec![])
        }
    }
}

/// The application
struct Web3ProxyApp {
    /// clock used for rate limiting
    /// TODO: use tokio's clock (will require a different ratelimiting crate)
    clock: QuantaClock,
    /// Send requests to the best server available
    balanced_rpc_tiers: Arc<Vec<RpcTier>>,
    /// Send private requests (like eth_sendRawTransaction) to all these servers
    private_rpcs: Option<Arc<RpcTier>>,
    /// write lock on these when all rate limits are hit
    balanced_rpc_ratelimiter_lock: RwLock<()>,
    private_rpcs_ratelimiter_lock: RwLock<()>,
}

impl Web3ProxyApp {
    async fn try_new(
        balanced_rpc_tiers: Vec<Vec<(&str, u32)>>,
        private_rpcs: Vec<(&str, u32)>,
    ) -> anyhow::Result<Web3ProxyApp> {
        let clock = QuantaClock::default();

        let (mut block_watcher, block_watcher_sender) = BlockWatcher::new(clock.clone());

        // make a http shared client
        // TODO: how should we configure the connection pool?
        // TODO: 5 minutes is probably long enough. unlimited is a bad idea if something upstream gets stuck
        let http_client = reqwest::ClientBuilder::new()
            .timeout(Duration::from_secs(300))
            .user_agent(APP_USER_AGENT)
            .build()?;

        // start the block_watcher
        tokio::spawn(async move { block_watcher.run().await });

        let balanced_rpc_tiers = Arc::new(
            future::join_all(balanced_rpc_tiers.into_iter().map(|balanced_rpc_tier| {
                RpcTier::try_new(
                    balanced_rpc_tier,
                    Some(http_client.clone()),
                    block_watcher_sender.clone(),
                    &clock,
                )
            }))
            .await
            .into_iter()
            .collect::<anyhow::Result<Vec<RpcTier>>>()?,
        );

        let private_rpcs = if private_rpcs.is_empty() {
            None
        } else {
            Some(Arc::new(
                RpcTier::try_new(
                    private_rpcs,
                    Some(http_client),
                    block_watcher_sender,
                    &clock,
                )
                .await?,
            ))
        };

        // TODO: warn if no private relays
        Ok(Web3ProxyApp {
            clock,
            balanced_rpc_tiers,
            private_rpcs,
            balanced_rpc_ratelimiter_lock: Default::default(),
            private_rpcs_ratelimiter_lock: Default::default(),
        })
    }

    /// send the request to the appropriate RPCs
    /// TODO: dry this up
    async fn proxy_web3_rpc(
        self: Arc<Web3ProxyApp>,
        json_body: serde_json::Value,
    ) -> anyhow::Result<impl warp::Reply> {
        let eth_send_raw_transaction =
            serde_json::Value::String("eth_sendRawTransaction".to_string());

        if self.private_rpcs.is_some() && json_body.get("method") == Some(&eth_send_raw_transaction)
        {
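            // an eth_sendRawTransaction request body looks roughly like this (values are illustrative):
            // {"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f87083..."],"id":1}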
            let private_rpcs = self.private_rpcs.clone().unwrap();

            // there are private rpcs configured and the request is eth_sendRawTransaction. send to all private rpcs
            loop {
                let read_lock = self.private_rpcs_ratelimiter_lock.read().await;

                match private_rpcs.get_upstream_servers().await {
                    Ok(upstream_servers) => {
                        let (tx, mut rx) =
                            mpsc::unbounded_channel::<anyhow::Result<serde_json::Value>>();

                        let clone = self.clone();
                        let connections = private_rpcs.connections.clone();
                        let json_body = json_body.clone();

                        tokio::spawn(async move {
                            clone
                                .try_send_requests(upstream_servers, connections, json_body, tx)
                                .await
                        });

                        let response = rx
                            .recv()
                            .await
                            .ok_or_else(|| anyhow::anyhow!("no successful response"))?;

                        if let Ok(response) = response {
                            return Ok(warp::reply::json(&response));
                        }
                    }
                    Err(not_until) => {
                        // TODO: move this to a helper function
                        // sleep (with a lock) until our rate limits should be available
                        drop(read_lock);

                        let write_lock = self.private_rpcs_ratelimiter_lock.write().await;

                        let deadline = not_until.wait_time_from(self.clock.now());
                        sleep(deadline).await;

                        drop(write_lock);
                    }
                };
            }
        } else {
            // this is not a private transaction (or no private relays are configured)
            // try to send to each tier, stopping at the first success
            loop {
                // TODO: i'm not positive that this locking is correct
                let read_lock = self.balanced_rpc_ratelimiter_lock.read().await;

                // there are multiple tiers. save the earliest not_until (if any). if we don't return, we will sleep until then and then try again
                let mut earliest_not_until = None;

                for balanced_rpcs in self.balanced_rpc_tiers.iter() {
                    match balanced_rpcs.next_upstream_server().await {
                        Ok(upstream_server) => {
                            let (tx, mut rx) =
                                mpsc::unbounded_channel::<anyhow::Result<serde_json::Value>>();

                            let clone = self.clone();
                            let connections = balanced_rpcs.connections.clone();
                            let json_body = json_body.clone();

                            tokio::spawn(async move {
                                clone
                                    .try_send_requests(
                                        vec![upstream_server],
                                        connections,
                                        json_body,
                                        tx,
                                    )
                                    .await
                            });

                            let response = rx
                                .recv()
                                .await
                                .ok_or_else(|| anyhow::anyhow!("no successful response"))?;

                            if let Ok(response) = response {
                                return Ok(warp::reply::json(&response));
                            }
                        }
                        Err(not_until) => {
                            // save the smallest not_until. if nothing succeeds, return an Err with not_until in it
                            if earliest_not_until.is_none() {
                                earliest_not_until = Some(not_until);
                            } else {
                                // TODO: do we need to unwrap this far? can we just compare the not_untils
                                let earliest_possible =
                                    earliest_not_until.as_ref().unwrap().earliest_possible();
                                let new_earliest_possible = not_until.earliest_possible();

                                if earliest_possible > new_earliest_possible {
                                    earliest_not_until = Some(not_until);
                                }
                            }
                        }
                    }
                }

                // we haven't returned an Ok, sleep and try again
                // TODO: move this to a helper function
                drop(read_lock);

                let write_lock = self.balanced_rpc_ratelimiter_lock.write().await;

                // if a rate limit told us when to retry, sleep until then. otherwise, wait a second and try again
                let deadline = if let Some(earliest_not_until) = earliest_not_until {
                    earliest_not_until.wait_time_from(self.clock.now())
                } else {
                    // TODO: exponential backoff?
                    Duration::from_secs(1)
                };

                sleep(deadline).await;

                drop(write_lock);
            }
        }
    }

    async fn try_send_requests(
        &self,
        rpc_servers: Vec<String>,
        connections: Arc<ConnectionsMap>,
        json_request_body: serde_json::Value,
        tx: mpsc::UnboundedSender<anyhow::Result<serde_json::Value>>,
    ) -> anyhow::Result<()> {
        // request bodies look like: {"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}
        let incoming_id = json_request_body
            .get("id")
            .ok_or_else(|| anyhow::anyhow!("bad id"))?
            .to_owned();
        let method = json_request_body
            .get("method")
            .and_then(|x| x.as_str())
            .ok_or_else(|| anyhow::anyhow!("bad method"))?
            .to_string();
        let params = json_request_body
            .get("params")
            .ok_or_else(|| anyhow::anyhow!("no params"))?
            .to_owned();

        // send the query to all the servers
        let bodies = future::join_all(rpc_servers.into_iter().map(|rpc| {
            let incoming_id = incoming_id.clone();
            let connections = connections.clone();
            let method = method.clone();
            let params = params.clone();
            let tx = tx.clone();

            async move {
                // get the client for this rpc server
                let provider = connections.read().await.get(&rpc).unwrap().clone_provider();

                let response = provider.request(&method, params).await;

                connections
                    .write()
                    .await
                    .get_mut(&rpc)
                    .unwrap()
                    .dec_active_requests();

                let mut response = response?;

                // replace the id with what we originally received
                if let Some(response_id) = response.get_mut("id") {
                    *response_id = incoming_id;
                }

                // send the first good response over the channel. that way we respond quickly
                // drop the result because errors are expected after the first send
                // TODO: if "no block with that header" or some other jsonrpc errors, skip this response
                let _ = tx.send(Ok(response));

                Ok::<(), anyhow::Error>(())
            }
        }))
        .await;

        // TODO: use iterators instead of pushing into a vec
        let mut errs = vec![];
        for x in bodies {
            match x {
                Ok(_) => {}
                Err(e) => {
                    // TODO: better errors
                    warn!("Got an error sending request: {}", e);
                    errs.push(e);
                }
            }
        }

        // take one of the collected errors (if any)
        let e: anyhow::Result<serde_json::Value> = if !errs.is_empty() {
            Err(errs.pop().unwrap())
        } else {
            Err(anyhow::anyhow!("no successful responses"))
        };

        // send the error to the channel
        if tx.send(e).is_ok() {
            // if we were able to send an error, then we never sent a success
            return Err(anyhow::anyhow!("no successful responses"));
        } else {
            // sending the error failed, so the other side must be closed (which means we sent a success earlier)
            Ok(())
        }
    }
}

#[tokio::main]
async fn main() {
    // install global collector configured based on RUST_LOG env var.
    tracing_subscriber::fmt::init();

    // TODO: load the config from yaml instead of hard coding
    // TODO: support multiple chains in one process. then we could just point "chain.stytt.com" at this and caddy wouldn't need anything else
    // TODO: i kind of want to make use of caddy's load balancing and health checking and such though
    let listen_port = 8445;

    // TODO: be smart about using archive nodes?
    let state = Web3ProxyApp::try_new(
        vec![
            // local nodes
            vec![("ws://10.11.12.16:8545", 0), ("ws://10.11.12.16:8946", 0)],
            // paid nodes
            // TODO: add paid nodes (with rate limits)
            // free nodes
            // TODO: add rate limits
            vec![
                ("https://main-rpc.linkpool.io", 0),
                ("https://rpc.ankr.com/eth", 0),
            ],
        ],
        vec![
            ("https://api.edennetwork.io/v1/beta", 0),
            ("https://api.edennetwork.io/v1/", 0),
        ],
    )
    .await
    .unwrap();

    let state: Arc<Web3ProxyApp> = Arc::new(state);

    let proxy_rpc_filter = warp::any()
        .and(warp::post())
        .and(warp::body::json())
        .then(move |json_body| state.clone().proxy_web3_rpc(json_body))
        .map(handle_anyhow_errors);

    warp::serve(proxy_rpc_filter)
        .run(([0, 0, 0, 0], listen_port))
        .await;
}
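
// Once the proxy is running, any JSON-RPC POST is load balanced across the configured tiers.
// A quick smoke test from the command line might look like this (illustrative only; adjust the
// host and port to match `listen_port` above):
//
//   curl -X POST http://127.0.0.1:8445 \
//     -H "Content-Type: application/json" \
//     -d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'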

/// convert result into an http response. use this at the end of your warp filter
pub fn handle_anyhow_errors<T: warp::Reply>(res: anyhow::Result<T>) -> Box<dyn warp::Reply> {
    match res {
        Ok(r) => Box::new(r.into_response()),
        // TODO: json error?
        Err(e) => Box::new(warp::reply::with_status(
            format!("{}", e),
            reqwest::StatusCode::INTERNAL_SERVER_ERROR,
        )),
    }
}
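
// A minimal test sketch for handle_anyhow_errors, assuming warp's `Reply` impls for
// `Box<dyn Reply>` and `String` (both provided by warp). Errors should surface as HTTP 500.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn errors_become_internal_server_error() {
        // any anyhow error should be converted into a 500 response with the error text as the body
        let res: anyhow::Result<String> = Err(anyhow::anyhow!("boom"));

        let reply = handle_anyhow_errors(res);
        let response = warp::Reply::into_response(reply);

        assert_eq!(response.status().as_u16(), 500);
    }
}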