// web3-proxy/src/main.rs

use dashmap::DashMap;
use futures::future;
use governor::clock::{Clock, QuantaClock, QuantaInstant};
use governor::middleware::NoOpMiddleware;
use governor::state::{InMemoryState, NotKeyed};
use governor::{NotUntil, RateLimiter};
use std::num::NonZeroU32;
use std::sync::Arc;
use tokio::sync::RwLock;
use tokio::time::sleep;
use warp::Filter;

type RateLimiterMap = DashMap<String, RpcRateLimiter>;
type ConnectionsMap = DashMap<String, u32>;
type RpcRateLimiter =
    RateLimiter<NotKeyed, InMemoryState, QuantaClock, NoOpMiddleware<QuantaInstant>>;
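
// For reference, a minimal sketch of how one of these direct (unkeyed) governor
// limiters behaves. the names below are illustrative, not part of this file:
//
//     let clock = QuantaClock::default();
//     let quota = governor::Quota::per_second(NonZeroU32::new(10).unwrap());
//     let limiter = governor::RateLimiter::direct_with_clock(quota, &clock);
//     match limiter.check() {
//         Ok(_) => { /* under the limit. proceed */ }
//         Err(not_until) => { /* retry after not_until.earliest_possible() */ }
//     }
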
/// Load balance to the least-connection rpc
struct BalancedRpcs {
    rpcs: RwLock<Vec<String>>,
    connections: ConnectionsMap,
    ratelimits: RateLimiterMap,
}

impl BalancedRpcs {
fn new(servers: Vec<(&str, u32)>, clock: &QuantaClock) -> BalancedRpcs {
        let mut rpcs: Vec<String> = vec![];
        let connections = DashMap::new();
        let ratelimits = DashMap::new();

        for (s, limit) in servers.into_iter() {
            rpcs.push(s.to_string());
            connections.insert(s.to_string(), 0);

            if limit > 0 {
                let quota = governor::Quota::per_second(NonZeroU32::new(limit).unwrap());
                let rate_limiter = governor::RateLimiter::direct_with_clock(quota, clock);
                ratelimits.insert(s.to_string(), rate_limiter);
            }
        }

        BalancedRpcs {
            rpcs: RwLock::new(rpcs),
            connections,
            ratelimits,
        }
    }

async fn get_upstream_server(&self) -> Result<String, NotUntil<QuantaInstant>> {
        let mut balanced_rpcs = self.rpcs.write().await;

balanced_rpcs.sort_unstable_by(|a, b| {
self.connections
.get(a)
.unwrap()
.cmp(&self.connections.get(b).unwrap())
});
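
        // after the sort, the front of the list is the least-connected server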
        let mut earliest_not_until = None;
        for selected_rpc in balanced_rpcs.iter() {
            // check rate limits. a server with a limit of 0 has no entry in `ratelimits`
            if let Some(ratelimiter) = self.ratelimits.get(selected_rpc) {
                match ratelimiter.check() {
                    Ok(_) => {
                        // rate limit succeeded
                    }
                    Err(not_until) => {
                        // rate limit failed
                        // save the earliest not_until. if nothing succeeds, return an Err with it
                        if earliest_not_until.is_none() {
                            earliest_not_until = Some(not_until);
                        } else {
                            let earliest_possible =
                                earliest_not_until.as_ref().unwrap().earliest_possible();
                            if not_until.earliest_possible() < earliest_possible {
                                earliest_not_until = Some(not_until);
                            }
                        }
                        continue;
                    }
                }
            }

            // increment our connection counter
            let mut connections = self.connections.get_mut(selected_rpc).unwrap();
            *connections += 1;

            // return the selected rpc
            return Ok(selected_rpc.clone());
        }

        // no server was available. return the earliest not_until
        if let Some(not_until) = earliest_not_until {
            Err(not_until)
        } else {
            // this is only reachable if no rpcs are configured
            panic!("no rpcs are configured");
        }
    }
}
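
// A minimal usage sketch for BalancedRpcs with hypothetical URLs. a limit of 0
// means no rate limiter is configured for that server:
#[cfg(test)]
mod balanced_rpcs_tests {
    use super::*;

    #[tokio::test]
    async fn selecting_a_server_increments_its_connection_counter() {
        let clock = QuantaClock::default();
        let rpcs = BalancedRpcs::new(
            vec![("http://a.example", 0), ("http://b.example", 0)],
            &clock,
        );

        let selected = rpcs.get_upstream_server().await.unwrap();

        // the selected server's connection counter went from 0 to 1
        assert_eq!(*rpcs.connections.get(&selected).unwrap(), 1);
    }
}
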
/// Send to all the Rpcs
/// Unlike BalancedRpcs, there is no tracking of connections
/// We do still track rate limits
struct LoudRpcs {
    rpcs: Vec<String>,
    // TODO: what type? store with connections?
    ratelimits: RateLimiterMap,
}

impl LoudRpcs {
fn new(servers: Vec<(&str, u32)>, clock: &QuantaClock) -> LoudRpcs {
        let mut rpcs: Vec<String> = vec![];
        let ratelimits = RateLimiterMap::new();

        for (s, limit) in servers.into_iter() {
            rpcs.push(s.to_string());

            if limit > 0 {
                let quota = governor::Quota::per_second(NonZeroU32::new(limit).unwrap());
                let rate_limiter = governor::RateLimiter::direct_with_clock(quota, clock);
                ratelimits.insert(s.to_string(), rate_limiter);
            }
        }

        LoudRpcs { rpcs, ratelimits }
    }

    async fn get_upstream_servers(&self) -> Result<Vec<String>, NotUntil<QuantaInstant>> {
        let mut earliest_not_until = None;
        let mut selected_rpcs = vec![];
        for selected_rpc in self.rpcs.iter() {
            // check rate limits. a server with a limit of 0 has no entry in `ratelimits`
            if let Some(ratelimiter) = self.ratelimits.get(selected_rpc) {
                match ratelimiter.check() {
                    Ok(_) => {
                        // rate limit succeeded
                    }
                    Err(not_until) => {
                        // rate limit failed
                        // save the earliest not_until. if nothing succeeds, return an Err with it
                        if earliest_not_until.is_none() {
                            earliest_not_until = Some(not_until);
                        } else {
                            let earliest_possible =
                                earliest_not_until.as_ref().unwrap().earliest_possible();
                            if not_until.earliest_possible() < earliest_possible {
                                earliest_not_until = Some(not_until);
                            }
                        }
                        continue;
                    }
                }
            }

            // this server is available
            selected_rpcs.push(selected_rpc.clone());
        }

        if !selected_rpcs.is_empty() {
            return Ok(selected_rpcs);
        }

        // no server was available. return the earliest not_until
        if let Some(not_until) = earliest_not_until {
            Err(not_until)
        } else {
            // this is only reachable if no rpcs are configured
            panic!("no rpcs are configured");
        }
    }

    fn has_rpcs(&self) -> bool {
        !self.rpcs.is_empty()
    }
}
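
// A sketch of the rate-limited path, using a hypothetical URL: with a quota of
// 1 request per second, an immediate second call should be refused.
#[cfg(test)]
mod loud_rpcs_tests {
    use super::*;

    #[tokio::test]
    async fn second_call_within_the_quota_window_is_refused() {
        let clock = QuantaClock::default();
        let rpcs = LoudRpcs::new(vec![("http://a.example", 1)], &clock);

        // the first check consumes the whole 1/second quota
        assert!(rpcs.get_upstream_servers().await.is_ok());
        // so the second check fails with a not_until
        assert!(rpcs.get_upstream_servers().await.is_err());
    }
}
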
struct Web3ProxyState {
    clock: QuantaClock,
    client: reqwest::Client,
    // TODO: LoudRpcs and BalancedRpcs should probably share a trait or something
    balanced_rpc_tiers: Vec<BalancedRpcs>,
    private_rpcs: LoudRpcs,
    /// lock this when all of the balanced rate limiters are hit
    balanced_rpc_ratelimiter_lock: RwLock<()>,
    /// lock this when all of the private rate limiters are hit
    private_rpcs_ratelimiter_lock: RwLock<()>,
}
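
// How the locks above are meant to be used (see proxy_web3_rpc below): each
// request holds a read lock while it tries servers. when every server is rate
// limited, the request trades its read lock for the write lock and sleeps until
// the earliest not_until. that keeps other requests from busy-looping against
// limiters that are known to be exhausted.
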
impl Web3ProxyState {
fn new(
balanced_rpc_tiers: Vec<Vec<(&str, u32)>>,
private_rpcs: Vec<(&str, u32)>,
) -> Web3ProxyState {
        let clock = QuantaClock::default();

        // TODO: warn if no private relays
        Web3ProxyState {
            clock: clock.clone(),
            client: reqwest::Client::new(),
            balanced_rpc_tiers: balanced_rpc_tiers
                .into_iter()
                .map(|servers| BalancedRpcs::new(servers, &clock))
                .collect(),
            private_rpcs: LoudRpcs::new(private_rpcs, &clock),
            balanced_rpc_ratelimiter_lock: Default::default(),
            private_rpcs_ratelimiter_lock: Default::default(),
        }
    }

    /// send the request to the appropriate RPCs
async fn proxy_web3_rpc(
self: Arc<Web3ProxyState>,
json_body: serde_json::Value,
) -> anyhow::Result<impl warp::Reply> {
let eth_send_raw_transaction =
serde_json::Value::String("eth_sendRawTransaction".to_string());
        if self.private_rpcs.has_rpcs() && json_body.get("method") == Some(&eth_send_raw_transaction)
        {
            // there are private rpcs configured and the request is eth_sendRawTransaction. send to all private rpcs
            loop {
                let read_lock = self.private_rpcs_ratelimiter_lock.read().await;

match self.private_rpcs.get_upstream_servers().await {
Ok(upstream_servers) => {
if let Ok(result) = self
.try_send_requests(upstream_servers, None, &json_body)
.await
{
return Ok(result);
}
}
Err(not_until) => {
                        drop(read_lock);
                        let write_lock = self.private_rpcs_ratelimiter_lock.write().await;

let deadline = not_until.wait_time_from(self.clock.now());
sleep(deadline).await;
drop(write_lock);
}
};
}
} else {
// this is not a private transaction (or no private relays are configured)
loop {
let read_lock = self.balanced_rpc_ratelimiter_lock.read().await;
// there are multiple tiers. save the earliest not_until (if any). if we don't return, we will sleep until then and then try again
let mut earliest_not_until = None;
for balanced_rpcs in self.balanced_rpc_tiers.iter() {
match balanced_rpcs.get_upstream_server().await {
Ok(upstream_server) => {
// TODO: capture any errors. at least log them
if let Ok(result) = self
.try_send_requests(
vec![upstream_server],
Some(&balanced_rpcs.connections),
&json_body,
)
.await
{
return Ok(result);
}
}
                        Err(not_until) => {
                            // save the earliest not_until. if nothing succeeds, return an Err with it
                            if earliest_not_until.is_none() {
                                earliest_not_until = Some(not_until);
                            } else {
                                // TODO: do we need to unwrap this far? can we just compare the not_untils
                                let earliest_possible =
                                    earliest_not_until.as_ref().unwrap().earliest_possible();
                                if not_until.earliest_possible() < earliest_possible {
                                    earliest_not_until = Some(not_until);
                                }
                            }
                        }
}
}
drop(read_lock);
let write_lock = self.balanced_rpc_ratelimiter_lock.write().await;
// we haven't returned an Ok, sleep and try again
// unwrap should be safe since we would have returned if it wasn't set
let deadline = earliest_not_until.unwrap().wait_time_from(self.clock.now());
sleep(deadline).await;
drop(write_lock);
}
}
    }

async fn try_send_requests(
&self,
upstream_servers: Vec<String>,
connections: Option<&ConnectionsMap>,
json_body: &serde_json::Value,
) -> anyhow::Result<String> {
// send the query to all the servers
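        // tokio::spawn runs each request as its own task so the posts happen
        // concurrently; join_all waits for all of them and preserves input order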
        let bodies = future::join_all(upstream_servers.into_iter().map(|url| {
            let client = self.client.clone();
            let json_body = json_body.clone();
            tokio::spawn(async move {
                // TODO: there has to be a better way to do this map and map_err
                client
                    .post(&url)
                    .json(&json_body)
                    .send()
                    .await
                    // add the url to the error so that we can decrement
                    .map_err(|e| (url.clone(), e))?
                    .text()
                    .await
                    // add the url to the result and the error so that we can decrement
                    .map(|t| (url.clone(), t))
                    .map_err(|e| (url, e))
            })
        }))
        .await;

        // we are going to collect successes and failures
        let mut oks = vec![];
        let mut errs = vec![];

        // TODO: parallel?
        for b in bodies {
            match b {
                Ok(Ok((url, b))) => {
                    // reduce connection counter
                    if let Some(connections) = connections {
                        *connections.get_mut(&url).unwrap() -= 1;
                    }

                    // TODO: if "no block with that header", skip this response (maybe retry)
                    oks.push(b);
                }
                Ok(Err((url, e))) => {
                    // reduce connection counter
                    if let Some(connections) = connections {
                        *connections.get_mut(&url).unwrap() -= 1;
                    }

                    // TODO: better errors
                    eprintln!("Got a reqwest::Error: {}", e);
                    errs.push(anyhow::anyhow!("Got a reqwest::Error"));
                }
                Err(e) => {
                    // TODO: better errors
                    eprintln!("Got a tokio::JoinError: {}", e);
                    errs.push(anyhow::anyhow!("Got a tokio::JoinError"));
                }
            }
        }

        // TODO: which response should we use?
        if let Some(ok) = oks.pop() {
            Ok(ok)
        } else if let Some(err) = errs.pop() {
            Err(err)
        } else {
            Err(anyhow::anyhow!("no successful responses"))
        }
}
}

#[tokio::main]
async fn main() {
// TODO: load the config from yaml instead of hard coding
// TODO: support multiple chains in one process. then we could just point "chain.stytt.com" at this and caddy wouldn't need anything else
// TODO: i kind of want to make use of caddy's load balancing and health checking and such though
    let listen_port = 8445;

// TODO: be smart about about using archive nodes?
let state = Web3ProxyState::new(
vec![
// local nodes
            vec![("https://10.11.12.16:8545", 0)],
            // paid nodes
            // TODO: add paid nodes (with rate limits)
            // free nodes
            // TODO: add rate limits
            vec![
                ("https://main-rpc.linkpool.io", 0),
                ("https://rpc.ankr.com/eth", 0),
            ],
        ],
        vec![
            ("https://api.edennetwork.io/v1/beta", 0),
            ("https://api.edennetwork.io/v1/", 0),
],
);
let state: Arc<Web3ProxyState> = Arc::new(state);
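
    // any POSTed JSON-RPC body is accepted. for example (standard JSON-RPC 2.0):
    //
    //     {"jsonrpc": "2.0", "id": 1, "method": "eth_blockNumber", "params": []}
    //
    // an eth_sendRawTransaction call goes to the private rpcs instead of the balanced tiers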
let proxy_rpc_filter = warp::any()
.and(warp::post())
.and(warp::body::json())
.then(move |json_body| state.clone().proxy_web3_rpc(json_body))
.map(handle_anyhow_errors);
println!("Listening on 0.0.0.0:{}", listen_port);
warp::serve(proxy_rpc_filter)
.run(([0, 0, 0, 0], listen_port))
.await;
}

/// convert result into an http response. use this at the end of your warp filter
pub fn handle_anyhow_errors<T: warp::Reply>(res: anyhow::Result<T>) -> Box<dyn warp::Reply> {
match res {
Ok(r) => Box::new(r.into_response()),
// TODO: json error?
Err(e) => Box::new(warp::reply::with_status(
format!("{}", e),
reqwest::StatusCode::INTERNAL_SERVER_ERROR,
)),
}
}
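
// A quick sketch exercising handle_anyhow_errors. String already implements
// warp::Reply, and reqwest::StatusCode is a re-export of the same
// http::StatusCode type that warp responses use:
#[cfg(test)]
mod error_reply_tests {
    use super::*;
    use warp::Reply;

    #[test]
    fn anyhow_error_becomes_a_500() {
        let res: anyhow::Result<String> = Err(anyhow::anyhow!("boom"));
        let response = handle_anyhow_errors(res).into_response();
        assert_eq!(response.status(), reqwest::StatusCode::INTERNAL_SERVER_ERROR);
    }
}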