use dashmap::DashMap;
use futures::stream;
use futures::StreamExt;
use governor::clock::{QuantaClock, QuantaInstant};
use governor::middleware::NoOpMiddleware;
use governor::state::{InMemoryState, NotKeyed};
use governor::{NotUntil, RateLimiter};
use std::num::NonZeroU32;
use std::sync::Arc;
use tokio::sync::RwLock;
// use tokio::time::{sleep, Duration};
use warp::Filter;

// TODO: what should this be?
const PARALLEL_REQUESTS: usize = 4;

type RpcRateLimiter =
    RateLimiter<NotKeyed, InMemoryState, QuantaClock, NoOpMiddleware<QuantaInstant>>;
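
// A minimal usage sketch for the alias above (illustration only, not part of the proxy
// logic): a direct (unkeyed) governor limiter is built from a `Quota` and polled with
// `check()`, which returns `Err(NotUntil)` carrying the earliest instant a retry could pass.
//
//     let limiter: RpcRateLimiter =
//         RateLimiter::direct(governor::Quota::per_second(NonZeroU32::new(10).unwrap()));
//     if let Err(not_until) = limiter.check() {
//         eprintln!("rate limited until {:?}", not_until.earliest_possible());
//     }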

/// Load balance to the least-connection rpc
struct BalancedRpcs {
    rpcs: RwLock<Vec<String>>,
    connections: DashMap<String, u32>,
    // TODO: what type? store with connections?
    // ratelimits: RateLimiter<K, DashMapStateStore<K>, dyn governor::clock::Clock>,
    ratelimits: DashMap<String, RpcRateLimiter>,
}
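
// Construction sketch (assumption, mirroring the `Into` impl below): a tier is built from
// `(url, requests_per_second)` pairs, where a limit of 0 means "no rate limiter is attached".
//
//     let tier: BalancedRpcs = vec![
//         ("https://10.11.12.16:8545", 0),  // unlimited local node
//         ("https://rpc.example.com", 25),  // hypothetical rate-limited endpoint
//     ]
//     .into();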

// TODO: also pass rate limits to this?
impl Into<BalancedRpcs> for Vec<(&str, u32)> {
    fn into(self) -> BalancedRpcs {
        let mut rpcs: Vec<String> = vec![];
        let connections = DashMap::new();
        let ratelimits = DashMap::new();

        // TODO: where should we get the rate limits from?
        // TODO: this is not going to work. we need different rate limits for different endpoints

        for (s, limit) in self.into_iter() {
            rpcs.push(s.to_string());
            connections.insert(s.to_string(), 0);

            if limit > 0 {
                let quota = governor::Quota::per_second(NonZeroU32::new(limit).unwrap());

                let rate_limiter = governor::RateLimiter::direct(quota);

                ratelimits.insert(s.to_string(), rate_limiter);
            }
        }

        BalancedRpcs {
            rpcs: RwLock::new(rpcs),
            connections,
            ratelimits,
        }
    }
}

impl BalancedRpcs {
    async fn get_upstream_server(&self) -> Result<String, NotUntil<QuantaInstant>> {
        let mut balanced_rpcs = self.rpcs.write().await;

        balanced_rpcs.sort_unstable_by(|a, b| {
            self.connections
                .get(a)
                .unwrap()
                .cmp(&self.connections.get(b).unwrap())
        });

        let mut earliest_not_until = None;

        for selected_rpc in balanced_rpcs.iter() {
            // check rate limits. rpcs configured with a limit of 0 have no limiter and are never throttled
            if let Some(ratelimiter) = self.ratelimits.get(selected_rpc) {
                match ratelimiter.check() {
                    Ok(_) => {
                        // rate limit succeeded
                    }
                    Err(not_until) => {
                        // rate limit failed
                        // save the earliest not_until. if nothing succeeds, return an Err with not_until in it
                        if earliest_not_until.is_none() {
                            earliest_not_until = Some(not_until);
                        } else {
                            let earliest_possible =
                                earliest_not_until.as_ref().unwrap().earliest_possible();
                            let new_earliest_possible = not_until.earliest_possible();

                            if earliest_possible > new_earliest_possible {
                                earliest_not_until = Some(not_until);
                            }
                        }
                        continue;
                    }
                }
            }

            // increment our connection counter
            // TODO: need to change this to be an atomic counter!
            let mut connections = self.connections.get_mut(selected_rpc).unwrap();
            *connections += 1;
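
            // Sketch of the TODO above (assumption, not the current implementation): storing
            // atomic counters would avoid taking a mutable DashMap guard for the increment:
            //     connections: DashMap<String, std::sync::atomic::AtomicU32>
            //     self.connections
            //         .get(selected_rpc)
            //         .unwrap()
            //         .fetch_add(1, std::sync::atomic::Ordering::AcqRel);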

            // return the selected RPC
            return Ok(selected_rpc.clone());
        }

        // return the smallest not_until
        if let Some(not_until) = earliest_not_until {
            return Err(not_until);
        } else {
            unimplemented!();
        }
    }
}

/// Send to all the Rpcs
struct LoudRpcs {
    rpcs: Vec<String>,
    // TODO: what type? store with connections?
    // ratelimits: DashMap<String, u32>,
}

impl Into<LoudRpcs> for Vec<&str> {
    fn into(self) -> LoudRpcs {
        let mut rpcs: Vec<String> = vec![];
        // let ratelimits = DashMap::new();

        for s in self.into_iter() {
            rpcs.push(s.to_string());
            // ratelimits.insert(s.to_string(), 0);
        }

        LoudRpcs {
            rpcs,
            // ratelimits,
        }
    }
}

impl LoudRpcs {
    async fn get_upstream_servers(&self) -> Vec<String> {
        self.rpcs.clone()
    }

    fn as_bool(&self) -> bool {
        !self.rpcs.is_empty()
    }
}

struct Web3ProxyState {
    client: reqwest::Client,
    balanced_rpc_tiers: Vec<BalancedRpcs>,
    private_rpcs: LoudRpcs,
}

impl Web3ProxyState {
    fn new(balanced_rpc_tiers: Vec<Vec<(&str, u32)>>, private_rpcs: Vec<&str>) -> Web3ProxyState {
        // TODO: warn if no private relays
        Web3ProxyState {
            client: reqwest::Client::new(),
            balanced_rpc_tiers: balanced_rpc_tiers.into_iter().map(Into::into).collect(),
            private_rpcs: private_rpcs.into(),
        }
    }

    /// send the request to the appropriate RPCs
    async fn proxy_web3_rpc(
        self: Arc<Web3ProxyState>,
        json_body: serde_json::Value,
    ) -> anyhow::Result<impl warp::Reply> {
        let eth_send_raw_transaction =
            serde_json::Value::String("eth_sendRawTransaction".to_string());

        if self.private_rpcs.as_bool() && json_body.get("method") == Some(&eth_send_raw_transaction)
        {
            // there are private rpcs configured and the request is eth_sendRawTransaction. send to all private rpcs
            let upstream_servers = self.private_rpcs.get_upstream_servers().await;

            if let Ok(result) = self.try_send_requests(upstream_servers, &json_body).await {
                return Ok(result);
            }
        } else {
            // this is not a private transaction (or no private relays are configured)
            for balanced_rpcs in self.balanced_rpc_tiers.iter() {
                if let Ok(upstream_server) = balanced_rpcs.get_upstream_server().await {
                    // TODO: capture any errors. at least log them
                    if let Ok(result) = self
                        .try_send_requests(vec![upstream_server], &json_body)
                        .await
                    {
                        return Ok(result);
                    }
                } else {
                    // TODO: if we got an error, save the ratelimit NotUntil so we can sleep until then before trying again
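                    // Sketch of the TODO above (assumption, not implemented here): keep the
                    // `NotUntil` from the `Err` arm of `get_upstream_server`, and once every
                    // tier has been tried, sleep until `not_until.earliest_possible()`
                    // (converted to a tokio-friendly duration) before retrying the tiers.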
                }
            }
        }

        return Err(anyhow::anyhow!("all servers failed"));
    }
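
    // Usage sketch (illustrative request; not part of the code path): the warp filter in
    // `main` feeds parsed JSON bodies into `proxy_web3_rpc`, so a plain JSON-RPC POST
    // exercises the balanced tiers:
    //     curl -X POST http://127.0.0.1:8445 \
    //         -H 'Content-Type: application/json' \
    //         -d '{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}'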

    async fn try_send_requests(
        &self,
        upstream_servers: Vec<String>,
        json_body: &serde_json::Value,
    ) -> anyhow::Result<String> {
        // send the query to all the servers
        let mut bodies = stream::iter(upstream_servers)
            .map(|url| {
                let client = self.client.clone();
                let json_body = json_body.clone();
                tokio::spawn(async move {
                    let resp = client.post(url).json(&json_body).send().await?;
                    resp.text().await
                })
            })
            .buffer_unordered(PARALLEL_REQUESTS);

        let mut oks = vec![];
        let mut errs = vec![];

        while let Some(b) = bodies.next().await {
            // TODO: reduce connection counter
            match b {
                Ok(Ok(b)) => {
                    // TODO: if "no block with that header", skip this response (maybe retry)
                    oks.push(b);
                }
                Ok(Err(e)) => {
                    // TODO: better errors
                    eprintln!("Got a reqwest::Error: {}", e);
                    errs.push(anyhow::anyhow!("Got a reqwest::Error"));
                }
                Err(e) => {
                    // TODO: better errors
                    eprintln!("Got a tokio::JoinError: {}", e);
                    errs.push(anyhow::anyhow!("Got a tokio::JoinError"));
                }
            }
        }

        // TODO: which response should we use?
        if oks.len() > 0 {
            return Ok(oks.pop().unwrap());
        } else if errs.len() > 0 {
            return Err(errs.pop().unwrap());
        } else {
            return Err(anyhow::anyhow!("no successful responses"));
        }
    }
}

#[tokio::main]
async fn main() {
    // TODO: load the config from yaml instead of hard coding
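    // Sketch of what such a config might look like (assumption only; no config loading exists
    // yet, and the field names are hypothetical):
    //     balanced_rpc_tiers:
    //       - - url: "https://10.11.12.16:8545"
    //           requests_per_second: 0
    //     private_rpcs:
    //       - "https://api.edennetwork.io/v1/beta"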
    // TODO: support multiple chains in one process. then we could just point "chain.stytt.com" at this and caddy wouldn't need anything else
    // TODO: i kind of want to make use of caddy's load balancing and health checking and such though
    let listen_port = 8445;
    // TODO: be smart about using archive nodes?
    let state = Web3ProxyState::new(
        vec![
            // local nodes
            vec![("https://10.11.12.16:8545", 0)],
            // paid nodes
            // TODO: add paid nodes (with rate limits)
            // free nodes
            // TODO: add rate limits
            vec![
                ("https://main-rpc.linkpool.io", 0),
                ("https://rpc.ankr.com/eth", 0),
            ],
        ],
        vec![
            "https://api.edennetwork.io/v1/beta",
            "https://api.edennetwork.io/v1/",
        ],
    );

    let state: Arc<Web3ProxyState> = Arc::new(state);

    let proxy_rpc_filter = warp::any()
        .and(warp::post())
        .and(warp::body::json())
        .then(move |json_body| state.clone().proxy_web3_rpc(json_body))
        .map(handle_anyhow_errors);

    println!("Listening on 0.0.0.0:{}", listen_port);

    warp::serve(proxy_rpc_filter)
        .run(([0, 0, 0, 0], listen_port))
        .await;
}

/// convert result into an http response. use this at the end of your warp filter
pub fn handle_anyhow_errors<T: warp::Reply>(res: anyhow::Result<T>) -> Box<dyn warp::Reply> {
    match res {
        Ok(r) => Box::new(r.into_response()),
        // TODO: json error?
        Err(e) => Box::new(warp::reply::with_status(
            format!("{}", e),
            reqwest::StatusCode::INTERNAL_SERVER_ERROR,
        )),
    }
}