use dashmap::DashMap;
use futures::future;
use futures::future::{AbortHandle, Abortable};
use futures::SinkExt;
use futures::StreamExt;
use governor::clock::{Clock, QuantaClock, QuantaInstant};
use governor::middleware::NoOpMiddleware;
use governor::state::{InMemoryState, NotKeyed};
use governor::{NotUntil, RateLimiter};
use regex::Regex;
use std::num::NonZeroU32;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::RwLock;
use tokio::time::sleep;
use tokio_tungstenite::{connect_async, tungstenite};
use warp::Filter;

type RateLimiterMap = DashMap<String, RpcRateLimiter>;
type ConnectionsMap = DashMap<String, u32>;
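
/// A "direct" (not keyed) governor rate limiter: in-memory state driven by the quanta clock.
/// One of these is created per upstream rpc that has a requests-per-second limit configured;
/// servers configured with a limit of 0 get no limiter at all.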
type RpcRateLimiter =
    RateLimiter<NotKeyed, InMemoryState, QuantaClock, NoOpMiddleware<QuantaInstant>>;

/// Load balance to the least-connection rpc
struct BalancedRpcs {
    rpcs: RwLock<Vec<String>>,
    connections: ConnectionsMap,
    ratelimits: RateLimiterMap,
    new_heads_handles: Vec<AbortHandle>,
}

impl Drop for BalancedRpcs {
    fn drop(&mut self) {
        for handle in self.new_heads_handles.iter() {
            handle.abort();
        }
    }
}
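
/// Handle one message from an rpc's `newHeads` subscription.
///
/// The notification shape is assumed from the standard `eth_subscribe` pubsub API (not checked
/// against every node), roughly:
/// `{"jsonrpc":"2.0","method":"eth_subscription","params":{"subscription":"0x…","result":{"number":"0x…",...}}}`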
async fn handle_new_head_message(message: tungstenite::Message) -> anyhow::Result<()> {
    // parse the message as json. returning anyhow::Result lets us use `?` instead of unwrap
    let data: serde_json::Value = serde_json::from_str(message.to_text()?)?;

    // TODO: get the block data out of the message and update a map for this rpc
    println!("now what? {:?}", data);

    unimplemented!();
}

impl BalancedRpcs {
    fn new(servers: Vec<(&str, u32)>, clock: &QuantaClock) -> BalancedRpcs {
        let mut rpcs: Vec<String> = vec![];
        let connections = DashMap::new();
        let ratelimits = DashMap::new();

        for (s, limit) in servers.into_iter() {
            rpcs.push(s.to_string());
            connections.insert(s.to_string(), 0);

            if limit > 0 {
                let quota = governor::Quota::per_second(NonZeroU32::new(limit).unwrap());

                let rate_limiter = governor::RateLimiter::direct_with_clock(quota, clock);

                ratelimits.insert(s.to_string(), rate_limiter);
            }
        }

        // TODO: subscribe to new_heads
        let new_heads_handles = rpcs
            .clone()
            .into_iter()
            .map(|rpc| {
                // start the subscription inside an abort handler. this way, dropping this BalancedRpcs will close these connections
                let (abort_handle, abort_registration) = AbortHandle::new_pair();

                tokio::spawn(Abortable::new(
                    async move {
                        // replace "http" at the start with "ws"
                        // TODO: this is fragile. some nodes use different ports, too. use proper config
                        // TODO: maybe we should use this websocket for more than just the new heads subscription. we could send all our requests over it (but would need to modify ids)
                        let re = Regex::new("^http").expect("bad regex");
                        let ws_rpc = re.replace(&rpc, "ws");

                        // TODO: if websocket not supported, use polling?
                        let ws_rpc = url::Url::parse(&ws_rpc).expect("invalid websocket url");

                        // loop so that if it disconnects, we reconnect
                        loop {
                            match connect_async(&ws_rpc).await {
                                Ok((ws_stream, _)) => {
                                    let (mut write, mut read) = ws_stream.split();

                                    // TODO: send eth_subscribe newHeads
                                    if (write
                                        .send(tungstenite::Message::Text(
                                            "{\"id\": 1, \"method\": \"eth_subscribe\", \"params\": [\"newHeads\"]}"
                                                .to_string(),
                                        ))
                                        .await)
                                        .is_ok()
                                    {
                                        if let Some(Ok(_first)) = read.next().await {
                                            // TODO: what should we do with the first message?
                                            while let Some(Ok(message)) = read.next().await {
                                                if let Err(e) = handle_new_head_message(message).await {
                                                    eprintln!(
                                                        "error handling new head message @ {}: {}",
                                                        ws_rpc, e
                                                    );
                                                    break;
                                                }
                                            }
                                        }
                                        // no more messages or we got an error
                                    }
                                }
                                Err(e) => {
                                    // TODO: proper logging
                                    eprintln!("error connecting to websocket @ {}: {}", ws_rpc, e);
                                }
                            }

                            // TODO: log that we are going to reconnect to ws_rpc in 1 second
                            // TODO: how long should we wait? exponential backoff?
                            sleep(Duration::from_secs(1)).await;
                        }
                    },
                    abort_registration,
                ));

                abort_handle
            })
            .collect();

        BalancedRpcs {
            rpcs: RwLock::new(rpcs),
            connections,
            ratelimits,
            new_heads_handles,
        }
    }

    /// get the best available rpc server
    async fn get_upstream_server(&self) -> Result<String, NotUntil<QuantaInstant>> {
        let mut balanced_rpcs = self.rpcs.write().await;

        // sort so that the rpc with the fewest open connections is tried first
        balanced_rpcs.sort_unstable_by(|a, b| {
            self.connections
                .get(a)
                .unwrap()
                .cmp(&self.connections.get(b).unwrap())
        });

        let mut earliest_not_until = None;

        for selected_rpc in balanced_rpcs.iter() {
            // check rate limits. a missing entry means this rpc has no limit configured
            match self
                .ratelimits
                .get(selected_rpc)
                .map(|ratelimiter| ratelimiter.check())
            {
                None | Some(Ok(_)) => {
                    // no rate limit, or the rate limit check succeeded
                }
                Some(Err(not_until)) => {
                    // rate limit failed
                    // save the smallest not_until. if nothing succeeds, return an Err with not_until in it
                    if earliest_not_until.is_none() {
                        earliest_not_until = Some(not_until);
                    } else {
                        let earliest_possible =
                            earliest_not_until.as_ref().unwrap().earliest_possible();
                        let new_earliest_possible = not_until.earliest_possible();

                        if earliest_possible > new_earliest_possible {
                            earliest_not_until = Some(not_until);
                        }
                    }
                    continue;
                }
            };

            // increment our connection counter
            let mut connections = self.connections.get_mut(selected_rpc).unwrap();
            *connections += 1;

            // return the selected RPC
            return Ok(selected_rpc.clone());
        }

        // return the smallest not_until
        if let Some(not_until) = earliest_not_until {
            Err(not_until)
        } else {
            unimplemented!();
        }
    }
}

/// Send to all the Rpcs
/// Unlike BalancedRpcs, there is no tracking of connections
/// We do still track rate limits
struct LoudRpcs {
    rpcs: Vec<String>,
    // TODO: what type? store with connections?
    ratelimits: RateLimiterMap,
}

impl LoudRpcs {
    fn new(servers: Vec<(&str, u32)>, clock: &QuantaClock) -> LoudRpcs {
        let mut rpcs: Vec<String> = vec![];
        let ratelimits = RateLimiterMap::new();

        for (s, limit) in servers.into_iter() {
            rpcs.push(s.to_string());

            if limit > 0 {
                let quota = governor::Quota::per_second(NonZeroU32::new(limit).unwrap());

                let rate_limiter = governor::RateLimiter::direct_with_clock(quota, clock);

                ratelimits.insert(s.to_string(), rate_limiter);
            }
        }

        LoudRpcs { rpcs, ratelimits }
    }

    /// get all available rpc servers
    async fn get_upstream_servers(&self) -> Result<Vec<String>, NotUntil<QuantaInstant>> {
        let mut earliest_not_until = None;
        let mut selected_rpcs = vec![];

        for selected_rpc in self.rpcs.iter() {
            // check rate limits. a missing entry means this rpc has no limit configured
            match self
                .ratelimits
                .get(selected_rpc)
                .map(|ratelimiter| ratelimiter.check())
            {
                None | Some(Ok(_)) => {
                    // no rate limit, or the rate limit check succeeded
                }
                Some(Err(not_until)) => {
                    // rate limit failed
                    // save the smallest not_until. if nothing succeeds, return an Err with not_until in it
                    if earliest_not_until.is_none() {
                        earliest_not_until = Some(not_until);
                    } else {
                        let earliest_possible =
                            earliest_not_until.as_ref().unwrap().earliest_possible();
                        let new_earliest_possible = not_until.earliest_possible();

                        if earliest_possible > new_earliest_possible {
                            earliest_not_until = Some(not_until);
                        }
                    }
                    continue;
                }
            };

            // this rpc should work
            selected_rpcs.push(selected_rpc.clone());
        }

        if !selected_rpcs.is_empty() {
            return Ok(selected_rpcs);
        }

        // return the earliest not_until
        if let Some(not_until) = earliest_not_until {
            Err(not_until)
        } else {
            panic!("i don't think this should happen")
        }
    }

    fn as_bool(&self) -> bool {
        !self.rpcs.is_empty()
    }
}

struct Web3ProxyState {
    clock: QuantaClock,
    client: reqwest::Client,
    // TODO: LoudRpcs and BalancedRpcs should probably share a trait or something
    balanced_rpc_tiers: Vec<BalancedRpcs>,
    private_rpcs: LoudRpcs,
    /// lock this when all rate limiters are hit: requests hold the read lock while they try
    /// upstream servers. when everything is rate limited, the write lock is taken and held
    /// while sleeping until the earliest `not_until`, so new requests wait too.
    balanced_rpc_ratelimiter_lock: RwLock<()>,
    private_rpcs_ratelimiter_lock: RwLock<()>,
}

impl Web3ProxyState {
    fn new(
        balanced_rpc_tiers: Vec<Vec<(&str, u32)>>,
        private_rpcs: Vec<(&str, u32)>,
    ) -> Web3ProxyState {
        let clock = QuantaClock::default();

        let balanced_rpc_tiers = balanced_rpc_tiers
            .into_iter()
            .map(|servers| BalancedRpcs::new(servers, &clock))
            .collect();

        let private_rpcs = LoudRpcs::new(private_rpcs, &clock);

        // TODO: warn if no private relays
        Web3ProxyState {
            clock,
            client: reqwest::Client::new(),
            balanced_rpc_tiers,
            private_rpcs,
            balanced_rpc_ratelimiter_lock: Default::default(),
            private_rpcs_ratelimiter_lock: Default::default(),
        }
    }

    /// send the request to the appropriate RPCs
    async fn proxy_web3_rpc(
        self: Arc<Web3ProxyState>,
        json_body: serde_json::Value,
    ) -> anyhow::Result<impl warp::Reply> {
        let eth_send_raw_transaction =
            serde_json::Value::String("eth_sendRawTransaction".to_string());

        if self.private_rpcs.as_bool() && json_body.get("method") == Some(&eth_send_raw_transaction)
        {
            // there are private rpcs configured and the request is eth_sendRawTransaction. send to all private rpcs
            loop {
                let read_lock = self.private_rpcs_ratelimiter_lock.read().await;

                match self.private_rpcs.get_upstream_servers().await {
                    Ok(upstream_servers) => {
                        if let Ok(result) = self
                            .try_send_requests(upstream_servers, None, &json_body)
                            .await
                        {
                            return Ok(result);
                        }
                    }
                    Err(not_until) => {
                        // TODO: move this to a helper function
                        // sleep (with a lock) until our rate limits should be available
                        drop(read_lock);

                        let write_lock = self.private_rpcs_ratelimiter_lock.write().await;

                        let deadline = not_until.wait_time_from(self.clock.now());
                        sleep(deadline).await;

                        drop(write_lock);
                    }
                };
            }
        } else {
            // this is not a private transaction (or no private relays are configured)
            loop {
                let read_lock = self.balanced_rpc_ratelimiter_lock.read().await;

                // there are multiple tiers. save the earliest not_until (if any). if we don't return, we will sleep until then and then try again
                let mut earliest_not_until = None;

                for balanced_rpcs in self.balanced_rpc_tiers.iter() {
                    match balanced_rpcs.get_upstream_server().await {
                        Ok(upstream_server) => {
                            // TODO: capture any errors. at least log them
                            if let Ok(result) = self
                                .try_send_requests(
                                    vec![upstream_server],
                                    Some(&balanced_rpcs.connections),
                                    &json_body,
                                )
                                .await
                            {
                                return Ok(result);
                            }
                        }
                        Err(not_until) => {
                            // save the smallest not_until. if nothing succeeds, return an Err with not_until in it
                            if earliest_not_until.is_none() {
                                earliest_not_until = Some(not_until);
                            } else {
                                // TODO: do we need to unwrap this far? can we just compare the not_untils
                                let earliest_possible =
                                    earliest_not_until.as_ref().unwrap().earliest_possible();
                                let new_earliest_possible = not_until.earliest_possible();

                                if earliest_possible > new_earliest_possible {
                                    earliest_not_until = Some(not_until);
                                }
                            }
                        }
                    }
                }

                // we haven't returned an Ok, sleep and try again
                // TODO: move this to a helper function
                drop(read_lock);

                let write_lock = self.balanced_rpc_ratelimiter_lock.write().await;

                // unwrap should be safe since we would have returned if it wasn't set
                let deadline = earliest_not_until.unwrap().wait_time_from(self.clock.now());

                sleep(deadline).await;

                drop(write_lock);
            }
        }
    }
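
    // a possible shape for the helper that the "move this to a helper function" TODOs above
    // describe. this is only a sketch; the name and signature are assumptions, not current code.
    // the caller would drop its read lock first, just like the loops above do:
    //
    //     async fn sleep_until_ready(&self, lock: &RwLock<()>, not_until: NotUntil<QuantaInstant>) {
    //         // take the write lock so that other requests wait with us
    //         let _write_lock = lock.write().await;
    //         sleep(not_until.wait_time_from(self.clock.now())).await;
    //     }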

    async fn try_send_requests(
        &self,
        upstream_servers: Vec<String>,
        connections: Option<&ConnectionsMap>,
        json_body: &serde_json::Value,
    ) -> anyhow::Result<String> {
        // send the query to all the servers
        let bodies = future::join_all(upstream_servers.into_iter().map(|url| {
            let client = self.client.clone();
            let json_body = json_body.clone();

            tokio::spawn(async move {
                // TODO: there has to be a better way to attach the url to the result
                client
                    .post(&url)
                    .json(&json_body)
                    .send()
                    .await
                    // add the url to the error so that we can reduce connection counters
                    .map_err(|e| (url.clone(), e))?
                    .text()
                    .await
                    // add the url to the result so that we can reduce connection counters
                    .map(|t| (url.clone(), t))
                    // add the url to the error so that we can reduce connection counters
                    .map_err(|e| (url, e))
            })
        }))
        .await;

        // we are going to collect successes and failures
        let mut oks = vec![];
        let mut errs = vec![];

        // TODO: parallel?
        for b in bodies {
            match b {
                Ok(Ok((url, b))) => {
                    // reduce connection counter
                    if let Some(connections) = connections {
                        *connections.get_mut(&url).unwrap() -= 1;
                    }

                    // TODO: if "no block with that header" or some other jsonrpc errors, skip this response
                    oks.push(b);
                }
                Ok(Err((url, e))) => {
                    // reduce connection counter
                    if let Some(connections) = connections {
                        *connections.get_mut(&url).unwrap() -= 1;
                    }

                    // TODO: better errors
                    eprintln!("Got a reqwest::Error: {}", e);
                    errs.push(anyhow::anyhow!("Got a reqwest::Error"));
                }
                Err(e) => {
                    // TODO: better errors
                    eprintln!("Got a tokio::JoinError: {}", e);
                    errs.push(anyhow::anyhow!("Got a tokio::JoinError"));
                }
            }
        }

        // TODO: which response should we use?
        if !oks.is_empty() {
            Ok(oks.pop().unwrap())
        } else if !errs.is_empty() {
            Err(errs.pop().unwrap())
        } else {
            Err(anyhow::anyhow!("no successful responses"))
        }
    }
}

#[tokio::main]
async fn main() {
    // TODO: load the config from yaml instead of hard coding
    // TODO: support multiple chains in one process. then we could just point "chain.stytt.com" at this and caddy wouldn't need anything else
    // TODO: i kind of want to make use of caddy's load balancing and health checking and such though
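    //
    // a minimal sketch of what a yaml-backed config might deserialize into (struct and field
    // names here are assumptions for illustration, not part of the current code):
    //
    //     #[derive(serde::Deserialize)]
    //     struct Web3ProxyConfig {
    //         listen_port: u16,
    //         balanced_rpc_tiers: Vec<Vec<(String, u32)>>,
    //         private_rpcs: Vec<(String, u32)>,
    //     }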

    let listen_port = 8445;

    // TODO: be smart about using archive nodes?
    let state = Web3ProxyState::new(
        vec![
            // local nodes
            vec![("https://10.11.12.16:8545", 0)],
            // paid nodes
            // TODO: add paid nodes (with rate limits)
            // free nodes
            // TODO: add rate limits
            vec![
                ("https://main-rpc.linkpool.io", 0),
                ("https://rpc.ankr.com/eth", 0),
            ],
        ],
        vec![
            ("https://api.edennetwork.io/v1/beta", 0),
            ("https://api.edennetwork.io/v1/", 0),
        ],
    );

    let state: Arc<Web3ProxyState> = Arc::new(state);

    let proxy_rpc_filter = warp::any()
        .and(warp::post())
        .and(warp::body::json())
        .then(move |json_body| state.clone().proxy_web3_rpc(json_body))
        .map(handle_anyhow_errors);

    println!("Listening on 0.0.0.0:{}", listen_port);

    warp::serve(proxy_rpc_filter)
        .run(([0, 0, 0, 0], listen_port))
        .await;
}

/// convert result into an http response. use this at the end of your warp filter
pub fn handle_anyhow_errors<T: warp::Reply>(res: anyhow::Result<T>) -> Box<dyn warp::Reply> {
    match res {
        Ok(r) => Box::new(r.into_response()),
        // TODO: json error?
        Err(e) => Box::new(warp::reply::with_status(
            format!("{}", e),
            reqwest::StatusCode::INTERNAL_SERVER_ERROR,
        )),
    }
}