cleanup stale TODOs

Bryan Stitt 2022-04-27 06:14:35 +00:00
parent 8d68900fd4
commit 2edf0cf4b3
4 changed files with 15 additions and 26 deletions

@@ -17,15 +17,12 @@ pub type BlockWatcherReceiver = mpsc::UnboundedReceiver<NewHead>;
 pub struct BlockWatcher {
     sender: BlockWatcherSender,
     receiver: Mutex<BlockWatcherReceiver>,
-    // TODO: i don't think we want a RwLock. we want an ArcSwap or something
-    // TODO: should we just store the block number?
     block_numbers: DashMap<String, u64>,
     head_block_number: AtomicU64,
 }
 
 impl BlockWatcher {
     pub fn new() -> Self {
-        // TODO: this also needs to return a reader for blocks
         let (sender, receiver) = mpsc::unbounded_channel();
 
         Self {
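The first removed TODO floats the idea of an ArcSwap instead of a lock for the head block. For reference, a minimal sketch of that idea, assuming the arc-swap crate (not what this commit does; it keeps the AtomicU64):

```rust
use std::sync::Arc;
use arc_swap::ArcSwap;

fn main() {
    // lock-free replacement for a RwLock<u64>: writers swap the Arc, readers load it
    let head_block_number = ArcSwap::from_pointee(0u64);

    head_block_number.store(Arc::new(14_000_000));

    // load() returns a cheap guard; deref twice to reach the u64
    assert_eq!(**head_block_number.load(), 14_000_000);
}
```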
@@ -56,7 +53,6 @@ impl BlockWatcher {
             }
             cmp::Ordering::Less => {
                 // allow being some behind
-                // TODO: why do we need a clone here?
                 let lag = head_block_number - *rpc_block_number;
                 Ok(lag <= allowed_lag)
             }
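For context, the is_synced comparison this hunk trims down reduces to a small lag check. A sketch under assumed names (head_block_number, rpc_block_number, allowed_lag follow the diff; the free function wrapper is hypothetical):

```rust
use std::cmp;

// returns true if the rpc is at, ahead of, or close enough to the recorded head block
fn is_synced(head_block_number: u64, rpc_block_number: u64, allowed_lag: u64) -> bool {
    match rpc_block_number.cmp(&head_block_number) {
        // ahead of or equal to our recorded head is fine
        cmp::Ordering::Greater | cmp::Ordering::Equal => true,
        cmp::Ordering::Less => {
            // allow being some blocks behind
            let lag = head_block_number - rpc_block_number;
            lag <= allowed_lag
        }
    }
}
```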
@@ -88,7 +84,7 @@ impl BlockWatcher {
             .as_secs() as i64;
 
         // save the block for this rpc
-        // TODO:store the actual chain as a graph and then have self.blocks point to that?
+        // TODO: store the actual chain as a graph and then have self.blocks point to that?
         self.block_numbers.insert(rpc.clone(), new_block_number);
 
         let head_number = self.head_block_number.load(atomic::Ordering::SeqCst);
@@ -112,7 +108,6 @@ impl BlockWatcher {
                 "+".to_string()
             }
             cmp::Ordering::Less => {
-                // TODO: include how many blocks behind?
                 let lag = new_block_number as i64 - head_number as i64;
                 lag.to_string()
             }

@@ -46,7 +46,7 @@ impl Web3ProxyApp {
         // make a http shared client
         // TODO: how should we configure the connection pool?
-        // TODO: 5 minutes is probably long enough. unlimited is a bad idea if something
+        // TODO: 5 minutes is probably long enough. unlimited is a bad idea if something is wrong with the remote server
         let http_client = reqwest::ClientBuilder::new()
             .timeout(Duration::from_secs(300))
             .user_agent(APP_USER_AGENT)
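The remaining TODO asks how the connection pool should be configured. If that is ever tuned, reqwest exposes the relevant knobs on the same builder; a sketch with illustrative values (the limits here are assumptions, not from the repo):

```rust
use std::time::Duration;

fn build_http_client() -> reqwest::Result<reqwest::Client> {
    reqwest::ClientBuilder::new()
        // overall request timeout, as in the diff
        .timeout(Duration::from_secs(300))
        // drop idle connections instead of keeping them forever
        .pool_idle_timeout(Duration::from_secs(90))
        // cap idle connections kept per host
        .pool_max_idle_per_host(10)
        .build()
}
```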
@@ -74,6 +74,7 @@ impl Web3ProxyApp {
         );
 
         let private_rpcs = if private_rpcs.is_empty() {
+            warn!("No private relays configured. Any transactions will be broadcast to the public mempool!");
             None
         } else {
             Some(Arc::new(
@@ -87,7 +88,6 @@ impl Web3ProxyApp {
             ))
         };
 
-        // TODO: warn if no private relays
         Ok(Web3ProxyApp {
             block_watcher,
             clock,
@@ -160,7 +160,6 @@ impl Web3ProxyApp {
             // this is not a private transaction (or no private relays are configured)
             // try to send to each tier, stopping at the first success
             loop {
-                // TODO: i'm not positive that this locking is correct
                 let read_lock = self.balanced_rpc_ratelimiter_lock.read().await;
 
                 // there are multiple tiers. save the earliest not_until (if any). if we don't return, we will sleep until then and then try again
@@ -204,7 +203,6 @@ impl Web3ProxyApp {
                             if earliest_not_until.is_none() {
                                 earliest_not_until = Some(not_until);
                             } else {
-                                // TODO: do we need to unwrap this far? can we just compare the not_untils
                                 let earliest_possible =
                                     earliest_not_until.as_ref().unwrap().earliest_possible();
                                 let new_earliest_possible = not_until.earliest_possible();
@@ -277,6 +275,8 @@ impl Web3ProxyApp {
                     let mut response = response?;
 
+                    // TODO: if "no block with that header" or some other jsonrpc errors, skip this response
+
                     // replace the id with what we originally received
                     if let Some(response_id) = response.get_mut("id") {
                         *response_id = incoming_id;
@@ -284,7 +284,6 @@ impl Web3ProxyApp {
                     // send the first good response to a one shot channel. that way we respond quickly
                     // drop the result because errors are expected after the first send
-                    // TODO: if "no block with that header" or some other jsonrpc errors, skip this response
                     let _ = tx.send(Ok(response));
 
                     Ok::<(), anyhow::Error>(())
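The TODO this hunk relocates (skip responses that carry a JSON-RPC error before racing them to the one-shot channel) could look roughly like the sketch below, assuming serde_json and tokio's oneshot; forward_if_ok is a hypothetical helper, not code from the repo:

```rust
use serde_json::Value;
use tokio::sync::oneshot;

// send a response to the one-shot winner channel unless it is a JSON-RPC error
fn forward_if_ok(response: Value, tx: oneshot::Sender<Value>) {
    // e.g. {"jsonrpc":"2.0","id":1,"error":{"code":-32000,"message":"no block with that header"}}
    if response.get("error").is_some() {
        return;
    }
    // send errors are expected after the first success, so the result is dropped
    let _ = tx.send(response);
}
```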
@@ -329,11 +328,10 @@ async fn main() {
     tracing_subscriber::fmt::init();
 
     // TODO: load the config from yaml instead of hard coding
-    // TODO: support multiple chains in one process. then we could just point "chain.stytt.com" at this and caddy wouldn't need anything else
-    // TODO: i kind of want to make use of caddy's load balancing and health checking and such though
+    // TODO: support multiple chains in one process? then we could just point "chain.stytt.com" at this and caddy wouldn't need anything else
+    // TODO: be smart about about using archive nodes? have a set that doesn't use archive nodes since queries to them are more valuable
     let listen_port = 8445;
-    // TODO: be smart about about using archive nodes?
     let state = Web3ProxyApp::try_new(
         vec![
             // local nodes
@@ -378,7 +376,6 @@ async fn main() {
 pub fn handle_anyhow_errors<T: warp::Reply>(res: anyhow::Result<T>) -> Box<dyn warp::Reply> {
     match res {
         Ok(r) => Box::new(r.into_response()),
-        // TODO: json error?
         Err(e) => Box::new(warp::reply::with_status(
             format!("{}", e),
             reqwest::StatusCode::INTERNAL_SERVER_ERROR,
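The dropped "json error?" TODO would amount to replying with a JSON body instead of a plain string. A sketch of that variant, assuming warp and serde_json (not what the commit ships, which keeps the plain-text reply):

```rust
use warp::http::StatusCode;
use warp::Reply;

pub fn handle_anyhow_errors<T: warp::Reply>(res: anyhow::Result<T>) -> Box<dyn warp::Reply> {
    match res {
        Ok(r) => Box::new(r.into_response()),
        // reply with a small JSON object instead of a bare string
        Err(e) => Box::new(warp::reply::with_status(
            warp::reply::json(&serde_json::json!({ "error": e.to_string() })),
            StatusCode::INTERNAL_SERVER_ERROR,
        )),
    }
}
```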

@@ -38,12 +38,11 @@ impl Web3Provider {
     ) -> anyhow::Result<()> {
         info!("Watching new_heads from {}", url);
 
-        // TODO: automatically reconnect
         match &self {
             Web3Provider::Http(provider) => {
                 // TODO: there is a "watch_blocks" function, but a lot of public nodes do not support the necessary rpc endpoints
-                // TODO: how often?
-                // TODO: maybe it would be better to have one interval for all of these, but this works for now
+                // TODO: what should this interval be?
+                // TODO: maybe it would be better to have one interval for all of the http providers, but this works for now
                 let mut interval = interval(Duration::from_secs(2));
 
                 loop {
@@ -58,6 +57,7 @@ impl Web3Provider {
                 }
             }
             Web3Provider::Ws(provider) => {
+                // TODO: automatically reconnect?
                 let mut stream = provider.subscribe_blocks().await?;
                 while let Some(block) = stream.next().await {
                     block_watcher_sender.send((url.clone(), block)).unwrap();
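The new "automatically reconnect?" TODO on the Ws branch is about the subscription ending when the socket drops. One shape that could take, as a sketch, is retrying in a loop with a short backoff; subscribe_and_forward is a hypothetical stand-in for the subscribe_blocks loop above:

```rust
use std::time::Duration;

// stand-in for the real subscribe_blocks + forward loop (assumption)
async fn subscribe_and_forward(_url: &str) -> anyhow::Result<()> {
    Ok(())
}

// keep the new_heads subscription alive across disconnects
async fn watch_with_reconnect(url: String) {
    loop {
        if let Err(e) = subscribe_and_forward(&url).await {
            tracing::warn!("new_heads subscription to {} failed: {:?}", url, e);
        }
        // brief backoff before reconnecting
        tokio::time::sleep(Duration::from_secs(1)).await;
    }
}
```

The spawned task shown in a later hunk already retries on Err in a similar while-let loop, so this would mostly be a matter of covering the clean-exit case too.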
@@ -89,7 +89,6 @@ impl Web3Connection {
         http_client: Option<reqwest::Client>,
         block_watcher_sender: BlockWatcherSender,
     ) -> anyhow::Result<Web3Connection> {
-        // TODO: create an ethers-rs rpc client and subscribe/watch new heads in a spawned task
         let provider = if url_str.starts_with("http") {
             let url: url::Url = url_str.parse()?;
@@ -97,16 +96,16 @@ impl Web3Connection {
             let provider = ethers::providers::Http::new_with_client(url, http_client);
 
-            // TODO: dry this up
+            // TODO: dry this up (needs https://github.com/gakonst/ethers-rs/issues/592)
             ethers::providers::Provider::new(provider)
                 .interval(Duration::from_secs(1))
                 .into()
         } else if url_str.starts_with("ws") {
             let provider = ethers::providers::Ws::connect(url_str.clone()).await?;
 
-            // TODO: make sure this survives disconnects
-            // TODO: dry this up
+            // TODO: make sure this automatically reconnects
+            // TODO: dry this up (needs https://github.com/gakonst/ethers-rs/issues/592)
             ethers::providers::Provider::new(provider)
                 .interval(Duration::from_secs(1))
                 .into()
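The updated TODO points at an upstream ethers-rs issue as the blocker for drying up the duplicated builder chain. In the meantime the duplication could, in principle, sit behind a small generic helper; a sketch, assuming ethers-rs and that nothing else about the construction changes:

```rust
use std::time::Duration;
use ethers::providers::{JsonRpcClient, Provider};

// wrap any transport (Http, Ws, ...) with the shared 1s polling interval
fn with_poll_interval<T: JsonRpcClient>(transport: T) -> Provider<T> {
    Provider::new(transport).interval(Duration::from_secs(1))
}
```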
@@ -117,7 +116,6 @@ impl Web3Connection {
         let provider = Arc::new(provider);
 
         // subscribe to new heads in a spawned future
-        // TODO: if http, maybe we should check them all on the same interval. and if there is at least one websocket, use that message to start check?
         let provider_clone: Arc<Web3Provider> = Arc::clone(&provider);
         tokio::spawn(async move {
             while let Err(e) = provider_clone

@@ -20,7 +20,6 @@ type Web3RateLimiterMap = DashMap<String, Web3RateLimiter>;
 pub type Web3ConnectionMap = DashMap<String, Web3Connection>;
 
 /// Load balance to the rpc
-/// TODO: i'm not sure about having 3 locks here. can we share them?
 pub struct Web3ProviderTier {
     /// RPC urls sorted by active requests
     /// TODO: what type for the rpc?
@@ -90,13 +89,14 @@ impl Web3ProviderTier {
         let mut earliest_not_until = None;
 
         for selected_rpc in balanced_rpcs.iter() {
-            // TODO: check current block number. if too far behind, make our own NotUntil here
+            // check current block number
             if !block_watcher
                 .is_synced(selected_rpc.clone(), 3)
                 .await
                 .expect("checking is_synced failed")
             {
                 // skip this rpc because it is not synced
+                // TODO: make a NotUntil here?
                 continue;
             }
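The new "make a NotUntil here?" TODO is about giving callers a concrete retry time when a backend is skipped for lagging, instead of silently continuing. A rough sketch of that shape, using a plain Instant deadline rather than governor's NotUntil (an assumption, as is the one-block-time guess):

```rust
use std::time::{Duration, Instant};

// if an rpc is skipped for lagging, suggest when the caller could retry it
fn retry_after_for_lagging_rpc() -> Instant {
    // roughly one block time; long enough for the rpc to have a chance to catch up
    Instant::now() + Duration::from_secs(13)
}
```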
@@ -204,7 +204,6 @@ impl Web3ProviderTier {
             if let Some(not_until) = earliest_not_until {
                 Err(not_until)
             } else {
-                // TODO: is this right?
                 Ok(vec![])
             }
         }