From 3eec828f1bd6873d82dd2e4dcd7d8cbfb035ff07 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Sat, 16 Jul 2022 05:21:08 +0000
Subject: [PATCH] fix http interval

---
 TODO.md                       |  3 ++-
 web3-proxy/src/connection.rs  | 17 ++++++++++-------
 web3-proxy/src/connections.rs |  5 ++++-
 3 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/TODO.md b/TODO.md
index 430bc32f..630b8b53 100644
--- a/TODO.md
+++ b/TODO.md
@@ -138,4 +138,5 @@ in another repo: event subscriber
 - [ ] if no redis set, but public rate limits are set, exit with an error
 - [ ] i saw "WebSocket connection closed unexpectedly" but no auto reconnect. need better logs on these
 - [ ] if archive servers are added to the rotation while they are still syncing, they might get requests too soon. keep archive servers out of the configs until they are done syncing. full nodes should be fine to add to the configs even while syncing, though its a wasted connection
-- [ ] when under load, i'm seeing "http interval lagging!". sometimes it happens when not loaded.
+- [x] when under load, i'm seeing "http interval lagging!". sometimes it happens when not loaded.
+  - we were skipping our delay interval when block hash wasn't changed. so if a block was ever slow, the http provider would get the same hash twice and then would try eth_getBlockByNumber a ton of times
\ No newline at end of file
diff --git a/web3-proxy/src/connection.rs b/web3-proxy/src/connection.rs
index 38d8b24c..80f274f1 100644
--- a/web3-proxy/src/connection.rs
+++ b/web3-proxy/src/connection.rs
@@ -379,6 +379,16 @@ impl Web3Connection {
                     let mut last_hash = Default::default();
 
                     loop {
+                        // wait for the interval
+                        // TODO: if error or rate limit, increase interval?
+                        while let Err(err) = http_interval_receiver.recv().await {
+                            // TODO: if recverror is not Lagged, exit?
+                            // querying the block was delayed. this can happen if tokio was busy.
+                            warn!(?err, ?self, "http interval lagging!");
+                        }
+
+                        trace!(?self, "ok http interval");
+
                         match self.try_request_handle().await {
                             Ok(active_request_handle) => {
                                 // TODO: i feel like this should be easier. there is a provider.getBlock, but i don't know how to give it "latest"
@@ -403,13 +413,6 @@ impl Web3Connection {
                                 warn!(?err, "Rate limited on latest block from {}", self);
                             }
                         }
-
-                        // wait for the interval
-                        // TODO: if error or rate limit, increase interval?
-                        while let Err(err) = http_interval_receiver.recv().await {
-                            // querying the block was delayed. this can happen if tokio was busy.
-                            warn!(?err, ?self, "http interval lagging!")
-                        }
                     }
                 }
                 Web3Provider::Ws(provider) => {
diff --git a/web3-proxy/src/connections.rs b/web3-proxy/src/connections.rs
index bb179512..c3caeab9 100644
--- a/web3-proxy/src/connections.rs
+++ b/web3-proxy/src/connections.rs
@@ -129,6 +129,8 @@ impl Web3Connections {
                     // TODO: every time a head_block arrives (maybe with a small delay), or on the interval.
                     interval.tick().await;
 
+                    info!("http interval ready");
+
                     // errors are okay. they mean that all receivers have been dropped
                     let _ = sender.send(());
                 }
@@ -243,13 +245,14 @@ impl Web3Connections {
         // TODO: how many retries? until some timestamp is hit is probably better. maybe just loop and call this with a timeout
         // TODO: after more investigation, i don't think retries will help. i think this is because chains of transactions get dropped from memory
         // TODO: also check the "confirmed transactions" mapping? maybe one shared mapping with TxState in it?
-        trace!(?pending_tx_id, "checking pending_transactions on {}", rpc);
 
         if pending_tx_sender.receiver_count() == 0 {
             // no receivers, so no point in querying to get the full transaction
             return Ok(());
         }
 
+        trace!(?pending_tx_id, "checking pending_transactions on {}", rpc);
+
         if self.pending_transactions.contains_key(&pending_tx_id) {
             // this transaction has already been processed
             return Ok(());
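
The gist of the fix: the wait on the shared broadcast interval moves from the bottom of the polling loop to the top, so every iteration pays the delay before calling eth_getBlockByNumber, even on the path where the block hash is unchanged. Below is a minimal, self-contained sketch of that pattern, not the project's actual code; the one-second tick and the `poll_once` helper are hypothetical stand-ins.

```rust
use std::time::Duration;
use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    // capacity 1: only the newest tick is retained for slow receivers
    let (sender, mut http_interval_receiver) = broadcast::channel::<()>(1);

    // ticker task: one tick per expected block time (1s here, an assumption)
    let tick_sender = sender.clone();
    tokio::spawn(async move {
        let mut interval = tokio::time::interval(Duration::from_secs(1));
        loop {
            interval.tick().await;
            // a send error only means every receiver has been dropped
            let _ = tick_sender.send(());
        }
    });

    for _ in 0..3 {
        // wait for the interval FIRST, so a slow request below can never
        // cause the next iteration to fire back-to-back
        while let Err(err) = http_interval_receiver.recv().await {
            // Lagged means ticks were missed while we were busy;
            // recv again to resynchronize with the newest tick
            eprintln!("http interval lagging! {err:?}");
        }

        poll_once().await;
    }
}

// hypothetical stand-in for the real "fetch the latest block over http" request
async fn poll_once() {
    // deliberately slower than the tick to exercise the Lagged branch
    tokio::time::sleep(Duration::from_millis(1500)).await;
    println!("polled latest block");
}
```

With a broadcast channel of capacity 1, a receiver that falls behind gets `RecvError::Lagged` rather than a backlog of stale ticks; looping on `recv()` until it returns `Ok` resynchronizes with the newest tick, which is the "http interval lagging!" path the warning logs.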