fix http interval

Bryan Stitt 2022-07-16 05:21:08 +00:00
parent a179e7fd87
commit 3eec828f1b
3 changed files with 16 additions and 9 deletions

@@ -138,4 +138,5 @@ in another repo: event subscriber
- [ ] if no redis set, but public rate limits are set, exit with an error
- [ ] i saw "WebSocket connection closed unexpectedly" but no auto reconnect. need better logs on these
- [ ] if archive servers are added to the rotation while they are still syncing, they might get requests too soon. keep archive servers out of the configs until they are done syncing. full nodes should be fine to add to the configs even while syncing, though it's a wasted connection
- [ ] when under load, i'm seeing "http interval lagging!". sometimes it happens when not loaded.
- [x] when under load, i'm seeing "http interval lagging!". sometimes it happens when not loaded.
- we were skipping our delay interval when the block hash hadn't changed. so if a block was ever slow, the http provider would get the same hash twice and then try eth_getBlockByNumber a ton of times without waiting (see the sketch below)
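
A minimal, self-contained sketch of the corrected polling shape, assuming the tokio broadcast channel that carries the shared http interval; the names (`fetch_latest_block_hash`, the 13 second period) are illustrative, not the actual `Web3Connection` internals. The point is that the receiver waits for a tick on every iteration, so an unchanged hash can no longer skip the delay:

```rust
// illustrative sketch only; not the project's actual code
use std::time::Duration;
use tokio::sync::broadcast::{self, error::RecvError};

// hypothetical stand-in for fetching the latest block over http
async fn fetch_latest_block_hash() -> String {
    "0xabc...".to_string()
}

#[tokio::main]
async fn main() {
    let (tick_tx, mut tick_rx) = broadcast::channel::<()>(1);

    // one task owns the interval and broadcasts a tick to every connection
    tokio::spawn(async move {
        let mut interval = tokio::time::interval(Duration::from_secs(13));
        loop {
            interval.tick().await;
            // a send error only means every receiver has been dropped
            let _ = tick_tx.send(());
        }
    });

    let mut last_hash = String::new();
    loop {
        // wait for a tick on every iteration, even when the hash is unchanged,
        // so a slow block can't turn into a tight polling loop
        match tick_rx.recv().await {
            Ok(()) => {}
            // lagging means ticks were missed, so polling now is still fine
            Err(RecvError::Lagged(skipped)) => {
                eprintln!("http interval lagging! skipped {skipped} ticks");
            }
            // the sender is gone; nothing left to poll for
            Err(RecvError::Closed) => break,
        }

        let hash = fetch_latest_block_hash().await;
        if hash != last_hash {
            // a new head block arrived; notify subscribers here
            last_hash = hash;
        }
        // an unchanged hash simply falls through to the next tick
    }
}
```

The diff below makes the same change in place: the `http_interval_receiver.recv()` wait moves from the bottom of the loop to the top, before `try_request_handle` is called.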

@@ -379,6 +379,16 @@ impl Web3Connection {
let mut last_hash = Default::default();
loop {
// wait for the interval
// TODO: if error or rate limit, increase interval?
while let Err(err) = http_interval_receiver.recv().await {
// TODO: if recverror is not Lagged, exit?
// querying the block was delayed. this can happen if tokio was busy.
warn!(?err, ?self, "http interval lagging!");
}
trace!(?self, "ok http interval");
match self.try_request_handle().await {
Ok(active_request_handle) => {
// TODO: i feel like this should be easier. there is a provider.getBlock, but i don't know how to give it "latest"
@@ -403,13 +413,6 @@ impl Web3Connection {
warn!(?err, "Rate limited on latest block from {}", self);
}
}
// wait for the interval
// TODO: if error or rate limit, increase interval?
while let Err(err) = http_interval_receiver.recv().await {
// querying the block was delayed. this can happen if tokio was busy.
warn!(?err, ?self, "http interval lagging!")
}
}
}
Web3Provider::Ws(provider) => {

@@ -129,6 +129,8 @@ impl Web3Connections {
// TODO: every time a head_block arrives (maybe with a small delay), or on the interval.
interval.tick().await;
info!("http interval ready");
// errors are okay. they mean that all receivers have been dropped
let _ = sender.send(());
}
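
For reference, `tokio::sync::broadcast::Sender::send` only returns an error when no receivers are currently subscribed, which is why the result is ignored above. A tiny standalone sketch (not the project's code) of that behavior:

```rust
use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    let (tx, rx) = broadcast::channel::<()>(1);

    // with a live receiver, send reports how many receivers will see the value
    assert_eq!(tx.send(()).unwrap(), 1);

    // after every receiver is dropped, send fails; ignoring that is harmless
    drop(rx);
    assert!(tx.send(()).is_err());
}
```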
@@ -243,13 +245,14 @@ impl Web3Connections {
// TODO: how many retries? until some timestamp is hit is probably better. maybe just loop and call this with a timeout
// TODO: after more investigation, i don't think retries will help. i think this is because chains of transactions get dropped from memory
// TODO: also check the "confirmed transactions" mapping? maybe one shared mapping with TxState in it?
trace!(?pending_tx_id, "checking pending_transactions on {}", rpc);
if pending_tx_sender.receiver_count() == 0 {
// no receivers, so no point in querying to get the full transaction
return Ok(());
}
trace!(?pending_tx_id, "checking pending_transactions on {}", rpc);
if self.pending_transactions.contains_key(&pending_tx_id) {
// this transaction has already been processed
return Ok(());