investigate deadlock

Bryan Stitt 2022-05-13 17:22:37 +00:00
parent c884cf5bbb
commit 9a68ece8a4
3 changed files with 15 additions and 10 deletions

@@ -263,6 +263,8 @@ impl Web3ProxyApp {
// TODO: building this cache key is slow and its large, but i don't see a better way right now
// TODO: inspect the params and see if a block is specified. if so, use that block number instead of current_block
// TODO: move this to a helper function. have it be a lot smarter
// TODO: have 2 caches. a small `block_cache` for most calls and a large `immutable_cache` for items that won't change
let cache_key = (
best_head_block_number,
request.method.clone(),
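The two-cache TODO above is only an idea at this point in the commit. A minimal sketch of what that split could look like, using simplified stand-in types (a flattened cache key and plain HashMaps, not the proxy's real containers or key layout):

// hypothetical sketch of the block_cache / immutable_cache TODO; names and types are illustrative
use std::collections::HashMap;

/// simplified stand-in for the real cache key: (head block number, rpc method, serialized params)
type CacheKey = (u64, String, String);

#[derive(Default)]
struct ResponseCaches {
    /// small: results that depend on the chain head; safe to clear whenever a new block arrives
    block_cache: HashMap<CacheKey, String>,
    /// large: results that can never change (e.g. a block fetched by hash); no invalidation needed
    immutable_cache: HashMap<CacheKey, String>,
}

fn main() {
    let mut caches = ResponseCaches::default();
    caches
        .block_cache
        .insert((263, "eth_blockNumber".into(), "[]".into()), "0x107".into());
    caches
        .immutable_cache
        .insert((0, "eth_getBlockByHash".into(), "[\"0xabc\"]".into()), "block json".into());

    // a new head block would clear only the small cache
    caches.block_cache.clear();
    assert_eq!(caches.immutable_cache.len(), 1);
}

The point of the split is that the small block_cache can be thrown away on every new head block, while the immutable_cache never needs invalidation and can grow much larger.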
@@ -296,13 +298,15 @@ impl Web3ProxyApp {
};
// TODO: small race condition here. parallel requests with the same query will both be saved to the cache
let mut response_cache = self.response_cache.write().await;
// TODO: cache the warp::reply to save us serializing every time
response_cache.insert(cache_key, response.clone());
if response_cache.len() >= RESPONSE_CACHE_CAP {
// TODO: this isn't really an LRU. what is this called? should we make it an lru? these caches only live for one block
response_cache.pop_front();
// TODO: try_write is unlikely to get the lock. i think instead we should spawn another task for caching
if let Ok(mut response_cache) = self.response_cache.try_write()
{
// TODO: cache the warp::reply to save us serializing every time
response_cache.insert(cache_key, response.clone());
if response_cache.len() >= RESPONSE_CACHE_CAP {
// TODO: this isn't really an LRU. what is this called? should we make it an lru? these caches only live for one block
response_cache.pop_front();
}
}
response
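Switching from write().await to try_write() means a contended lock now just skips the cache insert instead of parking the request handler, which is the deadlock this commit is investigating. The TODO above suggests spawning a separate task for caching instead; below is a self-contained sketch of that idea with stand-in types (a VecDeque instead of the real cache container, hypothetical names), not what the commit actually ships:

// sketch of the "spawn another task for caching" TODO; hypothetical types, not this commit's code
use std::collections::VecDeque;
use std::sync::Arc;
use tokio::sync::RwLock;

const RESPONSE_CACHE_CAP: usize = 128;

#[tokio::main]
async fn main() {
    // stand-in for self.response_cache
    let response_cache: Arc<RwLock<VecDeque<(u64, String)>>> = Arc::new(RwLock::new(VecDeque::new()));

    let cache_key = 263u64;
    let response = "cached body".to_string();

    // the request handler hands the insert off to a background task, so waiting for the
    // write lock can no longer stall (or deadlock) the response path
    let cache = response_cache.clone();
    let cache_task = tokio::spawn(async move {
        let mut cache = cache.write().await;
        cache.push_back((cache_key, response));
        if cache.len() >= RESPONSE_CACHE_CAP {
            cache.pop_front();
        }
    });

    // a real handler would return the response here without awaiting the task;
    // the join is only so this example exits cleanly
    cache_task.await.unwrap();
}

Since the cache key already includes best_head_block_number, a late insert from such a background task would at worst waste a cache slot rather than serve a stale answer.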

@@ -249,9 +249,9 @@ impl Web3Connection {
while let Some(new_block) = stream.next().await {
let new_block_number = new_block.number.unwrap().as_u64();
// TODO: only store if this isn't already stored?
// TODO: also send something to the provider_tier so it can sort?
// TODO: do we need this old block number check? its helpful on http, but here it shouldn't dupe except maybe on the first run
// TODO: we need to include the block hash here otherwise
// TODO: add the block to our cache
self.head_block_number
.fetch_max(new_block_number, atomic::Ordering::AcqRel);
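fetch_max keeps the stored head block number monotonic: even if the subscription delivers an older block (the "old block number check" question in the TODOs), the counter never moves backwards. A standalone illustration with made-up block numbers:

// standalone illustration of atomic fetch_max for head-block tracking; block numbers are made up
use std::sync::atomic::{AtomicU64, Ordering};

fn main() {
    let head_block_number = AtomicU64::new(0);

    // even if blocks arrive out of order, fetch_max never moves the head backwards
    for new_block_number in [100u64, 99, 101] {
        head_block_number.fetch_max(new_block_number, Ordering::AcqRel);
    }

    assert_eq!(head_block_number.load(Ordering::Acquire), 101);
}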

@@ -174,6 +174,7 @@ impl Web3Connections {
}
}
/// TODO: possible deadlock here. investigate more. probably refactor
pub async fn update_synced_rpcs(&self, rpc: &Arc<Web3Connection>) -> anyhow::Result<()> {
let mut synced_connections = self.synced_connections.write().await;
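The deadlock the new TODO warns about is the usual tokio RwLock hazard: if any caller still holds a read guard on synced_connections (on this task, or on a task this one waits for) when update_synced_rpcs awaits write(), the write can never be granted and both sides stall. A self-contained reproduction with a stand-in Vec<u64> for the synced-connections state, wrapped in a timeout so it fails visibly instead of hanging:

// minimal reproduction of the read-guard-held-across-write deadlock; stand-in types, not repo code
use std::time::Duration;
use tokio::sync::RwLock;
use tokio::time::timeout;

#[tokio::main]
async fn main() {
    let synced_connections = RwLock::new(Vec::<u64>::new());

    // some earlier code path is still holding a read guard...
    let read_guard = synced_connections.read().await;

    // ...so an update_synced_rpcs-style write() can never complete until that guard drops
    let write_attempt = timeout(Duration::from_secs(1), synced_connections.write()).await;
    assert!(write_attempt.is_err(), "write lock was stuck behind the live read guard");

    drop(read_guard);
}

Keeping read guards tightly scoped, or cloning the needed data out and dropping the guard before calling update_synced_rpcs, avoids the hang.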