From 283bae6245d8e694e1eb2ad113ad626f60acc53a Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Fri, 13 May 2022 17:43:02 +0000 Subject: [PATCH] Revert "investigate deadlock" This reverts commit 9a68ece8a446d8fca3eb8638ddc88297e73110a3. --- web3-proxy/src/app.rs | 18 +++++++----------- web3-proxy/src/connection.rs | 6 +++--- web3-proxy/src/connections.rs | 1 - 3 files changed, 10 insertions(+), 15 deletions(-) diff --git a/web3-proxy/src/app.rs b/web3-proxy/src/app.rs index 09fbc730..700079e2 100644 --- a/web3-proxy/src/app.rs +++ b/web3-proxy/src/app.rs @@ -263,8 +263,6 @@ impl Web3ProxyApp { // TODO: building this cache key is slow and its large, but i don't see a better way right now // TODO: inspect the params and see if a block is specified. if so, use that block number instead of current_block - // TODO: move this to a helper function. have it be a lot smarter - // TODO: have 2 caches. a small `block_cache` for most calls and a large `immutable_cache` for items that won't change let cache_key = ( best_head_block_number, request.method.clone(), @@ -298,15 +296,13 @@ impl Web3ProxyApp { }; // TODO: small race condidition here. parallel requests with the same query will both be saved to the cache - // TODO: try_write is unlikely to get the lock. i think instead we should spawn another task for caching - if let Ok(mut response_cache) = self.response_cache.try_write() - { - // TODO: cache the warp::reply to save us serializing every time - response_cache.insert(cache_key, response.clone()); - if response_cache.len() >= RESPONSE_CACHE_CAP { - // TODO: this isn't really an LRU. what is this called? should we make it an lru? these caches only live for one block - response_cache.pop_front(); - } + let mut response_cache = self.response_cache.write().await; + + // TODO: cache the warp::reply to save us serializing every time + response_cache.insert(cache_key, response.clone()); + if response_cache.len() >= RESPONSE_CACHE_CAP { + // TODO: this isn't really an LRU. what is this called? should we make it an lru? these caches only live for one block + response_cache.pop_front(); } response diff --git a/web3-proxy/src/connection.rs b/web3-proxy/src/connection.rs index 19825fc7..fb03b941 100644 --- a/web3-proxy/src/connection.rs +++ b/web3-proxy/src/connection.rs @@ -249,9 +249,9 @@ impl Web3Connection { while let Some(new_block) = stream.next().await { let new_block_number = new_block.number.unwrap().as_u64(); - // TODO: we need to include the block hash here otherwise - // TODO: add the block to our cache - + // TODO: only store if this isn't already stored? + // TODO: also send something to the provider_tier so it can sort? + // TODO: do we need this old block number check? its helpful on http, but here it shouldn't dupe except maybe on the first run self.head_block_number .fetch_max(new_block_number, atomic::Ordering::AcqRel); diff --git a/web3-proxy/src/connections.rs b/web3-proxy/src/connections.rs index d735631e..29c78588 100644 --- a/web3-proxy/src/connections.rs +++ b/web3-proxy/src/connections.rs @@ -174,7 +174,6 @@ impl Web3Connections { } } - /// TODO: possible dead lock here. investigate more. probably refactor pub async fn update_synced_rpcs(&self, rpc: &Arc) -> anyhow::Result<()> { let mut synced_connections = self.synced_connections.write().await;