diff --git a/web3-proxy/src/app.rs b/web3-proxy/src/app.rs
index 700079e2..09fbc730 100644
--- a/web3-proxy/src/app.rs
+++ b/web3-proxy/src/app.rs
@@ -263,6 +263,8 @@ impl Web3ProxyApp {
 
         // TODO: building this cache key is slow and its large, but i don't see a better way right now
         // TODO: inspect the params and see if a block is specified. if so, use that block number instead of current_block
+        // TODO: move this to a helper function. have it be a lot smarter
+        // TODO: have 2 caches. a small `block_cache` for most calls and a large `immutable_cache` for items that won't change
         let cache_key = (
             best_head_block_number,
             request.method.clone(),
@@ -296,13 +298,15 @@ impl Web3ProxyApp {
         };
 
         // TODO: small race condidition here. parallel requests with the same query will both be saved to the cache
-        let mut response_cache = self.response_cache.write().await;
-
-        // TODO: cache the warp::reply to save us serializing every time
-        response_cache.insert(cache_key, response.clone());
-        if response_cache.len() >= RESPONSE_CACHE_CAP {
-            // TODO: this isn't really an LRU. what is this called? should we make it an lru? these caches only live for one block
-            response_cache.pop_front();
+        // TODO: try_write is unlikely to get the lock. i think instead we should spawn another task for caching
+        if let Ok(mut response_cache) = self.response_cache.try_write()
+        {
+            // TODO: cache the warp::reply to save us serializing every time
+            response_cache.insert(cache_key, response.clone());
+            if response_cache.len() >= RESPONSE_CACHE_CAP {
+                // TODO: this isn't really an LRU. what is this called? should we make it an lru? these caches only live for one block
+                response_cache.pop_front();
+            }
         }
 
         response
diff --git a/web3-proxy/src/connection.rs b/web3-proxy/src/connection.rs
index fb03b941..19825fc7 100644
--- a/web3-proxy/src/connection.rs
+++ b/web3-proxy/src/connection.rs
@@ -249,9 +249,9 @@ impl Web3Connection {
                 while let Some(new_block) = stream.next().await {
                     let new_block_number = new_block.number.unwrap().as_u64();
 
-                    // TODO: only store if this isn't already stored?
-                    // TODO: also send something to the provider_tier so it can sort?
-                    // TODO: do we need this old block number check? its helpful on http, but here it shouldn't dupe except maybe on the first run
+                    // TODO: we need to include the block hash here otherwise
+                    // TODO: add the block to our cache
+
                     self.head_block_number
                         .fetch_max(new_block_number, atomic::Ordering::AcqRel);
 
diff --git a/web3-proxy/src/connections.rs b/web3-proxy/src/connections.rs
index 29c78588..d735631e 100644
--- a/web3-proxy/src/connections.rs
+++ b/web3-proxy/src/connections.rs
@@ -174,6 +174,7 @@ impl Web3Connections {
         }
     }
 
+    /// TODO: possible dead lock here. investigate more. probably refactor
     pub async fn update_synced_rpcs(&self, rpc: &Arc<Web3Connection>) -> anyhow::Result<()> {
         let mut synced_connections = self.synced_connections.write().await;
 
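
Note (not part of the patch): the `try_write` TODO in app.rs suggests spawning a separate task for caching instead of skipping the insert when the lock is contended. Below is a minimal sketch of that idea, assuming a tokio `RwLock` like the one used here; the `ResponseCache` alias, the `String` key/value types, the `cache_response` helper, and the eviction are simplified stand-ins, not the project's real types.

```rust
use std::{collections::HashMap, sync::Arc, time::Duration};

use tokio::sync::RwLock;

const RESPONSE_CACHE_CAP: usize = 1024;

// Simplified stand-in for the proxy's response cache: String key and value
// instead of the real cache_key tuple and JSON-RPC response types.
type ResponseCache = Arc<RwLock<HashMap<String, String>>>;

/// Hand the insert off to a background task so the request path never waits
/// on the write lock. This is the direction the TODO points at, not the code
/// that ships in this patch (which uses try_write and skips caching when the
/// lock is contended).
fn cache_response(cache: ResponseCache, cache_key: String, response: String) {
    tokio::spawn(async move {
        let mut cache = cache.write().await;

        if cache.len() >= RESPONSE_CACHE_CAP {
            // crude eviction stand-in for the ordered pop_front in the real cache
            let evict_key = cache.keys().next().cloned();
            if let Some(key) = evict_key {
                cache.remove(&key);
            }
        }

        cache.insert(cache_key, response);
    });
}

#[tokio::main]
async fn main() {
    let cache: ResponseCache = Default::default();

    // the caller returns immediately; the insert happens in the background
    cache_response(
        cache.clone(),
        "eth_blockNumber".to_string(),
        "0x10".to_string(),
    );

    // only for the demo: give the spawned task a moment to finish the insert
    tokio::time::sleep(Duration::from_millis(10)).await;

    println!("cached: {:?}", cache.read().await.get("eth_blockNumber"));
}
```

The trade-off is that a cached response may land slightly after the request returns, but no insert is dropped just because another request held the lock at that moment.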