Revert "investigate deadlock"

This reverts commit 9a68ece8a4.
This commit is contained in:
Bryan Stitt 2022-05-13 17:43:02 +00:00
parent 57fc8160ca
commit 283bae6245
3 changed files with 10 additions and 15 deletions

View File

@ -263,8 +263,6 @@ impl Web3ProxyApp {
// TODO: building this cache key is slow and its large, but i don't see a better way right now // TODO: building this cache key is slow and its large, but i don't see a better way right now
// TODO: inspect the params and see if a block is specified. if so, use that block number instead of current_block // TODO: inspect the params and see if a block is specified. if so, use that block number instead of current_block
// TODO: move this to a helper function. have it be a lot smarter
// TODO: have 2 caches. a small `block_cache` for most calls and a large `immutable_cache` for items that won't change
let cache_key = ( let cache_key = (
best_head_block_number, best_head_block_number,
request.method.clone(), request.method.clone(),
@ -298,15 +296,13 @@ impl Web3ProxyApp {
}; };
// TODO: small race condition here. parallel requests with the same query will both be saved to the cache // TODO: small race condition here. parallel requests with the same query will both be saved to the cache
// TODO: try_write is unlikely to get the lock. i think instead we should spawn another task for caching let mut response_cache = self.response_cache.write().await;
if let Ok(mut response_cache) = self.response_cache.try_write()
{ // TODO: cache the warp::reply to save us serializing every time
// TODO: cache the warp::reply to save us serializing every time response_cache.insert(cache_key, response.clone());
response_cache.insert(cache_key, response.clone()); if response_cache.len() >= RESPONSE_CACHE_CAP {
if response_cache.len() >= RESPONSE_CACHE_CAP { // TODO: this isn't really an LRU. what is this called? should we make it an lru? these caches only live for one block
// TODO: this isn't really an LRU. what is this called? should we make it an lru? these caches only live for one block response_cache.pop_front();
response_cache.pop_front();
}
} }
response response

View File

@ -249,9 +249,9 @@ impl Web3Connection {
while let Some(new_block) = stream.next().await { while let Some(new_block) = stream.next().await {
let new_block_number = new_block.number.unwrap().as_u64(); let new_block_number = new_block.number.unwrap().as_u64();
// TODO: we need to include the block hash here otherwise // TODO: only store if this isn't already stored?
// TODO: add the block to our cache // TODO: also send something to the provider_tier so it can sort?
// TODO: do we need this old block number check? its helpful on http, but here it shouldn't dupe except maybe on the first run
self.head_block_number self.head_block_number
.fetch_max(new_block_number, atomic::Ordering::AcqRel); .fetch_max(new_block_number, atomic::Ordering::AcqRel);

View File

@ -174,7 +174,6 @@ impl Web3Connections {
} }
} }
/// TODO: possible deadlock here. investigate more. probably refactor
pub async fn update_synced_rpcs(&self, rpc: &Arc<Web3Connection>) -> anyhow::Result<()> { pub async fn update_synced_rpcs(&self, rpc: &Arc<Web3Connection>) -> anyhow::Result<()> {
let mut synced_connections = self.synced_connections.write().await; let mut synced_connections = self.synced_connections.write().await;