From df87a41d5beb09eb703825eb3b58d427ea3bc214 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Wed, 15 Feb 2023 21:54:07 -0800
Subject: [PATCH] theres a perf regression in here somewhere

---
 web3_proxy/src/rpcs/one.rs     | 9 ++++++++-
 web3_proxy/src/rpcs/request.rs | 7 ++++---
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/web3_proxy/src/rpcs/one.rs b/web3_proxy/src/rpcs/one.rs
index 8e49a92c..dcbfe220 100644
--- a/web3_proxy/src/rpcs/one.rs
+++ b/web3_proxy/src/rpcs/one.rs
@@ -671,8 +671,14 @@ impl Web3Rpc {
             // TODO: how often? different depending on the chain?
             // TODO: reset this timeout when a new block is seen? we need to keep request_latency updated though
-            let health_sleep_seconds = 10;
+            // let health_sleep_seconds = 10;
+
+            futures::future::pending::<()>().await;
+
+            Ok(())
+
+            // TODO: benchmark this and lock contention
+            /*
             let mut old_total_requests = 0;
             let mut new_total_requests;
@@ -729,6 +735,7 @@ impl Web3Rpc {
                     old_total_requests = new_total_requests;
                 }
             }
+            */
         };

         futures.push(flatten_handle(tokio::spawn(f)));
diff --git a/web3_proxy/src/rpcs/request.rs b/web3_proxy/src/rpcs/request.rs
index 2f2cf7b3..8a9254a6 100644
--- a/web3_proxy/src/rpcs/request.rs
+++ b/web3_proxy/src/rpcs/request.rs
@@ -375,12 +375,13 @@ impl OpenRequestHandle {
                 }
             }
         } else {
-            let latency_ms = start.elapsed().as_secs_f64() * 1000.0;
+            // TODO: record request latency
+            // let latency_ms = start.elapsed().as_secs_f64() * 1000.0;

            // TODO: is this lock here a problem? should this be done through a channel? i started to code it, but it didn't seem to matter
-            let mut latency_recording = self.rpc.request_latency.write();
+            // let mut latency_recording = self.rpc.request_latency.write();

-            latency_recording.record(latency_ms);
+            // latency_recording.record(latency_ms);
        }

        response
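
Note: the patch itself only comments the recording out to hunt the regression. The TODO in request.rs asks whether the `request_latency` write lock should be replaced by a channel. Below is a minimal sketch of that alternative, not the project's actual code: it assumes a tokio mpsc channel and uses a hypothetical `LatencyRecorder` type as a stand-in for whatever histogram/EWMA `request_latency` really is. Any real change here should still be benchmarked against the lock, per the TODO.

// Sketch only: channel-based latency recording, assuming tokio.
// `LatencyRecorder` is a hypothetical stand-in type, not the repo's.
use std::time::Instant;
use tokio::sync::mpsc;

/// Hypothetical recorder; the real type might be an HdrHistogram or EWMA.
#[derive(Default)]
struct LatencyRecorder {
    count: u64,
    total_ms: f64,
}

impl LatencyRecorder {
    fn record(&mut self, latency_ms: f64) {
        self.count += 1;
        self.total_ms += latency_ms;
    }
}

fn spawn_latency_recorder() -> mpsc::UnboundedSender<f64> {
    let (tx, mut rx) = mpsc::unbounded_channel::<f64>();

    // A single task owns the recorder, so the request path never takes a lock.
    tokio::spawn(async move {
        let mut recorder = LatencyRecorder::default();
        while let Some(latency_ms) = rx.recv().await {
            recorder.record(latency_ms);
        }
    });

    tx
}

#[tokio::main]
async fn main() {
    let latency_tx = spawn_latency_recorder();

    // In the request path, `request_latency.write().record(...)` becomes a
    // non-blocking send; a send error only means the recorder task is gone.
    let start = Instant::now();
    // ... handle the request ...
    let latency_ms = start.elapsed().as_secs_f64() * 1000.0;
    let _ = latency_tx.send(latency_ms);
}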