there's a perf regression in here somewhere

Bryan Stitt 2023-02-15 21:54:07 -08:00 committed by yenicelik
parent 678cd8067b
commit df87a41d5b
2 changed files with 12 additions and 4 deletions


@@ -671,8 +671,14 @@ impl Web3Rpc {
             // TODO: how often? different depending on the chain?
             // TODO: reset this timeout when a new block is seen? we need to keep request_latency updated though
-            let health_sleep_seconds = 10;
+            // let health_sleep_seconds = 10;
+            futures::future::pending::<()>().await;
+            Ok(())
+            // TODO: benchmark this and lock contention
+            /*
             let mut old_total_requests = 0;
             let mut new_total_requests;
@@ -729,6 +735,7 @@ impl Web3Rpc {
                     old_total_requests = new_total_requests;
                 }
             }
+            */
         };
         futures.push(flatten_handle(tokio::spawn(f)));
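
For context on what the first file's change does: `futures::future::pending::<()>()` is a future that never resolves, so awaiting it parks the spawned health-check task forever, and the request-counting loop below it (now inside the `/* ... */` block) never runs. That isolates this loop as one suspect for the regression. A minimal sketch of the behaviour, assuming the `tokio` and `futures` crates; this standalone example is illustrative and not part of the commit:

```rust
use std::time::Duration;

#[tokio::main]
async fn main() {
    // Spawn a task whose body awaits a future that never completes,
    // mirroring how the health-check loop is disabled in this commit.
    let handle = tokio::spawn(async {
        futures::future::pending::<()>().await;
        // Unreachable: pending() never resolves, so this Ok is never returned.
        Ok::<(), ()>(())
    });

    // The task stays alive but does no work.
    tokio::time::sleep(Duration::from_millis(50)).await;
    assert!(!handle.is_finished());
    println!("health-check stand-in is still pending, as expected");
}
```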


@@ -375,12 +375,13 @@ impl OpenRequestHandle {
                 }
             }
         } else {
-            let latency_ms = start.elapsed().as_secs_f64() * 1000.0;
+            // TODO: record request latency
+            // let latency_ms = start.elapsed().as_secs_f64() * 1000.0;
             // TODO: is this lock here a problem? should this be done through a channel? i started to code it, but it didn't seem to matter
-            let mut latency_recording = self.rpc.request_latency.write();
-            latency_recording.record(latency_ms);
+            // let mut latency_recording = self.rpc.request_latency.write();
+            // latency_recording.record(latency_ms);
         }
         response
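
The second file's change disables the per-request latency recording that took a write lock (`self.rpc.request_latency.write()`) on the hot path, the other suspect for the regression. The surviving TODO asks whether recording should go through a channel instead of a lock. A hedged sketch of that idea, assuming `tokio`'s mpsc channel; the `LatencyRecorder` type and channel names are illustrative and not from this repository:

```rust
use tokio::sync::mpsc;

// Sketch of the channel-based approach the TODO mentions: request handlers
// send their latency to a single recorder task instead of taking a write
// lock, so the hot path never contends on shared state.
struct LatencyRecorder {
    samples: Vec<f64>,
}

impl LatencyRecorder {
    fn record(&mut self, latency_ms: f64) {
        self.samples.push(latency_ms);
    }
}

#[tokio::main]
async fn main() {
    let (latency_tx, mut latency_rx) = mpsc::unbounded_channel::<f64>();

    // One task owns the recorder; no lock is needed anywhere.
    let recorder_task = tokio::spawn(async move {
        let mut recorder = LatencyRecorder { samples: Vec::new() };
        while let Some(latency_ms) = latency_rx.recv().await {
            recorder.record(latency_ms);
        }
        recorder.samples.len()
    });

    // Request handlers just send; this is cheap and never blocks.
    for i in 0..10 {
        latency_tx.send(i as f64).expect("recorder task is alive");
    }
    drop(latency_tx); // closing the channel lets the recorder task finish

    let recorded = recorder_task.await.expect("recorder task panicked");
    println!("recorded {recorded} latency samples");
}
```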