From 58cc129837022561393ff83c812888f4a6433ac2 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Sun, 18 Jun 2023 17:57:21 -0700
Subject: [PATCH] forgot to convert tier_sec_size from ms. 1 was a very large default

---
 web3_proxy/src/rpcs/consensus.rs | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/web3_proxy/src/rpcs/consensus.rs b/web3_proxy/src/rpcs/consensus.rs
index 8b10bc19..5f2306eb 100644
--- a/web3_proxy/src/rpcs/consensus.rs
+++ b/web3_proxy/src/rpcs/consensus.rs
@@ -10,7 +10,7 @@ use hashbrown::{HashMap, HashSet};
 use hdrhistogram::serialization::{Serializer, V2DeflateSerializer};
 use hdrhistogram::Histogram;
 use itertools::{Itertools, MinMaxResult};
-use log::{log_enabled, trace, warn, Level};
+use log::{debug, log_enabled, trace, warn, Level};
 use moka::future::Cache;
 use serde::Serialize;
 use std::cmp::{Ordering, Reverse};
@@ -497,9 +497,17 @@ impl ConsensusFinder {
             trace!("weighted_latencies: {}", encoded);
         }
 
-        // TODO: get someone who is better at math to do something smarter. maybe involving stddev?
-        // bucket sizes of the larger of 30ms or 1/2 the lowest latency
-        let tier_sec_size = 30f32.max(min_median_latency_sec / 2.0);
+        trace!("median_latencies_sec: {:#?}", median_latencies_sec);
+
+        trace!("min_median_latency_sec: {}", min_median_latency_sec);
+
+        // TODO: get someone who is better at math to do something smarter. maybe involving stddev? maybe involving cutting the histogram at the troughs?
+        // bucket sizes of the larger of 20ms or 1/2 the lowest latency
+        // TODO: is 20ms an okay default? make it configurable?
+        // TODO: does keeping the buckets the same size make sense?
+        let tier_sec_size = 0.020f32.max(min_median_latency_sec / 2.0);
+
+        trace!("tier_sec_size: {}", tier_sec_size);
 
         for (rpc, median_latency_sec) in median_latencies_sec.into_iter() {
            let tier = (median_latency_sec - min_median_latency_sec) / tier_sec_size;
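
Since the whole fix hinges on the units of tier_sec_size, here is a minimal standalone sketch of the bucketing arithmetic from the second hunk. The names tier_sec_size, median_latency_sec, and min_median_latency_sec mirror the diff; the integer cast, the tier_for helper, and the sample latencies are assumptions added for illustration, not code from the repo.

    // Sketch of the tier bucketing above: each RPC's tier is how many
    // tier_sec_size buckets its median latency sits past the fastest peer.
    fn tier_for(median_latency_sec: f32, min_median_latency_sec: f32) -> u32 {
        // bucket sizes of the larger of 20ms or 1/2 the lowest latency
        let tier_sec_size = 0.020f32.max(min_median_latency_sec / 2.0);

        ((median_latency_sec - min_median_latency_sec) / tier_sec_size) as u32
    }

    fn main() {
        // hypothetical median latencies (seconds) for three upstream RPCs
        let medians = [0.015f32, 0.040, 0.120];
        let min = medians.iter().cloned().fold(f32::INFINITY, f32::min);

        for m in medians {
            println!("median {m}s -> tier {}", tier_for(m, min));
        }
    }

With the sample inputs this prints tiers 0, 1, and 5. Under the old constant, 30f32 was interpreted as 30 seconds rather than the 30 ms the comment intended, so tier_sec_size was enormous and every RPC collapsed into tier 0; the new 0.020f32 keeps the buckets at 20 ms and lets peers spread across tiers.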