From 12f3f940a0c1fcb0908e7745c40f5dcbd1f62384 Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Sat, 22 Jul 2023 01:56:13 -0700 Subject: [PATCH] include the window in the tsdb logs --- web3_proxy/src/config.rs | 2 +- web3_proxy/src/stats/mod.rs | 5 ++- web3_proxy/src/stats/stat_buffer.rs | 4 +- web3_proxy/tests/test_multiple_proxy.rs | 52 +++++++++++++------------ 4 files changed, 33 insertions(+), 30 deletions(-) diff --git a/web3_proxy/src/config.rs b/web3_proxy/src/config.rs index 7386fca1..8557c3a9 100644 --- a/web3_proxy/src/config.rs +++ b/web3_proxy/src/config.rs @@ -344,7 +344,7 @@ mod tests { assert_eq!(a.min_synced_rpcs, 1); // b is from Default - let mut b: AppConfig = AppConfig { + let b: AppConfig = AppConfig { // influxdb_id is randomized, so we clone it influxdb_id: a.influxdb_id.clone(), ..Default::default() diff --git a/web3_proxy/src/stats/mod.rs b/web3_proxy/src/stats/mod.rs index c41fc417..4bdc7ee2 100644 --- a/web3_proxy/src/stats/mod.rs +++ b/web3_proxy/src/stats/mod.rs @@ -503,9 +503,10 @@ impl BufferedRpcQueryStats { builder = builder.tag("rpc_secret_key_id", rpc_secret_key_id.to_string()); } - // [add "uniq" to the timstamp](https://docs.influxdata.com/influxdb/v2.0/write-data/best-practices/duplicate-points/#increment-the-timestamp) + // [add "uniq" to the timestamp](https://docs.influxdata.com/influxdb/v2.0/write-data/best-practices/duplicate-points/#increment-the-timestamp) // i64 timestamps get us to Friday, April 11, 2262 - let timestamp_ns: i64 = key.response_timestamp * 1_000_000_000 + uniq % 1_000_000_000; + assert!(uniq < 1_000_000_000, "uniq is way too big"); + let timestamp_ns: i64 = key.response_timestamp * 1_000_000_000 + uniq; builder = builder.timestamp(timestamp_ns); let point = builder.build()?; diff --git a/web3_proxy/src/stats/stat_buffer.rs b/web3_proxy/src/stats/stat_buffer.rs index 2c8c65ed..75a564b0 100644 --- a/web3_proxy/src/stats/stat_buffer.rs +++ b/web3_proxy/src/stats/stat_buffer.rs @@ -162,7 +162,7 @@ impl StatBuffer { let (count, new_frontend_requests) = self.save_tsdb_stats().await; if count > 0 { tsdb_frontend_requests += new_frontend_requests; - debug!("Saved {} stats for {} requests to the tsdb", count, new_frontend_requests); + debug!("Saved {} stats for {} requests to the tsdb @ {}/{}", count, new_frontend_requests, self.tsdb_window, self.num_tsdb_windows); } } x = flush_receiver.recv() => { @@ -411,7 +411,7 @@ impl StatBuffer { let mut frontend_requests = 0; if let Some(influxdb_client) = self.influxdb_client.as_ref() { - // every time we save, we increment the ts_db_window. this is used to ensure that stats don't overwrite others because the keys match + // every time we save, we increment the tsdb_window. this is used to ensure that stats don't overwrite others because the keys match // this has to be done carefully or cardinality becomes a problem! // https://docs.influxdata.com/influxdb/v2.0/write-data/best-practices/duplicate-points/ self.tsdb_window += 1; diff --git a/web3_proxy/tests/test_multiple_proxy.rs b/web3_proxy/tests/test_multiple_proxy.rs index 9a5d7dea..33d67474 100644 --- a/web3_proxy/tests/test_multiple_proxy.rs +++ b/web3_proxy/tests/test_multiple_proxy.rs @@ -118,30 +118,32 @@ async fn test_multiple_proxies_stats_add_up() { // just because the handles are all joined doesn't mean the stats have had time to make it into the buffer // TODO: don't sleep. 
watch an atomic counter or something - sleep(Duration::from_secs(10)).await; + sleep(Duration::from_secs(5)).await; // Flush all stats here // TODO: the test should maybe pause time so that stats definitely flush from our queries. - let flush_0_count_0 = x_0.flush_stats().await.unwrap(); - let flush_1_count_0 = x_1.flush_stats().await.unwrap(); + let _flush_0_count_0 = x_0.flush_stats().await.unwrap(); + let _flush_1_count_0 = x_1.flush_stats().await.unwrap(); - info!("Counts 0 are: {:?}", flush_0_count_0); - assert_eq!(flush_0_count_0.relational, 1); - assert_eq!(flush_0_count_0.timeseries, 2); + // // the counts might actually be zero because we flushed from timers + // // TODO: tests should probably have the option to set flush interval to infinity for more control. + // info!(?flush_0_count_0); + // assert_eq!(flush_0_count_0.relational, 1); + // assert_eq!(flush_0_count_0.timeseries, 2); + // info!(?flush_1_count_0); + // assert_eq!(flush_1_count_0.relational, 1); + // assert_eq!(flush_1_count_0.timeseries, 2); - // Wait a bit. TODO: instead of waiting. make flush stats more robust - sleep(Duration::from_secs(5)).await; - info!("Counts 1 are: {:?}", flush_1_count_0); - assert_eq!(flush_1_count_0.relational, 1); - assert_eq!(flush_1_count_0.timeseries, 2); + // give time for more stats to arrive + sleep(Duration::from_secs(2)).await; // no more stats should arrive let flush_0_count_1 = x_0.flush_stats().await.unwrap(); let flush_1_count_1 = x_1.flush_stats().await.unwrap(); - info!("Counts 0 are: {:?}", flush_0_count_1); + info!(?flush_0_count_1); assert_eq!(flush_0_count_1.relational, 0); assert_eq!(flush_0_count_1.timeseries, 0); - info!("Counts 1 are: {:?}", flush_1_count_0); + info!(?flush_1_count_1); assert_eq!(flush_1_count_1.relational, 0); assert_eq!(flush_1_count_1.timeseries, 0); @@ -172,18 +174,14 @@ async fn test_multiple_proxies_stats_add_up() { user_0_balance_post.total_frontend_requests, number_requests * 3 ); - assert_eq!( - mysql_stats["error_response"], - influx_stats["error_response"] - ); - assert_eq!( - mysql_stats["archive_needed"], - influx_stats["archive_needed"] - ); assert_eq!( Decimal::from_str(&mysql_stats["chain_id"].to_string().replace('"', "")).unwrap(), Decimal::from_str(&influx_stats["chain_id"].to_string().replace('"', "")).unwrap() ); + assert_eq!( + Decimal::from_str(&mysql_stats["frontend_requests"].to_string()).unwrap(), + Decimal::from_str(&influx_stats["total_frontend_requests"].to_string()).unwrap() + ); assert_eq!( Decimal::from_str(&mysql_stats["no_servers"].to_string()).unwrap(), Decimal::from_str(&influx_stats["no_servers"].to_string()).unwrap() @@ -196,10 +194,6 @@ async fn test_multiple_proxies_stats_add_up() { Decimal::from_str(&mysql_stats["cache_misses"].to_string()).unwrap(), Decimal::from_str(&influx_stats["total_cache_misses"].to_string()).unwrap() ); - assert_eq!( - Decimal::from_str(&mysql_stats["frontend_requests"].to_string()).unwrap(), - Decimal::from_str(&influx_stats["total_frontend_requests"].to_string()).unwrap() - ); assert_eq!( Decimal::from_str(&mysql_stats["sum_credits_used"].to_string().replace('"', "")).unwrap(), Decimal::from_str( @@ -249,6 +243,14 @@ async fn test_multiple_proxies_stats_add_up() { Decimal::from_str(&mysql_stats["backend_requests"].to_string()).unwrap(), Decimal::from_str(&influx_stats["total_backend_requests"].to_string()).unwrap() ); + assert_eq!( + mysql_stats["error_response"], + influx_stats["error_response"] + ); + assert_eq!( + mysql_stats["archive_needed"], + influx_stats["archive_needed"] + 
);

     // We don't have gauges so we can't really fix this in influx. will get back to this later
     // assert_eq!(
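
The core of the stats/mod.rs change is InfluxDB's recommended workaround for duplicate points: increment the timestamp so that two stats with identical tag sets written in the same second land on distinct points, without adding tags (which would hurt cardinality). A minimal standalone sketch of the idea, assuming a wrapping window counter; the struct and helper names below are illustrative, not the proxy's real types:

// Illustrative only: in the real code, StatBuffer carries tsdb_window and
// num_tsdb_windows, and BufferedRpcQueryStats builds the point.
struct TsdbWindow {
    tsdb_window: i64,
    num_tsdb_windows: i64,
}

impl TsdbWindow {
    // Every save advances the window, wrapping so the offset stays small.
    fn next(&mut self) -> i64 {
        self.tsdb_window = (self.tsdb_window + 1) % self.num_tsdb_windows;
        self.tsdb_window
    }

    // Spread a second-resolution timestamp into nanoseconds, offset by the
    // current window. Mirrors the patched code: the offset must stay under
    // 1e9 or the point would spill into the next second.
    fn timestamp_ns(&self, response_timestamp: i64) -> i64 {
        let uniq = self.tsdb_window;
        assert!(uniq < 1_000_000_000, "uniq is way too big");
        response_timestamp * 1_000_000_000 + uniq
    }
}

fn main() {
    let mut w = TsdbWindow { tsdb_window: 0, num_tsdb_windows: 4 };
    let ts = 1_690_000_000_i64; // seconds since the epoch

    // Two saves within the same second still produce distinct points.
    w.next();
    let a = w.timestamp_ns(ts);
    w.next();
    let b = w.timestamp_ns(ts);
    assert_ne!(a, b);
    println!("{a} vs {b}");
}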
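
On the test side, the first flush_stats counts are commented out because the interval-driven save inside the buffer's select! loop can drain the queue before the explicit flush arrives, so the counts "might actually be zero". A minimal sketch of that pattern, assuming tokio; the channel wiring, Flushed, and buffer_loop here are hypothetical, not the proxy's real API:

use tokio::sync::{mpsc, oneshot};
use tokio::time::{interval, Duration};

// Hypothetical reply type, loosely shaped like the counts the test asserts on.
#[derive(Debug)]
struct Flushed {
    timeseries: usize,
}

async fn buffer_loop(
    mut stat_rx: mpsc::Receiver<u64>,
    mut flush_rx: mpsc::Receiver<oneshot::Sender<Flushed>>,
) {
    let mut tick = interval(Duration::from_secs(1));
    let mut buffered = 0usize;

    loop {
        tokio::select! {
            Some(_stat) = stat_rx.recv() => {
                buffered += 1;
            }
            _ = tick.tick() => {
                // timer-driven save: this is the race that makes an
                // explicit flush sometimes report zero
                buffered = 0;
            }
            Some(reply) = flush_rx.recv() => {
                // on-demand flush, like x_0.flush_stats() in the test
                let out = Flushed { timeseries: buffered };
                buffered = 0;
                let _ = reply.send(out);
            }
            else => break,
        }
    }
}

#[tokio::main]
async fn main() {
    let (stat_tx, stat_rx) = mpsc::channel(64);
    let (flush_tx, flush_rx) = mpsc::channel(1);
    tokio::spawn(buffer_loop(stat_rx, flush_rx));

    stat_tx.send(1).await.unwrap();
    let (reply_tx, reply_rx) = oneshot::channel();
    flush_tx.send(reply_tx).await.unwrap();
    // may print 0 or 1 depending on whether a tick flushed first,
    // which is exactly why the test's first-count asserts were disabled
    println!("{:?}", reply_rx.await.unwrap());
}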