include the window in the tsdb logs

parent 35b03cffad
commit 12f3f940a0
@@ -344,7 +344,7 @@ mod tests {
         assert_eq!(a.min_synced_rpcs, 1);

         // b is from Default
-        let mut b: AppConfig = AppConfig {
+        let b: AppConfig = AppConfig {
             // influxdb_id is randomized, so we clone it
             influxdb_id: a.influxdb_id.clone(),
             ..Default::default()
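For readers skimming the hunk above: the test builds `b` from `Default` and copies only the one field that `Default` randomizes, so the equality check exercises everything else. A minimal self-contained sketch of that pattern (`MyConfig` is a stand-in, not the real `AppConfig`, and the pseudo-random id is purely illustrative):

    use std::time::{SystemTime, UNIX_EPOCH};

    // Stand-in for AppConfig: one field is randomized by Default, the rest are fixed.
    #[derive(Debug, PartialEq, Clone)]
    struct MyConfig {
        influxdb_id: String,
        min_synced_rpcs: usize,
    }

    impl Default for MyConfig {
        fn default() -> Self {
            // stand-in for a random id (the real config presumably uses a proper RNG or UUID)
            let pseudo_random = SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap()
                .as_nanos();
            Self {
                influxdb_id: pseudo_random.to_string(),
                min_synced_rpcs: 1,
            }
        }
    }

    fn main() {
        let a = MyConfig::default();
        assert_eq!(a.min_synced_rpcs, 1);

        // influxdb_id is randomized, so we clone it; everything else comes from Default
        let b = MyConfig {
            influxdb_id: a.influxdb_id.clone(),
            ..Default::default()
        };

        assert_eq!(a, b);
    }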
@@ -503,9 +503,10 @@ impl BufferedRpcQueryStats {
             builder = builder.tag("rpc_secret_key_id", rpc_secret_key_id.to_string());
         }

-        // [add "uniq" to the timstamp](https://docs.influxdata.com/influxdb/v2.0/write-data/best-practices/duplicate-points/#increment-the-timestamp)
+        // [add "uniq" to the timestamp](https://docs.influxdata.com/influxdb/v2.0/write-data/best-practices/duplicate-points/#increment-the-timestamp)
         // i64 timestamps get us to Friday, April 11, 2262
-        let timestamp_ns: i64 = key.response_timestamp * 1_000_000_000 + uniq % 1_000_000_000;
+        assert!(uniq < 1_000_000_000, "uniq is way too big");
+        let timestamp_ns: i64 = key.response_timestamp * 1_000_000_000 + uniq;
         builder = builder.timestamp(timestamp_ns);

         let point = builder.build()?;
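A quick sanity check on the arithmetic in the new lines (a sketch, not code from the repo): `response_timestamp` is in whole seconds, so multiplying by 1_000_000_000 converts it to nanoseconds, and a `uniq` below one billion only perturbs the sub-second part, so points that share every tag still land on distinct timestamps. The old `uniq % 1_000_000_000` silently aliased oversized values onto each other; the new code asserts instead, so a too-large `uniq` fails loudly.

    /// Sketch: build a per-point nanosecond timestamp from a seconds timestamp
    /// plus a small uniqueness offset, mirroring the expression in the diff.
    fn unique_timestamp_ns(response_timestamp: i64, uniq: i64) -> i64 {
        assert!(uniq < 1_000_000_000, "uniq is way too big");
        response_timestamp * 1_000_000_000 + uniq
    }

    fn main() {
        let ts = 1_700_000_000; // some timestamp in whole seconds

        // two points that share every tag still get distinct timestamps
        assert_ne!(unique_timestamp_ns(ts, 0), unique_timestamp_ns(ts, 1));

        // and both stay inside the same second
        assert_eq!(unique_timestamp_ns(ts, 1) / 1_000_000_000, ts);
    }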
@@ -162,7 +162,7 @@ impl StatBuffer {
                     let (count, new_frontend_requests) = self.save_tsdb_stats().await;
                     if count > 0 {
                         tsdb_frontend_requests += new_frontend_requests;
-                        debug!("Saved {} stats for {} requests to the tsdb", count, new_frontend_requests);
+                        debug!("Saved {} stats for {} requests to the tsdb @ {}/{}", count, new_frontend_requests, self.tsdb_window, self.num_tsdb_windows);
                     }
                 }
                 x = flush_receiver.recv() => {
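With illustrative values (count = 12, new_frontend_requests = 34, tsdb_window = 3, num_tsdb_windows = 10), the message body of the new debug! line renders as:

    Saved 12 stats for 34 requests to the tsdb @ 3/10

which is what the commit title refers to: each save now reports which window it was written under and how many windows are in rotation.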
@@ -411,7 +411,7 @@ impl StatBuffer {
         let mut frontend_requests = 0;

         if let Some(influxdb_client) = self.influxdb_client.as_ref() {
-            // every time we save, we increment the ts_db_window. this is used to ensure that stats don't overwrite others because the keys match
+            // every time we save, we increment the tsdb_window. this is used to ensure that stats don't overwrite others because the keys match
             // this has to be done carefully or cardinality becomes a problem!
             // https://docs.influxdata.com/influxdb/v2.0/write-data/best-practices/duplicate-points/
             self.tsdb_window += 1;
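The hunk only shows the increment; presumably the counter wraps at num_tsdb_windows (the "3/10" style log above implies a fixed number of windows), and presumably the current window feeds the `uniq` timestamp offset from the earlier hunk. A sketch of that increment-and-wrap bookkeeping under those assumptions (stand-in struct, not the real StatBuffer):

    // Stand-in for the window bookkeeping in StatBuffer (assumed shape, not the real struct).
    struct TsdbWindows {
        tsdb_window: i64,
        num_tsdb_windows: i64,
    }

    impl TsdbWindows {
        /// Advance to the next window before a save. Wrapping keeps the window
        /// value inside a fixed range, so the duplicate-point workaround never
        /// produces an unbounded set of offsets (or tag values, if it were a tag).
        fn advance(&mut self) -> i64 {
            self.tsdb_window += 1;
            if self.tsdb_window >= self.num_tsdb_windows {
                self.tsdb_window = 0;
            }
            self.tsdb_window
        }
    }

    fn main() {
        let mut w = TsdbWindows { tsdb_window: 0, num_tsdb_windows: 3 };
        // cycles 1, 2, 0, 1, ...
        let seen: Vec<i64> = (0..4).map(|_| w.advance()).collect();
        assert_eq!(seen, vec![1, 2, 0, 1]);
    }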
@@ -118,30 +118,32 @@ async fn test_multiple_proxies_stats_add_up() {

     // just because the handles are all joined doesn't mean the stats have had time to make it into the buffer
     // TODO: don't sleep. watch an atomic counter or something
-    sleep(Duration::from_secs(10)).await;
+    sleep(Duration::from_secs(5)).await;

     // Flush all stats here
-    let flush_0_count_0 = x_0.flush_stats().await.unwrap();
-    let flush_1_count_0 = x_1.flush_stats().await.unwrap();
+    // TODO: the test should maybe pause time so that stats definitely flush from our queries.
+    let _flush_0_count_0 = x_0.flush_stats().await.unwrap();
+    let _flush_1_count_0 = x_1.flush_stats().await.unwrap();

-    info!("Counts 0 are: {:?}", flush_0_count_0);
-    assert_eq!(flush_0_count_0.relational, 1);
-    assert_eq!(flush_0_count_0.timeseries, 2);
-
-    // Wait a bit. TODO: instead of waiting. make flush stats more robust
-    sleep(Duration::from_secs(5)).await;
-    info!("Counts 1 are: {:?}", flush_1_count_0);
-    assert_eq!(flush_1_count_0.relational, 1);
-    assert_eq!(flush_1_count_0.timeseries, 2);
+    // // the counts might actually be zero because we flushed from timers
+    // // TODO: tests should probably have the option to set flush interval to infinity for more control.
+    // info!(?flush_0_count_0);
+    // assert_eq!(flush_0_count_0.relational, 1);
+    // assert_eq!(flush_0_count_0.timeseries, 2);
+    // info!(?flush_1_count_0);
+    // assert_eq!(flush_1_count_0.relational, 1);
+    // assert_eq!(flush_1_count_0.timeseries, 2);
+
+    // give time for more stats to arrive
+    sleep(Duration::from_secs(2)).await;

     // no more stats should arrive
     let flush_0_count_1 = x_0.flush_stats().await.unwrap();
     let flush_1_count_1 = x_1.flush_stats().await.unwrap();
-    info!("Counts 0 are: {:?}", flush_0_count_1);
+    info!(?flush_0_count_1);
     assert_eq!(flush_0_count_1.relational, 0);
     assert_eq!(flush_0_count_1.timeseries, 0);
-    info!("Counts 1 are: {:?}", flush_1_count_0);
+    info!(?flush_1_count_1);
     assert_eq!(flush_1_count_1.relational, 0);
     assert_eq!(flush_1_count_1.timeseries, 0);

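The TODOs above ("don't sleep", "make flush stats more robust") point at an obvious alternative; here is one hedged sketch of it, not what the repo actually does: keep calling flush until a flush reports nothing new, under a deadline, instead of sleeping for fixed durations. `FlushCounts` is a stand-in for whatever `flush_stats()` really returns; only the `relational` and `timeseries` fields are taken from the test, and the tokio runtime is assumed because the tests already use it.

    use std::time::Duration;

    use tokio::time::{sleep, timeout};

    // Stand-in for whatever `flush_stats()` returns in the real test.
    #[derive(Debug, Default)]
    struct FlushCounts {
        relational: u64,
        timeseries: u64,
    }

    /// Keep flushing until a flush reports nothing new, or give up at `deadline`.
    async fn flush_until_quiet<F, Fut>(mut flush: F, deadline: Duration) -> Result<(), &'static str>
    where
        F: FnMut() -> Fut,
        Fut: std::future::Future<Output = FlushCounts>,
    {
        timeout(deadline, async {
            loop {
                let counts = flush().await;
                if counts.relational == 0 && counts.timeseries == 0 {
                    break;
                }
                sleep(Duration::from_millis(100)).await;
            }
        })
        .await
        .map_err(|_| "stats never settled before the deadline")
    }

    #[tokio::main]
    async fn main() {
        // Fake flusher: pretends stats show up on the first flush only.
        let mut first = true;
        let result = flush_until_quiet(
            || {
                let counts = if first {
                    first = false;
                    FlushCounts { relational: 1, timeseries: 2 }
                } else {
                    FlushCounts::default()
                };
                async move { counts }
            },
            Duration::from_secs(5),
        )
        .await;

        assert!(result.is_ok());
    }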
@@ -172,18 +174,14 @@ async fn test_multiple_proxies_stats_add_up() {
         user_0_balance_post.total_frontend_requests,
         number_requests * 3
     );
-    assert_eq!(
-        mysql_stats["error_response"],
-        influx_stats["error_response"]
-    );
-    assert_eq!(
-        mysql_stats["archive_needed"],
-        influx_stats["archive_needed"]
-    );
     assert_eq!(
         Decimal::from_str(&mysql_stats["chain_id"].to_string().replace('"', "")).unwrap(),
         Decimal::from_str(&influx_stats["chain_id"].to_string().replace('"', "")).unwrap()
     );
+    assert_eq!(
+        Decimal::from_str(&mysql_stats["frontend_requests"].to_string()).unwrap(),
+        Decimal::from_str(&influx_stats["total_frontend_requests"].to_string()).unwrap()
+    );
     assert_eq!(
         Decimal::from_str(&mysql_stats["no_servers"].to_string()).unwrap(),
         Decimal::from_str(&influx_stats["no_servers"].to_string()).unwrap()
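The `.to_string().replace('"', "")` pattern in these assertions exists because both sides are JSON values and some fields apparently come back as quoted strings while others are numbers; stringifying a JSON string keeps its quotes, so they get stripped before parsing into a Decimal. A sketch of that normalization pulled into a helper (the `json_decimal` name and the sample values are made up for illustration; the test inlines the expression instead):

    use std::str::FromStr;

    use rust_decimal::Decimal;
    use serde_json::{json, Value};

    /// Parse a JSON value into a Decimal whether it arrived as a number (10)
    /// or as a quoted string ("10"). Display for a JSON string keeps the
    /// quotes, so strip them before parsing.
    fn json_decimal(v: &Value) -> Decimal {
        Decimal::from_str(&v.to_string().replace('"', "")).unwrap()
    }

    fn main() {
        // illustrative shapes only: one store returns a string, the other a number
        let mysql_stats = json!({ "chain_id": "31337" });
        let influx_stats = json!({ "chain_id": 31337 });

        assert_eq!(
            json_decimal(&mysql_stats["chain_id"]),
            json_decimal(&influx_stats["chain_id"])
        );
    }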
@@ -196,10 +194,6 @@ async fn test_multiple_proxies_stats_add_up() {
         Decimal::from_str(&mysql_stats["cache_misses"].to_string()).unwrap(),
         Decimal::from_str(&influx_stats["total_cache_misses"].to_string()).unwrap()
     );
-    assert_eq!(
-        Decimal::from_str(&mysql_stats["frontend_requests"].to_string()).unwrap(),
-        Decimal::from_str(&influx_stats["total_frontend_requests"].to_string()).unwrap()
-    );
     assert_eq!(
         Decimal::from_str(&mysql_stats["sum_credits_used"].to_string().replace('"', "")).unwrap(),
         Decimal::from_str(
@@ -249,6 +243,14 @@ async fn test_multiple_proxies_stats_add_up() {
         Decimal::from_str(&mysql_stats["backend_requests"].to_string()).unwrap(),
         Decimal::from_str(&influx_stats["total_backend_requests"].to_string()).unwrap()
     );
+    assert_eq!(
+        mysql_stats["error_response"],
+        influx_stats["error_response"]
+    );
+    assert_eq!(
+        mysql_stats["archive_needed"],
+        influx_stats["archive_needed"]
+    );

     // We don't have gauges so we cant really fix this in influx. will get back to this later
     // assert_eq!(