will push this for short review
This commit is contained in:
parent 65ca628cc7
commit ca41cb5c9e
@@ -5,3 +5,6 @@
 curl -X GET \
 "http://localhost:8544/user/stats/aggregate?query_start=1678780033&query_window_seconds=1000"
 
+
+#curl -X GET \
+#"http://localhost:8544/user/stats/detailed?query_start=1678780033&query_window_seconds=1000"
@@ -25,13 +25,25 @@ use serde_json::{json};
 use entities::{rpc_accounting, rpc_key};
 use crate::http_params::get_stats_column_from_params;
 
-// TODO: include chain_id, method, and some other things in this struct
 #[derive(Debug, Default, FromDataPoint, Serialize)]
 pub struct AggregatedRpcAccounting {
+    chain_id: u64,
     field: String,
     value: f64,
     time: DateTime<FixedOffset>,
-    // error_response: bool,
+    error_response: bool,
+    archive_needed: bool,
 }
 
+#[derive(Debug, Default, FromDataPoint, Serialize)]
+pub struct DetailedRpcAccounting {
+    chain_id: u64,
+    field: String,
+    value: f64,
+    time: DateTime<FixedOffset>,
+    error_response: bool,
+    archive_needed: bool,
+    method: String,
+}
+
 // pub struct AggregatedRpcAccountingErrors {
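Note for review: `FromDataPoint` fills these structs one per Flux record, matching `field`/`value`/`time` to the `_field`/`_value`/`_time` columns and the remaining fields to group-key tags such as `chain_id`, `error_response`, `archive_needed`, and (for the detailed struct) `method`. A minimal sketch of the round trip, assuming the influxdb2 crate used by the query calls later in this diff; the client URL, org, and token below are placeholders:

    use influxdb2::{Client, models::Query};

    // Sketch only: mirrors influxdb_client.query::<AggregatedRpcAccounting>(...) below.
    async fn fetch_aggregated(flux: String) -> anyhow::Result<Vec<AggregatedRpcAccounting>> {
        let client = Client::new("http://localhost:8086", "dev_org", "dev_token");
        // One AggregatedRpcAccounting per row of the Flux result tables.
        let rows = client.query::<AggregatedRpcAccounting>(Some(Query::new(flux))).await?;
        Ok(rows)
    }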
@@ -107,9 +119,17 @@ pub async fn query_user_stats<'a>(
 
     info!("Got this far 7");
     // , "archive_needed", "error_response"
-    let mut group_columns = vec!["_measurement", "_field"];
+    let mut group_columns = vec!["chain_id", "_measurement", "_field", "error_response", "archive_needed"];
     let mut filter_chain_id = "".to_string();
 
+    // Also group by the method if we want the detailed view
+    match stat_response_type {
+        StatType::Detailed => {
+            group_columns.push("method");
+        },
+        _ => {}
+    }
+
     if chain_id == 0 {
         group_columns.push("chain_id");
     } else {
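Note for review: these columns eventually have to be rendered into a Flux group() clause. A hypothetical sketch of that step (the real `group` variable is built outside this hunk, so the rendering below is an assumption, not quoted source):

    // Hypothetical rendering of group_columns into a Flux group() call.
    let group = format!(
        r#"|> group(columns: ["{}"])"#,
        group_columns.join("\", \"")
    );
    // Aggregated view with a fixed chain_id:
    // |> group(columns: ["chain_id", "_measurement", "_field", "error_response", "archive_needed"])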
@@ -127,15 +147,14 @@ pub async fn query_user_stats<'a>(
 
     info!("Got this far 10");
     let filter_field = match stat_response_type {
-        // StatType::Aggregated => f!(r#"|> filter(fn: (r) => r["_field"] == "frontend_requests")"#),
-        // Let's show all endpoints in a detailed stats
-        // StatType::Aggregated => "".to_string(), // f!(r#"|> filter(fn: (r) => r["_field"] == "frontend_requests")"#),
         StatType::Aggregated => {
             f!(r#"|> filter(fn: (r) => r["_field"] == "{stats_column}")"#)
         },
         // TODO: Detailed should still filter it, but just "group-by" method (call it once per each method ...
         // Or maybe it shouldn't filter it ...
-        StatType::Detailed => "".to_string(),
+        StatType::Detailed => {
+            "".to_string()
+        },
     };
 
     info!("Query start and stop are: {:?} {:?}", query_start, query_stop);
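Note for review: with stats_column = "frontend_requests", the Aggregated arm expands to the Flux fragment |> filter(fn: (r) => r["_field"] == "frontend_requests"), while the Detailed arm contributes an empty string, so detailed queries keep every field and rely on the "_field" group column instead.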
@@ -172,33 +191,8 @@ pub async fn query_user_stats<'a>(
         {group}
         |> aggregateWindow(every: {query_window_seconds}s, fn: mean, createEmpty: false)
         |> group()
-        // |> yield(name: "mean")
     "#);
 
-    // Also make a query for archived
-    // let query_archived = f!(r#"
-    //     // from(bucket: "{bucket}")
-    //     |> range(start: {query_start}, stop: {query_stop})
-    //     |> filter(fn: (r) => r["_measurement"] == "{measurement}")
-    //     |> filter(fn: (r) => r["archive_needed"] == true)
-    //     |> aggregateWindow(every: {query_window_seconds}s, fn: count, createEmpty: false)
-    //     |> yield(name: "count")
-    // "#);
-    // let query_archived = f!(r#"
-    //     from(bucket: "{bucket}")
-    //     |> range(start: {query_start}, stop: {query_stop})
-    //     |> filter(fn: (r) => r["_measurement"] == "{measurement}")
-    //     |> filter(fn: (r) => r["error_responses"] == true)
-    //     |> aggregateWindow(every: {query_window_seconds}s, fn: count, createEmpty: false)
-    //     |> yield(name: "count")
-    // "#);
-
-    // Also make a query for errors
-
-
-    // TODO: Also make a query thats detailed
-
-
     info!("Raw query to db is: {:?}", query);
     let query = Query::new(query.to_string());
    info!("Query to db is: {:?}", query);
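Note for review: pieced together with the commented-out variants removed here, the live query presumably expands to Flux along these lines; the from/range/filter head sits above this hunk, so treat this as an assumption rather than the verbatim source:

    from(bucket: "{bucket}")
        |> range(start: {query_start}, stop: {query_stop})
        |> filter(fn: (r) => r["_measurement"] == "{measurement}")
        {filter_field}
        {filter_chain_id}
        {group}
        |> aggregateWindow(every: {query_window_seconds}s, fn: mean, createEmpty: false)
        |> group()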
@@ -209,27 +203,51 @@ pub async fn query_user_stats<'a>(
     // info!("Direct response is: {:?}", unparsed);
     info!("Got this far 12");
 
+    // Return a different result based on the query type
+    let datapoints = match stat_response_type {
+        StatType::Aggregated => {
             let influx_responses: Vec<AggregatedRpcAccounting> = influxdb_client.query::<AggregatedRpcAccounting>(Some(query)).await?;
             info!("Influx responses are {:?}", &influx_responses);
             for res in &influx_responses {
                 info!("Resp is: {:?}", res);
             }
 
+            // let tmp = influx_responses.into_iter().group_by(|x| {x.time.timestamp()}).into_iter().collect::<Vec<_>>();
+            // info!("Printing grouped item {}", tmp);
+
             // Group by all fields together ..
-    let datapoints = influx_responses
+            // let influx_responses = Vec::new();
+            // let grouped_items = Vec::new();
+
+            // let mut grouped_items = influx_responses
+            //     .into_iter()
+            //     .map(|x| {
+            //         (x.time.clone(), x)
+            //     })
+            //     .into_group_map();
+            // info!("Grouped items are {:?}", grouped_items);
+
+            influx_responses
                 .into_iter()
-        .group_by(|x| {
-            // This looks ugly, revisit later
-            // x.field.clone()
-            (x.clone().field.clone(), x.clone().time)
-        })
+                .map(|x| {
+                    (x.time.clone(), x)
+                })
+                .into_group_map()
                 .into_iter()
                 .map(|(group, grouped_items)| {
+
+                    info!("Group is: {:?}", group);
+
                     // Now put all the fields next to each other
                     // (there will be exactly one field per timestamp, but we want to arrive at a new object)
                     let mut out = HashMap::new();
                     // Could also add a timestamp
 
+                    let mut archive_requests = 0;
+                    let mut error_responses = 0;
+
+                    out.insert("method".to_owned(), json!("null"));
+
                     for x in grouped_items {
                         info!("Iterating over grouped item {:?}", x);
                         out.insert(
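Note for review: the switch from group_by to map + into_group_map is the important change here. With itertools (which this code already uses), group_by only merges consecutive items with equal keys, and its groups cannot outlive the temporary GroupBy value, while into_group_map collects a HashMap<K, Vec<V>> regardless of row order. A standalone sketch:

    use itertools::Itertools;

    fn main() {
        let xs = vec![(1, "a"), (2, "b"), (1, "c")];

        // group_by: the two key-1 items are not adjacent, so they land
        // in separate groups.
        let consecutive: Vec<(i32, usize)> = {
            let groups = xs.iter().group_by(|(k, _)| *k);
            groups.into_iter().map(|(k, g)| (k, g.count())).collect()
        };
        assert_eq!(consecutive, vec![(1, 1), (2, 1), (1, 1)]);

        // into_group_map: both key-1 items end up in the same bucket,
        // which is what grouping rows by timestamp needs.
        let grouped = xs.into_iter().into_group_map();
        assert_eq!(grouped[&1], vec!["a", "c"]);
    }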
@@ -245,9 +263,88 @@ pub async fn query_user_stats<'a>(
                             json!(x.time.timestamp())
                         );
                     }
+
+                        // Add up the archive requests and error responses
+                        // TODO: Gotta double check if errors & archive are based on frontend requests, or other metrics
+                        if x.field == "frontend_requests" && x.archive_needed {
+                            archive_requests += x.value as i32 // This is the number of requests
                     }
+                        if x.field == "frontend_requests" && x.error_response {
+                            error_responses += x.value as i32
+                        }
+
+                    }
+
+                    out.insert("archive_request".to_owned(), json!(archive_requests));
+                    out.insert("error_response".to_owned(), json!(error_responses));
+
                     json!(out)
-            }).collect::<Vec<_>>();
+                }).collect::<Vec<_>>()
+
+
+        },
+        StatType::Detailed => {
+            let influx_responses: Vec<DetailedRpcAccounting> = influxdb_client.query::<DetailedRpcAccounting>(Some(query)).await?;
+            info!("Influx responses are {:?}", &influx_responses);
+            for res in &influx_responses {
+                info!("Resp is: {:?}", res);
+            }
+
+            // Group by all fields together ..
+            influx_responses
+                .into_iter()
+                .map(|x| {
+                    ((x.time.clone(), x.method.clone()), x)
+                })
+                .into_group_map()
+                .into_iter()
+                .map(|(group, grouped_items)| {
+                    // Now put all the fields next to each other
+                    // (there will be exactly one field per timestamp, but we want to arrive at a new object)
+                    let mut out = HashMap::new();
+                    // Could also add a timestamp
+
+                    let mut archive_requests = 0;
+                    let mut error_responses = 0;
+
+                    // Should probably move this outside ... (?)
+                    let method = group.1;
+                    out.insert("method".to_owned(), json!(method));
+
+                    for x in grouped_items {
+                        info!("Iterating over grouped item {:?}", x);
+                        out.insert(
+                            f!(r#"total_{x.field}"#),
+                            // serde_json::Value::Number(serde_json::Number::from(x.value))
+                            json!(x.value)
+                        );
+
+                        if !out.contains_key("query_window_timestamp") {
+                            out.insert(
+                                "query_window_timestamp".to_owned(),
+                                // serde_json::Value::Number(x.time.timestamp().into())
+                                json!(x.time.timestamp())
+                            );
+                        }
+
+                        // Add up the archive requests and error responses
+                        // TODO: Gotta double check if errors & archive are based on frontend requests, or other metrics
+                        if x.field == "frontend_requests" && x.archive_needed {
+                            archive_requests += x.value as i32 // This is the number of requests
+                        }
+                        if x.field == "frontend_requests" && x.error_response {
+                            error_responses += x.value as i32
+                        }
+
+                    }
+
+                    out.insert("archive_request".to_owned(), json!(archive_requests));
+                    out.insert("error_response".to_owned(), json!(error_responses));
+
+                    json!(out)
+                }).collect::<Vec<_>>()
+        }
+    };
 
     // I suppose archive requests could be either gathered by default (then summed up), or retrieved on a second go.
     // Same with error responses ..
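Note for review: each element of datapoints should come out shaped roughly like this (key names taken from the inserts above; the values are made up):

    fn main() {
        // Illustrative only, not output captured from the service.
        let example = serde_json::json!({
            "method": "null",                     // "null" placeholder in aggregated view, real method name in detailed view
            "total_frontend_requests": 42.0,      // one total_{_field} entry per grouped field
            "query_window_timestamp": 1678780033, // start of the aggregation window
            "archive_request": 3,                 // summed where archive_needed was set
            "error_response": 1                   // summed where error_response was set
        });
        println!("{example}");
    }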