use crate::app::DatabaseReplica;
use crate::frontend::errors::FrontendErrorResponse;
use crate::{app::Web3ProxyApp, user_token::UserBearerToken};
use anyhow::Context;
use axum::response::{IntoResponse, Response};
use axum::Json;
use axum::{
    headers::{authorization::Bearer, Authorization},
    TypedHeader,
};
use chrono::{NaiveDateTime, Utc};
use entities::{login, rpc_accounting, rpc_key};
use hashbrown::HashMap;
use http::StatusCode;
use log::{debug, warn};
use migration::sea_orm::{
    ColumnTrait, DatabaseConnection, EntityTrait, PaginatorTrait, QueryFilter, QueryOrder,
    QuerySelect, Select,
};
use migration::{Condition, Expr, SimpleExpr};
use redis_rate_limiter::redis;
use redis_rate_limiter::{redis::AsyncCommands, RedisConnection};
use serde_json::json;

/// Get the user id attached to the given bearer token.
/// First checks redis. Then checks the database.
/// 0 means all users.
/// This authenticates that the bearer is allowed to view this user_id's stats.
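///
/// A minimal sketch of the intended call site (mirroring `query_user_stats`
/// below); the connections are assumed to already be checked out from the app:
///
/// ```ignore
/// let user_id =
///     get_user_id_from_params(&mut redis_conn, &db_conn, &db_replica, bearer, params).await?;
/// ```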
pub async fn get_user_id_from_params(
    redis_conn: &mut RedisConnection,
    db_conn: &DatabaseConnection,
    db_replica: &DatabaseReplica,
    // this is a long type. should we strip it down?
    bearer: Option<TypedHeader<Authorization<Bearer>>>,
    params: &HashMap<String, String>,
) -> Result<u64, FrontendErrorResponse> {
    match (bearer, params.get("user_id")) {
        (Some(TypedHeader(Authorization(bearer))), Some(user_id)) => {
            // check for the bearer cache key
            let user_bearer_token = UserBearerToken::try_from(bearer)?;

            let user_redis_key = user_bearer_token.redis_key();

            let mut save_to_redis = false;

            // get the user id that is attached to this bearer token
            let bearer_user_id = match redis_conn.get::<_, u64>(&user_redis_key).await {
                Err(_) => {
                    // TODO: inspect the redis error? if redis is down we should warn
                    // this also means redis being down will not kill our app. Everything will need a db read query though.

                    let user_login = login::Entity::find()
                        .filter(login::Column::BearerToken.eq(user_bearer_token.uuid()))
                        .one(db_replica.conn())
                        .await
                        .context("database error while querying for user")?
                        .ok_or(FrontendErrorResponse::AccessDenied)?;

                    // if expired, delete ALL expired logins
                    let now = Utc::now();
                    if now > user_login.expires_at {
                        // this row is expired! do not allow auth!
                        // delete ALL expired logins.
                        let delete_result = login::Entity::delete_many()
                            .filter(login::Column::ExpiresAt.lte(now))
                            .exec(db_conn)
                            .await?;

                        // TODO: emit a stat? if this is high something weird might be happening
                        debug!("cleared expired logins: {:?}", delete_result);

                        return Err(FrontendErrorResponse::AccessDenied);
                    }

                    save_to_redis = true;

                    user_login.user_id
                }
                Ok(x) => {
                    // TODO: push cache ttl further in the future?
                    x
                }
            };

            let user_id: u64 = user_id.parse().context("Parsing user_id param")?;

            if bearer_user_id != user_id {
                return Err(FrontendErrorResponse::AccessDenied);
            }

            if save_to_redis {
                // TODO: how long? we store in the database for 4 weeks
                let one_day = 60 * 60 * 24;

                if let Err(err) = redis_conn
                    .set_ex::<_, _, ()>(user_redis_key, user_id, one_day)
                    .await
                {
                    warn!("Unable to save user bearer token to redis: {}", err)
                }
            }

            Ok(bearer_user_id)
        }
        (_, None) => {
            // they may have a bearer token, but we don't care about it on public pages
            // 0 means all
            Ok(0)
        }
        (None, Some(_)) => {
            // they do not have a bearer token, but requested a specific id. block
            // TODO: return a proper error code with a useful message
            // TODO: maybe instead of this sharp edged warn, we have a config value?
            // TODO: check config for if we should deny or allow this
            Err(FrontendErrorResponse::AccessDenied)
            // // TODO: make this a flag
            // warn!("allowing without auth during development!");
            // Ok(x.parse()?)
        }
    }
}

/// only allow rpc_key to be set if user_id is also set.
/// this will keep people from reading someone else's keys.
/// 0 means none.
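///
/// For example (an illustrative sketch, not a call from this file): with an
/// unauthenticated `user_id` of 0, any `rpc_key_id` param is ignored:
///
/// ```ignore
/// assert_eq!(get_rpc_key_id_from_params(0, &params)?, 0);
/// ```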
pub fn get_rpc_key_id_from_params(
    user_id: u64,
    params: &HashMap<String, String>,
) -> anyhow::Result<u64> {
    if user_id > 0 {
        params.get("rpc_key_id").map_or_else(
            || Ok(0),
            |c| {
                let c = c.parse()?;

                Ok(c)
            },
        )
    } else {
        Ok(0)
    }
}

pub fn get_chain_id_from_params(
    app: &Web3ProxyApp,
    params: &HashMap<String, String>,
) -> anyhow::Result<u64> {
    params.get("chain_id").map_or_else(
        || Ok(app.config.chain_id),
        |c| {
            let c = c.parse()?;

            Ok(c)
        },
    )
}

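/// Parse the `query_start` param as a unix timestamp, defaulting to 30 days
/// ago when it is absent. A quick worked example (values assumed, not from
/// this file): `query_start=1640995200` parses to 2022-01-01 00:00:00 UTC.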
pub fn get_query_start_from_params(
    params: &HashMap<String, String>,
) -> anyhow::Result<chrono::NaiveDateTime> {
    params.get("query_start").map_or_else(
        || {
            // no timestamp in params. set default
            let x = chrono::Utc::now() - chrono::Duration::days(30);

            Ok(x.naive_utc())
        },
        |x: &String| {
            // parse the given timestamp
            let x = x.parse::<i64>().context("parsing timestamp query param")?;

            // TODO: error code 401
            let x =
                NaiveDateTime::from_timestamp_opt(x, 0).context("timestamp query param out of range")?;

            Ok(x)
        },
    )
}

pub fn get_page_from_params(params: &HashMap<String, String>) -> anyhow::Result<u64> {
    params.get("page").map_or_else::<anyhow::Result<u64>, _, _>(
        || {
            // no page in params. set default
            Ok(0)
        },
        |x: &String| {
            // parse the given page
            // TODO: error code 401
            let x = x.parse().context("parsing page query from params")?;

            Ok(x)
        },
    )
}

pub fn get_query_window_seconds_from_params(
    params: &HashMap<String, String>,
) -> Result<u64, FrontendErrorResponse> {
    params.get("query_window_seconds").map_or_else(
        || {
            // no query_window_seconds in params. set default
            Ok(0)
        },
        |query_window_seconds: &String| {
            // parse the given seconds
            // TODO: error code 401
            query_window_seconds.parse::<u64>().map_err(|e| {
                FrontendErrorResponse::StatusCode(
                    StatusCode::BAD_REQUEST,
                    "Unable to parse query_window_seconds".to_string(),
                    Some(e.into()),
                )
            })
        },
    )
}

pub fn filter_query_window_seconds(
    query_window_seconds: u64,
    response: &mut HashMap<&str, serde_json::Value>,
    q: Select<rpc_accounting::Entity>,
) -> Result<Select<rpc_accounting::Entity>, FrontendErrorResponse> {
    if query_window_seconds == 0 {
        // TODO: order by more than this?
        // query_window_seconds is not set so we aggregate all records
        // TODO: i am pretty sure we need to filter by something
        return Ok(q);
    }

    // TODO: is there a better way to do this? how can we get "period_datetime" into this with types?
    // TODO: how can we get the first window to start at query_start_timestamp
    let expr = Expr::cust_with_values(
        "FLOOR(UNIX_TIMESTAMP(rpc_accounting.period_datetime) / ?) * ?",
        [query_window_seconds, query_window_seconds],
    );
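
    // As a worked example of the bucketing above: with query_window_seconds = 300,
    // any period_datetime with a unix timestamp in 600..900 maps to
    // FLOOR(t / 300) * 300 = 600, so all of those rows aggregate into one window.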

    response.insert(
        "query_window_seconds",
        serde_json::Value::Number(query_window_seconds.into()),
    );

    let q = q
        .column_as(expr, "query_window_timestamp")
        .group_by(Expr::cust("query_window_timestamp"))
        // TODO: is there a simpler way to order_by?
        .order_by_asc(SimpleExpr::Custom("query_window_timestamp".to_string()));

    Ok(q)
}

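/// How much detail to include in the stats response: `Aggregated` returns one
/// summed row per window, while `Detailed` also groups by error response,
/// method, and archive request (see `query_user_stats` below).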
pub enum StatResponse {
    Aggregated,
    Detailed,
}

pub async fn query_user_stats<'a>(
    app: &'a Web3ProxyApp,
    bearer: Option<TypedHeader<Authorization<Bearer>>>,
    params: &'a HashMap<String, String>,
    stat_response_type: StatResponse,
) -> Result<Response, FrontendErrorResponse> {
    let db_conn = app.db_conn().context("query_user_stats needs a db")?;
    let db_replica = app
        .db_replica()
        .context("query_user_stats needs a db replica")?;
    let mut redis_conn = app
        .redis_conn()
        .await
        .context("query_user_stats had a redis connection error")?
        .context("query_user_stats needs a redis")?;

    // get the user id first. if it is 0, we should use a cache on the app
    let user_id =
        get_user_id_from_params(&mut redis_conn, &db_conn, &db_replica, bearer, params).await?;
    // get the query window seconds now so that we can pick a cache with a good TTL
    // TODO: for now though, just do one cache. its easier
    let query_window_seconds = get_query_window_seconds_from_params(params)?;
    let query_start = get_query_start_from_params(params)?;
    let chain_id = get_chain_id_from_params(app, params)?;
    let page = get_page_from_params(params)?;

    let cache_key = if user_id == 0 {
        // TODO: cacheable query_window_seconds from config
        if [60, 600, 3600, 86400, 86400 * 7, 86400 * 30].contains(&query_window_seconds)
            && query_start.timestamp() % (query_window_seconds as i64) == 0
        {
            None
        } else {
            // TODO: is this a good key?
            let redis_cache_key = format!(
                "query_user_stats:{}:{}:{}:{}:{}",
                chain_id, user_id, query_start, query_window_seconds, page,
            );
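
            // for example (illustrative values only), the final key might look
            // like "query_user_stats:1:0:2022-01-01 00:00:00:3600:0"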

            let cached_result: Result<(String, u64), _> = redis::pipe()
                .atomic()
                // get the key and its ttl
                .get(&redis_cache_key)
                .ttl(&redis_cache_key)
                // do the query
                .query_async(&mut redis_conn)
                .await;

            // redis being down should not break the stats page!
            if let Ok((body, ttl)) = cached_result {
                let mut response = body.into_response();

                let headers = response.headers_mut();

                headers.insert(
                    "Cache-Control",
                    format!("max-age={}", ttl)
                        .parse()
                        .expect("max-age should always parse"),
                );

                // TODO: emit a stat

                return Ok(response);
            }

            Some(redis_cache_key)
        }
    } else {
        None
    };

    let mut response_body = HashMap::new();

    let mut q = rpc_accounting::Entity::find()
        .select_only()
        .column_as(
            rpc_accounting::Column::FrontendRequests.sum(),
            "total_frontend_requests",
        )
        .column_as(
            rpc_accounting::Column::BackendRequests.sum(),
            "total_backend_retries",
        )
        .column_as(
            rpc_accounting::Column::CacheMisses.sum(),
            "total_cache_misses",
        )
        .column_as(rpc_accounting::Column::CacheHits.sum(), "total_cache_hits")
        .column_as(
            rpc_accounting::Column::SumResponseBytes.sum(),
            "total_response_bytes",
        )
        .column_as(
            rpc_accounting::Column::ErrorResponse.sum(),
            "total_error_responses",
        )
        .column_as(
            rpc_accounting::Column::SumResponseMillis.sum(),
            "total_response_millis",
        );

    // TODO: clean up the code below. no need for more `let q`
    let mut condition = Condition::all();

    if let StatResponse::Detailed = stat_response_type {
        // group by the columns that we use as keys in other places of the code
        q = q
            .column(rpc_accounting::Column::ErrorResponse)
            .group_by(rpc_accounting::Column::ErrorResponse)
            .column(rpc_accounting::Column::Method)
            .group_by(rpc_accounting::Column::Method)
            .column(rpc_accounting::Column::ArchiveRequest)
            .group_by(rpc_accounting::Column::ArchiveRequest);
    }

    // TODO: have q be &mut?
    q = filter_query_window_seconds(query_window_seconds, &mut response_body, q)?;

    // aggregate stats after query_start
    // TODO: maximum query_start of 90 days ago?
    // TODO: if no query_start, don't add to response or condition
    response_body.insert(
        "query_start",
        serde_json::Value::Number(query_start.timestamp().into()),
    );
    condition = condition.add(rpc_accounting::Column::PeriodDatetime.gte(query_start));

    if chain_id == 0 {
        // fetch all the chains
    } else {
        // filter on chain_id
        condition = condition.add(rpc_accounting::Column::ChainId.eq(chain_id));

        response_body.insert("chain_id", serde_json::Value::Number(chain_id.into()));
    }

    if user_id == 0 {
        // 0 means everyone. don't filter on user
    } else {
        q = q.left_join(rpc_key::Entity);

        condition = condition.add(rpc_key::Column::UserId.eq(user_id));

        response_body.insert("user_id", serde_json::Value::Number(user_id.into()));
    }

    // filter on rpc_key_id
    // if rpc_key_id, all the requests without a key will be loaded
    // TODO: move getting the param and checking the bearer token into a helper function
    if let Some(rpc_key_id) = params.get("rpc_key_id") {
        let rpc_key_id = rpc_key_id.parse::<u64>().map_err(|e| {
            FrontendErrorResponse::StatusCode(
                StatusCode::BAD_REQUEST,
                "Unable to parse rpc_key_id".to_string(),
                Some(e.into()),
            )
        })?;

        response_body.insert("rpc_key_id", serde_json::Value::Number(rpc_key_id.into()));

        condition = condition.add(rpc_accounting::Column::RpcKeyId.eq(rpc_key_id));

        q = q.group_by(rpc_accounting::Column::RpcKeyId);

        if user_id == 0 {
            // no user id, we did not join above
            q = q.left_join(rpc_key::Entity);
        } else {
            // user_id added a join on rpc_key already. only filter on user_id
            condition = condition.add(rpc_key::Column::UserId.eq(user_id));
        }
    }

    // now that all the conditions are set up, add them to the query
    q = q.filter(condition);

    // TODO: trace log the query here? i think sea orm has a useful log level for this

    // set up pagination
    response_body.insert("page", serde_json::Value::Number(page.into()));

    // TODO: page size from a param with a max from the config
    let page_size = 1_000;
    response_body.insert("page_size", serde_json::Value::Number(page_size.into()));

    // query the database for the number of items and pages
    let pages_result = q
        .clone()
        .paginate(db_replica.conn(), page_size)
        .num_items_and_pages()
        .await?;

    response_body.insert("num_items", pages_result.number_of_items.into());
    response_body.insert("num_pages", pages_result.number_of_pages.into());

    // query the database (TODO: combine with the pages_result query?)
    let query_response = q
        .into_json()
        .paginate(db_replica.conn(), page_size)
        .fetch_page(page)
        .await?;

    // TODO: be a lot smarter about caching
    let ttl = 60;

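    // At this point response_body holds the query metadata (page, page_size,
    // num_items, num_pages, query_start, and chain_id/user_id/rpc_key_id when
    // they were filtered on) plus, below, the aggregated rows themselves.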
    // add the query_response to the json response
    response_body.insert("result", serde_json::Value::Array(query_response));

    let mut response = Json(&response_body).into_response();

    let headers = response.headers_mut();

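    // anonymous responses (cache_key is Some) can be served from shared caches,
    // so they are marked "public"; authenticated responses are marked "private"
    // so only the requesting client caches them.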
    if let Some(cache_key) = cache_key {
        headers.insert(
            "Cache-Control",
            format!("public, max-age={}", ttl)
                .parse()
                .expect("max-age should always parse"),
        );

        let cache_body = json!(response_body).to_string();

        if let Err(err) = redis_conn
            .set_ex::<_, _, ()>(cache_key, cache_body, ttl)
            .await
        {
            warn!("Redis error while caching query_user_stats: {:?}", err);
        }
    } else {
        headers.insert(
            "Cache-Control",
            format!("private, max-age={}", ttl)
                .parse()
                .expect("max-age should always parse"),
        );
    }

    // TODO: Last-Modified header?

    Ok(response)
}