web3-proxy/web3_proxy/src/frontend/status.rs

232 lines
7.0 KiB
Rust
Raw Normal View History

2022-10-18 00:47:58 +03:00
//! Used by admins for health checks and inspecting global statistics.
//!
//! For ease of development, users can currently access these endpoints.
//! They will eventually move to another port.
2023-05-17 00:58:00 +03:00
use super::{ResponseCache, ResponseCacheKey};
2023-05-15 20:48:59 +03:00
use crate::app::{Web3ProxyApp, APP_USER_AGENT};
2023-05-17 01:27:18 +03:00
use axum::{
body::{Bytes, Full},
http::StatusCode,
response::{IntoResponse, Response},
2023-06-21 21:28:33 +03:00
Extension, Json,
2023-05-17 01:27:18 +03:00
};
2023-06-21 21:28:33 +03:00
use axum_client_ip::InsecureClientIp;
2022-10-27 03:12:42 +03:00
use axum_macros::debug_handler;
2023-06-21 21:28:33 +03:00
use hashbrown::HashMap;
use http::HeaderMap;
2023-06-08 03:26:38 +03:00
use moka::future::Cache;
use once_cell::sync::Lazy;
2023-06-08 03:26:38 +03:00
use serde::{ser::SerializeStruct, Serialize};
2022-09-10 05:59:07 +03:00
use serde_json::json;
use std::sync::Arc;
use tracing::trace;
static HEALTH_OK: Lazy<Bytes> = Lazy::new(|| Bytes::from("OK\n"));
static HEALTH_NOT_OK: Lazy<Bytes> = Lazy::new(|| Bytes::from(":(\n"));
static BACKUPS_NEEDED_TRUE: Lazy<Bytes> = Lazy::new(|| Bytes::from("true\n"));
static BACKUPS_NEEDED_FALSE: Lazy<Bytes> = Lazy::new(|| Bytes::from("false\n"));
2023-05-17 01:27:18 +03:00
static CONTENT_TYPE_JSON: &str = "application/json";
static CONTENT_TYPE_PLAIN: &str = "text/plain";
2023-06-21 21:28:33 +03:00
#[debug_handler]
pub async fn debug_request(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: InsecureClientIp,
headers: HeaderMap,
) -> impl IntoResponse {
let (_, _, status) = _status(app).await;
let status: serde_json::Value = serde_json::from_slice(&status).unwrap();
let headers: HashMap<_, _> = headers
.into_iter()
.filter_map(|(k, v)| {
if let Some(k) = k {
let k = k.to_string();
let v = if let Ok(v) = std::str::from_utf8(v.as_bytes()) {
v.to_string()
} else {
format!("{:?}", v)
};
Some((k, v))
} else {
None
}
})
.collect();
let x = json!({
"ip": format!("{:?}", ip),
"status": status,
"headers": headers,
});
Json(x)
}
2022-10-18 00:47:58 +03:00
/// Health check page for load balancers to use.
2022-10-20 01:26:33 +03:00
#[debug_handler]
pub async fn health(
Extension(app): Extension<Arc<Web3ProxyApp>>,
2023-05-17 00:58:00 +03:00
Extension(cache): Extension<Arc<ResponseCache>>,
) -> impl IntoResponse {
let (code, content_type, body) = cache
2023-06-08 03:26:38 +03:00
.get_with(ResponseCacheKey::Health, async move { _health(app).await })
.await;
2023-05-17 01:27:18 +03:00
Response::builder()
.status(code)
.header("content-type", content_type)
.body(Full::from(body))
.unwrap()
}
2023-05-15 20:48:59 +03:00
// TODO: _health doesn't need to be async, but _quick_cache_ttl needs an async function
#[inline]
2023-05-17 01:27:18 +03:00
async fn _health(app: Arc<Web3ProxyApp>) -> (StatusCode, &'static str, Bytes) {
2023-05-30 03:26:32 +03:00
trace!("health is not cached");
if app.balanced_rpcs.synced() {
2023-05-17 01:27:18 +03:00
(StatusCode::OK, CONTENT_TYPE_PLAIN, HEALTH_OK.clone())
2022-06-29 21:22:53 +03:00
} else {
2023-05-17 01:27:18 +03:00
(
StatusCode::SERVICE_UNAVAILABLE,
CONTENT_TYPE_PLAIN,
HEALTH_NOT_OK.clone(),
)
2022-06-29 21:22:53 +03:00
}
}
/// Easy alerting if backup servers are in use.
#[debug_handler]
pub async fn backups_needed(
Extension(app): Extension<Arc<Web3ProxyApp>>,
2023-05-17 00:58:00 +03:00
Extension(cache): Extension<Arc<ResponseCache>>,
) -> impl IntoResponse {
let (code, content_type, body) = cache
2023-06-08 03:26:38 +03:00
.get_with(ResponseCacheKey::BackupsNeeded, async move {
_backups_needed(app).await
})
.await;
2023-05-17 01:27:18 +03:00
Response::builder()
.status(code)
.header("content-type", content_type)
.body(Full::from(body))
.unwrap()
}
2023-05-15 20:48:59 +03:00
#[inline]
2023-05-17 01:27:18 +03:00
async fn _backups_needed(app: Arc<Web3ProxyApp>) -> (StatusCode, &'static str, Bytes) {
2023-05-30 03:26:32 +03:00
trace!("backups_needed is not cached");
let code = {
let consensus_rpcs = app
.balanced_rpcs
.watch_consensus_rpcs_sender
.borrow()
.clone();
if let Some(ref consensus_rpcs) = consensus_rpcs {
if consensus_rpcs.backups_needed {
StatusCode::INTERNAL_SERVER_ERROR
} else {
StatusCode::OK
}
} else {
// if no consensus, we still "need backups". we just don't have any. which is worse
StatusCode::INTERNAL_SERVER_ERROR
}
};
if matches!(code, StatusCode::OK) {
2023-05-17 01:27:18 +03:00
(code, CONTENT_TYPE_PLAIN, BACKUPS_NEEDED_FALSE.clone())
} else {
2023-05-17 01:27:18 +03:00
(code, CONTENT_TYPE_PLAIN, BACKUPS_NEEDED_TRUE.clone())
}
}
2022-10-18 00:47:58 +03:00
/// Very basic status page.
///
2023-05-15 20:48:59 +03:00
/// TODO: replace this with proper stats and monitoring. frontend uses it for their public dashboards though
2022-10-20 01:26:33 +03:00
#[debug_handler]
2022-11-16 23:17:33 +03:00
pub async fn status(
Extension(app): Extension<Arc<Web3ProxyApp>>,
2023-05-17 00:58:00 +03:00
Extension(cache): Extension<Arc<ResponseCache>>,
2022-11-16 23:17:33 +03:00
) -> impl IntoResponse {
let (code, content_type, body) = cache
2023-06-08 03:26:38 +03:00
.get_with(ResponseCacheKey::Status, async move { _status(app).await })
.await;
2023-05-17 01:27:18 +03:00
Response::builder()
.status(code)
.header("content-type", content_type)
.body(Full::from(body))
.unwrap()
}
2023-05-15 20:48:59 +03:00
// TODO: _status doesn't need to be async, but _quick_cache_ttl needs an async function
#[inline]
2023-05-17 01:27:18 +03:00
async fn _status(app: Arc<Web3ProxyApp>) -> (StatusCode, &'static str, Bytes) {
2023-05-30 03:26:32 +03:00
trace!("status is not cached");
2023-06-27 07:11:16 +03:00
// TODO: get out of app.balanced_rpcs instead?
let head_block = app.watch_consensus_head_receiver.borrow().clone();
// TODO: what else should we include? uptime, cache hit rates, cpu load, memory used
// TODO: the hostname is probably not going to change. only get once at the start?
let body = json!({
"balanced_rpcs": app.balanced_rpcs,
"bundler_4337_rpcs": app.bundler_4337_rpcs,
"caches": [
MokaCacheSerializer(&app.bearer_token_semaphores),
MokaCacheSerializer(&app.ip_semaphores),
MokaCacheSerializer(&app.jsonrpc_response_cache),
MokaCacheSerializer(&app.rpc_secret_key_cache),
MokaCacheSerializer(&app.user_balance_cache),
MokaCacheSerializer(&app.user_semaphores),
],
2023-06-07 20:48:55 +03:00
"chain_id": app.config.chain_id,
2023-06-27 07:11:16 +03:00
"head_block_num": head_block.as_ref().map(|x| x.number()),
"head_block_hash": head_block.as_ref().map(|x| x.hash()),
"hostname": app.hostname,
2023-06-13 09:03:38 +03:00
"payment_factory_address": app.config.deposit_factory_contract,
2023-06-07 20:48:55 +03:00
"private_rpcs": app.private_rpcs,
"version": APP_USER_AGENT,
});
let body = body.to_string().into_bytes();
let body = Bytes::from(body);
let code = if app.balanced_rpcs.synced() {
StatusCode::OK
} else {
StatusCode::INTERNAL_SERVER_ERROR
};
2022-09-10 05:59:07 +03:00
2023-05-17 01:27:18 +03:00
(code, CONTENT_TYPE_JSON, body)
2022-06-05 22:58:47 +03:00
}
2023-06-08 03:26:38 +03:00
/// Serialize a moka cache's runtime statistics (entry count, name, weighted size)
/// for inclusion in the /status payload.
pub struct MokaCacheSerializer<'a, K, V>(pub &'a Cache<K, V>);

impl<'a, K, V> Serialize for MokaCacheSerializer<'a, K, V> {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let cache = self.0;

        let mut state = serializer.serialize_struct("MokaCache", 3)?;

        state.serialize_field("entry_count", &cache.entry_count())?;
        state.serialize_field("name", &cache.name())?;
        state.serialize_field("weighted_size", &cache.weighted_size())?;

        state.end()
    }
}