cache status page for 1 second

Bryan Stitt 2022-11-16 20:17:33 +00:00
parent 4d8808b886
commit 5886db20dd
2 changed files with 42 additions and 14 deletions


@@ -16,15 +16,31 @@ use axum::{
 };
 use http::header::AUTHORIZATION;
 use log::info;
-use std::iter::once;
+use moka::future::Cache;
 use std::net::SocketAddr;
 use std::sync::Arc;
+use std::{iter::once, time::Duration};
 use tower_http::cors::CorsLayer;
 use tower_http::sensitive_headers::SetSensitiveRequestHeadersLayer;
 
 /// Start the frontend server.
+#[derive(Clone, Hash, PartialEq, Eq)]
+pub enum FrontendResponseCaches {
+    Status,
+}
+
+// TODO: what should this cache's value be?
+pub type FrontendResponseCache =
+    Cache<FrontendResponseCaches, Arc<serde_json::Value>, hashbrown::hash_map::DefaultHashBuilder>;
+
+/// Start the frontend server.
 pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()> {
+    // setup caches for whatever the frontend needs
+    // TODO: a moka cache is probably way overkill for this.
+    // no need for max items. only expire because of time to live
+    let response_cache: FrontendResponseCache = Cache::builder()
+        .time_to_live(Duration::from_secs(1))
+        .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());
+
     // build our axum Router
     let app = Router::new()
         // routes should be ordered most to least common
@@ -70,6 +86,8 @@ pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()> {
         .layer(CorsLayer::very_permissive())
         // application state
         .layer(Extension(proxy_app.clone()))
+        // frontend caches
+        .layer(Extension(response_cache))
         // 404 for any unknown routes
         .fallback(errors::handler_404.into_service());
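
The first file defines the cache itself: a moka future::Cache keyed by the small FrontendResponseCaches enum, holding Arc<serde_json::Value> bodies, expiring entries one second after they are written, and handed to handlers through an axum Extension layer. The standalone sketch below is not part of the commit; it only illustrates the time-to-live behaviour in isolation. The CacheKey enum, the string value, and the builds counter are illustrative stand-ins, and it calls the plain build() instead of build_with_hasher, which in the commit merely swaps in hashbrown's default hasher.

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;

use moka::future::Cache;

// stand-in for the commit's FrontendResponseCaches enum
#[derive(Clone, Hash, PartialEq, Eq)]
enum CacheKey {
    Status,
}

#[tokio::main]
async fn main() {
    // counts how many times the "expensive" body is actually built
    let builds = Arc::new(AtomicUsize::new(0));

    // no capacity limit; entries only leave through the one-second time-to-live
    let cache: Cache<CacheKey, Arc<String>> = Cache::builder()
        .time_to_live(Duration::from_secs(1))
        .build();

    for _ in 0..3 {
        let builds = builds.clone();
        let body = cache
            .get_with(CacheKey::Status, async move {
                builds.fetch_add(1, Ordering::SeqCst);
                Arc::new("pretend this is the /status JSON".to_string())
            })
            .await;
        println!("served: {body}");
    }
    // three back-to-back requests within the same second share a single build
    assert_eq!(builds.load(Ordering::SeqCst), 1);

    // once the time-to-live has passed, the next request rebuilds the value
    tokio::time::sleep(Duration::from_millis(1100)).await;
    let builds_after_expiry = builds.clone();
    cache
        .get_with(CacheKey::Status, async move {
            builds_after_expiry.fetch_add(1, Ordering::SeqCst);
            Arc::new("rebuilt after expiry".to_string())
        })
        .await;
    assert_eq!(builds.load(Ordering::SeqCst), 2);
}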


@@ -3,6 +3,7 @@
 //! For ease of development, users can currently access these endpoints.
 //! They will eventually move to another port.
 
+use super::{FrontendResponseCache, FrontendResponseCaches};
 use crate::app::Web3ProxyApp;
 use axum::{http::StatusCode, response::IntoResponse, Extension, Json};
 use axum_macros::debug_handler;
@@ -36,19 +37,28 @@ pub async fn prometheus(Extension(app): Extension<Arc<Web3ProxyApp>>) -> impl IntoResponse {
 /// TODO: replace this with proper stats and monitoring
 #[debug_handler]
-pub async fn status(Extension(app): Extension<Arc<Web3ProxyApp>>) -> impl IntoResponse {
-    app.pending_transactions.sync();
-    app.rpc_secret_key_cache.sync();
+pub async fn status(
+    Extension(app): Extension<Arc<Web3ProxyApp>>,
+    Extension(response_cache): Extension<FrontendResponseCache>,
+) -> impl IntoResponse {
+    let body = response_cache
+        .get_with(FrontendResponseCaches::Status, async {
+            app.pending_transactions.sync();
+            app.rpc_secret_key_cache.sync();
 
-    // TODO: what else should we include? uptime, cache hit rates, cpu load
-    let body = json!({
-        "pending_transactions_count": app.pending_transactions.entry_count(),
-        "pending_transactions_size": app.pending_transactions.weighted_size(),
-        "user_cache_count": app.rpc_secret_key_cache.entry_count(),
-        "user_cache_size": app.rpc_secret_key_cache.weighted_size(),
-        "balanced_rpcs": app.balanced_rpcs,
-        "private_rpcs": app.private_rpcs,
-    });
+            // TODO: what else should we include? uptime, cache hit rates, cpu load
+            let body = json!({
+                "pending_transactions_count": app.pending_transactions.entry_count(),
+                "pending_transactions_size": app.pending_transactions.weighted_size(),
+                "user_cache_count": app.rpc_secret_key_cache.entry_count(),
+                "user_cache_size": app.rpc_secret_key_cache.weighted_size(),
+                "balanced_rpcs": app.balanced_rpcs,
+                "private_rpcs": app.private_rpcs,
+            });
+
+            Arc::new(body)
+        })
+        .await;
 
     Json(body)
 }
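
In the second file, the /status handler now receives the cache through an axum Extension extractor and assembles its JSON inside response_cache.get_with(FrontendResponseCaches::Status, async { ... }), so the stats are built at most once per second and come back as a cheaply cloned Arc<serde_json::Value>. get_with also coalesces concurrent calls on the same missing key into a single evaluation of the init future, which is what keeps a burst of /status requests from stampeding the app. The sketch below is not from the commit; it simulates such a burst with spawned tasks, and the "status" key, the builds counter, and the 10 ms sleep are illustrative stand-ins.

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;

use moka::future::Cache;
use serde_json::json;

#[tokio::main]
async fn main() {
    // same shape as the commit's cache: one-second TTL, Arc'd JSON values
    let cache: Cache<&'static str, Arc<serde_json::Value>> = Cache::builder()
        .time_to_live(Duration::from_secs(1))
        .build();

    // counts how many times the status body actually gets assembled
    let builds = Arc::new(AtomicUsize::new(0));

    // simulate a burst of concurrent requests hitting the same endpoint
    let mut handles = Vec::new();
    for _ in 0..50 {
        let cache = cache.clone();
        let builds = builds.clone();
        handles.push(tokio::spawn(async move {
            cache
                .get_with("status", async move {
                    builds.fetch_add(1, Ordering::SeqCst);
                    // stand-in for gathering the real stats
                    tokio::time::sleep(Duration::from_millis(10)).await;
                    Arc::new(json!({ "pending_transactions_count": 0 }))
                })
                .await
        }));
    }

    for handle in handles {
        // every task receives a clone of the same Arc'd body
        let _body = handle.await.unwrap();
    }

    // the whole burst was served from a single build of the body
    assert_eq!(builds.load(Ordering::SeqCst), 1);
}

Keeping the cached value behind an Arc means a cache hit hands the handler a cloned pointer rather than a copy of the whole JSON tree before it is wrapped in Json(body).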