From 5886db20dd9f8d0c65dce7aa794da85d6741463e Mon Sep 17 00:00:00 2001
From: Bryan Stitt <bryan@stitthappens.com>
Date: Wed, 16 Nov 2022 20:17:33 +0000
Subject: [PATCH] cache status page for 1 second

---
 web3_proxy/src/frontend/mod.rs    | 22 ++++++++++++++++++--
 web3_proxy/src/frontend/status.rs | 34 ++++++++++++++++++++-----------
 2 files changed, 42 insertions(+), 14 deletions(-)

diff --git a/web3_proxy/src/frontend/mod.rs b/web3_proxy/src/frontend/mod.rs
index da55808d..4faaaf58 100644
--- a/web3_proxy/src/frontend/mod.rs
+++ b/web3_proxy/src/frontend/mod.rs
@@ -16,15 +16,31 @@ use axum::{
 };
 use http::header::AUTHORIZATION;
 use log::info;
-use std::iter::once;
+use moka::future::Cache;
 use std::net::SocketAddr;
 use std::sync::Arc;
+use std::{iter::once, time::Duration};
 use tower_http::cors::CorsLayer;
 use tower_http::sensitive_headers::SetSensitiveRequestHeadersLayer;
 
-/// Start the frontend server.
+#[derive(Clone, Hash, PartialEq, Eq)]
+pub enum FrontendResponseCaches {
+    Status,
+}
+// TODO: what should this cache's value be?
+pub type FrontendResponseCache =
+    Cache<FrontendResponseCaches, Arc<serde_json::Value>, hashbrown::hash_map::DefaultHashBuilder>;
+
+/// Start the frontend server.
 pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()> {
+    // setup caches for whatever the frontend needs
+    // TODO: a moka cache is probably way overkill for this.
+    // no need for max items. only expire because of time to live
+    let response_cache: FrontendResponseCache = Cache::builder()
+        .time_to_live(Duration::from_secs(1))
+        .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());
+
     // build our axum Router
     let app = Router::new()
         // routes should be ordered most to least common
 
@@ -70,6 +86,8 @@ pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()
         .layer(CorsLayer::very_permissive())
         // application state
         .layer(Extension(proxy_app.clone()))
+        // frontend caches
+        .layer(Extension(response_cache))
         // 404 for any unknown routes
         .fallback(errors::handler_404.into_service());
 
diff --git a/web3_proxy/src/frontend/status.rs b/web3_proxy/src/frontend/status.rs
index 5d1fb2e3..ceda6e95 100644
--- a/web3_proxy/src/frontend/status.rs
+++ b/web3_proxy/src/frontend/status.rs
@@ -3,6 +3,7 @@
 //! For ease of development, users can currently access these endponts.
 //! They will eventually move to another port.
 
+use super::{FrontendResponseCache, FrontendResponseCaches};
 use crate::app::Web3ProxyApp;
 use axum::{http::StatusCode, response::IntoResponse, Extension, Json};
 use axum_macros::debug_handler;
@@ -36,19 +37,28 @@ pub async fn prometheus(Extension(app): Extension<Arc<Web3ProxyApp>>) -> impl In
 
 /// TODO: replace this with proper stats and monitoring
 #[debug_handler]
-pub async fn status(Extension(app): Extension<Arc<Web3ProxyApp>>) -> impl IntoResponse {
-    app.pending_transactions.sync();
-    app.rpc_secret_key_cache.sync();
+pub async fn status(
+    Extension(app): Extension<Arc<Web3ProxyApp>>,
+    Extension(response_cache): Extension<FrontendResponseCache>,
+) -> impl IntoResponse {
+    let body = response_cache
+        .get_with(FrontendResponseCaches::Status, async {
+            app.pending_transactions.sync();
+            app.rpc_secret_key_cache.sync();
 
-    // TODO: what else should we include? uptime, cache hit rates, cpu load
-    let body = json!({
-        "pending_transactions_count": app.pending_transactions.entry_count(),
-        "pending_transactions_size": app.pending_transactions.weighted_size(),
-        "user_cache_count": app.rpc_secret_key_cache.entry_count(),
-        "user_cache_size": app.rpc_secret_key_cache.weighted_size(),
-        "balanced_rpcs": app.balanced_rpcs,
-        "private_rpcs": app.private_rpcs,
-    });
+            // TODO: what else should we include? uptime, cache hit rates, cpu load
+            let body = json!({
+                "pending_transactions_count": app.pending_transactions.entry_count(),
+                "pending_transactions_size": app.pending_transactions.weighted_size(),
+                "user_cache_count": app.rpc_secret_key_cache.entry_count(),
+                "user_cache_size": app.rpc_secret_key_cache.weighted_size(),
+                "balanced_rpcs": app.balanced_rpcs,
+                "private_rpcs": app.private_rpcs,
+            });
+
+            Arc::new(body)
+        })
+        .await;
 
     Json(body)
 }