diff --git a/web3_proxy/src/block_number.rs b/web3_proxy/src/block_number.rs
index 478d4dbd..9faaaa62 100644
--- a/web3_proxy/src/block_number.rs
+++ b/web3_proxy/src/block_number.rs
@@ -222,6 +222,7 @@ impl From<&Web3ProxyBlock> for BlockNumOrHash {
     }
 }
 
+/// TODO: have another type that contains &mut Value of the block_needed or from_block+to_block. this will make it easier to modify the request
 /// TODO: change this to also return the hash needed?
 /// this replaces any "latest" identifiers in the JsonRpcRequest with the current block number which feels like the data is structured wrong
 #[derive(Debug, Default, Hash, Eq, PartialEq)]
diff --git a/web3_proxy/src/config.rs b/web3_proxy/src/config.rs
index 3f5993e2..89c1d3ec 100644
--- a/web3_proxy/src/config.rs
+++ b/web3_proxy/src/config.rs
@@ -203,6 +203,12 @@ pub struct AppConfig {
     /// the stats page url for a logged in user. if set, must contain "{rpc_key_id}"
     pub redirect_rpc_key_url: Option<String>,
 
+    /// optional script to run before shutting the frontend down.
+    /// this is useful for keeping load balancers happy.
+    pub shutdown_script: Option<String>,
+    /// optional arguments for your shutdown script.
+    pub shutdown_script_args: Vec<String>,
+
     /// Optionally send errors to <https://sentry.io>
     pub sentry_url: Option<String>,
 
diff --git a/web3_proxy/src/frontend/mod.rs b/web3_proxy/src/frontend/mod.rs
index 470259a7..c1f8b54c 100644
--- a/web3_proxy/src/frontend/mod.rs
+++ b/web3_proxy/src/frontend/mod.rs
@@ -24,10 +24,10 @@ use std::sync::Arc;
 use std::{iter::once, time::Duration};
 use std::{net::SocketAddr, sync::atomic::Ordering};
 use strum::{EnumCount, EnumIter};
-use tokio::sync::broadcast;
+use tokio::{process::Command, sync::broadcast};
 use tower_http::sensitive_headers::SetSensitiveRequestHeadersLayer;
 use tower_http::{cors::CorsLayer, normalize_path::NormalizePathLayer, trace::TraceLayer};
-use tracing::{error_span, info, trace_span};
+use tracing::{error, error_span, info, trace_span};
 use ulid::Ulid;
 
 #[cfg(feature = "listenfd")]
@@ -364,6 +364,22 @@ pub async fn serve(
     // TODO: option to use with_connect_info. we want it in dev, but not when running behind a proxy, but not
     .with_graceful_shutdown(async move {
         let _ = shutdown_receiver.recv().await;
+
+        if let Some(shutdown_script) = app.config.shutdown_script.as_ref() {
+            let shutdown_script = Command::new(shutdown_script)
+                .args(&app.config.shutdown_script_args)
+                .spawn()
+                .expect("failed to execute script");
+
+            match shutdown_script.wait_with_output().await {
+                Ok(x) => {
+                    info!(?x, "shutdown script finished");
+                }
+                Err(err) => {
+                    error!(?err, "shutdown script failed");
+                }
+            };
+        }
     })
     .await
     .map_err(Into::into);
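
For reference, a minimal standalone sketch of the shutdown-hook pattern the last hunk implements: wait on a broadcast channel, then run an optional script before the frontend exits. Only tokio's `process::Command` and `sync::broadcast` APIs and the config field names from the diff are taken from the source; the helper and `main` below are hypothetical, and unlike the committed code this version logs a failed spawn instead of panicking via `.expect`:

```rust
use tokio::{process::Command, sync::broadcast};

// hypothetical helper mirroring the diff's graceful-shutdown closure
async fn run_shutdown_script(
    mut shutdown_receiver: broadcast::Receiver<()>,
    shutdown_script: Option<String>,
    shutdown_script_args: Vec<String>,
) {
    // block until a shutdown is broadcast (an Err from a dropped sender also unblocks)
    let _ = shutdown_receiver.recv().await;

    if let Some(script) = shutdown_script {
        // spawn the script; on failure, log and keep shutting down rather than panic
        match Command::new(&script).args(&shutdown_script_args).spawn() {
            Ok(child) => match child.wait_with_output().await {
                Ok(output) => println!("shutdown script finished: {output:?}"),
                Err(err) => eprintln!("shutdown script failed: {err:?}"),
            },
            Err(err) => eprintln!("failed to spawn {script}: {err:?}"),
        }
    }
}

#[tokio::main]
async fn main() {
    let (shutdown_sender, shutdown_receiver) = broadcast::channel(1);

    let handle = tokio::spawn(run_shutdown_script(
        shutdown_receiver,
        Some("/bin/true".to_string()), // any script path; "/bin/true" for the demo
        vec![],
    ));

    shutdown_sender.send(()).unwrap();
    handle.await.unwrap();
}
```

A note on ordering: axum's `with_graceful_shutdown` only starts draining connections once the future passed to it completes, so in the diff the script runs before the listener closes. That gives a load balancer a window to mark the node unhealthy while it can still answer requests, which appears to be the "keeping load balancers happy" the config comment refers to.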