From e4d25b207d4a4cc16b0ba8aa598e458458d5d2b2 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Tue, 6 Sep 2022 20:50:37 +0000
Subject: [PATCH] better default connection counts

---
 web3_proxy/src/app.rs            | 10 ++++++----
 web3_proxy/src/bin/web3_proxy.rs |  4 ++--
 web3_proxy/src/config.rs         | 11 ++++++-----
 3 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/web3_proxy/src/app.rs b/web3_proxy/src/app.rs
index 7007a0b2..69fbf621 100644
--- a/web3_proxy/src/app.rs
+++ b/web3_proxy/src/app.rs
@@ -144,6 +144,7 @@ impl Web3ProxyApp {
     pub async fn spawn(
         app_stats: AppStats,
         top_config: TopConfig,
+        num_workers: u32,
     ) -> anyhow::Result<(
         Arc,
         Pin>>>,
@@ -156,13 +157,13 @@ impl Web3ProxyApp {
         // first, we connect to mysql and make sure the latest migrations have run
         let db_conn = if let Some(db_url) = &top_config.app.db_url {
-            let db_min_connections = top_config.app.db_min_connections;
+            let db_min_connections = top_config.app.db_min_connections.unwrap_or(num_workers);
 
             // TODO: what default multiple?
             let redis_max_connections = top_config
                 .app
                 .db_max_connections
-                .unwrap_or(db_min_connections * 4);
+                .unwrap_or(db_min_connections * 2);
 
             let db =
                 get_migrated_db(db_url.clone(), db_min_connections, redis_max_connections).await?;
 
@@ -201,12 +202,13 @@ impl Web3ProxyApp {
             let manager = RedisConnectionManager::new(redis_url.as_ref())?;
 
-            let redis_min_connections = top_config.app.redis_min_connections;
+            let redis_min_connections =
+                top_config.app.redis_min_connections.unwrap_or(num_workers);
 
             let redis_max_connections = top_config
                 .app
                 .redis_max_connections
-                .unwrap_or(redis_min_connections * 4);
+                .unwrap_or(redis_min_connections * 2);
 
             // TODO: min_idle?
             // TODO: set max_size based on max expected concurrent connections? set based on num_workers?
 
diff --git a/web3_proxy/src/bin/web3_proxy.rs b/web3_proxy/src/bin/web3_proxy.rs
index 0057fb8a..923f6d86 100644
--- a/web3_proxy/src/bin/web3_proxy.rs
+++ b/web3_proxy/src/bin/web3_proxy.rs
@@ -65,7 +65,7 @@ fn run(
     // start tokio's async runtime
     let rt = rt_builder.build()?;
 
-    let num_workers = rt.metrics().num_workers();
+    let num_workers = rt.metrics().num_workers() as u32;
     debug!(?num_workers);
 
     rt.block_on(async {
@@ -76,7 +76,7 @@ fn run(
         let app_frontend_port = cli_config.port;
         let app_prometheus_port = cli_config.prometheus_port;
 
-        let (app, app_handle) = Web3ProxyApp::spawn(app_stats, top_config).await?;
+        let (app, app_handle) = Web3ProxyApp::spawn(app_stats, top_config, num_workers).await?;
 
         let frontend_handle = tokio::spawn(frontend::serve(app_frontend_port, app));
 
diff --git a/web3_proxy/src/config.rs b/web3_proxy/src/config.rs
index df1bda6d..a3a713a5 100644
--- a/web3_proxy/src/config.rs
+++ b/web3_proxy/src/config.rs
@@ -5,7 +5,6 @@ use argh::FromArgs;
 use derive_more::Constructor;
 use ethers::prelude::TxHash;
 use hashbrown::HashMap;
-use num::One;
 use serde::Deserialize;
 use std::sync::Arc;
 use tokio::sync::broadcast;
@@ -47,9 +46,10 @@ pub struct AppConfig {
     pub chain_id: u64,
     pub db_url: Option,
     /// minimum size of the connection pool for the database
-    #[serde(default = "u32::one")]
-    pub db_min_connections: u32,
+    /// If none, the number of workers is used
+    pub db_min_connections: Option<u32>,
     /// minimum size of the connection pool for the database
+    /// If none, the minimum * 2 is used
     pub db_max_connections: Option<u32>,
     #[serde(default = "default_default_requests_per_minute")]
     pub default_requests_per_minute: u32,
@@ -63,9 +63,10 @@ pub struct AppConfig {
     pub public_rate_limit_per_minute: u64,
     pub redis_url: Option,
     /// minimum size of the connection pool for the cache
-    #[serde(default = "u32::one")]
-    pub redis_min_connections: u32,
+    /// If none, the number of workers is used
+    pub redis_min_connections: Option<u32>,
     /// maximum size of the connection pool for the cache
+    /// If none, the minimum * 2 is used
     pub redis_max_connections: Option<u32>,
     #[serde(default = "default_response_cache_max_bytes")]
     pub response_cache_max_bytes: usize,
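The sketch below illustrates the defaulting behavior this patch introduces. The effective_pool_sizes helper is hypothetical and not part of the patch; it only mirrors the unwrap_or chain now used in app.rs: an unset db_min_connections / redis_min_connections falls back to the Tokio worker count, and an unset max falls back to twice the resolved minimum.

// Hypothetical standalone illustration of the new defaults (not in the patch).
fn effective_pool_sizes(
    min_connections: Option<u32>,
    max_connections: Option<u32>,
    num_workers: u32,
) -> (u32, u32) {
    // min: explicit config value, else the number of runtime workers
    let min = min_connections.unwrap_or(num_workers);
    // max: explicit config value, else twice the resolved minimum
    let max = max_connections.unwrap_or(min * 2);
    (min, max)
}

fn main() {
    // With an 8-worker runtime and nothing set in the config, the pools default to 8 / 16.
    assert_eq!(effective_pool_sizes(None, None, 8), (8, 16));
    // An explicit minimum of 2 with no maximum yields 2 / 4, regardless of worker count.
    assert_eq!(effective_pool_sizes(Some(2), None, 8), (2, 4));
}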