better default connection counts
parent c89295ef7e
commit e4d25b207d
@@ -144,6 +144,7 @@ impl Web3ProxyApp {
     pub async fn spawn(
         app_stats: AppStats,
         top_config: TopConfig,
+        num_workers: u32,
     ) -> anyhow::Result<(
         Arc<Web3ProxyApp>,
         Pin<Box<dyn Future<Output = anyhow::Result<()>>>>,
@@ -156,13 +157,13 @@ impl Web3ProxyApp {
 
         // first, we connect to mysql and make sure the latest migrations have run
         let db_conn = if let Some(db_url) = &top_config.app.db_url {
-            let db_min_connections = top_config.app.db_min_connections;
+            let db_min_connections = top_config.app.db_min_connections.unwrap_or(num_workers);
 
             // TODO: what default multiple?
             let redis_max_connections = top_config
                 .app
                 .db_max_connections
-                .unwrap_or(db_min_connections * 4);
+                .unwrap_or(db_min_connections * 2);
 
             let db =
                 get_migrated_db(db_url.clone(), db_min_connections, redis_max_connections).await?;
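The database pool sizes now default from the runtime rather than from hard-coded serde defaults: the minimum falls back to the tokio worker count and the maximum to twice the minimum. A minimal sketch of how those defaults compose (the helper is hypothetical; the commit inlines this logic in spawn):

    // Hypothetical helper, illustration only: the commit inlines this logic.
    fn resolve_pool_sizes(min: Option<u32>, max: Option<u32>, num_workers: u32) -> (u32, u32) {
        let min = min.unwrap_or(num_workers); // default min = tokio worker count
        let max = max.unwrap_or(min * 2); // default max = min * 2
        (min, max)
    }

    fn main() {
        // 8 workers and nothing configured -> 8 minimum / 16 maximum connections
        assert_eq!(resolve_pool_sizes(None, None, 8), (8, 16));
        // an explicit minimum still drives the default maximum
        assert_eq!(resolve_pool_sizes(Some(4), None, 8), (4, 8));
    }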
@@ -201,12 +202,13 @@ impl Web3ProxyApp {
 
             let manager = RedisConnectionManager::new(redis_url.as_ref())?;
 
-            let redis_min_connections = top_config.app.redis_min_connections;
+            let redis_min_connections =
+                top_config.app.redis_min_connections.unwrap_or(num_workers);
 
             let redis_max_connections = top_config
                 .app
                 .redis_max_connections
-                .unwrap_or(redis_min_connections * 4);
+                .unwrap_or(redis_min_connections * 2);
 
             // TODO: min_idle?
             // TODO: set max_size based on max expected concurrent connections? set based on num_workers?
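The redis pool follows the same pattern: the minimum defaults to the worker count, the maximum to twice the minimum. A hedged sketch of feeding those sizes into a bb8-style builder around RedisConnectionManager (bb8/bb8-redis are assumptions here, not necessarily the crate this project uses for its pool):

    use bb8_redis::{bb8, RedisConnectionManager};

    // Sketch under assumptions: a bb8 0.8-style builder, not the commit's exact pool setup.
    async fn build_redis_pool(
        redis_url: &str,
        min_connections: u32,
        max_connections: u32,
    ) -> anyhow::Result<bb8::Pool<RedisConnectionManager>> {
        let manager = RedisConnectionManager::new(redis_url)?;

        let pool = bb8::Pool::builder()
            .min_idle(Some(min_connections)) // keep at least this many connections warm
            .max_size(max_connections) // hard cap on concurrent connections
            .build(manager)
            .await?;

        Ok(pool)
    }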
@@ -65,7 +65,7 @@ fn run(
     // start tokio's async runtime
     let rt = rt_builder.build()?;
 
-    let num_workers = rt.metrics().num_workers();
+    let num_workers = rt.metrics().num_workers() as u32;
     debug!(?num_workers);
 
     rt.block_on(async {
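This is where num_workers originates: tokio's RuntimeMetrics::num_workers returns a usize, hence the as u32 cast before it is threaded into Web3ProxyApp::spawn. A minimal sketch (the builder settings are illustrative; the real binary configures its runtime from CLI flags):

    use tokio::runtime::Builder;

    fn main() -> anyhow::Result<()> {
        // Illustrative runtime; the real binary builds its runtime elsewhere.
        let rt = Builder::new_multi_thread().enable_all().build()?;

        // num_workers() reports the worker thread count as a usize,
        // so it is cast to u32 before being passed along.
        let num_workers = rt.metrics().num_workers() as u32;
        println!("tokio workers: {num_workers}");

        Ok(())
    }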
@@ -76,7 +76,7 @@ fn run(
         let app_frontend_port = cli_config.port;
         let app_prometheus_port = cli_config.prometheus_port;
 
-        let (app, app_handle) = Web3ProxyApp::spawn(app_stats, top_config).await?;
+        let (app, app_handle) = Web3ProxyApp::spawn(app_stats, top_config, num_workers).await?;
 
         let frontend_handle = tokio::spawn(frontend::serve(app_frontend_port, app));
 
@@ -5,7 +5,6 @@ use argh::FromArgs;
 use derive_more::Constructor;
 use ethers::prelude::TxHash;
 use hashbrown::HashMap;
-use num::One;
 use serde::Deserialize;
 use std::sync::Arc;
 use tokio::sync::broadcast;
@@ -47,9 +46,10 @@ pub struct AppConfig {
     pub chain_id: u64,
     pub db_url: Option<String>,
     /// minimum size of the connection pool for the database
-    #[serde(default = "u32::one")]
-    pub db_min_connections: u32,
+    /// If none, the number of workers are used
+    pub db_min_connections: Option<u32>,
     /// minimum size of the connection pool for the database
+    /// If none, the minimum * 2 is used
     pub db_max_connections: Option<u32>,
     #[serde(default = "default_default_requests_per_minute")]
     pub default_requests_per_minute: u32,
@@ -63,9 +63,10 @@ pub struct AppConfig {
     pub public_rate_limit_per_minute: u64,
     pub redis_url: Option<String>,
     /// minimum size of the connection pool for the cache
-    #[serde(default = "u32::one")]
-    pub redis_min_connections: u32,
+    /// If none, the number of workers are used
+    pub redis_min_connections: Option<u32>,
     /// maximum size of the connection pool for the cache
+    /// If none, the minimum * 2 is used
     pub redis_max_connections: Option<u32>,
     #[serde(default = "default_response_cache_max_bytes")]
     pub response_cache_max_bytes: usize,
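With the pool-size fields now plain Option<u32> (and the u32::one serde defaults gone), omitting them from the config simply deserializes to None, and the runtime fallbacks above take over. A minimal sketch, assuming a TOML + serde setup comparable to the project's config loading and a struct trimmed to the relevant fields:

    use serde::Deserialize;

    // Trimmed-down stand-in for AppConfig, illustration only.
    #[derive(Debug, Deserialize)]
    struct PoolConfig {
        db_min_connections: Option<u32>,
        db_max_connections: Option<u32>,
        redis_min_connections: Option<u32>,
        redis_max_connections: Option<u32>,
    }

    fn main() -> anyhow::Result<()> {
        // Only one field set; the rest deserialize to None and fall back at runtime.
        let config: PoolConfig = toml::from_str("db_max_connections = 99")?;

        assert_eq!(config.db_min_connections, None);
        assert_eq!(config.db_max_connections, Some(99));
        assert_eq!(config.redis_min_connections, None);

        Ok(())
    }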