rename get_db to connect_db

This commit is contained in:
Bryan Stitt 2023-07-13 11:32:48 -07:00
parent d8e4115d5a
commit a893a41c90
4 changed files with 18 additions and 16 deletions

View File

@ -12,7 +12,7 @@ use crate::jsonrpc::{
JsonRpcErrorData, JsonRpcForwardedResponse, JsonRpcForwardedResponseEnum, JsonRpcId,
JsonRpcParams, JsonRpcRequest, JsonRpcRequestEnum, JsonRpcResultData,
};
use crate::relational_db::{get_db, get_migrated_db, DatabaseConnection, DatabaseReplica};
use crate::relational_db::{connect_db, get_migrated_db, DatabaseConnection, DatabaseReplica};
use crate::response_cache::{
JsonRpcQueryCacheKey, JsonRpcResponseCache, JsonRpcResponseEnum, JsonRpcResponseWeigher,
};
@ -245,7 +245,7 @@ impl Web3ProxyApp {
.db_replica_max_connections
.unwrap_or(db_max_connections);
let db_replica = get_db(
let db_replica = connect_db(
db_replica_url,
db_replica_min_connections,
db_replica_max_connections,

View File

@ -18,7 +18,7 @@ use web3_proxy::sub_commands;
use web3_proxy::{
app::APP_USER_AGENT,
config::TopConfig,
relational_db::{get_db, get_migrated_db},
relational_db::{connect_db, get_migrated_db},
};
#[cfg(feature = "mimalloc")]
@ -307,7 +307,7 @@ fn main() -> anyhow::Result<()> {
"'--config' (with a db) or '--db-url' is required to run change_admin_status",
);
let db_conn = get_db(db_url, 1, 1).await?;
let db_conn = connect_db(db_url, 1, 1).await?;
x.main(&db_conn).await
}
@ -316,7 +316,7 @@ fn main() -> anyhow::Result<()> {
.db_url
.expect("'--config' (with a db) or '--db-url' is required to run change_user_addres");
let db_conn = get_db(db_url, 1, 1).await?;
let db_conn = connect_db(db_url, 1, 1).await?;
x.main(&db_conn).await
}
@ -325,7 +325,7 @@ fn main() -> anyhow::Result<()> {
.db_url
.expect("'--config' (with a db) or '--db-url' is required to run change_user_tier");
let db_conn = get_db(db_url, 1, 1).await?;
let db_conn = connect_db(db_url, 1, 1).await?;
x.main(&db_conn).await
}
@ -334,7 +334,7 @@ fn main() -> anyhow::Result<()> {
"'--config' (with a db) or '--db-url' is required to run change_user_tier_by_address",
);
let db_conn = get_db(db_url, 1, 1).await?;
let db_conn = connect_db(db_url, 1, 1).await?;
x.main(&db_conn).await
}
@ -343,7 +343,7 @@ fn main() -> anyhow::Result<()> {
.db_url
.expect("'--config' (with a db) or '--db-url' is required to run change_user_tier_by_key");
let db_conn = get_db(db_url, 1, 1).await?;
let db_conn = connect_db(db_url, 1, 1).await?;
x.main(&db_conn).await
}
@ -371,7 +371,7 @@ fn main() -> anyhow::Result<()> {
.db_url
.expect("'--config' (with a db) or '--db-url' is required to run count_users");
let db_conn = get_db(db_url, 1, 1).await?;
let db_conn = connect_db(db_url, 1, 1).await?;
x.main(&db_conn).await
}
@ -388,7 +388,7 @@ fn main() -> anyhow::Result<()> {
.expect("'--config' (with a db) or '--db-url' is required to run drop_migration_lock");
// very intentionally, do NOT run migrations here. that would wait forever if the migration lock is abandoned
let db_conn = get_db(db_url, 1, 1).await?;
let db_conn = connect_db(db_url, 1, 1).await?;
x.main(&db_conn).await
}
@ -402,7 +402,7 @@ fn main() -> anyhow::Result<()> {
.db_url
.expect("'--config' (with a db) or '--db-url' is required to run the migration from stats-mysql to stats-influx");
let db_conn = get_db(db_url, 1, 1).await?;
let db_conn = connect_db(db_url, 1, 1).await?;
x.main(top_config, &db_conn).await
}
SubCommand::Pagerduty(x) => {
@ -434,7 +434,7 @@ fn main() -> anyhow::Result<()> {
let db_url = cli_config
.db_url
.expect("'--config' (with a db) or '--db-url' is required to run transfer_key");
let db_conn = get_db(db_url, 1, 1).await?;
let db_conn = connect_db(db_url, 1, 1).await?;
x.main(&db_conn).await
}

View File

@ -22,7 +22,7 @@ impl AsRef<DatabaseConnection> for DatabaseReplica {
}
}
pub async fn get_db(
pub async fn connect_db(
db_url: String,
min_connections: u32,
max_connections: u32,
@ -33,10 +33,12 @@ pub async fn get_db(
let mut db_opt = sea_orm::ConnectOptions::new(db_url);
// TODO: load all these options from the config file. i think docker mysql default max is 100
// Amazon RDS Proxy default idle timeout is 1800 seconds
// TODO: sqlx info logging is way too verbose for production.
db_opt
.acquire_timeout(Duration::from_secs(5))
.connect_timeout(Duration::from_secs(5))
.idle_timeout(Duration::from_secs(1795))
.min_connections(min_connections)
.max_connections(max_connections)
.sqlx_logging_level(tracing::log::LevelFilter::Trace)
@ -121,7 +123,7 @@ pub async fn get_migrated_db(
max_connections: u32,
) -> anyhow::Result<DatabaseConnection> {
// TODO: this seems to fail silently
let db_conn = get_db(db_url, min_connections, max_connections)
let db_conn = connect_db(db_url, min_connections, max_connections)
.await
.context("getting db")?;

View File

@ -1,4 +1,4 @@
use crate::{config::TopConfig, frontend::authorization::RpcSecretKey, relational_db::get_db};
use crate::{config::TopConfig, frontend::authorization::RpcSecretKey, relational_db::connect_db};
use anyhow::Context;
use argh::FromArgs;
use entities::rpc_key;
@ -35,7 +35,7 @@ impl SearchKafkaSubCommand {
let mut rpc_key_id = self.rpc_key_id.map(|x| x.get());
if let Some(rpc_key) = self.rpc_key {
let db_conn = get_db(top_config.app.db_url.unwrap(), 1, 1).await?;
let db_conn = connect_db(top_config.app.db_url.unwrap(), 1, 1).await?;
let rpc_key: Uuid = rpc_key.into();