From 5da334fcb7f904999c04007d7a320be3a2bb0d99 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Thu, 29 Jun 2023 21:28:31 -0700
Subject: [PATCH] start adding tests that need docker for mysql management

---
 web3_proxy/Cargo.toml                         |   1 +
 web3_proxy/src/relational_db.rs               |  35 ++--
 web3_proxy/src/rpcs/many.rs                   |   2 +-
 web3_proxy/src/sub_commands/rpc_accounting.rs |  10 +-
 web3_proxy/tests/common/app.rs                | 184 ++++++++++++++++--
 web3_proxy/tests/test_admins.rs               |  15 +-
 web3_proxy/tests/test_proxy.rs                |   9 +-
 web3_proxy/tests/test_users.rs                |  23 ++-
 8 files changed, 239 insertions(+), 40 deletions(-)

diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml
index bd38f6ad..9bdc4050 100644
--- a/web3_proxy/Cargo.toml
+++ b/web3_proxy/Cargo.toml
@@ -12,6 +12,7 @@ deadlock_detection = ["parking_lot/deadlock_detection"]
 mimalloc = ["dep:mimalloc"]
 tokio-console = ["dep:tokio-console", "dep:console-subscriber"]
 rdkafka-src = ["rdkafka/cmake-build", "rdkafka/libz", "rdkafka/ssl-vendored", "rdkafka/zstd-pkg-config"]
+tests-needing-docker = []
 
 [dependencies]
 deferred-rate-limiter = { path = "../deferred-rate-limiter" }
diff --git a/web3_proxy/src/relational_db.rs b/web3_proxy/src/relational_db.rs
index 068d2494..e4aa5ee3 100644
--- a/web3_proxy/src/relational_db.rs
+++ b/web3_proxy/src/relational_db.rs
@@ -1,3 +1,4 @@
+use anyhow::Context;
 use derive_more::From;
 use migration::sea_orm::{self, ConnectionTrait, Database};
 use migration::sea_query::table::ColumnDef;
@@ -31,24 +32,28 @@ pub async fn get_db(
 
     let mut db_opt = sea_orm::ConnectOptions::new(db_url);
 
-    // TODO: load all these options from the config file. i think mysql default max is 100
-    // TODO: sqlx logging only in debug. way too verbose for production
+    // TODO: load all these options from the config file. i think docker mysql default max is 100
+    // TODO: sqlx info logging is way too verbose for production.
     db_opt
-        .connect_timeout(Duration::from_secs(30))
+        .acquire_timeout(Duration::from_secs(5))
+        .connect_timeout(Duration::from_secs(5))
         .min_connections(min_connections)
         .max_connections(max_connections)
-        .sqlx_logging(false);
-    // .sqlx_logging_level(log::LevelFilter::Info);
+        .sqlx_logging_level(tracing::log::LevelFilter::Warn)
+        .sqlx_logging(true);
 
     Database::connect(db_opt).await
 }
 
-pub async fn drop_migration_lock(db_conn: &DatabaseConnection) -> Result<(), DbErr> {
+pub async fn drop_migration_lock(db_conn: &DatabaseConnection) -> anyhow::Result<()> {
     let db_backend = db_conn.get_database_backend();
 
     let drop_lock_statment = db_backend.build(Table::drop().table(Alias::new("migration_lock")));
 
-    db_conn.execute(drop_lock_statment).await?;
+    db_conn
+        .execute(drop_lock_statment)
+        .await
+        .context("dropping lock")?;
 
     debug!("migration lock unlocked");
 
@@ -59,7 +64,7 @@ pub async fn drop_migration_lock(db_conn: &DatabaseConnection) -> Result<(), DbE
 pub async fn migrate_db(
     db_conn: &DatabaseConnection,
     override_existing_lock: bool,
-) -> Result<(), DbErr> {
+) -> anyhow::Result<()> {
     let db_backend = db_conn.get_database_backend();
 
     // TODO: put the timestamp and hostname into this as columns?
@@ -75,6 +80,8 @@ pub async fn migrate_db(
         return Ok(());
     }
 
+    info!("waiting for migration lock...");
+
     // there are migrations to apply
     // acquire a lock
     if let Err(err) = db_conn.execute(create_lock_statment.clone()).await {
@@ -96,13 +103,15 @@ pub async fn migrate_db(
             break;
         }
 
+    info!("migrating...");
+
     let migration_result = Migrator::up(db_conn, None).await;
 
     // drop the distributed lock
     drop_migration_lock(db_conn).await?;
 
     // return if migrations erred
-    migration_result
+    migration_result.map_err(Into::into)
 }
 
 /// Connect to the database and run migrations
@@ -110,11 +119,13 @@ pub async fn get_migrated_db(
     db_url: String,
     min_connections: u32,
     max_connections: u32,
-) -> Result<DatabaseConnection, DbErr> {
+) -> anyhow::Result<DatabaseConnection> {
     // TODO: this seems to fail silently
-    let db_conn = get_db(db_url, min_connections, max_connections).await?;
+    let db_conn = get_db(db_url, min_connections, max_connections)
+        .await
+        .context("getting db")?;
 
-    migrate_db(&db_conn, false).await?;
+    migrate_db(&db_conn, false).await.context("migrating db")?;
 
     Ok(db_conn)
 }
diff --git a/web3_proxy/src/rpcs/many.rs b/web3_proxy/src/rpcs/many.rs
index 7a92df0f..d756338a 100644
--- a/web3_proxy/src/rpcs/many.rs
+++ b/web3_proxy/src/rpcs/many.rs
@@ -1565,7 +1565,7 @@ mod tests {
             .await
             .unwrap();
 
-        dbg!(&x);
+        info!(?x);
 
         assert!(matches!(x, OpenRequestResult::NotReady));
 
diff --git a/web3_proxy/src/sub_commands/rpc_accounting.rs b/web3_proxy/src/sub_commands/rpc_accounting.rs
index 1df2e50d..cdd14051 100644
--- a/web3_proxy/src/sub_commands/rpc_accounting.rs
+++ b/web3_proxy/src/sub_commands/rpc_accounting.rs
@@ -148,23 +148,23 @@ impl RpcAccountingSubCommand {
             .signed_duration_since(stats.first_period_datetime)
             .num_seconds()
             .into();
-        dbg!(query_seconds);
+        info!(%query_seconds);
 
         let avg_request_per_second =
            (stats.total_frontend_requests / query_seconds).round_dp(2);
-        dbg!(avg_request_per_second);
+        info!(%avg_request_per_second);
 
         let cache_hit_rate = (stats.total_cache_hits / stats.total_frontend_requests
             * Decimal::from(100))
             .round_dp(2);
-        dbg!(cache_hit_rate);
+        info!(%cache_hit_rate);
 
         let avg_response_millis =
            (stats.total_response_millis / stats.total_frontend_requests).round_dp(3);
-        dbg!(avg_response_millis);
+        info!(%avg_response_millis);
 
         let avg_response_bytes = (stats.total_response_bytes / stats.total_frontend_requests).round();
-        dbg!(avg_response_bytes);
+        info!(%avg_response_bytes);
 
         Ok(())
     }
diff --git a/web3_proxy/tests/common/app.rs b/web3_proxy/tests/common/app.rs
index b1dde01c..5b474f7f 100644
--- a/web3_proxy/tests/common/app.rs
+++ b/web3_proxy/tests/common/app.rs
@@ -1,5 +1,8 @@
 use ethers::{
-    prelude::{Http, Provider},
+    prelude::{
+        rand::{self, distributions::Alphanumeric, Rng},
+        Http, Provider,
+    },
     signers::LocalWallet,
     types::Address,
     utils::{Anvil, AnvilInstance},
@@ -8,21 +11,30 @@
 use hashbrown::HashMap;
 use parking_lot::Mutex;
 use std::{
     env,
+    process::Command as SyncCommand,
     str::FromStr,
     sync::atomic::{AtomicU16, Ordering},
 };
 use std::{sync::Arc, time::Duration};
 use tokio::{
+    net::TcpStream,
+    process::Command as AsyncCommand,
     sync::broadcast::{self, error::SendError},
     task::JoinHandle,
     time::{sleep, Instant},
 };
-use tracing::info;
+use tracing::{info, trace};
 use web3_proxy::{
     config::{AppConfig, TopConfig, Web3RpcConfig},
+    relational_db::get_migrated_db,
     sub_commands::ProxydSubCommand,
 };
+pub struct DbData {
+    container_name: String,
+    url: Option<String>,
+}
+
 pub struct TestApp {
     /// anvil shuts down when this guard is dropped.
     pub anvil: AnvilInstance,
@@ -30,8 +42,11 @@ pub struct TestApp {
     /// connection to anvil.
     pub anvil_provider: Provider<Http>,
 
+    /// keep track of the database so it can be stopped on drop
+    pub db: Option<DbData>,
+
     /// spawn handle for the proxy.
-    pub handle: Mutex<Option<JoinHandle<anyhow::Result<()>>>>,
+    pub proxy_handle: Mutex<Option<JoinHandle<anyhow::Result<()>>>>,
 
     /// connection to the proxy that is connected to anvil.
     pub proxy_provider: Provider<Http>,
@@ -41,7 +56,7 @@ pub struct TestApp {
 }
 
 impl TestApp {
-    pub async fn spawn() -> Self {
+    pub async fn spawn(setup_db: bool) -> Self {
         let num_workers = 2;
 
         // TODO: move basic setup into a test fixture
@@ -58,14 +73,146 @@ impl TestApp {
 
         let anvil_provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
 
+        let db = if setup_db {
+            // sqlite doesn't seem to work. our migrations are written for mysql
+            // so let's use docker to start mysql
+            let password: String = rand::thread_rng()
+                .sample_iter(&Alphanumeric)
+                .take(32)
+                .map(char::from)
+                .collect();
+
+            let random: String = rand::thread_rng()
+                .sample_iter(&Alphanumeric)
+                .take(8)
+                .map(char::from)
+                .collect();
+
+            let db_container_name = format!("web3-proxy-test-{}", random);
+
+            info!(%db_container_name);
+
+            // create the db_data as soon as the url is known
+            // when this is dropped, the db will be stopped
+            let mut db_data = DbData {
+                container_name: db_container_name.clone(),
+                url: None,
+            };
+
+            let _ = AsyncCommand::new("docker")
+                .args([
+                    "run",
+                    "--name",
+                    &db_container_name,
+                    "--rm",
+                    "-d",
+                    "-e",
+                    &format!("MYSQL_ROOT_PASSWORD={}", password),
+                    "-e",
+                    "MYSQL_DATABASE=web3_proxy_test",
+                    "-p",
+                    "0:3306",
+                    "mysql",
+                ])
+                .output()
+                .await
+                .expect("failed to start db");
+
+            // give the db a second to start
+            // TODO: wait until docker says it is healthy
+            sleep(Duration::from_secs(1)).await;
+
+            // TODO: why is this always empty?!
+            let docker_inspect_output = AsyncCommand::new("docker")
+                .args(["inspect", &db_container_name])
+                .output()
+                .await
+                .unwrap();
+
+            let docker_inspect_json = String::from_utf8(docker_inspect_output.stdout).unwrap();
+
+            trace!(%docker_inspect_json);
+
+            let docker_inspect_json: serde_json::Value =
+                serde_json::from_str(&docker_inspect_json).unwrap();
+
+            let mysql_ports = docker_inspect_json
+                .get(0)
+                .unwrap()
+                .get("NetworkSettings")
+                .unwrap()
+                .get("Ports")
+                .unwrap()
+                .get("3306/tcp")
+                .unwrap()
+                .get(0)
+                .unwrap();
+
+            trace!(?mysql_ports);
+
+            let mysql_port: u64 = mysql_ports
+                .get("HostPort")
+                .expect("unable to determine mysql port")
+                .as_str()
+                .unwrap()
+                .parse()
+                .unwrap();
+
+            let mysql_ip = mysql_ports
+                .get("HostIp")
+                .and_then(|x| x.as_str())
+                .expect("unable to determine mysql ip");
+            // let mysql_ip = "localhost";
+            // let mysql_ip = "127.0.0.1";
+
+            let db_url = format!(
+                "mysql://root:{}@{}:{}/web3_proxy_test",
+                password, mysql_ip, mysql_port
+            );
+
+            info!(%db_url, "waiting for start");
+
+            db_data.url = Some(db_url.clone());
+
+            let start = Instant::now();
+            let max_wait = Duration::from_secs(30);
+            loop {
+                if start.elapsed() > max_wait {
+                    panic!("db took too long to start");
+                }
+
+                if TcpStream::connect(format!("{}:{}", mysql_ip, mysql_port))
+                    .await
+                    .is_ok()
+                {
+                    break;
+                };
+
+                // not open yet. sleep and then try again
+                sleep(Duration::from_secs(1)).await;
+            }
+
+            info!(%db_url, "db is ready for connections");
+
+            // try to migrate
+            let _ = get_migrated_db(db_url, 1, 1)
+                .await
+                .expect("failed migration");
+
+            Some(db_data)
+        } else {
+            None
+        };
+
+        let db_url = db.as_ref().and_then(|x| x.url.clone());
+
         // make a test TopConfig
         // TODO: test influx
         // TODO: test redis
         let top_config = TopConfig {
             app: AppConfig {
                 chain_id: 31337,
-                // TODO: [make sqlite work]()
-                // db_url: Some("sqlite::memory:".into()),
+                db_url,
                 default_user_max_requests_per_period: Some(6_000_000),
                 deposit_factory_contract: Address::from_str(
                     "4e3BC2054788De923A04936C6ADdB99A05B0Ea36",
@@ -111,7 +258,8 @@ impl TestApp {
         let mut frontend_port = frontend_port_arc.load(Ordering::Relaxed);
         let start = Instant::now();
         while frontend_port == 0 {
-            if start.elapsed() > Duration::from_secs(1) {
+            // we have to give it some time because it might have to do migrations
+            if start.elapsed() > Duration::from_secs(10) {
                 panic!("took too long to start!");
             }
 
@@ -126,7 +274,8 @@ impl TestApp {
         Self {
             anvil,
             anvil_provider,
-            handle: Mutex::new(Some(handle)),
+            db,
+            proxy_handle: Mutex::new(Some(handle)),
             proxy_provider,
             shutdown_sender,
         }
@@ -136,18 +285,20 @@ impl TestApp {
         self.shutdown_sender.send(())
    }
 
+    #[allow(unused)]
     pub async fn wait(&self) {
+        let _ = self.stop();
+
         // TODO: lock+take feels weird, but it works
-        let handle = self.handle.lock().take();
+        let handle = self.proxy_handle.lock().take();
 
         if let Some(handle) = handle {
-            let _ = self.stop();
-
             info!("waiting for the app to stop...");
 
             handle.await.unwrap().unwrap();
         }
     }
 
+    #[allow(unused)]
     pub fn wallet(&self, id: usize) -> LocalWallet {
         self.anvil.keys()[id].clone().into()
     }
@@ -160,3 +311,14 @@ impl Drop for TestApp {
         // TODO: do we care about waiting for it to stop? it will slow our tests down so we probably only care about waiting in some tests
     }
 }
+
+impl Drop for DbData {
+    fn drop(&mut self) {
+        // TODO: this doesn't seem to run
+        info!(%self.container_name, "killing db");
+
+        let _ = SyncCommand::new("docker")
+            .args(["kill", "-s", "9", &self.container_name])
+            .output();
+    }
+}
diff --git a/web3_proxy/tests/test_admins.rs b/web3_proxy/tests/test_admins.rs
index 1e211b8c..442434d1 100644
--- a/web3_proxy/tests/test_admins.rs
+++ b/web3_proxy/tests/test_admins.rs
@@ -2,26 +2,29 @@ mod common;
 
 use crate::common::TestApp;
 
-#[ignore]
+#[cfg_attr(not(feature = "tests-needing-docker"), ignore)]
+#[ignore = "under construction"]
 #[test_log::test(tokio::test)]
 async fn test_admin_imitate_user() {
-    let x = TestApp::spawn().await;
+    let x = TestApp::spawn(true).await;
 
     todo!();
 }
 
-#[ignore]
+#[cfg_attr(not(feature = "tests-needing-docker"), ignore)]
+#[ignore = "under construction"]
 #[test_log::test(tokio::test)]
 async fn test_admin_grant_credits() {
-    let x = TestApp::spawn().await;
+    let x = TestApp::spawn(true).await;
 
     todo!();
 }
 
-#[ignore]
+#[cfg_attr(not(feature = "tests-needing-docker"), ignore)]
+#[ignore = "under construction"]
 #[test_log::test(tokio::test)]
 async fn test_admin_change_user_tier() {
-    let x = TestApp::spawn().await;
+    let x = TestApp::spawn(true).await;
 
     todo!();
 }
diff --git a/web3_proxy/tests/test_proxy.rs b/web3_proxy/tests/test_proxy.rs
index ddefce15..dd6c5e90 100644
--- a/web3_proxy/tests/test_proxy.rs
+++ b/web3_proxy/tests/test_proxy.rs
@@ -10,9 +10,16 @@ use tokio::{
 };
 use web3_proxy::rpcs::blockchain::ArcBlock;
 
+#[cfg_attr(not(feature = "tests-needing-docker"), ignore)]
+#[ignore = "under construction"]
+#[test_log::test(tokio::test)]
+async fn it_migrates_the_db() {
+    TestApp::spawn(true).await;
+}
+
 #[test_log::test(tokio::test)]
 async fn it_starts_and_stops() {
-    let x = TestApp::spawn().await;
+    let x = TestApp::spawn(false).await;
 
     let anvil_provider = &x.anvil_provider;
     let proxy_provider = &x.proxy_provider;
diff --git a/web3_proxy/tests/test_users.rs b/web3_proxy/tests/test_users.rs
index 41d12396..17c09012 100644
--- a/web3_proxy/tests/test_users.rs
+++ b/web3_proxy/tests/test_users.rs
@@ -1,21 +1,36 @@
 mod common;
 
 use crate::common::TestApp;
+use ethers::signers::Signer;
+use tracing::info;
 
-#[ignore]
+/// TODO: EIP-191 and the other message formats in another test
+#[cfg_attr(not(feature = "tests-needing-docker"), ignore)]
 #[test_log::test(tokio::test)]
 async fn test_log_in_and_out() {
-    let x = TestApp::spawn().await;
+    let x = TestApp::spawn(true).await;
 
     let w = x.wallet(0);
 
+    let login_url = format!("{}user/login/{:?}", x.proxy_provider.url(), w.address());
+    let login_response = reqwest::get(login_url).await.unwrap();
+
+    info!(?login_response);
+
+    // TODO: sign the message and POST it
+
+    // TODO: get bearer token out of response
+
+    // TODO: log out
+
     todo!();
 }
 
-#[ignore]
+#[cfg_attr(not(feature = "tests-needing-docker"), ignore)]
+#[ignore = "under construction"]
 #[test_log::test(tokio::test)]
 async fn test_referral_bonus() {
-    let x = TestApp::spawn().await;
+    let x = TestApp::spawn(true).await;
 
     todo!();
 }
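
A note on running these: with the new `tests-needing-docker` cargo feature, the docker-backed tests are skipped by default and only run when the feature is enabled, e.g. `cargo test -p web3_proxy --features tests-needing-docker`. The tests additionally tagged `#[ignore = "under construction"]` stay skipped either way until they are finished.

The "TODO: wait until docker says it is healthy" above could look roughly like the sketch below. This is not part of the patch, just one possible shape for it, and `wait_for_healthy` is a hypothetical helper: it assumes the container is started with a healthcheck (the stock `mysql` image does not define one, so a `--health-cmd` argument would need to be added to the `docker run` invocation).

    use std::time::Duration;
    use tokio::{
        process::Command,
        time::{sleep, Instant},
    };

    /// Poll `docker inspect` until the container reports itself healthy.
    /// Hypothetical helper, not part of the patch.
    async fn wait_for_healthy(container_name: &str, max_wait: Duration) -> anyhow::Result<()> {
        let start = Instant::now();
        loop {
            // the Go template prints the health state as a bare string,
            // e.g. "starting" or "healthy". this errors if the container
            // has no healthcheck configured.
            let output = Command::new("docker")
                .args([
                    "inspect",
                    "--format",
                    "{{.State.Health.Status}}",
                    container_name,
                ])
                .output()
                .await?;

            if String::from_utf8_lossy(&output.stdout).trim() == "healthy" {
                return Ok(());
            }

            if start.elapsed() > max_wait {
                anyhow::bail!("{} took too long to become healthy", container_name);
            }

            sleep(Duration::from_secs(1)).await;
        }
    }

Something like this could replace both the fixed one-second sleep after `docker run` and the TcpStream polling loop, since mysql accepting a TCP connection does not necessarily mean it is ready for queries.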