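//! Command line interface for admins to interact with web3_proxy.
//!
//! A hypothetical invocation (binary name assumed here, flags per subcommand live in their modules):
//! `web3_proxy_cli --config ./config/development.toml <subcommand>`. Each `mod` below supplies one subcommand.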
mod change_admin_status;
mod change_user_address;
mod change_user_tier;
mod change_user_tier_by_address;
mod change_user_tier_by_key;
mod check_config;
mod count_users;
mod create_key;
mod create_user;
mod drop_migration_lock;
mod list_user_tier;
mod migrate_stats_to_v2;
mod pagerduty;
mod popularity_contest;
mod proxyd;
mod rpc_accounting;
mod search_kafka;
mod sentryd;
mod transfer_key;
mod user_export;
mod user_import;

use anyhow::Context;
use argh::FromArgs;
use ethers::types::U256;
use log::{info, warn};
use pagerduty_rs::eventsv2async::EventsV2 as PagerdutyAsyncEventsV2;
use pagerduty_rs::eventsv2sync::EventsV2 as PagerdutySyncEventsV2;
use std::{
    fs, panic,
    path::Path,
    sync::atomic::{self, AtomicUsize},
};
use tokio::runtime;
use web3_proxy::pagerduty::panic_handler;
use web3_proxy::{
    app::{get_db, get_migrated_db, APP_USER_AGENT},
    config::TopConfig,
};

#[cfg(feature = "deadlock")]
use {parking_lot::deadlock, std::thread, tokio::time::Duration};

#[derive(Debug, FromArgs)]
/// Command line interface for admins to interact with web3_proxy
pub struct Web3ProxyCli {
    /// path to the application config (only required for some commands; defaults to dev config)
    #[argh(option)]
    pub config: Option<String>,

    /// number of worker threads. Defaults to the number of logical processors
    #[argh(option, default = "0")]
    pub workers: usize,

    /// if no config, what database the client should connect to (only required for some commands; defaults to dev db)
    #[argh(option)]
    pub db_url: Option<String>,

    /// if no config, what sentry url the client should connect to
    #[argh(option)]
    pub sentry_url: Option<String>,

    /// this one cli can do multiple things
    #[argh(subcommand)]
    sub_command: SubCommand,
}

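/// One subcommand per admin task; argh builds each variant's flags from the
/// type defined in the matching module above.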
#[derive(FromArgs, PartialEq, Debug)]
#[argh(subcommand)]
enum SubCommand {
    ChangeAdminStatus(change_admin_status::ChangeAdminStatusSubCommand),
    ChangeUserAddress(change_user_address::ChangeUserAddressSubCommand),
    ChangeUserTier(change_user_tier::ChangeUserTierSubCommand),
    ChangeUserTierByAddress(change_user_tier_by_address::ChangeUserTierByAddressSubCommand),
    ChangeUserTierByKey(change_user_tier_by_key::ChangeUserTierByKeySubCommand),
    CheckConfig(check_config::CheckConfigSubCommand),
    CountUsers(count_users::CountUsersSubCommand),
    CreateKey(create_key::CreateKeySubCommand),
    CreateUser(create_user::CreateUserSubCommand),
    DropMigrationLock(drop_migration_lock::DropMigrationLockSubCommand),
    MigrateStatsToV2(migrate_stats_to_v2::MigrateStatsToV2),
    Pagerduty(pagerduty::PagerdutySubCommand),
    PopularityContest(popularity_contest::PopularityContestSubCommand),
    Proxyd(proxyd::ProxydSubCommand),
    RpcAccounting(rpc_accounting::RpcAccountingSubCommand),
    SearchKafka(search_kafka::SearchKafkaSubCommand),
    Sentryd(sentryd::SentrydSubCommand),
    TransferKey(transfer_key::TransferKeySubCommand),
    UserExport(user_export::UserExportSubCommand),
    UserImport(user_import::UserImportSubCommand),
    // TODO: sub command to downgrade migrations? sea-orm has this but doing downgrades here would be easier+safer
    // TODO: sub command to add new api keys to an existing user?
    // TODO: sub command to change a user's tier
}

fn main() -> anyhow::Result<()> {
    // this probably won't matter for us in docker, but better safe than sorry
    fdlimit::raise_fd_limit();

    #[cfg(feature = "deadlock")]
    {
        // spawn a thread for deadlock detection
        thread::spawn(move || loop {
            thread::sleep(Duration::from_secs(10));
            let deadlocks = deadlock::check_deadlock();
            if deadlocks.is_empty() {
                continue;
            }

            println!("{} deadlocks detected", deadlocks.len());
            for (i, threads) in deadlocks.iter().enumerate() {
                println!("Deadlock #{}", i);
                for t in threads {
                    println!("Thread Id {:#?}", t.thread_id());
                    println!("{:#?}", t.backtrace());
                }
            }
        });
    }

    // if RUST_LOG isn't set, configure a default
    // TODO: is there a better way to do this?
    #[cfg(tokio_console)]
    console_subscriber::init();

    #[cfg(not(tokio_console))]
    let rust_log = match std::env::var("RUST_LOG") {
        Ok(x) => x,
        Err(_) => match std::env::var("WEB3_PROXY_TRACE").map(|x| x == "true") {
            Ok(true) => {
                vec![
                    "info",
                    "ethers=debug",
                    "ethers_providers=debug",
                    "redis_rate_limit=debug",
                    "web3_proxy=trace",
                    "web3_proxy_cli=trace",
                    "web3_proxy::rpcs::blockchain=info",
                    "web3_proxy::rpcs::request=debug",
                ]
            }
            _ => {
                vec![
                    "info",
                    "ethers=debug",
                    // TODO: even error is too verbose for our purposes. how can we turn off this logging entirely?
                    "ethers_providers=error",
                    "redis_rate_limit=debug",
                    "web3_proxy=debug",
                    "web3_proxy_cli=debug",
                ]
            }
        }
        .join(","),
    };

    let mut cli_config: Web3ProxyCli = argh::from_env();

    if cli_config.config.is_none() && cli_config.db_url.is_none() && cli_config.sentry_url.is_none()
    {
        // TODO: default to example.toml if development.toml doesn't exist
        info!("defaulting to development config");
        cli_config.config = Some("./config/development.toml".to_string());
    }

    let (top_config, top_config_path) = if let Some(top_config_path) = cli_config.config.clone() {
        let top_config_path = Path::new(&top_config_path)
            .canonicalize()
            .context(format!("checking for config at {}", top_config_path))?;

        let top_config: String = fs::read_to_string(top_config_path.clone())?;

        let mut top_config: TopConfig = toml::from_str(&top_config)?;

        // TODO: this doesn't seem to do anything
        proctitle::set_title(format!("web3_proxy-{}", top_config.app.chain_id));

        if cli_config.db_url.is_none() {
            cli_config.db_url = top_config.app.db_url.clone();
        }

        if let Some(sentry_url) = top_config.app.sentry_url.clone() {
            cli_config.sentry_url = Some(sentry_url);
        }

        if top_config.app.chain_id == 137 {
            // TODO: these numbers are arbitrary. i think the maticnetwork/erigon fork has a bug
            if top_config.app.gas_increase_min.is_none() {
                top_config.app.gas_increase_min = Some(U256::from(40_000));
            }

            if top_config.app.gas_increase_percent.is_none() {
                top_config.app.gas_increase_percent = Some(U256::from(40));
            }
        }

        (Some(top_config), Some(top_config_path))
    } else {
        (None, None)
    };

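    // logging setup below is skipped when the tokio_console cfg is set;
    // in that case console_subscriber::init() above handles instrumentation instead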
    #[cfg(not(tokio_console))]
    {
        let logger = env_logger::builder().parse_filters(&rust_log).build();

        let max_level = logger.filter();

        // connect to sentry for error reporting
        // if no sentry, only log to stdout
        let _sentry_guard = if let Some(sentry_url) = cli_config.sentry_url.clone() {
            let logger = sentry::integrations::log::SentryLogger::with_dest(logger);

            log::set_boxed_logger(Box::new(logger)).unwrap();

            let guard = sentry::init((
                sentry_url,
                sentry::ClientOptions {
                    release: sentry::release_name!(),
                    // TODO: set this to a lower value (from config) in production
                    traces_sample_rate: 1.0,
                    ..Default::default()
                },
            ));

            Some(guard)
        } else {
            log::set_boxed_logger(Box::new(logger)).unwrap();

            None
        };

        log::set_max_level(max_level);

        info!("RUST_LOG={}", rust_log);
    }

    info!("{}", APP_USER_AGENT);

    // optionally connect to pagerduty
    // TODO: fix this nested result
    let (pagerduty_async, pagerduty_sync) = if let Ok(pagerduty_key) =
        std::env::var("PAGERDUTY_INTEGRATION_KEY")
    {
        let pagerduty_async =
            PagerdutyAsyncEventsV2::new(pagerduty_key.clone(), Some(APP_USER_AGENT.to_string()))?;
        let pagerduty_sync =
            PagerdutySyncEventsV2::new(pagerduty_key, Some(APP_USER_AGENT.to_string()))?;

        (Some(pagerduty_async), Some(pagerduty_sync))
    } else {
        info!("No PAGERDUTY_INTEGRATION_KEY");

        (None, None)
    };

    // panic handler that sends to pagerduty.
    // TODO: use the sentry handler if no pager duty. use default if no sentry
    if let Some(pagerduty_sync) = pagerduty_sync {
        let top_config = top_config.clone();

        panic::set_hook(Box::new(move |x| {
            panic_handler(top_config.clone(), &pagerduty_sync, x);
        }));
    }

    // set up tokio's async runtime
    #[cfg(tokio_uring)]
    let mut rt_builder = tokio_uring::Builder::new_multi_thread();
    #[cfg(not(tokio_uring))]
    let mut rt_builder = runtime::Builder::new_multi_thread();

    rt_builder.enable_all();

    if cli_config.workers > 0 {
        rt_builder.worker_threads(cli_config.workers);
    }

    if let Some(top_config) = top_config.as_ref() {
        let chain_id = top_config.app.chain_id;

        rt_builder.thread_name_fn(move || {
            static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
            // TODO: what ordering? i think we want seqcst so that these all happen in order, but that might be stricter than we really need
            let worker_id = ATOMIC_ID.fetch_add(1, atomic::Ordering::SeqCst);
            // TODO: i think these max at 15 characters
            format!("web3-{}-{}", chain_id, worker_id)
        });
    }

    // start tokio's async runtime
    let rt = rt_builder.build()?;

    let num_workers = rt.metrics().num_workers();
    info!("num_workers: {}", num_workers);

    rt.block_on(async {
        match cli_config.sub_command {
            SubCommand::ChangeAdminStatus(x) => {
                let db_url = cli_config.db_url.expect(
                    "'--config' (with a db) or '--db-url' is required to run change_admin_status",
                );

                let db_conn = get_db(db_url, 1, 1).await?;

                x.main(&db_conn).await
            }
            SubCommand::ChangeUserAddress(x) => {
                let db_url = cli_config
                    .db_url
                    .expect("'--config' (with a db) or '--db-url' is required to run change_user_address");

                let db_conn = get_db(db_url, 1, 1).await?;

                x.main(&db_conn).await
            }
            SubCommand::ChangeUserTier(x) => {
                let db_url = cli_config
                    .db_url
                    .expect("'--config' (with a db) or '--db-url' is required to run change_user_tier");

                let db_conn = get_db(db_url, 1, 1).await?;

                x.main(&db_conn).await
            }
            SubCommand::ChangeUserTierByAddress(x) => {
                let db_url = cli_config.db_url.expect(
                    "'--config' (with a db) or '--db-url' is required to run change_user_tier_by_address",
                );

                let db_conn = get_db(db_url, 1, 1).await?;

                x.main(&db_conn).await
            }
            SubCommand::ChangeUserTierByKey(x) => {
                let db_url = cli_config
                    .db_url
                    .expect("'--config' (with a db) or '--db-url' is required to run change_user_tier_by_key");

                let db_conn = get_db(db_url, 1, 1).await?;

                x.main(&db_conn).await
            }
            SubCommand::CheckConfig(x) => x.main().await,
            SubCommand::CreateKey(x) => {
                let db_url = cli_config
                    .db_url
                    .expect("'--config' (with a db) or '--db-url' is required to run create_key");

                let db_conn = get_migrated_db(db_url, 1, 1).await?;

                x.main(&db_conn).await
            }
            SubCommand::CreateUser(x) => {
                let db_url = cli_config
                    .db_url
                    .expect("'--config' (with a db) or '--db-url' is required to run create_user");

                let db_conn = get_migrated_db(db_url, 1, 1).await?;

                x.main(&db_conn).await
            }
            SubCommand::CountUsers(x) => {
                let db_url = cli_config
                    .db_url
                    .expect("'--config' (with a db) or '--db-url' is required to run count_users");

                let db_conn = get_db(db_url, 1, 1).await?;

                x.main(&db_conn).await
            }
            SubCommand::Proxyd(x) => {
                let top_config = top_config.expect("--config is required to run proxyd");
                let top_config_path =
                    top_config_path.expect("path must be set if top_config exists");

                x.main(top_config, top_config_path, num_workers).await
            }
            SubCommand::DropMigrationLock(x) => {
                let db_url = cli_config
                    .db_url
                    .expect("'--config' (with a db) or '--db-url' is required to run drop_migration_lock");

                // very intentionally, do NOT run migrations here. that would wait forever if the migration lock is abandoned
                let db_conn = get_db(db_url, 1, 1).await?;

                x.main(&db_conn).await
            }
            SubCommand::MigrateStatsToV2(x) => {
                let top_config = top_config.expect("--config is required to run the migration from stats-mysql to stats-influx");
                // let top_config_path =
                //     top_config_path.expect("path must be set if top_config exists");

                let db_url = cli_config
                    .db_url
                    .expect("'--config' (with a db) or '--db-url' is required to run the migration from stats-mysql to stats-influx");

                let db_conn = get_db(db_url, 1, 1).await?;

                x.main(top_config, &db_conn).await
            }
            SubCommand::Pagerduty(x) => {
                if cli_config.sentry_url.is_none() {
                    warn!("sentry_url is not set! Logs will only show in this console");
                }

                x.main(pagerduty_async, top_config).await
            }
            SubCommand::PopularityContest(x) => x.main().await,
            SubCommand::SearchKafka(x) => x.main(top_config.unwrap()).await,
            SubCommand::Sentryd(x) => {
                if cli_config.sentry_url.is_none() {
                    warn!("sentry_url is not set! Logs will only show in this console");
                }

                x.main(pagerduty_async, top_config).await
            }
            SubCommand::RpcAccounting(x) => {
                let db_url = cli_config
                    .db_url
                    .expect("'--config' (with a db) or '--db-url' is required to run rpc_accounting");

                let db_conn = get_migrated_db(db_url, 1, 1).await?;

                x.main(&db_conn).await
            }
            SubCommand::TransferKey(x) => {
                let db_url = cli_config
                    .db_url
                    .expect("'--config' (with a db) or '--db-url' is required to run transfer_key");
                let db_conn = get_db(db_url, 1, 1).await?;

                x.main(&db_conn).await
            }
            SubCommand::UserExport(x) => {
                let db_url = cli_config
                    .db_url
                    .expect("'--config' (with a db) or '--db-url' is required to run user_export");

                let db_conn = get_migrated_db(db_url, 1, 1).await?;

                x.main(&db_conn).await
            }
            SubCommand::UserImport(x) => {
                let db_url = cli_config
                    .db_url
                    .expect("'--config' (with a db) or '--db-url' is required to run user_import");

                let db_conn = get_migrated_db(db_url, 1, 1).await?;

                x.main(&db_conn).await
            }
        }
    })
}