diff --git a/TODO.md b/TODO.md
index b27e5964..5b51c357 100644
--- a/TODO.md
+++ b/TODO.md
@@ -294,9 +294,9 @@ These are not yet ordered. There might be duplicates. We might not actually need
 - [x] upgrade user tier by address
 - [x] all_backend_connections skips syncing servers
 - [x] change weight back to tier
-- [-] fix multiple origin and referer checks
-- [-] let users choose a % to log (or maybe x/second). someone like curve logging all reverts will be a BIG database very quickly
-  - this must be opt-in or spawned since it will slow things down and will make their calls less private
+- [x] fix multiple origin and referer checks
+- [-] let users choose a % of reverts to log (or maybe x/second). someone like curve logging all reverts will be a BIG database very quickly
+  - this must be opt-in and spawned in the background since it will slow things down and will make their calls less private
 - [ ] automatic pruning of old revert logs once too many are collected
 - [ ] we currently default to 0.0 and don't expose a way to edit it. we have a database row, but we don't use it
 - [-] add configurable size limits to all the Caches
@@ -586,4 +586,5 @@ in another repo: event subscriber
 - be careful not to make an infinite loop
 - [ ] request timeout messages should include the request id
 - [ ] have an upgrade tier that queries multiple backends at once. returns on first Ok result, collects errors. if no Ok, find the most common error and then respond with that
-- [ ] give public_recent_ips_salt a better, more general, name
\ No newline at end of file
+- [ ] give public_recent_ips_salt a better, more general, name
+- [ ] include tier in the head block logs?
diff --git a/web3_proxy/src/bin/web3_proxy_cli/change_user_address_by_key.rs b/web3_proxy/src/bin/web3_proxy_cli/change_user_address_by_key.rs
deleted file mode 100644
index 828615dd..00000000
--- a/web3_proxy/src/bin/web3_proxy_cli/change_user_address_by_key.rs
+++ /dev/null
@@ -1,67 +0,0 @@
-use anyhow::Context;
-use argh::FromArgs;
-use entities::{rpc_key, user};
-use ethers::types::Address;
-use log::{debug, info};
-use migration::sea_orm::{
-    self, ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, IntoActiveModel,
-    QueryFilter,
-};
-use uuid::Uuid;
-use web3_proxy::frontend::authorization::RpcSecretKey;
-
-/// change a user's tier.
-#[derive(FromArgs, PartialEq, Eq, Debug)]
-#[argh(subcommand, name = "change_user_address_by_key")]
-pub struct ChangeUserAddressByKeySubCommand {
-    #[argh(positional)]
-    /// the RPC key owned by the user you want to change.
-    rpc_secret_key: RpcSecretKey,
-
-    /// the new address for the user.
-    #[argh(positional)]
-    new_address: String,
-}
-
-impl ChangeUserAddressByKeySubCommand {
-    pub async fn main(self, db_conn: &DatabaseConnection) -> anyhow::Result<()> {
-        let rpc_secret_key: Uuid = self.rpc_secret_key.into();
-
-        let new_address: Address = self.new_address.parse()?;
-
-        let new_address: Vec<u8> = new_address.to_fixed_bytes().into();
-
-        let uk = rpc_key::Entity::find()
-            .filter(rpc_key::Column::SecretKey.eq(rpc_secret_key))
-            .one(db_conn)
-            .await?
-            .context("No key found")?;
-
-        debug!("user key: {:#?}", uk);
-
-        // use the rpc secret key to get the user
-        // TODO: get this with a join on rpc_key
-        let u = user::Entity::find_by_id(uk.user_id)
-            .one(db_conn)
-            .await?
-            .context("No user found with that key")?;
-
-        debug!("user: {:#?}", u);
-
-        if u.address == new_address {
-            info!("user already has that address");
-        } else {
-            let mut u = u.into_active_model();
-
-            u.address = sea_orm::Set(new_address);
-
-            let u = u.save(db_conn).await?;
-
-            debug!("user: {:#?}", u);
-
-            info!("user's address changed");
-        }
-
-        Ok(())
-    }
-}
diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs
index b6da3b0c..98b6d958 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/main.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs
@@ -1,5 +1,4 @@
 mod change_user_address;
-mod change_user_address_by_key;
 mod change_user_tier;
 mod change_user_tier_by_address;
 mod change_user_tier_by_key;
@@ -10,6 +9,7 @@
 mod create_user;
 mod drop_migration_lock;
 mod health_compass;
 mod list_user_tier;
+mod transfer_key;
 mod user_export;
 mod user_import;
@@ -43,7 +43,6 @@ pub struct CliConfig {
 #[argh(subcommand)]
 enum SubCommand {
     ChangeUserAddress(change_user_address::ChangeUserAddressSubCommand),
-    ChangeUserAddressByKey(change_user_address_by_key::ChangeUserAddressByKeySubCommand),
     ChangeUserTier(change_user_tier::ChangeUserTierSubCommand),
     ChangeUserTierByAddress(change_user_tier_by_address::ChangeUserTierByAddressSubCommand),
     ChangeUserTierByKey(change_user_tier_by_key::ChangeUserTierByKeySubCommand),
@@ -53,6 +52,7 @@ enum SubCommand {
     CreateUser(create_user::CreateUserSubCommand),
     DropMigrationLock(drop_migration_lock::DropMigrationLockSubCommand),
     HealthCompass(health_compass::HealthCompassSubCommand),
+    TransferKey(transfer_key::TransferKeySubCommand),
     UserExport(user_export::UserExportSubCommand),
     UserImport(user_import::UserImportSubCommand),
     // TODO: sub command to downgrade migrations? sea-orm has this but doing downgrades here would be easier+safer
@@ -95,11 +95,6 @@ async fn main() -> anyhow::Result<()> {
             x.main(&db_conn).await
         }
-        SubCommand::ChangeUserAddressByKey(x) => {
-            let db_conn = get_db(cli_config.db_url, 1, 1).await?;
-
-            x.main(&db_conn).await
-        }
         SubCommand::ChangeUserTier(x) => {
            let db_conn = get_db(cli_config.db_url, 1, 1).await?;
            x.main(&db_conn).await
        }
@@ -138,6 +133,11 @@ async fn main() -> anyhow::Result<()> {
             x.main(&db_conn).await
         }
         SubCommand::HealthCompass(x) => x.main().await,
+        SubCommand::TransferKey(x) => {
+            let db_conn = get_db(cli_config.db_url, 1, 1).await?;
+
+            x.main(&db_conn).await
+        }
         SubCommand::UserExport(x) => {
             let db_conn = get_migrated_db(cli_config.db_url, 1, 1).await?;