From 8785cb6d2d858f019602c06daaeaa802fcb4b158 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Mon, 19 Sep 2022 22:41:53 +0000
Subject: [PATCH] no idle timeout or max lifetime

---
 deferred-rate-limiter/src/lib.rs | 16 ++++++++--------
 web3_proxy/src/app.rs            |  4 +---
 2 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/deferred-rate-limiter/src/lib.rs b/deferred-rate-limiter/src/lib.rs
index 3f680f85..f921a54c 100644
--- a/deferred-rate-limiter/src/lib.rs
+++ b/deferred-rate-limiter/src/lib.rs
@@ -68,9 +68,9 @@ where
 
         // TODO: DO NOT UNWRAP HERE. figure out how to handle anyhow error being wrapped in an Arc
         // TODO: i'm sure this could be a lot better. but race conditions make this hard to think through. brain needs sleep
-        let arc_key_count: Arc<AtomicU64> = {
+        let local_key_count: Arc<AtomicU64> = {
             // clone things outside of the `async move`
-            let arc_deferred_rate_limit_result = arc_deferred_rate_limit_result.clone();
+            let deferred_rate_limit_result = arc_deferred_rate_limit_result.clone();
             let redis_key = redis_key.clone();
             let rrl = Arc::new(self.rrl.clone());
 
@@ -83,14 +83,14 @@ where
                     .await
                 {
                     Ok(RedisRateLimitResult::Allowed(count)) => {
-                        let _ = arc_deferred_rate_limit_result
+                        let _ = deferred_rate_limit_result
                             .lock()
                             .await
                             .insert(DeferredRateLimitResult::Allowed);
                         count
                     }
                     Ok(RedisRateLimitResult::RetryAt(retry_at, count)) => {
-                        let _ = arc_deferred_rate_limit_result
+                        let _ = deferred_rate_limit_result
                             .lock()
                             .await
                             .insert(DeferredRateLimitResult::RetryAt(retry_at));
@@ -100,7 +100,7 @@ where
                         panic!("RetryNever shouldn't happen")
                     }
                     Err(err) => {
-                        let _ = arc_deferred_rate_limit_result
+                        let _ = deferred_rate_limit_result
                             .lock()
                             .await
                             .insert(DeferredRateLimitResult::Allowed);
@@ -126,7 +126,7 @@ where
             Ok(deferred_rate_limit_result)
         } else {
             // we have a cached amount here
-            let cached_key_count = arc_key_count.fetch_add(count, Ordering::Acquire);
+            let cached_key_count = local_key_count.fetch_add(count, Ordering::Acquire);
 
             // assuming no other parallel futures incremented this key, this is the count that redis has
             let expected_key_count = cached_key_count + count;
@@ -153,11 +153,11 @@ where
                 .await
             {
                 Ok(RedisRateLimitResult::Allowed(count)) => {
-                    arc_key_count.store(count, Ordering::Release);
+                    local_key_count.store(count, Ordering::Release);
                     DeferredRateLimitResult::Allowed
                 }
                 Ok(RedisRateLimitResult::RetryAt(retry_at, count)) => {
-                    arc_key_count.store(count, Ordering::Release);
+                    local_key_count.store(count, Ordering::Release);
                     DeferredRateLimitResult::RetryAt(retry_at)
                 }
                 Ok(RedisRateLimitResult::RetryNever) => {
diff --git a/web3_proxy/src/app.rs b/web3_proxy/src/app.rs
index 0453fa24..436a5c82 100644
--- a/web3_proxy/src/app.rs
+++ b/web3_proxy/src/app.rs
@@ -133,11 +133,9 @@ pub async fn get_migrated_db(
     // TODO: load all these options from the config file. i think mysql default max is 100
     // TODO: sqlx logging only in debug. way too verbose for production
     db_opt
+        .connect_timeout(Duration::from_secs(30))
         .min_connections(min_connections)
         .max_connections(max_connections)
-        .connect_timeout(Duration::from_secs(8))
-        .idle_timeout(Duration::from_secs(8))
-        .max_lifetime(Duration::from_secs(60))
         .sqlx_logging(false);
     // .sqlx_logging_level(log::LevelFilter::Info);
 
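Note on the deferred-rate-limiter rename: local_key_count describes the value better than arc_key_count, since it is this process's local view of the key's count, not redis's. The surrounding arithmetic is also easy to misread: fetch_add returns the value from *before* the increment, so the count redis is expected to hold is the returned value plus the increment, and an authoritative answer from redis later overwrites the local value with store instead of adding to it. A minimal std-only sketch of that fast-path arithmetic (the literals 7, 1, and 42 are made up for illustration; the cache and redis pieces are stubbed out):

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

fn main() {
    // mirrors local_key_count in the patch: a per-key counter shared
    // between parallel futures through an Arc
    let local_key_count: Arc<AtomicU64> = Arc::new(AtomicU64::new(7));

    // how much this request adds to the key's usage
    let count = 1;

    // fetch_add returns the PREVIOUS value, not the incremented one,
    // using the same Acquire ordering as the patch
    let cached_key_count = local_key_count.fetch_add(count, Ordering::Acquire);
    assert_eq!(cached_key_count, 7);

    // so the count redis should now hold (assuming no other parallel
    // future touched this key) is previous + increment
    let expected_key_count = cached_key_count + count;
    assert_eq!(expected_key_count, 8);

    // when redis later reports an authoritative count, the local value is
    // overwritten rather than incremented, matching the store calls above
    local_key_count.store(42, Ordering::Release);
    assert_eq!(local_key_count.load(Ordering::Acquire), 42);
}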
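Note on the web3_proxy hunk, which is what the commit title refers to: dropping the explicit idle_timeout (8s) and max_lifetime (60s) means pooled MySQL connections are no longer recycled on that aggressive schedule (those knobs fall back to the pool's library defaults), while the connect timeout is raised from 8s to 30s. A hedged sketch of the resulting setup, assuming the sea-orm ConnectOptions/Database API that this function already uses (db_url and the 2/100 pool bounds are placeholders):

use std::time::Duration;

use sea_orm::{ConnectOptions, Database, DatabaseConnection, DbErr};

async fn connect(db_url: String) -> Result<DatabaseConnection, DbErr> {
    let mut db_opt = ConnectOptions::new(db_url);

    // wait up to 30s to establish a connection, but set no idle_timeout
    // or max_lifetime, so established connections are not reaped early
    db_opt
        .connect_timeout(Duration::from_secs(30))
        .min_connections(2)
        .max_connections(100)
        .sqlx_logging(false);

    Database::connect(db_opt).await
}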