more quick cache ttl

Bryan Stitt 2023-05-18 00:04:30 -07:00
parent 1c61390ad1
commit 0f367d9035
4 changed files with 28 additions and 24 deletions

Cargo.lock (generated)

@@ -1310,7 +1310,7 @@ dependencies = [
  "anyhow",
  "hashbrown 0.13.2",
  "log",
- "moka",
+ "quick_cache_ttl",
  "redis-rate-limiter",
  "tokio",
 ]

Cargo.toml (deferred rate limiter crate)

@@ -5,10 +5,10 @@ authors = ["Bryan Stitt <bryan@stitthappens.com>"]
 edition = "2021"
 
 [dependencies]
+quick_cache_ttl = { path = "../quick_cache_ttl" }
 redis-rate-limiter = { path = "../redis-rate-limiter" }
 
 anyhow = "1.0.71"
 hashbrown = "0.13.2"
 log = "0.4.17"
-moka = { version = "0.11.0", default-features = false, features = ["future"] }
 tokio = "1.28.1"

DeferredRateLimiter source (deferred rate limiter crate)

@@ -1,6 +1,6 @@
 //#![warn(missing_docs)]
 use log::error;
-use moka::future::Cache;
+use quick_cache_ttl::{CacheWithTTL, UnitWeighter};
 use redis_rate_limiter::{RedisRateLimitResult, RedisRateLimiter};
 use std::cmp::Eq;
 use std::fmt::{Debug, Display};
@@ -16,7 +16,8 @@ pub struct DeferredRateLimiter<K>
 where
     K: Send + Sync,
 {
-    local_cache: Cache<K, Arc<AtomicU64>, hashbrown::hash_map::DefaultHashBuilder>,
+    local_cache:
+        CacheWithTTL<K, Arc<AtomicU64>, UnitWeighter, hashbrown::hash_map::DefaultHashBuilder>,
     prefix: String,
     rrl: RedisRateLimiter,
     /// if None, defers to the max on rrl
@@ -33,9 +34,9 @@ impl<K> DeferredRateLimiter<K>
 where
     K: Copy + Debug + Display + Hash + Eq + Send + Sync + 'static,
 {
-    pub fn new(
+    pub async fn new(
         // TODO: change this to cache_size in bytes
-        cache_size: u64,
+        cache_size: usize,
         prefix: &str,
         rrl: RedisRateLimiter,
         default_max_requests_per_second: Option<u64>,
@@ -45,11 +46,18 @@ where
         // TODO: time to live is not exactly right. we want this ttl counter to start only after redis is down. this works for now
         // TODO: what do these weigh?
         // TODO: allow skipping max_capacity
-        let local_cache = Cache::builder()
-            .time_to_live(Duration::from_secs(ttl))
-            .max_capacity(cache_size)
-            .name(prefix)
-            .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());
+        let local_cache = CacheWithTTL::new(
+            cache_size,
+            cache_size as u64,
+            UnitWeighter,
+            hashbrown::hash_map::DefaultHashBuilder::default(),
+            Duration::from_secs(ttl),
+        )
+        .await;
+        // .time_to_live(Duration::from_secs(ttl))
+        // .max_capacity(cache_size)
+        // .name(prefix)
+        // .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());
 
         Self {
             local_cache,
@@ -87,9 +95,9 @@ where
             let redis_key = redis_key.clone();
             let rrl = Arc::new(self.rrl.clone());
 
-            // set arc_deferred_rate_limit_result and return the coun
+            // set arc_deferred_rate_limit_result and return the count
             self.local_cache
-                .get_with_by_ref(&key, async move {
+                .get_or_insert_async::<anyhow::Error, _>(&key, async move {
                     // we do not use the try operator here because we want to be okay with redis errors
                     let redis_count = match rrl
                         .throttle_label(&redis_key, Some(max_requests_per_period), count)
@@ -126,9 +134,9 @@ where
                         }
                     };
 
-                    Arc::new(AtomicU64::new(redis_count))
+                    Ok(Arc::new(AtomicU64::new(redis_count)))
                 })
-                .await
+                .await?
         };
 
         let mut locked = deferred_rate_limit_result.lock().await;
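
The behavioral change in the last two hunks: moka's get_with_by_ref takes an infallible loader future, while get_or_insert_async takes a loader that returns Result, so the closure now wraps its counter in Ok(...) and the call site propagates a potential error with .await?. Below is a standalone exercise of that calling pattern, reusing the HashMap-backed CacheWithTTL sketch after the Cargo.toml diff above; the key, TTL, and counts are arbitrary, std::collections::hash_map::RandomState stands in for the hashbrown hasher, and #[tokio::main] assumes tokio's rt and macros features.

use std::collections::hash_map::RandomState;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::Duration;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // mirrors DeferredRateLimiter::new: item capacity, weight capacity, weighter, hasher, ttl
    let local_cache: CacheWithTTL<u64, Arc<AtomicU64>, UnitWeighter, RandomState> = CacheWithTTL::new(
        10_000,
        10_000,
        UnitWeighter,
        RandomState::new(),
        Duration::from_secs(60),
    )
    .await;

    // mirrors the loader closure: compute a starting count (from redis in the real code),
    // returning Ok so that only successful loads end up in the cache
    let key = 42u64;
    let count = local_cache
        .get_or_insert_async::<anyhow::Error, _>(&key, async move {
            let redis_count = 1; // stand-in for rrl.throttle_label(...)
            Ok(Arc::new(AtomicU64::new(redis_count)))
        })
        .await?;

    // the returned Arc is shared with the cached entry, so increments are visible
    // to later hits on the same key within the ttl
    count.fetch_add(1, Ordering::AcqRel);
    assert_eq!(count.load(Ordering::Acquire), 2);

    Ok(())
}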

Web3ProxyApp setup

@@ -573,15 +573,11 @@ impl Web3ProxyApp {
             // these two rate limiters can share the base limiter
             // these are deferred rate limiters because we don't want redis network requests on the hot path
             // TODO: take cache_size from config
-            frontend_ip_rate_limiter = Some(DeferredRateLimiter::<IpAddr>::new(
-                10_000,
-                "ip",
-                rpc_rrl.clone(),
-                None,
-            ));
-            frontend_registered_user_rate_limiter = Some(DeferredRateLimiter::<u64>::new(
-                10_000, "key", rpc_rrl, None,
-            ));
+            frontend_ip_rate_limiter = Some(
+                DeferredRateLimiter::<IpAddr>::new(20_000, "ip", rpc_rrl.clone(), None).await,
+            );
+            frontend_registered_user_rate_limiter =
+                Some(DeferredRateLimiter::<u64>::new(10_000, "key", rpc_rrl, None).await);
         }
 
         // login rate limiter