disable redis rate limits

Bryan Stitt 2022-09-06 22:55:17 +00:00
parent e4d25b207d
commit 5eef5173a1
4 changed files with 50 additions and 42 deletions


@@ -127,7 +127,10 @@ fn main() -> anyhow::Result<()> {
     // if RUST_LOG isn't set, configure a default
     // TODO: is there a better way to do this?
     if std::env::var("RUST_LOG").is_err() {
-        std::env::set_var("RUST_LOG", "info,redis_rate_limit=debug,web3_proxy=debug");
+        std::env::set_var(
+            "RUST_LOG",
+            "info,ethers=debug,redis_rate_limit=debug,web3_proxy=debug",
+        );
     }
 
     // install global collector configured based on RUST_LOG env var.
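How this default takes effect depends on the collector installed just below, which this hunk doesn't show. Each crate=level pair overrides the global info level, so the new ethers=debug entry turns on debug output for the ethers crate without touching everything else. A minimal sketch of the usual pattern, assuming tracing-subscriber with its env-filter feature (the subscriber setup here is an assumption, not the repo's exact code):

use tracing_subscriber::EnvFilter;

fn main() {
    // fall back to a default filter when RUST_LOG is unset
    if std::env::var("RUST_LOG").is_err() {
        std::env::set_var(
            "RUST_LOG",
            "info,ethers=debug,redis_rate_limit=debug,web3_proxy=debug",
        );
    }

    // "info" is the global level; "crate=level" pairs override it per crate
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .init();
}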


@@ -226,7 +226,7 @@ impl Web3ProxyApp {
                 .context("no user data")?
         } else {
             // unwrap the cache's result
-            user_data.context("no user data")?
+            user_data.expect("we just checked the user_data is_none above")
         };
 
         if user_data.user_id == 0 {
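The swap from context to expect changes the failure mode, not just the message: anyhow's Context turns a None into a recoverable Err for the caller, while expect panics, which is only safe because the is_none case was already handled in the branch above. A standalone sketch of the difference (the function and its Option argument are illustrative, not from the repo):

use anyhow::Context;

fn unwrap_user_data(cached: Option<u64>) -> anyhow::Result<u64> {
    // recoverable: a None becomes Err("no user data") for the caller to handle
    let _ = cached.context("no user data")?;

    // non-recoverable: panics on None, so it must be guarded by an earlier check
    Ok(cached.expect("we just checked the user_data is_none above"))
}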
@@ -234,46 +234,49 @@
         }
 
         // user key is valid. now check rate limits
-        if let Some(rate_limiter) = &self.rate_limiter {
-            // TODO: query redis in the background so that users don't have to wait on this network request
-            // TODO: better key? have a prefix so its easy to delete all of these
-            // TODO: we should probably hash this or something
-            let rate_limiter_label = user_key.to_string();
+        // TODO: this is throwing errors when curve-api hits us with high concurrency. investigate
+        if false {
+            if let Some(rate_limiter) = &self.rate_limiter {
+                // TODO: query redis in the background so that users don't have to wait on this network request
+                // TODO: better key? have a prefix so its easy to delete all of these
+                // TODO: we should probably hash this or something
+                let rate_limiter_label = user_key.to_string();
 
-            match rate_limiter
-                .throttle_label(
-                    &rate_limiter_label,
-                    Some(user_data.user_count_per_period),
-                    1,
-                )
-                .await
-            {
-                Ok(ThrottleResult::Allowed) => {}
-                Ok(ThrottleResult::RetryAt(retry_at)) => {
-                    // TODO: set headers so they know when they can retry or maybe tarpit them? if they are barely over?
-                    debug!(?rate_limiter_label, "user rate limit exceeded"); // this is too verbose, but a stat might be good
-                    // TODO: use their id if possible
-                    return Ok(RateLimitResult::UserRateLimitExceeded(user_data.user_id));
-                }
-                Ok(ThrottleResult::RetryNever) => {
-                    // TODO: prettier error for the user
-                    return Err(anyhow::anyhow!("user blocked by rate limiter"));
-                }
-                Err(err) => {
-                    // internal error, not rate limit being hit
-                    // rather than have downtime, i think its better to just use in-process rate limiting
-                    // TODO: in-process rate limits that pipe into redis
-                    error!(?err, "redis is unhappy. allowing ip");
-                    return Ok(RateLimitResult::AllowedUser(user_data.user_id));
-                } // // TODO: set headers so they know when they can retry
-                  // // warn!(?ip, "public rate limit exceeded"); // this is too verbose, but a stat might be good
-                  // // TODO: use their id if possible
-                  // // TODO: StatusCode::TOO_MANY_REQUESTS
-                  // return Err(anyhow::anyhow!("too many requests from this key"));
-            }
-        } else {
-            // TODO: if no redis, rate limit with a local cache?
-            todo!("no redis. cannot rate limit")
-        }
+                match rate_limiter
+                    .throttle_label(
+                        &rate_limiter_label,
+                        Some(user_data.user_count_per_period),
+                        1,
+                    )
+                    .await
+                {
+                    Ok(ThrottleResult::Allowed) => {}
+                    Ok(ThrottleResult::RetryAt(retry_at)) => {
+                        // TODO: set headers so they know when they can retry or maybe tarpit them? if they are barely over?
+                        debug!(?rate_limiter_label, "user rate limit exceeded"); // this is too verbose, but a stat might be good
+                        // TODO: use their id if possible
+                        return Ok(RateLimitResult::UserRateLimitExceeded(user_data.user_id));
+                    }
+                    Ok(ThrottleResult::RetryNever) => {
+                        // TODO: prettier error for the user
+                        return Err(anyhow::anyhow!("user blocked by rate limiter"));
+                    }
+                    Err(err) => {
+                        // internal error, not rate limit being hit
+                        // rather than have downtime, i think its better to just use in-process rate limiting
+                        // TODO: in-process rate limits that pipe into redis
+                        error!(?err, "redis is unhappy. allowing ip");
+                        return Ok(RateLimitResult::AllowedUser(user_data.user_id));
+                    } // // TODO: set headers so they know when they can retry
+                      // // warn!(?ip, "public rate limit exceeded"); // this is too verbose, but a stat might be good
+                      // // TODO: use their id if possible
+                      // // TODO: StatusCode::TOO_MANY_REQUESTS
+                      // return Err(anyhow::anyhow!("too many requests from this key"));
+                }
+            } else {
+                // TODO: if no redis, rate limit with a local cache?
+                todo!("no redis. cannot rate limit")
+            }
+        }
 
         Ok(RateLimitResult::AllowedUser(user_data.user_id))
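The Err arm's comment points at the fix this commit defers: keep serving during redis outages by rate limiting in-process. A hypothetical sketch of such a fallback as a coarse fixed-window counter per key (LocalRateLimiter and its fields are invented for illustration; this is not the redis_rate_limit crate's API):

use std::collections::HashMap;
use std::sync::Mutex;
use tokio::time::{Duration, Instant};

struct LocalRateLimiter {
    window: Duration,
    // label -> (window start, requests seen in that window)
    counters: Mutex<HashMap<String, (Instant, u64)>>,
}

impl LocalRateLimiter {
    // returns true if the request is allowed under max_per_window
    fn throttle(&self, label: &str, max_per_window: u64) -> bool {
        let now = Instant::now();
        let mut counters = self.counters.lock().unwrap();
        let entry = counters.entry(label.to_string()).or_insert((now, 0));

        // start a fresh window once the old one has elapsed
        if now.duration_since(entry.0) >= self.window {
            *entry = (now, 0);
        }

        entry.1 += 1;
        entry.1 <= max_per_window
    }
}

Piping these local counts back into redis once it recovers, as the TODO suggests, would keep limits roughly consistent across proxy instances.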


@@ -186,6 +186,7 @@ impl fmt::Debug for JsonRpcForwardedResponse {
 impl JsonRpcForwardedResponse {
     pub fn from_anyhow_error(err: anyhow::Error, id: Box<RawValue>) -> Self {
         // TODO: this is too verbose. plenty of errors are valid, like users giving an invalid address. no need to log that
+        // TODO: can we somehow get the initial request here? if we put that into a tracing span, will things slow down a ton?
         warn!(?err, "forwarding error");
 
         JsonRpcForwardedResponse {
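The new TODO asks whether the original request could travel with this log line. Spans are the usual tracing answer: record the request once on a span, and every event emitted inside it, including this warn!, inherits the field. A minimal sketch, with the function and field names invented for illustration:

use tracing::{info_span, warn};

fn respond_with_error(raw_request: &str, err: &anyhow::Error) {
    // record the request once; events inside the span carry it automatically
    let span = info_span!("jsonrpc_request", request = %raw_request);
    let _entered = span.enter();

    warn!(?err, "forwarding error");
}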


@@ -4,6 +4,7 @@ use std::fmt;
 use std::sync::atomic;
 use std::sync::Arc;
 use tokio::time::{sleep, Duration, Instant};
+use tracing::debug;
 use tracing::warn;
 use tracing::{instrument, trace};
@@ -57,7 +58,7 @@ impl OpenRequestHandle {
         // TODO: use tracing spans properly
         // TODO: requests from customers have request ids, but we should add
         // TODO: including params in this is way too verbose
-        trace!("Sending {} to {}", method, self.0);
+        debug!("Sending {} to {}", method, self.0);
 
         let mut provider = None;
@@ -80,8 +81,8 @@ impl OpenRequestHandle {
         // TODO: i think ethers already has trace logging (and does it much more fancy)
         // TODO: at least instrument this with more useful information
         // trace!("Reply from {}: {:?}", self.0, response);
-        trace!("Reply from {}", self.0);
+        trace!("Reply from {} for {}: {:?}", self.0, method, response);
+        // trace!("Reply from {}", self.0);
 
         response
     }
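These two level changes interact with the default filter set in main.rs above: under info,ethers=debug,redis_rate_limit=debug,web3_proxy=debug, a debug! inside web3_proxy is emitted while a trace! is filtered out. A small sketch of the effect (the method and endpoint strings are placeholders):

use tracing::{debug, trace};

fn log_level_demo() {
    // visible under web3_proxy=debug: promoting trace! to debug! surfaces the send
    debug!("Sending {} to {}", "eth_blockNumber", "http://localhost:8545");

    // still hidden unless the filter is raised to web3_proxy=trace
    trace!("Reply from {} for {}: {:?}", "http://localhost:8545", "eth_blockNumber", "0x10");
}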