more todos

This commit is contained in:
Bryan Stitt 2022-08-06 02:29:55 +00:00
parent 60d31aa1b5
commit 64f4a4b419
4 changed files with 42 additions and 33 deletions

11
TODO.md

@ -58,7 +58,11 @@
- we can improve this by only publishing the synced connections once a threshold of total available soft and hard limits is passed. how can we do this without hammering redis? at least it's only once per block per server
- [x] instead of tracking `pending_synced_connections`, have a mapping of where all connections are individually. then each change, re-check for consensus.
- [x] synced connections swap threshold set to 1 so that it always serves something
- [x] cli tool for creating new users
- [ ] basic request method stats
- [ ] incoming rate limiting by api key
- [ ] give users different rate limits looked up from the database
- [ ] allow blocking public requests
## V1
@ -72,8 +76,7 @@
- [cancelled] eth_getBlockByNumber and similar calls served from the block map
- will need all Block<TxHash> **and** Block<TransactionReceipt> in caches or fetched efficiently
- so maybe we don't want this. we can just use the general request cache for these. they will only require 1 request and it means requests won't get in the way as much on writes as new blocks arrive.
- [ ] cli tool for managing users and resetting api keys
- [ ] incoming rate limiting by api key
- [ ] cli tool for resetting api keys
- [ ] nice output when cargo doc is run
- [ ] if we request an old block, more servers can handle it than we currently use.
- [ ] instead of the one list of just heads, store our intermediate mappings (rpcs_by_hash, rpcs_by_num, blocks_by_hash) in SyncedConnections. this shouldn't be too much slower than what we have now
@ -200,4 +203,6 @@ in another repo: event subscriber
- [ ] threshold should check actual available request limits (if any) instead of just the soft limit
- [ ] foreign key on_update and on_delete
- [ ] database creation timestamps
- [ ] better error handling. we warn too often for validation errors and use the same error code for most every request
- [ ] better error handling. we warn too often for validation errors and use the same error code for most every request
- [ ] use &str more instead of String. lifetime annotations get really annoying though
- [ ] tarpit instead of rejecting requests (unless there are a lot of them)

@ -7,17 +7,14 @@ pub use bb8_redis::{bb8, RedisConnectionManager};
use std::time::Duration;
// TODO: take this as an argument to open?
const KEY_PREFIX: &str = "rate-limit";
pub type RedisClientPool = bb8::Pool<RedisConnectionManager>;
pub struct RedisCellClient {
pool: RedisClientPool,
key: String,
max_burst: u32,
count_per_period: u32,
period: u32,
default_max_burst: u32,
default_count_per_period: u32,
default_period: u32,
}
impl RedisCellClient {
@ -26,26 +23,38 @@ impl RedisCellClient {
// TODO: use r2d2 for connection pooling?
pub fn new(
pool: bb8::Pool<RedisConnectionManager>,
default_key: String,
max_burst: u32,
count_per_period: u32,
period: u32,
app: &str,
key: &str,
default_max_burst: u32,
default_count_per_period: u32,
default_period: u32,
) -> Self {
let default_key = format!("{}:{}", KEY_PREFIX, default_key);
let key = format!("{}:redis-cell:{}", app, key);
Self {
pool,
key: default_key,
max_burst,
count_per_period,
period,
key,
default_max_burst,
default_count_per_period,
default_period,
}
}
#[inline]
async fn _throttle(&self, key: &str, quantity: u32) -> Result<(), Duration> {
async fn _throttle(
&self,
key: &str,
max_burst: Option<u32>,
count_per_period: Option<u32>,
period: Option<u32>,
quantity: u32,
) -> Result<(), Duration> {
let mut conn = self.pool.get().await.unwrap();
let max_burst = max_burst.unwrap_or(self.default_max_burst);
let count_per_period = count_per_period.unwrap_or(self.default_count_per_period);
let period = period.unwrap_or(self.default_period);
/*
https://github.com/brandur/redis-cell#response
@ -62,13 +71,7 @@ impl RedisCellClient {
// TODO: don't unwrap. maybe return anyhow::Result<Result<(), Duration>>
// TODO: should we return more error info?
let x: Vec<isize> = cmd("CL.THROTTLE")
.arg(&(
key,
self.max_burst,
self.count_per_period,
self.period,
quantity,
))
.arg(&(key, max_burst, count_per_period, period, quantity))
.query_async(&mut *conn)
.await
.unwrap();
@ -88,18 +91,18 @@ impl RedisCellClient {
#[inline]
pub async fn throttle(&self) -> Result<(), Duration> {
self._throttle(&self.key, 1).await
self._throttle(&self.key, None, None, None, 1).await
}
#[inline]
pub async fn throttle_key(&self, key: &str) -> Result<(), Duration> {
let key = format!("{}:{}", self.key, key);
self._throttle(key.as_ref(), 1).await
self._throttle(key.as_ref(), None, None, None, 1).await
}
#[inline]
pub async fn throttle_quantity(&self, quantity: u32) -> Result<(), Duration> {
self._throttle(&self.key, quantity).await
self._throttle(&self.key, None, None, None, quantity).await
}
}

@ -441,7 +441,8 @@ impl Web3ProxyApp {
redis_client_pool.as_ref().map(|redis_client_pool| {
RedisCellClient::new(
redis_client_pool.clone(),
"ip".to_string(),
"web3-proxy",
"ip",
public_max_burst,
app_config.shared.public_rate_limit_per_minute,
60,

@ -147,12 +147,12 @@ impl Web3Connection {
reconnect: bool,
) -> anyhow::Result<(Arc<Web3Connection>, AnyhowJoinHandle<()>)> {
let hard_limit = hard_limit.map(|(hard_rate_limit, redis_conection)| {
// TODO: allow different max_burst and count_per_period and period
// TODO: if this url rate limits by IP instead of api key, we want to include our public ip in this key
// TODO: allow configurable period and max_burst
let period = 1;
RedisCellClient::new(
redis_conection,
format!("{},{}", chain_id, url_str),
"web3-proxy",
&format!("{}:{}", chain_id, url_str),
hard_rate_limit,
hard_rate_limit,
period,