larger max_capacity now that there is a weigher
parent a7761a0430
commit 3e3a9fcf64
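The key idea in this commit: with moka (whose builder API the diff below matches), `max_capacity` limits the cache's total *weight*. Without a weigher, every entry weighs 1, so `max_capacity(10_000)` meant "roughly ten thousand entries"; once a weigher reports each entry's approximate byte size, the same setting becomes a byte budget, which is why it can grow to `1024 * 1024 * 1024` (1 GiB). A minimal sketch of the difference, as an illustration rather than the app's real types:

```rust
use moka::sync::Cache;

fn main() {
    // No weigher: every entry weighs 1, so max_capacity(10_000) means
    // "about ten thousand entries", regardless of how big each one is.
    let by_count: Cache<u64, [u8; 32]> = Cache::builder()
        .max_capacity(10_000)
        .build();

    // With a weigher that reports an entry's size in bytes, max_capacity
    // becomes a byte budget, so a much larger number (1 GiB) is appropriate.
    let by_bytes: Cache<u64, Vec<u8>> = Cache::builder()
        .max_capacity(1024 * 1024 * 1024)
        .weigher(|_k, v: &Vec<u8>| v.len().min(u32::MAX as usize) as u32)
        .build();

    by_count.insert(1, [0u8; 32]);
    by_bytes.insert(1, vec![0u8; 4096]);
}
```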
TODO.md
@@ -159,7 +159,7 @@ These are roughly in order of completion
- this must be opt-in or spawned since it will slow things down and will make their calls less private
- [ ] add configurable size limits to all the Caches
- [ ] Api keys need option to lock to IP, cors header, referer, etc
- [ ] requests per second per api key
- [ ] active requests per second per api key
- [ ] distribution of methods per api key (eth_call, eth_getLogs, etc.)
- [ ] web3 on rpc1 exited without errors. maybe promote some shutdown messages from debug to info?
- [ ] Ulid instead of Uuid for user keys
@@ -180,6 +180,7 @@ These are not yet ordered.
- [ ] remove the if/else where we optionally route to archive and refactor to require a BlockNumber enum
- [ ] then check syncedconnections for the blockNum. if num given, use the canonical chain to figure out the winning hash
- [ ] this means if someone requests a recent but not ancient block, they can use all our servers, even the slower ones. need smart sorting for priority here
- [ ] script that looks at config and estimates max memory used by caches
- [ ] favicon
  - eth_1 | 2022-09-07T17:10:48.431536Z WARN web3_proxy::jsonrpc: forwarding error err=nothing to see here
  - use the one on https://staging.llamanodes.com/
@@ -34,7 +34,7 @@ where
     pub fn new(cache_size: u64, prefix: &str, rrl: RedisRateLimiter) -> Self {
         let ttl = rrl.period as u64;

-        // TODO: time to live is not right. we want this ttl counter to start only after redis is down. this works for now
+        // TODO: time to live is not exactly right. we want this ttl counter to start only after redis is down. this works for now
         let local_cache = Cache::builder()
             .time_to_live(Duration::from_secs(ttl))
             .max_capacity(cache_size)
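For context, the cache above is the rate limiter's local fallback: per-key counters that live for one rate-limit period (`ttl`), so stale counts age out even while redis stays unreachable. Every counter is the same small size, so a plain entry-count `max_capacity` suffices and no weigher is needed. A rough sketch of that shape, with simplified names that are not the crate's actual API:

```rust
use moka::sync::Cache;
use std::time::Duration;

/// Simplified stand-in for the local side of a deferred rate limiter:
/// per-key counters that expire after one rate-limit period.
fn build_local_counters(cache_size: u64, period_secs: u64) -> Cache<String, u64> {
    Cache::builder()
        // counts should only survive one period, even if redis never answers
        .time_to_live(Duration::from_secs(period_secs))
        // all counters weigh the same, so this is a simple entry count
        .max_capacity(cache_size)
        .build()
}

fn main() {
    let counters = build_local_counters(10_000, 60);
    let hits = counters.get(&"key:abc".to_string()).unwrap_or(0) + 1;
    counters.insert("key:abc".to_string(), hits);
}
```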
@@ -254,23 +254,21 @@ impl Web3ProxyApp {
         let (pending_tx_sender, pending_tx_receiver) = broadcast::channel(256);

         // TODO: use this? it could listen for confirmed transactions and then clear pending_transactions, but the head_block_sender is doing that
+        // TODO: don't drop the pending_tx_receiver. instead, read it to mark transactions as "seen". once seen, we won't re-send them
+        // TODO: once a transaction is "Confirmed" we remove it from the map. this should prevent major memory leaks.
+        // TODO: we should still have some sort of expiration or maximum size limit for the map
         drop(pending_tx_receiver);

         // TODO: capacity from configs
         // all these are the same size, so no need for a weigher
         let pending_transactions = Cache::builder()
             .max_capacity(10_000)
             .build_with_hasher(ahash::RandomState::new());

-        // TODO: don't drop the pending_tx_receiver. instead, read it to mark transactions as "seen". once seen, we won't re-send them
-        // TODO: once a transaction is "Confirmed" we remove it from the map. this should prevent major memory leaks.
-        // TODO: we should still have some sort of expiration or maximum size limit for the map
-
         // this block map is shared between balanced_rpcs and private_rpcs.
+        // keep 1GB of blocks in the cache
         // TODO: limits from config
-        // TODO: these blocks don't have full transactions, but they do have rather variable amounts of transaction hashes
+        // these blocks don't have full transactions, but they do have rather variable amounts of transaction hashes
         let block_map = Cache::builder()
-            .max_capacity(10_000)
+            .max_capacity(1024 * 1024 * 1024)
             .weigher(|_k, v| size_of_val(v) as u32)
             .build_with_hasher(ahash::RandomState::new());
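One caveat on the weigher used for `block_map`: `size_of_val(v)` measures only the value's shallow size. If the cached value is an `Arc` or contains heap allocations, such as the variable list of transaction hashes the comments mention, the heap part is invisible to the weigher, so real memory use can exceed the nominal 1 GiB budget. A sketch of a deeper weigher for a hypothetical block type (the `Block` struct and its fields are illustrative, not from this repo):

```rust
use std::mem::{size_of, size_of_val};

// Hypothetical block shape: a fixed header plus a variable number of tx hashes.
struct Block {
    parent_hash: [u8; 32],
    number: u64,
    tx_hashes: Vec<[u8; 32]>,
}

// Weigh the struct itself plus the Vec's heap buffer. size_of_val(block)
// alone reports a small constant no matter how many hashes the block holds.
fn block_weight(block: &Block) -> u32 {
    let shallow = size_of_val(block);
    let heap = block.tx_hashes.capacity() * size_of::<[u8; 32]>();
    (shallow + heap).min(u32::MAX as usize) as u32
}

fn main() {
    let b = Block {
        parent_hash: [0; 32],
        number: 15_000_000,
        tx_hashes: vec![[0; 32]; 200],
    };
    // the heap buffer alone is 200 * 32 bytes, which shallow sizing would miss
    assert!(block_weight(&b) as usize >= 200 * 32);
}
```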
@@ -342,11 +340,12 @@ impl Web3ProxyApp {
             frontend_key_rate_limiter = Some(DeferredRateLimiter::<Uuid>::new(10_000, "key", rrl));
         }

-        // TODO: change this to a sized cache. there's some potentially giant responses that will break things
+        // keep 1GB of responses in the cache
+        // responses can be very different in size, so this definitely needs a weigher
         // TODO: max_capacity from config
         // TODO: don't allow any response to be bigger than X% of the cache
         let response_cache = Cache::builder()
-            .max_capacity(10_000)
+            .max_capacity(1024 * 1024 * 1024)
             .weigher(|k, v| (size_of_val(k) + size_of_val(v)) as u32)
             .build_with_hasher(ahash::RandomState::new());
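The remaining TODO, capping any single response at X% of the cache, can be approximated by weighing a candidate entry before inserting it, so that one giant `eth_getLogs` response cannot evict a large share of smaller, hotter entries. A hypothetical sketch (the constants, names, and string-keyed cache are illustrative only):

```rust
use moka::sync::Cache;

const CACHE_BUDGET_BYTES: u64 = 1024 * 1024 * 1024; // matches the builder's max_capacity
const MAX_ENTRY_DIVISOR: u64 = 100; // no single entry above 1% of the budget

/// Cache a serialized response only if it is small enough to be worth keeping.
fn maybe_cache_response(cache: &Cache<String, String>, key: String, body: String) {
    // weigh by string bytes here; a shallow size_of_val would miss the heap data
    let weight = (key.len() + body.len()) as u64;
    if weight <= CACHE_BUDGET_BYTES / MAX_ENTRY_DIVISOR {
        cache.insert(key, body);
    }
    // otherwise: serve the response without caching it
}

fn main() {
    let cache: Cache<String, String> = Cache::builder()
        .max_capacity(CACHE_BUDGET_BYTES)
        .weigher(|k: &String, v: &String| (k.len() + v.len()).min(u32::MAX as usize) as u32)
        .build();

    maybe_cache_response(&cache, "eth_blockNumber".into(), "0xe4e1c0".into());
}
```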