This works, but it's not super fast

This commit is contained in:
Bryan Stitt 2022-11-01 20:51:33 +00:00
parent f859ed56fc
commit f2268dbb1b
3 changed files with 36 additions and 14 deletions

14
Cargo.lock generated
View File

@ -1091,6 +1091,19 @@ dependencies = [
"cipher 0.4.3", "cipher 0.4.3",
] ]
[[package]]
name = "dashmap"
version = "5.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc"
dependencies = [
"cfg-if",
"hashbrown",
"lock_api",
"once_cell",
"parking_lot_core 0.9.3",
]
[[package]] [[package]]
name = "deadpool" name = "deadpool"
version = "0.9.5" version = "0.9.5"
@ -5538,6 +5551,7 @@ dependencies = [
"axum-macros", "axum-macros",
"chrono", "chrono",
"counter", "counter",
"dashmap",
"deferred-rate-limiter", "deferred-rate-limiter",
"derive_more", "derive_more",
"dotenv", "dotenv",

View File

@ -28,6 +28,7 @@ axum-macros = "0.2.3"
# TODO: import chrono from sea-orm so we always have the same version # TODO: import chrono from sea-orm so we always have the same version
chrono = "0.4.22" chrono = "0.4.22"
counter = "0.5.7" counter = "0.5.7"
dashmap = "5.4.0"
derive_more = "0.99.17" derive_more = "0.99.17"
dotenv = "0.15.0" dotenv = "0.15.0"
ethers = { version = "1.0.0", features = ["rustls", "ws"] } ethers = { version = "1.0.0", features = ["rustls", "ws"] }

View File

@ -2,6 +2,8 @@ use crate::frontend::authorization::{AuthorizedKey, RequestMetadata};
use crate::jsonrpc::JsonRpcForwardedResponse; use crate::jsonrpc::JsonRpcForwardedResponse;
use anyhow::Context; use anyhow::Context;
use chrono::{TimeZone, Utc}; use chrono::{TimeZone, Utc};
use dashmap::mapref::entry::Entry;
use dashmap::DashMap;
use derive_more::From; use derive_more::From;
use entities::rpc_accounting; use entities::rpc_accounting;
use hdrhistogram::Histogram; use hdrhistogram::Histogram;
@ -80,11 +82,8 @@ pub struct UserProxyResponseKey {
error_response: bool, error_response: bool,
} }
pub type UserProxyResponseCache = Cache< // TODO: think about nested maps more. does this need an arc?
UserProxyResponseKey, pub type UserProxyResponseCache = DashMap<UserProxyResponseKey, Arc<ProxyResponseAggregate>>;
Arc<ProxyResponseAggregate>,
hashbrown::hash_map::DefaultHashBuilder,
>;
/// key is the "time bucket's timestamp" (timestamp / period * period) /// key is the "time bucket's timestamp" (timestamp / period * period)
pub type TimeProxyResponseCache = pub type TimeProxyResponseCache =
Cache<TimeBucketTimestamp, UserProxyResponseCache, hashbrown::hash_map::DefaultHashBuilder>; Cache<TimeBucketTimestamp, UserProxyResponseCache, hashbrown::hash_map::DefaultHashBuilder>;
@ -147,6 +146,7 @@ impl StatEmitter {
// this needs to be long enough that there are definitely no outstanding queries // this needs to be long enough that there are definitely no outstanding queries
// TODO: what should the "safe" multiplier be? what if something is late? // TODO: what should the "safe" multiplier be? what if something is late?
// TODO: in most cases this delays more than necessary. think of how to do this without dashmap which might let us proceed
let ttl_seconds = period_seconds * 3; let ttl_seconds = period_seconds * 3;
let aggregated_proxy_responses = CacheBuilder::default() let aggregated_proxy_responses = CacheBuilder::default()
@ -351,18 +351,18 @@ impl StatEmitter {
// TODO: i don't think this works right. maybe do DashMap entry api as the outer variable // TODO: i don't think this works right. maybe do DashMap entry api as the outer variable
let user_cache = self let user_cache = self
.aggregated_proxy_responses .aggregated_proxy_responses
.get_with_by_ref(&stat.period_timestamp, async move { .get_with_by_ref(&stat.period_timestamp, async move { Default::default() })
CacheBuilder::default()
.build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::new())
})
.await; .await;
let key = (stat.rpc_key_id, stat.method, stat.error_response).into(); let key = (stat.rpc_key_id, stat.method, stat.error_response).into();
let user_aggregate = user_cache let user_aggregate = match user_cache.entry(key) {
.get_with(key, async move { Entry::Occupied(x) => x.get().clone(),
Entry::Vacant(y) => {
let histograms = ProxyResponseHistograms::default(); let histograms = ProxyResponseHistograms::default();
// TODO: create a counter here that we use to tell when it is safe to flush these? faster than waiting 3 periods
let aggregate = ProxyResponseAggregate { let aggregate = ProxyResponseAggregate {
period_timestamp: stat.period_timestamp, period_timestamp: stat.period_timestamp,
// start most things at 0 because we add outside this getter // start most things at 0 because we add outside this getter
@ -378,9 +378,16 @@ impl StatEmitter {
histograms: AsyncMutex::new(histograms), histograms: AsyncMutex::new(histograms),
}; };
Arc::new(aggregate) // TODO: store this arc in the map
}) // TODO: does this have a race condition?
.await;
let aggregate = Arc::new(aggregate);
y.insert(aggregate.clone());
aggregate
}
};
// a stat always come from just 1 frontend request // a stat always come from just 1 frontend request
user_aggregate user_aggregate