DRY up user data caching

Bryan Stitt 2022-09-03 19:43:19 +00:00
parent 8225285bb8
commit 1c2f3e1445
3 changed files with 67 additions and 53 deletions

@@ -134,6 +134,9 @@
 - [ ] web3connection3.block(...) might wait forever. be sure to do it safely
 - [ ] search for all "todo!"
 - [ ] replace all `.context("no servers in sync")` with proper error type
+- [ ] when using a bunch of slow public servers, i see "no servers in sync" even when things should be right
+- [ ] i think checking the parents of the heaviest chain works most of the time, but not always
+  - maybe iterate connection heads by total weight? i still think we need to include parent hashes

 ## V1
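
The "total weight" idea in the item above boils down to grouping the head hash each server reports, weighting it by that server's capacity, and picking the heaviest hash. A minimal sketch; `ConnectionHead` and `soft_limit` are hypothetical names, not web3-proxy's actual types, and the real check would still need the parent hashes the TODO mentions:

```rust
use std::collections::HashMap;

// hypothetical: one entry per backend connection
struct ConnectionHead {
    head_hash: [u8; 32],
    soft_limit: u32,
}

// sum each head hash's soft limits and return the heaviest hash
fn heaviest_head(heads: &[ConnectionHead]) -> Option<[u8; 32]> {
    let mut weights: HashMap<[u8; 32], u64> = HashMap::new();
    for head in heads {
        *weights.entry(head.head_hash).or_default() += head.soft_limit as u64;
    }
    weights
        .into_iter()
        .max_by_key(|(_, weight)| *weight)
        .map(|(hash, _)| hash)
}
```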
@@ -179,6 +182,7 @@
 - [ ] refactor from_anyhow_error to have consistent error codes and http codes. maybe implement the Error trait
 - [ ] when handling errors from axum parsing the Json...Enum, the errors don't get wrapped in json. i think we need a Layer
 - [ ] don't "unwrap" anywhere. give proper errors
+- [ ] tool to revoke bearer tokens that also clears redis

 new endpoints for users:
 - [x] GET /u/:api_key
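
The revocation tool in the item above mostly amounts to deleting the `bearer:{token}` key that post_login writes (see the last hunk below). A sketch against the plain `redis` crate; the connection type and helper name are assumptions, not the app's actual wiring:

```rust
use redis::AsyncCommands;

// hypothetical helper: invalidating a token is just removing its redis key,
// since logins are looked up by the `bearer:{token}` key that post_login sets
async fn revoke_bearer_token(
    redis_conn: &mut redis::aio::Connection,
    bearer_token: &str,
) -> anyhow::Result<()> {
    let bearer_key = format!("bearer:{}", bearer_token);
    // DEL returns the number of keys removed, so revoking twice is harmless
    let _removed: u64 = redis_conn.del(bearer_key).await?;
    Ok(())
}
```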

@@ -1,5 +1,6 @@
 use super::errors::{anyhow_error_into_response, FrontendErrorResponse};
 use crate::app::{UserCacheValue, Web3ProxyApp};
+use anyhow::Context;
 use axum::response::Response;
 use derive_more::From;
 use entities::user_keys;
@@ -151,25 +152,9 @@ impl Web3ProxyApp {
         Ok(RateLimitResult::AllowedIp(ip))
     }

-    pub async fn rate_limit_by_key(&self, user_key: Uuid) -> anyhow::Result<RateLimitResult> {
-        // check the local cache
-        let user_data = if let Some(cached_user) = self.user_cache.read().get(&user_key) {
-            // TODO: also include the time this value was last checked! otherwise we cache forever!
-            if cached_user.expires_at < Instant::now() {
-                // old record found
-                None
-            } else {
-                // this key was active in the database recently
-                Some(*cached_user)
-            }
-        } else {
-            // cache miss
-            None
-        };
-
-        // if cache was empty, check the database
-        let user_data = if user_data.is_none() {
-            if let Some(db) = &self.db_conn {
+    pub(crate) async fn cache_user_data(&self, user_key: Uuid) -> anyhow::Result<UserCacheValue> {
+        let db = self.db_conn.as_ref().context("no database")?;
+
         /// helper enum for query just a few columns instead of the entire table
         #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
         enum QueryAs {
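
For context on the helper enum where this hunk cuts off: sea-orm pairs a `DeriveColumn` enum with `into_values` to select just those columns instead of the whole row. A sketch of that pattern; the column names, field types, and function name are assumptions for illustration, not the commit's exact query:

```rust
use anyhow::Context;
use entities::user_keys;
use sea_orm::{
    ColumnTrait, DatabaseConnection, DeriveColumn, EntityTrait, EnumIter, QueryFilter,
    QuerySelect,
};
use uuid::Uuid;

/// helper enum so the query pulls two columns instead of the entire row
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryAs {
    UserId,
    RequestsPerMinute,
}

// assumed columns: look up a key's owner and rate limit by its api_key
async fn partial_user_query(
    db: &DatabaseConnection,
    user_key: Uuid,
) -> anyhow::Result<(u64, u32)> {
    user_keys::Entity::find()
        .select_only()
        .column_as(user_keys::Column::UserId, QueryAs::UserId)
        .column_as(user_keys::Column::RequestsPerMinute, QueryAs::RequestsPerMinute)
        .filter(user_keys::Column::ApiKey.eq(user_key))
        .into_values::<_, QueryAs>()
        .one(db)
        .await?
        .context("unknown api key")
}
```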
@@ -212,14 +197,34 @@ impl Web3ProxyApp {
         // save for the next run
         self.user_cache.write().insert(user_key, user_data);

-            user_data
-        } else {
-            // TODO: rate limit with only local caches?
-            unimplemented!("no cache hit and no database connection")
-        }
+        Ok(user_data)
+    }
+
+    pub async fn rate_limit_by_key(&self, user_key: Uuid) -> anyhow::Result<RateLimitResult> {
+        // check the local cache
+        let user_data = if let Some(cached_user) = self.user_cache.read().get(&user_key) {
+            // TODO: also include the time this value was last checked! otherwise we cache forever!
+            if cached_user.expires_at < Instant::now() {
+                // old record found
+                None
+            } else {
+                // this key was active in the database recently
+                Some(*cached_user)
+            }
+        } else {
+            // cache miss
+            None
+        };
+
+        // if cache was empty, check the database
+        // TODO: i think there is a cleaner way to do this
+        let user_data = if user_data.is_none() {
+            self.cache_user_data(user_key)
+                .await
+                .context("no user data")?
         } else {
             // unwrap the cache's result
-            user_data.unwrap()
+            user_data.context("no user data")?
         };

         if user_data.user_id == 0 {
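
One possible answer to the `// TODO: i think there is a cleaner way to do this` above: treat an expired entry as a miss while reading, so the cache check and the database fallback collapse into one match. A fragment-level sketch that assumes the same `user_cache`, `cache_user_data`, and `UserCacheValue` as the diff, inside the same impl:

```rust
pub async fn rate_limit_by_key(&self, user_key: Uuid) -> anyhow::Result<RateLimitResult> {
    // an old record is the same as a cache miss, so filter it out while reading
    let cached = self
        .user_cache
        .read()
        .get(&user_key)
        .copied()
        .filter(|cached_user| cached_user.expires_at >= Instant::now());

    let user_data = match cached {
        Some(user_data) => user_data,
        // cache empty or stale: fetch from the database and refill the cache
        None => self.cache_user_data(user_key).await.context("no user data")?,
    };

    // ... rate limiting continues as in the hunk (the user_data.user_id == 0 check, etc.)
    todo!("rest of the function unchanged")
}
```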

@@ -178,7 +178,7 @@ pub async fn post_login(
         .await
         .unwrap();

-    let (u_id, response) = match u {
+    let (u, uk, response) = match u {
         None => {
             let txn = db.begin().await?;
@@ -213,7 +213,7 @@ pub async fn post_login(
             let response = (StatusCode::CREATED, Json(response_json)).into_response();

-            (u.id, response)
+            (u, uk, response)
         }
         Some(u) => {
             // the user is already registered
@@ -232,17 +232,22 @@ pub async fn post_login(
             let response = (StatusCode::OK, Json(response_json)).into_response();

-            (u.id, response)
+            (u, uk, response)
         }
     };

     // TODO: set a session cookie with the bearer token?

     // save the bearer token in redis with a long (7 or 30 day?) expiry. or in database?
     let mut redis_conn = app.redis_conn().await?;

     let bearer_key = format!("bearer:{}", bearer_token);

-    redis_conn.set(bearer_key, u_id.to_string()).await?;
+    redis_conn.set(bearer_key, u.id.to_string()).await?;
+
+    // save the user data in redis with a short expiry
+    // TODO: we already have uk, so this could be more efficient. it works for now
+    app.cache_user_data(uk.api_key).await?;

     Ok(response)
 }
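
The hunk stores the bearer token with a plain SET, while the comment above it wants a 7 or 30 day expiry. A sketch of the expiring variant using SETEX; the 30-day TTL, helper name, and plain `redis` connection type are assumptions, not decisions from this commit:

```rust
use redis::AsyncCommands;

// assumed TTL; the commit only floats "7 or 30 day?" in a comment
const BEARER_TOKEN_TTL_SECONDS: usize = 30 * 24 * 60 * 60;

async fn save_bearer_token(
    redis_conn: &mut redis::aio::Connection,
    bearer_token: &str,
    user_id: u64,
) -> anyhow::Result<()> {
    let bearer_key = format!("bearer:{}", bearer_token);
    // SETEX stores the value and its expiry atomically, so stale tokens revoke themselves
    let _: () = redis_conn
        .set_ex(bearer_key, user_id.to_string(), BEARER_TOKEN_TTL_SECONDS)
        .await?;
    Ok(())
}
```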