no need for async lock

Bryan Stitt 2022-08-10 03:38:04 +00:00
parent 80a3c74120
commit 996d1fb11b
3 changed files with 6 additions and 6 deletions

View File

@@ -25,7 +25,6 @@ use std::str::FromStr;
 use std::sync::atomic::{self, AtomicUsize};
 use std::sync::Arc;
 use std::time::Duration;
-use tokio::sync::RwLock as AsyncRwLock;
 use tokio::sync::{broadcast, watch};
 use tokio::task::JoinHandle;
 use tokio::time::{timeout, Instant};
@@ -142,7 +141,7 @@ pub struct Web3ProxyApp {
     head_block_receiver: watch::Receiver<Arc<Block<TxHash>>>,
     pending_tx_sender: broadcast::Sender<TxState>,
     pending_transactions: Arc<DashMap<TxHash, TxState>>,
-    user_cache: AsyncRwLock<FifoCountMap<Uuid, UserCacheValue>>,
+    user_cache: RwLock<FifoCountMap<Uuid, UserCacheValue>>,
     redis_pool: Option<RedisPool>,
     rate_limiter: Option<RedisCell>,
     db_conn: Option<sea_orm::DatabaseConnection>,
@@ -172,7 +171,7 @@ impl Web3ProxyApp {
         self.redis_pool.as_ref()
     }

-    pub fn user_cache(&self) -> &AsyncRwLock<FifoCountMap<Uuid, UserCacheValue>> {
+    pub fn user_cache(&self) -> &RwLock<FifoCountMap<Uuid, UserCacheValue>> {
         &self.user_cache
     }
@@ -327,7 +326,7 @@ impl Web3ProxyApp {
             redis_pool,
             // TODO: make the size configurable
            // TODO: why does this need to be async but the other one doesn't?
-            user_cache: AsyncRwLock::new(FifoCountMap::new(1_000)),
+            user_cache: RwLock::new(FifoCountMap::new(1_000)),
         };

         let app = Arc::new(app);
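
The user cache guard is only ever taken and dropped within a single synchronous expression, never held across an .await, so a blocking RwLock is enough and skips the extra bookkeeping of tokio's async lock. A minimal sketch of the pattern, assuming a parking_lot-style RwLock and hypothetical stand-ins for the crate's Uuid key and UserCacheValue:

    use std::collections::HashMap;
    use std::sync::Arc;
    use parking_lot::RwLock; // sync lock: read()/write() return guards without .await

    // hypothetical stand-ins for the crate's Uuid key and UserCacheValue
    type UserKey = u64;
    #[derive(Clone)]
    struct UserCacheValue {
        requests_per_minute: u64,
    }

    async fn cached_rate_limit(
        user_cache: &Arc<RwLock<HashMap<UserKey, UserCacheValue>>>,
        user_key: UserKey,
    ) -> Option<u64> {
        // the guard is created and dropped inside this one expression,
        // never held across an .await, so a blocking lock is safe here
        let cached_user = user_cache.read().get(&user_key).cloned();
        cached_user.map(|u| u.requests_per_minute)
    }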

View File

@@ -78,6 +78,7 @@ pub struct Web3Connection {
     /// keep track of currently open requests. We sort on this
     active_requests: AtomicU32,
     /// provider is in a RwLock so that we can replace it if re-connecting
+    /// it is an async lock because we hold it open across awaits
     provider: AsyncRwLock<Option<Arc<Web3Provider>>>,
     /// rate limits are stored in a central redis so that multiple proxies can share their rate limits
     hard_limit: Option<redis_cell_client::RedisCell>,
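
The provider lock stays async because its guard is held while awaiting reconnects; a blocking guard held across an .await could stall the executor thread. A hedged sketch of that pattern, with Web3Provider reduced to a placeholder and an illustrative backoff delay:

    use std::sync::Arc;
    use std::time::Duration;
    use tokio::sync::RwLock as AsyncRwLock;

    struct Web3Provider; // placeholder for the real provider enum

    async fn reconnect(provider: &AsyncRwLock<Option<Arc<Web3Provider>>>) {
        // the write guard lives across the .await below, which is exactly
        // the case that requires tokio's async RwLock
        let mut guard = provider.write().await;
        *guard = None; // drop the old connection while we rebuild it
        tokio::time::sleep(Duration::from_millis(100)).await; // illustrative backoff
        *guard = Some(Arc::new(Web3Provider));
    }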

View File

@@ -55,7 +55,7 @@ impl Web3ProxyApp {
         let user_cache = self.user_cache();

         // check the local cache
-        let user_data = if let Some(cached_user) = user_cache.read().await.get(&user_key) {
+        let user_data = if let Some(cached_user) = user_cache.read().get(&user_key) {
             // TODO: also include the time this value was last checked! otherwise we cache forever!
             if cached_user.expires_at < Instant::now() {
                 // old record found
@@ -105,7 +105,7 @@ impl Web3ProxyApp {
            };

            // save for the next run
-            user_cache.write().await.insert(user_key, user_data);
+            user_cache.write().insert(user_key, user_data);

            user_data
        } else {
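
The write path drops the .await for the same reason: the guard is acquired, used, and released synchronously within one statement. A small sketch of the save step, again with hypothetical stand-in types and an expires_at field along the lines of the TODO in the read path:

    use std::collections::HashMap;
    use std::time::{Duration, Instant};
    use parking_lot::RwLock;

    struct UserCacheValue {
        requests_per_minute: u64,
        expires_at: Instant, // lets stale entries be refreshed instead of cached forever
    }

    fn save_user(cache: &RwLock<HashMap<u64, UserCacheValue>>, user_key: u64, rpm: u64) {
        let user_data = UserCacheValue {
            requests_per_minute: rpm,
            expires_at: Instant::now() + Duration::from_secs(60), // illustrative TTL
        };
        // no .await: the write guard is taken and released in this one statement
        cache.write().insert(user_key, user_data);
    }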