web3-proxy/web3_proxy/src/frontend/authorization.rs

//! Utilities for authorization of logged in and anonymous users.
use super::errors::FrontendErrorResponse;
use crate::app::{UserKeyData, Web3ProxyApp};
use crate::jsonrpc::JsonRpcRequest;
use crate::user_token::UserBearerToken;
use anyhow::Context;
use axum::headers::authorization::Bearer;
use axum::headers::{Header, Origin, Referer, UserAgent};
use axum::TypedHeader;
use chrono::Utc;
use deferred_rate_limiter::DeferredRateLimitResult;
use entities::{rpc_key, user, user_tier};
use http::HeaderValue;
use ipnet::IpNet;
use redis_rate_limiter::redis::AsyncCommands;
use redis_rate_limiter::RedisRateLimitResult;
use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};
use std::fmt::Display;
use std::sync::atomic::{AtomicBool, AtomicU64};
use std::{net::IpAddr, str::FromStr, sync::Arc};
use tokio::sync::{OwnedSemaphorePermit, Semaphore};
use tokio::time::Instant;
use tracing::{error, instrument, trace};
use ulid::Ulid;
use uuid::Uuid;
/// This lets us use UUID and ULID while we transition to only ULIDs
/// TODO: include the key's description.
#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)]
pub enum RpcSecretKey {
Ulid(Ulid),
Uuid(Uuid),
}
#[derive(Debug)]
pub enum RateLimitResult {
/// contains the IP of the anonymous user
/// TODO: option inside or outside the arc?
AllowedIp(IpAddr, Option<OwnedSemaphorePermit>),
/// contains the key data of an authenticated user
AllowedUser(UserKeyData, Option<OwnedSemaphorePermit>),
/// contains the IP and retry_at of the anonymous user
RateLimitedIp(IpAddr, Option<Instant>),
/// contains the key data and retry_at of an authenticated user key
RateLimitedUser(UserKeyData, Option<Instant>),
/// This key is not in our database. Deny access!
UnknownKey,
}
#[derive(Clone, Debug)]
pub struct AuthorizedKey {
pub ip: IpAddr,
pub origin: Option<Origin>,
pub user_id: u64,
pub rpc_key_id: u64,
// TODO: just use an f32? even an f16 is probably fine
pub log_revert_chance: f64,
}
#[derive(Debug)]
pub struct RequestMetadata {
pub start_datetime: chrono::DateTime<Utc>,
pub start_instant: tokio::time::Instant,
// TODO: better name for this
pub period_seconds: u64,
pub request_bytes: u64,
/// if this is 0, there was a cache_hit
pub backend_requests: AtomicU64,
pub no_servers: AtomicU64,
pub error_response: AtomicBool,
pub response_bytes: AtomicU64,
pub response_millis: AtomicU64,
}
#[derive(Clone, Debug)]
pub enum AuthorizedRequest {
/// Request from this app
Internal,
/// Request from an anonymous IP address
Ip(IpAddr, Option<Origin>),
/// Request from an authenticated and authorized user
User(Option<DatabaseConnection>, AuthorizedKey),
}
impl RequestMetadata {
pub fn new(period_seconds: u64, request: &JsonRpcRequest) -> anyhow::Result<Self> {
// TODO: how can we do this without turning it into a string first. this is going to slow us down!
let request_bytes = serde_json::to_string(request)
.context("finding request size")?
.len()
.try_into()?;
let new = Self {
start_instant: Instant::now(),
start_datetime: Utc::now(),
period_seconds,
request_bytes,
backend_requests: 0.into(),
no_servers: 0.into(),
error_response: false.into(),
response_bytes: 0.into(),
response_millis: 0.into(),
};
Ok(new)
}
}
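// Illustrative note (not in the original file): the Atomic* counters on RequestMetadata
// are meant to be bumped from concurrent tasks without a lock, e.g. something like
// `metadata.backend_requests.fetch_add(1, std::sync::atomic::Ordering::Relaxed)` after each
// backend request, while `start_instant.elapsed()` gives the total time spent on the request.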
impl RpcSecretKey {
pub fn new() -> Self {
Ulid::new().into()
}
}
impl Default for RpcSecretKey {
fn default() -> Self {
Self::new()
}
}
impl Display for RpcSecretKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// TODO: do this without dereferencing
let ulid: Ulid = (*self).into();
ulid.fmt(f)
}
}
impl FromStr for RpcSecretKey {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if let Ok(ulid) = s.parse::<Ulid>() {
Ok(ulid.into())
} else if let Ok(uuid) = s.parse::<Uuid>() {
Ok(uuid.into())
} else {
// TODO: custom error type so that this shows as a 400
Err(anyhow::anyhow!("UserKey was not a ULID or UUID"))
}
}
}
impl From<Ulid> for RpcSecretKey {
fn from(x: Ulid) -> Self {
RpcSecretKey::Ulid(x)
}
}
impl From<Uuid> for RpcSecretKey {
fn from(x: Uuid) -> Self {
RpcSecretKey::Uuid(x)
}
}
impl From<RpcSecretKey> for Ulid {
fn from(x: RpcSecretKey) -> Self {
match x {
RpcSecretKey::Ulid(x) => x,
RpcSecretKey::Uuid(x) => Ulid::from(x.as_u128()),
}
}
}
impl From<RpcSecretKey> for Uuid {
fn from(x: RpcSecretKey) -> Self {
match x {
RpcSecretKey::Ulid(x) => Uuid::from_u128(x.0),
RpcSecretKey::Uuid(x) => x,
}
}
}
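// A minimal sketch (not in the original file) of the conversions above: ULIDs and UUIDs
// are both 128-bit values, so an RpcSecretKey survives a round trip between the two
// representations. The module and test names are hypothetical.
#[cfg(test)]
mod rpc_secret_key_conversion_sketch {
    use super::*;

    #[test]
    fn ulid_uuid_round_trip() {
        let key = RpcSecretKey::new();

        // the same 128 bits, viewed as a UUID and as a ULID
        let as_uuid: Uuid = key.into();
        let as_ulid: Ulid = key.into();
        assert_eq!(as_ulid, Ulid::from(as_uuid.as_u128()));

        // the ULID string form parses back to an equivalent key
        let reparsed: RpcSecretKey = as_ulid.to_string().parse().unwrap();
        assert_eq!(Ulid::from(reparsed), as_ulid);
    }
}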
impl AuthorizedKey {
pub fn try_new(
ip: IpAddr,
origin: Option<Origin>,
referer: Option<Referer>,
user_agent: Option<UserAgent>,
rpc_key_data: UserKeyData,
) -> anyhow::Result<Self> {
// check ip
match &rpc_key_data.allowed_ips {
None => {}
Some(allowed_ips) => {
if !allowed_ips.iter().any(|x| x.contains(&ip)) {
return Err(anyhow::anyhow!("IP is not allowed!"));
}
}
}
// check origin
match (&origin, &rpc_key_data.allowed_origins) {
(None, None) => {}
(Some(_), None) => {}
(None, Some(_)) => return Err(anyhow::anyhow!("Origin required")),
(Some(origin), Some(allowed_origins)) => {
if !allowed_origins.contains(origin) {
return Err(anyhow::anyhow!("IP is not allowed!"));
}
}
}
// check referer
match (referer, &rpc_key_data.allowed_referers) {
(None, None) => {}
(Some(_), None) => {}
(None, Some(_)) => return Err(anyhow::anyhow!("Referer required")),
(Some(referer), Some(allowed_referers)) => {
if !allowed_referers.contains(&referer) {
return Err(anyhow::anyhow!("Referer is not allowed!"));
}
}
}
// check user_agent
match (user_agent, &rpc_key_data.allowed_user_agents) {
(None, None) => {}
(Some(_), None) => {}
(None, Some(_)) => return Err(anyhow::anyhow!("User agent required")),
(Some(user_agent), Some(allowed_user_agents)) => {
if !allowed_user_agents.contains(&user_agent) {
return Err(anyhow::anyhow!("User agent is not allowed!"));
}
}
}
Ok(Self {
ip,
origin,
user_id: rpc_key_data.user_id,
rpc_key_id: rpc_key_data.rpc_key_id,
log_revert_chance: rpc_key_data.log_revert_chance,
})
}
}
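// A minimal sketch (not in the original file) of how try_new enforces the IP allowlist.
// It assumes UserKeyData::default() leaves the other allowlists unset (None); the module,
// test name, and sample addresses are hypothetical.
#[cfg(test)]
mod authorized_key_allowlist_sketch {
    use super::*;

    #[test]
    fn ip_allowlist_is_enforced() {
        let allowed: IpNet = "10.0.0.0/8".parse().unwrap();

        // key data with only an IP allowlist set (assumption: other fields default to None/0)
        let key_data = || UserKeyData {
            allowed_ips: Some(vec![allowed]),
            ..Default::default()
        };

        let inside: IpAddr = "10.1.2.3".parse().unwrap();
        let outside: IpAddr = "192.168.1.1".parse().unwrap();

        assert!(AuthorizedKey::try_new(inside, None, None, None, key_data()).is_ok());
        assert!(AuthorizedKey::try_new(outside, None, None, None, key_data()).is_err());
    }
}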
impl AuthorizedRequest {
/// Only User has a database connection in case it needs to save a revert to the database.
pub fn db_conn(&self) -> Option<&DatabaseConnection> {
match self {
Self::User(x, _) => x.as_ref(),
_ => None,
}
2022-09-23 01:34:43 +03:00
}
}
impl Display for &AuthorizedRequest {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
AuthorizedRequest::Internal => f.write_str("int"),
AuthorizedRequest::Ip(x, _) => f.write_str(&format!("ip-{}", x)),
AuthorizedRequest::User(_, x) => f.write_str(&format!("uk-{}", x.rpc_key_id)),
}
}
}
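// A small sketch (not in the original file) of the compact tags produced by the Display
// impl above. The module, test name, and sample IP are hypothetical.
#[cfg(test)]
mod authorized_request_display_sketch {
    use super::*;

    #[test]
    fn short_tags() {
        let internal = AuthorizedRequest::Internal;
        assert_eq!(format!("{}", &internal), "int");

        let anon = AuthorizedRequest::Ip("10.0.0.1".parse().unwrap(), None);
        assert_eq!(format!("{}", &anon), "ip-10.0.0.1");
    }
}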
pub async fn login_is_authorized(
app: &Web3ProxyApp,
ip: IpAddr,
) -> Result<AuthorizedRequest, FrontendErrorResponse> {
let (ip, _semaphore) = match app.rate_limit_login(ip).await? {
RateLimitResult::AllowedIp(x, semaphore) => (x, semaphore),
RateLimitResult::RateLimitedIp(x, retry_at) => {
return Err(FrontendErrorResponse::RateLimitedIp(x, retry_at));
}
// TODO: don't panic. give the user an error
x => unimplemented!("rate_limit_login shouldn't ever see these: {:?}", x),
};
Ok(AuthorizedRequest::Ip(ip, None))
}
pub async fn ip_is_authorized(
app: &Web3ProxyApp,
ip: IpAddr,
origin: Option<TypedHeader<Origin>>,
) -> Result<(AuthorizedRequest, Option<OwnedSemaphorePermit>), FrontendErrorResponse> {
let origin = origin.map(|x| x.0);
// TODO: i think we could write an `impl From` for this
// TODO: move this to an AuthorizedUser extractor
let (ip, semaphore) = match app.rate_limit_by_ip(ip, origin.as_ref()).await? {
RateLimitResult::AllowedIp(ip, semaphore) => (ip, semaphore),
RateLimitResult::RateLimitedIp(x, retry_at) => {
return Err(FrontendErrorResponse::RateLimitedIp(x, retry_at));
}
// TODO: don't panic. give the user an error
x => unimplemented!("rate_limit_by_ip shouldn't ever see these: {:?}", x),
};
// semaphore won't ever be None, but it's easier if key auth and ip auth work the same way
Ok((AuthorizedRequest::Ip(ip, origin), semaphore))
}
pub async fn key_is_authorized(
app: &Web3ProxyApp,
rpc_key: RpcSecretKey,
ip: IpAddr,
origin: Option<Origin>,
referer: Option<Referer>,
user_agent: Option<UserAgent>,
) -> Result<(AuthorizedRequest, Option<OwnedSemaphorePermit>), FrontendErrorResponse> {
// check the rate limits. error if over the limit
let (user_data, semaphore) = match app.rate_limit_by_key(rpc_key).await? {
RateLimitResult::AllowedUser(x, semaphore) => (x, semaphore),
RateLimitResult::RateLimitedUser(x, retry_at) => {
return Err(FrontendErrorResponse::RateLimitedUser(x, retry_at));
}
RateLimitResult::UnknownKey => return Err(FrontendErrorResponse::UnknownKey),
// TODO: don't panic. give the user an error
x => unimplemented!("rate_limit_by_key shouldn't ever see these: {:?}", x),
};
let authorized_user = AuthorizedKey::try_new(ip, origin, referer, user_agent, user_data)?;
let db_conn = app.db_conn.clone();
Ok((AuthorizedRequest::User(db_conn, authorized_user), semaphore))
}
impl Web3ProxyApp {
/// Limit the number of concurrent requests from the given ip address.
#[instrument(level = "trace")]
pub async fn ip_semaphore(&self, ip: IpAddr) -> anyhow::Result<Option<OwnedSemaphorePermit>> {
if let Some(max_concurrent_requests) = self.config.public_max_concurrent_requests {
let semaphore = self
.ip_semaphores
.get_with(ip, async move {
// TODO: set max_concurrent_requests dynamically based on load?
let s = Semaphore::new(max_concurrent_requests);
Arc::new(s)
})
.await;
// if semaphore.available_permits() == 0 {
// // TODO: concurrent limit hit! emit a stat? less important for anon users
// // TODO: there is probably a race here
// }
let semaphore_permit = semaphore.acquire_owned().await?;
Ok(Some(semaphore_permit))
} else {
Ok(None)
}
}
/// Limit the number of concurrent requests from the given rpc key.
#[instrument(level = "trace")]
pub async fn user_rpc_key_semaphore(
&self,
rpc_key_data: &UserKeyData,
) -> anyhow::Result<Option<OwnedSemaphorePermit>> {
if let Some(max_concurrent_requests) = rpc_key_data.max_concurrent_requests {
let semaphore = self
.rpc_key_semaphores
.get_with(rpc_key_data.rpc_key_id, async move {
let s = Semaphore::new(max_concurrent_requests as usize);
trace!("new semaphore for rpc_key_id {}", rpc_key_data.rpc_key_id);
Arc::new(s)
})
.await;
// if semaphore.available_permits() == 0 {
// // TODO: concurrent limit hit! emit a stat
// }
let semaphore_permit = semaphore.acquire_owned().await?;
Ok(Some(semaphore_permit))
} else {
Ok(None)
}
}
/// Verify that the given bearer token is allowed to take the specified action.
/// This includes concurrent request limiting.
#[instrument(level = "trace")]
pub async fn bearer_is_authorized(
&self,
bearer: Bearer,
) -> anyhow::Result<(user::Model, OwnedSemaphorePermit)> {
// limit concurrent requests
let semaphore = self
.bearer_token_semaphores
.get_with(bearer.token().to_string(), async move {
let s = Semaphore::new(self.config.bearer_token_max_concurrent_requests as usize);
Arc::new(s)
})
.await;
let semaphore_permit = semaphore.acquire_owned().await?;
// get the user id for this bearer token
// TODO: move redis key building to a helper function
let bearer_cache_key = UserBearerToken::try_from(bearer)?.to_string();
// get the attached address from redis for the given auth_token.
let mut redis_conn = self.redis_conn().await?;
let user_id: u64 = redis_conn
.get::<_, Option<u64>>(bearer_cache_key)
.await
.context("fetching bearer cache key from redis")?
.context("unknown bearer token")?;
// turn user id into a user
let db_conn = self.db_conn().context("Getting database connection")?;
let user = user::Entity::find_by_id(user_id)
.one(&db_conn)
.await
.context("fetching user from db by id")?
.context("unknown user id")?;
Ok((user, semaphore_permit))
}
#[instrument(level = "trace")]
pub async fn rate_limit_login(&self, ip: IpAddr) -> anyhow::Result<RateLimitResult> {
// TODO: dry this up with rate_limit_by_key
// TODO: do we want a semaphore here?
if let Some(rate_limiter) = &self.login_rate_limiter {
match rate_limiter.throttle_label(&ip.to_string(), None, 1).await {
Ok(RedisRateLimitResult::Allowed(_)) => Ok(RateLimitResult::AllowedIp(ip, None)),
Ok(RedisRateLimitResult::RetryAt(retry_at, _)) => {
// TODO: set headers so they know when they can retry
// TODO: debug or trace?
// this is too verbose, but a stat might be good
trace!(?ip, "login rate limit exceeded until {:?}", retry_at);
Ok(RateLimitResult::RateLimitedIp(ip, Some(retry_at)))
}
Ok(RedisRateLimitResult::RetryNever) => {
// TODO: i don't think we'll get here. maybe if we ban an IP forever? seems unlikely
trace!(?ip, "login rate limit is 0");
Ok(RateLimitResult::RateLimitedIp(ip, None))
}
Err(err) => {
// internal error, not rate limit being hit
// TODO: i really want axum to do this for us in a single place.
error!(?err, "login rate limiter is unhappy. allowing ip");
Ok(RateLimitResult::AllowedIp(ip, None))
}
}
} else {
// TODO: if no redis, rate limit with a local cache? "warn!" probably isn't right
Ok(RateLimitResult::AllowedIp(ip, None))
}
}
#[instrument(level = "trace")]
pub async fn rate_limit_by_ip(
&self,
ip: IpAddr,
origin: Option<&Origin>,
) -> anyhow::Result<RateLimitResult> {
// TODO: dry this up with rate_limit_by_key
let semaphore = self.ip_semaphore(ip).await?;
if let Some(rate_limiter) = &self.frontend_ip_rate_limiter {
let max_requests_per_period = origin
.map(|origin| {
self.config
.allowed_origin_requests_per_period
.get(&origin.to_string())
.cloned()
})
.unwrap_or_default();
match rate_limiter.throttle(ip, max_requests_per_period, 1).await {
Ok(DeferredRateLimitResult::Allowed) => {
Ok(RateLimitResult::AllowedIp(ip, semaphore))
}
Ok(DeferredRateLimitResult::RetryAt(retry_at)) => {
// TODO: set headers so they know when they can retry
// TODO: debug or trace?
// this is too verbose, but a stat might be good
trace!(?ip, "rate limit exceeded until {:?}", retry_at);
Ok(RateLimitResult::RateLimitedIp(ip, Some(retry_at)))
}
Ok(DeferredRateLimitResult::RetryNever) => {
// TODO: i don't think we'll get here. maybe if we ban an IP forever? seems unlikely
trace!(?ip, "rate limit is 0");
Ok(RateLimitResult::RateLimitedIp(ip, None))
}
Err(err) => {
// internal error, not rate limit being hit
// TODO: i really want axum to do this for us in a single place.
error!(?err, "rate limiter is unhappy. allowing ip");
Ok(RateLimitResult::AllowedIp(ip, semaphore))
}
}
} else {
// TODO: if no redis, rate limit with a local cache? "warn!" probably isn't right
Ok(RateLimitResult::AllowedIp(ip, semaphore))
}
}
// check the local cache for user data, or query the database
#[instrument(level = "trace")]
pub(crate) async fn user_data(
&self,
rpc_secret_key: RpcSecretKey,
) -> anyhow::Result<UserKeyData> {
let user_data: Result<_, Arc<anyhow::Error>> = self
.rpc_secret_key_cache
.try_get_with(rpc_secret_key.into(), async move {
trace!(?rpc_secret_key, "user cache miss");
let db_conn = self.db_conn().context("Getting database connection")?;
let rpc_secret_key: Uuid = rpc_secret_key.into();
// TODO: join the user table to this to return the User? we don't always need it
// TODO: join on secondary users
// TODO: join on user tier
match rpc_key::Entity::find()
.filter(rpc_key::Column::SecretKey.eq(rpc_secret_key))
.filter(rpc_key::Column::Active.eq(true))
.one(&db_conn)
.await?
{
Some(rpc_key_model) => {
// TODO: move these splits into helper functions
// TODO: can we have sea orm handle this for us?
let user_model = user::Entity::find_by_id(rpc_key_model.user_id)
.one(&db_conn)
.await?
.expect("related user");
let user_tier_model =
user_tier::Entity::find_by_id(user_model.user_tier_id)
.one(&db_conn)
.await?
.expect("related user tier");
let allowed_ips: Option<Vec<IpNet>> =
if let Some(allowed_ips) = rpc_key_model.allowed_ips {
let x = allowed_ips
.split(',')
.map(|x| x.parse::<IpNet>())
.collect::<Result<Vec<_>, _>>()?;
Some(x)
} else {
None
};
let allowed_origins: Option<Vec<Origin>> =
if let Some(allowed_origins) = rpc_key_model.allowed_origins {
// TODO: do this without collecting twice?
let x = allowed_origins
.split(',')
.map(HeaderValue::from_str)
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.map(|x| Origin::decode(&mut [x].iter()))
.collect::<Result<Vec<_>, _>>()?;
Some(x)
} else {
None
};
let allowed_referers: Option<Vec<Referer>> =
if let Some(allowed_referers) = rpc_key_model.allowed_referers {
let x = allowed_referers
.split(',')
.map(|x| x.parse::<Referer>())
.collect::<Result<Vec<_>, _>>()?;
Some(x)
} else {
None
};
let allowed_user_agents: Option<Vec<UserAgent>> =
if let Some(allowed_user_agents) = rpc_key_model.allowed_user_agents {
let x: Result<Vec<_>, _> = allowed_user_agents
.split(',')
.map(|x| x.parse::<UserAgent>())
.collect();
Some(x?)
} else {
None
};
// let user_tier_model = user_tier
Ok(UserKeyData {
user_id: rpc_key_model.user_id,
rpc_key_id: rpc_key_model.id,
allowed_ips,
allowed_origins,
allowed_referers,
allowed_user_agents,
log_revert_chance: rpc_key_model.log_revert_chance,
max_concurrent_requests: user_tier_model.max_concurrent_requests,
max_requests_per_period: user_tier_model.max_requests_per_period,
})
}
None => Ok(UserKeyData::default()),
}
})
.await;
// TODO: what's the best way to handle this arc? try_unwrap will not work
user_data.map_err(|err| anyhow::anyhow!(err))
}
#[instrument(level = "trace")]
pub async fn rate_limit_by_key(
&self,
rpc_key: RpcSecretKey,
) -> anyhow::Result<RateLimitResult> {
let user_data = self.user_data(rpc_key).await?;
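// user_data falls back to UserKeyData::default() when the key is not in the database,
// which leaves rpc_key_id at 0, so this is the "unknown key" case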
if user_data.rpc_key_id == 0 {
return Ok(RateLimitResult::UnknownKey);
}
let semaphore = self.user_rpc_key_semaphore(&user_data).await?;
let user_max_requests_per_period = match user_data.max_requests_per_period {
None => {
return Ok(RateLimitResult::AllowedUser(user_data, semaphore));
}
Some(x) => x,
};
// user key is valid. now check rate limits
if let Some(rate_limiter) = &self.frontend_key_rate_limiter {
match rate_limiter
2022-10-27 03:12:42 +03:00
.throttle(rpc_key.into(), Some(user_max_requests_per_period), 1)
2022-09-23 00:03:37 +03:00
.await
{
Ok(DeferredRateLimitResult::Allowed) => {
Ok(RateLimitResult::AllowedUser(user_data, semaphore))
}
Ok(DeferredRateLimitResult::RetryAt(retry_at)) => {
// TODO: set headers so they know when they can retry
// TODO: debug or trace?
// this is too verbose, but a stat might be good
// TODO: keys are secrets! use the id instead
// TODO: emit a stat
trace!(?rpc_key, "rate limit exceeded until {:?}", retry_at);
Ok(RateLimitResult::RateLimitedUser(user_data, Some(retry_at)))
}
Ok(DeferredRateLimitResult::RetryNever) => {
// TODO: keys are secret. don't log them!
trace!(?rpc_key, "rate limit is 0");
// TODO: emit a stat
Ok(RateLimitResult::RateLimitedUser(user_data, None))
}
Err(err) => {
// internal error, not rate limit being hit
// TODO: i really want axum to do this for us in a single place.
error!(?err, "rate limiter is unhappy. allowing ip");
Ok(RateLimitResult::AllowedUser(user_data, semaphore))
}
}
} else {
// TODO: if no redis, rate limit with just a local cache?
Ok(RateLimitResult::AllowedUser(user_data, semaphore))
}
}
}