//! Utilities for authorization of logged in and anonymous users.

use super::errors::{Web3ProxyError, Web3ProxyResult};
use super::rpc_proxy_ws::ProxyMode;
use crate::app::{AuthorizationChecks, Web3ProxyApp, APP_USER_AGENT};
use crate::rpcs::one::Web3Rpc;
use crate::user_token::UserBearerToken;
use anyhow::Context;
use axum::headers::authorization::Bearer;
use axum::headers::{Header, Origin, Referer, UserAgent};
use chrono::Utc;
use deferred_rate_limiter::DeferredRateLimitResult;
use entities::sea_orm_active_enums::TrackingLevel;
use entities::{login, rpc_key, user, user_tier};
use ethers::types::Bytes;
use ethers::utils::keccak256;
use futures::TryFutureExt;
use hashbrown::HashMap;
use http::HeaderValue;
use ipnet::IpNet;
use log::{error, warn};
use migration::sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};
use parking_lot::Mutex;
use redis_rate_limiter::redis::AsyncCommands;
use redis_rate_limiter::RedisRateLimitResult;
use std::fmt::Display;
use std::sync::atomic::{AtomicBool, AtomicU64};
use std::{net::IpAddr, str::FromStr, sync::Arc};
use tokio::sync::{OwnedSemaphorePermit, Semaphore};
use tokio::time::Instant;
use ulid::Ulid;
use uuid::Uuid;

/// This lets us use UUID and ULID while we transition to only ULIDs
/// TODO: include the key's description.
#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)]
pub enum RpcSecretKey {
    Ulid(Ulid),
    Uuid(Uuid),
}

/// TODO: should this have IpAddr and Origin or AuthorizationChecks?
#[derive(Debug)]
pub enum RateLimitResult {
    Allowed(Authorization, Option<OwnedSemaphorePermit>),
    RateLimited(
        Authorization,
        /// when their rate limit resets and they can try more requests
        Option<Instant>,
    ),
    /// This key is not in our database. Deny access!
    UnknownKey,
}

#[derive(Clone, Debug)]
pub enum AuthorizationType {
    Internal,
    Frontend,
}

/// TODO: include the authorization checks in this?
#[derive(Clone, Debug)]
pub struct Authorization {
    pub checks: AuthorizationChecks,
    // TODO: instead of the conn, have a channel?
    pub db_conn: Option<DatabaseConnection>,
    pub ip: IpAddr,
    pub origin: Option<Origin>,
    pub referer: Option<Referer>,
    pub user_agent: Option<UserAgent>,
    pub authorization_type: AuthorizationType,
}

#[derive(Debug)]
pub struct RequestMetadata {
    pub start_instant: tokio::time::Instant,
    pub request_bytes: u64,
    // TODO: do we need atomics? seems like we should be able to pass a &mut around
    // TODO: "archive" isn't really a boolean.
    pub archive_request: AtomicBool,
    /// if this is empty, there was a cache_hit
    pub backend_requests: Mutex<Vec<Arc<Web3Rpc>>>,
    pub no_servers: AtomicU64,
    pub error_response: AtomicBool,
    pub response_bytes: AtomicU64,
    pub response_millis: AtomicU64,
    pub response_from_backup_rpc: AtomicBool,
}

impl RequestMetadata {
    pub fn new(request_bytes: usize) -> Self {
        // TODO: how can we do this without turning it into a string first. this is going to slow us down!
        let request_bytes = request_bytes as u64;

        Self {
            start_instant: Instant::now(),
            request_bytes,
            archive_request: false.into(),
            backend_requests: Default::default(),
            no_servers: 0.into(),
            error_response: false.into(),
            response_bytes: 0.into(),
            response_millis: 0.into(),
            response_from_backup_rpc: false.into(),
        }
    }
}

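// Added illustration (a sketch, not in the upstream file): the counters are
// atomics so that many tasks can update one shared `Arc<RequestMetadata>`
// without holding a lock.
#[cfg(test)]
mod request_metadata_example {
    use super::*;
    use std::sync::atomic::Ordering;

    #[test]
    fn shared_metadata_updates_without_a_lock() {
        let metadata = Arc::new(RequestMetadata::new(100));

        // any task holding a clone of the Arc can record results
        metadata.response_bytes.fetch_add(512, Ordering::AcqRel);
        metadata.error_response.store(true, Ordering::Release);

        assert_eq!(metadata.request_bytes, 100);
        assert_eq!(metadata.response_bytes.load(Ordering::Acquire), 512);
    }
}
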
impl RpcSecretKey {
    pub fn new() -> Self {
        Ulid::new().into()
    }
}

impl Default for RpcSecretKey {
    fn default() -> Self {
        Self::new()
    }
}

impl Display for RpcSecretKey {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // TODO: do this without dereferencing
        let ulid: Ulid = (*self).into();

        ulid.fmt(f)
    }
}

impl FromStr for RpcSecretKey {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if let Ok(ulid) = s.parse::<Ulid>() {
            Ok(ulid.into())
        } else if let Ok(uuid) = s.parse::<Uuid>() {
            Ok(uuid.into())
        } else {
            // TODO: custom error type so that this shows as a 400
            Err(anyhow::anyhow!("UserKey was not a ULID or UUID"))
        }
    }
}

impl From<Ulid> for RpcSecretKey {
    fn from(x: Ulid) -> Self {
        RpcSecretKey::Ulid(x)
    }
}

impl From<Uuid> for RpcSecretKey {
    fn from(x: Uuid) -> Self {
        RpcSecretKey::Uuid(x)
    }
}

impl From<RpcSecretKey> for Ulid {
    fn from(x: RpcSecretKey) -> Self {
        match x {
            RpcSecretKey::Ulid(x) => x,
            RpcSecretKey::Uuid(x) => Ulid::from(x.as_u128()),
        }
    }
}

impl From<RpcSecretKey> for Uuid {
    fn from(x: RpcSecretKey) -> Self {
        match x {
            RpcSecretKey::Ulid(x) => Uuid::from_u128(x.0),
            RpcSecretKey::Uuid(x) => x,
        }
    }
}

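// Added sanity check (a sketch, not in the upstream file): the ULID and UUID
// forms of a key wrap the same 128 bits, so the conversions above and
// `FromStr` must round-trip cleanly between the two representations.
#[cfg(test)]
mod rpc_secret_key_example {
    use super::*;

    #[test]
    fn ulid_and_uuid_forms_roundtrip() {
        let key = RpcSecretKey::new();

        let as_ulid: Ulid = key.into();
        let as_uuid: Uuid = key.into();

        // both representations carry the same 128-bit value
        assert_eq!(as_ulid.0, as_uuid.as_u128());

        // parsing either string form recovers the same key
        let from_ulid: RpcSecretKey = as_ulid.to_string().parse().unwrap();
        let from_uuid: RpcSecretKey = as_uuid.to_string().parse().unwrap();
        assert_eq!(Uuid::from(from_ulid), Uuid::from(from_uuid));
    }
}
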
impl Authorization {
    pub fn internal(db_conn: Option<DatabaseConnection>) -> Web3ProxyResult<Self> {
        let authorization_checks = AuthorizationChecks {
            // any error logs on a local (internal) query are likely problems. log them all
            log_revert_chance: 1.0,
            tracking_level: TrackingLevel::Detailed,
            // default for everything else should be fine. we don't have a user_id or ip to give
            ..Default::default()
        };

        let ip: IpAddr = "127.0.0.1".parse().expect("localhost should always parse");
        let user_agent = UserAgent::from_str(APP_USER_AGENT).ok();

        Self::try_new(
            authorization_checks,
            db_conn,
            ip,
            None,
            None,
            user_agent,
            AuthorizationType::Internal,
        )
    }

    pub fn external(
        allowed_origin_requests_per_period: &HashMap<String, u64>,
        db_conn: Option<DatabaseConnection>,
        ip: IpAddr,
        origin: Option<Origin>,
        proxy_mode: ProxyMode,
        referer: Option<Referer>,
        user_agent: Option<UserAgent>,
    ) -> Web3ProxyResult<Self> {
        // some origins can override max_requests_per_period for anon users
        let max_requests_per_period = origin
            .as_ref()
            .map(|origin| {
                allowed_origin_requests_per_period
                    .get(&origin.to_string())
                    .cloned()
            })
            .unwrap_or_default();

        let authorization_checks = AuthorizationChecks {
            max_requests_per_period,
            proxy_mode,
            tracking_level: TrackingLevel::Detailed,
            ..Default::default()
        };

        Self::try_new(
            authorization_checks,
            db_conn,
            ip,
            origin,
            referer,
            user_agent,
            AuthorizationType::Frontend,
        )
    }

    #[allow(clippy::too_many_arguments)]
    pub fn try_new(
        authorization_checks: AuthorizationChecks,
        db_conn: Option<DatabaseConnection>,
        ip: IpAddr,
        origin: Option<Origin>,
        referer: Option<Referer>,
        user_agent: Option<UserAgent>,
        authorization_type: AuthorizationType,
    ) -> Web3ProxyResult<Self> {
        // check ip
        match &authorization_checks.allowed_ips {
            None => {}
            Some(allowed_ips) => {
                if !allowed_ips.iter().any(|x| x.contains(&ip)) {
                    return Err(Web3ProxyError::IpNotAllowed(ip));
                }
            }
        }

        // check origin
        match (&origin, &authorization_checks.allowed_origins) {
            (None, None) => {}
            (Some(_), None) => {}
            (None, Some(_)) => return Err(Web3ProxyError::OriginRequired),
            (Some(origin), Some(allowed_origins)) => {
                if !allowed_origins.contains(origin) {
                    return Err(Web3ProxyError::OriginNotAllowed(origin.clone()));
                }
            }
        }

        // check referer
        match (&referer, &authorization_checks.allowed_referers) {
            (None, None) => {}
            (Some(_), None) => {}
            (None, Some(_)) => return Err(Web3ProxyError::RefererRequired),
            (Some(referer), Some(allowed_referers)) => {
                if !allowed_referers.contains(referer) {
                    return Err(Web3ProxyError::RefererNotAllowed(referer.clone()));
                }
            }
        }

        // check user_agent
        match (&user_agent, &authorization_checks.allowed_user_agents) {
            (None, None) => {}
            (Some(_), None) => {}
            (None, Some(_)) => return Err(Web3ProxyError::UserAgentRequired),
            (Some(user_agent), Some(allowed_user_agents)) => {
                if !allowed_user_agents.contains(user_agent) {
                    return Err(Web3ProxyError::UserAgentNotAllowed(user_agent.clone()));
                }
            }
        }

        Ok(Self {
            checks: authorization_checks,
            db_conn,
            ip,
            origin,
            referer,
            user_agent,
            authorization_type,
        })
    }
}

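// Added illustration (a sketch; assumes `AuthorizationChecks` keeps these
// fields public, which the struct literals above rely on): `try_new` rejects
// an IP that falls outside a key's allow-list.
#[cfg(test)]
mod authorization_example {
    use super::*;

    #[test]
    fn ip_allow_list_is_enforced() {
        let checks = AuthorizationChecks {
            allowed_ips: Some(vec!["10.0.0.0/8".parse().unwrap()]),
            ..Default::default()
        };

        // 1.2.3.4 is not inside 10.0.0.0/8, so authorization must fail
        let outside_ip: IpAddr = "1.2.3.4".parse().unwrap();

        let denied = Authorization::try_new(
            checks,
            None,
            outside_ip,
            None,
            None,
            None,
            AuthorizationType::Internal,
        );

        assert!(matches!(denied, Err(Web3ProxyError::IpNotAllowed(_))));
    }
}
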
/// rate limit logins only by ip.
/// we want all origins and referers and user agents to count together
pub async fn login_is_authorized(app: &Web3ProxyApp, ip: IpAddr) -> Web3ProxyResult<Authorization> {
    let authorization = match app.rate_limit_login(ip, ProxyMode::Best).await? {
        RateLimitResult::Allowed(authorization, None) => authorization,
        RateLimitResult::RateLimited(authorization, retry_at) => {
            return Err(Web3ProxyError::RateLimited(authorization, retry_at));
        }
        // TODO: don't panic. give the user an error
        x => unimplemented!("rate_limit_login shouldn't ever see these: {:?}", x),
    };

    Ok(authorization)
}

/// semaphore won't ever be None, but it's easier if key auth and ip auth work the same way
pub async fn ip_is_authorized(
    app: &Arc<Web3ProxyApp>,
    ip: IpAddr,
    origin: Option<Origin>,
    proxy_mode: ProxyMode,
) -> Web3ProxyResult<(Authorization, Option<OwnedSemaphorePermit>)> {
    // TODO: i think we could write an `impl From` for this
    // TODO: move this to an AuthorizedUser extractor
    let (authorization, semaphore) = match app
        .rate_limit_by_ip(
            &app.config.allowed_origin_requests_per_period,
            ip,
            origin,
            proxy_mode,
        )
        .await?
    {
        RateLimitResult::Allowed(authorization, semaphore) => (authorization, semaphore),
        RateLimitResult::RateLimited(authorization, retry_at) => {
            // TODO: in the background, emit a stat (maybe simplest to use a channel?)
            return Err(Web3ProxyError::RateLimited(authorization, retry_at));
        }
        // TODO: don't panic. give the user an error
        x => unimplemented!("rate_limit_by_ip shouldn't ever see these: {:?}", x),
    };

    // in the background, add the ip to a recent_users map
    if app.config.public_recent_ips_salt.is_some() {
        let app = app.clone();
        let f = async move {
            let now = Utc::now().timestamp();

            if let Some(mut redis_conn) = app.redis_conn().await? {
                let salt = app
                    .config
                    .public_recent_ips_salt
                    .as_ref()
                    .expect("public_recent_ips_salt must exist in here");

                let salted_ip = format!("{}:{}", salt, ip);

                let hashed_ip = Bytes::from(keccak256(salted_ip.as_bytes()));

                let recent_ip_key = format!("recent_users:ip:{}", app.config.chain_id);

                redis_conn
                    .zadd(recent_ip_key, hashed_ip.to_string(), now)
                    .await?;
            }

            Ok::<_, anyhow::Error>(())
        }
        .map_err(|err| {
            warn!("background update of recent_users:ip failed: {}", err);

            err
        });

        tokio::spawn(f);
    }

    Ok((authorization, semaphore))
}

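// Added illustration (a sketch, not in the upstream file): the "recent users"
// entries written above are salted keccak hashes, so the redis sorted set
// never stores a raw IP. A standalone sketch of that construction:
#[cfg(test)]
mod recent_users_hash_example {
    use super::*;

    #[test]
    fn salted_ip_hash_is_deterministic_per_salt() {
        let hash = |salt: &str, ip: &str| {
            let salted_ip = format!("{}:{}", salt, ip);
            Bytes::from(keccak256(salted_ip.as_bytes())).to_string()
        };

        // the same salt and ip always map to the same opaque zset member
        assert_eq!(hash("salt-a", "10.1.2.3"), hash("salt-a", "10.1.2.3"));

        // a different salt makes the member unlinkable to the first one
        assert_ne!(hash("salt-a", "10.1.2.3"), hash("salt-b", "10.1.2.3"));
    }
}
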
/// like app.rate_limit_by_rpc_key but converts to a Web3ProxyError.
pub async fn key_is_authorized(
    app: &Arc<Web3ProxyApp>,
    rpc_key: RpcSecretKey,
    ip: IpAddr,
    origin: Option<Origin>,
    proxy_mode: ProxyMode,
    referer: Option<Referer>,
    user_agent: Option<UserAgent>,
) -> Web3ProxyResult<(Authorization, Option<OwnedSemaphorePermit>)> {
    // check the rate limits. error if over the limit
    // TODO: i think this should be in an "impl From" or "impl Into"
    let (authorization, semaphore) = match app
        .rate_limit_by_rpc_key(ip, origin, proxy_mode, referer, rpc_key, user_agent)
        .await?
    {
        RateLimitResult::Allowed(authorization, semaphore) => (authorization, semaphore),
        RateLimitResult::RateLimited(authorization, retry_at) => {
            return Err(Web3ProxyError::RateLimited(authorization, retry_at));
        }
        RateLimitResult::UnknownKey => return Err(Web3ProxyError::UnknownKey),
    };

    // TODO: DRY and maybe optimize the hashing
    // in the background, add the user id to a recent_users map
    if app.config.public_recent_ips_salt.is_some() {
        let app = app.clone();
        let user_id = authorization.checks.user_id;
        let f = async move {
            let now = Utc::now().timestamp();

            if let Some(mut redis_conn) = app.redis_conn().await? {
                let salt = app
                    .config
                    .public_recent_ips_salt
                    .as_ref()
                    .expect("public_recent_ips_salt must exist in here");

                let salted_user_id = format!("{}:{}", salt, user_id);

                let hashed_user_id = Bytes::from(keccak256(salted_user_id.as_bytes()));

                let recent_user_id_key = format!("recent_users:id:{}", app.config.chain_id);

                redis_conn
                    .zadd(recent_user_id_key, hashed_user_id.to_string(), now)
                    .await?;
            }

            Ok::<_, anyhow::Error>(())
        }
        .map_err(|err| {
            warn!("background update of recent_users:id failed: {}", err);

            err
        });

        tokio::spawn(f);
    }

    Ok((authorization, semaphore))
}

impl Web3ProxyApp {
    /// Limit the number of concurrent requests from the given ip address.
    pub async fn ip_semaphore(&self, ip: IpAddr) -> Web3ProxyResult<Option<OwnedSemaphorePermit>> {
        if let Some(max_concurrent_requests) = self.config.public_max_concurrent_requests {
            let semaphore = self
                .ip_semaphores
                .get_with(ip, async move {
                    // TODO: set max_concurrent_requests dynamically based on load?
                    let s = Semaphore::new(max_concurrent_requests);
                    Arc::new(s)
                })
                .await;

            // if semaphore.available_permits() == 0 {
            //     // TODO: concurrent limit hit! emit a stat? less important for anon users
            //     // TODO: there is probably a race here
            // }

            let semaphore_permit = semaphore.acquire_owned().await?;

            Ok(Some(semaphore_permit))
        } else {
            Ok(None)
        }
    }

    /// Limit the number of concurrent requests from the given rpc key.
    pub async fn registered_user_semaphore(
        &self,
        authorization_checks: &AuthorizationChecks,
    ) -> anyhow::Result<Option<OwnedSemaphorePermit>> {
        if let Some(max_concurrent_requests) = authorization_checks.max_concurrent_requests {
            let user_id = authorization_checks
                .user_id
                .try_into()
                .context("user ids should always be non-zero")?;

            let semaphore = self
                .registered_user_semaphores
                .get_with(user_id, async move {
                    let s = Semaphore::new(max_concurrent_requests as usize);
                    // trace!("new semaphore for user_id {}", user_id);
                    Arc::new(s)
                })
                .await;

            // if semaphore.available_permits() == 0 {
            //     // TODO: concurrent limit hit! emit a stat? this has a race condition though.
            //     // TODO: maybe have a stat on how long we wait to acquire the semaphore instead?
            // }

            let semaphore_permit = semaphore.acquire_owned().await?;

            Ok(Some(semaphore_permit))
        } else {
            // unlimited requests allowed
            Ok(None)
        }
    }

    /// Verify that the given bearer token and address are allowed to take the specified action.
    /// This includes concurrent request limiting.
    pub async fn bearer_is_authorized(
        &self,
        bearer: Bearer,
    ) -> Web3ProxyResult<(user::Model, OwnedSemaphorePermit)> {
        // get the user id for this bearer token
        let user_bearer_token = UserBearerToken::try_from(bearer)?;

        // limit concurrent requests
        let semaphore = self
            .bearer_token_semaphores
            .get_with(user_bearer_token.clone(), async move {
                let s = Semaphore::new(self.config.bearer_token_max_concurrent_requests as usize);
                Arc::new(s)
            })
            .await;

        let semaphore_permit = semaphore.acquire_owned().await?;

        // get the attached address from the database for the given auth_token.
        let db_replica = self
            .db_replica()
            .context("checking if bearer token is authorized")?;

        let user_bearer_uuid: Uuid = user_bearer_token.into();

        let user = user::Entity::find()
            .left_join(login::Entity)
            .filter(login::Column::BearerToken.eq(user_bearer_uuid))
            .one(db_replica.conn())
            .await
            .context("fetching user from db by bearer token")?
            .context("unknown bearer token")?;

        Ok((user, semaphore_permit))
    }

    pub async fn rate_limit_login(
        &self,
        ip: IpAddr,
        proxy_mode: ProxyMode,
    ) -> Web3ProxyResult<RateLimitResult> {
        // TODO: dry this up with rate_limit_by_rpc_key?

        // we don't care about user agent or origin or referer
        let authorization = Authorization::external(
            &self.config.allowed_origin_requests_per_period,
            self.db_conn(),
            ip,
            None,
            proxy_mode,
            None,
            None,
        )?;

        // no semaphore is needed here because login rate limits are low
        // TODO: are we sure we want a semaphore here?
        let semaphore = None;

        if let Some(rate_limiter) = &self.login_rate_limiter {
            match rate_limiter.throttle_label(&ip.to_string(), None, 1).await {
                Ok(RedisRateLimitResult::Allowed(_)) => {
                    Ok(RateLimitResult::Allowed(authorization, semaphore))
                }
                Ok(RedisRateLimitResult::RetryAt(retry_at, _)) => {
                    // TODO: set headers so they know when they can retry
                    // TODO: debug or trace?
                    // this is too verbose, but a stat might be good
                    // // trace!(?ip, "login rate limit exceeded until {:?}", retry_at);

                    Ok(RateLimitResult::RateLimited(authorization, Some(retry_at)))
                }
                Ok(RedisRateLimitResult::RetryNever) => {
                    // TODO: i don't think we'll get here. maybe if we ban an IP forever? seems unlikely
                    // // trace!(?ip, "login rate limit is 0");
                    Ok(RateLimitResult::RateLimited(authorization, None))
                }
                Err(err) => {
                    // internal error, not rate limit being hit
                    // TODO: i really want axum to do this for us in a single place.
                    error!("login rate limiter is unhappy. allowing ip. err={:?}", err);

                    Ok(RateLimitResult::Allowed(authorization, None))
                }
            }
        } else {
            // TODO: if no redis, rate limit with a local cache? "warn!" probably isn't right
            Ok(RateLimitResult::Allowed(authorization, None))
        }
    }

    /// origin is included because it can override the default rate limits
    pub async fn rate_limit_by_ip(
        &self,
        allowed_origin_requests_per_period: &HashMap<String, u64>,
        ip: IpAddr,
        origin: Option<Origin>,
        proxy_mode: ProxyMode,
    ) -> Web3ProxyResult<RateLimitResult> {
        // ip rate limits don't check referer or user agent
        // they do check origin because we can override rate limits for some origins
        let authorization = Authorization::external(
            allowed_origin_requests_per_period,
            self.db_conn.clone(),
            ip,
            origin,
            proxy_mode,
            None,
            None,
        )?;

        if let Some(rate_limiter) = &self.frontend_ip_rate_limiter {
            match rate_limiter
                .throttle(ip, authorization.checks.max_requests_per_period, 1)
                .await
            {
                Ok(DeferredRateLimitResult::Allowed) => {
                    // rate limit allowed us. check concurrent request limits
                    let semaphore = self.ip_semaphore(ip).await?;

                    Ok(RateLimitResult::Allowed(authorization, semaphore))
                }
                Ok(DeferredRateLimitResult::RetryAt(retry_at)) => {
                    // TODO: set headers so they know when they can retry
                    // // trace!(?ip, "rate limit exceeded until {:?}", retry_at);
                    Ok(RateLimitResult::RateLimited(authorization, Some(retry_at)))
                }
                Ok(DeferredRateLimitResult::RetryNever) => {
                    // TODO: i don't think we'll get here. maybe if we ban an IP forever? seems unlikely
                    // // trace!(?ip, "rate limit is 0");
                    Ok(RateLimitResult::RateLimited(authorization, None))
                }
                Err(err) => {
                    // this is an internal error of some kind, not the rate limit being hit
                    // TODO: i really want axum to do this for us in a single place.
                    error!("rate limiter is unhappy. allowing ip. err={:?}", err);

                    // at least we can still check the semaphore
                    let semaphore = self.ip_semaphore(ip).await?;

                    Ok(RateLimitResult::Allowed(authorization, semaphore))
                }
            }
        } else {
            // no redis, but we can still check the ip semaphore
            let semaphore = self.ip_semaphore(ip).await?;

            // TODO: if no redis, rate limit with a local cache? "warn!" probably isn't right
            Ok(RateLimitResult::Allowed(authorization, semaphore))
        }
    }

    // check the local cache for user data, or query the database
    pub(crate) async fn authorization_checks(
        &self,
        proxy_mode: ProxyMode,
        rpc_secret_key: RpcSecretKey,
    ) -> anyhow::Result<AuthorizationChecks> {
        let authorization_checks: Result<_, Arc<anyhow::Error>> = self
            .rpc_secret_key_cache
            .try_get_with(rpc_secret_key.into(), async move {
                // trace!(?rpc_secret_key, "user cache miss");

                let db_replica = self.db_replica().context("Getting database connection")?;

                // TODO: join the user table to this to return the User? we don't always need it
                // TODO: join on secondary users
                // TODO: join on user tier
                match rpc_key::Entity::find()
                    .filter(rpc_key::Column::SecretKey.eq(<Uuid>::from(rpc_secret_key)))
                    .filter(rpc_key::Column::Active.eq(true))
                    .one(db_replica.conn())
                    .await?
                {
                    Some(rpc_key_model) => {
                        // TODO: move these splits into helper functions
                        // TODO: can we have sea orm handle this for us?
                        let user_model = user::Entity::find_by_id(rpc_key_model.user_id)
                            .one(db_replica.conn())
                            .await?
                            .expect("related user");

                        let user_tier_model =
                            user_tier::Entity::find_by_id(user_model.user_tier_id)
                                .one(db_replica.conn())
                                .await?
                                .expect("related user tier");

                        let allowed_ips: Option<Vec<IpNet>> =
                            if let Some(allowed_ips) = rpc_key_model.allowed_ips {
                                let x = allowed_ips
                                    .split(',')
                                    .map(|x| x.trim().parse::<IpNet>())
                                    .collect::<Result<Vec<_>, _>>()?;
                                Some(x)
                            } else {
                                None
                            };

                        let allowed_origins: Option<Vec<Origin>> =
                            if let Some(allowed_origins) = rpc_key_model.allowed_origins {
                                // TODO: do this without collecting twice?
                                let x = allowed_origins
                                    .split(',')
                                    .map(|x| HeaderValue::from_str(x.trim()))
                                    .collect::<Result<Vec<_>, _>>()?
                                    .into_iter()
                                    .map(|x| Origin::decode(&mut [x].iter()))
                                    .collect::<Result<Vec<_>, _>>()?;

                                Some(x)
                            } else {
                                None
                            };

                        let allowed_referers: Option<Vec<Referer>> =
                            if let Some(allowed_referers) = rpc_key_model.allowed_referers {
                                let x = allowed_referers
                                    .split(',')
                                    .map(|x| x.trim().parse::<Referer>())
                                    .collect::<Result<Vec<_>, _>>()?;

                                Some(x)
                            } else {
                                None
                            };

                        let allowed_user_agents: Option<Vec<UserAgent>> =
                            if let Some(allowed_user_agents) = rpc_key_model.allowed_user_agents {
                                let x: Result<Vec<_>, _> = allowed_user_agents
                                    .split(',')
                                    .map(|x| x.trim().parse::<UserAgent>())
                                    .collect();

                                Some(x?)
                            } else {
                                None
                            };

                        let rpc_key_id =
                            Some(rpc_key_model.id.try_into().expect("db ids are never 0"));

                        Ok(AuthorizationChecks {
                            user_id: rpc_key_model.user_id,
                            rpc_secret_key: Some(rpc_secret_key),
                            rpc_secret_key_id: rpc_key_id,
                            allowed_ips,
                            allowed_origins,
                            allowed_referers,
                            allowed_user_agents,
                            tracking_level: rpc_key_model.log_level,
                            log_revert_chance: rpc_key_model.log_revert_chance,
                            max_concurrent_requests: user_tier_model.max_concurrent_requests,
                            max_requests_per_period: user_tier_model.max_requests_per_period,
                            private_txs: rpc_key_model.private_txs,
                            proxy_mode,
                        })
                    }
                    None => Ok(AuthorizationChecks::default()),
                }
            })
            .await;

        // TODO: what's the best way to handle this arc? try_unwrap will not work
        authorization_checks.map_err(|err| anyhow::anyhow!(err))
    }

    /// Authorize the ip/origin/referer/user agent and check rate limits and concurrency limits
    pub async fn rate_limit_by_rpc_key(
        &self,
        ip: IpAddr,
        origin: Option<Origin>,
        proxy_mode: ProxyMode,
        referer: Option<Referer>,
        rpc_key: RpcSecretKey,
        user_agent: Option<UserAgent>,
    ) -> Web3ProxyResult<RateLimitResult> {
        let authorization_checks = self.authorization_checks(proxy_mode, rpc_key).await?;

        // if no rpc_key_id matching the given rpc was found, then we can't rate limit by key
        if authorization_checks.rpc_secret_key_id.is_none() {
            return Ok(RateLimitResult::UnknownKey);
        }

        // TODO: rpc_key should have an option to rate limit by ip instead of by key

        // only allow this rpc_key to run a limited number of concurrent requests
        // TODO: rate limit should be BEFORE the semaphore!
        let semaphore = self
            .registered_user_semaphore(&authorization_checks)
            .await?;

        let authorization = Authorization::try_new(
            authorization_checks,
            self.db_conn(),
            ip,
            origin,
            referer,
            user_agent,
            AuthorizationType::Frontend,
        )?;

        let user_max_requests_per_period = match authorization.checks.max_requests_per_period {
            None => {
                return Ok(RateLimitResult::Allowed(authorization, semaphore));
            }
            Some(x) => x,
        };

        // user key is valid. now check rate limits
        if let Some(rate_limiter) = &self.frontend_registered_user_rate_limiter {
            match rate_limiter
                .throttle(
                    authorization.checks.user_id,
                    Some(user_max_requests_per_period),
                    1,
                )
                .await
            {
                Ok(DeferredRateLimitResult::Allowed) => {
                    Ok(RateLimitResult::Allowed(authorization, semaphore))
                }
                Ok(DeferredRateLimitResult::RetryAt(retry_at)) => {
                    // TODO: set headers so they know when they can retry
                    // TODO: debug or trace?
                    // this is too verbose, but a stat might be good
                    // TODO: keys are secrets! use the id instead
                    // TODO: emit a stat
                    // // trace!(?rpc_key, "rate limit exceeded until {:?}", retry_at);
                    Ok(RateLimitResult::RateLimited(authorization, Some(retry_at)))
                }
                Ok(DeferredRateLimitResult::RetryNever) => {
                    // TODO: keys are secret. don't log them!
                    // // trace!(?rpc_key, "rate limit is 0");
                    // TODO: emit a stat
                    Ok(RateLimitResult::RateLimited(authorization, None))
                }
                Err(err) => {
                    // internal error, not rate limit being hit
                    // TODO: i really want axum to do this for us in a single place.
                    error!("rate limiter is unhappy. allowing ip. err={:?}", err);

                    Ok(RateLimitResult::Allowed(authorization, semaphore))
                }
            }
        } else {
            // TODO: if no redis, rate limit with just a local cache?
            Ok(RateLimitResult::Allowed(authorization, semaphore))
        }
    }
}

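// Added illustration (a sketch, not in the upstream file): the allow-list
// columns read by `authorization_checks` are comma-separated strings;
// trimming each entry before parsing is what makes stored values like
// "10.0.0.0/8, 10.11.12.0/24" work.
#[cfg(test)]
mod allow_list_parsing_example {
    use super::*;

    #[test]
    fn comma_separated_ip_nets_parse() {
        let column = "10.0.0.0/8, 10.11.12.0/24";

        let allowed_ips = column
            .split(',')
            .map(|x| x.trim().parse::<IpNet>())
            .collect::<Result<Vec<IpNet>, _>>()
            .unwrap();

        assert_eq!(allowed_ips.len(), 2);
    }
}
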
impl Authorization {
    pub async fn check_again(
        &self,
        app: &Arc<Web3ProxyApp>,
    ) -> Web3ProxyResult<(Arc<Self>, Option<OwnedSemaphorePermit>)> {
        // TODO: we could probably do this without clones. but this is easy
        let (a, s) = if let Some(rpc_secret_key) = self.checks.rpc_secret_key {
            key_is_authorized(
                app,
                rpc_secret_key,
                self.ip,
                self.origin.clone(),
                self.checks.proxy_mode,
                self.referer.clone(),
                self.user_agent.clone(),
            )
            .await?
        } else {
            ip_is_authorized(app, self.ip, self.origin.clone(), self.checks.proxy_mode).await?
        };

        let a = Arc::new(a);

        Ok((a, s))
    }
}