//! Utilities for authorization of logged in and anonymous users.

use super::rpc_proxy_ws::ProxyMode;
use crate::app::{Web3ProxyApp, APP_USER_AGENT};
use crate::balance::Balance;
use crate::block_number::CacheMode;
use crate::caches::RegisteredUserRateLimitKey;
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
use crate::globals::{global_db_replica_conn, APP};
use crate::jsonrpc::{self, JsonRpcId, JsonRpcParams, JsonRpcRequest};
use crate::kafka::KafkaDebugLogger;
use crate::response_cache::JsonRpcQueryCacheKey;
use crate::rpcs::blockchain::Web3ProxyBlock;
use crate::rpcs::one::Web3Rpc;
use crate::secrets::RpcSecretKey;
use crate::stats::{AppStat, BackendRequests};
use crate::user_token::UserBearerToken;
use anyhow::Context;
use axum::headers::authorization::Bearer;
use axum::headers::{Header, Origin, Referer, UserAgent};
use chrono::Utc;
use deferred_rate_limiter::{DeferredRateLimitResult, DeferredRateLimiter};
use derivative::Derivative;
use derive_more::From;
use entities::{login, rpc_key, user, user_tier};
use ethers::types::{Bytes, U64};
use ethers::utils::keccak256;
use futures::TryFutureExt;
use hashbrown::HashMap;
use http::HeaderValue;
use ipnet::IpNet;
use migration::sea_orm::prelude::Decimal;
use migration::sea_orm::{ColumnTrait, EntityTrait, QueryFilter};
use redis_rate_limiter::redis::AsyncCommands;
use redis_rate_limiter::{RedisRateLimitResult, RedisRateLimiter};
use serde::Serialize;
use serde_json::json;
use serde_json::value::RawValue;
use std::borrow::Cow;
use std::fmt::Debug;
use std::fmt::Display;
use std::hash::{Hash, Hasher};
use std::mem;
use std::num::NonZeroU64;
use std::sync::atomic::{self, AtomicBool, AtomicI64, AtomicU64};
use std::time::Duration;
use std::{net::IpAddr, str::FromStr, sync::Arc};
use tokio::sync::RwLock as AsyncRwLock;
use tokio::sync::{mpsc, OwnedSemaphorePermit, Semaphore};
use tokio::time::Instant;
use tracing::{error, trace, warn};
use ulid::Ulid;
use uuid::Uuid;

/// TODO: should this have IpAddr and Origin or AuthorizationChecks?
#[derive(Debug)]
pub enum RateLimitResult {
    Allowed(Authorization, Option<OwnedSemaphorePermit>),
    RateLimited(
        Authorization,
        /// when their rate limit resets and they can try more requests
        Option<Instant>,
    ),
    /// This key is not in our database. Deny access!
    UnknownKey,
}

#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub enum AuthorizationType {
    Internal,
    Frontend,
}

/// TODO: move this
#[derive(Clone, Debug, Default, From)]
pub struct AuthorizationChecks {
    /// database id of the primary user. 0 if anon
    pub user_id: u64,
    /// locally cached balance that may drift slightly if the user is on multiple servers
    pub latest_balance: Arc<AsyncRwLock<Balance>>,
    /// the key used (if any)
    pub rpc_secret_key: Option<RpcSecretKey>,
    /// database id of the rpc key
    /// if this is None, then this request is being rate limited by ip
    pub rpc_secret_key_id: Option<NonZeroU64>,
    /// if None, allow unlimited queries. inherited from the user_tier
    pub max_requests_per_period: Option<u64>,
    // if None, allow unlimited concurrent requests. inherited from the user_tier
    pub max_concurrent_requests: Option<u32>,
    /// if None, allow any Origin
    pub allowed_origins: Option<Vec<Origin>>,
    /// if None, allow any Referer
    pub allowed_referers: Option<Vec<Referer>>,
    /// if None, allow any UserAgent
    pub allowed_user_agents: Option<Vec<UserAgent>>,
    /// if None, allow any IP Address
    pub allowed_ips: Option<Vec<IpNet>>,
    /// Chance to save reverting eth_call, eth_estimateGas, and eth_sendRawTransaction to the database.
    /// depending on the caller, errors might be expected. this keeps us from bloating our database
    /// u16::MAX == 100%
    pub log_revert_chance: u16,
    /// if true, transactions are broadcast only to private mempools.
    /// IMPORTANT! Once confirmed by a miner, they will be public on the blockchain!
    pub private_txs: bool,
    pub proxy_mode: ProxyMode,
    /// if the account had premium when this request metadata was created
    /// they might spend slightly more than they've paid, but we are okay with that
    /// TODO: we could price the request now and if it's too high, downgrade. but that's more complex than we need
    pub paid_credits_used: bool,
}

/// TODO: include the authorization checks in this?
#[derive(Clone, Debug)]
pub struct Authorization {
    pub checks: AuthorizationChecks,
    pub ip: IpAddr,
    pub origin: Option<Origin>,
    pub referer: Option<Referer>,
    pub user_agent: Option<UserAgent>,
    pub authorization_type: AuthorizationType,
}

/// Ulids and Uuids matching the same bits hash the same
impl Hash for RpcSecretKey {
    fn hash<H: Hasher>(&self, state: &mut H) {
        let x = self.as_128();

        x.hash(state);
    }
}

#[derive(Debug, Default, From, Serialize)]
pub enum RequestOrMethod {
    Request(JsonRpcRequest),
    /// sometimes we don't have a full request. for example, when we are logging a websocket subscription
    Method(Cow<'static, str>, usize),
    #[default]
    None,
}

/// TODO: instead of a bunch of atomics, this should probably use a RwLock
#[derive(Debug, Derivative)]
#[derivative(Default)]
pub struct Web3Request {
    /// TODO: set archive_request during the new instead of after
    /// TODO: this is more complex than "requires a block older than X height". different types of data can be pruned differently
    pub archive_request: AtomicBool,

    pub authorization: Arc<Authorization>,

    pub cache_mode: CacheMode,

    /// TODO: this should probably be in a global config. although maybe if we run multiple chains in one process this will be useful
    pub chain_id: u64,

    pub head_block: Option<Web3ProxyBlock>,

    /// TODO: this should be in a global config. not copied to every single request
    pub usd_per_cu: Decimal,

    pub request: RequestOrMethod,

    /// Instant that the request was received (or at least close to it)
    /// We use Instant and not timestamps to avoid problems with leap seconds and similar issues
    #[derivative(Default(value = "Instant::now()"))]
    pub start_instant: Instant,
    #[derivative(Default(value = "Instant::now() + Duration::from_secs(295)"))]
    pub expire_instant: Instant,
    /// if this is empty, there was a cache_hit
    /// otherwise, it is populated with any rpc servers that were used by this request
    pub backend_requests: BackendRequests,
    /// The number of times the request got stuck waiting because no servers were synced
    pub no_servers: AtomicU64,
    /// If handling the request hit an application error
    /// This does not count things like a transaction reverting or a malformed request
    pub error_response: AtomicBool,
    /// Size in bytes of the JSON response. Does not include headers or things like that.
    pub response_bytes: AtomicU64,
    /// How many milliseconds it took to respond to the request
    pub response_millis: AtomicU64,
    /// What time the (first) response was proxied.
    /// TODO: think about how to store response times for ProxyMode::Versus
    pub response_timestamp: AtomicI64,
    /// True if the response required querying a backup RPC
    /// RPC aggregators that query multiple providers to compare responses may use this header to ignore our response.
    pub response_from_backup_rpc: AtomicBool,
    /// If the request is invalid or received a jsonrpc error response (excluding reverts)
    pub user_error_response: AtomicBool,

    /// ProxyMode::Debug logs requests and responses with Kafka
    /// TODO: maybe this shouldn't be determined by ProxyMode. A request param should probably enable this
    pub kafka_debug_logger: Option<Arc<KafkaDebugLogger>>,

    /// Cancel-safe channel for sending stats to the buffer
    pub stat_sender: Option<mpsc::UnboundedSender<AppStat>>,
}

impl Default for Authorization {
    fn default() -> Self {
        Authorization::internal().unwrap()
    }
}

impl RequestOrMethod {
    pub fn id(&self) -> Box<RawValue> {
        match self {
            Self::Request(x) => x.id.clone(),
            Self::Method(_, _) => Default::default(),
            Self::None => Default::default(),
        }
    }

    pub fn method(&self) -> &str {
        match self {
            Self::Request(x) => x.method.as_str(),
            Self::Method(x, _) => x,
            Self::None => "unknown",
        }
    }

    /// TODO: should this panic on Self::None|Self::Method?
    pub fn params(&self) -> &serde_json::Value {
        match self {
            Self::Request(x) => &x.params,
            Self::Method(..) => &serde_json::Value::Null,
            Self::None => &serde_json::Value::Null,
        }
    }

    pub fn jsonrpc_request(&self) -> Option<&JsonRpcRequest> {
        match self {
            Self::Request(x) => Some(x),
            _ => None,
        }
    }

    pub fn num_bytes(&self) -> usize {
        match self {
            Self::Method(_, num_bytes) => *num_bytes,
            Self::Request(x) => x.num_bytes(),
            Self::None => 0,
        }
    }
}
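
// A minimal sketch, not part of the original file: a unit test exercising the lightweight
// `Method` variant that is used when only a method name (for example a websocket
// subscription) and an approximate byte count are known.
#[cfg(test)]
mod request_or_method_tests {
    use std::borrow::Cow;

    use super::RequestOrMethod;

    #[test]
    fn method_only_requests_report_name_and_size() {
        let x = RequestOrMethod::Method(Cow::Borrowed("eth_subscribe"), 42);

        assert_eq!(x.method(), "eth_subscribe");
        assert_eq!(x.num_bytes(), 42);
        assert!(x.jsonrpc_request().is_none());
        assert!(x.params().is_null());
    }
}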

// TODO: i think a trait is actually the right thing to use here
#[derive(From)]
pub enum ResponseOrBytes<'a> {
    Json(&'a serde_json::Value),
    Response(&'a jsonrpc::SingleResponse),
    Error(&'a Web3ProxyError),
    Bytes(usize),
}

impl<'a> From<u64> for ResponseOrBytes<'a> {
    fn from(value: u64) -> Self {
        Self::Bytes(value as usize)
    }
}

impl ResponseOrBytes<'_> {
    pub fn num_bytes(&self) -> usize {
        match self {
            Self::Json(x) => serde_json::to_string(x)
                .expect("this should always serialize")
                .len(),
            Self::Response(x) => x.num_bytes(),
            Self::Bytes(num_bytes) => *num_bytes,
            Self::Error(x) => {
                let (_, x) = x.as_response_parts();

                x.num_bytes() as usize
            }
        }
    }
}
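
// A minimal sketch, not part of the original file: a unit test of the byte accounting above.
// A plain byte count and the `From<u64>` conversion should report the same size.
#[cfg(test)]
mod response_or_bytes_tests {
    use super::ResponseOrBytes;

    #[test]
    fn bytes_variants_report_their_size() {
        assert_eq!(ResponseOrBytes::Bytes(123).num_bytes(), 123);
        assert_eq!(ResponseOrBytes::from(123u64).num_bytes(), 123);
    }
}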

impl Web3Request {
    #[allow(clippy::too_many_arguments)]
    async fn new_with_options(
        authorization: Arc<Authorization>,
        chain_id: u64,
        head_block: Option<Web3ProxyBlock>,
        kafka_debug_logger: Option<Arc<KafkaDebugLogger>>,
        max_wait: Option<Duration>,
        mut request: RequestOrMethod,
        stat_sender: Option<mpsc::UnboundedSender<AppStat>>,
        usd_per_cu: Decimal,
        app: Option<&Web3ProxyApp>,
    ) -> Arc<Self> {
        let start_instant = Instant::now();

        // TODO: get this default from config, or from user settings
        // 5 minutes with a buffer for other things being slow
        let expire_instant = start_instant + max_wait.unwrap_or_else(|| Duration::from_secs(295));

        // let request: RequestOrMethod = request.into();

        // we VERY INTENTIONALLY log to kafka BEFORE calculating the cache key
        // this is because calculating the cache_key may modify the params!
        // for example, if the request specifies "latest" as the block number, we replace it with the actual latest block number
        if let Some(ref kafka_debug_logger) = kafka_debug_logger {
            // TODO: channels might be more ergonomic than spawned futures
            // spawned things run in parallel easier but generally need more Arcs
            kafka_debug_logger.log_debug_request(&request);
        }

        // now that kafka has logged the user's original params, we can calculate the cache key
        let cache_mode = match &mut request {
            RequestOrMethod::Request(x) => CacheMode::new(x, head_block.as_ref(), app).await,
            _ => CacheMode::Never,
        };

        let x = Self {
            archive_request: false.into(),
            authorization,
            backend_requests: Default::default(),
            cache_mode,
            chain_id,
            error_response: false.into(),
            expire_instant,
            head_block: head_block.clone(),
            kafka_debug_logger,
            no_servers: 0.into(),
            request,
            response_bytes: 0.into(),
            response_from_backup_rpc: false.into(),
            response_millis: 0.into(),
            response_timestamp: 0.into(),
            start_instant,
            stat_sender,
            usd_per_cu,
            user_error_response: false.into(),
        };

        Arc::new(x)
    }

    pub async fn new_with_app(
        app: &Web3ProxyApp,
        authorization: Arc<Authorization>,
        max_wait: Option<Duration>,
        request: RequestOrMethod,
        head_block: Option<Web3ProxyBlock>,
    ) -> Arc<Self> {
        // TODO: get this out of tracing instead (where we have a String from Amazon's LB)
        let request_ulid = Ulid::new();

        let kafka_debug_logger = if matches!(authorization.checks.proxy_mode, ProxyMode::Debug) {
            KafkaDebugLogger::try_new(
                app,
                authorization.clone(),
                head_block.as_ref().map(|x| x.number()),
                "web3_proxy:rpc",
                request_ulid,
            )
        } else {
            None
        };

        let chain_id = app.config.chain_id;

        let stat_sender = app.stat_sender.clone();

        let usd_per_cu = app.config.usd_per_cu.unwrap_or_default();

        Self::new_with_options(
            authorization,
            chain_id,
            head_block,
            kafka_debug_logger,
            max_wait,
            request,
            stat_sender,
            usd_per_cu,
            Some(app),
        )
        .await
    }

    pub async fn new_internal<P: JsonRpcParams>(
        method: String,
        params: &P,
        head_block: Option<Web3ProxyBlock>,
        max_wait: Option<Duration>,
    ) -> Arc<Self> {
        let authorization = Arc::new(Authorization::internal().unwrap());

        // TODO: we need a real id! increment a counter on the app
        let id = JsonRpcId::Number(1);

        // TODO: this seems inefficient
        let request = JsonRpcRequest::new(id, method, json!(params)).unwrap();

        if let Some(app) = APP.get() {
            Self::new_with_app(app, authorization, max_wait, request.into(), head_block).await
        } else {
            Self::new_with_options(
                authorization,
                0,
                head_block,
                None,
                max_wait,
                request.into(),
                None,
                Default::default(),
                None,
            )
            .await
        }
    }

    #[inline]
    pub fn backend_rpcs_used(&self) -> Vec<Arc<Web3Rpc>> {
        self.backend_requests.lock().clone()
    }

    pub fn cache_key(&self) -> Option<u64> {
        match &self.cache_mode {
            CacheMode::Never => None,
            x => {
                let x = JsonRpcQueryCacheKey::new(x, &self.request).hash();

                Some(x)
            }
        }
    }

    #[inline]
    pub fn cache_jsonrpc_errors(&self) -> bool {
        self.cache_mode.cache_jsonrpc_errors()
    }

    #[inline]
    pub fn id(&self) -> Box<RawValue> {
        self.request.id()
    }

    pub fn max_block_needed(&self) -> Option<U64> {
        self.cache_mode.to_block().map(|x| *x.num())
    }

    pub fn min_block_needed(&self) -> Option<U64> {
        if self.archive_request.load(atomic::Ordering::Relaxed) {
            Some(U64::zero())
        } else {
            self.cache_mode.from_block().map(|x| *x.num())
        }
    }

    pub fn ttl(&self) -> Duration {
        self.expire_instant
            .saturating_duration_since(Instant::now())
    }

    pub fn ttl_expired(&self) -> bool {
        self.expire_instant < Instant::now()
    }

    pub fn try_send_stat(mut self) -> Web3ProxyResult<()> {
        if let Some(stat_sender) = self.stat_sender.take() {
            trace!(?self, "sending stat");

            let stat: AppStat = self.into();

            if let Err(err) = stat_sender.send(stat) {
                error!(?err, "failed sending stat");
                // TODO: return it? that seems like it might cause an infinite loop
                // TODO: but dropping stats is bad... hmm... i guess better to undercharge customers than overcharge
            };

            trace!("stat sent successfully");
        }

        Ok(())
    }

    pub fn add_response<'a, R: Into<ResponseOrBytes<'a>>>(&'a self, response: R) {
        // TODO: fetch? set? should it be None in a Mutex? or a OnceCell?
        let response = response.into();

        let num_bytes = response.num_bytes() as u64;

        self.response_bytes
            .fetch_add(num_bytes, atomic::Ordering::Relaxed);

        self.response_millis.fetch_add(
            self.start_instant.elapsed().as_millis() as u64,
            atomic::Ordering::Relaxed,
        );

        // TODO: record first or last timestamp? really, we need multiple
        self.response_timestamp
            .store(Utc::now().timestamp(), atomic::Ordering::Relaxed);

        // TODO: set user_error_response and error_response here instead of outside this function

        if let Some(kafka_debug_logger) = self.kafka_debug_logger.as_ref() {
            if let ResponseOrBytes::Response(response) = response {
                match response {
                    jsonrpc::SingleResponse::Parsed(response) => {
                        kafka_debug_logger.log_debug_response(response);
                    }
                    jsonrpc::SingleResponse::Stream(_) => {
                        warn!("need to handle streaming response debug logging");
                    }
                }
            }
        }
    }

    pub fn try_send_arc_stat(self: Arc<Self>) -> Web3ProxyResult<()> {
        match Arc::into_inner(self) {
            Some(x) => x.try_send_stat(),
            None => {
                trace!("could not send stat while other arcs are active");
                Ok(())
            }
        }
    }

    #[inline]
    pub fn proxy_mode(&self) -> ProxyMode {
        self.authorization.checks.proxy_mode
    }

    // TODO: helper function to duplicate? needs to clear request_bytes, and all the atomics tho...
}
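
// A minimal sketch, not part of the original file: a unit test of the Derivative-generated
// Default impl and the block helpers above. A default request expires roughly 295 seconds
// after creation, and setting `archive_request` forces `min_block_needed` to the genesis block.
#[cfg(test)]
mod web3_request_tests {
    use std::sync::atomic;
    use std::time::Duration;

    use ethers::types::U64;

    use super::Web3Request;

    #[test]
    fn default_request_ttl_and_archive_flag() {
        let request = Web3Request::default();

        assert!(!request.ttl_expired());
        assert!(request.ttl() <= Duration::from_secs(295));

        request.archive_request.store(true, atomic::Ordering::Relaxed);
        assert_eq!(request.min_block_needed(), Some(U64::zero()));
    }
}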

// TODO: is this where the panic comes from?
impl Drop for Web3Request {
    fn drop(&mut self) {
        if self.stat_sender.is_some() {
            // turn `&mut self` into `self`
            let x = mem::take(self);

            trace!(?x, "request metadata dropped without stat send");
            let _ = x.try_send_stat();
        }
    }
}

impl Default for RpcSecretKey {
    fn default() -> Self {
        Self::new()
    }
}

impl Display for RpcSecretKey {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // TODO: do this without dereferencing
        let ulid: Ulid = (*self).into();

        Display::fmt(&ulid, f)
    }
}

impl FromStr for RpcSecretKey {
    type Err = Web3ProxyError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if let Ok(ulid) = s.parse::<Ulid>() {
            Ok(ulid.into())
        } else if let Ok(uuid) = s.parse::<Uuid>() {
            Ok(uuid.into())
        } else {
            // TODO: custom error type so that this shows as a 400
            Err(Web3ProxyError::InvalidUserKey)
        }
    }
}

impl From<Ulid> for RpcSecretKey {
    fn from(x: Ulid) -> Self {
        RpcSecretKey::Ulid(x)
    }
}

impl From<Uuid> for RpcSecretKey {
    fn from(x: Uuid) -> Self {
        RpcSecretKey::Uuid(x)
    }
}

impl From<RpcSecretKey> for Ulid {
    fn from(x: RpcSecretKey) -> Self {
        match x {
            RpcSecretKey::Ulid(x) => x,
            RpcSecretKey::Uuid(x) => Ulid::from(x.as_u128()),
        }
    }
}

impl From<RpcSecretKey> for Uuid {
    fn from(x: RpcSecretKey) -> Self {
        match x {
            RpcSecretKey::Ulid(x) => Uuid::from_u128(x.0),
            RpcSecretKey::Uuid(x) => x,
        }
    }
}
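
// A minimal sketch, not part of the original file: a unit test of the conversions above and
// of the Hash impl's documented guarantee that a Ulid-backed key and a Uuid-backed key built
// from the same 128 bits hash identically and convert into each other losslessly.
#[cfg(test)]
mod rpc_secret_key_tests {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    use ulid::Ulid;
    use uuid::Uuid;

    use crate::secrets::RpcSecretKey;

    fn hash_one<T: Hash>(x: &T) -> u64 {
        let mut hasher = DefaultHasher::new();
        x.hash(&mut hasher);
        hasher.finish()
    }

    #[test]
    fn ulid_and_uuid_keys_agree() {
        let ulid = Ulid::new();
        let uuid = Uuid::from_u128(ulid.0);

        let ulid_key = RpcSecretKey::from(ulid);
        let uuid_key = RpcSecretKey::from(uuid);

        // same underlying bits -> same hash
        assert_eq!(hash_one(&ulid_key), hash_one(&uuid_key));

        // conversions round-trip through either representation
        assert_eq!(Ulid::from(uuid_key), ulid);
        assert_eq!(Uuid::from(ulid_key), uuid);
    }
}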

impl Authorization {
    /// this acquires a read lock on the latest balance. Be careful not to deadlock!
    pub async fn active_premium(&self) -> bool {
        let user_balance = self.checks.latest_balance.read().await;

        user_balance.active_premium()
    }

    pub fn internal() -> Web3ProxyResult<Self> {
        let authorization_checks = AuthorizationChecks {
            // any error logs on a local (internal) query are likely problems. log them all
            log_revert_chance: u16::MAX,
            // default for everything else should be fine. we don't have a user_id or ip to give
            ..Default::default()
        };

        let ip: IpAddr = "127.0.0.1".parse().expect("localhost should always parse");
        let user_agent = UserAgent::from_str(APP_USER_AGENT).ok();

        Self::try_new(
            authorization_checks,
            &ip,
            None,
            None,
            user_agent.as_ref(),
            AuthorizationType::Internal,
        )
    }

    pub fn external(
        allowed_origin_requests_per_period: &HashMap<String, u64>,
        ip: &IpAddr,
        origin: Option<&Origin>,
        proxy_mode: ProxyMode,
        referer: Option<&Referer>,
        user_agent: Option<&UserAgent>,
    ) -> Web3ProxyResult<Self> {
        // some origins can override max_requests_per_period for anon users
        // TODO: i don't like the `to_string` here
        let max_requests_per_period = origin
            .map(|origin| {
                allowed_origin_requests_per_period
                    .get(&origin.to_string())
                    .cloned()
            })
            .unwrap_or_default();

        let authorization_checks = AuthorizationChecks {
            max_requests_per_period,
            proxy_mode,
            ..Default::default()
        };

        Self::try_new(
            authorization_checks,
            ip,
            origin,
            referer,
            user_agent,
            AuthorizationType::Frontend,
        )
    }

    pub fn try_new(
        authorization_checks: AuthorizationChecks,
        ip: &IpAddr,
        origin: Option<&Origin>,
        referer: Option<&Referer>,
        user_agent: Option<&UserAgent>,
        authorization_type: AuthorizationType,
    ) -> Web3ProxyResult<Self> {
        // check ip
        match &authorization_checks.allowed_ips {
            None => {}
            Some(allowed_ips) => {
                if !allowed_ips.iter().any(|x| x.contains(ip)) {
                    return Err(Web3ProxyError::IpNotAllowed(ip.to_owned()));
                }
            }
        }

        // check origin
        match (origin, &authorization_checks.allowed_origins) {
            (None, None) => {}
            (Some(_), None) => {}
            (None, Some(_)) => return Err(Web3ProxyError::OriginRequired),
            (Some(origin), Some(allowed_origins)) => {
                if !allowed_origins.contains(origin) {
                    return Err(Web3ProxyError::OriginNotAllowed(origin.to_owned()));
                }
            }
        }

        // check referer
        match (referer, &authorization_checks.allowed_referers) {
            (None, None) => {}
            (Some(_), None) => {}
            (None, Some(_)) => return Err(Web3ProxyError::RefererRequired),
            (Some(referer), Some(allowed_referers)) => {
                if !allowed_referers.contains(referer) {
                    return Err(Web3ProxyError::RefererNotAllowed(referer.to_owned()));
                }
            }
        }

        // check user_agent
        match (user_agent, &authorization_checks.allowed_user_agents) {
            (None, None) => {}
            (Some(_), None) => {}
            (None, Some(_)) => return Err(Web3ProxyError::UserAgentRequired),
            (Some(user_agent), Some(allowed_user_agents)) => {
                if !allowed_user_agents.contains(user_agent) {
                    return Err(Web3ProxyError::UserAgentNotAllowed(user_agent.to_owned()));
                }
            }
        }

        Ok(Self {
            checks: authorization_checks,
            ip: *ip,
            origin: origin.cloned(),
            referer: referer.cloned(),
            user_agent: user_agent.cloned(),
            authorization_type,
        })
    }
}
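
// A minimal sketch, not part of the original file: a unit test showing how `try_new`
// enforces the per-key ip allow list carried in `AuthorizationChecks`.
#[cfg(test)]
mod authorization_allow_list_tests {
    use std::net::IpAddr;

    use ipnet::IpNet;

    use super::{Authorization, AuthorizationChecks, AuthorizationType};

    #[test]
    fn ip_allow_list_is_enforced() {
        let checks = AuthorizationChecks {
            allowed_ips: Some(vec!["10.0.0.0/8".parse::<IpNet>().unwrap()]),
            ..Default::default()
        };

        let allowed_ip: IpAddr = "10.1.2.3".parse().unwrap();
        let denied_ip: IpAddr = "192.168.1.1".parse().unwrap();

        assert!(Authorization::try_new(
            checks.clone(),
            &allowed_ip,
            None,
            None,
            None,
            AuthorizationType::Internal,
        )
        .is_ok());

        assert!(Authorization::try_new(
            checks,
            &denied_ip,
            None,
            None,
            None,
            AuthorizationType::Internal,
        )
        .is_err());
    }
}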

/// rate limit logins only by ip.
/// we want all origins and referers and user agents to count together
pub async fn login_is_authorized(app: &Web3ProxyApp, ip: IpAddr) -> Web3ProxyResult<Authorization> {
    let authorization = match app.rate_limit_login(ip, ProxyMode::Best).await? {
        RateLimitResult::Allowed(authorization, None) => authorization,
        RateLimitResult::RateLimited(authorization, retry_at) => {
            return Err(Web3ProxyError::RateLimited(authorization, retry_at));
        }
        // TODO: don't panic. give the user an error
        x => unimplemented!("rate_limit_login shouldn't ever see these: {:?}", x),
    };

    Ok(authorization)
}

/// semaphore won't ever be None, but it's easier if key auth and ip auth work the same way
/// keep the semaphore alive until the user's request is entirely complete
pub async fn ip_is_authorized(
    app: &Arc<Web3ProxyApp>,
    ip: &IpAddr,
    origin: Option<&Origin>,
    proxy_mode: ProxyMode,
) -> Web3ProxyResult<(Authorization, Option<OwnedSemaphorePermit>)> {
    // TODO: i think we could write an `impl From` for this
    // TODO: move this to an AuthorizedUser extractor
    let (authorization, semaphore) = match app.rate_limit_public(ip, origin, proxy_mode).await? {
        RateLimitResult::Allowed(authorization, semaphore) => (authorization, semaphore),
        RateLimitResult::RateLimited(authorization, retry_at) => {
            // TODO: in the background, emit a stat (maybe simplest to use a channel?)
            return Err(Web3ProxyError::RateLimited(authorization, retry_at));
        }
        // TODO: don't panic. give the user an error
        x => unimplemented!("rate_limit_by_ip shouldn't ever see these: {:?}", x),
    };

    // in the background, add the hashed ip to a recent_users map
    if app.config.public_recent_ips_salt.is_some() {
        let app = app.clone();
        let ip = *ip;

        let f = async move {
            let now = Utc::now().timestamp();

            if let Ok(mut redis_conn) = app.redis_conn().await {
                let salt = app
                    .config
                    .public_recent_ips_salt
                    .as_ref()
                    .expect("public_recent_ips_salt must exist in here");

                let salted_ip = format!("{}:{}", salt, ip);

                let hashed_ip = Bytes::from(keccak256(salted_ip.as_bytes()));

                let recent_ip_key = format!("recent_users:ip:{}", app.config.chain_id);

                redis_conn
                    .zadd(recent_ip_key, hashed_ip.to_string(), now)
                    .await?;
            };

            Ok::<_, Web3ProxyError>(())
        }
        .map_err(|err| {
            warn!(?err, "background update of recent_users:ip failed");

            err
        });

        tokio::spawn(f);
    }

    Ok((authorization, semaphore))
}

/// like app.rate_limit_by_rpc_key but converts to a Web3ProxyError;
/// keep the semaphore alive until the user's request is entirely complete
pub async fn key_is_authorized(
    app: &Arc<Web3ProxyApp>,
    rpc_key: &RpcSecretKey,
    ip: &IpAddr,
    origin: Option<&Origin>,
    proxy_mode: ProxyMode,
    referer: Option<&Referer>,
    user_agent: Option<&UserAgent>,
) -> Web3ProxyResult<(Authorization, Option<OwnedSemaphorePermit>)> {
    // check the rate limits. error if over the limit
    // TODO: i think this should be in an "impl From" or "impl Into"
    let (authorization, semaphore) = match app
        .rate_limit_premium(ip, origin, proxy_mode, referer, rpc_key, user_agent)
        .await?
    {
        RateLimitResult::Allowed(authorization, semaphore) => (authorization, semaphore),
        RateLimitResult::RateLimited(authorization, retry_at) => {
            return Err(Web3ProxyError::RateLimited(authorization, retry_at));
        }
        RateLimitResult::UnknownKey => return Err(Web3ProxyError::UnknownKey),
    };

    // TODO: DRY and maybe optimize the hashing
    // in the background, add the hashed user id to a recent_users map
    if app.config.public_recent_ips_salt.is_some() {
        let app = app.clone();
        let user_id = authorization.checks.user_id;
        let f = async move {
            let now = Utc::now().timestamp();

            if let Ok(mut redis_conn) = app.redis_conn().await {
                let salt = app
                    .config
                    .public_recent_ips_salt
                    .as_ref()
                    .expect("public_recent_ips_salt must exist in here");

                let salted_user_id = format!("{}:{}", salt, user_id);

                let hashed_user_id = Bytes::from(keccak256(salted_user_id.as_bytes()));

                let recent_user_id_key = format!("recent_users:id:{}", app.config.chain_id);

                redis_conn
                    .zadd(recent_user_id_key, hashed_user_id.to_string(), now)
                    .await?;
            }

            Ok::<_, Web3ProxyError>(())
        }
        .map_err(|err| {
            warn!(?err, "background update of recent_users:id failed");

            err
        });

        tokio::spawn(f);
    }

    Ok((authorization, semaphore))
}

impl Web3ProxyApp {
    /// Limit the number of concurrent requests from the given ip address.
    pub async fn permit_public_concurrency(
        &self,
        ip: &IpAddr,
    ) -> Web3ProxyResult<Option<OwnedSemaphorePermit>> {
        if let Some(max_concurrent_requests) = self.config.public_max_concurrent_requests {
            let semaphore = self
                .ip_semaphores
                .get_with_by_ref(ip, async {
                    // TODO: set max_concurrent_requests dynamically based on load?
                    let s = Semaphore::new(max_concurrent_requests);
                    Arc::new(s)
                })
                .await;

            let semaphore_permit = tokio::select! {
                biased;

                p = semaphore.acquire_owned() => {
                    p
                }
                p = self.bonus_ip_concurrency.clone().acquire_owned() => {
                    p
                }
            }?;

            Ok(Some(semaphore_permit))
        } else {
            Ok(None)
        }
    }

    /// Limit the number of concurrent requests for a given user across all of their keys
    /// keep the semaphore alive until the user's request is entirely complete
    pub async fn permit_premium_concurrency(
        &self,
        authorization: &Authorization,
        ip: &IpAddr,
    ) -> Web3ProxyResult<Option<OwnedSemaphorePermit>> {
        let authorization_checks = &authorization.checks;

        if let Some(max_concurrent_requests) = authorization_checks.max_concurrent_requests {
            let user_id = authorization_checks
                .user_id
                .try_into()
                .or(Err(Web3ProxyError::UserIdZero))?;

            let semaphore = self
                .user_semaphores
                .get_with_by_ref(&(user_id, *ip), async move {
                    let s = Semaphore::new(max_concurrent_requests as usize);
                    Arc::new(s)
                })
                .await;

            let semaphore_permit = tokio::select! {
                biased;

                p = semaphore.acquire_owned() => {
                    p
                }
                p = self.bonus_user_concurrency.clone().acquire_owned() => {
                    p
                }
                p = self.bonus_ip_concurrency.clone().acquire_owned() => {
                    p
                }
            }?;

            Ok(Some(semaphore_permit))
        } else {
            // unlimited concurrency
            Ok(None)
        }
    }

    /// Verify that the given bearer token and address are allowed to take the specified action.
    /// This includes concurrent request limiting.
    /// keep the semaphore alive until the user's request is entirely complete
    pub async fn bearer_is_authorized(&self, bearer: Bearer) -> Web3ProxyResult<user::Model> {
        // get the user id for this bearer token
        let user_bearer_token = UserBearerToken::try_from(bearer)?;

        // get the attached address from the database for the given auth_token.
        let db_replica = global_db_replica_conn()?;

        let user_bearer_uuid: Uuid = user_bearer_token.into();

        let user = user::Entity::find()
            .left_join(login::Entity)
            .filter(login::Column::BearerToken.eq(user_bearer_uuid))
            .one(db_replica.as_ref())
            .await
            .web3_context("fetching user from db by bearer token")?
            .web3_context("unknown bearer token")?;

        Ok(user)
    }

    pub async fn rate_limit_login(
        &self,
        ip: IpAddr,
        proxy_mode: ProxyMode,
    ) -> Web3ProxyResult<RateLimitResult> {
        // TODO: if ip is on the local network, always allow?

        // we don't care about user agent or origin or referer
        let authorization = Authorization::external(
            &self.config.allowed_origin_requests_per_period,
            &ip,
            None,
            proxy_mode,
            None,
            None,
        )?;

        let label = ip.to_string();

        redis_rate_limit(
            &self.login_rate_limiter,
            authorization,
            None,
            Some(&label),
            None,
        )
        .await
    }

    /// origin is included because it can override the default rate limits
    pub async fn rate_limit_public(
        &self,
        ip: &IpAddr,
        origin: Option<&Origin>,
        proxy_mode: ProxyMode,
    ) -> Web3ProxyResult<RateLimitResult> {
        if ip.is_loopback() {
            // TODO: localhost being unlimited should be optional
            let authorization = Authorization::internal()?;

            return Ok(RateLimitResult::Allowed(authorization, None));
        }

        let allowed_origin_requests_per_period = &self.config.allowed_origin_requests_per_period;

        // ip rate limits don't check referer or user agent
        // they do check origin because we can override rate limits for some origins
        let authorization = Authorization::external(
            allowed_origin_requests_per_period,
            ip,
            origin,
            proxy_mode,
            None,
            None,
        )?;

        if let Some(rate_limiter) = &self.frontend_public_rate_limiter {
            let mut x = deferred_redis_rate_limit(authorization, *ip, None, rate_limiter).await?;

            if let RateLimitResult::RateLimited(authorization, retry_at) = x {
                // we got rate limited, try bonus_frontend_public_rate_limiter
                x = redis_rate_limit(
                    &self.bonus_frontend_public_rate_limiter,
                    authorization,
                    retry_at,
                    None,
                    None,
                )
                .await?;
            }

            if let RateLimitResult::Allowed(a, b) = x {
                debug_assert!(b.is_none());

                let permit = self.permit_public_concurrency(ip).await?;

                x = RateLimitResult::Allowed(a, permit)
            }

            debug_assert!(!matches!(x, RateLimitResult::UnknownKey));

            Ok(x)
        } else {
            // no redis, but we can still check the ip semaphore
            let permit = self.permit_public_concurrency(ip).await?;

            // TODO: if no redis, rate limit with a local cache? "warn!" probably isn't right
            Ok(RateLimitResult::Allowed(authorization, permit))
        }
    }

    // check the local cache for user data, or query the database
    pub(crate) async fn authorization_checks(
        &self,
        proxy_mode: ProxyMode,
        rpc_secret_key: &RpcSecretKey,
    ) -> Web3ProxyResult<AuthorizationChecks> {
        // TODO: move onto a helper function

        let x = self
            .rpc_secret_key_cache
            .try_get_with_by_ref(rpc_secret_key, async move {
                let db_replica = global_db_replica_conn()?;

                // TODO: join the user table to this to return the User? we don't always need it
                // TODO: join on secondary users
                // TODO: join on user tier
                match rpc_key::Entity::find()
                    .filter(rpc_key::Column::SecretKey.eq(<Uuid>::from(*rpc_secret_key)))
                    .filter(rpc_key::Column::Active.eq(true))
                    .one(db_replica.as_ref())
                    .await?
                {
                    Some(rpc_key_model) => {
                        // TODO: move these splits into helper functions
                        // TODO: can we have sea orm handle this for us?
                        let allowed_ips: Option<Vec<IpNet>> =
                            if let Some(allowed_ips) = rpc_key_model.allowed_ips {
                                let x = allowed_ips
                                    .split(',')
                                    .map(|x| x.trim().parse::<IpNet>())
                                    .collect::<Result<Vec<_>, _>>()?;
                                Some(x)
                            } else {
                                None
                            };

                        let allowed_origins: Option<Vec<Origin>> =
                            if let Some(allowed_origins) = rpc_key_model.allowed_origins {
                                // TODO: do this without collecting twice?
                                let x = allowed_origins
                                    .split(',')
                                    .map(|x| HeaderValue::from_str(x.trim()))
                                    .collect::<Result<Vec<_>, _>>()?
                                    .into_iter()
                                    .map(|x| Origin::decode(&mut [x].iter()))
                                    .collect::<Result<Vec<_>, _>>()?;

                                Some(x)
                            } else {
                                None
                            };

                        let allowed_referers: Option<Vec<Referer>> =
                            if let Some(allowed_referers) = rpc_key_model.allowed_referers {
                                let x = allowed_referers
                                    .split(',')
                                    .map(|x| {
                                        x.trim()
                                            .parse::<Referer>()
                                            .or(Err(Web3ProxyError::InvalidReferer))
                                    })
                                    .collect::<Result<Vec<_>, _>>()?;

                                Some(x)
                            } else {
                                None
                            };

                        let allowed_user_agents: Option<Vec<UserAgent>> =
                            if let Some(allowed_user_agents) = rpc_key_model.allowed_user_agents {
                                let x: Result<Vec<_>, _> = allowed_user_agents
                                    .split(',')
                                    .map(|x| {
                                        x.trim()
                                            .parse::<UserAgent>()
                                            .or(Err(Web3ProxyError::InvalidUserAgent))
                                    })
                                    .collect();

                                Some(x?)
                            } else {
                                None
                            };

                        // Get the user_tier
                        let user_model = user::Entity::find_by_id(rpc_key_model.user_id)
                            .one(db_replica.as_ref())
                            .await?
                            .web3_context(
                                "user model was not found, but every rpc_key should have a user",
                            )?;

                        let mut user_tier_model = user_tier::Entity::find_by_id(
                            user_model.user_tier_id,
                        )
                        .one(db_replica.as_ref())
                        .await?
                        .web3_context(
                            "related user tier not found, but every user should have a tier",
                        )?;

                        let latest_balance = self
                            .user_balance_cache
                            .get_or_insert(db_replica.as_ref(), rpc_key_model.user_id)
                            .await?;

                        let paid_credits_used: bool;
                        if let Some(downgrade_user_tier) = user_tier_model.downgrade_tier_id {
                            trace!("user belongs to a premium tier. checking balance");

                            let active_premium = latest_balance.read().await.active_premium();

                            // only consider the user premium if they have paid at least $10 and have a balance > $.01
                            // otherwise, set user_tier_model to the downgraded tier
                            if active_premium {
                                paid_credits_used = true;
                            } else {
                                paid_credits_used = false;

                                // TODO: include boolean to mark that the user is downgraded
                                user_tier_model =
                                    user_tier::Entity::find_by_id(downgrade_user_tier)
                                        .one(db_replica.as_ref())
                                        .await?
                                        .web3_context(format!(
                                            "downgrade user tier ({}) is missing!",
                                            downgrade_user_tier
                                        ))?;
                            }
                        } else {
                            paid_credits_used = false;
                        }

                        let rpc_key_id =
                            Some(rpc_key_model.id.try_into().context("db ids are never 0")?);

                        Ok::<_, Web3ProxyError>(AuthorizationChecks {
                            allowed_ips,
                            allowed_origins,
                            allowed_referers,
                            allowed_user_agents,
                            latest_balance,
                            // TODO: is floating point math going to scale this correctly?
                            log_revert_chance: (rpc_key_model.log_revert_chance * u16::MAX as f64)
                                as u16,
                            max_concurrent_requests: user_tier_model.max_concurrent_requests,
                            max_requests_per_period: user_tier_model.max_requests_per_period,
                            private_txs: rpc_key_model.private_txs,
                            proxy_mode,
                            rpc_secret_key: Some(*rpc_secret_key),
                            rpc_secret_key_id: rpc_key_id,
                            user_id: rpc_key_model.user_id,
                            paid_credits_used,
                        })
                    }
                    None => Ok(AuthorizationChecks::default()),
                }
            })
            .await?;

        Ok(x)
    }

    /// Authorize the key/ip/origin/referer/useragent and handle rate and concurrency limits
    pub async fn rate_limit_premium(
        &self,
        ip: &IpAddr,
        origin: Option<&Origin>,
        proxy_mode: ProxyMode,
        referer: Option<&Referer>,
        rpc_key: &RpcSecretKey,
        user_agent: Option<&UserAgent>,
    ) -> Web3ProxyResult<RateLimitResult> {
        let authorization_checks = match self.authorization_checks(proxy_mode, rpc_key).await {
            Ok(x) => x,
            Err(err) => {
                if let Ok(_err) = err.split_db_errors() {
                    // // TODO: this is too verbose during an outage. the warnings on the config reloader should be fine
                    // warn!(
                    //     ?err,
                    //     "db is down. cannot check rpc key. fallback to ip rate limits"
                    // );
                    return self.rate_limit_public(ip, origin, proxy_mode).await;
                }

                return Err(err);
            }
        };

        // if no rpc_key_id matching the given rpc key was found, then we can't rate limit by key
        if authorization_checks.rpc_secret_key_id.is_none() {
            trace!("unknown key. falling back to free limits");
            return self.rate_limit_public(ip, origin, proxy_mode).await;
        }

        let authorization = Authorization::try_new(
            authorization_checks,
            ip,
            origin,
            referer,
            user_agent,
            AuthorizationType::Frontend,
        )?;

        // user key is valid. now check rate limits
        if let Some(user_max_requests_per_period) = authorization.checks.max_requests_per_period {
            if let Some(rate_limiter) = &self.frontend_premium_rate_limiter {
                let key = RegisteredUserRateLimitKey(authorization.checks.user_id, *ip);

                let mut x = deferred_redis_rate_limit(
                    authorization,
                    key,
                    Some(user_max_requests_per_period),
                    rate_limiter,
                )
                .await?;

                if let RateLimitResult::RateLimited(authorization, retry_at) = x {
                    // rate limited by the user's key+ip. check if any limits are available in the bonus premium pool
                    x = redis_rate_limit(
                        &self.bonus_frontend_premium_rate_limiter,
                        authorization,
                        retry_at,
                        None,
                        None,
                    )
                    .await?;
                }

                if let RateLimitResult::RateLimited(authorization, retry_at) = x {
                    // premium got rate limited too. check the bonus public pool
                    x = redis_rate_limit(
                        &self.bonus_frontend_public_rate_limiter,
                        authorization,
                        retry_at,
                        None,
                        None,
                    )
                    .await?;
                }

                if let RateLimitResult::Allowed(a, b) = x {
                    debug_assert!(b.is_none());

                    // only allow this rpc_key to run a limited number of concurrent requests
                    let permit = self.permit_premium_concurrency(&a, ip).await?;

                    x = RateLimitResult::Allowed(a, permit);
                }

                debug_assert!(!matches!(x, RateLimitResult::UnknownKey));

                return Ok(x);
            } else {
                // TODO: if no redis, rate limit with just a local cache?
            }
        }

        let permit = self.permit_premium_concurrency(&authorization, ip).await?;

        Ok(RateLimitResult::Allowed(authorization, permit))
    }
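
    // A minimal sketch of how a frontend handler might drive the method above; the
    // handler shape and the error handling are assumptions for illustration, not
    // code from this crate:
    //
    //     match app
    //         .rate_limit_premium(&ip, origin.as_ref(), proxy_mode, referer.as_ref(), &rpc_key, user_agent.as_ref())
    //         .await?
    //     {
    //         RateLimitResult::Allowed(authorization, permit) => {
    //             // proxy the request; hold `permit` for the lifetime of the request
    //         }
    //         RateLimitResult::RateLimited(_, retry_at) => {
    //             // respond with 429, deriving a Retry-After from `retry_at` when present
    //         }
    //         other => {
    //             // UnknownKey and similar results are surfaced as auth errors
    //         }
    //     }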
}

impl Authorization {
    pub async fn check_again(
        &self,
        app: &Arc<Web3ProxyApp>,
    ) -> Web3ProxyResult<(Arc<Self>, Option<OwnedSemaphorePermit>)> {
        // TODO: we could probably do this without clones. but this is easy
        let (a, s) = if let Some(ref rpc_secret_key) = self.checks.rpc_secret_key {
            key_is_authorized(
                app,
                rpc_secret_key,
                &self.ip,
                self.origin.as_ref(),
                self.checks.proxy_mode,
                self.referer.as_ref(),
                self.user_agent.as_ref(),
            )
            .await?
        } else {
            ip_is_authorized(app, &self.ip, self.origin.as_ref(), self.checks.proxy_mode).await?
        };

        let a = Arc::new(a);

        Ok((a, s))
    }
}

/// this fails open!
/// this never includes a semaphore! if you want one, add it after this call
/// if `max_requests_per_period` is none, the limit in the authorization is used
pub async fn deferred_redis_rate_limit<K>(
    authorization: Authorization,
    key: K,
    max_requests_per_period: Option<u64>,
    rate_limiter: &DeferredRateLimiter<K>,
) -> Web3ProxyResult<RateLimitResult>
where
    K: Send + Sync + Copy + Clone + Display + Hash + Eq + PartialEq + 'static,
{
    let max_requests_per_period =
        max_requests_per_period.or(authorization.checks.max_requests_per_period);

    let x = match rate_limiter.throttle(key, max_requests_per_period, 1).await {
        Ok(DeferredRateLimitResult::Allowed) => RateLimitResult::Allowed(authorization, None),
        Ok(DeferredRateLimitResult::RetryAt(retry_at)) => {
            // TODO: set headers so they know when they can retry
            // TODO: debug or trace?
            // this is too verbose, but a stat might be good
            // TODO: emit a stat
            // trace!(?rpc_key, "rate limit exceeded until {:?}", retry_at);
            RateLimitResult::RateLimited(authorization, Some(retry_at))
        }
        Ok(DeferredRateLimitResult::RetryNever) => {
            // TODO: keys are secret. don't log them!
            // trace!(?rpc_key, "rate limit is 0");
            // TODO: emit a stat
            RateLimitResult::RateLimited(authorization, None)
        }
        Err(err) => {
            // internal error, not rate limit being hit
            error!(?err, %key, "rate limiter is unhappy. allowing key");

            RateLimitResult::Allowed(authorization, None)
        }
    };

    Ok(x)
}
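
// A minimal sketch of calling the helper above; `premium_limiter` and `key` are
// placeholders for a configured `DeferredRateLimiter` and its key, and the limit of
// 1_000 requests per period is purely illustrative:
//
//     match deferred_redis_rate_limit(authorization, key, Some(1_000), &premium_limiter).await? {
//         RateLimitResult::Allowed(authorization, _) => { /* continue; no semaphore is attached here */ }
//         RateLimitResult::RateLimited(_, Some(retry_at)) => { /* tell the caller when to retry */ }
//         RateLimitResult::RateLimited(_, None) => { /* the key's limit is 0 */ }
//         _ => unreachable!("only Allowed or RateLimited are returned above"),
//     }
//
// Passing `None` instead of `Some(1_000)` falls back to the limit stored in the
// authorization, and Redis errors fall through to `Allowed` ("fails open"), so an
// outage degrades to no per-key limiting rather than blocking traffic.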

/// this never includes a semaphore! if you want one, add it after this call
/// if `max_requests_per_period` is none, the limit in the authorization is used
pub async fn redis_rate_limit(
    rate_limiter: &Option<RedisRateLimiter>,
    authorization: Authorization,
    mut retry_at: Option<Instant>,
    label: Option<&str>,
    max_requests_per_period: Option<u64>,
) -> Web3ProxyResult<RateLimitResult> {
    let max_requests_per_period =
        max_requests_per_period.or(authorization.checks.max_requests_per_period);

    let x = if let Some(rate_limiter) = rate_limiter {
        match rate_limiter
            .throttle_label(label.unwrap_or_default(), max_requests_per_period, 1)
            .await
        {
            Ok(RedisRateLimitResult::Allowed(..)) => RateLimitResult::Allowed(authorization, None),
            Ok(RedisRateLimitResult::RetryAt(new_retry_at, ..)) => {
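                // keep the earlier of the two hints. `Option`'s ordering treats `None` as
                // less than `Some`, so if the caller passed `retry_at: None` the merged
                // value stays `None` here.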
                retry_at = retry_at.min(Some(new_retry_at));

                RateLimitResult::RateLimited(authorization, retry_at)
            }
            Ok(RedisRateLimitResult::RetryNever) => {
                RateLimitResult::RateLimited(authorization, retry_at)
            }
            Err(err) => {
                // this is an internal error of some kind, not the rate limit being hit
                error!("rate limiter is unhappy. allowing ip. err={:?}", err);

                RateLimitResult::Allowed(authorization, None)
            }
        }
    } else {
        RateLimitResult::Allowed(authorization, None)
    };

    Ok(x)
}
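
// A short sketch of chaining the helper above behind an earlier limiter, the way the
// premium path uses its bonus pools; `bonus_limiter` is a placeholder for one of the
// app's optional `RedisRateLimiter`s and the label is illustrative:
//
//     if let RateLimitResult::RateLimited(authorization, retry_at) = first_result {
//         // thread the earlier `retry_at` through so it can be merged with this limiter's
//         // hint, and pass `None` to fall back to the authorization's own limit
//         second_result =
//             redis_rate_limit(&bonus_limiter, authorization, retry_at, Some("bonus"), None).await?;
//     }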