2022-08-06 08:49:52 +03:00
|
|
|
/// this should move into web3_proxy once the basics are working
|
2022-06-05 22:58:47 +03:00
|
|
|
mod errors;
|
|
|
|
mod http;
|
|
|
|
mod http_proxy;
|
2022-07-14 00:49:57 +03:00
|
|
|
mod users;
|
2022-06-05 22:58:47 +03:00
|
|
|
mod ws_proxy;
|
2022-07-07 06:22:09 +03:00
|
|
|
|
2022-06-05 22:58:47 +03:00
|
|
|
use axum::{
|
|
|
|
handler::Handler,
|
2022-08-04 04:10:27 +03:00
|
|
|
response::IntoResponse,
|
2022-06-05 22:58:47 +03:00
|
|
|
routing::{get, post},
|
|
|
|
Extension, Router,
|
|
|
|
};
|
2022-08-05 22:22:23 +03:00
|
|
|
use entities::user_keys;
|
2022-08-07 22:33:16 +03:00
|
|
|
use redis_cell_client::ThrottleResult;
|
2022-08-04 04:10:27 +03:00
|
|
|
use reqwest::StatusCode;
|
2022-08-06 04:55:18 +03:00
|
|
|
use sea_orm::{
|
|
|
|
ColumnTrait, DeriveColumn, EntityTrait, EnumIter, IdenStatic, QueryFilter, QuerySelect,
|
|
|
|
};
|
2022-08-04 04:10:27 +03:00
|
|
|
use std::net::{IpAddr, SocketAddr};
|
2022-06-05 22:58:47 +03:00
|
|
|
use std::sync::Arc;
|
2022-08-07 22:33:16 +03:00
|
|
|
use tracing::{debug, info};
|
2022-08-06 04:17:25 +03:00
|
|
|
use uuid::Uuid;
|
2022-06-15 04:02:26 +03:00
|
|
|
|
2022-06-05 22:58:47 +03:00
|
|
|
use crate::app::Web3ProxyApp;
|
|
|
|
|
2022-08-04 04:10:27 +03:00
|
|
|
use self::errors::handle_anyhow_error;
|
|
|
|
|
|
|
|
pub async fn rate_limit_by_ip(app: &Web3ProxyApp, ip: &IpAddr) -> Result<(), impl IntoResponse> {
|
2022-08-07 22:33:16 +03:00
|
|
|
let rate_limiter_key = format!("ip-{}", ip);
|
2022-08-04 04:10:27 +03:00
|
|
|
|
2022-08-06 04:17:25 +03:00
|
|
|
// TODO: dry this up with rate_limit_by_key
|
|
|
|
if let Some(rate_limiter) = app.rate_limiter() {
|
2022-08-07 22:33:16 +03:00
|
|
|
match rate_limiter
|
2022-08-06 08:26:43 +03:00
|
|
|
.throttle_key(&rate_limiter_key, None, None, None)
|
|
|
|
.await
|
|
|
|
{
|
2022-08-07 22:33:16 +03:00
|
|
|
Ok(ThrottleResult::Allowed) => {}
|
|
|
|
Ok(ThrottleResult::RetryAt(_retry_at)) => {
|
|
|
|
// TODO: set headers so they know when they can retry
|
|
|
|
debug!(?rate_limiter_key, "rate limit exceeded"); // this is too verbose, but a stat might be good
|
|
|
|
// TODO: use their id if possible
|
|
|
|
return Err(handle_anyhow_error(
|
|
|
|
Some(StatusCode::TOO_MANY_REQUESTS),
|
|
|
|
None,
|
|
|
|
anyhow::anyhow!(format!("too many requests from this ip: {}", ip)),
|
|
|
|
)
|
|
|
|
.await
|
|
|
|
.into_response());
|
|
|
|
}
|
|
|
|
Err(err) => {
|
|
|
|
// internal error, not rate limit being hit
|
|
|
|
// TODO: i really want axum to do this for us in a single place.
|
|
|
|
return Err(handle_anyhow_error(
|
|
|
|
Some(StatusCode::INTERNAL_SERVER_ERROR),
|
|
|
|
None,
|
|
|
|
anyhow::anyhow!(format!("too many requests from this ip: {}", ip)),
|
|
|
|
)
|
|
|
|
.await
|
|
|
|
.into_response());
|
|
|
|
}
|
2022-08-06 04:17:25 +03:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// TODO: if no redis, rate limit with a local cache?
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
2022-08-04 04:10:27 +03:00
|
|
|
}
|
|
|
|
|
2022-08-05 22:22:23 +03:00
|
|
|
/// Rate limit a request by its user api key.
///
/// Looks the key up in the database (must exist and be active), then applies
/// the user's per-minute request limit via the redis-backed rate limiter.
///
/// if Ok(()), rate limits are acceptable
/// if Err(response), rate limits exceeded
pub async fn rate_limit_by_key(
    app: &Web3ProxyApp,
    user_key: Uuid,
) -> Result<(), impl IntoResponse> {
    let db = app.db_conn();

    /// query just a few columns instead of the entire table
    #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
    enum QueryAs {
        UserId,
        RequestsPerMinute,
    }

    // query the db to make sure this key is active
    // TODO: probably want a cache on this
    match user_keys::Entity::find()
        .select_only()
        .column_as(user_keys::Column::UserId, QueryAs::UserId)
        .column_as(
            user_keys::Column::RequestsPerMinute,
            QueryAs::RequestsPerMinute,
        )
        .filter(user_keys::Column::ApiKey.eq(user_key))
        .filter(user_keys::Column::Active.eq(true))
        .into_values::<_, QueryAs>()
        .one(db)
        .await
    {
        // the turbofish pins the selected-column tuple: (user_id, requests_per_minute)
        Ok::<Option<(i64, u32)>, _>(Some((_user_id, user_count_per_period))) => {
            // user key is valid
            if let Some(rate_limiter) = app.rate_limiter() {
                // TODO: how does max burst actually work? what should it be?
                // NOTE(review): a third of the per-minute quota is an arbitrary
                // burst choice; also rounds to 0 for quotas < 3 — confirm intent
                let user_max_burst = user_count_per_period / 3;
                // the quota window, in seconds (requests *per minute*)
                let user_period = 60;

                // NOTE(review): is_err() here treats internal rate-limiter
                // failures the same as an exceeded limit (both become 429),
                // unlike rate_limit_by_ip which returns 500 for errors
                if rate_limiter
                    .throttle_key(
                        &user_key.to_string(),
                        Some(user_max_burst),
                        Some(user_count_per_period),
                        Some(user_period),
                    )
                    .await
                    .is_err()
                {
                    // TODO: set headers so they know when they can retry
                    // warn!(?ip, "public rate limit exceeded"); // this is too verbose, but a stat might be good
                    // TODO: use their id if possible
                    return Err(handle_anyhow_error(
                        Some(StatusCode::TOO_MANY_REQUESTS),
                        None,
                        // TODO: include the user id (NOT THE API KEY!) here
                        anyhow::anyhow!("too many requests from this key"),
                    )
                    .await
                    .into_response());
                }
            } else {
                // TODO: if no redis, rate limit with a local cache?
            }
        }
        Ok(None) => {
            // invalid user key
            // TODO: rate limit by ip here, too? maybe tarpit?
            return Err(handle_anyhow_error(
                Some(StatusCode::FORBIDDEN),
                None,
                anyhow::anyhow!("unknown api key"),
            )
            .await
            .into_response());
        }
        Err(err) => {
            // database error — surface as a 500 with context
            let err: anyhow::Error = err.into();

            return Err(handle_anyhow_error(
                Some(StatusCode::INTERNAL_SERVER_ERROR),
                None,
                err.context("failed checking database for user key"),
            )
            .await
            .into_response());
        }
    }

    // key is valid and under its limit (or no limiter configured)
    Ok(())
}
|
|
|
|
|
2022-06-05 22:58:47 +03:00
|
|
|
pub async fn run(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()> {
|
|
|
|
// build our application with a route
|
2022-07-26 08:01:08 +03:00
|
|
|
// order most to least common
|
2022-06-05 22:58:47 +03:00
|
|
|
let app = Router::new()
|
2022-08-05 22:22:23 +03:00
|
|
|
.route("/", post(http_proxy::public_proxy_web3_rpc))
|
|
|
|
.route("/", get(ws_proxy::public_websocket_handler))
|
2022-08-06 04:17:25 +03:00
|
|
|
.route("/u/:user_key", post(http_proxy::user_proxy_web3_rpc))
|
|
|
|
.route("/u/:user_key", get(ws_proxy::user_websocket_handler))
|
2022-06-29 21:22:53 +03:00
|
|
|
.route("/health", get(http::health))
|
2022-06-05 22:58:47 +03:00
|
|
|
.route("/status", get(http::status))
|
2022-07-14 00:49:57 +03:00
|
|
|
.route("/users", post(users::create_user))
|
2022-06-05 22:58:47 +03:00
|
|
|
.layer(Extension(proxy_app));
|
|
|
|
|
|
|
|
// 404 for any unknown routes
|
|
|
|
let app = app.fallback(errors::handler_404.into_service());
|
|
|
|
|
|
|
|
// run our app with hyper
|
|
|
|
// `axum::Server` is a re-export of `hyper::Server`
|
2022-08-05 22:22:23 +03:00
|
|
|
// TODO: allow only listening on localhost?
|
2022-06-05 22:58:47 +03:00
|
|
|
let addr = SocketAddr::from(([0, 0, 0, 0], port));
|
2022-08-06 04:17:25 +03:00
|
|
|
info!("listening on port {}", port);
|
2022-07-07 06:22:09 +03:00
|
|
|
// TODO: into_make_service is enough if we always run behind a proxy. make into_make_service_with_connect_info optional?
|
2022-06-05 22:58:47 +03:00
|
|
|
axum::Server::bind(&addr)
|
2022-08-07 09:48:57 +03:00
|
|
|
// TODO: option to use with_connect_info. we want it in dev, but not when running behind a proxy, but not
|
|
|
|
.serve(app.into_make_service_with_connect_info::<SocketAddr>())
|
2022-08-07 22:33:16 +03:00
|
|
|
.with_graceful_shutdown(signal_shutdown())
|
2022-08-07 09:48:57 +03:00
|
|
|
// .serve(app.into_make_service())
|
2022-06-05 22:58:47 +03:00
|
|
|
.await
|
|
|
|
.map_err(Into::into)
|
|
|
|
}
|
2022-08-07 22:33:16 +03:00
|
|
|
|
|
|
|
/// Tokio signal handler that will wait for a user to press CTRL+C.
|
|
|
|
/// We use this in our hyper `Server` method `with_graceful_shutdown`.
|
|
|
|
async fn signal_shutdown() {
|
|
|
|
tokio::signal::ctrl_c()
|
|
|
|
.await
|
|
|
|
.expect("expect tokio signal ctrl-c");
|
|
|
|
info!("signal shutdown");
|
|
|
|
}
|