unlimited localhost
commit f1636d3b85
parent fcc1843af0
@@ -19,6 +19,7 @@ use crate::rpcs::blockchain::Web3ProxyBlock;
 use crate::rpcs::consensus::ConsensusWeb3Rpcs;
 use crate::rpcs::many::Web3Rpcs;
 use crate::rpcs::one::Web3Rpc;
+use crate::rpcs::provider::{connect_http, EthersHttpProvider};
 use crate::rpcs::transactions::TxStatus;
 use crate::stats::{AppStat, StatBuffer};
 use crate::user_token::UserBearerToken;
@@ -140,6 +141,7 @@ pub struct Web3ProxyApp {
     /// Optional read-only database for users and accounting
     pub db_replica: Option<DatabaseReplica>,
     pub hostname: Option<String>,
+    pub internal_provider: Arc<EthersHttpProvider>,
     /// store pending transactions that we've seen so that we don't send duplicates to subscribers
     /// TODO: think about this more. might be worth storing if we sent the transaction or not and using this for automatic retries
     pub pending_transactions: Arc<CacheWithTTL<TxHash, TxStatus>>,
@@ -212,6 +214,7 @@ pub struct Web3ProxyAppSpawn {
 impl Web3ProxyApp {
     /// The main entrypoint.
     pub async fn spawn(
+        app_frontend_port: u16,
         top_config: TopConfig,
         num_workers: usize,
         shutdown_sender: broadcast::Sender<()>,
@@ -604,30 +607,45 @@ impl Web3ProxyApp {
             .ok()
             .and_then(|x| x.to_str().map(|x| x.to_string()));

+        // TODO: i'm sure theres much better ways to do this, but i don't want to spend time fighting traits right now
+        // TODO: what interval? i don't think we use it
+        // i tried and failed to `impl JsonRpcClient for Web3ProxyApi`
+        // i tried and failed to set up ipc. http is already running, so lets just use that
+        let internal_provider = connect_http(
+            format!("http://127.0.0.1:{}", app_frontend_port)
+                .parse()
+                .unwrap(),
+            http_client.clone(),
+            Duration::from_secs(10),
+        )?;
+
+        let internal_provider = Arc::new(internal_provider);
+
         let app = Self {
-            config: top_config.app.clone(),
             balanced_rpcs,
+            bearer_token_semaphores,
             bundler_4337_rpcs,
-            http_client,
-            kafka_producer,
-            private_rpcs,
-            jsonrpc_response_cache: response_cache,
-            watch_consensus_head_receiver,
-            pending_tx_sender,
-            pending_transactions,
-            frontend_ip_rate_limiter,
-            frontend_registered_user_rate_limiter,
-            login_rate_limiter,
+            config: top_config.app.clone(),
             db_conn,
             db_replica,
-            influxdb_client,
+            frontend_ip_rate_limiter,
+            frontend_registered_user_rate_limiter,
             hostname,
-            vredis_pool,
-            rpc_secret_key_cache,
-            bearer_token_semaphores,
+            http_client,
+            influxdb_client,
+            internal_provider,
             ip_semaphores,
-            user_semaphores,
+            jsonrpc_response_cache: response_cache,
+            kafka_producer,
+            login_rate_limiter,
+            pending_transactions,
+            pending_tx_sender,
+            private_rpcs,
+            rpc_secret_key_cache,
             stat_sender,
+            user_semaphores,
+            vredis_pool,
+            watch_consensus_head_receiver,
         };

         let app = Arc::new(app);
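The interesting part of this hunk is that the app now talks to itself over HTTP: `connect_http` builds an ethers provider pointed at the proxy's own frontend port, after the IPC and `impl JsonRpcClient` routes were abandoned (per the TODO comments). A minimal sketch of what `connect_http` plausibly does, assuming `EthersHttpProvider` is an alias for `ethers::providers::Provider<Http>` and `http_client` is an optional shared `reqwest::Client` (neither definition appears in this diff):

```rust
use std::time::Duration;

use ethers::providers::{Http, Provider};
use url::Url;

// Assumed alias; the real definition lives in rpcs::provider, not shown in this diff.
pub type EthersHttpProvider = Provider<Http>;

/// Build an HTTP JSON-RPC provider, reusing the app's reqwest client if one exists.
pub fn connect_http(
    url: Url,
    http_client: Option<reqwest::Client>,
    interval: Duration,
) -> anyhow::Result<EthersHttpProvider> {
    let transport = match http_client {
        // reuse the shared client so connection pools and timeouts stay consistent
        Some(client) => Http::new_with_client(url, client),
        None => Http::new(url),
    };

    // the interval controls how often watch/poll helpers re-query over HTTP
    Ok(Provider::new(transport).interval(interval))
}
```

Under that reading, the `Duration::from_secs(10)` at the call site is just the provider's polling interval, which matches the "what interval? i don't think we use it" TODO.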

@@ -75,59 +75,17 @@ async fn run(
         broadcast::channel(1);

     // start the main app
-    // let mut spawned_app = Web3ProxyApp::spawn(top_config, num_workers, app_shutdown_sender.clone()).await?;
-    let mut spawned_app =
-        Web3ProxyApp::spawn(top_config.clone(), num_workers, app_shutdown_sender.clone()).await?;
+    let mut spawned_app = Web3ProxyApp::spawn(
+        app_frontend_port,
+        top_config.clone(),
+        num_workers,
+        app_shutdown_sender.clone(),
+    )
+    .await?;

     // start thread for watching config
     if let Some(top_config_path) = top_config_path {
         let config_sender = spawned_app.new_top_config_sender;
-        /*
-        #[cfg(feature = "inotify")]
-        {
-            let mut inotify = Inotify::init().expect("Failed to initialize inotify");
-
-            inotify
-                .add_watch(top_config_path.clone(), WatchMask::MODIFY)
-                .expect("Failed to add inotify watch on config");
-
-            let mut buffer = [0u8; 4096];
-
-            // TODO: exit the app if this handle exits
-            thread::spawn(move || loop {
-                // TODO: debounce
-
-                let events = inotify
-                    .read_events_blocking(&mut buffer)
-                    .expect("Failed to read inotify events");
-
-                for event in events {
-                    if event.mask.contains(EventMask::MODIFY) {
-                        info!("config changed");
-                        match fs::read_to_string(&top_config_path) {
-                            Ok(top_config) => match toml::from_str(&top_config) {
-                                Ok(top_config) => {
-                                    config_sender.send(top_config).unwrap();
-                                }
-                                Err(err) => {
-                                    // TODO: panic?
-                                    error!("Unable to parse config! {:#?}", err);
-                                }
-                            },
-                            Err(err) => {
-                                // TODO: panic?
-                                error!("Unable to read config! {:#?}", err);
-                            }
-                        };
-                    } else {
-                        // TODO: is "MODIFY" enough, or do we want CLOSE_WRITE?
-                        unimplemented!();
-                    }
-                }
-            });
-        }
-        */
-        // #[cfg(not(feature = "inotify"))]
         {
             thread::spawn(move || loop {
                 match fs::read_to_string(&top_config_path) {
@@ -154,35 +112,6 @@ async fn run(
         }
     }

-    // start thread for watching config
-    // if let Some(top_config_path) = top_config_path {
-    //     let config_sender = spawned_app.new_top_config_sender;
-    //     {
-    //         thread::spawn(move || loop {
-    //             match fs::read_to_string(&top_config_path) {
-    //                 Ok(new_top_config) => match toml::from_str(&new_top_config) {
-    //                     Ok(new_top_config) => {
-    //                         if new_top_config != top_config {
-    //                             top_config = new_top_config;
-    //                             config_sender.send(top_config.clone()).unwrap();
-    //                         }
-    //                     }
-    //                     Err(err) => {
-    //                         // TODO: panic?
-    //                         error!("Unable to parse config! {:#?}", err);
-    //                     }
-    //                 },
-    //                 Err(err) => {
-    //                     // TODO: panic?
-    //                     error!("Unable to read config! {:#?}", err);
-    //                 }
-    //             }
-    //
-    //             thread::sleep(Duration::from_secs(10));
-    //         });
-    //     }
-    // }
-
    // start the prometheus metrics port
    let prometheus_handle = tokio::spawn(prometheus::serve(
        spawned_app.app.clone(),
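What survives the cleanup is the plain polling watcher: a thread that re-reads the TOML file every ten seconds and pushes changed configs down a channel, exactly the shape of the block deleted above in commented-out form. A self-contained sketch under stated assumptions: `TopConfig` is reduced to a hypothetical one-field struct, and a std `mpsc` sender stands in for whatever `spawned_app.new_top_config_sender` really is:

```rust
use std::{fs, path::PathBuf, sync::mpsc, thread, time::Duration};

// Hypothetical stand-in; the real TopConfig has many more fields.
#[derive(Clone, Debug, PartialEq, serde::Deserialize)]
struct TopConfig {
    chain_id: u64,
}

fn watch_config(top_config_path: PathBuf, config_sender: mpsc::Sender<TopConfig>) {
    thread::spawn(move || {
        let mut current: Option<TopConfig> = None;

        loop {
            match fs::read_to_string(&top_config_path) {
                Ok(raw) => match toml::from_str::<TopConfig>(&raw) {
                    Ok(new_config) => {
                        // only notify subscribers when the parsed config actually changed
                        if current.as_ref() != Some(&new_config) {
                            current = Some(new_config.clone());
                            config_sender.send(new_config).unwrap();
                        }
                    }
                    // TODO mirrored from the original: should a bad config panic instead?
                    Err(err) => eprintln!("Unable to parse config! {:#?}", err),
                },
                Err(err) => eprintln!("Unable to read config! {:#?}", err),
            }

            thread::sleep(Duration::from_secs(10));
        }
    });
}
```

Polling trades latency for portability: unlike the inotify version it works on every OS and keeps watching through an editor's atomic rename, at the cost of up to a ten-second delay.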
@@ -8,7 +8,7 @@ use crate::rpcs::one::Web3Rpc;
 use crate::stats::{AppStat, BackendRequests, RpcQueryStats};
 use crate::user_token::UserBearerToken;
 use anyhow::Context;
-use axum::headers::authorization::Bearer;
+use axum::headers::authorization::{self, Bearer};
 use axum::headers::{Header, Origin, Referer, UserAgent};
 use chrono::Utc;
 use core::fmt;
@@ -1049,6 +1049,13 @@ impl Web3ProxyApp {
         origin: Option<Origin>,
         proxy_mode: ProxyMode,
     ) -> Web3ProxyResult<RateLimitResult> {
+        if ip.is_loopback() {
+            // TODO: localhost being unlimited should be optional
+            let authorization = Authorization::internal(self.db_conn())?;
+
+            return Ok(RateLimitResult::Allowed(authorization, None));
+        }
+
         // ip rate limits don't check referer or user agent
         // they do check origin because we can override rate limits for some origins
         let authorization = Authorization::external(
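This is the change the commit title refers to: before any rate-limit lookup, a loopback source IP is granted an internal `Authorization` and waved through. Worth noting that `std::net::IpAddr::is_loopback()` covers all of `127.0.0.0/8` plus IPv6 `::1`, not just `127.0.0.1`:

```rust
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

fn main() {
    // the whole 127.0.0.0/8 block is loopback, not only 127.0.0.1
    assert!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).is_loopback());
    assert!(IpAddr::V4(Ipv4Addr::new(127, 255, 0, 3)).is_loopback());

    // and so is IPv6 ::1
    assert!(IpAddr::V6(Ipv6Addr::LOCALHOST).is_loopback());

    // private LAN addresses are not loopback, so they still hit the rate limiter
    assert!(!IpAddr::V4(Ipv4Addr::new(192, 168, 1, 10)).is_loopback());
}
```

That means anything originating connections on the proxy host itself is exempt, including the new `internal_provider`, which calls `http://127.0.0.1:{app_frontend_port}`; the TODO acknowledges this bypass should eventually be configurable.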
@@ -1,16 +0,0 @@
-use std::path::PathBuf;
-use std::sync::Arc;
-use tokio::sync::broadcast;
-
-use crate::app::Web3ProxyApp;
-use crate::errors::Web3ProxyResult;
-
-/// Start an ipc server that has no rate limits
-pub async fn serve(
-    socket_path: PathBuf,
-    proxy_app: Arc<Web3ProxyApp>,
-    mut shutdown_receiver: broadcast::Receiver<()>,
-    shutdown_complete_sender: broadcast::Sender<()>,
-) -> Web3ProxyResult<()> {
-    todo!();
-}
@@ -7,7 +7,6 @@ pub mod config;
 pub mod errors;
 pub mod frontend;
 pub mod http_params;
-pub mod ipc;
 pub mod jsonrpc;
 pub mod pagerduty;
 pub mod prometheus;