inotify is a pain cross platform. just check file hashes

Bryan Stitt 2023-02-27 13:13:18 -08:00
parent 6067369ee3
commit 11ee0aafe9
5 changed files with 70 additions and 66 deletions
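The title's idea is portable change detection: instead of platform-specific file notifications, periodically re-read the config and see whether it changed. Note that the code in this commit actually compares the parsed TopConfig values rather than file hashes. As a rough sketch of the literal hash-checking approach the message describes (the path and the 10-second interval here are made up, not from this commit):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::{fs, thread, time::Duration};

// Hash the raw bytes of a file so successive reads can be compared cheaply.
fn file_hash(path: &str) -> std::io::Result<u64> {
    let bytes = fs::read(path)?;
    let mut hasher = DefaultHasher::new();
    bytes.hash(&mut hasher);
    Ok(hasher.finish())
}

fn main() -> std::io::Result<()> {
    let path = "web3_proxy.toml"; // hypothetical config path
    let mut last = file_hash(path)?;
    loop {
        thread::sleep(Duration::from_secs(10));
        let current = file_hash(path)?;
        if current != last {
            println!("config changed, time to reload");
            last = current;
        }
    }
}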

Cargo.lock (generated, 23 lines changed)

@@ -2535,28 +2535,6 @@ dependencies = [
  "regex",
 ]
 
-[[package]]
-name = "inotify"
-version = "0.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "abf888f9575c290197b2c948dc9e9ff10bd1a39ad1ea8585f734585fa6b9d3f9"
-dependencies = [
- "bitflags",
- "futures-core",
- "inotify-sys",
- "libc",
- "tokio",
-]
-
-[[package]]
-name = "inotify-sys"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb"
-dependencies = [
- "libc",
-]
-
 [[package]]
 name = "inout"
 version = "0.1.3"

@@ -5764,7 +5742,6 @@ dependencies = [
  "hashbrown 0.13.2",
  "hdrhistogram",
  "http",
- "inotify",
  "ipnet",
  "itertools",
  "log",

Dockerfile

@@ -29,7 +29,7 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
 # using a "release" profile (which install does) is **very** important
 RUN --mount=type=cache,target=/usr/local/cargo/registry \
     --mount=type=cache,target=/app/target \
-    cargo install --locked --no-default-features --profile faster_release --root /opt/bin --path ./web3_proxy
+    cargo install --locked --no-default-features --features inotify --profile faster_release --root /opt/bin --path ./web3_proxy
 
 #
 # We do not need the Rust toolchain to run the binary!

web3_proxy/Cargo.toml

@@ -45,7 +45,6 @@ handlebars = "4.3.6"
 hashbrown = { version = "0.13.2", features = ["serde"] }
 hdrhistogram = "7.5.2"
 http = "0.2.9"
-inotify = "0.10"
 ipnet = "2.7.1"
 itertools = "0.10.5"
 log = "0.4.17"
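Taken together, the Dockerfile gains --features inotify while the hard inotify dependency is dropped here; that flag only resolves if the crate declares an inotify cargo feature somewhere not visible in this diff. The commented-out #[cfg(feature = "inotify")] in the code below shows the intended shape. A minimal sketch of that gating pattern, with hypothetical function bodies:

// Sketch of cargo feature gating: with `--features inotify` the watcher
// compiles in; otherwise the portable polling fallback is used. Assumes
// Cargo.toml declares an `inotify` feature (not shown in this diff).

#[cfg(feature = "inotify")]
fn watch_config(path: &str) {
    // a platform-specific inotify watcher would go here
    println!("watching {path} with inotify");
}

#[cfg(not(feature = "inotify"))]
fn watch_config(path: &str) {
    // portable fallback: re-read the file on a timer and compare
    println!("polling {path} every few seconds");
}

fn main() {
    watch_config("web3_proxy.toml"); // hypothetical path
}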

web3_proxy/src/bin/web3_proxy_cli/proxyd.rs

@@ -1,13 +1,11 @@
 #![forbid(unsafe_code)]
-
-use std::path::PathBuf;
-use std::{fs, thread};
-
 use argh::FromArgs;
 use futures::StreamExt;
-use inotify::{EventMask, Inotify, WatchMask};
 use log::{error, info, warn};
 use num::Zero;
+use std::path::PathBuf;
+use std::time::Duration;
+use std::{fs, thread};
 use tokio::sync::broadcast;
 use web3_proxy::app::{flatten_handle, flatten_handles, Web3ProxyApp};
 use web3_proxy::config::TopConfig;
@@ -51,7 +49,7 @@ impl ProxydSubCommand {
 }
 
 async fn run(
-    top_config: TopConfig,
+    mut top_config: TopConfig,
     top_config_path: Option<PathBuf>,
     frontend_port: u16,
     prometheus_port: u16,
@@ -68,51 +66,81 @@ async fn run(
     // start the main app
     let mut spawned_app =
-        Web3ProxyApp::spawn(top_config, num_workers, shutdown_sender.subscribe()).await?;
+        Web3ProxyApp::spawn(top_config.clone(), num_workers, shutdown_sender.subscribe()).await?;
 
     // start thread for watching config
     if let Some(top_config_path) = top_config_path {
-        let mut inotify = Inotify::init().expect("Failed to initialize inotify");
-
-        inotify
-            .add_watch(top_config_path.clone(), WatchMask::MODIFY)
-            .expect("Failed to add inotify watch on config");
-
-        let mut buffer = [0u8; 4096];
-
         let config_sender = spawned_app.new_top_config_sender;
 
-        // TODO: exit the app if this handle exits
-        // TODO: debounce
-        thread::spawn(move || loop {
-            let events = inotify
-                .read_events_blocking(&mut buffer)
-                .expect("Failed to read inotify events");
-
-            for event in events {
-                if event.mask.contains(EventMask::MODIFY) {
-                    info!("config changed");
-                    match fs::read_to_string(&top_config_path) {
-                        Ok(top_config) => match toml::from_str(&top_config) {
-                            Ok(top_config) => {
-                                config_sender.send(top_config).unwrap();
-                            }
-                            Err(err) => {
-                                // TODO: panic?
-                                error!("Unable to parse config! {:#?}", err);
-                            }
-                        },
-                        Err(err) => {
-                            // TODO: panic?
-                            error!("Unable to read config! {:#?}", err);
-                        }
-                    };
-                } else {
-                    // TODO: is "MODIFY" enough, or do we want CLOSE_WRITE?
-                    unimplemented!();
-                }
-            }
-        });
+        /*
+        #[cfg(feature = "inotify")]
+        {
+            let mut inotify = Inotify::init().expect("Failed to initialize inotify");
+
+            inotify
+                .add_watch(top_config_path.clone(), WatchMask::MODIFY)
+                .expect("Failed to add inotify watch on config");
+
+            let mut buffer = [0u8; 4096];
+
+            // TODO: exit the app if this handle exits
+            thread::spawn(move || loop {
+                // TODO: debounce
+                let events = inotify
+                    .read_events_blocking(&mut buffer)
+                    .expect("Failed to read inotify events");
+
+                for event in events {
+                    if event.mask.contains(EventMask::MODIFY) {
+                        info!("config changed");
+                        match fs::read_to_string(&top_config_path) {
+                            Ok(top_config) => match toml::from_str(&top_config) {
+                                Ok(top_config) => {
+                                    config_sender.send(top_config).unwrap();
+                                }
+                                Err(err) => {
+                                    // TODO: panic?
+                                    error!("Unable to parse config! {:#?}", err);
+                                }
+                            },
+                            Err(err) => {
+                                // TODO: panic?
+                                error!("Unable to read config! {:#?}", err);
+                            }
+                        };
+                    } else {
+                        // TODO: is "MODIFY" enough, or do we want CLOSE_WRITE?
+                        unimplemented!();
+                    }
+                }
+            });
+        }
+        */
+
+        // #[cfg(not(feature = "inotify"))]
+        {
+            thread::spawn(move || loop {
+                match fs::read_to_string(&top_config_path) {
+                    Ok(new_top_config) => match toml::from_str(&new_top_config) {
+                        Ok(new_top_config) => {
+                            if new_top_config != top_config {
+                                top_config = new_top_config;
+                                config_sender.send(top_config.clone()).unwrap();
+                            }
+                        }
+                        Err(err) => {
+                            // TODO: panic?
+                            error!("Unable to parse config! {:#?}", err);
+                        }
+                    },
+                    Err(err) => {
+                        // TODO: panic?
+                        error!("Unable to read config! {:#?}", err);
+                    }
+                }
+
+                thread::sleep(Duration::from_secs(10));
+            });
+        }
     }
 
     // start the prometheus metrics port
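Condensed, the new reload loop is: read the file on a timer, parse it, compare with the previous TopConfig, and broadcast only on change. Below is a sketch of that pattern, not the commit's code: TopConfig is stubbed as a string wrapper, and a local tokio broadcast channel stands in for spawned_app.new_top_config_sender (the file already imports tokio::sync::broadcast):

use std::{fs, thread, time::Duration};
use tokio::sync::broadcast;

// Stand-in for the real TopConfig, which derives Deserialize + PartialEq.
#[derive(Clone, Debug, PartialEq, Eq)]
struct TopConfig(String);

fn main() {
    let (config_sender, mut config_receiver) = broadcast::channel::<TopConfig>(16);

    let path = "web3_proxy.toml".to_string(); // hypothetical path
    let mut top_config = TopConfig(fs::read_to_string(&path).unwrap_or_default());

    // Watcher thread: the portable replacement for inotify.
    thread::spawn(move || loop {
        if let Ok(raw) = fs::read_to_string(&path) {
            let new_top_config = TopConfig(raw);
            // The PartialEq comparison is what keeps polling cheap to act on.
            if new_top_config != top_config {
                top_config = new_top_config;
                let _ = config_sender.send(top_config.clone());
            }
        }
        thread::sleep(Duration::from_secs(10));
    });

    // Consumer side: the app would apply each broadcast config.
    loop {
        if let Ok(new_config) = config_receiver.try_recv() {
            println!("applying new config: {:?}", new_config);
        }
        thread::sleep(Duration::from_millis(500));
    }
}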

web3_proxy/src/config.rs

@@ -38,7 +38,7 @@ pub struct CliConfig {
     pub cookie_key_filename: String,
 }
 
-#[derive(Clone, Debug, Deserialize)]
+#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
 pub struct TopConfig {
     pub app: AppConfig,
     pub balanced_rpcs: HashMap<String, Web3RpcConfig>,
@@ -51,7 +51,7 @@ pub struct TopConfig {
 
 /// shared configuration between Web3Rpcs
 // TODO: no String, only &str
-#[derive(Clone, Debug, Default, Deserialize)]
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq)]
 pub struct AppConfig {
     /// Request limit for allowed origins for anonymous users.
     /// These requests get rate limited by IP.
@@ -203,7 +203,7 @@ fn default_response_cache_max_bytes() -> u64 {
 }
 
 /// Configuration for a backend web3 RPC server
-#[derive(Clone, Debug, Default, Deserialize)]
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq)]
 pub struct Web3RpcConfig {
     /// simple way to disable a connection without deleting the row
     #[serde(default)]
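The derive changes exist so that the new_top_config != top_config comparison compiles: != needs PartialEq across the whole config tree, and Eq can only be derived while every field is itself Eq (an f64 field, for example, would force keeping just PartialEq). A minimal illustration, assuming serde with its derive feature and hypothetical fields:

use serde::Deserialize;

// Hypothetical, trimmed-down config; the point is the derive list.
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq)]
struct Web3RpcConfig {
    disabled: bool,
    soft_limit: u64,
}

fn main() {
    let a = Web3RpcConfig::default();
    let mut b = a.clone();
    b.soft_limit = 100;
    // This whole-struct comparison is exactly what the polling reload relies on.
    assert!(a != b);
    println!("configs differ: {}", a != b);
}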