mod rtt_estimate;

use std::sync::Arc;

use log::{error, trace};
use tokio::sync::mpsc;
use tokio::sync::mpsc::error::TrySendError;
use tokio::task::JoinHandle;
use tokio::time::{Duration, Instant};

use self::rtt_estimate::AtomicRttEstimate;
use crate::util::nanos::nanos;

/// Latency calculation using Peak EWMA algorithm
///
/// Updates are done in a separate task to avoid locking or race
/// conditions. Reads may happen on any thread.
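///
/// A minimal usage sketch (the decay value here is hypothetical; `spawn`
/// must be called from within a tokio runtime):
///
/// ```ignore
/// let latency = PeakEwmaLatency::spawn(1_000_000.0, 8, Duration::from_millis(10));
/// latency.report(Duration::from_millis(25));
/// let current = latency.latency();
/// ```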
#[derive(Debug)]
pub struct PeakEwmaLatency {
    /// Join handle for the latency calculation task
    pub join_handle: JoinHandle<()>,
    /// Send to update with each request duration
    request_tx: mpsc::Sender<Duration>,
    /// Latency average and last update time
    rtt_estimate: Arc<AtomicRttEstimate>,
    /// Decay time
    decay_ns: f64,
}

impl PeakEwmaLatency {
    /// Spawn the task for calculating peak request latency
    ///
    /// Returns a handle that can also be used to read the current
    /// average latency.
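    ///
    /// `decay_ns` is the exponential-decay time constant in nanoseconds:
    /// with no new samples for `dt` nanoseconds, the estimate scales by
    /// roughly `e^(-dt / decay_ns)` (see the `default_decay` test below).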
pub fn spawn(decay_ns: f64, buf_size: usize, start_latency: Duration) -> Self {
debug_assert!(decay_ns > 0.0, "decay_ns must be positive");
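
        // Bounded channel: `report` uses try_send, so reporting never blocks;
        // if the buffer fills, samples are dropped (and logged as errors).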
        let (request_tx, request_rx) = mpsc::channel(buf_size);

        let rtt_estimate = Arc::new(AtomicRttEstimate::new(start_latency));

let task = PeakEwmaLatencyTask {
            request_rx,
            rtt_estimate: rtt_estimate.clone(),
            update_at: Instant::now(),
            decay_ns,
        };

let join_handle = tokio::spawn(task.run());

        Self {
            join_handle,
            request_tx,
            rtt_estimate,
            decay_ns,
        }
}

    /// Get the current peak-ewma latency estimate
    pub fn latency(&self) -> Duration {
let mut estimate = self.rtt_estimate.load();

        let now = Instant::now();
        assert!(
            estimate.update_at <= now,
            "update_at is {}ns in the future",
            estimate.update_at.duration_since(now).as_nanos(),
        );

        // Update the RTT estimate to account for decay since the last update.
        // TODO: updating here means we don't actually write from just one
        // thread! That's probably how we get partially written data.
estimate.update(0.0, self.decay_ns, now)
}

    /// Report latency from a single request
    ///
/// Should only be called with a duration from the Web3Rpc that owns it.
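    ///
    /// This never blocks: the sample is sent with `try_send`, and if the
    /// channel is full it is dropped and an error is logged.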
pub fn report(&self, duration: Duration) {
match self.request_tx.try_send(duration) {
            Ok(()) => {}
            Err(TrySendError::Full(_)) => {
                // We don't want to block if the channel is full, just
                // report the error.
                error!("Latency report channel full");
                // TODO: could we spawn a new tokio task to report this later?
            }
            Err(TrySendError::Closed(_)) => {
                unreachable!("Owner should keep channel open");
            }
};
}
}

/// Task to be spawned per-Web3Rpc for calculating the peak request latency
#[derive(Debug)]
struct PeakEwmaLatencyTask {
    /// Receive new request timings for update
    request_rx: mpsc::Receiver<Duration>,
    /// Current estimate and update time
    rtt_estimate: Arc<AtomicRttEstimate>,
    /// Last update time, used for decay calculation
    update_at: Instant,
    /// Decay time
    decay_ns: f64,
}

impl PeakEwmaLatencyTask {
    /// Run the loop for updating latency
    async fn run(mut self) {
while let Some(rtt) = self.request_rx.recv().await {
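            // Fold each reported sample into the shared estimate.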
            self.update(rtt);
        }

trace!("latency loop exited");
}

    /// Update the estimate object atomically.
    fn update(&self, rtt: Duration) {
let rtt = nanos(rtt);

        let now = Instant::now();
        assert!(
            self.update_at <= now,
            "update_at is {}ns in the future",
            self.update_at.duration_since(now).as_nanos(),
        );

        self.rtt_estimate
.fetch_update(|mut rtt_estimate| rtt_estimate.update(rtt, self.decay_ns, now));
}
}

#[cfg(test)]
mod tests {
    use tokio::time::{self, Duration};

    use crate::util::nanos::NANOS_PER_MILLI;

/// The default RTT estimate decays, so that new nodes are considered if the
    /// default RTT is too high.
    #[tokio::test(start_paused = true)]
    async fn default_decay() {
let estimate =
            super::PeakEwmaLatency::spawn(NANOS_PER_MILLI * 1_000.0, 8, Duration::from_millis(10));
        let load = estimate.latency();
        assert_eq!(load, Duration::from_millis(10));

        time::advance(Duration::from_millis(100)).await;
        let load = estimate.latency();
        assert!(Duration::from_millis(9) < load && load < Duration::from_millis(10));

        time::advance(Duration::from_millis(100)).await;
        let load = estimate.latency();
        assert!(Duration::from_millis(8) < load && load < Duration::from_millis(9));
}
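
    /// Reporting a slow request should raise the estimate well above the
    /// starting value. A hedged sketch: the exact result depends on
    /// rtt_estimate's update rule, so only the direction of change is
    /// asserted.
    #[tokio::test(start_paused = true)]
    async fn report_raises_estimate() {
        let estimate =
            super::PeakEwmaLatency::spawn(NANOS_PER_MILLI * 1_000.0, 8, Duration::from_millis(10));

        estimate.report(Duration::from_millis(100));
        // Advance paused time so the background task can process the report.
        time::advance(Duration::from_millis(1)).await;

        assert!(estimate.latency() > Duration::from_millis(10));
    }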
}