add thread for config file watching and run clippy lint

Bryan Stitt 2023-02-26 22:44:09 -08:00
parent bf79d677b0
commit f8f5e7a1c8
12 changed files with 356 additions and 306 deletions

Cargo.lock (generated)

@@ -2972,6 +2972,15 @@ dependencies = [
  "windows-sys 0.42.0",
 ]
 
+[[package]]
+name = "notify-debouncer-mini"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e23e9fa24f094b143c1eb61f90ac6457de87be6987bc70746e0179f7dbc9007b"
+dependencies = [
+ "notify",
+]
+
 [[package]]
 name = "num"
 version = "0.4.0"
@@ -5826,7 +5835,7 @@ dependencies = [
  "log",
  "migration",
  "moka",
- "notify",
+ "notify-debouncer-mini",
  "num",
  "num-traits",
  "once_cell",

Cargo.toml

@@ -49,7 +49,7 @@ ipnet = "2.7.1"
 itertools = "0.10.5"
 log = "0.4.17"
 moka = { version = "0.10.0", default-features = false, features = ["future"] }
-notify = "5.1.0"
+notify-debouncer-mini = { version = "0.2.0", default-features = false }
 num = "0.4.0"
 num-traits = "0.2.15"
 once_cell = { version = "1.17.1" }
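For context, a minimal, hedged sketch of the debounced-watcher API this dependency swap provides, based only on the calls used later in this commit (new_debouncer, DebounceEventResult, watcher().watch). The watched path and the println! handling are placeholders, not part of the real proxy code:

    use std::{path::Path, thread, time::Duration};

    use notify_debouncer_mini::{new_debouncer, notify, DebounceEventResult};

    fn main() -> anyhow::Result<()> {
        // filesystem events within the 2-second window are coalesced into one callback
        let mut debouncer = new_debouncer(
            Duration::from_secs(2),
            None,
            |res: DebounceEventResult| match res {
                Ok(events) => events
                    .iter()
                    .for_each(|e| println!("{:?} changed ({:?})", e.path, e.kind)),
                Err(errors) => errors.iter().for_each(|e| eprintln!("watch error: {}", e)),
            },
        )?;

        // placeholder path; the proxy watches the --config file instead
        debouncer
            .watcher()
            .watch(Path::new("./config/example.toml"), notify::RecursiveMode::Recursive)?;

        // the debouncer runs on a background thread; keep the handle alive
        thread::sleep(Duration::from_secs(30));

        Ok(())
    }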


@@ -10,7 +10,7 @@ use crate::frontend::rpc_proxy_ws::ProxyMode;
 use crate::jsonrpc::{
     JsonRpcForwardedResponse, JsonRpcForwardedResponseEnum, JsonRpcRequest, JsonRpcRequestEnum,
 };
-use crate::rpcs::blockchain::{Web3ProxyBlock};
+use crate::rpcs::blockchain::Web3ProxyBlock;
 use crate::rpcs::many::Web3Rpcs;
 use crate::rpcs::one::Web3Rpc;
 use crate::rpcs::transactions::TxStatus;
@@ -26,7 +26,7 @@ use ethers::core::utils::keccak256;
 use ethers::prelude::{Address, Bytes, Transaction, TxHash, H256, U64};
 use ethers::types::U256;
 use ethers::utils::rlp::{Decodable, Rlp};
-use futures::future::join_all;
+use futures::future::{join_all, pending};
 use futures::stream::{FuturesUnordered, StreamExt};
 use hashbrown::{HashMap, HashSet};
 use ipnet::IpNet;
@@ -37,18 +37,20 @@ use migration::sea_orm::{
 use migration::sea_query::table::ColumnDef;
 use migration::{Alias, DbErr, Migrator, MigratorTrait, Table};
 use moka::future::Cache;
+use notify_debouncer_mini::{new_debouncer, notify, DebounceEventResult};
 use redis_rate_limiter::redis::AsyncCommands;
 use redis_rate_limiter::{redis, DeadpoolRuntime, RedisConfig, RedisPool, RedisRateLimiter};
 use serde::Serialize;
 use serde_json::json;
 use serde_json::value::to_raw_value;
-use std::fmt;
 use std::hash::{Hash, Hasher};
 use std::net::IpAddr;
 use std::num::NonZeroU64;
+use std::path::PathBuf;
 use std::str::FromStr;
 use std::sync::{atomic, Arc};
 use std::time::Duration;
+use std::{fmt, fs};
 use tokio::sync::{broadcast, watch, Semaphore};
 use tokio::task::JoinHandle;
 use tokio::time::{sleep, timeout};
@@ -365,11 +367,13 @@ impl Web3ProxyApp {
     /// The main entrypoint.
     pub async fn spawn(
         top_config: TopConfig,
+        top_config_path: Option<PathBuf>,
         num_workers: usize,
         shutdown_receiver: broadcast::Receiver<()>,
     ) -> anyhow::Result<Web3ProxyAppSpawn> {
         // safety checks on the config
         // while i would prefer this to be in a "apply_top_config" function, that is a larger refactor
+        // TODO: maybe don't spawn with a config at all. have all config updates come through an apply_top_config call
         if let Some(redirect) = &top_config.app.redirect_rpc_key_url {
             assert!(
                 redirect.contains("{{rpc_key_id}}"),
@@ -391,13 +395,15 @@ impl Web3ProxyApp {
             );
         }
 
+        // these futures are key parts of the app. if they stop running, the app has encountered an irrecoverable error
+        let app_handles = FuturesUnordered::new();
+
         // we must wait for these to end on their own (and they need to subscribe to shutdown_sender)
         let important_background_handles = FuturesUnordered::new();
 
+        // connect to the database and make sure the latest migrations have run
         let mut db_conn = None::<DatabaseConnection>;
         let mut db_replica = None::<DatabaseReplica>;
 
-        // connect to mysql and make sure the latest migrations have run
         if let Some(db_url) = top_config.app.db_url.clone() {
             let db_min_connections = top_config
                 .app
@@ -620,8 +626,6 @@ impl Web3ProxyApp {
             .time_to_idle(Duration::from_secs(120))
             .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());
 
-        let app_handles = FuturesUnordered::new();
-
         // prepare a Web3Rpcs to hold all our balanced connections
         let (balanced_rpcs, balanced_rpcs_handle) = Web3Rpcs::spawn(
             top_config.app.chain_id,
@@ -697,9 +701,64 @@ impl Web3ProxyApp {
         let app = Arc::new(app);
 
-        app.apply_top_config(top_config).await?;
-
-        // TODO: use channel for receiving new top_configs
-        // TODO: return a channel for sending new top_configs
+        // watch for config changes
+        // TODO: initial config reload should be from this channel. not from the call to spawn
+        if let Some(top_config_path) = top_config_path {
+            let (top_config_sender, mut top_config_receiver) = watch::channel(top_config);
+
+            // TODO: i think the debouncer is exiting
+            let mut debouncer = new_debouncer(
+                Duration::from_secs(2),
+                None,
+                move |res: DebounceEventResult| match res {
+                    Ok(events) => events.iter().for_each(|e| {
+                        debug!("Event {:?} for {:?}", e.kind, e.path);
+
+                        // TODO: use tokio::fs here?
+                        let new_top_config: String = fs::read_to_string(&e.path).unwrap();
+                        let new_top_config: TopConfig = toml::from_str(&new_top_config).unwrap();
+
+                        top_config_sender.send_replace(new_top_config);
+                    }),
+                    Err(errors) => errors
+                        .iter()
+                        .for_each(|e| error!("config watcher error {:#?}", e)),
+                },
+            )
+            .context("failed starting debouncer config watcher")?;
+
+            // Add a path to be watched. All files and directories at that path and below will be monitored for changes.
+            info!("watching config @ {}", top_config_path.display());
+            debouncer
+                .watcher()
+                .watch(top_config_path.as_path(), notify::RecursiveMode::Recursive)
+                .context("failed starting config watcher")?;
+
+            let app = app.clone();
+            let config_handle = tokio::spawn(async move {
+                loop {
+                    let new_top_config = top_config_receiver.borrow_and_update().to_owned();
+
+                    app.apply_top_config(new_top_config)
+                        .await
+                        .context("failed applying new top_config")?;
+
+                    top_config_receiver
+                        .changed()
+                        .await
+                        .context("failed awaiting top_config change")?;
+
+                    info!("config changed");
+                }
+            });
+
+            app_handles.push(config_handle);
+        } else {
+            // no path to config, so we don't know what to watch
+            // this isn't an error. the config might just be in memory
+            app.apply_top_config(top_config).await?;
+        }
 
         Ok((app, app_handles, important_background_handles).into())
     }
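Boiled down, the reload path added above is the debouncer callback publishing into a tokio::sync::watch channel plus one spawned task applying whatever the channel currently holds. A rough sketch of just that channel pattern, with a hypothetical Config and apply_config standing in for TopConfig and Web3ProxyApp::apply_top_config:

    use std::time::Duration;

    use tokio::sync::watch;

    #[derive(Clone, Debug)]
    struct Config {
        chain_id: u64,
    }

    // stand-in for Web3ProxyApp::apply_top_config
    async fn apply_config(config: Config) {
        println!("applying config for chain {}", config.chain_id);
    }

    #[tokio::main]
    async fn main() {
        // the sender side lives in the file-watcher callback; send_replace overwrites
        // the previous value, so only the newest config is ever applied
        let (config_sender, mut config_receiver) = watch::channel(Config { chain_id: 1 });

        let apply_task = tokio::spawn(async move {
            loop {
                // copy the current value out and mark it as seen; don't hold the
                // borrow across an await point
                let config = config_receiver.borrow_and_update().clone();

                apply_config(config).await;

                // sleep until the watcher publishes something new
                if config_receiver.changed().await.is_err() {
                    break; // sender dropped, nothing left to watch
                }
            }
        });

        // simulate the debouncer callback firing after a file edit
        tokio::time::sleep(Duration::from_millis(50)).await;
        config_sender.send_replace(Config { chain_id: 137 });

        tokio::time::sleep(Duration::from_millis(50)).await;
        drop(config_sender);
        let _ = apply_task.await;
    }

Because the first borrow_and_update() call happens before the first changed().await, the initial value is applied once at startup, which is why the TODO above suggests the initial config reload could eventually come from this channel instead of the spawn arguments.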


@@ -6,11 +6,11 @@ mod check_config;
 mod count_users;
 mod create_key;
 mod create_user;
-mod daemon;
 mod drop_migration_lock;
 mod list_user_tier;
 mod pagerduty;
 mod popularity_contest;
+mod proxyd;
 mod rpc_accounting;
 mod sentryd;
 mod transfer_key;
@@ -80,7 +80,7 @@ enum SubCommand {
     DropMigrationLock(drop_migration_lock::DropMigrationLockSubCommand),
     Pagerduty(pagerduty::PagerdutySubCommand),
     PopularityContest(popularity_contest::PopularityContestSubCommand),
-    Proxyd(daemon::ProxydSubCommand),
+    Proxyd(proxyd::ProxydSubCommand),
     RpcAccounting(rpc_accounting::RpcAccountingSubCommand),
     Sentryd(sentryd::SentrydSubCommand),
     TransferKey(transfer_key::TransferKeySubCommand),
@@ -92,6 +92,9 @@ enum SubCommand {
 }
 
 fn main() -> anyhow::Result<()> {
+    // this probably won't matter for us in docker, but better safe than sorry
+    fdlimit::raise_fd_limit();
+
     #[cfg(feature = "deadlock")]
     {
         // spawn a thread for deadlock detection
@@ -142,9 +145,6 @@
             .join(","),
     };
 
-    // this probably won't matter for us in docker, but better safe than sorry
-    fdlimit::raise_fd_limit();
-
     let mut cli_config: Web3ProxyCli = argh::from_env();
 
     if cli_config.config.is_none() && cli_config.db_url.is_none() && cli_config.sentry_url.is_none()
@@ -154,12 +154,13 @@
         cli_config.config = Some("./config/development.toml".to_string());
     }
 
-    let top_config = if let Some(top_config_path) = cli_config.config.clone() {
+    let (top_config, top_config_path) = if let Some(top_config_path) = cli_config.config.clone() {
        let top_config_path = Path::new(&top_config_path)
            .canonicalize()
            .context(format!("checking for config at {}", top_config_path))?;
 
-        let top_config: String = fs::read_to_string(top_config_path)?;
+        let top_config: String = fs::read_to_string(top_config_path.clone())?;
        let mut top_config: TopConfig = toml::from_str(&top_config)?;
 
        // TODO: this doesn't seem to do anything
@@ -184,9 +185,9 @@
            }
        }
 
-        Some(top_config)
+        (Some(top_config), Some(top_config_path))
    } else {
-        None
+        (None, None)
    };
 
    let logger = env_logger::builder().parse_filters(&rust_log).build();
@@ -343,8 +344,10 @@
                }
                SubCommand::Proxyd(x) => {
                    let top_config = top_config.expect("--config is required to run proxyd");
+                    let top_config_path =
+                        top_config_path.expect("path must be set if top_config exists");
 
-                    x.main(top_config, num_workers).await
+                    x.main(top_config, top_config_path, num_workers).await
                }
                SubCommand::DropMigrationLock(x) => {
                    let db_url = cli_config


@@ -1,5 +1,7 @@
 #![forbid(unsafe_code)]
 
+use std::path::PathBuf;
+
 use argh::FromArgs;
 use futures::StreamExt;
 use log::{error, info, warn};
@@ -24,11 +26,19 @@ pub struct ProxydSubCommand {
 }
 
 impl ProxydSubCommand {
-    pub async fn main(self, top_config: TopConfig, num_workers: usize) -> anyhow::Result<()> {
+    pub async fn main(
+        self,
+        top_config: TopConfig,
+        top_config_path: PathBuf,
+        num_workers: usize,
+    ) -> anyhow::Result<()> {
         let (shutdown_sender, _) = broadcast::channel(1);
 
+        // TODO: i think there is a small race. if config_path changes
+
         run(
             top_config,
+            Some(top_config_path),
             self.port,
             self.prometheus_port,
             num_workers,
@@ -40,6 +50,7 @@ impl ProxydSubCommand {
 
 async fn run(
     top_config: TopConfig,
+    top_config_path: Option<PathBuf>,
     frontend_port: u16,
     prometheus_port: u16,
     num_workers: usize,
@@ -54,8 +65,13 @@ async fn run(
     let mut shutdown_receiver = shutdown_sender.subscribe();
 
     // start the main app
-    let mut spawned_app =
-        Web3ProxyApp::spawn(top_config, num_workers, shutdown_sender.subscribe()).await?;
+    let mut spawned_app = Web3ProxyApp::spawn(
+        top_config,
+        top_config_path,
+        num_workers,
+        shutdown_sender.subscribe(),
+    )
+    .await?;
 
     // start the prometheus metrics port
     let prometheus_handle = tokio::spawn(metrics_frontend::serve(
@@ -246,6 +262,7 @@ mod tests {
         tokio::spawn(async move {
             run(
                 top_config,
+                None,
                 frontend_port,
                 prometheus_port,
                 2,


@@ -97,18 +97,18 @@ impl SentrydSubCommand {
             .or_else(|| top_config.map(|x| x.app.chain_id))
             .context("--config or --chain-id required")?;
 
-        let primary_proxy = self.web3_proxy.trim_end_matches("/").to_string();
+        let primary_proxy = self.web3_proxy.trim_end_matches('/').to_string();
 
         let other_proxy: Vec<_> = self
             .other_proxy
             .into_iter()
-            .map(|x| x.trim_end_matches("/").to_string())
+            .map(|x| x.trim_end_matches('/').to_string())
             .collect();
 
         let other_rpc: Vec<_> = self
             .other_rpc
             .into_iter()
-            .map(|x| x.trim_end_matches("/").to_string())
+            .map(|x| x.trim_end_matches('/').to_string())
             .collect();
 
         let seconds = self.seconds.unwrap_or(60);
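The lint fixed here is clippy::single_char_pattern: passing a char instead of a one-character &str avoids a needless string comparison and reads slightly clearer. A tiny illustration with a made-up URL:

    fn main() {
        let url = "https://example.com/rpc///";

        // clippy::single_char_pattern prefers a char when the pattern is one character
        assert_eq!(url.trim_end_matches("/"), url.trim_end_matches('/'));

        println!("{}", url.trim_end_matches('/'));
    }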


@@ -211,6 +211,7 @@ impl JsonRpcForwardedResponse {
             error: Some(JsonRpcErrorData {
                 code: code.unwrap_or(-32099),
                 message,
+                // TODO: accept data as an argument
                 data: None,
             }),
         }


@@ -124,7 +124,7 @@ pub fn pagerduty_alert_for_config<T: Serialize>(
 ) -> AlertTrigger<T> {
     let chain_id = top_config.app.chain_id;
 
-    let client_url = top_config.app.redirect_public_url.clone();
+    let client_url = top_config.app.redirect_public_url;
 
     pagerduty_alert(
         Some(chain_id),
@@ -140,6 +140,7 @@
     )
 }
 
+#[allow(clippy::too_many_arguments)]
 pub fn pagerduty_alert<T: Serialize>(
     chain_id: Option<u64>,
     class: Option<String>,
@@ -186,6 +187,6 @@
         images: None,
         links: None,
         client: Some(client),
-        client_url: client_url,
+        client_url,
     }
 }
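Two more clippy-driven cleanups show up in this file: field init shorthand (clippy::redundant_field_names) and an explicit allow where a wide signature is intentional (clippy::too_many_arguments). A contrived sketch with hypothetical names:

    struct Alert {
        chain_id: u64,
        client_url: Option<String>,
    }

    // silence the lint when a wide signature is a deliberate choice
    #[allow(clippy::too_many_arguments)]
    fn build_alert(
        chain_id: u64,
        client_url: Option<String>,
        _class: Option<String>,
        _component: Option<String>,
        _custom_details: Option<String>,
        _group: Option<String>,
        _severity: Option<String>,
        _summary: Option<String>,
    ) -> Alert {
        Alert {
            chain_id,
            // clippy::redundant_field_names: `client_url,` instead of `client_url: client_url,`
            client_url,
        }
    }

    fn main() {
        let alert = build_alert(1, None, None, None, None, None, None, None);
        println!("alert for chain {} ({:?})", alert.chain_id, alert.client_url);
    }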


@@ -8,7 +8,7 @@ use crate::{config::BlockAndRpc, jsonrpc::JsonRpcRequest};
 use anyhow::{anyhow, Context};
 use derive_more::From;
 use ethers::prelude::{Block, TxHash, H256, U64};
-use log::{debug, error, trace, warn, Level};
+use log::{debug, trace, warn, Level};
 use moka::future::Cache;
 use serde::Serialize;
 use serde_json::json;
@@ -72,7 +72,7 @@ impl Web3ProxyBlock {
         if block_timestamp < now {
             // this server is still syncing from too far away to serve requests
             // u64 is safe because ew checked equality above
-            (now - block_timestamp).as_secs() as u64
+            (now - block_timestamp).as_secs()
         } else {
             0
         }
@@ -392,8 +392,7 @@ impl Web3Rpcs {
             .await
             .context("no consensus head block!")
             .map_err(|err| {
-                self.watch_consensus_rpcs_sender
-                    .send_replace(Arc::new(Default::default()));
+                self.watch_consensus_rpcs_sender.send_replace(None);
 
                 err
             })?;
@@ -414,100 +413,73 @@ impl Web3Rpcs {
@@ -520,82 +492,73 @@ impl Web3Rpcs {
         let old_consensus_head_connections = self
             .watch_consensus_rpcs_sender
-            .send_replace(Arc::new(new_synced_connections));
+            .send_replace(Some(Arc::new(new_synced_connections)));
 
         let backups_voted_str = if backups_needed { "B " } else { "" };
 
-        if let Some(consensus_head_block) = consensus_head_block {
-            match &old_consensus_head_connections.head_block {
-                None => {
-                    debug!(
-                        "first {}/{} {}{}/{}/{} block={}, rpc={}",
-                        consensus_tier,
-                        total_tiers,
-                        backups_voted_str,
-                        num_consensus_rpcs,
-                        num_active_rpcs,
-                        total_rpcs,
-                        consensus_head_block,
-                        rpc,
-                    );
-
-                    if backups_needed {
-                        // TODO: what else should be in this error?
-                        warn!("Backup RPCs are in use!");
-                    }
-
-                    // this should already be cached
-                    let consensus_head_block =
-                        self.try_cache_block(consensus_head_block, true).await?;
-
-                    watch_consensus_head_sender
-                        .send(Some(consensus_head_block))
-                        .context(
-                            "watch_consensus_head_sender failed sending first consensus_head_block",
-                        )?;
-                }
-                Some(old_head_block) => {
-                    // TODO: do this log item better
-                    let rpc_head_str = new_block
-                        .map(|x| x.to_string())
-                        .unwrap_or_else(|| "None".to_string());
-
-                    match consensus_head_block.number().cmp(&old_head_block.number()) {
-                        Ordering::Equal => {
-                            // multiple blocks with the same fork!
-                            if consensus_head_block.hash() == old_head_block.hash() {
-                                // no change in hash. no need to use watch_consensus_head_sender
-                                // TODO: trace level if rpc is backup
-                                debug!(
-                                    "con {}/{} {}{}/{}/{} con={} rpc={}@{}",
-                                    consensus_tier,
-                                    total_tiers,
-                                    backups_voted_str,
-                                    num_consensus_rpcs,
-                                    num_active_rpcs,
-                                    total_rpcs,
-                                    consensus_head_block,
-                                    rpc,
-                                    rpc_head_str,
-                                )
-                            } else {
-                                // hash changed
-                                if backups_needed {
-                                    // TODO: what else should be in this error?
-                                    warn!("Backup RPCs are in use!");
-                                }
-
-                                debug!(
-                                    "unc {}/{} {}{}/{}/{} con_head={} old={} rpc={}@{}",
-                                    consensus_tier,
-                                    total_tiers,
-                                    backups_voted_str,
-                                    num_consensus_rpcs,
-                                    num_active_rpcs,
-                                    total_rpcs,
-                                    consensus_head_block,
-                                    old_head_block,
-                                    rpc,
-                                    rpc_head_str,
-                                );
-
-                                let consensus_head_block = self
-                                    .try_cache_block(consensus_head_block, true)
-                                    .await
-                                    .context("save consensus_head_block as heaviest chain")?;
-
-                                watch_consensus_head_sender
-                                    .send(Some(consensus_head_block))
-                                    .context("watch_consensus_head_sender failed sending uncled consensus_head_block")?;
-                            }
-                        }
-                        Ordering::Less => {
-                            // this is unlikely but possible
-                            // TODO: better log
-                            warn!(
-                                "chain rolled back {}/{} {}{}/{}/{} con={} old={} rpc={}@{}",
-                                consensus_tier,
-                                total_tiers,
-                                backups_voted_str,
-                                num_consensus_rpcs,
-                                num_active_rpcs,
-                                total_rpcs,
-                                consensus_head_block,
-                                old_head_block,
-                                rpc,
-                                rpc_head_str,
-                            );
-
-                            if backups_needed {
-                                // TODO: what else should be in this error?
-                                warn!("Backup RPCs are in use!");
-                            }
-
-                            // TODO: tell save_block to remove any higher block numbers from the cache. not needed because we have other checks on requested blocks being > head, but still seems like a good idea
-                            let consensus_head_block = self
-                                .try_cache_block(consensus_head_block, true)
-                                .await
-                                .context(
-                                    "save_block sending consensus_head_block as heaviest chain",
-                                )?;
-
-                            watch_consensus_head_sender
-                                .send(Some(consensus_head_block))
-                                .context("watch_consensus_head_sender failed sending rollback consensus_head_block")?;
-                        }
-                        Ordering::Greater => {
-                            debug!(
-                                "new {}/{} {}{}/{}/{} con={} rpc={}@{}",
-                                consensus_tier,
-                                total_tiers,
-                                backups_voted_str,
-                                num_consensus_rpcs,
-                                num_active_rpcs,
-                                total_rpcs,
-                                consensus_head_block,
-                                rpc,
-                                rpc_head_str,
-                            );
-
-                            if backups_needed {
-                                // TODO: what else should be in this error?
-                                warn!("Backup RPCs are in use!");
-                            }
-
-                            let consensus_head_block =
-                                self.try_cache_block(consensus_head_block, true).await?;
-
-                            watch_consensus_head_sender.send(Some(consensus_head_block)).context("watch_consensus_head_sender failed sending new consensus_head_block")?;
-                        }
-                    }
-                }
-            }
-        } else {
-            // TODO: do this log item better
-            let rpc_head_str = new_block
-                .map(|x| x.to_string())
-                .unwrap_or_else(|| "None".to_string());
-
-            if num_active_rpcs >= self.min_head_rpcs {
-                // no consensus!!!
-                error!(
-                    "non {}/{} {}{}/{}/{} rpc={}@{}",
-                    consensus_tier,
-                    total_tiers,
-                    backups_voted_str,
-                    num_consensus_rpcs,
-                    num_active_rpcs,
-                    total_rpcs,
-                    rpc,
-                    rpc_head_str,
-                );
-            } else {
-                // no consensus, but we do not have enough rpcs connected yet to panic
-                debug!(
-                    "non {}/{} {}{}/{}/{} rpc={}@{}",
-                    consensus_tier,
-                    total_tiers,
-                    backups_voted_str,
-                    num_consensus_rpcs,
-                    num_active_rpcs,
-                    total_rpcs,
-                    rpc,
-                    rpc_head_str,
-                );
-            }
-        }
+        match old_consensus_head_connections.as_ref() {
+            None => {
+                debug!(
+                    "first {}/{} {}{}/{}/{} block={}, rpc={}",
+                    consensus_tier,
+                    total_tiers,
+                    backups_voted_str,
+                    num_consensus_rpcs,
+                    num_active_rpcs,
+                    total_rpcs,
+                    consensus_head_block,
+                    rpc,
+                );
+
+                if backups_needed {
+                    // TODO: what else should be in this error?
+                    warn!("Backup RPCs are in use!");
+                }
+
+                // this should already be cached
+                let consensus_head_block = self.try_cache_block(consensus_head_block, true).await?;
+
+                watch_consensus_head_sender
+                    .send(Some(consensus_head_block))
+                    .context(
+                        "watch_consensus_head_sender failed sending first consensus_head_block",
+                    )?;
+            }
+            Some(old_consensus_connections) => {
+                let old_head_block = &old_consensus_connections.head_block;
+
+                // TODO: do this log item better
+                let rpc_head_str = new_block
+                    .map(|x| x.to_string())
+                    .unwrap_or_else(|| "None".to_string());
+
+                match consensus_head_block.number().cmp(old_head_block.number()) {
+                    Ordering::Equal => {
+                        // multiple blocks with the same fork!
+                        if consensus_head_block.hash() == old_head_block.hash() {
+                            // no change in hash. no need to use watch_consensus_head_sender
+                            // TODO: trace level if rpc is backup
+                            debug!(
+                                "con {}/{} {}{}/{}/{} con={} rpc={}@{}",
+                                consensus_tier,
+                                total_tiers,
+                                backups_voted_str,
+                                num_consensus_rpcs,
+                                num_active_rpcs,
+                                total_rpcs,
+                                consensus_head_block,
+                                rpc,
+                                rpc_head_str,
+                            )
+                        } else {
+                            // hash changed
+                            if backups_needed {
+                                // TODO: what else should be in this error?
+                                warn!("Backup RPCs are in use!");
+                            }
+
+                            debug!(
+                                "unc {}/{} {}{}/{}/{} con_head={} old={} rpc={}@{}",
+                                consensus_tier,
+                                total_tiers,
+                                backups_voted_str,
+                                num_consensus_rpcs,
+                                num_active_rpcs,
+                                total_rpcs,
+                                consensus_head_block,
+                                old_head_block,
+                                rpc,
+                                rpc_head_str,
+                            );
+
+                            let consensus_head_block = self
+                                .try_cache_block(consensus_head_block, true)
+                                .await
+                                .context("save consensus_head_block as heaviest chain")?;
+
+                            watch_consensus_head_sender
+                                .send(Some(consensus_head_block))
+                                .context("watch_consensus_head_sender failed sending uncled consensus_head_block")?;
+                        }
+                    }
+                    Ordering::Less => {
+                        // this is unlikely but possible
+                        // TODO: better log
+                        warn!(
+                            "chain rolled back {}/{} {}{}/{}/{} con={} old={} rpc={}@{}",
+                            consensus_tier,
+                            total_tiers,
+                            backups_voted_str,
+                            num_consensus_rpcs,
+                            num_active_rpcs,
+                            total_rpcs,
+                            consensus_head_block,
+                            old_head_block,
+                            rpc,
+                            rpc_head_str,
+                        );
+
+                        if backups_needed {
+                            // TODO: what else should be in this error?
+                            warn!("Backup RPCs are in use!");
+                        }
+
+                        // TODO: tell save_block to remove any higher block numbers from the cache. not needed because we have other checks on requested blocks being > head, but still seems like a good idea
+                        let consensus_head_block = self
+                            .try_cache_block(consensus_head_block, true)
+                            .await
+                            .context("save_block sending consensus_head_block as heaviest chain")?;
+
+                        watch_consensus_head_sender
+                            .send(Some(consensus_head_block))
+                            .context("watch_consensus_head_sender failed sending rollback consensus_head_block")?;
+                    }
+                    Ordering::Greater => {
+                        debug!(
+                            "new {}/{} {}{}/{}/{} con={} rpc={}@{}",
+                            consensus_tier,
+                            total_tiers,
+                            backups_voted_str,
+                            num_consensus_rpcs,
+                            num_active_rpcs,
+                            total_rpcs,
+                            consensus_head_block,
+                            rpc,
+                            rpc_head_str,
+                        );
+
+                        if backups_needed {
+                            // TODO: what else should be in this error?
+                            warn!("Backup RPCs are in use!");
+                        }
+
+                        let consensus_head_block =
+                            self.try_cache_block(consensus_head_block, true).await?;
+
+                        watch_consensus_head_sender.send(Some(consensus_head_block)).context("watch_consensus_head_sender failed sending new consensus_head_block")?;
+                    }
+                }
+            }
+        }
     }
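The watch channel now carries Option<Arc<ConsensusWeb3Rpcs>>, so "no consensus" is an explicit None instead of an empty Default value. A small sketch of the write side of that pattern, with ConsensusSnapshot as a stand-in type:

    use std::sync::Arc;

    use tokio::sync::watch;

    #[derive(Debug)]
    struct ConsensusSnapshot {
        head_block: u64,
    }

    fn main() {
        // None means "no consensus right now"; previously an empty default struct played that role
        let (sender, receiver) = watch::channel::<Option<Arc<ConsensusSnapshot>>>(None);

        // a new consensus replaces whatever was there; the old value is returned
        let old = sender.send_replace(Some(Arc::new(ConsensusSnapshot { head_block: 100 })));
        assert!(old.is_none());

        // on an error path the consensus is cleared again
        sender.send_replace(None);

        assert!(receiver.borrow().is_none());
    }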


@@ -16,10 +16,11 @@ use tokio::time::Instant;
 
 /// A collection of Web3Rpcs that are on the same block.
 /// Serialize is so we can print it on our debug endpoint
-#[derive(Clone, Default, Serialize)]
+#[derive(Clone, Serialize)]
 pub struct ConsensusWeb3Rpcs {
+    // TODO: tier should be an option, or we should have consensus be stored as an Option<ConsensusWeb3Rpcs>
     pub(super) tier: u64,
-    pub(super) head_block: Option<Web3ProxyBlock>,
+    pub(super) head_block: Web3ProxyBlock,
     // TODO: this should be able to serialize, but it isn't
     #[serde(skip_serializing)]
     pub(super) rpcs: Vec<Arc<Web3Rpc>>,
@@ -69,11 +70,23 @@ impl Web3Rpcs {
     }
 
     pub fn synced(&self) -> bool {
-        !self.watch_consensus_rpcs_sender.borrow().rpcs.is_empty()
+        let consensus = self.watch_consensus_rpcs_sender.borrow();
+
+        if let Some(consensus) = consensus.as_ref() {
+            !consensus.rpcs.is_empty()
+        } else {
+            false
+        }
     }
 
     pub fn num_synced_rpcs(&self) -> usize {
-        self.watch_consensus_rpcs_sender.borrow().rpcs.len()
+        let consensus = self.watch_consensus_rpcs_sender.borrow();
+
+        if let Some(consensus) = consensus.as_ref() {
+            consensus.rpcs.len()
+        } else {
+            0
+        }
     }
 }
@@ -100,6 +113,10 @@ impl ConnectionsGroup {
         self.rpc_name_to_block.len()
     }
 
+    pub fn is_empty(&self) -> bool {
+        self.rpc_name_to_block.is_empty()
+    }
+
     fn remove(&mut self, rpc_name: &str) -> Option<Web3ProxyBlock> {
         if let Some(removed_block) = self.rpc_name_to_block.remove(rpc_name) {
             match self.highest_block.as_mut() {
@@ -255,14 +272,14 @@ impl ConnectionsGroup {
 
             // not enough rpcs on this block. check the parent block
             match web3_rpcs
-                .block(authorization, &maybe_head_block.parent_hash(), None)
+                .block(authorization, maybe_head_block.parent_hash(), None)
                 .await
             {
                 Ok(parent_block) => {
                     // trace!(
                     //     child=%maybe_head_hash, parent=%parent_block.hash.unwrap(), "avoiding thundering herd. checking consensus on parent block",
                     // );
-                    maybe_head_block = parent_block.into();
+                    maybe_head_block = parent_block;
                     continue;
                 }
                 Err(err) => {
@@ -325,7 +342,7 @@ impl ConnectionsGroup {
 
         Ok(ConsensusWeb3Rpcs {
             tier: *tier,
-            head_block: Some(maybe_head_block),
+            head_block: maybe_head_block,
             rpcs,
             backups_voted: backup_rpcs_voted,
             backups_needed: primary_rpcs_voted.is_none(),
@@ -371,6 +388,10 @@ impl ConsensusFinder {
         self.tiers.len()
     }
 
+    pub fn is_empty(&self) -> bool {
+        self.tiers.is_empty()
+    }
+
     /// get the ConnectionsGroup that contains all rpcs
     /// panics if there are no tiers
     pub fn all_rpcs_group(&self) -> Option<&ConnectionsGroup> {
@@ -457,26 +478,18 @@ impl ConsensusFinder {
                 }
 
                 if let Some(prev_block) = self.insert(&rpc, rpc_head_block.clone()).await {
-                    if prev_block.hash() == rpc_head_block.hash() {
-                        // this block was already sent by this rpc. return early
-                        false
-                    } else {
-                        // new block for this rpc
-                        true
-                    }
+                    // false if this block was already sent by this rpc. return early
+                    // true if new block for this rpc
+                    prev_block.hash() != rpc_head_block.hash()
                 } else {
                     // first block for this rpc
                     true
                 }
             }
             None => {
-                if self.remove(&rpc).is_none() {
-                    // this rpc was already removed
-                    false
-                } else {
-                    // rpc head changed from being synced to not
-                    true
-                }
+                // false if this rpc was already removed
+                // true if rpc head changed from being synced to not
+                self.remove(&rpc).is_some()
             }
         };
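On the read side, callers now borrow the channel and handle None explicitly, as synced() and num_synced_rpcs() do above. A compact sketch of that borrow-then-map pattern, again with a placeholder type:

    use std::sync::Arc;

    use tokio::sync::watch;

    struct SyncedRpcs {
        rpcs: Vec<String>,
    }

    fn num_synced(receiver: &watch::Receiver<Option<Arc<SyncedRpcs>>>) -> usize {
        // borrow() returns a guard holding a read lock, so keep the scope short
        let consensus = receiver.borrow();

        // None (no consensus yet) simply counts as zero synced servers
        consensus.as_ref().map(|c| c.rpcs.len()).unwrap_or(0)
    }

    fn main() {
        let (sender, receiver) = watch::channel::<Option<Arc<SyncedRpcs>>>(None);
        assert_eq!(num_synced(&receiver), 0);

        sender.send_replace(Some(Arc::new(SyncedRpcs {
            rpcs: vec!["rpc-a".into(), "rpc-b".into()],
        })));
        assert_eq!(num_synced(&receiver), 2);
    }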


@@ -50,7 +50,7 @@ pub struct Web3Rpcs {
     /// TODO: document that this is a watch sender and not a broadcast! if things get busy, blocks might get missed
     /// TODO: why is watch_consensus_head_sender in an Option, but this one isn't?
     /// Geth's subscriptions have the same potential for skipping blocks.
-    pub(super) watch_consensus_rpcs_sender: watch::Sender<Arc<ConsensusWeb3Rpcs>>,
+    pub(super) watch_consensus_rpcs_sender: watch::Sender<Option<Arc<ConsensusWeb3Rpcs>>>,
     /// this head receiver makes it easy to wait until there is a new block
     pub(super) watch_consensus_head_sender: Option<watch::Sender<Option<Web3ProxyBlock>>>,
     pub(super) pending_transaction_cache:
@@ -125,7 +125,7 @@ impl Web3Rpcs {
                     // trace!("http interval ready");
 
-                    if let Err(_) = sender.send(()) {
+                    if sender.send(()).is_err() {
                         // errors are okay. they mean that all receivers have been dropped, or the rpcs just haven't started yet
                         trace!("no http receivers");
                     };
@@ -181,7 +181,7 @@ impl Web3Rpcs {
             max_block_lag,
         });
 
-        let authorization = Arc::new(Authorization::internal(db_conn.clone())?);
+        let authorization = Arc::new(Authorization::internal(db_conn)?);
 
         let handle = {
             let connections = connections.clone();
@@ -478,12 +478,14 @@ impl Web3Rpcs {
         let usable_rpcs_by_tier_and_head_number: BTreeMap<(u64, Option<U64>), Vec<Arc<Web3Rpc>>> = {
             let synced_connections = self.watch_consensus_rpcs_sender.borrow().clone();
 
-            let (head_block_num, head_block_age) =
-                if let Some(head_block) = synced_connections.head_block.as_ref() {
-                    (head_block.number(), head_block.age())
-                } else {
-                    return Ok(OpenRequestResult::NotReady);
-                };
+            if synced_connections.is_none() {
+                return Ok(OpenRequestResult::NotReady);
+            }
+
+            let synced_connections =
+                synced_connections.expect("synced_connections can't be None here");
+
+            let head_block_num = synced_connections.head_block.number();
+            let head_block_age = synced_connections.head_block.age();
 
             let needed_blocks_comparison = match (min_block_needed, max_block_needed) {
                 (None, None) => {
@@ -530,28 +532,20 @@ impl Web3Rpcs {
                     .values()
                     .filter(|x| {
                         // TODO: move a bunch of this onto a rpc.is_synced function
+                        #[allow(clippy::if_same_then_else)]
                         if skip.contains(x) {
                             // we've already tried this server or have some other reason to skip it
                             false
                         } else if max_block_needed
-                            .and_then(|max_block_needed| {
-                                Some(!x.has_block_data(max_block_needed))
-                            })
+                            .map(|max_block_needed| !x.has_block_data(max_block_needed))
                             .unwrap_or(false)
                         {
                             // server does not have the max block
                             false
-                        } else if min_block_needed
-                            .and_then(|min_block_needed| {
-                                Some(!x.has_block_data(min_block_needed))
-                            })
-                            .unwrap_or(false)
-                        {
-                            // server does not have the min block
-                            false
                         } else {
-                            // server has the block we need!
-                            true
+                            !min_block_needed
+                                .map(|min_block_needed| !x.has_block_data(min_block_needed))
+                                .unwrap_or(false)
                         }
                     })
                     .cloned()
@@ -599,9 +593,7 @@ impl Web3Rpcs {
                     }
 
                     trace!("not skipped!");
 
-                    m.entry(key.clone())
-                        .or_insert_with(Vec::new)
-                        .push(x.clone());
+                    m.entry(key).or_insert_with(Vec::new).push(x.clone());
                 }
             }
             cmp::Ordering::Greater => {
@@ -703,44 +695,9 @@ impl Web3Rpcs {
         min_block_needed: Option<&U64>,
         max_block_needed: Option<&U64>,
         max_count: Option<usize>,
-        always_include_backups: bool,
-    ) -> Result<Vec<OpenRequestHandle>, Option<Instant>> {
-        if !always_include_backups {
-            if let Ok(without_backups) = self
-                ._all_connections(
-                    false,
-                    authorization,
-                    min_block_needed,
-                    max_block_needed,
-                    max_count,
-                )
-                .await
-            {
-                return Ok(without_backups);
-            }
-        }
-
-        self._all_connections(
-            true,
-            authorization,
-            min_block_needed,
-            max_block_needed,
-            max_count,
-        )
-        .await
-    }
-
-    async fn _all_connections(
-        &self,
         allow_backups: bool,
-        authorization: &Arc<Authorization>,
-        min_block_needed: Option<&U64>,
-        max_block_needed: Option<&U64>,
-        max_count: Option<usize>,
     ) -> Result<Vec<OpenRequestHandle>, Option<Instant>> {
         let mut earliest_retry_at = None;
 
-        // TODO: with capacity?
-        let mut selected_rpcs = vec![];
-
         let mut max_count = if let Some(max_count) = max_count {
             max_count
@@ -748,63 +705,83 @@ impl Web3Rpcs {
             self.by_name.read().len()
         };
 
+        trace!("max_count: {}", max_count);
+
+        let mut selected_rpcs = Vec::with_capacity(max_count);
+
         let mut tried = HashSet::new();
 
-        let mut synced_conns = self.watch_consensus_rpcs_sender.borrow().rpcs.clone();
+        let mut synced_rpcs = {
+            let synced_rpcs = self.watch_consensus_rpcs_sender.borrow();
+
+            if let Some(synced_rpcs) = synced_rpcs.as_ref() {
+                synced_rpcs.rpcs.clone()
+            } else {
+                vec![]
+            }
+        };
 
         // synced connections are all on the same block. sort them by tier with higher soft limits first
-        synced_conns.sort_by_cached_key(rpc_sync_status_sort_key);
+        synced_rpcs.sort_by_cached_key(rpc_sync_status_sort_key);
+
+        trace!("synced_rpcs: {:#?}", synced_rpcs);
 
         // if there aren't enough synced connections, include more connections
-        // TODO: only do this sorting if the synced_conns isn't enough
-        let mut all_conns: Vec<_> = self.by_name.read().values().cloned().collect();
-        all_conns.sort_by_cached_key(rpc_sync_status_sort_key);
+        // TODO: only do this sorting if the synced_rpcs isn't enough
+        let mut all_rpcs: Vec<_> = self.by_name.read().values().cloned().collect();
+        all_rpcs.sort_by_cached_key(rpc_sync_status_sort_key);
+
+        trace!("all_rpcs: {:#?}", all_rpcs);
 
-        for connection in itertools::chain(synced_conns, all_conns) {
+        for rpc in itertools::chain(synced_rpcs, all_rpcs) {
             if max_count == 0 {
                 break;
             }
 
-            if tried.contains(&connection.name) {
+            if tried.contains(&rpc.name) {
                 continue;
             }
 
-            tried.insert(connection.name.clone());
+            trace!("trying {}", rpc);
+
+            tried.insert(rpc.name.clone());
 
-            if !allow_backups && connection.backup {
+            if !allow_backups && rpc.backup {
+                trace!("{} is a backup. skipping", rpc);
+
                 continue;
             }
 
             if let Some(block_needed) = min_block_needed {
-                if !connection.has_block_data(block_needed) {
+                if !rpc.has_block_data(block_needed) {
+                    trace!("{} is missing min_block_needed. skipping", rpc);
+
                     continue;
                 }
             }
 
             if let Some(block_needed) = max_block_needed {
-                if !connection.has_block_data(block_needed) {
+                if !rpc.has_block_data(block_needed) {
+                    trace!("{} is missing max_block_needed. skipping", rpc);
+
                    continue;
                }
            }
 
             // check rate limits and increment our connection counter
-            match connection.try_request_handle(authorization, None).await {
+            match rpc.try_request_handle(authorization, None).await {
                 Ok(OpenRequestResult::RetryAt(retry_at)) => {
                     // this rpc is not available. skip it
+                    trace!("{} is rate limited. skipping", rpc);
+
                     earliest_retry_at = earliest_retry_at.min(Some(retry_at));
                 }
                 Ok(OpenRequestResult::Handle(handle)) => {
+                    trace!("{} is available", rpc);
+
                     max_count -= 1;
                     selected_rpcs.push(handle)
                 }
                 Ok(OpenRequestResult::NotReady) => {
-                    warn!("no request handle for {}", connection)
+                    warn!("no request handle for {}", rpc)
                 }
                 Err(err) => {
-                    warn!(
-                        "error getting request handle for {}. err={:?}",
-                        connection, err
-                    )
+                    warn!("error getting request handle for {}. err={:?}", rpc, err)
                 }
             }
         }
@@ -1015,6 +992,7 @@ impl Web3Rpcs {
                 if num_skipped == 0 {
                     error!("No servers synced ({} known). None skipped", num_conns);
 
+                    // TODO: what error code?
                     Ok(JsonRpcForwardedResponse::from_str(
                         "No servers synced",
                         Some(-32000),
@@ -1038,6 +1016,7 @@ impl Web3Rpcs {
     }
 
     /// be sure there is a timeout on this or it might loop forever
+    #[allow(clippy::too_many_arguments)]
     pub async fn try_send_all_synced_connections(
         &self,
         authorization: &Arc<Authorization>,
@@ -1180,9 +1159,14 @@ impl Serialize for Web3Rpcs {
         }
 
         {
-            let consensus_connections = self.watch_consensus_rpcs_sender.borrow().clone();
-            // TODO: rename synced_connections to consensus_connections?
-            state.serialize_field("synced_connections", &consensus_connections)?;
+            let consensus_rpcs = self.watch_consensus_rpcs_sender.borrow();
+            // TODO: rename synced_connections to consensus_rpcs
+
+            if let Some(consensus_rpcs) = consensus_rpcs.as_ref() {
+                state.serialize_field("synced_connections", consensus_rpcs)?;
+            } else {
+                state.serialize_field("synced_connections", &None::<()>)?;
+            }
         }
 
         self.blocks_by_hash.sync();
@@ -1369,11 +1353,11 @@ mod tests {
             ..Default::default()
         };
 
-        assert!(head_rpc.has_block_data(&lagged_block.number()));
-        assert!(head_rpc.has_block_data(&head_block.number()));
+        assert!(head_rpc.has_block_data(lagged_block.number()));
+        assert!(head_rpc.has_block_data(head_block.number()));
 
-        assert!(lagged_rpc.has_block_data(&lagged_block.number()));
-        assert!(!lagged_rpc.has_block_data(&head_block.number()));
+        assert!(lagged_rpc.has_block_data(lagged_block.number()));
+        assert!(!lagged_rpc.has_block_data(head_block.number()));
 
         let head_rpc = Arc::new(head_rpc);
         let lagged_rpc = Arc::new(lagged_rpc);
@@ -1383,10 +1367,12 @@ mod tests {
             (lagged_rpc.name.clone(), lagged_rpc.clone()),
         ]);
 
-        let (block_sender, _) = flume::unbounded();
+        let (block_sender, _block_receiver) = flume::unbounded();
         let (pending_tx_id_sender, pending_tx_id_receiver) = flume::unbounded();
-        let (watch_consensus_rpcs_sender, _) = watch::channel(Default::default());
-        let (watch_consensus_head_sender, _) = watch::channel(Default::default());
+        let (watch_consensus_rpcs_sender, _watch_consensus_rpcs_receiver) =
+            watch::channel(Default::default());
+        let (watch_consensus_head_sender, _watch_consensus_head_receiver) =
+            watch::channel(Default::default());
 
         // TODO: make a Web3Rpcs::new
         let rpcs = Web3Rpcs {
@@ -1570,8 +1556,8 @@ mod tests {
             ..Default::default()
         };
 
-        assert!(pruned_rpc.has_block_data(&head_block.number()));
-        assert!(archive_rpc.has_block_data(&head_block.number()));
+        assert!(pruned_rpc.has_block_data(head_block.number()));
+        assert!(archive_rpc.has_block_data(head_block.number()));
         assert!(!pruned_rpc.has_block_data(&1.into()));
         assert!(archive_rpc.has_block_data(&1.into()));
@@ -1640,7 +1626,7 @@ mod tests {
         // best_synced_backend_connection requires servers to be synced with the head block
         // TODO: test with and without passing the head_block.number?
         let best_available_server = rpcs
-            .best_available_rpc(&authorization, None, &[], Some(&head_block.number()), None)
+            .best_available_rpc(&authorization, None, &[], Some(head_block.number()), None)
             .await;
 
         debug!("best_available_server: {:#?}", best_available_server);


@@ -558,10 +558,8 @@ impl Web3Rpc {
             drop(unlocked_provider);
 
             info!("successfully connected to {}", self);
-        } else {
-            if self.provider.read().await.is_none() {
-                return Err(anyhow!("failed waiting for client"));
-            }
+        } else if self.provider.read().await.is_none() {
+            return Err(anyhow!("failed waiting for client"));
         };
 
         Ok(())
@@ -604,7 +602,7 @@ impl Web3Rpc {
         {
             let mut head_block = self.head_block.write();
 
-            let _ = head_block.insert(new_head_block.clone().into());
+            let _ = head_block.insert(new_head_block.clone());
         }
 
         if self.block_data_limit() == U64::zero() {
@@ -712,7 +710,7 @@ impl Web3Rpc {
             let head_block = conn.head_block.read().clone();
 
             if let Some((block_number, txid)) = head_block.and_then(|x| {
-                let block = x.block.clone();
+                let block = x.block;
 
                 let block_number = block.number?;
                 let txid = block.transactions.last().cloned()?;
@@ -1146,7 +1144,7 @@ impl Web3Rpc {
         }
 
         if let Some(hard_limit_until) = self.hard_limit_until.as_ref() {
-            let hard_limit_ready = hard_limit_until.borrow().clone();
+            let hard_limit_ready = *hard_limit_until.borrow();
 
             let now = Instant::now();
@@ -1178,7 +1176,7 @@ impl Web3Rpc {
                 }
 
                 if let Some(hard_limit_until) = self.hard_limit_until.as_ref() {
-                    hard_limit_until.send_replace(retry_at.clone());
+                    hard_limit_until.send_replace(retry_at);
                 }
 
                 return Ok(OpenRequestResult::RetryAt(retry_at));
@@ -1355,7 +1353,7 @@ mod tests {
         assert!(x.has_block_data(&0.into()));
         assert!(x.has_block_data(&1.into()));
-        assert!(x.has_block_data(&head_block.number()));
+        assert!(x.has_block_data(head_block.number()));
         assert!(!x.has_block_data(&(head_block.number() + 1)));
         assert!(!x.has_block_data(&(head_block.number() + 1000)));
     }
@@ -1394,7 +1392,7 @@ mod tests {
         assert!(!x.has_block_data(&1.into()));
         assert!(!x.has_block_data(&(head_block.number() - block_data_limit - 1)));
         assert!(x.has_block_data(&(head_block.number() - block_data_limit)));
-        assert!(x.has_block_data(&head_block.number()));
+        assert!(x.has_block_data(head_block.number()));
         assert!(!x.has_block_data(&(head_block.number() + 1)));
         assert!(!x.has_block_data(&(head_block.number() + 1000)));
     }
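The remaining hunks are small clippy fixes: collapsible_else_if folds else { if .. } into else if, and clone_on_copy prefers a dereference over .clone() for Copy types such as Instant. A combined sketch with hypothetical names:

    use std::sync::RwLock;
    use std::time::Instant;

    fn check(connected: bool, provider_missing: bool) -> Result<(), String> {
        if connected {
            Ok(())
        // clippy::collapsible_else_if: `else { if .. }` becomes `else if ..`
        } else if provider_missing {
            Err("failed waiting for client".to_string())
        } else {
            Ok(())
        }
    }

    fn main() {
        let hard_limit_until = RwLock::new(Instant::now());

        // clippy::clone_on_copy: Instant is Copy, so dereference instead of cloning
        let ready_at = *hard_limit_until.read().unwrap();

        println!("{:?} / {:?}", ready_at.elapsed(), check(true, false));
    }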