make missing docs a warning

Bryan Stitt 2022-07-25 18:21:58 +00:00
parent f0691efc5c
commit 0178d09b79
4 changed files with 19 additions and 16 deletions

@@ -58,8 +58,8 @@
- we can improve this by only publishing the synced connections once a threshold of total available soft and hard limits is passed. how can we do this without hammering redis? at least its only once per block per server
- [x] instead of tracking `pending_synced_connections`, have a mapping of where all connections are individually. then each change, re-check for consensus.
- [x] synced connections swap threshold set to 1 so that it always serves something
- [ ] basic request method stats
- [ ] nice output when cargo doc is run
- [ ] basic request method stats
## V1

@@ -1,3 +1,5 @@
#![warn(missing_docs)]
use bb8_redis::redis::cmd;
pub use bb8_redis::redis::RedisError;

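For context, a minimal sketch (not from this repo; names are made up) of what the newly added `#![warn(missing_docs)]` lint reports: every public item, and the crate root itself, needs a doc comment, and the compiler now emits a warning rather than an error when one is missing.

```rust
#![warn(missing_docs)]

//! Crate-level docs. Omitting this `//!` line also triggers the lint.

/// A documented function compiles cleanly.
pub fn documented() {}

// warning: missing documentation for a function
pub fn undocumented() {}
```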
@@ -44,9 +44,11 @@ struct SyncedConnections {
impl fmt::Debug for SyncedConnections {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// TODO: the default formatter takes forever to write. this is too quiet though
// TODO: print the actual conns?
f.debug_struct("SyncedConnections")
.field("head_num", &self.head_block_num)
.field("head_hash", &self.head_block_hash)
.field("num_conns", &self.conns.len())
.finish_non_exhaustive()
}
}
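The reworked Debug impl above trades the verbose derived output for a summary. A standalone sketch of the same `std::fmt` pattern (field names and types here are illustrative, not the repo's exact ones):

```rust
use std::fmt;

struct Synced {
    head_block_num: u64,
    conns: Vec<String>,
}

impl fmt::Debug for Synced {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // summarize the collection instead of dumping every connection
        f.debug_struct("Synced")
            .field("head_num", &self.head_block_num)
            .field("num_conns", &self.conns.len())
            .finish_non_exhaustive() // renders a trailing `..`
    }
}

fn main() {
    let s = Synced { head_block_num: 15_000_000, conns: vec!["a".into(), "b".into()] };
    println!("{:?}", s); // Synced { head_num: 15000000, num_conns: 2, .. }
}
```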
@@ -702,6 +704,9 @@ impl Web3Connections {
// TODO: default_min_soft_limit? without, we start serving traffic at the start too quickly
// let min_soft_limit = total_soft_limit / 2;
let min_soft_limit = 1;
let num_possible_heads = rpcs_by_hash.len();
trace!(?rpcs_by_hash);
struct State<'a> {
block: &'a Arc<Block<TxHash>>,
@@ -724,8 +729,6 @@ impl Web3Connections {
}
}
trace!(?rpcs_by_hash);
// TODO: i'm always getting None
if let Some(x) = rpcs_by_hash
.into_iter()
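The `if let Some(x) = rpcs_by_hash.into_iter() ...` chain cut off above selects a winning head from the grouped connections. A hedged, illustrative sketch of one way such a selection can work (this is not the repo's exact logic, and `SoftLimit` plus the tuple layout are assumptions): pick the block hash whose reporting RPCs carry the largest combined soft limit.

```rust
use std::collections::HashMap;

type SoftLimit = u32;

// hash -> list of (rpc name, soft limit) that report that hash as head
fn best_head(rpcs_by_hash: HashMap<String, Vec<(String, SoftLimit)>>) -> Option<String> {
    rpcs_by_hash
        .into_iter()
        .max_by_key(|(_hash, rpcs)| rpcs.iter().map(|(_rpc, limit)| *limit).sum::<SoftLimit>())
        .map(|(hash, _rpcs)| hash)
}
```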
@@ -773,9 +776,11 @@
best_head_hash
);
} else {
// TODO: this isn't necessarily a fork. this might just be an rpc being slow
// TODO: log all the heads?
warn!(
"chain is forked! {} possible heads. {}/{}/{}/{} rpcs have {}",
"?", // TODO: how should we get this?
num_possible_heads,
best_rpcs.len(),
synced_rpcs.len(),
connection_heads.len(),
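The new `num_possible_heads` fills the `"?"` placeholder in this warning: it is the number of distinct head hashes that the connections currently report. A small sketch of the idea, with made-up types:

```rust
use std::collections::HashMap;

// connection name -> head block hash it currently reports (illustrative types)
fn num_possible_heads(connection_heads: &HashMap<String, String>) -> usize {
    let mut rpcs_by_hash: HashMap<&String, Vec<&String>> = HashMap::new();
    for (conn, head_hash) in connection_heads {
        rpcs_by_hash.entry(head_hash).or_default().push(conn);
    }
    // one entry per distinct head hash = number of competing heads
    rpcs_by_hash.len()
}
```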
@@ -805,6 +810,7 @@ impl Web3Connections {
if new_head_block {
self.chain.add_block(new_block.clone(), true);
// TODO: include the fastest rpc here?
info!(
"{}/{} rpcs at {} ({}). publishing new head!",
pending_synced_connections.conns.len(),

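When a new head wins, the code above logs "publishing new head!". One common way to publish a head block to many request handlers is a tokio watch channel; a minimal sketch under that assumption (the repo's actual publishing mechanism may differ):

```rust
use tokio::sync::watch;

fn main() {
    // producer side: the block-consensus task sends the newest head number
    let (head_tx, head_rx) = watch::channel(0u64);
    head_tx.send(15_000_000).expect("a receiver is still alive");

    // consumer side: request handlers read the latest value without polling
    assert_eq!(*head_rx.borrow(), 15_000_000);
}
```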
@@ -1,13 +1,7 @@
#![forbid(unsafe_code)]
//! Run the web3 proxy
mod app;
mod bb8_helpers;
mod config;
mod connection;
mod connections;
mod firewall;
mod frontend;
mod jsonrpc;
#![forbid(unsafe_code)]
#![warn(missing_docs)]
use parking_lot::deadlock;
use std::fs;
@@ -18,8 +12,9 @@ use tokio::runtime;
use tracing::{debug, info};
use tracing_subscriber::EnvFilter;
use crate::app::{flatten_handle, Web3ProxyApp};
use crate::config::{AppConfig, CliConfig};
use web3_proxy::app::{flatten_handle, Web3ProxyApp};
use web3_proxy::config::{AppConfig, CliConfig};
use web3_proxy::frontend;
fn run(
shutdown_receiver: flume::Receiver<()>,
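main.rs now imports `app`, `config`, and `frontend` through the `web3_proxy` crate instead of declaring them as local modules, i.e. the binary consumes a library crate. A hypothetical sketch of the src/lib.rs this implies (the module list is taken from the `mod` lines removed above; everything else is an assumption):

```rust
// hypothetical src/lib.rs for the bin + lib split implied by this diff
#![forbid(unsafe_code)]
#![warn(missing_docs)]

//! web3_proxy: the library side, shared by src/main.rs and the tests.

pub mod app;
pub mod bb8_helpers;
pub mod config;
pub mod connection;
pub mod connections;
pub mod firewall;
pub mod frontend;
pub mod jsonrpc;
```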
@@ -148,7 +143,7 @@ mod tests {
use hashbrown::HashMap;
use std::env;
use crate::config::{RpcSharedConfig, Web3ConnectionConfig};
use web3_proxy::config::{RpcSharedConfig, Web3ConnectionConfig};
use super::*;