make checking block data optional

this is needed so that private/bundler rpcs can send requests
Bryan Stitt 2023-11-01 22:55:28 -07:00
parent 971f690d4f
commit ff14045b64
2 changed files with 27 additions and 11 deletions

View File

@@ -73,7 +73,9 @@ pub enum ShouldWaitForBlock {
 #[derive(Clone, Debug, Serialize)]
 enum SortMethod {
+    /// shuffle the servers randomly instead of by latency
     Shuffle,
+    /// sort the servers by latency (among other things)
     Sort,
 }
@@ -87,6 +89,7 @@ pub struct RankedRpcs {
     pub head_block: Web3ProxyBlock,
     pub num_synced: usize,
     pub backups_needed: bool,
+    pub check_block_data: bool,
     pub(crate) inner: HashSet<Arc<Web3Rpc>>,
@@ -102,7 +105,11 @@ pub struct RpcsForRequest {
 }

 impl RankedRpcs {
-    pub fn from_rpcs(rpcs: Vec<Arc<Web3Rpc>>, head_block: Option<Web3ProxyBlock>) -> Self {
+    pub fn from_rpcs(
+        rpcs: Vec<Arc<Web3Rpc>>,
+        head_block: Option<Web3ProxyBlock>,
+        check_block_data: bool,
+    ) -> Self {
         // we don't need to sort the rpcs now. we will sort them when a request neds them
         // TODO: the shame about this is that we lose just being able to compare 2 random servers
@@ -119,6 +126,7 @@ impl RankedRpcs {
         Self {
             backups_needed,
+            check_block_data,
             head_block,
             inner: rpcs,
             num_synced,
@@ -195,6 +203,7 @@ impl RankedRpcs {
         let consensus = RankedRpcs {
             backups_needed,
+            check_block_data: true,
             head_block: best_block,
             sort_mode,
             inner: best_rpcs,
@@ -233,16 +242,18 @@ impl RankedRpcs {
                 continue;
             }

-            if let Some(block_needed) = min_block_needed {
-                if !rpc.has_block_data(block_needed) {
-                    outer_for_request.push(rpc);
-                    continue;
-                }
-            }
-            if let Some(block_needed) = max_block_needed {
-                if !rpc.has_block_data(block_needed) {
-                    outer_for_request.push(rpc);
-                    continue;
-                }
-            }
+            if self.check_block_data {
+                if let Some(block_needed) = min_block_needed {
+                    if !rpc.has_block_data(block_needed) {
+                        outer_for_request.push(rpc);
+                        continue;
+                    }
+                }
+                if let Some(block_needed) = max_block_needed {
+                    if !rpc.has_block_data(block_needed) {
+                        outer_for_request.push(rpc);
+                        continue;
+                    }
+                }
+            }
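
To make the new gate concrete, here is a small self-contained Rust sketch. The types are simplified stand-ins, not the project's real Web3Rpc or RankedRpcs, but the filtering follows the same shape as the diff: when check_block_data is false, a server that reports no head block (for example a private or bundler RPC) stays eligible for requests instead of being filtered out.

use std::sync::Arc;

/// hypothetical stand-in for Web3Rpc: only what this sketch needs
struct Rpc {
    name: &'static str,
    /// highest block this server is known to have; None if it does not follow the chain
    head_block: Option<u64>,
}

impl Rpc {
    fn has_block_data(&self, needed: u64) -> bool {
        self.head_block.map_or(false, |head| head >= needed)
    }
}

/// hypothetical stand-in for RankedRpcs, carrying the new flag from this commit
struct Ranked {
    check_block_data: bool,
    inner: Vec<Arc<Rpc>>,
}

impl Ranked {
    /// return the servers eligible for a request that may need specific blocks
    fn eligible(&self, min_block_needed: Option<u64>, max_block_needed: Option<u64>) -> Vec<Arc<Rpc>> {
        self.inner
            .iter()
            .filter(|rpc| {
                if self.check_block_data {
                    // same shape as the diff: both bounds are only enforced when the flag is set
                    for needed in [min_block_needed, max_block_needed].into_iter().flatten() {
                        if !rpc.has_block_data(needed) {
                            return false;
                        }
                    }
                }
                true
            })
            .cloned()
            .collect()
    }
}

fn main() {
    let ranked = Ranked {
        // the real caller derives this from whether a head-block watcher exists
        check_block_data: false,
        inner: vec![
            Arc::new(Rpc { name: "public archive", head_block: Some(18_000_000) }),
            Arc::new(Rpc { name: "private bundler", head_block: None }),
        ],
    };

    // with checking disabled, the bundler endpoint is not filtered out
    let servers = ranked.eligible(Some(17_999_999), None);
    assert_eq!(servers.len(), 2);
    println!("{:?}", servers.iter().map(|rpc| rpc.name).collect::<Vec<_>>());
}

Flipping check_block_data to true in the same sketch leaves only the archive server, which matches the old, always-checking behavior.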

View File

@@ -45,6 +45,7 @@ pub struct Web3Rpcs {
     /// Geth's subscriptions have the same potential for skipping blocks.
     pub(crate) watch_ranked_rpcs: watch::Sender<Option<Arc<RankedRpcs>>>,
     /// this head receiver makes it easy to wait until there is a new block
+    /// this is None if none of the child Rpcs are subscribed to newHeads
     pub(super) watch_head_block: Option<watch::Sender<Option<Web3ProxyBlock>>>,
     /// TODO: this map is going to grow forever unless we do some sort of pruning. maybe store pruned in redis?
     /// all blocks, including uncles
@@ -416,7 +417,11 @@ impl Web3Rpcs {
             let rpcs = self.by_name.read().values().cloned().collect();

             // TODO: does this need the head_block? i don't think so
-            let x = RankedRpcs::from_rpcs(rpcs, web3_request.head_block.clone());
+            let x = RankedRpcs::from_rpcs(
+                rpcs,
+                web3_request.head_block.clone(),
+                self.watch_head_block.is_some(),
+            );

             Arc::new(x)
         };
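
The second file decides the flag at the call site. Below is a minimal sketch of that decision, assuming the tokio crate with its sync feature and using hypothetical simplified types: block-data checking is only requested when the proxy actually has a head-block watcher, which is what watch_head_block.is_some() expresses. A deployment made up only of private/bundler RPCs never subscribes to newHeads, so its RankedRpcs skips the availability checks entirely.

use tokio::sync::watch;

/// hypothetical simplified head-block type
struct Block;

/// hypothetical stand-in for Web3Rpcs with only the field this sketch needs
struct Rpcs {
    /// None when none of the child RPCs are subscribed to newHeads
    watch_head_block: Option<watch::Sender<Option<Block>>>,
}

impl Rpcs {
    /// mirrors the new call site: only check block data if we follow head blocks
    fn check_block_data(&self) -> bool {
        self.watch_head_block.is_some()
    }
}

fn main() {
    // a deployment made only of private/bundler RPCs has no head-block watcher
    let bundler_only = Rpcs { watch_head_block: None };
    assert!(!bundler_only.check_block_data());

    // a normal deployment keeps the head-block channel, so checks stay enabled
    let (head_tx, _head_rx) = watch::channel(None);
    let with_heads = Rpcs { watch_head_block: Some(head_tx) };
    assert!(with_heads.check_block_data());
}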