bring back outer rpc checking

Bryan Stitt 2023-10-12 23:57:12 -07:00
parent 433bf02c3c
commit 51ba539800
3 changed files with 53 additions and 40 deletions

View File

@@ -103,6 +103,7 @@ pub async fn clean_block_number<'a>(
         if block_hash == *head_block.hash() {
             (head_block.into(), false)
         } else if let Some(app) = app {
+            // TODO: query for the block
             let block = app
                 .balanced_rpcs
                 .blocks_by_hash
@@ -161,7 +162,7 @@ pub async fn clean_block_number<'a>(
     let head_block_num = head_block.number();

     if block_num > head_block_num {
-        // TODO: option to wait for the block
+        // todo!(option to wait for the block here)
        return Err(Web3ProxyError::UnknownBlockNumber {
             known: head_block_num,
             unknown: block_num,
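The second hunk guards against requests for block numbers past the chain head. The shape of that check, reduced to a standalone sketch (the error struct here is a simplified stand-in for Web3ProxyError::UnknownBlockNumber, not the crate's real type):

/// simplified stand-in for Web3ProxyError::UnknownBlockNumber
#[derive(Debug)]
struct UnknownBlockNumber {
    known: u64,
    unknown: u64,
}

/// Reject block numbers beyond the current head. Per the TODO in the
/// diff, a future improvement could wait for the block instead.
fn check_block_number(block_num: u64, head_block_num: u64) -> Result<u64, UnknownBlockNumber> {
    if block_num > head_block_num {
        return Err(UnknownBlockNumber {
            known: head_block_num,
            unknown: block_num,
        });
    }
    Ok(block_num)
}

fn main() {
    assert!(check_block_number(100, 99).is_err());
    assert_eq!(check_block_number(99, 99).unwrap(), 99);
}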

View File

@@ -97,6 +97,7 @@ pub struct RankedRpcs {
 // TODO: could these be refs? The owning RankedRpcs lifetime might work. `stream!` might make it complicated
 pub struct RpcsForRequest {
     inner: Vec<Arc<Web3Rpc>>,
+    outer: Vec<Arc<Web3Rpc>>,
     request: Arc<ValidatedRequest>,
 }
@@ -205,8 +206,11 @@ impl RankedRpcs {
         let head_block = self.head_block.number();

+        let num_active = self.num_active_rpcs();
+
         // these are bigger than we need, but how much does that matter?
-        let mut inner_for_request = Vec::<Arc<Web3Rpc>>::with_capacity(self.num_active_rpcs());
+        let mut inner_for_request = Vec::<Arc<Web3Rpc>>::with_capacity(num_active);
+        let mut outer_for_request = Vec::with_capacity(num_active);

         // TODO: what if min is set to some future block?
         // TODO: what if max is set to some future block?
@@ -214,21 +218,24 @@ impl RankedRpcs {
         let max_block_needed = web3_request.max_block_needed();

         // TODO: max lag was already handled
-        for rpc in self.inner.iter() {
+        for rpc in self.inner.iter().cloned() {
             if let Some(block_needed) = min_block_needed {
                 if !rpc.has_block_data(block_needed) {
+                    outer_for_request.push(rpc);
                     continue;
                 }
             }

             if let Some(block_needed) = max_block_needed {
                 if !rpc.has_block_data(block_needed) {
+                    outer_for_request.push(rpc);
                     continue;
                 }
             }

-            inner_for_request.push(rpc.clone());
+            inner_for_request.push(rpc);
         }

+        // TODO: use web3_request.start_instant? I think we want it to be as recent as possible
         let now = Instant::now();

         match self.sort_mode {
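The hunk above is a single-pass partition: rpcs that can serve the request's whole block range stay "inner", and everything else becomes an "outer" fallback instead of being dropped. A minimal standalone sketch of that idea, with a trimmed-down hypothetical Rpc type standing in for Web3Rpc:

use std::sync::Arc;

// hypothetical stand-in for Web3Rpc: only tracks the block range it has
struct Rpc {
    oldest_block: u64,
    newest_block: u64,
}

impl Rpc {
    fn has_block_data(&self, num: u64) -> bool {
        (self.oldest_block..=self.newest_block).contains(&num)
    }
}

/// Split rpcs into (inner, outer): inner can serve the requested block
/// range directly, outer cannot but is kept as a last-resort fallback.
fn partition(
    rpcs: &[Arc<Rpc>],
    min_block_needed: Option<u64>,
    max_block_needed: Option<u64>,
) -> (Vec<Arc<Rpc>>, Vec<Arc<Rpc>>) {
    let mut inner = Vec::with_capacity(rpcs.len());
    let mut outer = Vec::with_capacity(rpcs.len());

    for rpc in rpcs.iter().cloned() {
        let missing_min = min_block_needed.map_or(false, |b| !rpc.has_block_data(b));
        let missing_max = max_block_needed.map_or(false, |b| !rpc.has_block_data(b));

        if missing_min || missing_max {
            outer.push(rpc);
        } else {
            inner.push(rpc);
        }
    }

    (inner, outer)
}

fn main() {
    let rpcs = vec![
        Arc::new(Rpc { oldest_block: 0, newest_block: 100 }),  // full archive
        Arc::new(Rpc { oldest_block: 90, newest_block: 100 }), // pruned node
    ];
    let (inner, outer) = partition(&rpcs, Some(50), Some(100));
    assert_eq!(inner.len(), 1);
    assert_eq!(outer.len(), 1);
}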
@@ -239,26 +246,31 @@ impl RankedRpcs {
             SortMethod::Shuffle => {
                 // we use shuffle instead of sort so that the load gets spread around more
                 // we will still compare weights during `RpcsForRequest::to_stream`
-                // TODO: use web3_request.start_instant? I think we want it to be as recent as possible
                 inner_for_request.sort_by_cached_key(|x| {
                     x.shuffle_for_load_balancing_on(max_block_needed, &mut rng, now)
                 });
+                outer_for_request.sort_by_cached_key(|x| {
+                    x.shuffle_for_load_balancing_on(max_block_needed, &mut rng, now)
+                });
             }
             SortMethod::Sort => {
                 // we sort so that the best nodes are always preferred. we will compare weights during `RpcsForRequest::to_stream`
                 inner_for_request
                     .sort_by_cached_key(|x| x.sort_for_load_balancing_on(max_block_needed, now));
+                outer_for_request
+                    .sort_by_cached_key(|x| x.sort_for_load_balancing_on(max_block_needed, now));
             }
         }

         if inner_for_request.is_empty() {
-            warn!(?inner_for_request, %web3_request, %head_block, "no rpcs for request");
+            warn!(?inner_for_request, ?outer_for_request, %web3_request, %head_block, "no rpcs for request");
             None
         } else {
-            trace!(?inner_for_request, %web3_request, "for_request");
+            trace!(?inner_for_request, ?outer_for_request, %web3_request, "for_request");
             Some(RpcsForRequest {
                 inner: inner_for_request,
+                outer: outer_for_request,
                 request: web3_request.clone(),
             })
         }
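Both tiers are ranked with the same key so that when a request falls through to the outer list, it is load-balanced the same way as the inner list. Note that sort_by_cached_key computes the key once per element, which matters for the shuffle mode since its key consumes randomness. A rough sketch of the two modes on plain integers (SortMethod and rank_both are illustrative stand-ins, not the crate's API):

use rand::Rng;

// illustrative stand-in for the crate's SortMethod
enum SortMethod {
    Shuffle, // randomized tiebreaker: spread load across roughly-equal rpcs
    Sort,    // deterministic key: always prefer the best rpcs
}

// rank both tiers with the same key so the fallback (outer) list is
// ordered by the same policy as the preferred (inner) list
fn rank_both(mode: &SortMethod, inner: &mut Vec<u64>, outer: &mut Vec<u64>) {
    let mut rng = rand::thread_rng();
    match mode {
        SortMethod::Shuffle => {
            // coarse quality bucket first, random tiebreaker second
            inner.sort_by_cached_key(|x| (x / 10, rng.gen::<u8>()));
            outer.sort_by_cached_key(|x| (x / 10, rng.gen::<u8>()));
        }
        SortMethod::Sort => {
            inner.sort_by_cached_key(|x| *x);
            outer.sort_by_cached_key(|x| *x);
        }
    }
}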
@@ -870,46 +882,46 @@ impl RpcsForRequest {
         loop {
             if self.request.connect_timeout() {
                 break;
-            } else {
-                // yield_now().await;
             }

             let mut earliest_retry_at = None;
             let mut wait_for_sync = Vec::new();

             // TODO: we used to do a neat power of 2 random choices here, but it had bugs. bring that back
-            for best_rpc in self.inner.iter() {
-                match best_rpc
-                    .try_request_handle(&self.request, error_handler, false)
-                    .await
-                {
-                    Ok(OpenRequestResult::Handle(handle)) => {
-                        trace!("opened handle: {}", best_rpc);
-                        yield handle;
-                    }
-                    Ok(OpenRequestResult::RetryAt(retry_at)) => {
-                        trace!(
-                            "retry on {} @ {}",
-                            best_rpc,
-                            retry_at.duration_since(Instant::now()).as_secs_f32()
-                        );
-                        earliest_retry_at = earliest_retry_at.min(Some(retry_at));
-                    }
-                    Ok(OpenRequestResult::Lagged(x)) => {
-                        // this will probably always be the same block, right?
-                        trace!("{} is lagged. will not work now", best_rpc);
-                        wait_for_sync.push(x);
-                    }
-                    Ok(OpenRequestResult::Failed) => {
-                        // TODO: log a warning? emit a stat?
-                        trace!("best_rpc not ready: {}", best_rpc);
-                    }
-                    Err(err) => {
-                        trace!("No request handle for {}. err={:?}", best_rpc, err);
-                    }
-                }
-
-                yield_now().await;
-            }
+            for rpcs in [self.inner.iter(), self.outer.iter()] {
+                for best_rpc in rpcs {
+                    match best_rpc
+                        .try_request_handle(&self.request, error_handler, false)
+                        .await
+                    {
+                        Ok(OpenRequestResult::Handle(handle)) => {
+                            trace!("opened handle: {}", best_rpc);
+                            yield handle;
+                        }
+                        Ok(OpenRequestResult::RetryAt(retry_at)) => {
+                            trace!(
+                                "retry on {} @ {}",
+                                best_rpc,
+                                retry_at.duration_since(Instant::now()).as_secs_f32()
+                            );
+                            earliest_retry_at = earliest_retry_at.min(Some(retry_at));
+                        }
+                        Ok(OpenRequestResult::Lagged(x)) => {
+                            // this will probably always be the same block, right?
+                            trace!("{} is lagged. will not work now", best_rpc);
+                            wait_for_sync.push(x);
+                        }
+                        Ok(OpenRequestResult::Failed) => {
+                            // TODO: log a warning? emit a stat?
+                            trace!("best_rpc not ready: {}", best_rpc);
+                        }
+                        Err(err) => {
+                            trace!("No request handle for {}. err={:?}", best_rpc, err);
+                        }
+                    }
+
+                    yield_now().await;
+                }
+            }

+            // if we got this far, no inner or outer rpcs are ready. that's surprising since an inner should have been ready. maybe it got rate limited
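The control flow above is a two-tier fallback: exhaust the preferred (inner) rpcs before touching the fallback (outer) rpcs, while remembering the earliest moment a rate-limited rpc could be retried. A condensed synchronous sketch of that pattern (Outcome, Rpc, and try_once are hypothetical stand-ins for OpenRequestResult, Web3Rpc, and try_request_handle):

use std::time::Instant;

// hypothetical stand-in for OpenRequestResult
enum Outcome {
    Ready,
    RetryAt(Instant),
    NotReady,
}

struct Rpc {
    name: &'static str,
}

fn try_once(_rpc: &Rpc) -> Outcome {
    // pretend every rpc is rate limited, just for this sketch
    Outcome::RetryAt(Instant::now())
}

/// Try every inner rpc, then every outer rpc, returning the first that is
/// ready. If none are, report the earliest known retry time.
fn pick(inner: &[Rpc], outer: &[Rpc]) -> Result<&'static str, Option<Instant>> {
    let mut earliest_retry_at: Option<Instant> = None;

    for rpcs in [inner.iter(), outer.iter()] {
        for rpc in rpcs {
            match try_once(rpc) {
                Outcome::Ready => return Ok(rpc.name),
                Outcome::RetryAt(at) => {
                    // note: Option's Ord treats None as smallest, so a plain
                    // .min() would never replace an initial None; match instead
                    earliest_retry_at = match earliest_retry_at {
                        Some(prev) => Some(prev.min(at)),
                        None => Some(at),
                    };
                }
                Outcome::NotReady => {}
            }
        }
    }

    Err(earliest_retry_at)
}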

View File

@@ -462,7 +462,7 @@ impl Web3Rpcs {
         // TODO: limit number of tries
         let rpcs = self.try_rpcs_for_request(web3_request).await?;

-        let stream = rpcs.to_stream().take(3);
+        let stream = rpcs.to_stream();

         pin!(stream);
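With the .take(3) cap removed, the caller walks the entire ranked stream, inner rpcs first and then outer, until a handle succeeds or the request times out. Consuming such a stream typically looks like the following generic sketch using the futures and tokio crates (the stream of names stands in for rpcs.to_stream(); this is not the exact call site):

use futures::stream::{self, StreamExt};
use tokio::pin;

#[tokio::main]
async fn main() {
    // stand-in for rpcs.to_stream(): a stream of candidate rpc names
    let stream = stream::iter(["inner-1", "inner-2", "outer-1"]);

    // the real stream type is not Unpin, so it must be pinned before polling
    pin!(stream);

    while let Some(rpc) = stream.next().await {
        // in the proxy this is where the request would be forwarded;
        // a real caller breaks on the first success
        println!("trying {rpc}");
    }
}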