add to skip list earlier

Bryan Stitt 2023-05-18 13:51:28 -07:00
parent a92c93706b
commit 9c584354d9
3 changed files with 32 additions and 27 deletions

@@ -153,7 +153,7 @@ pub async fn user_balance_post(
     // Just make an rpc request, idk if i need to call this super extensive code
     let transaction_receipt: TransactionReceipt = match app
         .balanced_rpcs
-        .best_available_rpc(&authorization, None, &[], None, None)
+        .best_available_rpc(&authorization, None, &mut vec![], None, None)
         .await
     {
         Ok(OpenRequestResult::Handle(handle)) => {
@@ -188,7 +188,7 @@ pub async fn user_balance_post(
     debug!("Transaction receipt is: {:?}", transaction_receipt);
     let accepted_token: Address = match app
         .balanced_rpcs
-        .best_available_rpc(&authorization, None, &[], None, None)
+        .best_available_rpc(&authorization, None, &mut vec![], None, None)
         .await
     {
         Ok(OpenRequestResult::Handle(handle)) => {
@@ -243,7 +243,7 @@ pub async fn user_balance_post(
     debug!("Accepted token is: {:?}", accepted_token);
     let decimals: u32 = match app
         .balanced_rpcs
-        .best_available_rpc(&authorization, None, &[], None, None)
+        .best_available_rpc(&authorization, None, &mut vec![], None, None)
         .await
     {
         Ok(OpenRequestResult::Handle(handle)) => {

@@ -130,18 +130,18 @@ impl ConsensusWeb3Rpcs {
         skip_rpcs: &[Arc<Web3Rpc>],
     ) -> ShouldWaitForBlock {
         // TODO: i think checking synced is always a waste of time. though i guess there could be a race
-        // if self
-        //     .head_rpcs
-        //     .iter()
-        //     .any(|rpc| self.rpc_will_work_eventually(rpc, needed_block_num, skip_rpcs))
-        // {
-        //     let head_num = self.head_block.number();
-        //     if Some(head_num) >= needed_block_num {
-        //         debug!("best (head) block: {}", head_num);
-        //         return ShouldWaitForBlock::Ready;
-        //     }
-        // }
+        if self
+            .head_rpcs
+            .iter()
+            .any(|rpc| self.rpc_will_work_eventually(rpc, needed_block_num, skip_rpcs))
+        {
+            let head_num = self.head_block.number();
+            if Some(head_num) >= needed_block_num {
+                debug!("best (head) block: {}", head_num);
+                return ShouldWaitForBlock::Ready;
+            }
+        }

         // all of the head rpcs are skipped

@@ -495,7 +495,7 @@ impl Web3Rpcs {
         &self,
         authorization: &Arc<Authorization>,
         request_metadata: Option<&Arc<RequestMetadata>>,
-        skip: &[Arc<Web3Rpc>],
+        skip: &mut Vec<Arc<Web3Rpc>>,
         min_block_needed: Option<&U64>,
         max_block_needed: Option<&U64>,
     ) -> Web3ProxyResult<OpenRequestResult> {
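With this signature change the caller owns the skip list and best_available_rpc appends to it itself (see the next hunk). A minimal sketch of the new call pattern, using only names visible in this diff; the surrounding function and the two-call shape are illustrative assumptions, not code from this commit:

    // hypothetical caller of the new &mut Vec<Arc<Web3Rpc>> signature
    let mut skip_rpcs: Vec<Arc<Web3Rpc>> = vec![];

    // first attempt: skip_rpcs is empty, so every synced server is a candidate
    let first_try = rpcs
        .best_available_rpc(&authorization, None, &mut skip_rpcs, None, None)
        .await?;

    // best_available_rpc records its pick in skip_rpcs before opening a handle,
    // so a retry will not select the same server even if the first attempt failed
    let second_try = rpcs
        .best_available_rpc(&authorization, None, &mut skip_rpcs, None, None)
        .await?;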
@@ -592,6 +592,8 @@ impl Web3Rpcs {
         let best_rpc = min_by_key(rpc_a, rpc_b, |x| x.peak_ewma());
         trace!("{:?} - winner: {}", request_ulid, best_rpc);

+        skip.push(best_rpc.clone());
+
         // just because it has lower latency doesn't mean we are sure to get a connection
         match best_rpc.try_request_handle(authorization, None).await {
             Ok(OpenRequestResult::Handle(handle)) => {
@@ -805,7 +807,7 @@ impl Web3Rpcs {
                 .best_available_rpc(
                     authorization,
                     request_metadata,
-                    &skip_rpcs,
+                    &mut skip_rpcs,
                     min_block_needed,
                     max_block_needed,
                 )
@@ -822,9 +824,6 @@ impl Web3Rpcs {
                     let is_backup_response = rpc.backup;

-                    // TODO: instead of entirely skipping, maybe demote a tier?
-                    skip_rpcs.push(rpc);
-
                     // TODO: get the log percent from the user data
                     let response_result: Result<Box<RawValue>, _> = active_request_handle
                         .request(
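Together with the `skip.push(best_rpc.clone())` added inside best_available_rpc above, this removal moves the moment a server enters the skip list from "after a handle was obtained" to "as soon as it is chosen". A condensed before/after sketch of the selection step, pieced together from the hunks in this commit; the error arms are simplified placeholders:

    // before: the winner was only skipped at the call site, after a handle succeeded
    let best_rpc = min_by_key(rpc_a, rpc_b, |x| x.peak_ewma());
    match best_rpc.try_request_handle(authorization, None).await {
        Ok(OpenRequestResult::Handle(handle)) => {
            skip_rpcs.push(best_rpc.clone()); // pushed only once the handle was in hand
            // ... send the request through `handle`
        }
        _ => { /* best_rpc stayed eligible and could be picked again */ }
    }

    // after: the pick is recorded up front, before trying to open a handle
    let best_rpc = min_by_key(rpc_a, rpc_b, |x| x.peak_ewma());
    skip.push(best_rpc.clone());
    match best_rpc.try_request_handle(authorization, None).await {
        Ok(OpenRequestResult::Handle(handle)) => { /* ... */ }
        _ => { /* best_rpc is already in the skip list for the next attempt */ }
    }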
@@ -1493,7 +1492,7 @@ mod tests {
             .best_available_rpc(
                 &authorization,
                 None,
-                &[],
+                &mut vec![],
                 Some(head_block.number.as_ref().unwrap()),
                 None,
             )
@@ -1587,28 +1586,28 @@ mod tests {
         // TODO: make sure the handle is for the expected rpc
         assert!(matches!(
-            rpcs.best_available_rpc(&authorization, None, &[], None, None)
+            rpcs.best_available_rpc(&authorization, None, &mut vec![], None, None)
                 .await,
             Ok(OpenRequestResult::Handle(_))
         ));

         // TODO: make sure the handle is for the expected rpc
         assert!(matches!(
-            rpcs.best_available_rpc(&authorization, None, &[], Some(&0.into()), None)
+            rpcs.best_available_rpc(&authorization, None, &mut vec![], Some(&0.into()), None)
                 .await,
             Ok(OpenRequestResult::Handle(_))
         ));

         // TODO: make sure the handle is for the expected rpc
         assert!(matches!(
-            rpcs.best_available_rpc(&authorization, None, &[], Some(&1.into()), None)
+            rpcs.best_available_rpc(&authorization, None, &mut vec![], Some(&1.into()), None)
                 .await,
             Ok(OpenRequestResult::Handle(_))
         ));

         // future block should not get a handle
         let future_rpc = rpcs
-            .best_available_rpc(&authorization, None, &[], Some(&2.into()), None)
+            .best_available_rpc(&authorization, None, &mut vec![], Some(&2.into()), None)
             .await;

         assert!(matches!(future_rpc, Ok(OpenRequestResult::NotReady)));
     }
@@ -1733,7 +1732,13 @@ mod tests {
         // best_synced_backend_connection requires servers to be synced with the head block
         // TODO: test with and without passing the head_block.number?
         let best_available_server = rpcs
-            .best_available_rpc(&authorization, None, &[], Some(head_block.number()), None)
+            .best_available_rpc(
+                &authorization,
+                None,
+                &mut vec![],
+                Some(head_block.number()),
+                None,
+            )
             .await;

         debug!("best_available_server: {:#?}", best_available_server);
@@ -1744,13 +1749,13 @@ mod tests {
         ));

         let _best_available_server_from_none = rpcs
-            .best_available_rpc(&authorization, None, &[], None, None)
+            .best_available_rpc(&authorization, None, &mut vec![], None, None)
             .await;

         // assert_eq!(best_available_server, best_available_server_from_none);

         let best_archive_server = rpcs
-            .best_available_rpc(&authorization, None, &[], Some(&1.into()), None)
+            .best_available_rpc(&authorization, None, &mut vec![], Some(&1.into()), None)
             .await;

         match best_archive_server {