Improve max wait and timeouts (#137)
* pass max wait with more functions
* move rpc_head_str higher up to use it in all the logs
* refresh consensus at double the block time
* new pricing that also includes archive check
* lint
* don't clone method
* put caches into one list; they serialize with their name, so it will still be easy to tell them apart
* more consistent waiting. still waits more than I want right now
* clean up default max waits
* more timeouts
parent fa55e5ce9b
commit 3c2f95de10
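Most of this diff threads an optional `max_wait: Option<Duration>` through the request helpers instead of wrapping each call site in its own `tokio::time::timeout`. A minimal sketch of that pattern, assuming `tokio` and `anyhow` (the helper name is hypothetical; the real methods live on `Web3Rpcs` and `Web3Rpc`):

```rust
use std::time::Duration;
use tokio::time::timeout;

// Hypothetical helper showing the shape of the change: the deadline is an
// argument, and `None` means "wait as long as the future takes".
async fn with_max_wait<T, F>(max_wait: Option<Duration>, fut: F) -> anyhow::Result<T>
where
    F: std::future::Future<Output = anyhow::Result<T>>,
{
    match max_wait {
        // enforce the caller's deadline; a timeout Elapsed converts into anyhow::Error
        Some(limit) => timeout(limit, fut).await?,
        // no deadline configured
        None => fut.await,
    }
}
```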
@@ -1071,6 +1071,7 @@ impl Web3ProxyApp {
             Some(request_metadata),
             None,
             None,
+            Some(Duration::from_secs(30)),
             Some(Level::Trace.into()),
             None,
             true,
@@ -1101,6 +1102,7 @@ impl Web3ProxyApp {
             Some(request_metadata),
             None,
             None,
+            Some(Duration::from_secs(30)),
             Some(Level::Trace.into()),
             num_public_rpcs,
             true,
@@ -1271,6 +1273,7 @@ impl Web3ProxyApp {
             method,
             &params,
             Some(request_metadata),
+            Some(Duration::from_secs(30)),
             None,
             None,
         )
@@ -1311,6 +1314,7 @@ impl Web3ProxyApp {
             method,
             &params,
             Some(request_metadata),
+            Some(Duration::from_secs(30)),
             None,
             None,
         )
@@ -1343,6 +1347,7 @@ impl Web3ProxyApp {
             method,
             &params,
             Some(request_metadata),
+            Some(Duration::from_secs(30)),
             None,
             None,
         )
@@ -1366,6 +1371,7 @@ impl Web3ProxyApp {
             method,
             &params,
             Some(request_metadata),
+            Some(Duration::from_secs(30)),
             Some(&U64::one()),
             None,
         )
@@ -1610,7 +1616,7 @@ impl Web3ProxyApp {

         let request_block = self
             .balanced_rpcs
-            .block(&authorization, &request_block_hash, None)
+            .block(&authorization, &request_block_hash, None, None)
             .await?
             .block;

@@ -1640,7 +1646,7 @@ impl Web3ProxyApp {

         let from_block = self
             .balanced_rpcs
-            .block(&authorization, &from_block_hash, None)
+            .block(&authorization, &from_block_hash, None, None)
             .await?
             .block;

@@ -1651,7 +1657,7 @@ impl Web3ProxyApp {

         let to_block = self
             .balanced_rpcs
-            .block(&authorization, &to_block_hash, None)
+            .block(&authorization, &to_block_hash, None, None)
             .await?
             .block;

@@ -1666,7 +1672,7 @@ impl Web3ProxyApp {
         };

         // TODO: different timeouts for different user tiers. get the duration out of the request_metadata
-        let duration = Duration::from_secs(240);
+        let max_wait = Duration::from_secs(240);

         if let Some(cache_key) = cache_key {
             let from_block_num = cache_key.from_block_num();
@@ -1679,16 +1685,16 @@ impl Web3ProxyApp {
                 .jsonrpc_response_cache
                 .try_get_with::<_, Web3ProxyError>(cache_key.hash(), async {
                     let response_data = timeout(
-                        duration,
+                        max_wait + Duration::from_millis(10),
                         self.balanced_rpcs
                             .try_proxy_connection::<_, Arc<RawValue>>(
                                 method,
                                 &params,
                                 Some(request_metadata),
+                                Some(max_wait),
                                 from_block_num.as_ref(),
                                 to_block_num.as_ref(),
-                            )
-                    )
+                            ))
                     .await?;

                     if !cache_jsonrpc_errors && let Err(err) = response_data {
@@ -1705,12 +1711,13 @@ impl Web3ProxyApp {
                 }).await?
             } else {
                 let x = timeout(
-                    duration,
+                    max_wait + Duration::from_millis(10),
                     self.balanced_rpcs
                         .try_proxy_connection::<_, Arc<RawValue>>(
                             method,
                             &params,
                             Some(request_metadata),
+                            Some(max_wait),
                             None,
                             None,
                         )
@@ -76,7 +76,7 @@ pub async fn clean_block_number(
                 serde_json::from_value(block_hash).context("decoding blockHash")?;

             let block = rpcs
-                .block(authorization, &block_hash, None)
+                .block(authorization, &block_hash, None, None)
                 .await
                 .context("fetching block number from hash")?;

@@ -8,6 +8,7 @@

 use log::warn;
 use migration::sea_orm::prelude::Decimal;
+use std::str::FromStr;

 pub struct ComputeUnit(Decimal);

@@ -121,12 +122,16 @@ impl ComputeUnit {
     /// Compute cost per request
     /// All methods cost the same
     /// The number of bytes are based on input, and output bytes
-    pub fn cost(&self, cache_hit: bool, usd_per_cu: Decimal) -> Decimal {
+    pub fn cost(&self, archive_request: bool, cache_hit: bool, usd_per_cu: Decimal) -> Decimal {
         let mut cost = self.0 * usd_per_cu;

+        if archive_request {
+            cost *= Decimal::from_str("2.5").unwrap();
+        }
+
         // cache hits get a 50% discount
         if cache_hit {
-            cost /= Decimal::from(2)
+            cost *= Decimal::from_str("0.75").unwrap()
         }

         cost
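The new pricing multiplies instead of divides: archive requests cost 2.5x, and cache hits now pay 75% of the base price (a 25% discount, even though the comment above still says 50%). A self-contained sketch of the same arithmetic using `rust_decimal` (the crate behind sea_orm's `Decimal`); the free function is a stand-in for the `ComputeUnit::cost` method:

```rust
use rust_decimal::Decimal;
use std::str::FromStr;

// Sketch of the pricing from this commit: archive requests are 2.5x,
// cache hits pay 0.75x of the computed price.
fn cost(cu: Decimal, archive_request: bool, cache_hit: bool, usd_per_cu: Decimal) -> Decimal {
    let mut cost = cu * usd_per_cu;
    if archive_request {
        cost *= Decimal::from_str("2.5").unwrap();
    }
    if cache_hit {
        cost *= Decimal::from_str("0.75").unwrap();
    }
    cost
}
```

For example, 100 CUs at $0.000002/CU is $0.0002 base; an archive request served from cache would cost 0.0002 x 2.5 x 0.75 = $0.000375.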
@@ -17,7 +17,7 @@ use axum::{
 };
 use http::{header::AUTHORIZATION, StatusCode};
 use listenfd::ListenFd;
-use log::{info};
+use log::info;
 use moka::future::{Cache, CacheBuilder};
 use std::net::SocketAddr;
 use std::sync::Arc;
@@ -138,17 +138,19 @@ async fn _status(app: Arc<Web3ProxyApp>) -> (StatusCode, &'static str, Bytes) {
     // TODO: the hostname is probably not going to change. only get once at the start?
     let body = json!({
         "balanced_rpcs": app.balanced_rpcs,
-        "bearer_token_semaphores": MokaCacheSerializer(&app.bearer_token_semaphores),
         "bundler_4337_rpcs": app.bundler_4337_rpcs,
+        "caches": [
+            MokaCacheSerializer(&app.bearer_token_semaphores),
+            MokaCacheSerializer(&app.ip_semaphores),
+            MokaCacheSerializer(&app.jsonrpc_response_cache),
+            MokaCacheSerializer(&app.rpc_secret_key_cache),
+            MokaCacheSerializer(&app.user_balance_cache),
+            MokaCacheSerializer(&app.user_semaphores),
+        ],
         "chain_id": app.config.chain_id,
         "hostname": app.hostname,
-        "ip_semaphores": MokaCacheSerializer(&app.ip_semaphores),
-        "jsonrpc_response_cache": MokaCacheSerializer(&app.jsonrpc_response_cache),
         "payment_factory_address": app.config.deposit_factory_contract,
         "private_rpcs": app.private_rpcs,
-        "rpc_secret_key_cache": MokaCacheSerializer(&app.rpc_secret_key_cache),
-        "user_balance_cache": MokaCacheSerializer(&app.user_balance_cache),
-        "user_semaphores": MokaCacheSerializer(&app.user_semaphores),
         "version": APP_USER_AGENT,
     });

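Grouping the Moka caches under one "caches" key works because each serialized cache carries its own name (per the commit message, "they serialize with their name"). A hedged sketch of that shape with `serde_json`; `CacheStatus` is a hypothetical stand-in for `MokaCacheSerializer`:

```rust
use serde::Serialize;
use serde_json::json;

// Each entry in the "caches" array names itself, so collapsing many
// top-level keys into one list loses no information.
#[derive(Serialize)]
struct CacheStatus<'a> {
    name: &'a str,
    entry_count: u64,
}

fn status_body() -> serde_json::Value {
    json!({
        "caches": [
            CacheStatus { name: "ip_semaphores", entry_count: 3 },
            CacheStatus { name: "jsonrpc_response_cache", entry_count: 1024 },
        ],
        "version": "example-agent",
    })
}
```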
@@ -3,20 +3,21 @@ use super::consensus::ConsensusFinder;
 use super::many::Web3Rpcs;
 use super::one::Web3Rpc;
 use super::transactions::TxStatus;
-use crate::config::BlockAndRpc;
+use crate::config::{average_block_interval, BlockAndRpc};
 use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
 use crate::frontend::authorization::Authorization;
 use derive_more::From;
 use ethers::prelude::{Block, TxHash, H256, U64};
-use log::{debug, trace, warn};
+use log::{debug, error, trace, warn};
 use moka::future::Cache;
 use serde::ser::SerializeStruct;
 use serde::Serialize;
 use serde_json::json;
 use std::hash::Hash;
 use std::time::Duration;
-use std::{cmp::Ordering, fmt::Display, sync::Arc};
+use std::{fmt::Display, sync::Arc};
 use tokio::sync::broadcast;
+use tokio::time::timeout;

 // TODO: type for Hydrated Blocks with their full transactions?
 pub type ArcBlock = Arc<Block<TxHash>>;
@@ -269,6 +270,7 @@ impl Web3Rpcs {
         authorization: &Arc<Authorization>,
         hash: &H256,
         rpc: Option<&Arc<Web3Rpc>>,
+        max_wait: Option<Duration>,
     ) -> Web3ProxyResult<Web3ProxyBlock> {
         // first, try to get the hash from our cache
         // the cache is set last, so if its here, its everywhere
@@ -299,12 +301,18 @@ impl Web3Rpcs {
                 &get_block_params,
                 authorization,
                 None,
+                max_wait,
             )
             .await?
         } else {
             // ask any rpc
+            // TODO: retry if "Requested data is not available"
             // TODO: request_with_metadata instead of internal_request
-            self.internal_request::<_, Option<ArcBlock>>("eth_getBlockByHash", &get_block_params)
+            self.internal_request::<_, Option<ArcBlock>>(
+                "eth_getBlockByHash",
+                &get_block_params,
+                max_wait,
+            )
             .await?
         };

@@ -375,8 +383,8 @@ impl Web3Rpcs {
         // deref to not keep the lock open
         if let Some(block_hash) = self.blocks_by_number.get(num) {
             // TODO: sometimes this needs to fetch the block. why? i thought block_numbers would only be set if the block hash was set
-            // TODO: pass authorization through here?
-            let block = self.block(authorization, &block_hash, None).await?;
+            // TODO: configurable max wait and rpc
+            let block = self.block(authorization, &block_hash, None, None).await?;

             return Ok((block, block_depth));
         }
@@ -384,7 +392,7 @@ impl Web3Rpcs {
         // block number not in cache. we need to ask an rpc for it
         // TODO: this error is too broad
         let response = self
-            .internal_request::<_, Option<ArcBlock>>("eth_getBlockByNumber", &(*num, false))
+            .internal_request::<_, Option<ArcBlock>>("eth_getBlockByNumber", &(*num, false), None)
             .await?
             .ok_or(Web3ProxyError::NoBlocksKnown)?;

@@ -404,240 +412,68 @@ impl Web3Rpcs {
     // Geth's subscriptions have the same potential for skipping blocks.
         pending_tx_sender: Option<broadcast::Sender<TxStatus>>,
     ) -> Web3ProxyResult<()> {
-        let mut connection_heads =
+        let mut consensus_finder =
             ConsensusFinder::new(Some(self.max_head_block_age), Some(self.max_head_block_lag));

+        // TODO: what timeout on block receiver? we want to keep consensus_finder fresh so that server tiers are correct
+        let double_block_time = average_block_interval(self.chain_id).mul_f32(2.0);
+
         loop {
-            match block_receiver.recv_async().await {
-                Ok((new_block, rpc)) => {
+            match timeout(double_block_time, block_receiver.recv_async()).await {
+                Ok(Ok((new_block, rpc))) => {
                     let rpc_name = rpc.name.clone();

-                    if let Err(err) = self
-                        .process_block_from_rpc(
+                    // TODO: what timeout on this?
+                    match timeout(
+                        Duration::from_secs(2),
+                        consensus_finder.process_block_from_rpc(
+                            self,
                             authorization,
-                            &mut connection_heads,
                             new_block,
                             rpc,
                             &pending_tx_sender,
-                        )
-                        .await
-                    {
-                        warn!(
-                            "error while processing block from rpc {}: {:#?}",
-                            rpc_name, err
-                        );
-                    }
-                }
-                Err(err) => {
-                    warn!("block_receiver exited! {:#?}", err);
-                    return Err(err.into());
-                }
-            }
-        }
-    }
-
-    /// `connection_heads` is a mapping of rpc_names to head block hashes.
-    /// self.blockchain_map is a mapping of hashes to the complete ArcBlock.
-    /// TODO: return something?
-    pub(crate) async fn process_block_from_rpc(
-        &self,
-        authorization: &Arc<Authorization>,
-        consensus_finder: &mut ConsensusFinder,
-        new_block: Option<Web3ProxyBlock>,
-        rpc: Arc<Web3Rpc>,
-        _pending_tx_sender: &Option<broadcast::Sender<TxStatus>>,
-    ) -> Web3ProxyResult<()> {
-        // TODO: how should we handle an error here?
-        if !consensus_finder
-            .update_rpc(new_block.clone(), rpc.clone(), self)
-            .await
-            .web3_context("failed to update rpc")?
-        {
-            // nothing changed. no need to scan for a new consensus head
-            return Ok(());
-        }
-
-        let new_consensus_rpcs = match consensus_finder
-            .find_consensus_connections(authorization, self)
-            .await
-        {
-            Err(err) => {
-                return Err(err).web3_context("error while finding consensus head block!");
-            }
-            Ok(None) => {
-                return Err(Web3ProxyError::NoConsensusHeadBlock);
-            }
-            Ok(Some(x)) => x,
-        };
-
-        trace!("new_synced_connections: {:#?}", new_consensus_rpcs);
-
-        let watch_consensus_head_sender = self.watch_consensus_head_sender.as_ref().unwrap();
-        let consensus_tier = new_consensus_rpcs.tier;
-        // TODO: think more about the default for total_tiers
-        let total_tiers = consensus_finder.worst_tier().unwrap_or_default();
-        let backups_needed = new_consensus_rpcs.backups_needed;
-        let consensus_head_block = new_consensus_rpcs.head_block.clone();
-        let num_consensus_rpcs = new_consensus_rpcs.num_consensus_rpcs();
-        let num_active_rpcs = consensus_finder.len();
-        let total_rpcs = self.len();
-
-        let new_consensus_rpcs = Arc::new(new_consensus_rpcs);
-
-        let old_consensus_head_connections = self
-            .watch_consensus_rpcs_sender
-            .send_replace(Some(new_consensus_rpcs.clone()));
-
-        let backups_voted_str = if backups_needed { "B " } else { "" };
-
-        match old_consensus_head_connections.as_ref() {
-            None => {
-                debug!(
-                    "first {}/{} {}{}/{}/{} block={}, rpc={}",
-                    consensus_tier,
-                    total_tiers,
-                    backups_voted_str,
-                    num_consensus_rpcs,
-                    num_active_rpcs,
-                    total_rpcs,
-                    consensus_head_block,
-                    rpc,
-                );
-
-                if backups_needed {
-                    // TODO: what else should be in this error?
-                    warn!("Backup RPCs are in use!");
-                }
-
-                // this should already be cached
-                let consensus_head_block = self.try_cache_block(consensus_head_block, true).await?;
-
-                watch_consensus_head_sender
-                    .send(Some(consensus_head_block))
-                    .or(Err(Web3ProxyError::WatchSendError))
-                    .web3_context(
-                        "watch_consensus_head_sender failed sending first consensus_head_block",
-                    )?;
-            }
-            Some(old_consensus_connections) => {
-                let old_head_block = &old_consensus_connections.head_block;
-
-                // TODO: do this log item better
-                let rpc_head_str = new_block
-                    .map(|x| x.to_string())
-                    .unwrap_or_else(|| "None".to_string());
-
-                match consensus_head_block.number().cmp(old_head_block.number()) {
-                    Ordering::Equal => {
-                        // multiple blocks with the same fork!
-                        if consensus_head_block.hash() == old_head_block.hash() {
-                            // no change in hash. no need to use watch_consensus_head_sender
-                            // TODO: trace level if rpc is backup
-                            debug!(
-                                "con {}/{} {}{}/{}/{} con={} rpc={}@{}",
-                                consensus_tier,
-                                total_tiers,
-                                backups_voted_str,
-                                num_consensus_rpcs,
-                                num_active_rpcs,
-                                total_rpcs,
-                                consensus_head_block,
-                                rpc,
-                                rpc_head_str,
-                            )
-                        } else {
-                            // hash changed
-
-                            debug!(
-                                "unc {}/{} {}{}/{}/{} con={} old={} rpc={}@{}",
-                                consensus_tier,
-                                total_tiers,
-                                backups_voted_str,
-                                num_consensus_rpcs,
-                                num_active_rpcs,
-                                total_rpcs,
-                                consensus_head_block,
-                                old_head_block,
-                                rpc,
-                                rpc_head_str,
-                            );
-
-                            let consensus_head_block = self
-                                .try_cache_block(consensus_head_block, true)
-                                .await
-                                .web3_context("save consensus_head_block as heaviest chain")?;
-
-                            watch_consensus_head_sender
-                                .send(Some(consensus_head_block))
-                                .or(Err(Web3ProxyError::WatchSendError))
-                                .web3_context("watch_consensus_head_sender failed sending uncled consensus_head_block")?;
-                        }
-                    }
-                    Ordering::Less => {
-                        // this is unlikely but possible
-                        // TODO: better log that includes all the votes
-                        warn!(
-                            "chain rolled back {}/{} {}{}/{}/{} con={} old={} rpc={}@{}",
-                            consensus_tier,
-                            total_tiers,
-                            backups_voted_str,
-                            num_consensus_rpcs,
-                            num_active_rpcs,
-                            total_rpcs,
-                            consensus_head_block,
-                            old_head_block,
-                            rpc,
-                            rpc_head_str,
-                        );
-
-                        if backups_needed {
-                            // TODO: what else should be in this error?
-                            warn!("Backup RPCs are in use!");
-                        }
-
-                        // TODO: tell save_block to remove any higher block numbers from the cache. not needed because we have other checks on requested blocks being > head, but still seems like a good idea
-                        let consensus_head_block = self
-                            .try_cache_block(consensus_head_block, true)
-                            .await
-                            .web3_context(
-                                "save_block sending consensus_head_block as heaviest chain",
-                            )?;
-
-                        watch_consensus_head_sender
-                            .send(Some(consensus_head_block))
-                            .or(Err(Web3ProxyError::WatchSendError))
-                            .web3_context("watch_consensus_head_sender failed sending rollback consensus_head_block")?;
-                    }
-                    Ordering::Greater => {
-                        debug!(
-                            "new {}/{} {}{}/{}/{} con={} rpc={}@{}",
-                            consensus_tier,
-                            total_tiers,
-                            backups_voted_str,
-                            num_consensus_rpcs,
-                            num_active_rpcs,
-                            total_rpcs,
-                            consensus_head_block,
-                            rpc,
-                            rpc_head_str,
-                        );
-
-                        if backups_needed {
-                            // TODO: what else should be in this error?
-                            warn!("Backup RPCs are in use!");
-                        }
-
-                        let consensus_head_block =
-                            self.try_cache_block(consensus_head_block, true).await?;
-
-                        watch_consensus_head_sender.send(Some(consensus_head_block))
-                            .or(Err(Web3ProxyError::WatchSendError))
-                            .web3_context("watch_consensus_head_sender failed sending new consensus_head_block")?;
-                    }
-                }
-            }
-        }
-
-        Ok(())
+                        ),
+                    )
+                    .await
+                    {
+                        Ok(Ok(_)) => {}
+                        Ok(Err(err)) => {
+                            error!(
+                                "error while processing block from rpc {}: {:#?}",
+                                rpc_name, err
+                            );
+                        }
+                        Err(timeout) => {
+                            error!(
+                                "timeout while processing block from {}: {:#?}",
+                                rpc_name, timeout
+                            );
+                        }
+                    }
+                }
+                Ok(Err(err)) => {
+                    // TODO: panic is probably too much, but getting here is definitely not good
+                    error!("block_receiver on {} exited! {:#?}", self, err);
+                    return Err(err.into());
+                }
+                Err(_) => {
+                    // TODO: what timeout on this?
+                    match timeout(
+                        Duration::from_secs(2),
+                        consensus_finder.refresh(self, authorization, None, None),
+                    )
+                    .await
+                    {
+                        Ok(Ok(_)) => {}
+                        Ok(Err(err)) => {
+                            error!("error while refreshing consensus finder: {:#?}", err);
+                        }
+                        Err(timeout) => {
+                            error!("timeout while refreshing consensus finder: {:#?}", timeout);
+                        }
+                    }
+                }
+            }
+        }
     }
 }
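The rewritten loop above only blocks on the channel for twice the chain's block interval; when nothing arrives in time, it refreshes the consensus finder so server rankings don't go stale. A minimal sketch of that receive shape, assuming a tokio mpsc channel in place of the flume channel the proxy actually uses:

```rust
use std::time::Duration;
use tokio::sync::mpsc;
use tokio::time::timeout;

async fn watch_heads(mut rx: mpsc::Receiver<u64>, block_interval: Duration) {
    // refresh consensus at double the block time
    let double_block_time = block_interval.mul_f32(2.0);
    loop {
        match timeout(double_block_time, rx.recv()).await {
            Ok(Some(block_num)) => {
                // a new head arrived in time: process it
                println!("new head: {block_num}");
            }
            Ok(None) => {
                // channel closed: exit the loop
                break;
            }
            Err(_elapsed) => {
                // two block intervals with no news: refresh so lagging
                // rpcs get re-ranked
                println!("no new heads; refreshing consensus");
            }
        }
    }
}
```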
@@ -1,7 +1,8 @@
 use super::blockchain::Web3ProxyBlock;
 use super::many::Web3Rpcs;
 use super::one::Web3Rpc;
-use crate::errors::{Web3ProxyErrorContext, Web3ProxyResult};
+use super::transactions::TxStatus;
+use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
 use crate::frontend::authorization::Authorization;
 use base64::engine::general_purpose;
 use derive_more::Constructor;
@@ -10,7 +11,7 @@ use hashbrown::{HashMap, HashSet};
 use hdrhistogram::serialization::{Serializer, V2DeflateSerializer};
 use hdrhistogram::Histogram;
 use itertools::{Itertools, MinMaxResult};
-use log::{log_enabled, trace, warn, Level};
+use log::{debug, log_enabled, trace, warn, Level};
 use moka::future::Cache;
 use serde::Serialize;
 use std::cmp::{Ordering, Reverse};
@@ -18,6 +19,7 @@ use std::collections::BTreeMap;
 use std::fmt;
 use std::sync::{atomic, Arc};
 use std::time::Duration;
+use tokio::sync::broadcast;
 use tokio::time::Instant;

 #[derive(Clone, Serialize)]
@@ -369,6 +371,229 @@ impl ConsensusFinder {
         self.rpc_heads.is_empty()
     }

+    /// `connection_heads` is a mapping of rpc_names to head block hashes.
+    /// self.blockchain_map is a mapping of hashes to the complete ArcBlock.
+    /// TODO: return something?
+    /// TODO: move this onto ConsensusFinder
+    pub(super) async fn refresh(
+        &mut self,
+        web3_rpcs: &Web3Rpcs,
+        authorization: &Arc<Authorization>,
+        rpc: Option<&Arc<Web3Rpc>>,
+        new_block: Option<Web3ProxyBlock>,
+    ) -> Web3ProxyResult<()> {
+        let new_consensus_rpcs = match self
+            .find_consensus_connections(authorization, web3_rpcs)
+            .await
+        {
+            Err(err) => {
+                return Err(err).web3_context("error while finding consensus head block!");
+            }
+            Ok(None) => {
+                return Err(Web3ProxyError::NoConsensusHeadBlock);
+            }
+            Ok(Some(x)) => x,
+        };
+
+        trace!("new_synced_connections: {:#?}", new_consensus_rpcs);
+
+        let watch_consensus_head_sender = web3_rpcs.watch_consensus_head_sender.as_ref().unwrap();
+        let consensus_tier = new_consensus_rpcs.tier;
+        // TODO: think more about the default for total_tiers
+        let total_tiers = self.worst_tier().unwrap_or_default();
+        let backups_needed = new_consensus_rpcs.backups_needed;
+        let consensus_head_block = new_consensus_rpcs.head_block.clone();
+        let num_consensus_rpcs = new_consensus_rpcs.num_consensus_rpcs();
+        let num_active_rpcs = self.len();
+        let total_rpcs = web3_rpcs.len();
+
+        let new_consensus_rpcs = Arc::new(new_consensus_rpcs);
+
+        let old_consensus_head_connections = web3_rpcs
+            .watch_consensus_rpcs_sender
+            .send_replace(Some(new_consensus_rpcs.clone()));
+
+        let backups_voted_str = if backups_needed { "B " } else { "" };
+
+        let rpc_head_str = if let Some(rpc) = rpc.as_ref() {
+            format!(
+                "{}@{}",
+                rpc,
+                new_block
+                    .map(|x| x.to_string())
+                    .unwrap_or_else(|| "None".to_string()),
+            )
+        } else {
+            "None".to_string()
+        };
+
+        match old_consensus_head_connections.as_ref() {
+            None => {
+                debug!(
+                    "first {}/{} {}{}/{}/{} block={}, rpc={}",
+                    consensus_tier,
+                    total_tiers,
+                    backups_voted_str,
+                    num_consensus_rpcs,
+                    num_active_rpcs,
+                    total_rpcs,
+                    consensus_head_block,
+                    rpc_head_str,
+                );
+
+                if backups_needed {
+                    // TODO: what else should be in this error?
+                    warn!("Backup RPCs are in use!");
+                }
+
+                // this should already be cached
+                let consensus_head_block = web3_rpcs
+                    .try_cache_block(consensus_head_block, true)
+                    .await?;
+
+                watch_consensus_head_sender
+                    .send(Some(consensus_head_block))
+                    .or(Err(Web3ProxyError::WatchSendError))
+                    .web3_context(
+                        "watch_consensus_head_sender failed sending first consensus_head_block",
+                    )?;
+            }
+            Some(old_consensus_connections) => {
+                let old_head_block = &old_consensus_connections.head_block;
+
+                match consensus_head_block.number().cmp(old_head_block.number()) {
+                    Ordering::Equal => {
+                        // multiple blocks with the same fork!
+                        if consensus_head_block.hash() == old_head_block.hash() {
+                            // no change in hash. no need to use watch_consensus_head_sender
+                            // TODO: trace level if rpc is backup
+                            debug!(
+                                "con {}/{} {}{}/{}/{} con={} rpc={}",
+                                consensus_tier,
+                                total_tiers,
+                                backups_voted_str,
+                                num_consensus_rpcs,
+                                num_active_rpcs,
+                                total_rpcs,
+                                consensus_head_block,
+                                rpc_head_str,
+                            )
+                        } else {
+                            // hash changed
+
+                            debug!(
+                                "unc {}/{} {}{}/{}/{} con={} old={} rpc={}",
+                                consensus_tier,
+                                total_tiers,
+                                backups_voted_str,
+                                num_consensus_rpcs,
+                                num_active_rpcs,
+                                total_rpcs,
+                                consensus_head_block,
+                                old_head_block,
+                                rpc_head_str,
+                            );
+
+                            let consensus_head_block = web3_rpcs
+                                .try_cache_block(consensus_head_block, true)
+                                .await
+                                .web3_context("save consensus_head_block as heaviest chain")?;
+
+                            watch_consensus_head_sender
+                                .send(Some(consensus_head_block))
+                                .or(Err(Web3ProxyError::WatchSendError))
+                                .web3_context("watch_consensus_head_sender failed sending uncled consensus_head_block")?;
+                        }
+                    }
+                    Ordering::Less => {
+                        // this is unlikely but possible
+                        // TODO: better log that includes all the votes
+                        warn!(
+                            "chain rolled back {}/{} {}{}/{}/{} con={} old={} rpc={}",
+                            consensus_tier,
+                            total_tiers,
+                            backups_voted_str,
+                            num_consensus_rpcs,
+                            num_active_rpcs,
+                            total_rpcs,
+                            consensus_head_block,
+                            old_head_block,
+                            rpc_head_str,
+                        );
+
+                        if backups_needed {
+                            // TODO: what else should be in this error?
+                            warn!("Backup RPCs are in use!");
+                        }
+
+                        // TODO: tell save_block to remove any higher block numbers from the cache. not needed because we have other checks on requested blocks being > head, but still seems like a good idea
+                        let consensus_head_block = web3_rpcs
+                            .try_cache_block(consensus_head_block, true)
+                            .await
+                            .web3_context(
+                                "save_block sending consensus_head_block as heaviest chain",
+                            )?;
+
+                        watch_consensus_head_sender
+                            .send(Some(consensus_head_block))
+                            .or(Err(Web3ProxyError::WatchSendError))
+                            .web3_context("watch_consensus_head_sender failed sending rollback consensus_head_block")?;
+                    }
+                    Ordering::Greater => {
+                        debug!(
+                            "new {}/{} {}{}/{}/{} con={} rpc={}",
+                            consensus_tier,
+                            total_tiers,
+                            backups_voted_str,
+                            num_consensus_rpcs,
+                            num_active_rpcs,
+                            total_rpcs,
+                            consensus_head_block,
+                            rpc_head_str,
+                        );
+
+                        if backups_needed {
+                            // TODO: what else should be in this error?
+                            warn!("Backup RPCs are in use!");
+                        }
+
+                        let consensus_head_block = web3_rpcs
+                            .try_cache_block(consensus_head_block, true)
+                            .await?;
+
+                        watch_consensus_head_sender.send(Some(consensus_head_block))
+                            .or(Err(Web3ProxyError::WatchSendError))
+                            .web3_context("watch_consensus_head_sender failed sending new consensus_head_block")?;
+                    }
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    pub(super) async fn process_block_from_rpc(
+        &mut self,
+        web3_rpcs: &Web3Rpcs,
+        authorization: &Arc<Authorization>,
+        new_block: Option<Web3ProxyBlock>,
+        rpc: Arc<Web3Rpc>,
+        _pending_tx_sender: &Option<broadcast::Sender<TxStatus>>,
+    ) -> Web3ProxyResult<()> {
+        // TODO: how should we handle an error here?
+        if !self
+            .update_rpc(new_block.clone(), rpc.clone(), web3_rpcs)
+            .await
+            .web3_context("failed to update rpc")?
+        {
+            // nothing changed. no need to scan for a new consensus head
+            return Ok(());
+        }
+
+        self.refresh(web3_rpcs, authorization, Some(&rpc), new_block)
+            .await
+    }
+
     fn remove(&mut self, rpc: &Arc<Web3Rpc>) -> Option<Web3ProxyBlock> {
         self.rpc_heads.remove(rpc)
     }
@@ -590,14 +815,17 @@ impl ConsensusFinder {
             backup_entry.0.insert(rpc);
             backup_entry.1 += rpc.soft_limit;

+            // we used to specify rpc on this, but it shouldn't be necessary
+            let parent_hash = block_to_check.parent_hash();
             match web3_rpcs
-                .block(authorization, block_to_check.parent_hash(), Some(rpc))
+                .block(authorization, parent_hash, None, None)
                 .await
             {
                 Ok(parent_block) => block_to_check = parent_block,
                 Err(err) => {
-                    warn!(
-                        "Problem fetching parent block of {:?} during consensus finding: {:#?}",
+                    debug!(
+                        "Problem fetching {:?} (parent of {:?}) during consensus finding: {:#?}",
+                        parent_hash,
                         block_to_check.hash(),
                         err
                     );
@@ -40,6 +40,7 @@ use tokio::time::{sleep, sleep_until, Duration, Instant};
 pub struct Web3Rpcs {
     /// TODO: this should be a Cow
     pub(crate) name: String,
+    pub(crate) chain_id: u64,
     /// if watch_consensus_head_sender is some, Web3Rpc inside self will send blocks here when they get them
     pub(crate) block_sender: flume::Sender<(Option<Web3ProxyBlock>, Arc<Web3Rpc>)>,
     /// any requests will be forwarded to one (or more) of these connections
@@ -126,6 +127,7 @@ impl Web3Rpcs {
             blocks_by_hash,
             blocks_by_number,
             by_name,
+            chain_id,
             max_head_block_age,
             max_head_block_lag,
             min_synced_rpcs: min_head_rpcs,
@@ -515,6 +517,8 @@ impl Web3Rpcs {
         max_wait: Option<Duration>,
         error_handler: Option<RequestErrorHandler>,
     ) -> Web3ProxyResult<OpenRequestResult> {
+        let start = Instant::now();
+
         let mut earliest_retry_at: Option<Instant> = None;

         // TODO: pass db_conn to the "default" authorization for revert logging
@@ -561,9 +565,6 @@ impl Web3Rpcs {
                 }
             }
         } else {
-            let stop_trying_at =
-                Instant::now() + max_wait.unwrap_or_else(|| Duration::from_secs(10));
-
             let mut watch_consensus_rpcs = self.watch_consensus_rpcs_sender.subscribe();

             let mut potential_rpcs = Vec::with_capacity(self.len());
@@ -703,23 +704,31 @@ impl Web3Rpcs {

                 let waiting_for = min_block_needed.max(max_block_needed);

+                if let Some(max_wait) = max_wait {
                     match consensus_rpcs.should_wait_for_block(waiting_for, skip_rpcs) {
                         ShouldWaitForBlock::NeverReady => break,
                         ShouldWaitForBlock::Ready => {
-                            if Instant::now() > stop_trying_at {
+                            if start.elapsed() > max_wait {
                                 break;
                             }
                         }
                         ShouldWaitForBlock::Wait { .. } => select! {
-                            _ = watch_consensus_rpcs.changed() => {},
-                            _ = sleep_until(stop_trying_at) => break,
+                            _ = watch_consensus_rpcs.changed() => {
+                                // no need to borrow_and_update because we do that at the top of the loop
+                            },
+                            _ = sleep_until(start + max_wait) => break,
                         },
                     }
-            } else {
-                select! {
-                    _ = watch_consensus_rpcs.changed() => {},
-                    _ = sleep_until(stop_trying_at) => break,
                 }
+            } else if let Some(max_wait) = max_wait {
+                select! {
+                    _ = watch_consensus_rpcs.changed() => {
+                        // no need to borrow_and_update because we do that at the top of the loop
+                    },
+                    _ = sleep_until(start + max_wait) => break,
+                }
+            } else {
+                break;
             }
         }
     }
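`wait_for_best_rpc` now measures one shared deadline from `start` instead of recomputing a `stop_trying_at`, and treats `max_wait` as optional. A sketch of that wait shape, assuming a `tokio::sync::watch` channel signals that the usable rpc set changed (the generation counter is hypothetical):

```rust
use std::time::Duration;
use tokio::sync::watch;
use tokio::time::{sleep_until, Instant};

async fn wait_for_change(
    mut rpcs_changed: watch::Receiver<u64>,
    max_wait: Option<Duration>,
) -> bool {
    let start = Instant::now();
    loop {
        // pretend generation 0 means "nothing usable yet"
        if *rpcs_changed.borrow_and_update() > 0 {
            return true;
        }
        let Some(max_wait) = max_wait else {
            // no deadline configured: nothing to wait on in this sketch
            return false;
        };
        if start.elapsed() > max_wait {
            return false;
        }
        tokio::select! {
            // every retry shares one deadline measured from `start`
            _ = sleep_until(start + max_wait) => return false,
            res = rpcs_changed.changed() => {
                if res.is_err() {
                    // sender dropped
                    return false;
                }
            }
        }
    }
}
```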
@@ -769,6 +778,11 @@ impl Web3Rpcs {

         trace!("max_count: {}", max_count);

+        if max_count == 0 {
+            // TODO: return a future that resolves when we know a head block?
+            return Err(None);
+        }
+
         let mut selected_rpcs = Vec::with_capacity(max_count);

         let mut tried = HashSet::new();
@@ -779,6 +793,7 @@ impl Web3Rpcs {
             if let Some(synced_rpcs) = synced_rpcs.as_ref() {
                 synced_rpcs.head_rpcs.clone()
             } else {
+                // TODO: make this an Option instead of making an empty vec?
                 vec![]
             }
         };
@@ -800,10 +815,6 @@ impl Web3Rpcs {
             .unwrap_or_default();

         for rpc in itertools::chain(synced_rpcs, all_rpcs) {
-            if max_count == 0 {
-                break;
-            }
-
             if tried.contains(&rpc) {
                 continue;
             }
@@ -813,10 +824,11 @@ impl Web3Rpcs {
             tried.insert(rpc.clone());

             if !allow_backups && rpc.backup {
-                warn!("{} is a backup. skipping", rpc);
+                trace!("{} is a backup. skipping", rpc);
                 continue;
             }

+            // TODO: this has_block_data check is in a few places now. move it onto the rpc
             if let Some(block_needed) = min_block_needed {
                 if !rpc.has_block_data(block_needed) {
                     trace!("{} is missing min_block_needed. skipping", rpc);
@@ -840,8 +852,12 @@ impl Web3Rpcs {
                 }
                 Ok(OpenRequestResult::Handle(handle)) => {
                     trace!("{} is available", rpc);
+                    selected_rpcs.push(handle);
+
                     max_count -= 1;
-                    selected_rpcs.push(handle)
+                    if max_count == 0 {
+                        break;
+                    }
                 }
                 Ok(OpenRequestResult::NotReady) => {
                     warn!("no request handle for {}", rpc)
@@ -864,9 +880,10 @@ impl Web3Rpcs {
         &self,
         method: &str,
         params: &P,
+        max_wait: Option<Duration>,
     ) -> Web3ProxyResult<R> {
         // TODO: no request_metadata means we won't have stats on this internal request.
-        self.request_with_metadata(method, params, None, None, None)
+        self.request_with_metadata(method, params, None, max_wait, None, None)
             .await
     }

@@ -876,6 +893,7 @@ impl Web3Rpcs {
         method: &str,
         params: &P,
         request_metadata: Option<&Arc<RequestMetadata>>,
+        max_wait: Option<Duration>,
         min_block_needed: Option<&U64>,
         max_block_needed: Option<&U64>,
     ) -> Web3ProxyResult<R> {
@@ -886,20 +904,24 @@ impl Web3Rpcs {

         let start = Instant::now();

-        // TODO: get from config or arguments
-        let max_wait = Duration::from_secs(10);
+        // set error_handler to Save. this might be overridden depending on the request_metadata.authorization

         let error_handler = Some(RequestErrorHandler::Save);

         // TODO: the loop here feels somewhat redundant with the loop in best_available_rpc
-        while start.elapsed() < max_wait {
+        loop {
+            if let Some(max_wait) = max_wait {
+                if start.elapsed() > max_wait {
+                    break;
+                }
+            }
+
             match self
                 .wait_for_best_rpc(
                     request_metadata,
                     &mut skip_rpcs,
                     min_block_needed,
                     max_block_needed,
-                    None,
+                    max_wait,
                     error_handler,
                 )
                 .await?
@@ -1135,18 +1157,22 @@ impl Web3Rpcs {
         request_metadata: Option<&Arc<RequestMetadata>>,
         min_block_needed: Option<&U64>,
         max_block_needed: Option<&U64>,
+        max_wait: Option<Duration>,
         error_level: Option<RequestErrorHandler>,
         max_sends: Option<usize>,
         include_backups: bool,
     ) -> Web3ProxyResult<Box<RawValue>> {
         let mut watch_consensus_rpcs = self.watch_consensus_rpcs_sender.subscribe();

-        // TODO: get from config or function arguments
-        let max_wait = Duration::from_secs(5);
+        let start = Instant::now();

-        let wait_until = Instant::now() + max_wait;
-
-        while Instant::now() < wait_until {
+        loop {
+            if let Some(max_wait) = max_wait {
+                if start.elapsed() > max_wait {
+                    break;
+                }
+            }
+
             match self
                 .all_connections(
                     request_metadata,
@@ -1197,27 +1223,43 @@ impl Web3Rpcs {
                         request_metadata.no_servers.fetch_add(1, Ordering::AcqRel);
                     }

+                    let max_sleep = if let Some(max_wait) = max_wait {
+                        start + max_wait
+                    } else {
+                        break;
+                    };
+
                     tokio::select! {
-                        _ = sleep_until(wait_until) => break,
+                        _ = sleep_until(max_sleep) => {
+                            // rpcs didn't change and we have waited too long. break to return an error
+                            warn!("timeout waiting for try_send_all_synced_connections!");
+                            break;
+                        },
                         _ = watch_consensus_rpcs.changed() => {
+                            // consensus rpcs changed!
                             watch_consensus_rpcs.borrow_and_update();
+                            // continue to try again
                         }
                     }

                     continue;
                 }
                 Err(Some(retry_at)) => {
                     if let Some(request_metadata) = &request_metadata {
                         request_metadata.no_servers.fetch_add(1, Ordering::AcqRel);
                     }

-                    if retry_at > wait_until {
+                    if let Some(max_wait) = max_wait {
+                        if start.elapsed() > max_wait {
                             warn!("All rate limits exceeded. And sleeping would take too long");
                             break;
                         }

                         warn!("All rate limits exceeded. Sleeping");

+                        // TODO: only make one of these sleep_untils
+
                         tokio::select! {
+                            _ = sleep_until(start + max_wait) => {break}
                             _ = sleep_until(retry_at) => {}
                             _ = watch_consensus_rpcs.changed() => {
                                 watch_consensus_rpcs.borrow_and_update();
@@ -1225,6 +1267,10 @@ impl Web3Rpcs {
                             }
                         }

                         continue;
+                    } else {
+                        warn!("All rate limits exceeded.");
+                        break;
+                    }
                 }
             }
         }
@@ -1237,6 +1283,7 @@ impl Web3Rpcs {
         method: &str,
         params: &P,
         request_metadata: Option<&Arc<RequestMetadata>>,
+        max_wait: Option<Duration>,
         min_block_needed: Option<&U64>,
         max_block_needed: Option<&U64>,
     ) -> Web3ProxyResult<R> {
@@ -1248,6 +1295,7 @@ impl Web3Rpcs {
             method,
             params,
             request_metadata,
+            max_wait,
             min_block_needed,
             max_block_needed,
         )
@ -1518,10 +1566,13 @@ mod tests {
|
|||||||
let (watch_consensus_rpcs_sender, _watch_consensus_rpcs_receiver) = watch::channel(None);
|
let (watch_consensus_rpcs_sender, _watch_consensus_rpcs_receiver) = watch::channel(None);
|
||||||
let (watch_consensus_head_sender, _watch_consensus_head_receiver) = watch::channel(None);
|
let (watch_consensus_head_sender, _watch_consensus_head_receiver) = watch::channel(None);
|
||||||
|
|
||||||
|
let chain_id = 1;
|
||||||
|
|
||||||
// TODO: make a Web3Rpcs::new
|
// TODO: make a Web3Rpcs::new
|
||||||
let rpcs = Web3Rpcs {
|
let rpcs = Web3Rpcs {
|
||||||
block_sender: block_sender.clone(),
|
block_sender: block_sender.clone(),
|
||||||
by_name: RwLock::new(rpcs_by_name),
|
by_name: RwLock::new(rpcs_by_name),
|
||||||
|
chain_id,
|
||||||
name: "test".to_string(),
|
name: "test".to_string(),
|
||||||
watch_consensus_head_sender: Some(watch_consensus_head_sender),
|
watch_consensus_head_sender: Some(watch_consensus_head_sender),
|
||||||
watch_consensus_rpcs_sender,
|
watch_consensus_rpcs_sender,
|
||||||
@ -1548,23 +1599,14 @@ mod tests {
|
|||||||
|
|
||||||
let mut consensus_finder = ConsensusFinder::new(None, None);
|
let mut consensus_finder = ConsensusFinder::new(None, None);
|
||||||
|
|
||||||
// process None so that
|
consensus_finder
|
||||||
rpcs.process_block_from_rpc(
|
.process_block_from_rpc(&rpcs, &authorization, None, lagged_rpc.clone(), &None)
|
||||||
&authorization,
|
|
||||||
&mut consensus_finder,
|
|
||||||
None,
|
|
||||||
lagged_rpc.clone(),
|
|
||||||
&None,
|
|
||||||
)
|
|
||||||
.await
|
.await
|
||||||
.expect("its lagged, but it should still be seen as consensus if its the first to report");
|
.expect(
|
||||||
rpcs.process_block_from_rpc(
|
"its lagged, but it should still be seen as consensus if its the first to report",
|
||||||
&authorization,
|
);
|
||||||
&mut consensus_finder,
|
consensus_finder
|
||||||
None,
|
.process_block_from_rpc(&rpcs, &authorization, None, head_rpc.clone(), &None)
|
||||||
head_rpc.clone(),
|
|
||||||
&None,
|
|
||||||
)
|
|
||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
@ -1607,10 +1649,11 @@ mod tests {
|
|||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
// TODO: this is fragile
|
// TODO: calling process_block_from_rpc and send_head_block_result seperate seems very fragile
|
||||||
rpcs.process_block_from_rpc(
|
consensus_finder
|
||||||
|
.process_block_from_rpc(
|
||||||
|
&rpcs,
|
||||||
&authorization,
|
&authorization,
|
||||||
&mut consensus_finder,
|
|
||||||
Some(lagged_block.clone().try_into().unwrap()),
|
Some(lagged_block.clone().try_into().unwrap()),
|
||||||
lagged_rpc.clone(),
|
lagged_rpc.clone(),
|
||||||
&None,
|
&None,
|
||||||
@ -1628,9 +1671,10 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
// TODO: this is fragile
|
// TODO: this is fragile
|
||||||
rpcs.process_block_from_rpc(
|
consensus_finder
|
||||||
|
.process_block_from_rpc(
|
||||||
|
&rpcs,
|
||||||
&authorization,
|
&authorization,
|
||||||
&mut consensus_finder,
|
|
||||||
Some(lagged_block.clone().try_into().unwrap()),
|
Some(lagged_block.clone().try_into().unwrap()),
|
||||||
head_rpc.clone(),
|
head_rpc.clone(),
|
||||||
&None,
|
&None,
|
||||||
@ -1660,9 +1704,10 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
// TODO: this is fragile
|
// TODO: this is fragile
|
||||||
rpcs.process_block_from_rpc(
|
consensus_finder
|
||||||
|
.process_block_from_rpc(
|
||||||
|
&rpcs,
|
||||||
&authorization,
|
&authorization,
|
||||||
&mut consensus_finder,
|
|
||||||
Some(head_block.clone().try_into().unwrap()),
|
Some(head_block.clone().try_into().unwrap()),
|
||||||
head_rpc.clone(),
|
head_rpc.clone(),
|
||||||
&None,
|
&None,
|
||||||
@ -1799,9 +1844,12 @@ mod tests {
|
|||||||
let (watch_consensus_rpcs_sender, _) = watch::channel(None);
|
let (watch_consensus_rpcs_sender, _) = watch::channel(None);
|
||||||
let (watch_consensus_head_sender, _watch_consensus_head_receiver) = watch::channel(None);
|
let (watch_consensus_head_sender, _watch_consensus_head_receiver) = watch::channel(None);
|
||||||
|
|
||||||
|
let chain_id = 1;
|
||||||
|
|
||||||
let rpcs = Web3Rpcs {
|
let rpcs = Web3Rpcs {
|
||||||
block_sender,
|
block_sender,
|
||||||
by_name: RwLock::new(rpcs_by_name),
|
by_name: RwLock::new(rpcs_by_name),
|
||||||
|
chain_id,
|
||||||
name: "test".to_string(),
|
name: "test".to_string(),
|
||||||
watch_consensus_head_sender: Some(watch_consensus_head_sender),
|
watch_consensus_head_sender: Some(watch_consensus_head_sender),
|
||||||
watch_consensus_rpcs_sender,
|
watch_consensus_rpcs_sender,
|
||||||
@ -1827,9 +1875,10 @@ mod tests {
|
|||||||
let mut connection_heads = ConsensusFinder::new(None, None);
|
let mut connection_heads = ConsensusFinder::new(None, None);
|
||||||
|
|
||||||
// min sum soft limit will require tier 2
|
// min sum soft limit will require tier 2
|
||||||
rpcs.process_block_from_rpc(
|
connection_heads
|
||||||
|
.process_block_from_rpc(
|
||||||
|
&rpcs,
|
||||||
&authorization,
|
&authorization,
|
||||||
&mut connection_heads,
|
|
||||||
Some(head_block.clone()),
|
Some(head_block.clone()),
|
||||||
pruned_rpc.clone(),
|
pruned_rpc.clone(),
|
||||||
&None,
|
&None,
|
||||||
@ -1837,9 +1886,10 @@ mod tests {
|
|||||||
.await
|
.await
|
||||||
.unwrap_err();
|
.unwrap_err();
|
||||||
|
|
||||||
rpcs.process_block_from_rpc(
|
connection_heads
|
||||||
|
.process_block_from_rpc(
|
||||||
|
&rpcs,
|
||||||
&authorization,
|
&authorization,
|
||||||
&mut connection_heads,
|
|
||||||
Some(head_block.clone()),
|
Some(head_block.clone()),
|
||||||
archive_rpc.clone(),
|
archive_rpc.clone(),
|
||||||
&None,
|
&None,
|
||||||
@ -1984,10 +2034,13 @@ mod tests {
|
|||||||
let (watch_consensus_rpcs_sender, _) = watch::channel(None);
|
let (watch_consensus_rpcs_sender, _) = watch::channel(None);
|
||||||
let (watch_consensus_head_sender, _watch_consensus_head_receiver) = watch::channel(None);
|
let (watch_consensus_head_sender, _watch_consensus_head_receiver) = watch::channel(None);
|
||||||
|
|
||||||
|
let chain_id = 1;
|
||||||
|
|
||||||
// TODO: make a Web3Rpcs::new
|
// TODO: make a Web3Rpcs::new
|
||||||
let rpcs = Web3Rpcs {
|
let rpcs = Web3Rpcs {
|
||||||
block_sender,
|
block_sender,
|
||||||
by_name: RwLock::new(rpcs_by_name),
|
by_name: RwLock::new(rpcs_by_name),
|
||||||
|
chain_id,
|
||||||
name: "test".to_string(),
|
name: "test".to_string(),
|
||||||
watch_consensus_head_sender: Some(watch_consensus_head_sender),
|
watch_consensus_head_sender: Some(watch_consensus_head_sender),
|
||||||
watch_consensus_rpcs_sender,
|
watch_consensus_rpcs_sender,
|
||||||
@ -2004,11 +2057,12 @@ mod tests {
|
|||||||
|
|
||||||
let authorization = Arc::new(Authorization::internal(None).unwrap());
|
let authorization = Arc::new(Authorization::internal(None).unwrap());
|
||||||
|
|
||||||
let mut connection_heads = ConsensusFinder::new(None, None);
|
let mut consensus_finder = ConsensusFinder::new(None, None);
|
||||||
|
|
||||||
rpcs.process_block_from_rpc(
|
consensus_finder
|
||||||
|
.process_block_from_rpc(
|
||||||
|
&rpcs,
|
||||||
&authorization,
|
&authorization,
|
||||||
&mut connection_heads,
|
|
||||||
Some(block_1.clone()),
|
Some(block_1.clone()),
|
||||||
mock_geth.clone(),
|
mock_geth.clone(),
|
||||||
&None,
|
&None,
|
||||||
@@ -2016,9 +2070,10 @@ mod tests {
             .await
             .unwrap();

-        rpcs.process_block_from_rpc(
+        consensus_finder
+            .process_block_from_rpc(
+                &rpcs,
                 &authorization,
-                &mut connection_heads,
                 Some(block_2.clone()),
                 mock_erigon_archive.clone(),
                 &None,
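Across these four test hunks the call flips direction: process_block_from_rpc moves from Web3Rpcs (which needed `&mut` access to the finder) onto ConsensusFinder itself, which takes `&mut self` and only borrows the connection set. A minimal sketch of that ownership shape, with simplified stand-in types rather than the real definitions:

use std::sync::Arc;

// stand-ins; only the receiver/borrow shape is taken from the diff
struct Web3Rpcs;
struct Web3Rpc;
struct Block;
struct ConsensusFinder;

impl ConsensusFinder {
    // the finder owns the mutable head-tracking state, so it is the natural
    // `&mut self` receiver; the shared connection set is only read
    async fn process_block_from_rpc(
        &mut self,
        _rpcs: &Web3Rpcs,
        _new_block: Option<Arc<Block>>,
        _rpc: Arc<Web3Rpc>,
    ) -> Result<(), String> {
        // record the rpc's reported head, then recompute consensus if it changed
        Ok(())
    }
}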
@@ -29,7 +29,7 @@ use std::hash::{Hash, Hasher};
 use std::sync::atomic::{self, AtomicU32, AtomicU64, AtomicUsize};
 use std::{cmp::Ordering, sync::Arc};
 use tokio::sync::watch;
-use tokio::time::{interval, sleep, sleep_until, timeout, Duration, Instant, MissedTickBehavior};
+use tokio::time::{interval, sleep, sleep_until, Duration, Instant, MissedTickBehavior};
 use url::Url;

 /// An active connection to a Web3 RPC server like geth or erigon.
@@ -318,17 +318,16 @@ impl Web3Rpc {
         // TODO: binary search between 90k and max?
         // TODO: start at 0 or 1?
         for block_data_limit in [0, 32, 64, 128, 256, 512, 1024, 90_000, u64::MAX] {
-            let head_block_num_future = self.internal_request::<_, U256>(
-                "eth_blockNumber",
-                &(),
-                // error here are expected, so keep the level low
-                Some(Level::Debug.into()),
-            );
-
-            let head_block_num = timeout(Duration::from_secs(5), head_block_num_future)
+            let head_block_num = self
+                .internal_request::<_, U256>(
+                    "eth_blockNumber",
+                    &(),
+                    // error here are expected, so keep the level low
+                    Some(Level::Debug.into()),
+                    Some(Duration::from_secs(5)),
+                )
                 .await
-                .context("timeout fetching eth_blockNumber")?
-                .context("provider error")?;
+                .context("head_block_num error during check_block_data_limit")?;

             let maybe_archive_block = head_block_num.saturating_sub((block_data_limit).into());

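This hunk is the heart of the timeout cleanup: rather than building the request future and wrapping it in tokio::time::timeout at the call site, the five-second budget now rides along as the max_wait argument, so the lower layers can charge rate-limit waiting against the same deadline. A minimal sketch of the pattern, assuming anyhow for errors and a stand-in send function in place of the proxy's real request plumbing:

use std::time::Duration;
use tokio::time::timeout;

// stand-in for the actual JSON-RPC call
async fn send(method: &str) -> anyhow::Result<String> {
    Ok(format!("{method} response"))
}

// before: callers wrapped the future in timeout() themselves;
// after: one optional deadline covers queueing plus the request
async fn internal_request(method: &str, max_wait: Option<Duration>) -> anyhow::Result<String> {
    let fut = send(method);
    match max_wait {
        Some(max_wait) => timeout(max_wait, fut).await?, // Elapsed converts into the error
        None => fut.await,
    }
}

With the old shape, the timeout only bounded the request future itself; anything spent waiting for a rate-limited handle fell outside it.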
@@ -349,6 +348,7 @@ impl Web3Rpc {
                 )),
                 // error here are expected, so keep the level low
                 Some(Level::Trace.into()),
+                Some(Duration::from_secs(5)),
             )
             .await;

@@ -434,7 +434,12 @@ impl Web3Rpc {
         // TODO: what should the timeout be? should there be a request timeout?
         // trace!("waiting on chain id for {}", self);
         let found_chain_id: U64 = self
-            .internal_request("eth_chainId", &(), Some(Level::Trace.into()))
+            .internal_request(
+                "eth_chainId",
+                &(),
+                Some(Level::Trace.into()),
+                Some(Duration::from_secs(5)),
+            )
             .await?;

         trace!("found_chain_id: {:#?}", found_chain_id);
@@ -556,6 +561,7 @@ impl Web3Rpc {
                 "eth_getTransactionByHash",
                 &(txid,),
                 error_handler,
+                Some(Duration::from_secs(5)),
             )
             .await?
             .context("no transaction")?;
@@ -577,6 +583,7 @@ impl Web3Rpc {
                 "eth_getCode",
                 &(to, block_number),
                 error_handler,
+                Some(Duration::from_secs(5)),
             )
             .await?;
         } else {
@@ -821,6 +828,7 @@ impl Web3Rpc {
                 &("latest", false),
                 &authorization,
                 Some(Level::Warn.into()),
+                Some(Duration::from_secs(5)),
             )
             .await;

@@ -839,7 +847,7 @@ impl Web3Rpc {
             .await?;
             }
         } else if self.http_provider.is_some() {
-            // there is a "watch_blocks" function, but a lot of public nodes do not support the necessary rpc endpoints
+            // there is a "watch_blocks" function, but a lot of public nodes (including llamanodes) do not support the necessary rpc endpoints
             // TODO: is 1/2 the block time okay?
             let mut i = interval(self.block_interval / 2);
             i.set_missed_tick_behavior(MissedTickBehavior::Delay);
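For HTTP providers without subscriptions, the strategy stays the same: poll eth_getBlockByNumber("latest", ...) at half the block interval, now with a bounded per-poll wait. A small sketch of the loop shape; block_interval stands in for the struct field and the fetch itself is elided:

use tokio::time::{interval, Duration, MissedTickBehavior};

// polling at half the chain's block time means a new head is observed
// at most roughly half a block late
async fn poll_heads(block_interval: Duration) {
    let mut i = interval(block_interval / 2);
    // if one poll runs long, skip the missed ticks instead of bursting
    i.set_missed_tick_behavior(MissedTickBehavior::Delay);

    loop {
        i.tick().await;
        // the real loop requests ("latest", false) here with
        // Some(Duration::from_secs(5)) as the max wait
    }
}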
@@ -856,6 +864,7 @@ impl Web3Rpc {
                 &("latest", false),
                 &authorization,
                 Some(Level::Warn.into()),
+                Some(Duration::from_secs(5)),
             )
             .await;

@@ -1057,10 +1066,11 @@ impl Web3Rpc {
         method: &str,
         params: &P,
         error_handler: Option<RequestErrorHandler>,
+        max_wait: Option<Duration>,
     ) -> Web3ProxyResult<R> {
         let authorization = Default::default();

-        self.authorized_request(method, params, &authorization, error_handler)
+        self.authorized_request(method, params, &authorization, error_handler, max_wait)
             .await
     }

@@ -1070,10 +1080,11 @@ impl Web3Rpc {
         params: &P,
         authorization: &Arc<Authorization>,
         error_handler: Option<RequestErrorHandler>,
+        max_wait: Option<Duration>,
     ) -> Web3ProxyResult<R> {
         // TODO: take max_wait as a function argument?
         let x = self
-            .wait_for_request_handle(authorization, None, error_handler)
+            .wait_for_request_handle(authorization, max_wait, error_handler)
             .await?
             .request::<P, R>(method, params)
             .await?;
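Taken together, these two signature changes thread the caller's deadline down to the point where a rate-limited request handle is acquired. A condensed sketch of the layering; the real methods are generic over the params and return types and return Web3ProxyResult, which is simplified away here:

use std::time::Duration;

struct Web3Rpc;

impl Web3Rpc {
    // internal calls use a default authorization but the same deadline plumbing
    async fn internal_request(&self, method: &str, max_wait: Option<Duration>) -> Result<(), ()> {
        self.authorized_request(method, max_wait).await
    }

    async fn authorized_request(&self, _method: &str, max_wait: Option<Duration>) -> Result<(), ()> {
        // max_wait now bounds waiting for the handle, not just the request
        self.wait_for_request_handle(max_wait).await?;
        // ... send the request with the acquired handle ...
        Ok(())
    }

    async fn wait_for_request_handle(&self, _max_wait: Option<Duration>) -> Result<(), ()> {
        Ok(())
    }
}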
@@ -27,6 +27,7 @@ use migration::{Expr, LockType, OnConflict};
 use num_traits::ToPrimitive;
 use parking_lot::Mutex;
 use std::borrow::Cow;
+use std::mem;
 use std::num::NonZeroU64;
 use std::str::FromStr;
 use std::sync::atomic::{self, Ordering};
@@ -806,26 +807,25 @@ impl TryFrom<RequestMetadata> for RpcQueryStats {
             x => x,
         };

-        let method = metadata.method.clone();
-        let chain_id = metadata.chain_id;
-
-        let cu = ComputeUnit::new(&method, metadata.chain_id);
+        let cu = ComputeUnit::new(&metadata.method, metadata.chain_id);

         // TODO: get from config? a helper function? how should we pick this?
-        let usd_per_cu = match chain_id {
-            137 => Decimal::from_str("0.000000692307692307"),
-            _ => Decimal::from_str("0.000000692307692307"),
+        let usd_per_cu = match metadata.chain_id {
+            137 => Decimal::from_str("0.000000533333333333333"),
+            _ => Decimal::from_str("0.000000400000000000000"),
         }?;

         let cache_hit = !backend_rpcs_used.is_empty();

-        let compute_unit_cost = cu.cost(cache_hit, usd_per_cu);
+        let compute_unit_cost = cu.cost(archive_request, cache_hit, usd_per_cu);
+
+        let method = mem::take(&mut metadata.method);

         let x = Self {
             archive_request,
             authorization,
             backend_rpcs_used,
-            chain_id,
+            chain_id: metadata.chain_id,
             compute_unit_cost,
             error_response,
             method,
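Two details in this hunk deserve a note. mem::take moves the method String out of the metadata instead of cloning it (the commit's "don't clone method"), and cu.cost now receives the archive flag so archive traffic can be priced differently (the "archive check" from the commit message). The sketch below illustrates both; the ComputeUnit internals, surcharge, and discount are invented for illustration, and only the (archive_request, cache_hit, usd_per_cu) shape comes from the diff:

use std::mem;
use rust_decimal::Decimal;

struct ComputeUnit(Decimal);

impl ComputeUnit {
    // hypothetical weighting; the real model lives in the proxy's ComputeUnit
    fn cost(&self, archive_request: bool, cache_hit: bool, usd_per_cu: Decimal) -> Decimal {
        let mut cost = self.0 * usd_per_cu;
        if archive_request {
            cost *= Decimal::from(2); // assumed archive surcharge
        }
        if cache_hit {
            cost /= Decimal::from(2); // assumed cache discount
        }
        cost
    }
}

fn main() {
    let mut method = String::from("eth_getLogs");
    // take() swaps in an empty String: the original allocation moves out, no clone
    let owned = mem::take(&mut method);
    assert_eq!(method, "");
    assert_eq!(owned, "eth_getLogs");

    let cu = ComputeUnit(Decimal::from(10));
    let price = cu.cost(true, false, Decimal::new(4, 7)); // 0.0000004 USD per CU
    println!("{owned} costs {price}");
}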
@@ -1,8 +1,7 @@
-use std::str::FromStr;
-
 use axum::headers::authorization::Bearer;
 use migration::sea_orm::prelude::Uuid;
 use serde::Serialize;
+use std::str::FromStr;
 use ulid::Ulid;

 /// Key used for caching the user's login