don't return max depth so we can do canonical block lookups faster

This commit is contained in:
Bryan Stitt 2023-08-15 16:12:40 -07:00
parent aa71a406bb
commit b742a25729
4 changed files with 41 additions and 43 deletions

View File

@ -1712,6 +1712,9 @@ impl Web3ProxyApp {
}
};
// TODO: think more about timeouts
let max_wait = Some(Duration::from_secs(299));
if let Some(cache_key) = cache_key {
let from_block_num = cache_key.from_block_num().copied();
let to_block_num = cache_key.to_block_num().copied();
@ -1731,7 +1734,7 @@ impl Web3ProxyApp {
method,
params,
Some(request_metadata),
Some(Duration::from_secs(240)),
max_wait,
from_block_num.as_ref(),
to_block_num.as_ref(),
)
@ -1751,6 +1754,7 @@ impl Web3ProxyApp {
Err(Web3ProxyError::NullJsonRpcResult)
} else if response_data.num_bytes() > max_response_cache_bytes {
// don't cache really large requests
// TODO: emit a stat
Err(Web3ProxyError::JsonRpcResponse(response_data))
} else {
// TODO: response data should maybe be Arc<JsonRpcResponseEnum<Box<RawValue>>>, but that's more work
@ -1764,7 +1768,7 @@ impl Web3ProxyApp {
method,
params,
Some(request_metadata),
Some(Duration::from_secs(240)),
max_wait,
None,
None,
)

View File

@ -113,7 +113,7 @@ pub async fn clean_block_number(
// TODO: "BlockNumber" needs a better name
// TODO: move this to a helper function?
if let Ok(block_num) = serde_json::from_value::<U64>(x.clone()) {
let (block_hash, _) = rpcs
let block_hash = rpcs
.block_hash(&block_num)
.await
.context("fetching block hash from number")?;
@ -134,7 +134,7 @@ pub async fn clean_block_number(
if block_num == *latest_block.number() {
(latest_block.into(), change)
} else {
let (block_hash, _) = rpcs
let block_hash = rpcs
.block_hash(&block_num)
.await
.context("fetching block hash from number")?;
@ -256,6 +256,8 @@ impl CacheMode {
}
match method {
"net_listening" => Ok(CacheMode::CacheSuccessForever),
"net_version" => Ok(CacheMode::CacheSuccessForever),
"eth_gasPrice" => Ok(CacheMode::Cache {
block: head_block.into(),
cache_errors: false,
@ -307,7 +309,7 @@ impl CacheMode {
*x = json!(block_num);
}
let (block_hash, _) = rpcs.block_hash(&block_num).await?;
let block_hash = rpcs.block_hash(&block_num).await?;
BlockNumAndHash(block_num, block_hash)
} else {
@ -327,7 +329,7 @@ impl CacheMode {
*x = json!(block_num);
}
let (block_hash, _) = rpcs.block_hash(&block_num).await?;
let block_hash = rpcs.block_hash(&block_num).await?;
BlockNumAndHash(block_num, block_hash)
} else {

View File

@ -335,20 +335,27 @@ impl Web3Rpcs {
}
/// Convenience method to get the canonical block at a given block height.
pub async fn block_hash(&self, num: &U64) -> Web3ProxyResult<(H256, u64)> {
let (block, block_depth) = self.cannonical_block(num).await?;
pub async fn block_hash(&self, num: &U64) -> Web3ProxyResult<H256> {
let block = self.cannonical_block(num).await?;
let hash = *block.hash();
Ok((hash, block_depth))
Ok(hash)
}
/// Get the heaviest chain's block from cache or backend rpc
/// Caution! If a future block is requested, this might wait forever. Be sure to have a timeout outside of this!
pub async fn cannonical_block(&self, num: &U64) -> Web3ProxyResult<(Web3ProxyBlock, u64)> {
pub async fn cannonical_block(&self, num: &U64) -> Web3ProxyResult<Web3ProxyBlock> {
// we only have blocks by hash now
// maybe save them during save_block in a blocks_by_number Cache<U64, Vec<ArcBlock>>
// if there are multiple, use petgraph to find the one on the main chain (and remove the others if they have enough confirmations)
// TODO: if there are multiple, use petgraph to find the one on the main chain (and remove the others if they have enough confirmations)
// try to get the hash from our cache
if let Some(block_hash) = self.blocks_by_number.get(num) {
// TODO: sometimes this needs to fetch the block. why? i thought block_numbers would only be set if the block hash was set
// TODO: configurable max wait and rpc
return self.block(&block_hash, None, None).await;
}
let mut consensus_head_receiver = self
.watch_head_block
@ -384,18 +391,6 @@ impl Web3Rpcs {
}
}
let block_depth = (head_block_num - num).as_u64();
// try to get the hash from our cache
// deref to not keep the lock open
if let Some(block_hash) = self.blocks_by_number.get(num) {
// TODO: sometimes this needs to fetch the block. why? i thought block_numbers would only be set if the block hash was set
// TODO: configurable max wait and rpc
let block = self.block(&block_hash, None, None).await?;
return Ok((block, block_depth));
}
// block number not in cache. we need to ask an rpc for it
// TODO: this error is too broad
let response = self
@ -408,7 +403,7 @@ impl Web3Rpcs {
// the block was fetched using eth_getBlockByNumber, so it should have all fields and be on the heaviest chain
let block = self.try_cache_block(block, true).await?;
Ok((block, block_depth))
Ok(block)
}
pub(super) async fn process_incoming_blocks(

View File

@ -53,7 +53,8 @@ pub struct Web3Rpcs {
/// this head receiver makes it easy to wait until there is a new block
pub(super) watch_head_block: Option<watch::Sender<Option<Web3ProxyBlock>>>,
/// TODO: this map is going to grow forever unless we do some sort of pruning. maybe store pruned in redis?
/// all blocks, including orphans
/// all blocks, including uncles
/// TODO: i think uncles should be excluded
pub(super) blocks_by_hash: BlocksByHashCache,
/// blocks on the heaviest chain
pub(super) blocks_by_number: BlocksByNumberCache,
@ -87,7 +88,7 @@ impl Web3Rpcs {
// these blocks don't have full transactions, but they do have rather variable amounts of transaction hashes
// TODO: actual weighter on this
// TODO: time_to_idle instead?
let blocks_by_hash: BlocksByHashCache = CacheBuilder::new(1_000)
let blocks_by_hash: BlocksByHashCache = CacheBuilder::new(10_000)
.name("blocks_by_hash")
.time_to_idle(Duration::from_secs(30 * 60))
.build();
@ -95,7 +96,7 @@ impl Web3Rpcs {
// all block numbers are the same size, so no need for weigher
// TODO: limits from config
// TODO: time_to_idle instead?
let blocks_by_number = CacheBuilder::new(1_000)
let blocks_by_number = CacheBuilder::new(10_000)
.name("blocks_by_number")
.time_to_idle(Duration::from_secs(30 * 60))
.build();
@ -1768,21 +1769,17 @@ mod tests {
let rpcs = Web3Rpcs {
block_sender,
blocks_by_hash: CacheBuilder::new(100).build(),
blocks_by_number: CacheBuilder::new(100).build(),
by_name: RwLock::new(by_name),
chain_id,
max_head_block_age: Duration::from_secs(60),
max_head_block_lag: 5.into(),
min_sum_soft_limit: 4_000,
min_synced_rpcs: 1,
name: "test".into(),
watch_head_block: Some(watch_consensus_head_sender),
watch_ranked_rpcs,
blocks_by_hash: CacheBuilder::new(100)
.time_to_live(Duration::from_secs(120))
.build(),
blocks_by_number: CacheBuilder::new(100)
.time_to_live(Duration::from_secs(120))
.build(),
min_synced_rpcs: 1,
min_sum_soft_limit: 4_000,
max_head_block_age: Duration::from_secs(60),
max_head_block_lag: 5.into(),
};
let mut connection_heads = ConsensusFinder::new(None, None);
@ -1936,17 +1933,17 @@ mod tests {
// TODO: make a Web3Rpcs::new
let rpcs = Web3Rpcs {
block_sender,
blocks_by_hash: Cache::new(100),
blocks_by_number: Cache::new(100),
by_name: RwLock::new(by_name),
chain_id,
max_head_block_age: Duration::from_secs(60),
max_head_block_lag: 5.into(),
min_sum_soft_limit: 1_000,
min_synced_rpcs: 1,
name: "test".into(),
watch_head_block: Some(watch_consensus_head_sender),
watch_ranked_rpcs,
blocks_by_hash: Cache::new(10_000),
blocks_by_number: Cache::new(10_000),
min_synced_rpcs: 1,
min_sum_soft_limit: 1_000,
max_head_block_age: Duration::from_secs(60),
max_head_block_lag: 5.into(),
};
let mut consensus_finder = ConsensusFinder::new(None, None);