//! Keep track of the blockchain as seen by a Web3Rpcs.
use super::consensus::ConsensusFinder;
use super::many::Web3Rpcs;
use super::one::Web3Rpc;
use crate::config::{average_block_interval, BlockAndRpc};
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
use derive_more::From;
use ethers::prelude::{Block, TxHash, H256, U64};
use moka::future::Cache;
use serde::ser::SerializeStruct;
use serde::Serialize;
use serde_json::json;
use std::hash::Hash;
use std::time::Duration;
use std::{fmt::Display, sync::Arc};
use tokio::sync::mpsc;
use tokio::time::timeout;
use tracing::{debug, error, warn};

// TODO: type for Hydrated Blocks with their full transactions?
pub type ArcBlock = Arc<Block<TxHash>>;

pub type BlocksByHashCache = Cache<H256, Web3ProxyBlock>;
pub type BlocksByNumberCache = Cache<U64, H256>;

/// A block and its age.
#[derive(Clone, Debug, Default, From)]
pub struct Web3ProxyBlock {
    pub block: ArcBlock,
    /// number of seconds this block was behind the current time when received
    /// this is only set if the block is from a subscription
    pub received_age: Option<u64>,
}

impl Serialize for Web3ProxyBlock {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        // TODO: i'm not sure about this name
        let mut state = serializer.serialize_struct("saved_block", 2)?;

        state.serialize_field("age", &self.age())?;

        let block = json!({
            "hash": self.block.hash,
            "parent_hash": self.block.parent_hash,
            "number": self.block.number,
            "timestamp": self.block.timestamp,
        });

        state.serialize_field("block", &block)?;

        state.end()
    }
}

impl PartialEq for Web3ProxyBlock {
    fn eq(&self, other: &Self) -> bool {
        match (self.block.hash, other.block.hash) {
            (None, None) => true,
            (Some(_), None) => false,
            (None, Some(_)) => false,
            (Some(s), Some(o)) => s == o,
        }
    }
}

impl Eq for Web3ProxyBlock {}

impl Hash for Web3ProxyBlock {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.block.hash.hash(state);
    }
}

impl Web3ProxyBlock {
    /// A new block has arrived over a subscription
    pub fn try_new(block: ArcBlock) -> Option<Self> {
        if block.number.is_none() || block.hash.is_none() {
            return None;
        }

        let mut x = Self {
            block,
            received_age: None,
        };

        // no need to recalculate lag every time
        // if the head block gets too old, a health check restarts this connection
        // TODO: emit a stat for received_age
        x.received_age = Some(x.age().as_secs());

        Some(x)
    }

    pub fn age(&self) -> Duration {
        let now = chrono::Utc::now().timestamp();

        let block_timestamp = self.block.timestamp.as_u32() as i64;

        let x = if block_timestamp < now {
            // this server is still syncing from too far away to serve requests
            // casting to u64 is safe because we just checked that block_timestamp < now
            (now - block_timestamp) as u64
        } else {
            0
        };

        Duration::from_secs(x)
    }

    #[inline(always)]
    pub fn parent_hash(&self) -> &H256 {
        &self.block.parent_hash
    }

    #[inline(always)]
    pub fn hash(&self) -> &H256 {
        self.block
            .hash
            .as_ref()
            .expect("saved blocks must have a hash")
    }

    #[inline(always)]
    pub fn number(&self) -> &U64 {
        self.block
            .number
            .as_ref()
            .expect("saved blocks must have a number")
    }

    pub fn uncles(&self) -> &[H256] {
        &self.block.uncles
    }
}

impl TryFrom<ArcBlock> for Web3ProxyBlock {
    type Error = Web3ProxyError;

    fn try_from(x: ArcBlock) -> Result<Self, Self::Error> {
        if x.number.is_none() || x.hash.is_none() {
            return Err(Web3ProxyError::NoBlockNumberOrHash);
        }

        let b = Web3ProxyBlock {
            block: x,
            received_age: None,
        };

        Ok(b)
    }
}

impl Display for Web3ProxyBlock {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{} ({}, {}s old)",
            self.number(),
            self.hash(),
            self.age().as_secs()
        )
    }
}

impl Web3Rpcs {
    /// add a block to our mappings and track the heaviest chain
    pub async fn try_cache_block(
        &self,
        block: Web3ProxyBlock,
        consensus_head: bool,
    ) -> Web3ProxyResult<Web3ProxyBlock> {
        let block_hash = *block.hash();

        // TODO: i think we can rearrange this function to make it faster on the hot path
        if block_hash.is_zero() {
            debug!("Skipping block without hash!");
            return Ok(block);
        }

        // this block is very likely already in block_hashes

        if consensus_head {
            let block_num = block.number();

            // TODO: if there is an existing entry with a different block_hash,
            // TODO: use entry api to handle changing existing entries
            self.blocks_by_number.insert(*block_num, block_hash).await;

            for uncle in block.uncles() {
                self.blocks_by_hash.invalidate(uncle).await;
                // TODO: save uncles somewhere?
            }

            // loop to make sure parent hashes match our caches
            // set the first ancestor to the block's parent hash, but keep going up the chain
            if let Some(parent_num) = block.number().checked_sub(1.into()) {
                struct Ancestor {
                    num: U64,
                    hash: H256,
                }
                let mut ancestor = Ancestor {
                    num: parent_num,
                    hash: *block.parent_hash(),
                };
                // TODO: smarter max loop on this
                for _ in 0..16 {
                    let ancestor_number_to_hash_entry = self
                        .blocks_by_number
                        .entry_by_ref(&ancestor.num)
                        .or_insert(ancestor.hash)
                        .await;

                    if *ancestor_number_to_hash_entry.value() == ancestor.hash {
                        // the existing number entry matches. all good
                        break;
                    }

                    // oh no! ancestor_number_to_hash_entry is different

                    // remove the uncled entry in blocks_by_hash
                    // we will look it up later if necessary
                    self.blocks_by_hash
                        .invalidate(ancestor_number_to_hash_entry.value())
                        .await;

                    // TODO: delete any cached entries for eth_getBlockByHash or eth_getBlockByNumber

                    // TODO: race on this drop and insert?
                    drop(ancestor_number_to_hash_entry);

                    // update the entry in blocks_by_number
                    self.blocks_by_number
                        .insert(ancestor.num, ancestor.hash)
                        .await;

                    // try to check the parent of this ancestor
                    if let Some(ancestor_block) = self.blocks_by_hash.get(&ancestor.hash) {
                        match ancestor_block.number().checked_sub(1.into()) {
                            None => break,
                            Some(ancestor_parent_num) => {
                                ancestor = Ancestor {
                                    num: ancestor_parent_num,
                                    hash: *ancestor_block.parent_hash(),
                                }
                            }
                        }
                    } else {
                        break;
                    }
                }
            }
        }

        let block = self
            .blocks_by_hash
            .get_with_by_ref(&block_hash, async move { block })
            .await;

        Ok(block)
    }

    /// Get a block from caches with fallback.
    /// Will query a specific node or the best available.
    pub async fn block(
        &self,
        hash: &H256,
        rpc: Option<&Arc<Web3Rpc>>,
        max_wait: Option<Duration>,
    ) -> Web3ProxyResult<Web3ProxyBlock> {
        // first, try to get the hash from our cache
        // the cache is set last, so if it's here, it's everywhere
        // TODO: use try_get_with
        if let Some(block) = self.blocks_by_hash.get(hash) {
            // double check that it matches the blocks_by_number cache
            let cached_hash = self
                .blocks_by_number
                .get_with_by_ref(block.number(), async { *hash })
                .await;

            if cached_hash == *hash {
                return Ok(block);
            }

            // hashes don't match! this block must be in the middle of being uncled
            // TODO: check known uncles. clear uncle caches
        }

        if hash == &H256::zero() {
            // TODO: think more about this
            return Err(Web3ProxyError::UnknownBlockHash(*hash));
        }

        // block not in cache. we need to ask an rpc for it
        let get_block_params = (*hash, false);

        let mut block: Option<ArcBlock> = if let Some(rpc) = rpc {
            // ask a specific rpc
            // if this errors, other rpcs will be tried
            rpc.internal_request::<_, Option<ArcBlock>>(
                "eth_getBlockByHash",
                &get_block_params,
                None,
                max_wait,
            )
            .await
            .ok()
            .flatten()
        } else {
            None
        };

        if block.is_none() {
            // try by asking any rpc
            // TODO: retry if "Requested data is not available"
            // TODO: request_with_metadata instead of internal_request
            block = self
                .internal_request::<_, Option<ArcBlock>>(
                    "eth_getBlockByHash",
                    &get_block_params,
                    max_wait,
                )
                .await?;
        };

        match block {
            Some(block) => {
                let block = self.try_cache_block(block.try_into()?, false).await?;
                Ok(block)
            }
            None => Err(Web3ProxyError::UnknownBlockHash(*hash)),
        }
    }

    /// Convenience method to get the canonical block at a given block height.
    pub async fn block_hash(&self, num: &U64) -> Web3ProxyResult<(H256, u64)> {
        let (block, block_depth) = self.cannonical_block(num).await?;

        let hash = *block.hash();

        Ok((hash, block_depth))
    }

    /// Get the heaviest chain's block from cache or backend rpc
    /// Caution! If a future block is requested, this might wait forever. Be sure to have a timeout outside of this!
    pub async fn cannonical_block(&self, num: &U64) -> Web3ProxyResult<(Web3ProxyBlock, u64)> {
        // we only have blocks by hash now
        // maybe save them during save_block in a blocks_by_number Cache<U64, Vec<ArcBlock>>
        // if there are multiple, use petgraph to find the one on the main chain (and remove the others if they have enough confirmations)

        let mut consensus_head_receiver = self
            .watch_head_block
            .as_ref()
            .web3_context("need new head subscriptions to fetch cannonical_block")?
            .subscribe();

        // be sure the requested block num exists
        // TODO: is this okay? what if we aren't synced?!
        let mut head_block_num = *consensus_head_receiver
            .borrow_and_update()
            .as_ref()
            .web3_context("no consensus head block")?
            .number();

        if *num > head_block_num {
            // if num is too far in the future, error now
            if *num - head_block_num > self.max_head_block_lag {
                return Err(Web3ProxyError::UnknownBlockNumber {
                    known: head_block_num,
                    unknown: *num,
                });
            }

            while *num > head_block_num {
                debug!(%head_block_num, %num, "waiting for future block");

                consensus_head_receiver.changed().await?;

                if let Some(head) = consensus_head_receiver.borrow_and_update().as_ref() {
                    head_block_num = *head.number();
                }
            }
        }

        let block_depth = (head_block_num - num).as_u64();

        // try to get the hash from our cache
        // deref to not keep the lock open
        if let Some(block_hash) = self.blocks_by_number.get(num) {
            // TODO: sometimes this needs to fetch the block. why? i thought block_numbers would only be set if the block hash was set
            // TODO: configurable max wait and rpc
            let block = self.block(&block_hash, None, None).await?;

            return Ok((block, block_depth));
        }

        // block number not in cache. we need to ask an rpc for it
        // TODO: this error is too broad
        let response = self
            .internal_request::<_, Option<ArcBlock>>("eth_getBlockByNumber", &(*num, false), None)
            .await?
            .ok_or(Web3ProxyError::NoBlocksKnown)?;

        let block = Web3ProxyBlock::try_from(response)?;

        // the block was fetched using eth_getBlockByNumber, so it should have all fields and be on the heaviest chain
        let block = self.try_cache_block(block, true).await?;

        Ok((block, block_depth))
    }

    pub(super) async fn process_incoming_blocks(
        &self,
        mut block_receiver: mpsc::UnboundedReceiver<BlockAndRpc>,
    ) -> Web3ProxyResult<()> {
        let mut consensus_finder =
            ConsensusFinder::new(Some(self.max_head_block_age), Some(self.max_head_block_lag));

        // TODO: what timeout on block receiver? we want to keep consensus_finder fresh so that server tiers are correct
        let double_block_time = average_block_interval(self.chain_id).mul_f32(2.0);

        let mut had_first_success = false;

        loop {
            match timeout(double_block_time, block_receiver.recv()).await {
                Ok(Some((new_block, rpc))) => {
                    let rpc_name = rpc.name.clone();
                    let rpc_is_backup = rpc.backup;

                    // TODO: what timeout on this?
                    match timeout(
                        Duration::from_secs(1),
                        consensus_finder.process_block_from_rpc(self, new_block, rpc),
                    )
                    .await
                    {
                        Ok(Ok(_)) => had_first_success = true,
                        Ok(Err(err)) => {
                            if had_first_success {
                                error!(
                                    "error while processing block from rpc {}: {:#?}",
                                    rpc_name, err
                                );
                            } else {
                                debug!(
                                    "startup error while processing block from rpc {}: {:#?}",
                                    rpc_name, err
                                );
                            }
                        }
                        Err(timeout) => {
                            if rpc_is_backup {
                                debug!(
                                    ?timeout,
                                    "timeout while processing block from {}", rpc_name
                                );
                            } else {
                                warn!(?timeout, "timeout while processing block from {}", rpc_name);
                            }
                        }
                    }
                }
                Ok(None) => {
                    // TODO: panic is probably too much, but getting here is definitely not good
                    return Err(anyhow::anyhow!("block_receiver on {} exited", self).into());
                }
                Err(_) => {
                    // TODO: what timeout on this?
                    match timeout(
                        Duration::from_secs(2),
                        consensus_finder.refresh(self, None, None),
                    )
                    .await
                    {
                        Ok(Ok(_)) => {}
                        Ok(Err(err)) => {
                            error!("error while refreshing consensus finder: {:#?}", err);
                        }
                        Err(timeout) => {
                            error!("timeout while refreshing consensus finder: {:#?}", timeout);
                        }
                    }
                }
            }
        }
    }
}
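
// NOTE: a minimal test sketch, not from the original file. It only exercises the
// pure Web3ProxyBlock helpers above; cache and consensus behavior is assumed to be
// covered elsewhere. The constructed Block fields come from ethers' Block type.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn try_new_requires_number_and_hash() {
        // a default Block has no number and no hash, like a pending block
        let pending = Arc::new(Block::<TxHash>::default());
        assert!(Web3ProxyBlock::try_new(pending).is_none());

        // a block with a number, a hash, and a "now" timestamp should be accepted
        let block = Block::<TxHash> {
            number: Some(1u64.into()),
            hash: Some(H256::repeat_byte(1)),
            timestamp: (chrono::Utc::now().timestamp() as u64).into(),
            ..Default::default()
        };

        let block = Web3ProxyBlock::try_new(Arc::new(block)).expect("number and hash are set");

        assert_eq!(*block.number(), U64::from(1u64));
        // a freshly stamped block should report a near-zero age (generous slack for slow CI)
        assert!(block.age() < Duration::from_secs(60));

        // the custom Serialize impl nests the raw fields under "block" next to "age"
        let serialized = serde_json::to_value(&block).expect("serialization should not fail");
        assert!(serialized.get("age").is_some());
        assert!(serialized.get("block").is_some());
    }
}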