//! Keep track of the blockchain as seen by a Web3Rpcs.

use super::consensus::ConsensusFinder;
use super::many::Web3Rpcs;
use super::one::Web3Rpc;
use super::transactions::TxStatus;
use crate::config::BlockAndRpc;
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
use crate::frontend::authorization::Authorization;
use derive_more::From;
use ethers::prelude::{Block, TxHash, H256, U64};
use log::{debug, trace, warn};
use moka::future::Cache;
use serde::ser::SerializeStruct;
use serde::Serialize;
use serde_json::json;
use std::hash::Hash;
use std::time::Duration;
use std::{cmp::Ordering, fmt::Display, sync::Arc};
use tokio::sync::broadcast;

// TODO: type for Hydrated Blocks with their full transactions?
pub type ArcBlock = Arc<Block<TxHash>>;

pub type BlocksByHashCache = Cache<H256, Web3ProxyBlock>;
pub type BlocksByNumberCache = Cache<U64, H256>;
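
// Lookups by number go through two caches: blocks_by_number maps a block number to
// the hash we currently believe is canonical, and blocks_by_hash holds the full block.
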
/// A block and its age.
#[derive(Clone, Debug, Default, From)]
pub struct Web3ProxyBlock {
    pub block: ArcBlock,
    /// number of seconds this block was behind the current time when received
    /// this is only set if the block is from a subscription
    pub received_age: Option<u64>,
}

impl Serialize for Web3ProxyBlock {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        // TODO: i'm not sure about this name
        let mut state = serializer.serialize_struct("saved_block", 2)?;

        state.serialize_field("age", &self.age())?;

        let block = json!({
            "hash": self.block.hash,
            "parent_hash": self.block.parent_hash,
            "number": self.block.number,
            "timestamp": self.block.timestamp,
        });

        state.serialize_field("block", &block)?;

        state.end()
    }
}

impl PartialEq for Web3ProxyBlock {
    fn eq(&self, other: &Self) -> bool {
        match (self.block.hash, other.block.hash) {
            (None, None) => true,
            (Some(_), None) => false,
            (None, Some(_)) => false,
            (Some(s), Some(o)) => s == o,
        }
    }
}
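
// Eq and Hash agree with PartialEq above: only the block hash is considered.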
impl Eq for Web3ProxyBlock {}

impl Hash for Web3ProxyBlock {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.block.hash.hash(state);
    }
}

impl Web3ProxyBlock {
    /// A new block has arrived over a subscription
    pub fn try_new(block: ArcBlock) -> Option<Self> {
        if block.number.is_none() || block.hash.is_none() {
            return None;
        }

        let mut x = Self {
            block,
            received_age: None,
        };

        // no need to recalculate the lag every time
        // if the head block gets too old, a health check restarts this connection
        // TODO: emit a stat for received_age
        x.received_age = Some(x.age().as_secs());

        Some(x)
    }
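
    // received_age (set once in try_new) is a snapshot; age() below recomputes the
    // lag against the current clock on every call.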
    pub fn age(&self) -> Duration {
        let now = chrono::Utc::now().timestamp();

        let block_timestamp = self.block.timestamp.as_u32() as i64;

        let x = if block_timestamp < now {
            // this server is still syncing from too far away to serve requests
            // casting to u64 is safe because we just checked that block_timestamp < now
            (now - block_timestamp) as u64
        } else {
            0
        };

        Duration::from_secs(x)
    }

    #[inline(always)]
    pub fn parent_hash(&self) -> &H256 {
        &self.block.parent_hash
    }

    #[inline(always)]
    pub fn hash(&self) -> &H256 {
        self.block
            .hash
            .as_ref()
            .expect("saved blocks must have a hash")
    }

    #[inline(always)]
    pub fn number(&self) -> &U64 {
        self.block
            .number
            .as_ref()
            .expect("saved blocks must have a number")
    }

    pub fn uncles(&self) -> &[H256] {
        &self.block.uncles
    }
}

impl TryFrom<ArcBlock> for Web3ProxyBlock {
    type Error = Web3ProxyError;

    fn try_from(x: ArcBlock) -> Result<Self, Self::Error> {
        if x.number.is_none() || x.hash.is_none() {
            return Err(Web3ProxyError::NoBlockNumberOrHash);
        }

        let b = Web3ProxyBlock {
            block: x,
            received_age: None,
        };

        Ok(b)
    }
}

impl Display for Web3ProxyBlock {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{} ({}, {}s old)",
            self.number(),
            self.hash(),
            self.age().as_secs()
        )
    }
}

impl Web3Rpcs {
    /// add a block to our mappings and track the heaviest chain
    pub async fn try_cache_block(
        &self,
        block: Web3ProxyBlock,
        consensus_head: bool,
    ) -> Web3ProxyResult<Web3ProxyBlock> {
        let block_hash = *block.hash();

        // TODO: i think we can rearrange this function to make it faster on the hot path
        if block_hash.is_zero() {
            debug!("Skipping block without hash!");
            return Ok(block);
        }

        // this block is very likely already in blocks_by_hash
        if consensus_head {
            let block_num = block.number();

            // TODO: if there is an existing entry with a different block_hash,
            // TODO: use entry api to handle changing existing entries
            self.blocks_by_number.insert(*block_num, block_hash).await;

            for uncle in block.uncles() {
                self.blocks_by_hash.invalidate(uncle).await;
                // TODO: save uncles somewhere?
            }
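
            // If this block reorged the chain, some ancestors in blocks_by_number may
            // still point at hashes that were just uncled. Walk up the parent hashes
            // and fix those entries, invalidating the stale hashes as we go.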
            // loop to make sure parent hashes match our caches
            // set the first ancestor to the block's parent hash, but keep going up the chain
            if let Some(parent_num) = block.number().checked_sub(1.into()) {
                struct Ancestor {
                    num: U64,
                    hash: H256,
                }
                let mut ancestor = Ancestor {
                    num: parent_num,
                    hash: *block.parent_hash(),
                };
                loop {
                    let ancestor_number_to_hash_entry = self
                        .blocks_by_number
                        .entry_by_ref(&ancestor.num)
                        .or_insert(ancestor.hash)
                        .await;

                    if *ancestor_number_to_hash_entry.value() == ancestor.hash {
                        // the existing number entry matches. all good
                        break;
                    }

                    // oh no! ancestor_number_to_hash_entry is different
                    // remove the uncled entry in blocks_by_hash
                    // we will look it up later if necessary
                    self.blocks_by_hash
                        .invalidate(ancestor_number_to_hash_entry.value())
                        .await;

                    // TODO: delete any cached entries for eth_getBlockByHash or eth_getBlockByNumber

                    // TODO: race on this drop and insert?
                    drop(ancestor_number_to_hash_entry);

                    // update the entry in blocks_by_number
                    self.blocks_by_number
                        .insert(ancestor.num, ancestor.hash)
                        .await;

                    // try to check the parent of this ancestor
                    if let Some(ancestor_block) = self.blocks_by_hash.get(&ancestor.hash) {
                        match ancestor_block.number().checked_sub(1.into()) {
                            None => break,
                            Some(ancestor_parent_num) => {
                                ancestor = Ancestor {
                                    num: ancestor_parent_num,
                                    hash: *ancestor_block.parent_hash(),
                                }
                            }
                        }
                    } else {
                        break;
                    }
                }
            }
        }

        let block = self
            .blocks_by_hash
            .get_with_by_ref(&block_hash, async move { block })
            .await;

        Ok(block)
    }

    /// Get a block from caches with fallback.
    /// Will query a specific node or the best available.
    pub async fn block(
        &self,
        authorization: &Arc<Authorization>,
        hash: &H256,
        rpc: Option<&Arc<Web3Rpc>>,
    ) -> Web3ProxyResult<Web3ProxyBlock> {
        // first, try to get the hash from our cache
        // the cache is set last, so if it's here, it's everywhere
        // TODO: use try_get_with
        if let Some(block) = self.blocks_by_hash.get(hash) {
            // double check that it matches the blocks_by_number cache
            let cached_hash = self
                .blocks_by_number
                .get_with_by_ref(block.number(), async { *hash })
                .await;

            if cached_hash == *hash {
                return Ok(block);
            }

            // hashes don't match! this block must be in the middle of being uncled
            // TODO: check known uncles
        }

        // block not in cache. we need to ask an rpc for it
        let get_block_params = (*hash, false);

        let block: Option<ArcBlock> = if let Some(rpc) = rpc {
            // ask a specific rpc
            // TODO: request_with_metadata would probably be better than authorized_request
            rpc.authorized_request::<_, Option<ArcBlock>>(
                "eth_getBlockByHash",
                &get_block_params,
                authorization,
                None,
            )
            .await?
        } else {
            // ask any rpc
            // TODO: request_with_metadata instead of internal_request
            self.internal_request::<_, Option<ArcBlock>>("eth_getBlockByHash", &get_block_params)
                .await?
        };
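
        // cache whatever we fetched, but don't mark it as the consensus head (the
        // `false` below): fetching a block by hash says nothing about the chain tip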
        match block {
            Some(block) => {
                let block = self.try_cache_block(block.try_into()?, false).await?;

                Ok(block)
            }
            // TODO: better error. some blocks are known, just not this one
            None => Err(Web3ProxyError::NoBlocksKnown),
        }
    }

    /// Convenience method to get the canonical block at a given block height.
    pub async fn block_hash(
        &self,
        authorization: &Arc<Authorization>,
        num: &U64,
    ) -> Web3ProxyResult<(H256, u64)> {
        let (block, block_depth) = self.cannonical_block(authorization, num).await?;

        let hash = *block.hash();

        Ok((hash, block_depth))
    }

    /// Get the heaviest chain's block from cache or backend rpc
    /// Caution! If a future block is requested, this might wait forever. Be sure to have a timeout outside of this!
    pub async fn cannonical_block(
        &self,
        authorization: &Arc<Authorization>,
        num: &U64,
    ) -> Web3ProxyResult<(Web3ProxyBlock, u64)> {
        // we only have blocks by hash now
        // maybe save them during save_block in a blocks_by_number Cache<U64, Vec<ArcBlock>>
        // if there are multiple, use petgraph to find the one on the main chain (and remove the others if they have enough confirmations)

        let mut consensus_head_receiver = self
            .watch_consensus_head_sender
            .as_ref()
            .web3_context("need new head subscriptions to fetch cannonical_block")?
            .subscribe();
2022-09-03 00:35:03 +03:00
// be sure the requested block num exists
2023-02-14 23:14:50 +03:00
// TODO: is this okay? what if we aren't synced?!
2023-02-15 04:41:40 +03:00
let mut head_block_num = * consensus_head_receiver
. borrow_and_update ( )
. as_ref ( )
2023-03-20 23:45:21 +03:00
. web3_context ( " no consensus head block " ) ?
2023-02-15 04:41:40 +03:00
. number ( ) ;
2022-11-03 02:14:16 +03:00
2023-01-23 09:02:08 +03:00
loop {
2023-02-14 23:14:50 +03:00
if num < = & head_block_num {
break ;
2023-01-23 09:02:08 +03:00
}
2022-11-03 02:14:16 +03:00
2023-02-14 23:14:50 +03:00
trace! ( " waiting for future block {} > {} " , num , head_block_num ) ;
2023-01-23 09:02:08 +03:00
consensus_head_receiver . changed ( ) . await ? ;
2023-02-15 04:41:40 +03:00
if let Some ( head ) = consensus_head_receiver . borrow_and_update ( ) . as_ref ( ) {
head_block_num = * head . number ( ) ;
}
2022-08-24 02:56:47 +03:00
}
2023-02-14 23:14:50 +03:00
let block_depth = ( head_block_num - num ) . as_u64 ( ) ;
2023-01-23 09:02:08 +03:00
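
        // block_depth is how many blocks below the consensus head the request is
        // (0 means the requested number is the current head)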
2022-09-03 00:35:03 +03:00
// try to get the hash from our cache
// deref to not keep the lock open
2023-02-26 10:52:33 +03:00
if let Some ( block_hash ) = self . blocks_by_number . get ( num ) {
2022-09-03 00:35:03 +03:00
// TODO: sometimes this needs to fetch the block. why? i thought block_numbers would only be set if the block hash was set
2022-11-08 22:58:11 +03:00
// TODO: pass authorization through here?
let block = self . block ( authorization , & block_hash , None ) . await ? ;
2022-11-03 02:14:16 +03:00
2023-02-06 04:58:03 +03:00
return Ok ( ( block , block_depth ) ) ;
2022-09-03 00:35:03 +03:00
}
// block number not in cache. we need to ask an rpc for it
2023-05-31 02:32:34 +03:00
// TODO: this error is too broad
2022-08-24 02:56:47 +03:00
let response = self
2023-05-31 02:32:34 +03:00
. internal_request ::< _ , Option < ArcBlock > > ( " eth_getBlockByNumber " , & ( * num , false ) )
. await ?
. ok_or ( Web3ProxyError ::NoBlocksKnown ) ? ;
2022-08-24 02:56:47 +03:00
2023-05-31 02:32:34 +03:00
let block = Web3ProxyBlock ::try_from ( response ) ? ;
2023-02-14 23:14:50 +03:00
2022-09-02 08:40:56 +03:00
// the block was fetched using eth_getBlockByNumber, so it should have all fields and be on the heaviest chain
2023-02-14 23:14:50 +03:00
let block = self . try_cache_block ( block , true ) . await ? ;
2022-08-24 02:56:47 +03:00
2023-02-06 04:58:03 +03:00
Ok ( ( block , block_depth ) )
2022-08-24 02:56:47 +03:00
}

    pub(super) async fn process_incoming_blocks(
        &self,
        authorization: &Arc<Authorization>,
        block_receiver: flume::Receiver<BlockAndRpc>,
        // TODO: document that this is a watch sender and not a broadcast! if things get busy, blocks might get missed
        // Geth's subscriptions have the same potential for skipping blocks.
        pending_tx_sender: Option<broadcast::Sender<TxStatus>>,
    ) -> Web3ProxyResult<()> {
        let mut connection_heads =
            ConsensusFinder::new(Some(self.max_head_block_age), Some(self.max_head_block_lag));
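
        // the ConsensusFinder remembers the most recent head block reported by each rpc
        // and decides when enough rpcs agree on a new consensus head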

        loop {
            match block_receiver.recv_async().await {
                Ok((new_block, rpc)) => {
                    let rpc_name = rpc.name.clone();

                    if let Err(err) = self
                        .process_block_from_rpc(
                            authorization,
                            &mut connection_heads,
                            new_block,
                            rpc,
                            &pending_tx_sender,
                        )
                        .await
                    {
                        warn!(
                            "error while processing block from rpc {}: {:#?}",
                            rpc_name, err
                        );
                    }
                }
                Err(err) => {
                    warn!("block_receiver exited! {:#?}", err);
                    return Err(err.into());
                }
            }
        }
    }

    /// `consensus_finder` tracks the latest head block reported by each rpc.
    /// self.blocks_by_hash is a mapping of hashes to the complete Web3ProxyBlock.
    /// TODO: return something?
    pub(crate) async fn process_block_from_rpc(
        &self,
        authorization: &Arc<Authorization>,
        consensus_finder: &mut ConsensusFinder,
        new_block: Option<Web3ProxyBlock>,
        rpc: Arc<Web3Rpc>,
        _pending_tx_sender: &Option<broadcast::Sender<TxStatus>>,
    ) -> Web3ProxyResult<()> {
        // TODO: how should we handle an error here?
        if !consensus_finder
            .update_rpc(new_block.clone(), rpc.clone(), self)
            .await
            .web3_context("failed to update rpc")?
        {
            // nothing changed. no need to scan for a new consensus head
            return Ok(());
        }

        let new_consensus_rpcs = match consensus_finder
            .find_consensus_connections(authorization, self)
            .await
        {
            Err(err) => {
                return Err(err).web3_context("error while finding consensus head block!");
            }
            Ok(None) => {
                return Err(Web3ProxyError::NoConsensusHeadBlock);
            }
            Ok(Some(x)) => x,
        };

        trace!("new_synced_connections: {:#?}", new_consensus_rpcs);

        let watch_consensus_head_sender = self.watch_consensus_head_sender.as_ref().unwrap();
        let consensus_tier = new_consensus_rpcs.tier;
        // TODO: think more about the default for total_tiers
        let total_tiers = consensus_finder.worst_tier().unwrap_or_default();
        let backups_needed = new_consensus_rpcs.backups_needed;
        let consensus_head_block = new_consensus_rpcs.head_block.clone();
        let num_consensus_rpcs = new_consensus_rpcs.num_consensus_rpcs();
        let num_active_rpcs = consensus_finder.len();
        let total_rpcs = self.len();

        let new_consensus_rpcs = Arc::new(new_consensus_rpcs);
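
        // send_replace atomically swaps in the new consensus set and returns the old
        // one so it can be compared against below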
        let old_consensus_head_connections = self
            .watch_consensus_rpcs_sender
            .send_replace(Some(new_consensus_rpcs.clone()));

        let backups_voted_str = if backups_needed { "B " } else { "" };
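
        // log lines below read: tier/total_tiers backups?consensus/active/total rpcs,
        // then the consensus head block and the rpc whose block triggered this check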
        match old_consensus_head_connections.as_ref() {
            None => {
                debug!(
                    "first {}/{} {}{}/{}/{} block={}, rpc={}",
                    consensus_tier,
                    total_tiers,
                    backups_voted_str,
                    num_consensus_rpcs,
                    num_active_rpcs,
                    total_rpcs,
                    consensus_head_block,
                    rpc,
                );

                if backups_needed {
                    // TODO: what else should be in this error?
                    warn!("Backup RPCs are in use!");
                }

                // this should already be cached
                let consensus_head_block = self.try_cache_block(consensus_head_block, true).await?;

                watch_consensus_head_sender
                    .send(Some(consensus_head_block))
                    .or(Err(Web3ProxyError::WatchSendError))
                    .web3_context(
                        "watch_consensus_head_sender failed sending first consensus_head_block",
                    )?;
            }
            Some(old_consensus_connections) => {
                let old_head_block = &old_consensus_connections.head_block;

                // TODO: do this log item better
                let rpc_head_str = new_block
                    .map(|x| x.to_string())
                    .unwrap_or_else(|| "None".to_string());

                match consensus_head_block.number().cmp(old_head_block.number()) {
                    Ordering::Equal => {
                        // multiple blocks with the same fork!
                        if consensus_head_block.hash() == old_head_block.hash() {
                            // no change in hash. no need to use watch_consensus_head_sender
                            // TODO: trace level if rpc is backup
                            debug!(
                                "con {}/{} {}{}/{}/{} con={} rpc={}@{}",
                                consensus_tier,
                                total_tiers,
                                backups_voted_str,
                                num_consensus_rpcs,
                                num_active_rpcs,
                                total_rpcs,
                                consensus_head_block,
                                rpc,
                                rpc_head_str,
                            )
                        } else {
                            // hash changed
                            debug!(
                                "unc {}/{} {}{}/{}/{} con={} old={} rpc={}@{}",
                                consensus_tier,
                                total_tiers,
                                backups_voted_str,
                                num_consensus_rpcs,
                                num_active_rpcs,
                                total_rpcs,
                                consensus_head_block,
                                old_head_block,
                                rpc,
                                rpc_head_str,
                            );

                            let consensus_head_block = self
                                .try_cache_block(consensus_head_block, true)
                                .await
                                .web3_context("save consensus_head_block as heaviest chain")?;

                            watch_consensus_head_sender
                                .send(Some(consensus_head_block))
                                .or(Err(Web3ProxyError::WatchSendError))
                                .web3_context("watch_consensus_head_sender failed sending uncled consensus_head_block")?;
                        }
                    }
                    Ordering::Less => {
                        // this is unlikely but possible
                        // TODO: better log that includes all the votes
                        warn!(
                            "chain rolled back {}/{} {}{}/{}/{} con={} old={} rpc={}@{}",
                            consensus_tier,
                            total_tiers,
                            backups_voted_str,
                            num_consensus_rpcs,
                            num_active_rpcs,
                            total_rpcs,
                            consensus_head_block,
                            old_head_block,
                            rpc,
                            rpc_head_str,
                        );

                        if backups_needed {
                            // TODO: what else should be in this error?
                            warn!("Backup RPCs are in use!");
                        }

                        // TODO: tell save_block to remove any higher block numbers from the cache. not needed because we have other checks on requested blocks being > head, but still seems like a good idea
                        let consensus_head_block = self
                            .try_cache_block(consensus_head_block, true)
                            .await
                            .web3_context(
                                "save_block sending consensus_head_block as heaviest chain",
                            )?;

                        watch_consensus_head_sender
                            .send(Some(consensus_head_block))
                            .or(Err(Web3ProxyError::WatchSendError))
                            .web3_context("watch_consensus_head_sender failed sending rollback consensus_head_block")?;
                    }
                    Ordering::Greater => {
                        debug!(
                            "new {}/{} {}{}/{}/{} con={} rpc={}@{}",
                            consensus_tier,
                            total_tiers,
                            backups_voted_str,
                            num_consensus_rpcs,
                            num_active_rpcs,
                            total_rpcs,
                            consensus_head_block,
                            rpc,
                            rpc_head_str,
                        );

                        if backups_needed {
                            // TODO: what else should be in this error?
                            warn!("Backup RPCs are in use!");
                        }

                        let consensus_head_block =
                            self.try_cache_block(consensus_head_block, true).await?;

                        watch_consensus_head_sender
                            .send(Some(consensus_head_block))
                            .or(Err(Web3ProxyError::WatchSendError))
                            .web3_context("watch_consensus_head_sender failed sending new consensus_head_block")?;
                    }
                }
            }
        }

        Ok(())
    }
}