//! Keep track of the blockchain as seen by a Web3Rpcs.
use super::consensus::ConsensusFinder;
use super::many::Web3Rpcs;
use super::one::Web3Rpc;
use super::transactions::TxStatus;
use crate::frontend::authorization::Authorization;
use crate::frontend::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
use crate::response_cache::JsonRpcResponseData;
use crate::{config::BlockAndRpc, jsonrpc::JsonRpcRequest};
use derive_more::From;
use ethers::prelude::{Block, TxHash, H256, U64};
use log::{debug, trace, warn, Level};
use quick_cache_ttl::CacheWithTTL;
use serde::ser::SerializeStruct;
use serde::Serialize;
use serde_json::json;
use std::hash::Hash;
use std::{cmp::Ordering, fmt::Display, sync::Arc};
use tokio::sync::broadcast;

// TODO: type for Hydrated Blocks with their full transactions?
pub type ArcBlock = Arc<Block<TxHash>>;

pub type BlocksByHashCache = Arc<CacheWithTTL<H256, Web3ProxyBlock>>;
pub type BlocksByNumberCache = Arc<CacheWithTTL<U64, H256>>;
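
// The two caches divide the work: BlocksByHashCache stores the full Web3ProxyBlock
// keyed by its hash, while BlocksByNumberCache only stores the hash that this proxy
// currently considers canonical at each height (it is written by `try_cache_block`
// below when `heaviest_chain` is true).
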
/// A block and its age.
#[derive(Clone, Debug, Default, From)]
pub struct Web3ProxyBlock {
    pub block: ArcBlock,
    /// number of seconds this block was behind the current time when received
    /// this is only set if the block is from a subscription
    pub received_age: Option<u64>,
}

impl Serialize for Web3ProxyBlock {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        // TODO: i'm not sure about this name
        let mut state = serializer.serialize_struct("saved_block", 2)?;

        state.serialize_field("age", &self.age())?;

        let block = json!({
            "hash": self.block.hash,
            "parent_hash": self.block.parent_hash,
            "number": self.block.number,
            "timestamp": self.block.timestamp,
        });

        state.serialize_field("block", &block)?;

        state.end()
    }
}
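
// For reference, the serialized form produced above looks roughly like this
// (exact hex formatting comes from serde_json and the ethers types, so treat
// the values as a sketch):
//
// {"age": 3, "block": {"hash": "0x…", "parent_hash": "0x…", "number": "0x10d4f2", "timestamp": "0x644f…"}}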

impl PartialEq for Web3ProxyBlock {
    fn eq(&self, other: &Self) -> bool {
        match (self.block.hash, other.block.hash) {
            (None, None) => true,
            (Some(_), None) => false,
            (None, Some(_)) => false,
            (Some(s), Some(o)) => s == o,
        }
    }
}

impl Eq for Web3ProxyBlock {}

impl Hash for Web3ProxyBlock {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.block.hash.hash(state);
    }
}
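
// A minimal sketch (not part of the upstream file) illustrating the semantics above:
// equality and hashing only look at `block.hash`, so two wrappers around blocks that
// share a hash compare equal even if their other fields differ. Assumes that
// `Block<TxHash>` implements `Default` (true for recent ethers-rs versions).
#[cfg(test)]
mod web3_proxy_block_identity_sketch {
    use super::*;

    #[test]
    fn same_hash_means_equal() {
        let mut raw = Block::<TxHash>::default();
        raw.hash = Some(H256::from_low_u64_be(1));
        raw.number = Some(U64::from(1u64));

        let a = Web3ProxyBlock::try_new(Arc::new(raw.clone())).expect("hash and number are set");

        // change a field that equality should ignore
        let mut raw_b = raw;
        raw_b.gas_used = 123u64.into();
        let b = Web3ProxyBlock::try_new(Arc::new(raw_b)).expect("hash and number are set");

        assert_eq!(a, b);
        assert_eq!(a.hash(), b.hash());
    }
}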

impl Web3ProxyBlock {
    /// A new block has arrived over a subscription
    pub fn try_new(block: ArcBlock) -> Option<Self> {
        if block.number.is_none() || block.hash.is_none() {
            return None;
        }

        let mut x = Self {
            block,
            received_age: None,
        };

        // no need to recalculate lag every time
        // if the head block gets too old, a health check restarts this connection
        // TODO: emit a stat for received_age
        x.received_age = Some(x.age());

        Some(x)
    }

    /// How many seconds old this block is, based on its timestamp (0 if the timestamp is in the future).
    pub fn age(&self) -> u64 {
        let now = chrono::Utc::now().timestamp();

        let block_timestamp = self.block.timestamp.as_u32() as i64;

        if block_timestamp < now {
            // this server is still syncing from too far away to serve requests
            // the cast to u64 is safe because we just checked that block_timestamp < now
            (now - block_timestamp) as u64
        } else {
            0
        }
    }

    #[inline(always)]
    pub fn parent_hash(&self) -> &H256 {
        &self.block.parent_hash
    }

    #[inline(always)]
    pub fn hash(&self) -> &H256 {
        self.block
            .hash
            .as_ref()
            .expect("saved blocks must have a hash")
    }

    #[inline(always)]
    pub fn number(&self) -> &U64 {
        self.block
            .number
            .as_ref()
            .expect("saved blocks must have a number")
    }
}

impl TryFrom<ArcBlock> for Web3ProxyBlock {
    type Error = Web3ProxyError;

    fn try_from(x: ArcBlock) -> Result<Self, Self::Error> {
        if x.number.is_none() || x.hash.is_none() {
            return Err(Web3ProxyError::NoBlockNumberOrHash);
        }

        let b = Web3ProxyBlock {
            block: x,
            received_age: None,
        };

        Ok(b)
    }
}

impl Display for Web3ProxyBlock {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{} ({}, {}s old)",
            self.number(),
            self.hash(),
            self.age()
        )
    }
}
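
// For example, a block at height 17000000 whose timestamp is 3 seconds old renders
// as something like "17000000 (0x8c60…dd63, 3s old)"; H256's Display impl abbreviates
// the hash, while its Debug form prints it in full.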

impl Web3Rpcs {
    /// add a block to our mappings and track the heaviest chain
    pub async fn try_cache_block(
        &self,
        block: Web3ProxyBlock,
        heaviest_chain: bool,
    ) -> Web3ProxyResult<Web3ProxyBlock> {
        // TODO: i think we can rearrange this function to make it faster on the hot path
        if block.hash().is_zero() {
            debug!("Skipping block without hash!");
            return Ok(block);
        }

        let block_num = block.number();

        // TODO: think more about heaviest_chain. would be better to do the check inside this function
        if heaviest_chain {
            // this is the only place that writes to block_numbers
            // multiple inserts should be okay though
            // TODO: info that there was a fork?
            if let Err((k, v)) = self.blocks_by_number.try_insert(*block_num, *block.hash()) {
                warn!("unable to cache {} as {}", k, v);
            }
        }

        // this block is very likely already in block_hashes
        // TODO: use their get_with
        let block_hash = *block.hash();

        let block = self
            .blocks_by_hash
            .get_or_insert_async(&block_hash, async move { block })
            .await;

        Ok(block)
    }

    /// Get a block from caches with fallback.
    /// Will query a specific node or the best available.
    /// TODO: return `Web3ProxyResult<Option<ArcBlock>>`?
    pub async fn block(
        &self,
        authorization: &Arc<Authorization>,
        hash: &H256,
        rpc: Option<&Arc<Web3Rpc>>,
    ) -> Web3ProxyResult<Web3ProxyBlock> {
        // first, try to get the hash from our cache
        // the cache is set last, so if it's here, it's everywhere
        // TODO: use try_get_with
        if let Some(block) = self.blocks_by_hash.get(hash) {
            return Ok(block);
        }

        // block not in cache. we need to ask an rpc for it
        let get_block_params = (*hash, false);

        // TODO: if error, retry?
        let block: Web3ProxyBlock = match rpc {
            Some(rpc) => rpc
                .request::<_, Option<ArcBlock>>(
                    "eth_getBlockByHash",
                    &json!(get_block_params),
                    Level::Error.into(),
                    authorization.clone(),
                )
                .await?
                .and_then(|x| {
                    if x.number.is_none() {
                        None
                    } else {
                        x.try_into().ok()
                    }
                })
                .web3_context("no block!")?,
            None => {
                // TODO: helper for method+params => JsonRpcRequest
                // TODO: does this id matter?
                let request = json!({ "jsonrpc": "2.0", "id": "1", "method": "eth_getBlockByHash", "params": get_block_params });
                let request: JsonRpcRequest = serde_json::from_value(request)?;

                // TODO: request_metadata? maybe we should put it in the authorization?
                // TODO: think more about this wait_for_sync
                let response = self
                    .try_send_best_connection(authorization, &request, None, None, None)
                    .await?;

                let value = match response {
                    JsonRpcResponseData::Error { .. } => {
                        return Err(anyhow::anyhow!("failed fetching block").into());
                    }
                    JsonRpcResponseData::Result { value, .. } => value,
                };

                let block: Option<ArcBlock> = serde_json::from_str(value.get())?;

                let block: ArcBlock = block.web3_context("no block in the response")?;

                // TODO: received time is going to be weird
                Web3ProxyBlock::try_from(block)?
            }
        };

        // the block was fetched using eth_getBlockByHash, so it should have all fields
        // TODO: fill in heaviest_chain! if the block is old enough, is this definitely true?
        let block = self.try_cache_block(block, false).await?;

        Ok(block)
    }

    /// Convenience method to get the hash of the canonical block at a given block height.
    pub async fn block_hash(
        &self,
        authorization: &Arc<Authorization>,
        num: &U64,
    ) -> Web3ProxyResult<(H256, u64)> {
        let (block, block_depth) = self.cannonical_block(authorization, num).await?;

        let hash = *block.hash();

        Ok((hash, block_depth))
    }

    /// Get the heaviest chain's block from cache or backend rpc.
    /// Also returns the block's depth below the consensus head.
    /// Caution! If a future block is requested, this might wait forever. Be sure to have a timeout outside of this!
    pub async fn cannonical_block(
        &self,
        authorization: &Arc<Authorization>,
        num: &U64,
    ) -> Web3ProxyResult<(Web3ProxyBlock, u64)> {
        // we only have blocks by hash now
        // maybe save them during save_block in a blocks_by_number Cache<U64, Vec<ArcBlock>>
        // if there are multiple, use petgraph to find the one on the main chain (and remove the others if they have enough confirmations)

        let mut consensus_head_receiver = self
            .watch_consensus_head_sender
            .as_ref()
            .web3_context("need new head subscriptions to fetch cannonical_block")?
            .subscribe();

        // be sure the requested block num exists
        // TODO: is this okay? what if we aren't synced?!
        let mut head_block_num = *consensus_head_receiver
            .borrow_and_update()
            .as_ref()
            .web3_context("no consensus head block")?
            .number();

        loop {
            if num <= &head_block_num {
                break;
            }

            trace!("waiting for future block {} > {}", num, head_block_num);
            consensus_head_receiver.changed().await?;

            if let Some(head) = consensus_head_receiver.borrow_and_update().as_ref() {
                head_block_num = *head.number();
            }
        }

        let block_depth = (head_block_num - num).as_u64();

        // try to get the hash from our cache
        // deref to not keep the lock open
        if let Some(block_hash) = self.blocks_by_number.get(num) {
            // TODO: sometimes this needs to fetch the block. why? i thought block_numbers would only be set if the block hash was set
            // TODO: pass authorization through here?
            let block = self.block(authorization, &block_hash, None).await?;

            return Ok((block, block_depth));
        }

        // block number not in cache. we need to ask an rpc for it
        // TODO: helper for method+params => JsonRpcRequest
        let request = json!({ "jsonrpc": "2.0", "id": "1", "method": "eth_getBlockByNumber", "params": (num, false) });
        let request: JsonRpcRequest = serde_json::from_value(request)?;

        let response = self
            .try_send_best_connection(authorization, &request, None, Some(num), None)
            .await?;

        let value = match response {
            JsonRpcResponseData::Error { .. } => {
                return Err(anyhow::anyhow!("failed fetching block").into());
            }
            JsonRpcResponseData::Result { value, .. } => value,
        };

        let block: ArcBlock = serde_json::from_str(value.get())?;

        let block = Web3ProxyBlock::try_from(block)?;

        // the block was fetched using eth_getBlockByNumber, so it should have all fields and be on the heaviest chain
        let block = self.try_cache_block(block, true).await?;

        Ok((block, block_depth))
    }
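
    // Callers are expected to bound `cannonical_block` with their own timeout, since a
    // request for a future block waits until that block exists. A sketch (assuming
    // `web3_rpcs`, `authorization`, and `num` are in scope and `std::time::Duration`
    // is imported):
    //
    //     match tokio::time::timeout(
    //         Duration::from_secs(12),
    //         web3_rpcs.cannonical_block(&authorization, &num),
    //     )
    //     .await
    //     {
    //         Ok(Ok((block, depth))) => { /* use the block and its depth */ }
    //         Ok(Err(err)) => { /* rpc or consensus error */ }
    //         Err(_elapsed) => { /* timed out waiting for the block */ }
    //     }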

    pub(super) async fn process_incoming_blocks(
        &self,
        authorization: &Arc<Authorization>,
        block_receiver: flume::Receiver<BlockAndRpc>,
        // TODO: document that this is a watch sender and not a broadcast! if things get busy, blocks might get missed
        // Geth's subscriptions have the same potential for skipping blocks.
        pending_tx_sender: Option<broadcast::Sender<TxStatus>>,
    ) -> Web3ProxyResult<()> {
        let mut connection_heads = ConsensusFinder::new(self.max_block_age, self.max_block_lag);

        loop {
            match block_receiver.recv_async().await {
                Ok((new_block, rpc)) => {
                    let rpc_name = rpc.name.clone();

                    if let Err(err) = self
                        .process_block_from_rpc(
                            authorization,
                            &mut connection_heads,
                            new_block,
                            rpc,
                            &pending_tx_sender,
                        )
                        .await
                    {
                        warn!(
                            "error while processing block from rpc {}: {:#?}",
                            rpc_name, err
                        );
                    }
                }
                Err(err) => {
                    warn!("block_receiver exited! {:#?}", err);
                    return Err(err.into());
                }
            }
        }
    }

    /// `consensus_finder` tracks the most recent head block reported by each rpc.
    /// `self.blocks_by_hash` is a mapping of block hashes to the complete Web3ProxyBlock.
    /// TODO: return something?
    pub(crate) async fn process_block_from_rpc(
        &self,
        authorization: &Arc<Authorization>,
        consensus_finder: &mut ConsensusFinder,
        new_block: Option<Web3ProxyBlock>,
        rpc: Arc<Web3Rpc>,
        _pending_tx_sender: &Option<broadcast::Sender<TxStatus>>,
    ) -> Web3ProxyResult<()> {
        // TODO: how should we handle an error here?
        if !consensus_finder
            .update_rpc(new_block.clone(), rpc.clone(), self)
            .await
            .web3_context("failed to update rpc")?
        {
            // nothing changed. no need to scan for a new consensus head
            return Ok(());
        }

        let new_consensus_rpcs = match consensus_finder
            .find_consensus_connections(authorization, self)
            .await
        {
            Err(err) => {
                return Err(err).web3_context("error while finding consensus head block!");
            }
            Ok(None) => {
                return Err(Web3ProxyError::NoConsensusHeadBlock);
            }
            Ok(Some(x)) => x,
        };

        trace!("new_consensus_rpcs: {:#?}", new_consensus_rpcs);

        let watch_consensus_head_sender = self.watch_consensus_head_sender.as_ref().unwrap();

        let consensus_tier = new_consensus_rpcs.tier;
        // TODO: think more about this unwrap
        let total_tiers = consensus_finder.worst_tier().unwrap_or(10);
        let backups_needed = new_consensus_rpcs.backups_needed;
        let consensus_head_block = new_consensus_rpcs.head_block.clone();
        let num_consensus_rpcs = new_consensus_rpcs.num_consensus_rpcs();
        let num_active_rpcs = consensus_finder.len();
        let total_rpcs = self.by_name.load().len();

        let old_consensus_head_connections = self
            .watch_consensus_rpcs_sender
            .send_replace(Some(Arc::new(new_consensus_rpcs)));

        let backups_voted_str = if backups_needed { "B " } else { "" };

        // log prefixes below: "first" = no previous consensus, "con" = same head,
        // "unc" = same height but a different hash (uncle), "chain rolled back" =
        // lower head than before, "new" = higher head than before
        match old_consensus_head_connections.as_ref() {
            None => {
                debug!(
                    "first {}/{} {}{}/{}/{} block={}, rpc={}",
                    consensus_tier,
                    total_tiers,
                    backups_voted_str,
                    num_consensus_rpcs,
                    num_active_rpcs,
                    total_rpcs,
                    consensus_head_block,
                    rpc,
                );

                if backups_needed {
                    // TODO: what else should be in this error?
                    warn!("Backup RPCs are in use!");
                }

                // this should already be cached
                let consensus_head_block = self.try_cache_block(consensus_head_block, true).await?;

                watch_consensus_head_sender
                    .send(Some(consensus_head_block))
                    .or(Err(Web3ProxyError::WatchSendError))
                    .web3_context(
                        "watch_consensus_head_sender failed sending first consensus_head_block",
                    )?;
            }
            Some(old_consensus_connections) => {
                let old_head_block = &old_consensus_connections.head_block;

                // TODO: do this log item better
                let rpc_head_str = new_block
                    .map(|x| x.to_string())
                    .unwrap_or_else(|| "None".to_string());

                match consensus_head_block.number().cmp(old_head_block.number()) {
                    Ordering::Equal => {
                        // multiple blocks at the same height!
                        if consensus_head_block.hash() == old_head_block.hash() {
                            // no change in hash. no need to use watch_consensus_head_sender
                            // TODO: trace level if rpc is backup
                            debug!(
                                "con {}/{} {}{}/{}/{} con={} rpc={}@{}",
                                consensus_tier,
                                total_tiers,
                                backups_voted_str,
                                num_consensus_rpcs,
                                num_active_rpcs,
                                total_rpcs,
                                consensus_head_block,
                                rpc,
                                rpc_head_str,
                            )
                        } else {
                            // hash changed
                            debug!(
                                "unc {}/{} {}{}/{}/{} con_head={} old={} rpc={}@{}",
                                consensus_tier,
                                total_tiers,
                                backups_voted_str,
                                num_consensus_rpcs,
                                num_active_rpcs,
                                total_rpcs,
                                consensus_head_block,
                                old_head_block,
                                rpc,
                                rpc_head_str,
                            );

                            let consensus_head_block = self
                                .try_cache_block(consensus_head_block, true)
                                .await
                                .web3_context("save consensus_head_block as heaviest chain")?;

                            watch_consensus_head_sender
                                .send(Some(consensus_head_block))
                                .or(Err(Web3ProxyError::WatchSendError))
                                .web3_context("watch_consensus_head_sender failed sending uncled consensus_head_block")?;
                        }
                    }
                    Ordering::Less => {
                        // this is unlikely but possible
                        // TODO: better log
                        warn!(
                            "chain rolled back {}/{} {}{}/{}/{} con={} old={} rpc={}@{}",
                            consensus_tier,
                            total_tiers,
                            backups_voted_str,
                            num_consensus_rpcs,
                            num_active_rpcs,
                            total_rpcs,
                            consensus_head_block,
                            old_head_block,
                            rpc,
                            rpc_head_str,
                        );

                        if backups_needed {
                            // TODO: what else should be in this error?
                            warn!("Backup RPCs are in use!");
                        }

                        // TODO: tell save_block to remove any higher block numbers from the cache. not needed because we have other checks on requested blocks being > head, but still seems like a good idea
                        let consensus_head_block = self
                            .try_cache_block(consensus_head_block, true)
                            .await
                            .web3_context(
                                "save_block sending consensus_head_block as heaviest chain",
                            )?;

                        watch_consensus_head_sender
                            .send(Some(consensus_head_block))
                            .or(Err(Web3ProxyError::WatchSendError))
                            .web3_context("watch_consensus_head_sender failed sending rollback consensus_head_block")?;
                    }
                    Ordering::Greater => {
                        debug!(
                            "new {}/{} {}{}/{}/{} con={} rpc={}@{}",
                            consensus_tier,
                            total_tiers,
                            backups_voted_str,
                            num_consensus_rpcs,
                            num_active_rpcs,
                            total_rpcs,
                            consensus_head_block,
                            rpc,
                            rpc_head_str,
                        );

                        if backups_needed {
                            // TODO: what else should be in this error?
                            warn!("Backup RPCs are in use!");
                        }

                        let consensus_head_block =
                            self.try_cache_block(consensus_head_block, true).await?;

                        watch_consensus_head_sender
                            .send(Some(consensus_head_block))
                            .or(Err(Web3ProxyError::WatchSendError))
                            .web3_context("watch_consensus_head_sender failed sending new consensus_head_block")?;
                    }
                }
            }
        }

        Ok(())
    }
}