//! Load balanced communication with a group of web3 providers

use crate::frontend::authorization::Authorization;
use super::many::Web3Rpcs;
use super::one::Web3Rpc;
use super::request::OpenRequestResult;
use ethers::prelude::{ProviderError, Transaction, TxHash};
use log::{debug, trace, Level};
use std::sync::Arc;
use tokio::sync::broadcast;

// TODO: think more about TxState
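/// The observed state of a transaction, as reported by an upstream rpc.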
#[derive(Clone)]
pub enum TxStatus {
    Pending(Transaction),
    Confirmed(Transaction),
    Orphaned(Transaction),
}

impl Web3Rpcs {
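    /// Fetch a transaction by hash from a single rpc and map it to a [`TxStatus`].
    ///
    /// Returns `Ok(None)` if a request handle could not be acquired from the rpc.
    /// Errors from the `eth_getTransactionByHash` request itself are propagated to the caller.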
    async fn query_transaction_status(
        &self,
        authorization: &Arc<Authorization>,
        rpc: Arc<Web3Rpc>,
        pending_tx_id: TxHash,
    ) -> Result<Option<TxStatus>, ProviderError> {
        // TODO: there is a race here on geth. sometimes the rpc isn't yet ready to serve the transaction (even though they told us about it!)
        // TODO: might not be a race. might be a nonce that's higher than the current account nonce. geth discards chains
        // TODO: yearn devs have had better luck with batching these, but I think that's likely just adding a delay itself
        // TODO: if one rpc fails, try another?
        // TODO: try_request_handle, or wait_for_request_handle? I think we want wait here
        let tx: Transaction = match rpc.try_request_handle(authorization, None).await {
            Ok(OpenRequestResult::Handle(handle)) => {
                handle
                    .request(
                        "eth_getTransactionByHash",
                        &(pending_tx_id,),
                        Level::Error.into(),
                        None,
                    )
                    .await?
            }
            Ok(_) => {
                // TODO: actually retry?
                return Ok(None);
            }
            Err(err) => {
                trace!(
                    "cancelled funneling transaction {} from {}: {:?}",
                    pending_tx_id,
                    rpc,
                    err,
                );
                return Ok(None);
            }
        };

        match &tx.block_hash {
            Some(_block_hash) => {
                // the transaction is already confirmed. no need to save it in the pending_transactions map
                Ok(Some(TxStatus::Confirmed(tx)))
            }
            None => Ok(Some(TxStatus::Pending(tx))),
        }
    }

    /// dedupe transactions and send them to any listening clients
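    /// Typically spawned once per transaction hash that an rpc announces as pending.
    ///
    /// A minimal sketch of how a caller might drive this (the `web3_rpcs`, `authorization`,
    /// `rpc`, and `pending_tx_id` bindings are assumed to already exist in the caller's scope):
    ///
    /// ```ignore
    /// let (pending_tx_sender, mut pending_tx_receiver) = tokio::sync::broadcast::channel(1024);
    ///
    /// tokio::spawn(web3_rpcs.clone().process_incoming_tx_id(
    ///     authorization.clone(),
    ///     rpc.clone(),
    ///     pending_tx_id,
    ///     pending_tx_sender.clone(),
    /// ));
    ///
    /// while let Ok(tx_status) = pending_tx_receiver.recv().await {
    ///     // forward the TxStatus to any subscribed clients
    /// }
    /// ```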
    pub(super) async fn process_incoming_tx_id(
        self: Arc<Self>,
        authorization: Arc<Authorization>,
        rpc: Arc<Web3Rpc>,
        pending_tx_id: TxHash,
        pending_tx_sender: broadcast::Sender<TxStatus>,
    ) -> anyhow::Result<()> {
        // TODO: how many retries? until some timestamp is hit is probably better. maybe just loop and call this with a timeout
        // TODO: after more investigation, I don't think retries will help. I think this is because chains of transactions get dropped from memory
        // TODO: also check the "confirmed transactions" mapping? maybe one shared mapping with TxState in it?

        if pending_tx_sender.receiver_count() == 0 {
            // no receivers, so there is no point in querying for the full transaction
            return Ok(());
        }

        // trace!(?pending_tx_id, "checking pending_transactions on {}", rpc);
        if self.pending_transaction_cache.get(&pending_tx_id).is_some() {
            // this transaction has already been processed
            return Ok(());
        }

        // query the rpc for this transaction
        // it is possible that another rpc is also being queried. that's fine. we want the fastest response
        match self
            .query_transaction_status(&authorization, rpc.clone(), pending_tx_id)
            .await
        {
            Ok(Some(tx_state)) => {
                let _ = pending_tx_sender.send(tx_state);

                trace!("sent tx {:?}", pending_tx_id);

                // we sent the transaction. return now. don't break looping because that gives a warning
                return Ok(());
            }
            Ok(None) => {}
            Err(err) => {
                trace!("failed fetching transaction {:?}: {:?}", pending_tx_id, err);
                // unable to update the entry. sleep and try again soon
                // TODO: retry with exponential backoff with jitter starting from a much smaller time
                // sleep(Duration::from_millis(100)).await;
            }
        }

        // warn is too loud. this is somewhat common
        // "There is a Pending txn with a lower account nonce. This txn can only be executed after confirmation of the earlier Txn Hash#"
        // sometimes it's been pending for many hours
        // sometimes it's maybe something else?
        debug!("txid {} not found on {}", pending_tx_id, rpc);

        Ok(())
    }
}