test more

Bryan Stitt 2022-07-23 00:19:13 +00:00
parent 0b184ae9c9
commit 430bae67cd
4 changed files with 46 additions and 7 deletions


@@ -51,7 +51,8 @@
- i think now that we retry header not found and similar, caching errors should be fine
- [x] RESPONSE_CACHE_CAP from config
- [x] web3_sha3 rpc command
- [ ] test that launches anvil and connects the proxy to it
- [x] test that launches anvil and connects the proxy to it and does some basic queries
- [x] need to have some sort of shutdown signaling. doesn't need to be graceful at this point, but should be eventually
- [ ] if the fastest server has hit rate limits, we won't be able to serve any traffic until another server is synced.
- thundering herd problem if we only allow a lag of 0 blocks
- we can improve this by only `publish`ing the sorted list once a threshold of total available soft and hard limits is passed. how can we do this without hammering redis? at least it's only once per block per server (see the sketch just below)
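
A minimal sketch of that threshold check (the `Server` struct and `publishable` helper here are hypothetical, not types from this repo): only publish a new sorted list once the servers at the head block hold some fraction of the total soft limit, so one fast server can't trigger a publish while the rest lag.

```rust
// hypothetical sketch: only `publish` once enough combined capacity
// has reached the new head block
struct Server {
    soft_limit: u64, // requests/second this server can comfortably take
    head_block: u64, // latest block this server has synced
}

/// true if the servers synced to `head_block` hold at least `threshold`
/// (e.g. 0.5) of the total soft limit
fn publishable(servers: &[Server], head_block: u64, threshold: f64) -> bool {
    let total: u64 = servers.iter().map(|s| s.soft_limit).sum();
    let synced: u64 = servers
        .iter()
        .filter(|s| s.head_block >= head_block)
        .map(|s| s.soft_limit)
        .sum();
    total > 0 && synced as f64 / total as f64 >= threshold
}
```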
@@ -178,3 +179,8 @@ in another repo: event subscriber
- [ ] stats for "read amplification". how many backend requests do we send compared to frontend requests we receive? (sketched below)
- [ ] fully test retrying when "header not found"
- i saw "header not found" on a simple eth_getCode query to a public load balanced bsc archive node on block 1
- [ ] weird flapping fork could have more useful logs. like, how'd we get to 1/1/4 and a fork? geth changed its mind 3 times?
2022-07-22T23:52:18.593956Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0xa906…5bc1 rpc=Web3Connection { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517
2022-07-22T23:52:18.983441Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0x70e8…48e0 rpc=Web3Connection { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517
2022-07-22T23:52:19.350720Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 1/2/4 rpcs have 0x70e8…48e0 rpc=Web3Connection { url: "ws://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517
2022-07-22T23:52:26.041140Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 2/4/4 rpcs have 0x70e8…48e0 rpc=Web3Connection { url: "http://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517
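
A sketch of the "read amplification" stat from the list above (the `RequestStats` type and its methods are illustrative, not code from this repo): two atomic counters and a ratio.

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// hypothetical sketch: count both sides and report the ratio
#[derive(Default)]
struct RequestStats {
    frontend_requests: AtomicU64,
    backend_requests: AtomicU64,
}

impl RequestStats {
    fn record_frontend(&self) {
        self.frontend_requests.fetch_add(1, Ordering::Relaxed);
    }

    fn record_backend(&self) {
        self.backend_requests.fetch_add(1, Ordering::Relaxed);
    }

    /// backend requests sent per frontend request received
    fn read_amplification(&self) -> f64 {
        let frontend = self.frontend_requests.load(Ordering::Relaxed);
        let backend = self.backend_requests.load(Ordering::Relaxed);
        if frontend == 0 {
            0.0
        } else {
            backend as f64 / frontend as f64
        }
    }
}
```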


@@ -220,6 +220,8 @@ impl Web3Connection {
// we could take "archive" as a parameter, but we would want a safety check on it regardless
// check common archive thresholds
// TODO: would be great if rpcs exposed this
// TODO: move this to a helper function so we can recheck on errors or as the chain grows
for block_data_limit in [u64::MAX, 90_000, 128, 64, 32] {
let mut head_block_num = new_connection.head_block.read().1;
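
The threshold walk above could become the helper that TODO asks for; a rough sketch, with a hypothetical `can_serve` probe standing in for whatever request is actually used to test a block:

```rust
// hypothetical helper: walk common archive thresholds from largest to
// smallest and keep the first one the node can actually serve
fn detect_block_data_limit(head_block_num: u64, can_serve: impl Fn(u64) -> bool) -> u64 {
    for block_data_limit in [u64::MAX, 90_000, 128, 64, 32] {
        // u64::MAX marks a full archive node; saturating_sub keeps the
        // probe block at 0 instead of underflowing
        let trial_block = head_block_num.saturating_sub(block_data_limit);
        if can_serve(trial_block) {
            return block_data_limit;
        }
    }
    // nothing succeeded: assume no historical state at all
    0
}
```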


@@ -304,7 +304,7 @@ impl Web3Connections {
}
}
/// dedupe transactions and send them to any listening clients
/// dedupe transaction and send them to any listening clients
async fn funnel_transaction(
self: Arc<Self>,
rpc: Arc<Web3Connection>,
@@ -376,13 +376,11 @@ impl Web3Connections {
let clone = self.clone();
let handle = task::spawn(async move {
while let Ok((pending_tx_id, rpc)) = pending_tx_id_receiver.recv_async().await {
// TODO: spawn this
let f = clone.clone().funnel_transaction(
rpc,
pending_tx_id,
pending_tx_sender.clone(),
);
tokio::spawn(f);
}
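
Standalone, that loop's spawn-per-transaction shape looks like this (a sketch using flume and tokio with a plain u64 id, not the repo's real types): spawning instead of awaiting inline means one slow lookup never stalls the channel drain.

```rust
// hypothetical sketch of the spawn-per-message pattern above
async fn drain_pending_txs(receiver: flume::Receiver<u64>) {
    while let Ok(pending_tx_id) = receiver.recv_async().await {
        // spawn so a slow dedupe/lookup doesn't block the next recv
        tokio::spawn(async move {
            println!("handling pending tx {pending_tx_id}");
        });
    }
    // the loop ends once every sender has been dropped
}
```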


@@ -142,7 +142,7 @@ fn main() -> anyhow::Result<()> {
#[cfg(test)]
mod tests {
use ethers::{
prelude::{Http, Provider, U256},
prelude::{Block, Http, Provider, TxHash, U256},
utils::Anvil,
};
use hashbrown::HashMap;
@@ -176,12 +176,12 @@ mod tests {
// mine a block because my code doesn't like being on block 0
// TODO: make block 0 okay?
let head_block_num: U256 = provider.request("evm_mine", None::<()>).await.unwrap();
let _: U256 = provider.request("evm_mine", None::<()>).await.unwrap();
// make a test CliConfig
let cli_config = CliConfig {
port: 0,
workers: 2,
workers: 4,
config: "./does/not/exist/test.toml".to_string(),
};
@@ -213,10 +213,43 @@ mod tests {
let handle = thread::spawn(move || run(shutdown_receiver, cli_config, app_config));
// TODO: do something to the node. query latest block, mine another block, query again
let proxy_provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
// TODO: this still points at anvil; swap in the proxy's endpoint once the app exposes its bound port
let anvil_result: Block<TxHash> = provider
.request("eth_getBlockByNumber", ("latest", false))
.await
.unwrap();
let proxy_result: Block<TxHash> = proxy_provider
.request("eth_getBlockByNumber", ("latest", false))
.await
.unwrap();
assert_eq!(anvil_result, proxy_result);
let first_block_num = anvil_result.number.unwrap();
let _: U256 = provider.request("evm_mine", None::<()>).await.unwrap();
let anvil_result: Block<TxHash> = provider
.request("eth_getBlockByNumber", ("latest", false))
.await
.unwrap();
let proxy_result: Block<TxHash> = proxy_provider
.request("eth_getBlockByNumber", ("latest", false))
.await
.unwrap();
assert_eq!(anvil_result, proxy_result);
let second_block_num = anvil_result.number.unwrap();
assert_ne!(first_block_num, second_block_num);
// tell the test app to shut down
shutdown_sender.send(()).unwrap();
println!("waiting for shutdown...");
// TODO: timeout or panic
handle.join().unwrap().unwrap();
}
}
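
The shutdown handshake the test depends on is just a channel plus a thread join; isolated, it looks roughly like this (the `run` stand-in is hypothetical, and a flume channel is assumed since the rest of the codebase uses flume; the real `run` boots the whole proxy):

```rust
use std::thread;

// hypothetical stand-in for the real `run`: block until shutdown is signaled
fn run(shutdown_receiver: flume::Receiver<()>) -> anyhow::Result<()> {
    shutdown_receiver.recv()?;
    Ok(())
}

fn main() -> anyhow::Result<()> {
    let (shutdown_sender, shutdown_receiver) = flume::unbounded();

    let handle = thread::spawn(move || run(shutdown_receiver));

    // ... exercise the app here ...

    // tell the app to shut down, then wait for it to finish
    shutdown_sender.send(()).unwrap();
    handle.join().unwrap()
}
```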