From e4637866aa8a5a1c5bc5edef908101f8ae8bfef1 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Thu, 5 May 2022 22:21:27 +0000
Subject: [PATCH] more chains

---
 TODO.md                       | 8 ++++++++
 web3-proxy/src/config.rs      | 9 +++++++--
 web3-proxy/src/connection.rs  | 2 ++
 web3-proxy/src/connections.rs | 4 +++-
 web3-proxy/src/main.rs        | 2 +-
 5 files changed, 21 insertions(+), 4 deletions(-)

diff --git a/TODO.md b/TODO.md
index 126bb15e..fcec1baf 100644
--- a/TODO.md
+++ b/TODO.md
@@ -22,3 +22,11 @@ I believe this is because we don't know the first block. we should force an upda
 it loses all the "jsonrpc" parts and just has the simple result. need to return a proper jsonrpc response
 
 # TODO: add the backend server to the header
+
+# random thoughts:
+
+the web3proxyapp object gets cloned for every call. why do we need any arcs inside that? shouldn't they be able to connect to the app's?
+
+on friday i had it over 100k rps. but now, even when i roll back to that commit, i can't get it that high. what changed?
+
+i think we need a top level head block. otherwise if tier0 stalls, we will keep using it
\ No newline at end of file
diff --git a/web3-proxy/src/config.rs b/web3-proxy/src/config.rs
index a377d158..a3c5bc50 100644
--- a/web3-proxy/src/config.rs
+++ b/web3-proxy/src/config.rs
@@ -24,7 +24,7 @@ pub struct CliConfig {
 pub struct RpcConfig {
     // BTreeMap so that iterating keeps the same order
     pub balanced_rpc_tiers: BTreeMap<String, HashMap<String, Web3ConnectionConfig>>,
-    pub private_rpcs: HashMap<String, Web3ConnectionConfig>,
+    pub private_rpcs: Option<HashMap<String, Web3ConnectionConfig>>,
 }
 
 #[derive(Deserialize)]
@@ -41,7 +41,12 @@ impl RpcConfig {
             .into_values()
             .map(|x| x.into_values().collect())
             .collect();
-        let private_rpcs = self.private_rpcs.into_values().collect();
+
+        let private_rpcs = if let Some(private_rpcs) = self.private_rpcs {
+            private_rpcs.into_values().collect()
+        } else {
+            vec![]
+        };
 
         Web3ProxyApp::try_new(balanced_rpc_tiers, private_rpcs).await
     }
diff --git a/web3-proxy/src/connection.rs b/web3-proxy/src/connection.rs
index 28f211cf..4c6877df 100644
--- a/web3-proxy/src/connection.rs
+++ b/web3-proxy/src/connection.rs
@@ -146,6 +146,7 @@ impl Web3Connection {
                     // TODO: if error or rate limit, increase interval?
                     interval.tick().await;
 
+                    // TODO: rate limit!
                     let block_number = provider.get_block_number().await.map(|x| x.as_u64())?;
 
                     // TODO: only store if this isn't already stored?
@@ -172,6 +173,7 @@ impl Web3Connection {
                 // query the block once since the subscription doesn't send the current block
                 // there is a very small race condition here where the stream could send us a new block right now
                 // all it does is print "new block" for the same block as current block
+                // TODO: rate limit!
                 let block_number = provider.get_block_number().await.map(|x| x.as_u64())?;
 
                 info!("current block on {}: {}", self, block_number);
diff --git a/web3-proxy/src/connections.rs b/web3-proxy/src/connections.rs
index 3ac2c8a4..0f3ff62d 100644
--- a/web3-proxy/src/connections.rs
+++ b/web3-proxy/src/connections.rs
@@ -88,9 +88,11 @@ impl Web3Connections {
             let connection = Arc::clone(connection);
             let connections = connections.clone();
             tokio::spawn(async move {
+                let url = connection.url().to_string();
+
                 // TODO: instead of passing Some(connections), pass Some(channel_sender). Then listen on the receiver below to keep local heads up-to-date
                 if let Err(e) = connection.new_heads(Some(connections)).await {
-                    warn!("new_heads error: {:?}", e);
+                    warn!("new_heads error on {}: {:?}", url, e);
                 }
             });
         }
diff --git a/web3-proxy/src/main.rs b/web3-proxy/src/main.rs
index 728bc638..af85c291 100644
--- a/web3-proxy/src/main.rs
+++ b/web3-proxy/src/main.rs
@@ -213,7 +213,7 @@ impl Web3ProxyApp {
             }
             Err(None) => {
                 // TODO: this is too verbose. if there are other servers in other tiers, use those!
-                warn!("No servers in sync!");
+                // warn!("No servers in sync!");
             }
             Err(Some(not_until)) => {
                 // save the smallest not_until. if nothing succeeds, return an Err with not_until in it
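
Note (reviewer sketch, not part of the patch): the connections.rs TODO wants each
connection task to send new heads over a channel instead of holding Some(connections),
and TODO.md wants a top level head block. A minimal version of both ideas, assuming
tokio's mpsc channel and hypothetical names (NewHead, head_block_listener):

    use std::collections::HashMap;
    use tokio::sync::mpsc;

    /// hypothetical message: which server saw which head block
    type NewHead = (String, u64);

    /// a single task owns all head-block state, so the connection tasks
    /// don't need an Arc<Web3Connections>
    async fn head_block_listener(mut rx: mpsc::Receiver<NewHead>) {
        // per-server heads plus a single top-level head block.
        // if tier0 stalls, the top-level head still advances from other tiers.
        let mut heads: HashMap<String, u64> = HashMap::new();
        let mut top_head = 0u64;

        while let Some((url, block_number)) = rx.recv().await {
            heads.insert(url, block_number);
            top_head = top_head.max(block_number);
            // TODO: treat servers more than N blocks behind top_head as lagging
        }
    }

Each spawned connection task would then hold a tx.clone() and send (url, block_number)
for every new head, instead of calling back into the shared Web3Connections.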
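
Note (reviewer sketch, not part of the patch): both "TODO: rate limit!" sites wrap
provider.get_block_number(). One simple approach, using only tokio::time and a
hypothetical SimpleLimiter type, is to enforce a minimum spacing between calls:

    use std::time::Duration;
    use tokio::time::{interval, Interval};

    /// allows at most one call per `period`
    struct SimpleLimiter {
        interval: Interval,
    }

    impl SimpleLimiter {
        fn new(period: Duration) -> Self {
            Self {
                interval: interval(period),
            }
        }

        async fn throttle(&mut self) {
            // the first tick completes immediately; later ticks wait out the period
            self.interval.tick().await;
        }
    }

Calling limiter.throttle().await before each get_block_number() bounds the request
volume even on the websocket subscription path, where there is no polling interval.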