more chains
parent 97b4e9f800
commit e4637866aa

TODO.md | 8 ++++++++
@@ -22,3 +22,11 @@ I believe this is because we don't know the first block. we should force an update
 it loses all the "jsonrpc" parts and just has the simple result. need to return a proper jsonrpc response
 
 # TODO: add the backend server to the header
+
+# random thoughts:
+
+the web3proxyapp object gets cloned for every call. why do we need any arcs inside that? shouldn't they be able to connect to the app's?
+
+on friday i had it over 100k rps. but now, even when i roll back to that commit, i can't get it that high. what changed?
+
+i think we need a top level head block. otherwise if tier0 stalls, we will keep using it
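One of the new notes above asks why Web3ProxyApp needs Arcs inside it when the object is cloned for every call. Here is a minimal sketch of the alternative it hints at: put the whole app behind a single Arc so a per-call clone is just a refcount bump. The type and field names are hypothetical stand-ins, not the repo's actual definitions.

```rust
use std::sync::Arc;

// hypothetical inner state: shared once, no per-field Arcs needed
struct Web3ProxyAppInner {
    balanced_rpc_tiers: Vec<String>,
    private_rpcs: Vec<String>,
}

// cloning this only bumps the Arc's refcount
#[derive(Clone)]
struct Web3ProxyApp(Arc<Web3ProxyAppInner>);

fn handle_call(app: Web3ProxyApp) {
    // every handler sees the same inner state through the shared Arc
    let _tiers = &app.0.balanced_rpc_tiers;
}

fn main() {
    let app = Web3ProxyApp(Arc::new(Web3ProxyAppInner {
        balanced_rpc_tiers: vec!["tier0".to_string()],
        private_rpcs: vec![],
    }));
    handle_call(app.clone());
    handle_call(app);
}
```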
@@ -24,7 +24,7 @@ pub struct CliConfig {
 pub struct RpcConfig {
     // BTreeMap so that iterating keeps the same order
     pub balanced_rpc_tiers: BTreeMap<String, HashMap<String, Web3ConnectionConfig>>,
-    pub private_rpcs: HashMap<String, Web3ConnectionConfig>,
+    pub private_rpcs: Option<HashMap<String, Web3ConnectionConfig>>,
 }
 
 #[derive(Deserialize)]
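With `private_rpcs` now an `Option`, a config file that omits that table should deserialize to `None` instead of failing. A small sketch of that behavior with serde and the toml crate; `Web3ConnectionConfig` is simplified to a single `url` field here, which is an assumption rather than the repo's real struct.

```rust
use std::collections::{BTreeMap, HashMap};

use serde::Deserialize;

// simplified stand-in for the real Web3ConnectionConfig
#[derive(Debug, Deserialize)]
struct Web3ConnectionConfig {
    url: String,
}

#[derive(Debug, Deserialize)]
struct RpcConfig {
    // BTreeMap so that iterating keeps the same order
    balanced_rpc_tiers: BTreeMap<String, HashMap<String, Web3ConnectionConfig>>,
    // missing from the TOML -> None, thanks to Option
    private_rpcs: Option<HashMap<String, Web3ConnectionConfig>>,
}

fn main() {
    let cfg: RpcConfig = toml::from_str(
        r#"
        [balanced_rpc_tiers.tier0.local]
        url = "ws://127.0.0.1:8546"
        "#,
    )
    .unwrap();

    assert!(cfg.private_rpcs.is_none());
    println!("{cfg:?}");
}
```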
@@ -41,7 +41,12 @@ impl RpcConfig {
             .into_values()
             .map(|x| x.into_values().collect())
             .collect();
-        let private_rpcs = self.private_rpcs.into_values().collect();
+        let private_rpcs = if let Some(private_rpcs) = self.private_rpcs {
+            private_rpcs.into_values().collect()
+        } else {
+            vec![]
+        };
 
         Web3ProxyApp::try_new(balanced_rpc_tiers, private_rpcs).await
     }
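Style note, not a suggested change to the commit: the if-let/else above is equivalent to mapping over the Option and defaulting to an empty Vec, as in this standalone sketch.

```rust
use std::collections::HashMap;

// None becomes an empty Vec, same as the if-let/else in the hunk above
fn flatten_private_rpcs<T>(private_rpcs: Option<HashMap<String, T>>) -> Vec<T> {
    private_rpcs
        .map(|rpcs| rpcs.into_values().collect())
        .unwrap_or_default()
}

fn main() {
    let none: Option<HashMap<String, u64>> = None;
    assert!(flatten_private_rpcs(none).is_empty());
}
```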
@@ -146,6 +146,7 @@ impl Web3Connection {
                 // TODO: if error or rate limit, increase interval?
                 interval.tick().await;
 
+                // TODO: rate limit!
                 let block_number = provider.get_block_number().await.map(|x| x.as_u64())?;
 
                 // TODO: only store if this isn't already stored?
@@ -172,6 +173,7 @@ impl Web3Connection {
                 // query the block once since the subscription doesn't send the current block
                 // there is a very small race condition here where the stream could send us a new block right now
                 // all it does is print "new block" for the same block as current block
+                // TODO: rate limit!
                 let block_number = provider.get_block_number().await.map(|x| x.as_u64())?;
 
                 info!("current block on {}: {}", self, block_number);
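Both `// TODO: rate limit!` comments sit right before the `get_block_number` poll. The `not_until` handling in the last hunk hints that the governor crate is already in play, so here is a rough sketch of gating the poll with a direct (unkeyed) governor limiter; this is an illustration under that assumption, not code from this commit.

```rust
use std::num::NonZeroU32;

use governor::{Quota, RateLimiter};

// hypothetical poll loop: at most 10 head-block checks per second
async fn rate_limited_polls() {
    let limiter = RateLimiter::direct(Quota::per_second(NonZeroU32::new(10).unwrap()));

    for _ in 0..3 {
        // sleeps until a permit is free instead of returning a rate-limit error
        limiter.until_ready().await;
        // ... provider.get_block_number().await would go here ...
    }
}

#[tokio::main]
async fn main() {
    rate_limited_polls().await;
}
```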
@@ -88,9 +88,11 @@ impl Web3Connections {
             let connection = Arc::clone(connection);
             let connections = connections.clone();
             tokio::spawn(async move {
+                let url = connection.url().to_string();
+
                 // TODO: instead of passing Some(connections), pass Some(channel_sender). Then listen on the receiver below to keep local heads up-to-date
                 if let Err(e) = connection.new_heads(Some(connections)).await {
-                    warn!("new_heads error: {:?}", e);
+                    warn!("new_heads error on {}: {:?}", url, e);
                 }
             });
         }
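The TODO in this hunk sketches replacing `Some(connections)` with a channel sender, so each connection pushes its newest head and the parent task keeps a local view current. A rough illustration of that shape with a tokio mpsc channel; the `(url, block_number)` payload and the names are assumptions, not the commit's design.

```rust
use std::collections::HashMap;

use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    // every connection task gets a clone of the sender
    let (head_tx, mut head_rx) = mpsc::unbounded_channel::<(String, u64)>();

    for url in ["ws://a.example", "ws://b.example"] {
        let head_tx = head_tx.clone();
        tokio::spawn(async move {
            // stand-in for the new_heads subscription reporting a block
            head_tx.send((url.to_string(), 15_000_000)).ok();
        });
    }
    drop(head_tx); // so the receive loop ends once all senders are gone

    // the parent keeps local heads up-to-date from the receiver
    let mut heads: HashMap<String, u64> = HashMap::new();
    while let Some((url, block)) = head_rx.recv().await {
        heads.insert(url, block);
    }
    println!("{heads:?}");
}
```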
@@ -213,7 +213,7 @@ impl Web3ProxyApp {
                 }
                 Err(None) => {
                     // TODO: this is too verbose. if there are other servers in other tiers, use those!
-                    warn!("No servers in sync!");
+                    // warn!("No servers in sync!");
                 }
                 Err(Some(not_until)) => {
                     // save the smallest not_until. if nothing succeeds, return an Err with not_until in it
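The comment in this last hunk says to save the smallest `not_until` so the eventual error can tell the caller the earliest time a retry might succeed. A minimal sketch of that min-tracking pattern, using `Instant` as a stand-in for the limiter's deadline type since the concrete type isn't visible in this hunk.

```rust
use std::time::{Duration, Instant};

// Ok on the first success, otherwise the earliest retry deadline seen (if any)
fn first_success_or_earliest_retry<T>(
    attempts: Vec<Result<T, Instant>>,
) -> Result<T, Option<Instant>> {
    let mut earliest: Option<Instant> = None;
    for attempt in attempts {
        match attempt {
            Ok(value) => return Ok(value),
            Err(not_until) => {
                // keep whichever deadline comes first
                earliest = Some(match earliest {
                    Some(current) if current <= not_until => current,
                    _ => not_until,
                });
            }
        }
    }
    Err(earliest)
}

fn main() {
    let now = Instant::now();
    let attempts: Vec<Result<u64, Instant>> = vec![
        Err(now + Duration::from_secs(2)),
        Err(now + Duration::from_secs(1)),
    ];
    assert_eq!(
        first_success_or_earliest_retry(attempts),
        Err(Some(now + Duration::from_secs(1)))
    );
}
```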