less max lag

parent 2a242fe37f
commit fc8ca4ba4f
@@ -131,7 +131,7 @@ Flame graphs make a developer's job of finding slow code painless:
 4
 
 $ echo -1 | sudo tee /proc/sys/kernel/perf_event_paranoid
 -1
 
-$ CARGO_PROFILE_RELEASE_DEBUG=true cargo flamegraph --bin web3_proxy --no-inline
+$ CARGO_PROFILE_RELEASE_DEBUG=true cargo flamegraph --bin web3_proxy_cli --no-inline -- proxyd
 
 Be sure to use `--no-inline` or perf will be VERY slow
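A note on those flags: writing `-1` to `/proc/sys/kernel/perf_event_paranoid` lifts the kernel's perf restrictions for unprivileged users, `CARGO_PROFILE_RELEASE_DEBUG=true` keeps debug symbols in the release build so the flame graph gets readable frame names, and `--no-inline` tells cargo-flamegraph to skip inline-frame resolution in `perf script`, which is the step that otherwise makes it so slow. The new invocation reflects that the proxy now runs as the `proxyd` subcommand of the `web3_proxy_cli` binary, passed through after `--`.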
@@ -436,6 +436,8 @@ impl Web3Rpcs {
             Ok(Some(x)) => x,
         };
 
+        trace!("new_synced_connections: {:?}", new_synced_connections);
+
         let watch_consensus_head_sender = self.watch_consensus_head_sender.as_ref().unwrap();
         let consensus_tier = new_synced_connections.tier;
         // TODO: think more about this unwrap
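The `trace!` lines added throughout this commit only appear when trace-level logging is enabled. A minimal standalone sketch of how such a line surfaces, assuming the `log` facade with `env_logger` (the exact logger wiring in web3_proxy may differ):

```rust
use log::trace;

fn main() {
    // run with RUST_LOG=trace to see the message; at higher levels it is skipped
    env_logger::init();

    // stand-in value for the real new_synced_connections struct
    let new_synced_connections = vec!["rpc-a", "rpc-b"];
    trace!("new_synced_connections: {:?}", new_synced_connections);
}
```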
@@ -218,8 +218,8 @@ impl ConsensusFinder {
 
         trace!("lowest_block_number: {}", lowest_block.number());
 
-        let max_lag_block_number = highest_block_number
-            .saturating_sub(self.max_block_lag.unwrap_or_else(|| U64::from(10)));
+        let max_lag_block_number =
+            highest_block_number.saturating_sub(self.max_block_lag.unwrap_or_else(|| U64::from(5)));
 
         trace!("max_lag_block_number: {}", max_lag_block_number);
 
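The substantive change in this hunk is the default `max_block_lag` dropping from 10 blocks to 5, which is what the commit title refers to. A standalone sketch of the cutoff arithmetic, assuming `ethers`' re-exported `U64` (consistent with the `U64::from` calls above):

```rust
use ethers::types::U64;

/// Heads older than this cutoff are too far behind the best block to count.
fn max_lag_block_number(highest_block_number: U64, max_block_lag: Option<U64>) -> U64 {
    // saturating_sub avoids an underflow panic near genesis
    highest_block_number.saturating_sub(max_block_lag.unwrap_or_else(|| U64::from(5)))
}

fn main() {
    // with the new default of 5, a head at block 100 accepts rpcs back to block 95
    assert_eq!(max_lag_block_number(U64::from(100), None), U64::from(95));
    // near genesis the subtraction saturates at 0 instead of underflowing
    assert_eq!(max_lag_block_number(U64::from(3), None), U64::from(0));
}
```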
@@ -231,6 +231,7 @@ impl ConsensusFinder {
 
         if num_known < web3_rpcs.min_head_rpcs {
             // this keeps us from serving requests when the proxy first starts
+            trace!("not enough servers known");
             return Ok(None);
         }
 
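Returning `Ok(None)` here, rather than an error, treats "no consensus yet" as an expected state: until at least `min_head_rpcs` servers have reported a head, the finder simply declines to publish one, and the new trace line makes that startup phase visible in the logs.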
@@ -250,18 +251,22 @@ impl ConsensusFinder {
             .0
             .tier;
 
+        trace!("first_tier: {}", current_tier);
+
         // loop over all the rpc heads (grouped by tier) and their parents to find consensus
         // TODO: i'm sure theres a lot of shortcuts that could be taken, but this is simplest to implement
         for (rpc, rpc_head) in self.rpc_heads.iter() {
             if current_tier != rpc.tier {
                 // we finished processing a tier. check for primary results
                 if let Some(consensus) = self.count_votes(&primary_votes, web3_rpcs) {
+                    trace!("found enough votes on tier {}", current_tier);
                     return Ok(Some(consensus));
                 }
 
                 // only set backup consensus once. we don't want it to keep checking on worse tiers if it already found consensus
                 if backup_consensus.is_none() {
                     if let Some(consensus) = self.count_votes(&backup_votes, web3_rpcs) {
+                        trace!("found backup votes on tier {}", current_tier);
                         backup_consensus = Some(consensus)
                     }
                 }
 
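This loop relies on `self.rpc_heads` being ordered by tier: a tier is only tallied once iteration moves past it, a primary quorum at a better tier short-circuits the search, and a backup result is captured at most once so worse tiers cannot overwrite it. A compact standalone sketch of that control flow, where `Consensus`, the simplified `count_votes` signature, and the `quorum` parameter are hypothetical stand-ins for web3_proxy's real types:

```rust
// Sketches the tier-walk control flow only, not the real per-block tallying.
#[derive(Debug)]
struct Consensus {
    tier: u32,
    votes: usize,
}

/// Plays the role of count_votes above: Some(..) once the quorum is met.
fn count_votes(votes: usize, tier: u32, quorum: usize) -> Option<Consensus> {
    (votes >= quorum).then(|| Consensus { tier, votes })
}

/// Walk heads sorted by tier; a tier is tallied only after all its heads are seen.
fn find_consensus(heads: &[(u32, bool)], quorum: usize) -> Option<Consensus> {
    let mut current_tier = heads.first()?.0;
    let mut primary_votes = 0; // non-backup rpcs only
    let mut backup_votes = 0; // every rpc, backups included
    let mut backup_consensus = None;

    for &(tier, is_backup) in heads {
        if current_tier != tier {
            // we finished processing a tier. check for primary results first
            if let Some(consensus) = count_votes(primary_votes, current_tier, quorum) {
                return Some(consensus);
            }
            // only set backup consensus once; worse tiers must not overwrite it
            if backup_consensus.is_none() {
                backup_consensus = count_votes(backup_votes, current_tier, quorum);
            }
            current_tier = tier;
        }
        backup_votes += 1;
        if !is_backup {
            primary_votes += 1;
        }
    }

    // the final tier never hits the tier-change branch, so tally it after the loop
    count_votes(primary_votes, current_tier, quorum).or(backup_consensus)
}

fn main() {
    // two primaries at tier 0, then a backup and two primaries at tier 1
    let heads = [(0, false), (0, false), (1, true), (1, false), (1, false)];
    // tier 0 misses the quorum of 3; by the end of tier 1 the primaries reach it
    println!("{:?}", find_consensus(&heads, 3));
}
```

The sketch deliberately lets votes accumulate as the walk descends tiers, so a head seen at a better tier still counts toward quorum at a worse one; whether the real `count_votes` maps behave exactly that way is an assumption here.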