add archive depth to app config

Bryan Stitt 2023-02-05 17:58:03 -08:00
parent 37830f1156
commit 6d959e2c1f
3 changed files with 23 additions and 15 deletions

View File

@@ -32,7 +32,6 @@ use futures::stream::{FuturesUnordered, StreamExt};
use hashbrown::{HashMap, HashSet};
use ipnet::IpNet;
use log::{debug, error, info, trace, warn, Level};
- use metered::{metered, ErrorCount, HitCount, ResponseTime, Throughput};
use migration::sea_orm::{
self, ConnectionTrait, Database, DatabaseConnection, EntityTrait, PaginatorTrait,
};
@@ -365,7 +364,6 @@ pub struct Web3ProxyAppSpawn {
pub background_handles: FuturesUnordered<AnyhowJoinHandle<()>>,
}
- // #[metered(registry = Web3ProxyAppMetrics, registry_expr = self.app_metrics, visibility = pub)]
impl Web3ProxyApp {
/// The main entrypoint.
pub async fn spawn(
@@ -1469,12 +1467,12 @@ impl Web3ProxyApp {
block_num,
cache_errors,
} => {
- let (request_block_hash, archive_needed) = self
+ let (request_block_hash, block_depth) = self
.balanced_rpcs
.block_hash(authorization, &block_num)
.await?;
- if archive_needed {
+ if block_depth < self.config.archive_depth {
request_metadata
.archive_request
.store(true, atomic::Ordering::Relaxed);
@@ -1499,12 +1497,12 @@
to_block_num,
cache_errors,
} => {
- let (from_block_hash, archive_needed) = self
+ let (from_block_hash, block_depth) = self
.balanced_rpcs
.block_hash(authorization, &from_block_num)
.await?;
- if archive_needed {
+ if block_depth < self.config.archive_depth {
request_metadata
.archive_request
.store(true, atomic::Ordering::Relaxed);
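
Taken together, this first file's change swaps the boolean archive_needed returned by block_hash for a numeric block_depth and compares it against the new archive_depth setting. A minimal sketch of that call-site pattern, using trimmed-down stand-in structs rather than the project's real AppConfig and RequestMetadata:

use std::sync::atomic::{AtomicBool, Ordering};

// Trimmed-down stand-ins for the structs touched in this diff (sketch only).
struct AppConfig {
    archive_depth: u64,
}

struct RequestMetadata {
    archive_request: AtomicBool,
}

// Mirrors the comparison exactly as written in this commit: the request is
// flagged as an archive request based on block_depth vs. the configured depth.
fn flag_archive_request(block_depth: u64, config: &AppConfig, metadata: &RequestMetadata) {
    if block_depth < config.archive_depth {
        metadata.archive_request.store(true, Ordering::Relaxed);
    }
}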

View File

@@ -59,6 +59,10 @@ pub struct AppConfig {
#[serde(default = "default_allowed_origin_requests_per_period")]
pub allowed_origin_requests_per_period: HashMap<String, u64>,
+ /// erigon defaults to pruning beyond 90,000 blocks
+ #[serde(default = "default_archive_depth")]
+ pub archive_depth: u64,
/// EVM chain id. 1 for ETH
/// TODO: better type for chain_id? max of `u64::MAX / 2 - 36` <https://github.com/ethereum/EIPs/issues/2294>
pub chain_id: u64,
@@ -159,6 +163,10 @@ pub struct AppConfig {
pub extra: HashMap<String, serde_json::Value>,
}
+ fn default_archive_depth() -> u64 {
+ 90_000
+ }
fn default_allowed_origin_requests_per_period() -> HashMap<String, u64> {
HashMap::new()
}
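
The new field follows the same pattern as the existing defaults: when archive_depth is missing from the config, serde falls back to default_archive_depth(). A self-contained sketch of that behavior, assuming a TOML config format and a cut-down AppConfig with only two fields:

use serde::Deserialize;

// Cut-down stand-in for AppConfig, keeping just enough to show the default.
#[derive(Deserialize)]
struct AppConfig {
    /// erigon defaults to pruning beyond 90,000 blocks
    #[serde(default = "default_archive_depth")]
    archive_depth: u64,
    chain_id: u64,
}

fn default_archive_depth() -> u64 {
    90_000
}

fn main() {
    // Key absent: serde calls default_archive_depth() and fills in 90_000.
    let cfg: AppConfig = toml::from_str("chain_id = 1").unwrap();
    assert_eq!(cfg.archive_depth, 90_000);

    // Key present: the configured value wins.
    let cfg: AppConfig = toml::from_str("chain_id = 1\narchive_depth = 64").unwrap();
    assert_eq!(cfg.archive_depth, 64);
}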

View File

@@ -190,12 +190,12 @@ impl Web3Connections {
&self,
authorization: &Arc<Authorization>,
num: &U64,
- ) -> anyhow::Result<(H256, bool)> {
- let (block, is_archive_block) = self.cannonical_block(authorization, num).await?;
+ ) -> anyhow::Result<(H256, u64)> {
+ let (block, block_depth) = self.cannonical_block(authorization, num).await?;
let hash = block.hash.expect("Saved blocks should always have hashes");
- Ok((hash, is_archive_block))
+ Ok((hash, block_depth))
}
/// Get the heaviest chain's block from cache or backend rpc
@@ -204,7 +204,7 @@
&self,
authorization: &Arc<Authorization>,
num: &U64,
- ) -> anyhow::Result<(ArcBlock, bool)> {
+ ) -> anyhow::Result<(ArcBlock, u64)> {
// we only have blocks by hash now
// maybe save them during save_block in a blocks_by_number Cache<U64, Vec<ArcBlock>>
// if theres multiple, use petgraph to find the one on the main chain (and remove the others if they have enough confirmations)
@@ -233,9 +233,11 @@
let head_block_num =
head_block_num.expect("we should only get here if we have a head block");
- // TODO: geth does 64, erigon does 90k. sometimes we run a mix
- // TODO: do this dynamically based on balanced_rpcs block_data_limit
- let archive_needed = num < &(head_block_num - U64::from(64));
+ let block_depth = if num >= &head_block_num {
+ 0
+ } else {
+ (head_block_num - num).as_u64()
+ };
// try to get the hash from our cache
// deref to not keep the lock open
@@ -244,7 +246,7 @@
// TODO: pass authorization through here?
let block = self.block(authorization, &block_hash, None).await?;
- return Ok((block, archive_needed));
+ return Ok((block, block_depth));
}
// block number not in cache. we need to ask an rpc for it
@@ -270,7 +272,7 @@
// the block was fetched using eth_getBlockByNumber, so it should have all fields and be on the heaviest chain
let block = self.save_block(block, true).await?;
- Ok((block, archive_needed))
+ Ok((block, block_depth))
}
pub(super) async fn process_incoming_blocks(
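
The depth calculation added to cannonical_block reduces to a saturating subtraction: a block at or ahead of the head has depth 0, otherwise the depth is its distance from the head. A plain-u64 sketch of that arithmetic (the real code uses ethers' U64, and the function name here is only illustrative):

// How far a requested block sits behind the chain head (sketch).
fn block_depth(head_block_num: u64, num: u64) -> u64 {
    // num >= head_block_num gives 0, matching the `if num >= &head_block_num`
    // branch above; otherwise head_block_num - num.
    head_block_num.saturating_sub(num)
}

fn main() {
    assert_eq!(block_depth(16_500_000, 16_500_000), 0); // the head itself
    assert_eq!(block_depth(16_500_000, 16_410_000), 90_000); // erigon's default prune depth
    assert_eq!(block_depth(16_600_000, 16_700_000), 0); // a future block clamps to 0
}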