diff --git a/Cargo.lock b/Cargo.lock index 7cba2b2b..f2c07dcb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1005,13 +1005,14 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.9.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c37be52ef5e3b394db27a2341010685ad5103c72ac15ce2e9420a7e8f93f342c" +checksum = "a5104de16b218eddf8e34ffe2f86f74bfa4e61e95a1b89732fccf6325efd0557" dependencies = [ "cfg-if", "cpufeatures", "hex", + "proptest", "serde", ] @@ -1752,7 +1753,7 @@ dependencies = [ "ethabi", "generic-array", "k256", - "num_enum 0.7.0", + "num_enum 0.7.1", "once_cell", "open-fastrlp", "rand 0.8.5", @@ -3290,11 +3291,11 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70bf6736f74634d299d00086f02986875b3c2d924781a6a2cb6c201e73da0ceb" +checksum = "683751d591e6d81200c39fb0d1032608b77724f34114db54f571ff1317b337c0" dependencies = [ - "num_enum_derive 0.7.0", + "num_enum_derive 0.7.1", ] [[package]] @@ -3311,11 +3312,11 @@ dependencies = [ [[package]] name = "num_enum_derive" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ea360eafe1022f7cc56cd7b869ed57330fb2453d0c7831d99b74c65d2f5597" +checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" dependencies = [ - "proc-macro-crate 1.3.1", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 2.0.38", @@ -3870,6 +3871,15 @@ dependencies = [ "toml_edit 0.19.15", ] +[[package]] +name = "proc-macro-crate" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +dependencies = [ + "toml_edit 0.20.7", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -3909,6 +3919,22 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "proptest" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c003ac8c77cb07bb74f5f198bce836a689bcd5a42574612bf14d17bfd08c20e" +dependencies = [ + "bitflags 2.4.1", + "lazy_static", + "num-traits", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_xorshift", + "regex-syntax 0.7.5", + "unarray", +] + [[package]] name = "prost" version = "0.12.1" @@ -4076,6 +4102,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "ratatui" version = "0.20.1" @@ -4195,15 +4230,6 @@ dependencies = [ "bitflags 1.3.2", ] -[[package]] -name = "redox_syscall" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "redox_syscall" version = "0.4.1" @@ -4491,9 +4517,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.20" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ce50cb2e16c2903e30d1cbccfd8387a74b9d4c938b6a4c5ec6cc7556f7a8a0" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ "bitflags 2.4.1", "errno", @@ -5741,13 +5767,13 @@ checksum = 
"55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", "fastrand 2.0.1", - "redox_syscall 0.3.5", + "redox_syscall 0.4.1", "rustix", "windows-sys", ] @@ -6027,14 +6053,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3efaf127c78d5339cc547cce4e4d973bd5e4f56e949a06d091c082ebeef2f800" +checksum = "8ff9e3abce27ee2c9a37f9ad37238c1bdd4e789c84ba37df76aa4d528f5072cc" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.20.5", + "toml_edit 0.20.7", ] [[package]] @@ -6061,9 +6087,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.20.5" +version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "782bf6c2ddf761c1e7855405e8975472acf76f7f36d0d4328bd3b7a2fae12a85" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" dependencies = [ "indexmap 2.0.2", "serde", @@ -6309,6 +6335,12 @@ dependencies = [ "libc", ] +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicase" version = "2.7.0" @@ -6656,7 +6688,7 @@ dependencies = [ "time", "tokio", "tokio-stream", - "toml 0.8.5", + "toml 0.8.6", "tower-http", "tracing", "tracing-subscriber", @@ -6864,18 +6896,18 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zerocopy" -version = "0.7.15" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81ba595b9f2772fbee2312de30eeb80ec773b4cb2f1e8098db024afadda6c06f" +checksum = "ede7d7c7970ca2215b8c1ccf4d4f354c4733201dfaaba72d44ae5b37472e4901" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.15" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "772666c41fb6dceaf520b564b962d738a8e1a83b41bd48945f50837aed78bb1d" +checksum = "4b27b1bb92570f989aac0ab7e9cbfbacdd65973f7ee920d9f0e71ebac878fd0b" dependencies = [ "proc-macro2", "quote", diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml index 9730e3fd..3206791d 100644 --- a/web3_proxy/Cargo.toml +++ b/web3_proxy/Cargo.toml @@ -86,7 +86,7 @@ strum = { version = "0.25.0", features = ["derive"] } time = { version = "0.3" } tokio = { version = "1.33.0", features = ["full", "tracing"] } tokio-stream = { version = "0.1.14", features = ["sync"] } -toml = "0.8.5" +toml = "0.8.6" tower-http = { version = "0.4.4", features = ["cors", "normalize-path", "sensitive-headers", "trace"] } tracing = "0.1" ulid = { version = "1.1.0", features = ["rand", "uuid", "serde"] } diff --git a/web3_proxy/src/block_number.rs b/web3_proxy/src/block_number.rs index 84e71d2b..dee6c0b2 100644 --- a/web3_proxy/src/block_number.rs +++ b/web3_proxy/src/block_number.rs @@ -46,9 +46,11 @@ pub fn BlockNumber_to_U64(block_num: BlockNumber, latest_block: U64) -> (U64, bo pub struct BlockNumAndHash(U64, H256); impl BlockNumAndHash { - pub fn num(&self) -> &U64 { - &self.0 + #[inline] + pub fn num(&self) -> U64 { + self.0 } + 
#[inline] pub fn hash(&self) -> &H256 { &self.1 } @@ -103,7 +105,7 @@ pub async fn clean_block_number<'a>( if block_hash == *head_block.hash() { (head_block.into(), false) } else if let Some(app) = app { - // TODO: query for the block + // TODO: make a jsonrpc query here? cache rates will be better but it adds a network request let block = app .balanced_rpcs .blocks_by_hash @@ -137,7 +139,7 @@ pub async fn clean_block_number<'a>( if block_hash == *head_block.hash() { (head_block.number(), false) } else if let Some(app) = app { - // TODO: what should this max_wait be? + // TODO: make a jsonrpc query here? cache rates will be better but it adds a network request let block = app .balanced_rpcs .blocks_by_hash @@ -172,7 +174,7 @@ pub async fn clean_block_number<'a>( if block_num == head_block_num { (head_block.into(), changed) } else if let Some(app) = app { - // TODO: we used to make a query here, but thats causing problems with recursion now. come back to this + // TODO: make a jsonrpc query here? cache rates will be better but it adds a network request let block_hash = app .balanced_rpcs .blocks_by_number @@ -180,6 +182,7 @@ pub async fn clean_block_number<'a>( .await .context("fetching block hash from number")?; + // TODO: make a jsonrpc query here? cache rates will be better but it adds a network request let block = app .balanced_rpcs .blocks_by_hash @@ -210,6 +213,21 @@ pub async fn clean_block_number<'a>( } } +#[derive(Debug, From, Hash, Eq, PartialEq)] +pub enum BlockNumOrHash { + Num(U64), + And(BlockNumAndHash), +} + +impl BlockNumOrHash { + pub fn num(&self) -> U64 { + match self { + Self::Num(x) => *x, + Self::And(x) => x.num(), + } + } +} + /// TODO: change this to also return the hash needed? /// this replaces any "latest" identifiers in the JsonRpcRequest with the current block number which feels like the data is structured wrong #[derive(Debug, Default, Hash, Eq, PartialEq)] @@ -221,8 +239,9 @@ pub enum CacheMode { cache_errors: bool, }, Range { - from_block: BlockNumAndHash, - to_block: BlockNumAndHash, + from_block: BlockNumOrHash, + to_block: BlockNumOrHash, + cache_block: BlockNumAndHash, /// cache jsonrpc errors (server errors are never cached) cache_errors: bool, }, @@ -285,12 +304,12 @@ impl CacheMode { } if let Some(head_block) = head_block { - CacheMode::Standard { + Self::Standard { block: head_block.into(), cache_errors: true, } } else { - CacheMode::Never + Self::Never } } @@ -333,9 +352,9 @@ impl CacheMode { match request.method.as_ref() { "debug_traceTransaction" => { // TODO: make sure re-orgs work properly! - Ok(CacheMode::SuccessForever) + Ok(Self::SuccessForever) } - "eth_gasPrice" => Ok(CacheMode::Standard { + "eth_gasPrice" => Ok(Self::Standard { block: head_block.into(), cache_errors: false, }), @@ -343,23 +362,25 @@ impl CacheMode { // TODO: double check that any node can serve this // TODO: can a block change? like what if it gets orphaned? // TODO: make sure re-orgs work properly! - Ok(CacheMode::SuccessForever) + Ok(Self::SuccessForever) } "eth_getBlockByNumber" => { // TODO: double check that any node can serve this // TODO: CacheSuccessForever if the block is old enough // TODO: make sure re-orgs work properly! 
- Ok(CacheMode::Standard { + Ok(Self::Standard { block: head_block.into(), cache_errors: true, }) } "eth_getBlockTransactionCountByHash" => { // TODO: double check that any node can serve this - Ok(CacheMode::SuccessForever) + Ok(Self::SuccessForever) } "eth_getLogs" => { - /* + // + // if we fail to get to_block, then use head_block + // TODO: think about this more // TODO: jsonrpc has a specific code for this let obj = params @@ -371,7 +392,7 @@ impl CacheMode { })?; if obj.contains_key("blockHash") { - Ok(CacheMode::CacheSuccessForever) + Ok(Self::SuccessForever) } else { let from_block = if let Some(x) = obj.get_mut("fromBlock") { // TODO: use .take instead of clone @@ -387,11 +408,9 @@ impl CacheMode { *x = json!(block_num); } - let block_hash = rpcs.block_hash(&block_num).await?; - - BlockNumAndHash(block_num, block_hash) + BlockNumOrHash::Num(block_num) } else { - BlockNumAndHash(U64::zero(), H256::zero()) + BlockNumOrHash::Num(U64::zero()) }; let to_block = if let Some(x) = obj.get_mut("toBlock") { @@ -399,67 +418,82 @@ impl CacheMode { // what if its a hash? let block_num: BlockNumber = serde_json::from_value(x.clone())?; - let (block_num, change) = - BlockNumber_to_U64(block_num, head_block.number()); + // sometimes people request `from_block=future, to_block=latest`. latest becomes head and then + // TODO: if this is in the future, this cache key won't be very likely to be used again + // TODO: delay here until the app has this block? + let latest_block = head_block.number().max(from_block.num()); + + let (block_num, change) = BlockNumber_to_U64(block_num, latest_block); if change { trace!("changing toBlock in eth_getLogs. {} -> {}", x, block_num); *x = json!(block_num); } - let block_hash = rpcs.block_hash(&block_num).await?; - - BlockNumAndHash(block_num, block_hash) + if let Some(app) = app { + // TODO: make a jsonrpc query here? cache rates will be better but it adds a network request + if let Some(block_hash) = + app.balanced_rpcs.blocks_by_number.get(&block_num).await + { + BlockNumOrHash::And(BlockNumAndHash(block_num, block_hash)) + } else { + BlockNumOrHash::Num(block_num) + } + } else { + BlockNumOrHash::Num(block_num) + } } else { - head_block.into() + BlockNumOrHash::And(head_block.into()) }; - Ok(CacheMode::CacheRange { + let cache_block = if let BlockNumOrHash::And(x) = &to_block { + x.clone() + } else { + BlockNumAndHash::from(head_block) + }; + + Ok(Self::Range { from_block, to_block, + cache_block, cache_errors: true, }) } - */ - Ok(CacheMode::Standard { - block: head_block.into(), - cache_errors: true, - }) } "eth_getTransactionByBlockHashAndIndex" => { // TODO: check a Cache of recent hashes // try full nodes first. retry will use archive - Ok(CacheMode::SuccessForever) + Ok(Self::SuccessForever) } - "eth_getTransactionByHash" => Ok(CacheMode::Never), - "eth_getTransactionReceipt" => Ok(CacheMode::Never), + "eth_getTransactionByHash" => Ok(Self::Never), + "eth_getTransactionReceipt" => Ok(Self::Never), "eth_getUncleByBlockHashAndIndex" => { // TODO: check a Cache of recent hashes // try full nodes first. retry will use archive // TODO: what happens if this block is uncled later? - Ok(CacheMode::SuccessForever) + Ok(Self::SuccessForever) } "eth_getUncleCountByBlockHash" => { // TODO: check a Cache of recent hashes // try full nodes first. retry will use archive // TODO: what happens if this block is uncled later? - Ok(CacheMode::SuccessForever) + Ok(Self::SuccessForever) } "eth_maxPriorityFeePerGas" => { // TODO: this might be too aggressive. 
i think it can change before a block is mined - Ok(CacheMode::Standard { + Ok(Self::Standard { block: head_block.into(), cache_errors: false, }) } - "eth_sendRawTransaction" => Ok(CacheMode::Never), - "net_listening" => Ok(CacheMode::SuccessForever), - "net_version" => Ok(CacheMode::SuccessForever), + "eth_sendRawTransaction" => Ok(Self::Never), + "net_listening" => Ok(Self::SuccessForever), + "net_version" => Ok(Self::SuccessForever), method => match get_block_param_id(method) { Some(block_param_id) => { let block = clean_block_number(params, block_param_id, head_block, app).await?; - Ok(CacheMode::Standard { + Ok(Self::Standard { block, cache_errors: true, }) @@ -480,7 +514,7 @@ impl CacheMode { } #[inline] - pub fn from_block(&self) -> Option<&BlockNumAndHash> { + pub fn from_block(&self) -> Option<&BlockNumOrHash> { match self { Self::SuccessForever => None, Self::Never => None, @@ -494,13 +528,14 @@ impl CacheMode { !matches!(self, Self::Never) } + /// get the to_block used **for caching**. This may be the to_block in the request, or it might be the current head block. #[inline] pub fn to_block(&self) -> Option<&BlockNumAndHash> { match self { Self::SuccessForever => None, Self::Never => None, Self::Standard { block, .. } => Some(block), - Self::Range { to_block, .. } => Some(to_block), + Self::Range { cache_block, .. } => Some(cache_block), } } } diff --git a/web3_proxy/src/jsonrpc/request_builder.rs b/web3_proxy/src/jsonrpc/request_builder.rs index af574f41..9344d346 100644 --- a/web3_proxy/src/jsonrpc/request_builder.rs +++ b/web3_proxy/src/jsonrpc/request_builder.rs @@ -498,7 +498,7 @@ impl ValidatedRequest { #[inline] pub fn max_block_needed(&self) -> Option { - self.cache_mode.to_block().map(|x| *x.num()) + self.cache_mode.to_block().map(|x| x.num()) } #[inline] @@ -506,7 +506,7 @@ impl ValidatedRequest { if self.archive_request.load(atomic::Ordering::Relaxed) { Some(U64::zero()) } else { - self.cache_mode.from_block().map(|x| *x.num()) + self.cache_mode.from_block().map(|x| x.num()) } } diff --git a/web3_proxy/src/response_cache.rs b/web3_proxy/src/response_cache.rs index 22ead5e7..dadfbed0 100644 --- a/web3_proxy/src/response_cache.rs +++ b/web3_proxy/src/response_cache.rs @@ -1,5 +1,5 @@ use crate::{ - block_number::{BlockNumAndHash, CacheMode}, + block_number::{BlockNumAndHash, BlockNumOrHash, CacheMode}, errors::{Web3ProxyError, Web3ProxyResult}, frontend::authorization::RequestOrMethod, jsonrpc::{self, JsonRpcErrorData, ResponsePayload}, @@ -19,23 +19,28 @@ use std::{ #[derive(Clone, Debug, Eq, From)] pub struct JsonRpcQueryCacheKey<'a> { - /// hashed params so that + /// hashed params and block info so that we don't have to clone a potentially big thing + /// this is probably a premature optimization hash: u64, - from_block: Option<&'a BlockNumAndHash>, + from_block: Option<&'a BlockNumOrHash>, to_block: Option<&'a BlockNumAndHash>, cache_jsonrpc_errors: bool, } impl JsonRpcQueryCacheKey<'_> { + #[inline] pub fn hash(&self) -> u64 { self.hash } - pub fn from_block_num(&self) -> Option<&U64> { - self.from_block.as_ref().map(|x| x.num()) + #[inline] + pub fn from_block_num(&self) -> Option { + self.from_block.map(|x| x.num()) } - pub fn to_block_num(&self) -> Option<&U64> { - self.to_block.as_ref().map(|x| x.num()) + #[inline] + pub fn to_block_num(&self) -> Option { + self.to_block.map(|x| x.num()) } + #[inline] pub fn cache_errors(&self) -> bool { self.cache_jsonrpc_errors } @@ -102,6 +107,7 @@ pub enum ForwardedResponse { // TODO: impl for other inner result types? 
impl ForwardedResponse { + #[inline] pub fn num_bytes(&self) -> u64 { match self { Self::Result { num_bytes, .. } => *num_bytes, @@ -109,6 +115,7 @@ impl ForwardedResponse { } } + #[inline] pub fn is_error(&self) -> bool { match self { Self::Result { .. } => false, @@ -118,12 +125,14 @@ impl ForwardedResponse { } impl ForwardedResponse> { + #[inline] pub fn is_null(&self) -> bool { matches!(self, Self::Result { value: None, .. }) } } impl ForwardedResponse> { + #[inline] pub fn is_null(&self) -> bool { match self { Self::Result { value, .. } => value.get() == "null",
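
For reviewers, a minimal sketch of how the new caching types in `web3_proxy/src/block_number.rs` fit together: `BlockNumOrHash` lets a range bound carry either a bare block number (when the hash is not already in the local block caches) or a full number/hash pair, and `CacheMode::Range` now carries a separate `cache_block` that `to_block()` returns when building cache keys. The `U64`/`H256` aliases below are stand-ins for the ethers types, and the enums are trimmed copies of the shapes in this diff, so treat this as an illustration of the intent rather than the actual implementation.

```rust
// Sketch only: `U64` and `H256` stand in for the ethers types used in the PR,
// and the enums mirror the shapes added in web3_proxy/src/block_number.rs.
type U64 = u64;
type H256 = [u8; 32];

#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub struct BlockNumAndHash(U64, H256);

impl BlockNumAndHash {
    pub fn num(&self) -> U64 {
        self.0
    }
}

/// A range bound that may or may not have a known hash.
/// `eth_getLogs` bounds fall back to `Num` when the hash is not already cached.
#[derive(Debug, Hash, Eq, PartialEq)]
pub enum BlockNumOrHash {
    Num(U64),
    And(BlockNumAndHash),
}

impl BlockNumOrHash {
    pub fn num(&self) -> U64 {
        match self {
            Self::Num(x) => *x,
            Self::And(x) => x.num(),
        }
    }
}

/// Simplified version of the new `CacheMode::Range` variant: the request's
/// from/to bounds are kept for min/max block tracking, while `cache_block`
/// (either the resolved to_block or the current head) anchors the cache key.
pub enum CacheMode {
    Never,
    Range {
        from_block: BlockNumOrHash,
        to_block: BlockNumOrHash,
        cache_block: BlockNumAndHash,
        cache_errors: bool,
    },
}

impl CacheMode {
    /// the block used **for caching**; for ranges this is `cache_block`, not the raw to_block
    pub fn to_block(&self) -> Option<&BlockNumAndHash> {
        match self {
            Self::Never => None,
            Self::Range { cache_block, .. } => Some(cache_block),
        }
    }
}

fn main() {
    let head = BlockNumAndHash(19_000_000, [0u8; 32]);

    // a getLogs-style range where only the upper bound's hash was already cached
    let mode = CacheMode::Range {
        from_block: BlockNumOrHash::Num(18_999_000),
        to_block: BlockNumOrHash::And(head.clone()),
        cache_block: head,
        cache_errors: true,
    };

    assert_eq!(mode.to_block().map(|b| b.num()), Some(19_000_000));
}
```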
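
Similarly, a small illustration of the `toBlock` clamping added to the `eth_getLogs` path: if a caller sends a future `fromBlock` together with `toBlock: "latest"`, resolving `latest` straight to the head block would invert the range, so the diff raises the resolution floor to `from_block` via `head_block.number().max(from_block.num())`. The `block_number_to_u64` helper below is a simplified stand-in for the real `BlockNumber_to_U64` and only models the `Latest`/`Number` cases.

```rust
type U64 = u64;

enum BlockNumber {
    Latest,
    Number(U64),
}

/// simplified stand-in for `BlockNumber_to_U64`: resolve a block tag against
/// `latest_block` and report whether the request had to be rewritten
fn block_number_to_u64(block: BlockNumber, latest_block: U64) -> (U64, bool) {
    match block {
        BlockNumber::Latest => (latest_block, true),
        BlockNumber::Number(x) => (x, false),
    }
}

fn main() {
    let head_block = 19_000_000u64;
    let from_block = 19_000_500u64; // a block that has not been mined yet

    // without the clamp, "latest" would resolve below from_block and invert the range
    let latest_block = head_block.max(from_block);

    let (to_block, changed) = block_number_to_u64(BlockNumber::Latest, latest_block);

    assert!(changed);
    assert!(to_block >= from_block);
}
```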