diff --git a/web3_proxy/src/block_number.rs b/web3_proxy/src/block_number.rs
index 6d6a8343..478d4dbd 100644
--- a/web3_proxy/src/block_number.rs
+++ b/web3_proxy/src/block_number.rs
@@ -11,6 +11,7 @@
 use ethers::{
     prelude::{BlockNumber, U64},
     types::H256,
 };
+use serde::Serialize;
 use serde_json::json;
 use tracing::{error, trace, warn};
@@ -42,7 +43,7 @@ pub fn BlockNumber_to_U64(block_num: BlockNumber, latest_block: U64) -> (U64, bo
     }
 }
 
-#[derive(Clone, Debug, Eq, From, Hash, PartialEq)]
+#[derive(Clone, Debug, Eq, From, Hash, PartialEq, Serialize)]
 pub struct BlockNumAndHash(U64, H256);
 
 impl BlockNumAndHash {
@@ -200,7 +201,7 @@ pub async fn clean_block_number<'a>(
     }
 }
 
-#[derive(Debug, From, Hash, Eq, PartialEq)]
+#[derive(Debug, From, Hash, Eq, PartialEq, Serialize)]
 pub enum BlockNumOrHash {
     Num(U64),
     And(BlockNumAndHash),
@@ -272,15 +273,16 @@ fn get_block_param_id(method: &str) -> Option {
 }
 
 impl CacheMode {
-    /// like `try_new`, but instead of erroring, it will default to caching with the head block
+    /// like `try_new`, but instead of erroring if things can't be cached, it will default to caching with the head block
+    /// this will still error if something is wrong about the request (like the range is too large or invalid)
     /// returns None if this request should not be cached
     pub async fn new<'a>(
         request: &'a mut SingleRequest,
         head_block: Option<&'a Web3ProxyBlock>,
        app: Option<&'a App>,
-    ) -> Self {
+    ) -> Web3ProxyResult<Self> {
         match Self::try_new(request, head_block, app).await {
-            Ok(x) => return x,
+            x @ Ok(_) => return x,
             Err(Web3ProxyError::NoBlocksKnown) => {
                 warn!(
                     method = %request.method,
@@ -288,6 +290,8 @@ impl CacheMode {
                     "no servers available to get block from params"
                 );
             }
+            err @ Err(Web3ProxyError::RangeTooLarge { .. }) => return err,
+            err @ Err(Web3ProxyError::RangeInvalid { .. }) => return err,
             Err(err) => {
                 error!(
                     method = %request.method,
@@ -298,7 +302,7 @@
             }
         }
 
-        if let Some(head_block) = head_block {
+        let fallback = if let Some(head_block) = head_block {
             Self::Standard {
                 block_needed: head_block.into(),
                 cache_block: head_block.into(),
@@ -306,7 +310,9 @@
             }
         } else {
             Self::Never
-        }
+        };
+
+        Ok(fallback)
     }
 
     pub async fn try_new(
@@ -427,6 +433,22 @@
             BlockNumOrHash::And(head_block.into())
         };
 
+        if let Some(range) = to_block.num().checked_sub(from_block.num()) {
+            if range.as_u64() > 200_000 {
+                return Err(Web3ProxyError::RangeTooLarge {
+                    from: from_block,
+                    to: to_block,
+                    requested: range,
+                    allowed: 200_000.into(),
+                });
+            }
+        } else {
+            return Err(Web3ProxyError::RangeInvalid {
+                from: from_block,
+                to: to_block,
+            });
+        }
+
         let cache_block = if let BlockNumOrHash::And(x) = &to_block {
             x.clone()
         } else {
@@ -652,7 +674,9 @@ mod test {
             x => panic!("{:?}", x),
         }
 
-        let x = CacheMode::new(&mut request, Some(&head_block), None).await;
+        let x = CacheMode::new(&mut request, Some(&head_block), None)
+            .await
+            .unwrap();
 
         // TODO: cache with the head block instead?
         matches!(x, CacheMode::Never);
diff --git a/web3_proxy/src/errors.rs b/web3_proxy/src/errors.rs
index a15d4a71..d68c1380 100644
--- a/web3_proxy/src/errors.rs
+++ b/web3_proxy/src/errors.rs
@@ -1,5 +1,6 @@
 //! Utlities for logging errors for admins and displaying errors to users.
 
+use crate::block_number::BlockNumOrHash;
 use crate::frontend::authorization::Authorization;
 use crate::jsonrpc::{self, JsonRpcErrorData, ParsedResponse, StreamResponse};
 use crate::response_cache::ForwardedResponse;
@@ -150,6 +151,20 @@ pub enum Web3ProxyError {
     ParseBytesError(Option),
     ParseMsgError(siwe::ParseError),
     ParseAddressError,
+    #[display(fmt = "{:?} > {:?}", from, to)]
+    RangeInvalid {
+        from: BlockNumOrHash,
+        to: BlockNumOrHash,
+    },
+    #[display(fmt = "{:?} > {:?}", from, to)]
+    #[error(ignore)]
+    #[from(ignore)]
+    RangeTooLarge {
+        from: BlockNumOrHash,
+        to: BlockNumOrHash,
+        requested: U64,
+        allowed: U64,
+    },
     #[display(fmt = "{:?}, {:?}", _0, _1)]
     RateLimited(Authorization, Option),
     Redis(RedisError),
@@ -911,6 +926,41 @@ impl Web3ProxyError {
                     },
                 )
             }
+            Self::RangeInvalid { from, to } => {
+                trace!(?from, ?to, "RangeInvalid");
+                (
+                    StatusCode::BAD_REQUEST,
+                    JsonRpcErrorData {
+                        message: "invalid block range given".into(),
+                        code: StatusCode::BAD_REQUEST.as_u16().into(),
+                        data: Some(json!({
+                            "from": from,
+                            "to": to,
+                        })),
+                    },
+                )
+            }
+            Self::RangeTooLarge {
+                from,
+                to,
+                requested,
+                allowed,
+            } => {
+                trace!(?from, ?to, %requested, %allowed, "RangeTooLarge");
+                (
+                    StatusCode::BAD_REQUEST,
+                    JsonRpcErrorData {
+                        message: "invalid block range given".into(),
+                        code: StatusCode::BAD_REQUEST.as_u16().into(),
+                        data: Some(json!({
+                            "from": from,
+                            "to": to,
+                            "requested": requested,
+                            "allowed": allowed,
+                        })),
+                    },
+                )
+            }
             // TODO: this should actually by the id of the key. multiple users might control one key
             Self::RateLimited(authorization, retry_at) => {
                 // TODO: emit a stat
diff --git a/web3_proxy/src/jsonrpc/request_builder.rs b/web3_proxy/src/jsonrpc/request_builder.rs
index ea2b189a..c351b9e1 100644
--- a/web3_proxy/src/jsonrpc/request_builder.rs
+++ b/web3_proxy/src/jsonrpc/request_builder.rs
@@ -352,7 +352,7 @@ impl ValidatedRequest {
 
         // TODO: modify CacheMode::new to wait for a future block if one is requested! be sure to update head_block too!
         let cache_mode = match &mut request {
-            RequestOrMethod::Request(x) => CacheMode::new(x, head_block.as_ref(), app).await,
+            RequestOrMethod::Request(x) => CacheMode::new(x, head_block.as_ref(), app).await?,
             _ => CacheMode::Never,
         };
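
For readers skimming the patch, the core of the new guard in `CacheMode::try_new` is a `checked_sub` on the requested block range: `None` means `from` is past `to` (reported as `RangeInvalid`), and any span wider than 200,000 blocks is rejected as `RangeTooLarge`, which the handler added in `errors.rs` then turns into a 400 JSON-RPC response. Below is a minimal standalone sketch of that check, not the project's code: plain `u64` stands in for ethers' `U64`, and `check_range`, `RangeError`, and `MAX_RANGE` are illustrative names.

```rust
// Standalone sketch of the range guard this patch adds; u64 stands in for ethers' U64.
const MAX_RANGE: u64 = 200_000;

#[derive(Debug, PartialEq)]
enum RangeError {
    /// `from` is past `to`, so `to - from` underflows.
    Invalid { from: u64, to: u64 },
    /// The requested span is wider than the server is willing to scan.
    TooLarge { from: u64, to: u64, requested: u64, allowed: u64 },
}

fn check_range(from: u64, to: u64) -> Result<(), RangeError> {
    match to.checked_sub(from) {
        // `checked_sub` returns None on underflow, i.e. when from > to.
        None => Err(RangeError::Invalid { from, to }),
        Some(range) if range > MAX_RANGE => Err(RangeError::TooLarge {
            from,
            to,
            requested: range,
            allowed: MAX_RANGE,
        }),
        Some(_) => Ok(()),
    }
}

fn main() {
    // A 100-block window is fine.
    assert_eq!(check_range(19_000_000, 19_000_100), Ok(()));
    // An inverted range is rejected rather than silently clamped.
    assert!(matches!(check_range(20, 10), Err(RangeError::Invalid { .. })));
    // Anything wider than 200_000 blocks maps to a BAD_REQUEST upstream.
    assert!(matches!(
        check_range(0, 1_000_000),
        Err(RangeError::TooLarge { .. })
    ));
}
```

Using `checked_sub` keeps the two failure modes distinct: an inverted range underflows and surfaces as its own error instead of wrapping around or being treated as a huge span.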