tokio spawn the cached future (#217)

* tokio spawn the cached future

* lint
Author: Bryan Stitt (committed by GitHub)
Date:   2023-09-20 10:11:07 -07:00
parent  e4d3a736d0
commit  dc037e663a

@@ -1254,12 +1254,9 @@ impl Web3ProxyApp {
         head_block: Option<&Web3ProxyBlock>,
         request_metadata: &Arc<RequestMetadata>,
     ) -> Web3ProxyResult<JsonRpcResponseEnum<Arc<RawValue>>> {
-        // TODO: don't clone into a new string?
-        let request_method = method.to_string();
-
         // TODO: serve net_version without querying the backend
         // TODO: don't force RawValue
-        let response_data: JsonRpcResponseEnum<Arc<RawValue>> = match request_method.as_ref() {
+        let response_data: JsonRpcResponseEnum<Arc<RawValue>> = match method {
             // lots of commands are blocked
             method @ ("db_getHex"
             | "db_getString"
@@ -1733,7 +1730,7 @@ impl Web3ProxyApp {
             }
         };
 
-        // TODO: think more about timeouts
+        // TODO: think more about this timeout. we should probably have a `request_expires_at` Duration on the request_metadata
         // TODO: different user tiers should have different timeouts
         // erigon's timeout is 300, so keep this a few seconds shorter
         let max_wait = Some(Duration::from_secs(295));
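
A note on the timeouts touched above: the proxy keeps its own deadlines a few seconds under erigon's 300-second cutoff (a 295-second max_wait for backend selection, and a 290-second timeout around the proxied call in the cached path below), so a slow request fails with the proxy's own timeout error rather than whatever the backend produces at 300 seconds. A minimal sketch of that inner guard with tokio::time::timeout, using a made-up slow_backend_call in place of the real proxied request (assumes tokio = { version = "1", features = ["full"] }):

use std::time::Duration;

use tokio::time::{error::Elapsed, timeout};

// stand-in for a proxied RPC request to an erigon backend
async fn slow_backend_call() -> &'static str {
    "0x1"
}

// give up at 290 seconds, comfortably before erigon's own 300-second cutoff,
// so the caller sees the proxy's timeout error instead of the backend's
async fn call_with_budget() -> Result<&'static str, Elapsed> {
    timeout(Duration::from_secs(290), slow_backend_call()).await
}

#[tokio::main]
async fn main() {
    println!("{:?}", call_with_budget().await);
}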
@@ -1748,15 +1745,21 @@ impl Web3ProxyApp {
             // TODO: try to fetch out of s3
-            self
+            // TODO: clone less?
+            let app = self.clone();
+            let method = method.to_string();
+            let params = params.clone();
+            let request_metadata = request_metadata.clone();
+            let f = async move {
+                app
                 .jsonrpc_response_cache
                 .try_get_with::<_, Web3ProxyError>(cache_key.hash(), async {
-                    // TODO: think more about this timeout. we should probably have a `request_expires_at` Duration on the request_metadata
-                    let response_data = timeout(Duration::from_secs(290), self.balanced_rpcs
+                    let response_data = timeout(Duration::from_secs(290), app.balanced_rpcs
                         .try_proxy_connection::<_, Arc<RawValue>>(
-                            method,
-                            params,
-                            Some(request_metadata),
+                            &method,
+                            &params,
+                            Some(&request_metadata),
                             max_wait,
                             from_block_num.as_ref(),
                             to_block_num.as_ref(),
@@ -1788,7 +1791,11 @@ impl Web3ProxyApp {
                         }
                         Err(err) => Err(Web3ProxyError::from(err)),
                     }
-                }).await?
+                }).await
+            };
+
+            // this is spawned so that if the client disconnects, the app keeps polling the future with a lock inside the moka cache
+            tokio::spawn(f).await??
         } else {
             let x = timeout(
                 Duration::from_secs(300),
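
The heart of the change: the cache lookup is packaged as an owned 'static future (hence the clones of the app handle, method, params, and request_metadata) and handed to tokio::spawn before being awaited. If the client disconnects, the outer request future is dropped, but the spawned task keeps getting polled, so the moka entry that try_get_with is populating, and the lock it holds, is still driven to completion. Below is a minimal, self-contained sketch of the same pattern with made-up types (ExampleError, fetch_from_backend, a Cache<u64, String>), not web3-proxy's actual code:

// A sketch of the pattern, not web3-proxy's real types. Assumed deps:
//   moka  = { version = "0.12", features = ["future"] }
//   tokio = { version = "1", features = ["full"] }
use std::sync::Arc;
use std::time::Duration;

use moka::future::Cache;
use tokio::task::JoinError;
use tokio::time::timeout;

#[derive(Debug)]
enum ExampleError {
    // the inner call exceeded the 290s budget
    Timeout,
    // the cached init future failed; moka hands the error back as an Arc
    Cached(Arc<ExampleError>),
    // the spawned task panicked or was cancelled
    Join(JoinError),
}

impl From<JoinError> for ExampleError {
    fn from(err: JoinError) -> Self {
        Self::Join(err)
    }
}

// stand-in for `app.balanced_rpcs.try_proxy_connection(...)`
async fn fetch_from_backend(method: &str, params: &str) -> Result<String, ExampleError> {
    Ok(format!("result of {method}({params})"))
}

async fn cached_request(
    cache: Cache<u64, String>,
    cache_key: u64,
    method: &str,
    params: &str,
) -> Result<String, ExampleError> {
    // clone owned values so the future is Send + 'static, as tokio::spawn requires
    let method = method.to_string();
    let params = params.to_string();

    let f = async move {
        cache
            .try_get_with(cache_key, async {
                // keep the inner deadline under the backend's own 300s cutoff
                timeout(
                    Duration::from_secs(290),
                    fetch_from_backend(&method, &params),
                )
                .await
                .map_err(|_| ExampleError::Timeout)?
            })
            .await
            .map_err(ExampleError::Cached)
    };

    // spawned so that if the caller is dropped (client disconnect), the runtime
    // keeps polling the future that populates the moka entry. the first `?`
    // handles the JoinError, the second the request's own error.
    let response = tokio::spawn(f).await??;

    Ok(response)
}

#[tokio::main]
async fn main() {
    let cache: Cache<u64, String> = Cache::new(10_000);

    let out = cached_request(cache, 42, "eth_chainId", "[]").await;
    println!("{out:?}");
}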