From f0691efc5c34a4e3b8a6ce71d20207dfd349f3f3 Mon Sep 17 00:00:00 2001
From: Bryan Stitt
Date: Mon, 25 Jul 2022 18:00:29 +0000
Subject: [PATCH] always serve something

---
 TODO.md                       | 4 +++-
 web3-proxy/src/connections.rs | 7 +++----
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/TODO.md b/TODO.md
index 573a911a..02114395 100644
--- a/TODO.md
+++ b/TODO.md
@@ -57,7 +57,7 @@
   - thundering herd problem if we only allow a lag of 0 blocks
   - we can improve this by only publishing the synced connections once a threshold of total available soft and hard limits is passed. how can we do this without hammering redis? at least its only once per block per server
 - [x] instead of tracking `pending_synced_connections`, have a mapping of where all connections are individually. then each change, re-check for consensus.
-- [ ] synced connections swap threshold should come from config
+- [x] synced connections swap threshold set to 1 so that it always serves something
 - [ ] basic request method stats
 - [ ] nice output when cargo doc is run
 
@@ -79,6 +79,8 @@
   - have a blocking future watching the config file and calling app.apply_config() on first load and on change
   - work started on this in the "config_reloads" branch. because of how we pass channels around during spawn, this requires a larger refactor.
 - [ ] if a rpc fails to connect at start, retry later instead of skipping it forever
+- [ ] synced connections swap threshold should come from config
+  - if there are bad forks, we need to think about this more. keep backfilling until there is a common block, or just error? if the common block is old, i think we should error rather than serve data. that's kind of "downtime" but really it's on the chain and not us. think about this more
 - [ ] have a "backup" tier that is only used when the primary tier has no servers or is many blocks behind
   - we don't want the backup tier taking over with the head block if they happen to be fast at that (but overall low/expensive rps). only if the primary tier has fallen behind or gone entirely offline should we go to third parties
 - [ ] until this is done, an alternative is for infra to have a "failover" script that changes the configs to include a bunch of third party servers manually.
diff --git a/web3-proxy/src/connections.rs b/web3-proxy/src/connections.rs
index 21ec22a1..c981234d 100644
--- a/web3-proxy/src/connections.rs
+++ b/web3-proxy/src/connections.rs
@@ -699,10 +699,9 @@ impl Web3Connections {
             }
         }
 
-        // TODO: configurable threshold. in test we have 4 servers so
-        // TODO:
-        // TODO: minimum total_soft_limit? without, when one server is in the loop
-        let min_soft_limit = total_soft_limit / 2;
+        // TODO: default_min_soft_limit? without, we start serving traffic at the start too quickly
+        // let min_soft_limit = total_soft_limit / 2;
+        let min_soft_limit = 1;
 
         struct State<'a> {
             block: &'a Arc<Block<TxHash>>,
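
Note: the TODO re-added above says the swap threshold should eventually come
from config rather than the hardcoded `1`. A minimal sketch of what that
follow-up could look like, assuming a serde-based config struct (the
`min_synced_soft_limit` field and its default helper are hypothetical names
for illustration, not existing web3-proxy API):

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    pub struct Web3ConnectionsConfig {
        /// Hypothetical knob: the minimum summed soft limit that candidate
        /// servers must reach before `synced_connections` is swapped to a
        /// new head block. Defaulting to 1 preserves this patch's
        /// "always serve something" behavior when the field is omitted.
        #[serde(default = "default_min_synced_soft_limit")]
        pub min_synced_soft_limit: u32,
    }

    fn default_min_synced_soft_limit() -> u32 {
        1
    }

With something like this loaded at startup, the hardcoded
`let min_soft_limit = 1;` could read the configured value instead, letting
operators raise the threshold (for example back toward `total_soft_limit / 2`)
without recompiling, while the default still always serves something.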