Merge pull request #5619 from ethereum-optimism/felipe/skip-peer-count
proxyd: skip peer count config per backend
Commit 0d3740dfd6
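The new `consensus_skip_peer_count` flag lets an individual backend opt out of the `net_peerCount` check the consensus poller uses when deciding whether that backend is healthy; with the flag set, the backend is no longer filtered out for reporting fewer peers than the configured minimum. A minimal sketch of how the flag could be enabled for one backend in the proxyd TOML config (the backend name and URLs below are placeholders, not part of this change):

# Hypothetical backend entry; the name and URLs are illustrative only.
[backends.internal_node]
rpc_url = "http://internal-node:8545"
ws_url = "ws://internal-node:8546"
max_rps = 100
# Added by this PR: skip the peer count check for this backend (defaults to false).
consensus_skip_peer_count = true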
@@ -132,6 +132,8 @@ type Backend struct {
 	stripTrailingXFF bool
 	proxydIP         string
 
+	skipPeerCountCheck bool
+
 	maxDegradedLatencyThreshold time.Duration
 	maxLatencyThreshold         time.Duration
 	maxErrorRateThreshold       float64
@@ -207,6 +209,12 @@ func WithProxydIP(ip string) BackendOpt {
 	}
 }
 
+func WithSkipPeerCountCheck(skipPeerCountCheck bool) BackendOpt {
+	return func(b *Backend) {
+		b.skipPeerCountCheck = skipPeerCountCheck
+	}
+}
+
 func WithMaxDegradedLatencyThreshold(maxDegradedLatencyThreshold time.Duration) BackendOpt {
 	return func(b *Backend) {
 		b.maxDegradedLatencyThreshold = maxDegradedLatencyThreshold
@@ -81,17 +81,18 @@ type BackendOptions struct {
 }
 
 type BackendConfig struct {
-	Username         string `toml:"username"`
-	Password         string `toml:"password"`
-	RPCURL           string `toml:"rpc_url"`
-	WSURL            string `toml:"ws_url"`
-	WSPort           int    `toml:"ws_port"`
-	MaxRPS           int    `toml:"max_rps"`
-	MaxWSConns       int    `toml:"max_ws_conns"`
-	CAFile           string `toml:"ca_file"`
-	ClientCertFile   string `toml:"client_cert_file"`
-	ClientKeyFile    string `toml:"client_key_file"`
-	StripTrailingXFF bool   `toml:"strip_trailing_xff"`
+	Username           string `toml:"username"`
+	Password           string `toml:"password"`
+	RPCURL             string `toml:"rpc_url"`
+	WSURL              string `toml:"ws_url"`
+	WSPort             int    `toml:"ws_port"`
+	MaxRPS             int    `toml:"max_rps"`
+	MaxWSConns         int    `toml:"max_ws_conns"`
+	CAFile             string `toml:"ca_file"`
+	ClientCertFile     string `toml:"client_cert_file"`
+	ClientKeyFile      string `toml:"client_key_file"`
+	StripTrailingXFF   bool   `toml:"strip_trailing_xff"`
+	SkipPeerCountCheck bool   `toml:"consensus_skip_peer_count"`
 }
 
 type BackendsConfig map[string]*BackendConfig
@@ -227,10 +227,13 @@ func (cp *ConsensusPoller) UpdateBackend(ctx context.Context, be *Backend) {
 		return
 	}
 
-	peerCount, err := cp.getPeerCount(ctx, be)
-	if err != nil {
-		log.Warn("error updating backend", "name", be.Name, "err", err)
-		return
+	var peerCount uint64
+	if !be.skipPeerCountCheck {
+		peerCount, err = cp.getPeerCount(ctx, be)
+		if err != nil {
+			log.Warn("error updating backend", "name", be.Name, "err", err)
+			return
+		}
 	}
 
 	latestBlockNumber, latestBlockHash, err := cp.fetchBlock(ctx, be, "latest")
@@ -257,7 +260,7 @@ func (cp *ConsensusPoller) UpdateBackendGroupConsensus(ctx context.Context) {
 	for _, be := range cp.backendGroup.Backends {
 		peerCount, backendLatestBlockNumber, backendLatestBlockHash, lastUpdate := cp.getBackendState(be)
 
-		if peerCount < cp.minPeerCount {
+		if !be.skipPeerCountCheck && peerCount < cp.minPeerCount {
 			continue
 		}
 		if lastUpdate.Add(cp.maxUpdateThreshold).Before(time.Now()) {
@@ -306,7 +309,7 @@ func (cp *ConsensusPoller) UpdateBackendGroupConsensus(ctx context.Context) {
 		bs := cp.backendState[be]
 		notUpdated := bs.lastUpdate.Add(cp.maxUpdateThreshold).Before(time.Now())
 		isBanned := time.Now().Before(bs.bannedUntil)
-		notEnoughPeers := bs.peerCount < cp.minPeerCount
+		notEnoughPeers := !be.skipPeerCountCheck && bs.peerCount < cp.minPeerCount
 		if !be.IsHealthy() || be.IsRateLimited() || !be.Online() || notUpdated || isBanned || notEnoughPeers {
 			filteredBackendsNames = append(filteredBackendsNames, be.Name)
 			continue
@@ -384,7 +387,7 @@ func (cp *ConsensusPoller) fetchBlock(ctx context.Context, be *Backend, block st
 	return
 }
 
-// isSyncing Convenient wrapper to check if the backend is syncing from the network
+// getPeerCount Convenient wrapper to retrieve the current peer count from the backend
 func (cp *ConsensusPoller) getPeerCount(ctx context.Context, be *Backend) (count uint64, err error) {
 	var rpcRes RPCRes
 	err = be.ForwardRPC(ctx, &rpcRes, "67", "net_peerCount")
@@ -72,6 +72,9 @@ ca_file = ""
 client_cert_file = ""
 # Path to a custom client key file.
 client_key_file = ""
+# Allows backends to skip peer count checking, default false
+# consensus_skip_peer_count = true
+
 
 [backends.alchemy]
 rpc_url = ""
@@ -157,6 +157,7 @@ func Start(config *Config) (*Server, func(), error) {
 			opts = append(opts, WithStrippedTrailingXFF())
 		}
 		opts = append(opts, WithProxydIP(os.Getenv("PROXYD_IP")))
+		opts = append(opts, WithSkipPeerCountCheck(cfg.SkipPeerCountCheck))
 
 		back := NewBackend(name, rpcURL, wsURL, lim, rpcRequestSemaphore, opts...)
 		backendNames = append(backendNames, name)