Merge pull request #3722 from ethereum-optimism/develop

Develop -> Master
Matthew Slipper 2022-10-17 18:19:47 -05:00 committed by GitHub
commit 813a22ba27
9 changed files with 100 additions and 22 deletions

@ -5,6 +5,7 @@ import (
"crypto/rand"
"encoding/hex"
"fmt"
"math"
"sync"
"time"
@ -256,5 +257,30 @@ func randStr(l int) string {
return hex.EncodeToString(b)
}
type ServerRateLimiter struct {
type NoopBackendRateLimiter struct{}
var noopBackendRateLimiter = &NoopBackendRateLimiter{}
func (n *NoopBackendRateLimiter) IsBackendOnline(name string) (bool, error) {
return true, nil
}
func (n *NoopBackendRateLimiter) SetBackendOffline(name string, duration time.Duration) error {
return nil
}
func (n *NoopBackendRateLimiter) IncBackendRPS(name string) (int, error) {
return math.MaxInt, nil
}
func (n *NoopBackendRateLimiter) IncBackendWSConns(name string, max int) (bool, error) {
return true, nil
}
func (n *NoopBackendRateLimiter) DecBackendWSConns(name string) error {
return nil
}
func (n *NoopBackendRateLimiter) FlushBackendWSConns(names []string) error {
return nil
}
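
For context, the no-op limiter only works because it satisfies the backend rate limiter interface that proxyd.Start consumes. The sketch below is inferred from the method set above rather than copied from this diff (the real BackendRateLimiter definition lives elsewhere in the package), and the compile-time assertion is a hypothetical illustration:

// Sketch only: inferred from the NoopBackendRateLimiter methods above.
// Uses time.Duration from the same imports as the surrounding file.
type BackendRateLimiter interface {
	IsBackendOnline(name string) (bool, error)
	SetBackendOffline(name string, duration time.Duration) error
	IncBackendRPS(name string) (int, error)
	IncBackendWSConns(name string, max int) (bool, error)
	DecBackendWSConns(name string) error
	FlushBackendWSConns(names []string) error
}

// Hypothetical compile-time check that the no-op type satisfies the interface.
var _ BackendRateLimiter = (*NoopBackendRateLimiter)(nil)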

@ -37,6 +37,21 @@ func main() {
log.Crit("error reading config file", "err", err)
}
// update log level from config
logLevel, err := log.LvlFromString(config.Server.LogLevel)
if err != nil {
logLevel = log.LvlInfo
if config.Server.LogLevel != "" {
log.Warn("invalid server.log_level set: " + config.Server.LogLevel)
}
}
log.Root().SetHandler(
log.LvlFilterHandler(
logLevel,
log.StreamHandler(os.Stdout, log.JSONFormat()),
),
)
shutdown, err := proxyd.Start(config)
if err != nil {
log.Crit("error starting proxyd", "err", err)

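The fallback above means an unset or unrecognized server.log_level becomes info, with a warning logged when a non-empty invalid value was supplied. A minimal sketch of the corresponding config entry; the accepted names are whatever go-ethereum's log.LvlFromString understands (e.g. trace, debug, info, warn, error, crit), which is an assumption of this example rather than something enumerated in this change:

[server]
# Illustrative value; anything log.LvlFromString rejects falls back to "info".
log_level = "debug"
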
@ -14,6 +14,7 @@ type ServerConfig struct {
WSPort int `toml:"ws_port"`
MaxBodySizeBytes int64 `toml:"max_body_size_bytes"`
MaxConcurrentRPCs int64 `toml:"max_concurrent_rpcs"`
LogLevel string `toml:"log_level"`
// TimeoutSeconds specifies the maximum time spent serving an HTTP request. Note that this is not used for websocket connections.
TimeoutSeconds int `toml:"timeout_seconds"`
@ -41,13 +42,14 @@ type MetricsConfig struct {
}
type RateLimitConfig struct {
-UseRedis bool `toml:"use_redis"`
-BaseRate int `toml:"base_rate"`
-BaseInterval TOMLDuration `toml:"base_interval"`
-ExemptOrigins []string `toml:"exempt_origins"`
-ExemptUserAgents []string `toml:"exempt_user_agents"`
-ErrorMessage string `toml:"error_message"`
-MethodOverrides map[string]*RateLimitMethodOverride `toml:"method_overrides"`
+UseRedis bool `toml:"use_redis"`
+EnableBackendRateLimiter bool `toml:"enable_backend_rate_limiter"`
+BaseRate int `toml:"base_rate"`
+BaseInterval TOMLDuration `toml:"base_interval"`
+ExemptOrigins []string `toml:"exempt_origins"`
+ExemptUserAgents []string `toml:"exempt_user_agents"`
+ErrorMessage string `toml:"error_message"`
+MethodOverrides map[string]*RateLimitMethodOverride `toml:"method_overrides"`
}
type RateLimitMethodOverride struct {

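With the new field in place, a rate_limit section might look like the following. This is an illustrative sketch only: the values are made up, and base_interval is assumed to accept a Go duration string via TOMLDuration.

[rate_limit]
use_redis = false
base_rate = 100
base_interval = "1s"
error_message = "over rate limit"
# New in this change: the backend rate limiter is now opt-in.
enable_backend_rate_limiter = true
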
@ -19,6 +19,8 @@ ws_port = 8085
# Maximum client body size, in bytes, that the server will accept.
max_body_size_bytes = 10485760
max_concurrent_rpcs = 1000
# Server log level
log_level = "info"
[redis]
# URL to a Redis instance.

@ -16,3 +16,6 @@ backends = ["good"]
[rpc_method_mappings]
eth_chainId = "main"
[rate_limit]
enable_backend_rate_limiter = true

@ -20,3 +20,6 @@ backends = ["bad", "good"]
[rpc_method_mappings]
eth_chainId = "main"
[rate_limit]
enable_backend_rate_limiter = true

@ -26,3 +26,6 @@ backends = ["good"]
[rpc_method_mappings]
eth_chainId = "main"
[rate_limit]
enable_backend_rate_limiter = true

@ -53,11 +53,15 @@ func Start(config *Config) (func(), error) {
var lim BackendRateLimiter
var err error
-if redisClient == nil {
-	log.Warn("redis is not configured, using local rate limiter")
-	lim = NewLocalBackendRateLimiter()
+if config.RateLimit.EnableBackendRateLimiter {
+	if redisClient != nil {
+		lim = NewRedisRateLimiter(redisClient)
+	} else {
+		log.Warn("redis is not configured, using local rate limiter")
+		lim = NewLocalBackendRateLimiter()
+	}
} else {
-	lim = NewRedisRateLimiter(redisClient)
+	lim = noopBackendRateLimiter
}
// While modifying shared globals is a bad practice, the alternative

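The selection above makes backend rate limiting opt-in: with enable_backend_rate_limiter unset or false, the no-op limiter is used, so backends are never marked offline and RPS/WS-connection accounting always succeeds; when it is true, the Redis-backed limiter is chosen if Redis is configured, otherwise the local in-process one. An illustrative config for the Redis-backed path (the URL is a placeholder, and the url key name is assumed from the [redis] section of the example config):

[redis]
url = "redis://localhost:6379"

[rate_limit]
enable_backend_rate_limiter = true
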
@ -8,6 +8,7 @@ import (
"io"
"math"
"net/http"
"regexp"
"strconv"
"strings"
"sync"
@ -49,8 +50,8 @@ type Server struct {
upgrader *websocket.Upgrader
mainLim FrontendRateLimiter
overrideLims map[string]FrontendRateLimiter
-limExemptOrigins map[string]bool
-limExemptUserAgents map[string]bool
+limExemptOrigins []*regexp.Regexp
+limExemptUserAgents []*regexp.Regexp
rpcServer *http.Server
wsServer *http.Server
cache RPCCache
@ -104,15 +105,23 @@ func NewServer(
}
var mainLim FrontendRateLimiter
-limExemptOrigins := make(map[string]bool)
-limExemptUserAgents := make(map[string]bool)
+limExemptOrigins := make([]*regexp.Regexp, 0)
+limExemptUserAgents := make([]*regexp.Regexp, 0)
if rateLimitConfig.BaseRate > 0 {
mainLim = limiterFactory(time.Duration(rateLimitConfig.BaseInterval), rateLimitConfig.BaseRate, "main")
for _, origin := range rateLimitConfig.ExemptOrigins {
-limExemptOrigins[strings.ToLower(origin)] = true
+pattern, err := regexp.Compile(origin)
+if err != nil {
+	return nil, err
+}
+limExemptOrigins = append(limExemptOrigins, pattern)
}
for _, agent := range rateLimitConfig.ExemptUserAgents {
-limExemptUserAgents[strings.ToLower(agent)] = true
+pattern, err := regexp.Compile(agent)
+if err != nil {
+	return nil, err
+}
+limExemptUserAgents = append(limExemptUserAgents, pattern)
}
} else {
mainLim = NoopFrontendRateLimiter
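
Because exemption entries are now compiled with regexp.Compile and matched against the raw Origin and User-Agent values instead of being lowercased and looked up exactly, config entries are treated as Go regular expressions, matching is case-sensitive unless the pattern says otherwise, and an invalid pattern fails NewServer. An illustrative exemption config (patterns are examples only):

[rate_limit]
base_rate = 100
base_interval = "1s"
# Treated as Go regular expressions after this change.
exempt_origins = ['^https://app\.example\.com$']
exempt_user_agents = ['^internal-health-checker/']
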
@ -548,11 +557,22 @@ func (s *Server) populateContext(w http.ResponseWriter, r *http.Request) context
}
func (s *Server) isUnlimitedOrigin(origin string) bool {
-return s.limExemptOrigins[strings.ToLower(origin)]
+for _, pat := range s.limExemptOrigins {
+	if pat.MatchString(origin) {
+		return true
+	}
+}
+return false
}
func (s *Server) isUnlimitedUserAgent(origin string) bool {
-return s.limExemptUserAgents[strings.ToLower(origin)]
+for _, pat := range s.limExemptUserAgents {
+	if pat.MatchString(origin) {
+		return true
+	}
+}
+return false
}
func setCacheHeader(w http.ResponseWriter, cached bool) {
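
A small self-contained sketch of the first-match-wins behavior these two helpers now implement; the helper name and patterns below are hypothetical and only illustrate the loop:

package main

import (
	"fmt"
	"regexp"
)

// isUnlimited mirrors the loop in isUnlimitedOrigin/isUnlimitedUserAgent:
// the first pattern that matches exempts the request from rate limiting.
func isUnlimited(pats []*regexp.Regexp, value string) bool {
	for _, pat := range pats {
		if pat.MatchString(value) {
			return true
		}
	}
	return false
}

func main() {
	pats := []*regexp.Regexp{regexp.MustCompile(`^internal-client/`)}
	fmt.Println(isUnlimited(pats, "internal-client/1.2")) // true
	fmt.Println(isUnlimited(pats, "curl/7.79.1"))         // false
}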