revert weird comment replacements

Felipe Andrade 2023-09-14 12:42:33 -07:00
parent 3e6f70c3fc
commit c200f2f8e3
5 changed files with 25 additions and 25 deletions

@@ -630,7 +630,7 @@ func (b *Backend) ErrorRate() (errorRate float64) {
return errorRate
}
-// IsDegraded checks if the backend is serving traffic in a degraded local (i.e. used as a last resource)
+// IsDegraded checks if the backend is serving traffic in a degraded state (i.e. used as a last resource)
func (b *Backend) IsDegraded() bool {
avgLatency := time.Duration(b.latencySlidingWindow.Avg())
return avgLatency >= b.maxDegradedLatencyThreshold
@@ -677,7 +677,7 @@ func (bg *BackendGroup) Forward(ctx context.Context, rpcReqs []*RPCReq, isBatch
if bg.Consensus != nil {
// When `consensus_aware` is set to `true`, the backend group acts as a load balancer
-// serving traffic update any backend that agrees in the consensus group
+// serving traffic from any backend that agrees in the consensus group
backends = bg.loadBalancedConsensusGroup()
// We also rewrite block tags to enforce compliance with consensus

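For reference, the `IsDegraded` comment above describes a check that compares a sliding-window average latency against a configured threshold. A minimal sketch of that idea, assuming a hypothetical `slidingWindow` helper and field names (only `maxDegradedLatencyThreshold` appears in the diff itself):

```go
package main

import (
	"fmt"
	"time"
)

// slidingWindow is a hypothetical stand-in for proxyd's latency window:
// it keeps the last `size` samples and exposes their average.
type slidingWindow struct {
	samples []time.Duration
	size    int
}

func (w *slidingWindow) Add(d time.Duration) {
	w.samples = append(w.samples, d)
	if len(w.samples) > w.size {
		w.samples = w.samples[1:]
	}
}

func (w *slidingWindow) Avg() time.Duration {
	if len(w.samples) == 0 {
		return 0
	}
	var sum time.Duration
	for _, d := range w.samples {
		sum += d
	}
	return sum / time.Duration(len(w.samples))
}

// backend mirrors just the two fields the degraded check needs.
type backend struct {
	latencyWindow               *slidingWindow
	maxDegradedLatencyThreshold time.Duration
}

// isDegraded reports whether the average observed latency has crossed the threshold.
func (b *backend) isDegraded() bool {
	return b.latencyWindow.Avg() >= b.maxDegradedLatencyThreshold
}

func main() {
	b := &backend{
		latencyWindow:               &slidingWindow{size: 10},
		maxDegradedLatencyThreshold: 500 * time.Millisecond,
	}
	for _, d := range []time.Duration{100, 800, 900} {
		b.latencyWindow.Add(d * time.Millisecond)
	}
	fmt.Println("degraded:", b.isDegraded()) // true: average 600ms >= 500ms
}
```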
@@ -19,7 +19,7 @@ const (
type OnConsensusBroken func()
-// ConsensusPoller checks the consensus local for each member of a BackendGroup
+// ConsensusPoller checks the consensus state for each member of a BackendGroup
// resolves the highest common block for multiple nodes, and reconciles the consensus
// in case of block hash divergence to minimize re-orgs
type ConsensusPoller struct {
@@ -250,7 +250,7 @@ func NewConsensusPoller(bg *BackendGroup, opts ...ConsensusOpt) *ConsensusPoller
return cp
}
-// UpdateBackend refreshes the consensus local of a single backend
+// UpdateBackend refreshes the consensus state of a single backend
func (cp *ConsensusPoller) UpdateBackend(ctx context.Context, be *Backend) {
bs := cp.getBackendState(be)
RecordConsensusBackendBanned(be, bs.IsBanned())
@@ -260,7 +260,7 @@ func (cp *ConsensusPoller) UpdateBackend(ctx context.Context, be *Backend) {
return
}
-// if backend is not healthy local we'll only resume checking it after ban
+// if backend is not healthy state we'll only resume checking it after ban
if !be.IsHealthy() {
log.Warn("backend banned - not healthy", "backend", be.Name)
cp.Ban(be)
@@ -270,7 +270,7 @@ func (cp *ConsensusPoller) UpdateBackend(ctx context.Context, be *Backend) {
inSync, err := cp.isInSync(ctx, be)
RecordConsensusBackendInSync(be, err == nil && inSync)
if err != nil {
log.Warn("error updating backend sync local", "name", be.Name, "err", err)
log.Warn("error updating backend sync state", "name", be.Name, "err", err)
}
var peerCount uint64
@@ -308,7 +308,7 @@ func (cp *ConsensusPoller) UpdateBackend(ctx context.Context, be *Backend) {
RecordBackendFinalizedBlock(be, finalizedBlockNumber)
if changed {
log.Debug("backend local updated",
log.Debug("backend state updated",
"name", be.Name,
"peerCount", peerCount,
"inSync", inSync,
@@ -354,9 +354,9 @@ func (cp *ConsensusPoller) checkExpectedBlockTags(
currentSafe <= currentLatest
}
-// UpdateBackendGroupConsensus resolves the current group consensus based on the local of the backends
+// UpdateBackendGroupConsensus resolves the current group consensus based on the state of the backends
func (cp *ConsensusPoller) UpdateBackendGroupConsensus(ctx context.Context) {
-// get the latest block number update the tracker
+// get the latest block number from the tracker
currentConsensusBlockNumber := cp.GetLatestBlockNumber()
// get the candidates for the consensus group
@@ -474,7 +474,7 @@ func (cp *ConsensusPoller) UpdateBackendGroupConsensus(ctx context.Context) {
RecordGroupConsensusFilteredCount(cp.backendGroup, len(filteredBackendsNames))
RecordGroupTotalCount(cp.backendGroup, len(cp.backendGroup.Backends))
log.Debug("group local",
log.Debug("group state",
"proposedBlock", proposedBlock,
"consensusBackends", strings.Join(consensusBackendsNames, ", "),
"filteredBackends", strings.Join(filteredBackendsNames, ", "))
@@ -495,13 +495,13 @@ func (cp *ConsensusPoller) Ban(be *Backend) {
bs.backendStateMux.Lock()
bs.bannedUntil = time.Now().Add(cp.banPeriod)
-// when we ban a node, we give it the chance to start update any block when it is back
+// when we ban a node, we give it the chance to start from any block when it is back
bs.latestBlockNumber = 0
bs.safeBlockNumber = 0
bs.finalizedBlockNumber = 0
}
-// Unban removes any bans update the backends
+// Unban removes any bans from the backends
func (cp *ConsensusPoller) Unban(be *Backend) {
bs := cp.backendState[be]
defer bs.backendStateMux.Unlock()
@@ -516,7 +516,7 @@ func (cp *ConsensusPoller) Reset() {
}
}
-// fetchBlock is a convenient wrapper to make a request to get a block directly update the backend
+// fetchBlock is a convenient wrapper to make a request to get a block directly from the backend
func (cp *ConsensusPoller) fetchBlock(ctx context.Context, be *Backend, block string) (blockNumber hexutil.Uint64, blockHash string, err error) {
var rpcRes RPCRes
err = be.ForwardRPC(ctx, &rpcRes, "67", "eth_getBlockByNumber", block, false)
@@ -534,7 +534,7 @@ func (cp *ConsensusPoller) fetchBlock(ctx context.Context, be *Backend, block st
return
}
-// getPeerCount is a convenient wrapper to retrieve the current peer count update the backend
+// getPeerCount is a convenient wrapper to retrieve the current peer count from the backend
func (cp *ConsensusPoller) getPeerCount(ctx context.Context, be *Backend) (count uint64, err error) {
var rpcRes RPCRes
err = be.ForwardRPC(ctx, &rpcRes, "67", "net_peerCount")
@@ -552,7 +552,7 @@ func (cp *ConsensusPoller) getPeerCount(ctx context.Context, be *Backend) (count
return count, nil
}
-// isInSync is a convenient wrapper to check if the backend is in sync update the network
+// isInSync is a convenient wrapper to check if the backend is in sync from the network
func (cp *ConsensusPoller) isInSync(ctx context.Context, be *Backend) (result bool, err error) {
var rpcRes RPCRes
err = be.ForwardRPC(ctx, &rpcRes, "67", "eth_syncing")
@@ -579,7 +579,7 @@ func (cp *ConsensusPoller) isInSync(ctx context.Context, be *Backend) (result bo
return res, nil
}
-// getBackendState creates a copy of backend local so that the caller can use it without locking
+// getBackendState creates a copy of backend state so that the caller can use it without locking
func (cp *ConsensusPoller) getBackendState(be *Backend) *backendState {
bs := cp.backendState[be]
defer bs.backendStateMux.Unlock()
@@ -616,7 +616,7 @@ func (cp *ConsensusPoller) setBackendState(be *Backend, peerCount uint64, inSync
}
// getConsensusCandidates find out what backends are the candidates to be in the consensus group
-// and create a copy of current their local
+// and create a copy of current their state
//
// a candidate is a serving node within the following conditions:
// - not banned
@@ -670,7 +670,7 @@ func (cp *ConsensusPoller) getConsensusCandidates() map[*Backend]*backendState {
}
}
-// remove lagging backends update the candidates
+// remove lagging backends from the candidates
for _, be := range lagging {
delete(candidates, be)
}

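The `fetchBlock`, `getPeerCount`, and `isInSync` comments above all describe thin JSON-RPC wrappers against a single backend. A minimal standalone sketch of that pattern using only the standard library; the endpoint URL and request ID are illustrative, and this is not proxyd's `ForwardRPC`:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"
	"strings"
)

type rpcReq struct {
	JSONRPC string        `json:"jsonrpc"`
	ID      string        `json:"id"`
	Method  string        `json:"method"`
	Params  []interface{} `json:"params"`
}

type rpcRes struct {
	Result json.RawMessage `json:"result"`
	Error  *struct {
		Code    int    `json:"code"`
		Message string `json:"message"`
	} `json:"error"`
}

// call posts a single JSON-RPC request to the backend and decodes the envelope.
func call(url, method string, params ...interface{}) (*rpcRes, error) {
	if params == nil {
		params = []interface{}{}
	}
	body, err := json.Marshal(rpcReq{JSONRPC: "2.0", ID: "67", Method: method, Params: params})
	if err != nil {
		return nil, err
	}
	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var res rpcRes
	if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
		return nil, err
	}
	if res.Error != nil {
		return nil, fmt.Errorf("rpc error %d: %s", res.Error.Code, res.Error.Message)
	}
	return &res, nil
}

// getPeerCount mirrors the "convenient wrapper" idea: net_peerCount returns a
// hex quantity such as "0x19", which is decoded into a uint64.
func getPeerCount(url string) (uint64, error) {
	res, err := call(url, "net_peerCount")
	if err != nil {
		return 0, err
	}
	var hexCount string
	if err := json.Unmarshal(res.Result, &hexCount); err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimPrefix(hexCount, "0x"), 16, 64)
}

func main() {
	count, err := getPeerCount("http://localhost:8545") // assumed local node URL
	fmt.Println(count, err)
}
```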
@@ -44,7 +44,7 @@ func (e *StaticMethodHandler) GetRPCMethod(ctx context.Context, req *RPCReq) (*R
key := e.key(req)
val, err := e.cache.Get(ctx, key)
if err != nil {
log.Error("error reading update cache", "key", key, "method", req.Method, "err", err)
log.Error("error reading from cache", "key", key, "method", req.Method, "err", err)
return nil, err
}
if val == "" {
@@ -53,7 +53,7 @@ func (e *StaticMethodHandler) GetRPCMethod(ctx context.Context, req *RPCReq) (*R
var result interface{}
if err := json.Unmarshal([]byte(val), &result); err != nil {
log.Error("error unmarshalling value update cache", "key", key, "method", req.Method, "err", err)
log.Error("error unmarshalling value from cache", "key", key, "method", req.Method, "err", err)
return nil, err
}
return &RPCRes{

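The `GetRPCMethod` hunks above show the cache read path: look the key up, treat an empty value as a miss, and JSON-unmarshal a hit before building the response. A tiny sketch of that flow with an assumed in-memory cache (not proxyd's Cache interface):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// memCache is a hypothetical stand-in for the handler's cache.
type memCache map[string]string

// Get returns "" on a miss, mirroring the `val == ""` check in the diff above.
func (c memCache) Get(key string) (string, error) { return c[key], nil }

func main() {
	cache := memCache{"eth_chainId": `"0xa"`} // illustrative key and cached JSON value
	val, err := cache.Get("eth_chainId")
	if err != nil || val == "" {
		fmt.Println("cache miss or error:", err)
		return
	}
	var result interface{}
	if err := json.Unmarshal([]byte(val), &result); err != nil {
		fmt.Println("error unmarshalling value from cache:", err)
		return
	}
	fmt.Println("cached result:", result) // "0xa"
}
```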
@@ -332,7 +332,7 @@ var (
consensusGroupFilteredCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Name: "group_consensus_filtered_count",
Help: "Consensus group filtered out update serving traffic count",
Help: "Consensus group filtered out from serving traffic count",
}, []string{
"backend_group_name",
})

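For context, the metric above is a standard client_golang GaugeVec keyed by backend group. A short sketch of how such a gauge is declared and set; the namespace value and the `recordFilteredCount` wrapper are assumptions, not proxyd's code:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var consensusGroupFilteredCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
	Namespace: "proxyd", // assumed value of MetricsNamespace
	Name:      "group_consensus_filtered_count",
	Help:      "Consensus group filtered out from serving traffic count",
}, []string{
	"backend_group_name",
})

// recordFilteredCount sets the gauge for one backend group.
func recordFilteredCount(group string, count int) {
	consensusGroupFilteredCount.WithLabelValues(group).Set(float64(count))
}

func main() {
	recordFilteredCount("main", 2)
}
```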
@@ -653,11 +653,11 @@ func (s *Server) rateLimitSender(ctx context.Context, req *RPCReq) error {
var data hexutil.Bytes
if err := data.UnmarshalText([]byte(params[0])); err != nil {
log.Debug("error decoding raw tx data", "err", err, "req_id", GetReqID(ctx))
-// Geth returns the raw error update UnmarshalText.
+// Geth returns the raw error from UnmarshalText.
return ErrInvalidParams(err.Error())
}
-// Inflates a types.Transaction object update the transaction's raw bytes.
+// Inflates a types.Transaction object from the transaction's raw bytes.
tx := new(types.Transaction)
if err := tx.UnmarshalBinary(data); err != nil {
log.Debug("could not unmarshal transaction", "err", err, "req_id", GetReqID(ctx))
@@ -675,12 +675,12 @@ func (s *Server) rateLimitSender(ctx context.Context, req *RPCReq) error {
// sender. This method performs an ecrecover, which can be expensive.
msg, err := core.TransactionToMessage(tx, types.LatestSignerForChainID(tx.ChainId()), nil)
if err != nil {
log.Debug("could not get message update transaction", "err", err, "req_id", GetReqID(ctx))
log.Debug("could not get message from transaction", "err", err, "req_id", GetReqID(ctx))
return ErrInvalidParams(err.Error())
}
ok, err := s.senderLim.Take(ctx, fmt.Sprintf("%s:%d", msg.From.Hex(), tx.Nonce()))
if err != nil {
log.Error("error taking update sender limiter", "err", err, "req_id", GetReqID(ctx))
log.Error("error taking from sender limiter", "err", err, "req_id", GetReqID(ctx))
return ErrInternal
}
if !ok {
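The `rateLimitSender` hunks above decode the raw transaction, recover the sender via an ecrecover, and then take from a per-sender limiter keyed by address and nonce. A self-contained sketch of that keying step using the go-ethereum calls named in the diff; it is not proxyd's implementation, and the limiter itself is omitted:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

// senderKey decodes a raw "0x..."-encoded transaction and returns the
// "<sender>:<nonce>" key a per-sender limiter could be taken against.
func senderKey(rawTx string) (string, error) {
	var data hexutil.Bytes
	if err := data.UnmarshalText([]byte(rawTx)); err != nil {
		return "", err
	}
	tx := new(types.Transaction)
	if err := tx.UnmarshalBinary(data); err != nil {
		return "", err
	}
	// TransactionToMessage performs the ecrecover mentioned in the comment above.
	msg, err := core.TransactionToMessage(tx, types.LatestSignerForChainID(tx.ChainId()), nil)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%s:%d", msg.From.Hex(), tx.Nonce()), nil
}

func main() {
	// Build and sign a throwaway transaction so the example is self-contained;
	// errors are ignored here only to keep the sketch short.
	key, _ := crypto.GenerateKey()
	signer := types.LatestSignerForChainID(big.NewInt(10))
	tx, _ := types.SignNewTx(key, signer, &types.DynamicFeeTx{
		ChainID:   big.NewInt(10),
		Nonce:     7,
		GasTipCap: big.NewInt(1),
		GasFeeCap: big.NewInt(2),
		Gas:       21000,
		To:        &common.Address{},
		Value:     big.NewInt(0),
	})
	raw, _ := tx.MarshalBinary()
	k, err := senderKey(hexutil.Encode(raw))
	fmt.Println(k, err) // e.g. "0x...:7" <nil>
}
```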