log: use atomic types (#27763)

Co-authored-by: Felix Lange <fjl@twurst.com>
ucwong 2023-08-04 23:58:53 +01:00 committed by GitHub
parent bb148dd342
commit 5c30541c2a
2 changed files with 15 additions and 19 deletions
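
For context, this commit replaces the pre-Go-1.19 pattern of bare uint32 fields accessed through the sync/atomic free functions with the typed atomic.Bool and atomic.Uint32 wrappers. The following is a minimal sketch of the two styles, assuming Go 1.19 or newer; the flag names are illustrative and not part of the commit:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // Legacy style: a bare uint32 that every caller must remember to access
    // only through the atomic.* free functions, encoding true/false as 1/0.
    var legacyEnabled uint32

    func setLegacy(on bool) {
    	if on {
    		atomic.StoreUint32(&legacyEnabled, 1)
    	} else {
    		atomic.StoreUint32(&legacyEnabled, 0)
    	}
    }

    // Typed style (Go 1.19+): atomic.Bool stores the boolean directly and
    // cannot be read or written non-atomically by accident.
    var typedEnabled atomic.Bool

    func setTyped(on bool) {
    	typedEnabled.Store(on)
    }

    func main() {
    	setLegacy(true)
    	setTyped(true)
    	fmt.Println(atomic.LoadUint32(&legacyEnabled) != 0, typedEnabled.Load())
    }

The typed wrappers also make the intended semantics visible in the field declarations themselves, which is what the locationEnabled/locationLength variables and the GlogHandler fields below gain.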

@@ -32,20 +32,16 @@ var locationTrims = []string{
// PrintOrigins sets or unsets log location (file:line) printing for terminal
// format output.
func PrintOrigins(print bool) {
if print {
atomic.StoreUint32(&locationEnabled, 1)
} else {
atomic.StoreUint32(&locationEnabled, 0)
}
locationEnabled.Store(print)
}
// locationEnabled is an atomic flag controlling whether the terminal formatter
// should append the log locations too when printing entries.
var locationEnabled uint32
var locationEnabled atomic.Bool
// locationLength is the maximum path length encountered, which all logs are
// padded to, to aid in alignment.
var locationLength uint32
var locationLength atomic.Uint32
// fieldPadding is a global map with maximum field value lengths seen until now
// to allow padding log contexts in a bit smarter way.
@@ -109,17 +105,17 @@ func TerminalFormat(usecolor bool) Format {
b := &bytes.Buffer{}
lvl := r.Lvl.AlignedString()
if atomic.LoadUint32(&locationEnabled) != 0 {
if locationEnabled.Load() {
// Log origin printing was requested, format the location path and line number
location := fmt.Sprintf("%+v", r.Call)
for _, prefix := range locationTrims {
location = strings.TrimPrefix(location, prefix)
}
// Maintain the maximum location length for fancier alignment
align := int(atomic.LoadUint32(&locationLength))
align := int(locationLength.Load())
if align < len(location) {
align = len(location)
atomic.StoreUint32(&locationLength, uint32(align))
locationLength.Store(uint32(align))
}
padding := strings.Repeat(" ", align-len(location))

@@ -39,9 +39,9 @@ var errTraceSyntax = errors.New("expect file.go:234")
type GlogHandler struct {
origin Handler // The origin handler this wraps
level uint32 // Current log level, atomically accessible
override uint32 // Flag whether overrides are used, atomically accessible
backtrace uint32 // Flag whether backtrace location is set
level atomic.Uint32 // Current log level, atomically accessible
override atomic.Bool // Flag whether overrides are used, atomically accessible
backtrace atomic.Bool // Flag whether backtrace location is set
patterns []pattern // Current list of patterns to override with
siteCache map[uintptr]Lvl // Cache of callsite pattern evaluations
@@ -72,7 +72,7 @@ type pattern struct {
// Verbosity sets the glog verbosity ceiling. The verbosity of individual packages
// and source files can be raised using Vmodule.
func (h *GlogHandler) Verbosity(level Lvl) {
atomic.StoreUint32(&h.level, uint32(level))
h.level.Store(uint32(level))
}
// Vmodule sets the glog verbosity pattern.
@@ -138,7 +138,7 @@ func (h *GlogHandler) Vmodule(ruleset string) error {
h.patterns = filter
h.siteCache = make(map[uintptr]Lvl)
atomic.StoreUint32(&h.override, uint32(len(filter)))
h.override.Store(len(filter) != 0)
return nil
}
@@ -171,7 +171,7 @@ func (h *GlogHandler) BacktraceAt(location string) error {
defer h.lock.Unlock()
h.location = location
atomic.StoreUint32(&h.backtrace, uint32(len(location)))
h.backtrace.Store(len(location) > 0)
return nil
}
@@ -180,7 +180,7 @@ func (h *GlogHandler) BacktraceAt(location string) error {
// and backtrace filters, finally emitting it if either allow it through.
func (h *GlogHandler) Log(r *Record) error {
// If backtracing is requested, check whether this is the callsite
if atomic.LoadUint32(&h.backtrace) > 0 {
if h.backtrace.Load() {
// Everything below here is slow. Although we could cache the call sites the
// same way as for vmodule, backtracing is so rare it's not worth the extra
// complexity.
@@ -198,11 +198,11 @@ func (h *GlogHandler) Log(r *Record) error {
}
}
// If the global log level allows, fast track logging
if atomic.LoadUint32(&h.level) >= uint32(r.Lvl) {
if h.level.Load() >= uint32(r.Lvl) {
return h.origin.Log(r)
}
// If no local overrides are present, fast track skipping
if atomic.LoadUint32(&h.override) == 0 {
if !h.override.Load() {
return nil
}
// Check callsite cache for previously calculated log levels