bsc/p2p/enode/iter.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package enode

import (
	"sync"
	"time"
"github.com/ethereum/go-ethereum/common/gopool"
)
// Iterator represents a sequence of nodes. The Next method moves to the next node in the
// sequence. It returns false when the sequence has ended or the iterator is closed. Close
// may be called concurrently with Next and Node, and interrupts Next if it is blocked.
type Iterator interface {
	Next() bool  // moves to next node
	Node() *Node // returns current node
	Close()      // ends the iterator
}
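
// A typical consumer loop looks like the sketch below (illustrative only, not
// part of the upstream file; src stands for any hypothetical Iterator):
//
//	defer src.Close()
//	for src.Next() {
//		n := src.Node()
//		// ... use n ...
//	}
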
// ReadNodes reads at most n nodes from the given iterator. The return value contains no
// duplicates and no nil values. To prevent looping indefinitely for small repeating node
// sequences, this function calls Next at most n times.
func ReadNodes(it Iterator, n int) []*Node {
	seen := make(map[ID]*Node, n)
	for i := 0; i < n && it.Next(); i++ {
		// Remove duplicates, keeping the node with higher seq.
		node := it.Node()
		prevNode, ok := seen[node.ID()]
		if ok && prevNode.Seq() > node.Seq() {
			continue
		}
		seen[node.ID()] = node
	}
	result := make([]*Node, 0, len(seen))
	for _, node := range seen {
		result = append(result, node)
	}
	return result
}
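
// For instance, a small batch of unique candidates can be collected like so
// (a hedged sketch; disc stands for any Iterator implementation):
//
//	nodes := ReadNodes(disc, 16) // calls Next at most 16 times
//	// nodes contains no duplicates and no nil values
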
// IterNodes makes an iterator which runs through the given nodes once.
func IterNodes(nodes []*Node) Iterator {
	return &sliceIter{nodes: nodes, index: -1}
}

// CycleNodes makes an iterator which cycles through the given nodes indefinitely.
func CycleNodes(nodes []*Node) Iterator {
	return &sliceIter{nodes: nodes, index: -1, cycle: true}
}
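
// Example (illustrative, not part of the upstream file): a static bootstrap
// list can be replayed forever with CycleNodes, or exactly once with
// IterNodes. Here bootnodes and connect are hypothetical:
//
//	it := CycleNodes(bootnodes)
//	for it.Next() {
//		connect(it.Node())
//	}
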
type sliceIter struct {
	mu    sync.Mutex
	nodes []*Node
	index int
	cycle bool
}

func (it *sliceIter) Next() bool {
	it.mu.Lock()
	defer it.mu.Unlock()

	if len(it.nodes) == 0 {
		return false
	}
	it.index++
	if it.index == len(it.nodes) {
		if it.cycle {
			it.index = 0
		} else {
			it.nodes = nil
			return false
		}
	}
	return true
}

func (it *sliceIter) Node() *Node {
	it.mu.Lock()
	defer it.mu.Unlock()

	if len(it.nodes) == 0 {
		return nil
	}
	return it.nodes[it.index]
}

func (it *sliceIter) Close() {
	it.mu.Lock()
	defer it.mu.Unlock()

	it.nodes = nil
}

// Filter wraps an iterator such that Next only returns nodes for which
// the 'check' function returns true.
func Filter(it Iterator, check func(*Node) bool) Iterator {
	return &filterIter{it, check}
}
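
// A sketch of typical use (illustrative; the predicate here is made up):
//
//	filtered := Filter(it, func(n *Node) bool {
//		return n.TCP() != 0 // keep only nodes advertising a TCP port
//	})
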
type filterIter struct {
	Iterator
	check func(*Node) bool
}

func (f *filterIter) Next() bool {
	for f.Iterator.Next() {
		if f.check(f.Node()) {
			return true
		}
	}
	return false
}

// FairMix aggregates multiple node iterators. The mixer itself is an iterator which ends
// only when Close is called. Source iterators added via AddSource are removed from the
// mix when they end.
//
// The distribution of nodes returned by Next is approximately fair, i.e. FairMix
// attempts to draw from all sources equally often. However, if a certain source is slow
// and doesn't return a node within the configured timeout, a node from any other source
// will be returned.
//
// It's safe to call AddSource and Close concurrently with Next.
type FairMix struct {
	wg      sync.WaitGroup
	fromAny chan *Node
	timeout time.Duration
	cur     *Node

	mu      sync.Mutex
	closed  chan struct{}
	sources []*mixSource
	last    int
}

type mixSource struct {
	it      Iterator
	next    chan *Node
	timeout time.Duration
}

// NewFairMix creates a mixer.
//
// The timeout specifies how long the mixer will wait for the next fairly-chosen source
// before giving up and taking a node from any other source. A good way to set the timeout
// is deciding how long you'd want to wait for a node on average. Passing a negative
// timeout makes the mixer completely fair.
func NewFairMix(timeout time.Duration) *FairMix {
	m := &FairMix{
		fromAny: make(chan *Node),
		closed:  make(chan struct{}),
		timeout: timeout,
	}
	return m
}
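
// Putting it together (an illustrative sketch; srcA and srcB stand for
// hypothetical source iterators, e.g. from discovery):
//
//	mix := NewFairMix(time.Second) // wait up to 1s for the fair choice
//	mix.AddSource(srcA)
//	mix.AddSource(srcB)
//	defer mix.Close()
//	for mix.Next() {
//		n := mix.Node() // drawn from srcA and srcB roughly equally often
//		// ... use n ...
//	}
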
// AddSource adds a source of nodes.
func (m *FairMix) AddSource(it Iterator) {
	m.mu.Lock()
	defer m.mu.Unlock()

	if m.closed == nil {
		return
	}
	m.wg.Add(1)
	source := &mixSource{it, make(chan *Node), m.timeout}
	m.sources = append(m.sources, source)
	gopool.Submit(func() {
		m.runSource(m.closed, source)
	})
}

// Close shuts down the mixer and all current sources.
// Calling this is required to release resources associated with the mixer.
func (m *FairMix) Close() {
	m.mu.Lock()
	defer m.mu.Unlock()

	if m.closed == nil {
		return
	}
	for _, s := range m.sources {
		s.it.Close()
	}
	close(m.closed)
	m.wg.Wait()
	close(m.fromAny)
	m.sources = nil
	m.closed = nil
}

// Next returns a node from a random source.
func (m *FairMix) Next() bool {
	m.cur = nil

	for {
		source := m.pickSource()
		if source == nil {
			return m.nextFromAny()
		}

		var timeout <-chan time.Time
		if source.timeout >= 0 {
			timer := time.NewTimer(source.timeout)
			timeout = timer.C
			defer timer.Stop()
		}

		select {
		case n, ok := <-source.next:
			if ok {
				// Here, the timeout is reset to the configured value
				// because the source delivered a node.
				source.timeout = m.timeout
				m.cur = n
				return true
			}
			// This source has ended.
			m.deleteSource(source)
		case <-timeout:
			// The selected source did not deliver a node within the timeout, so the
			// timeout duration is halved for next time. This is supposed to improve
			// latency with stuck sources.
			source.timeout /= 2
			return m.nextFromAny()
		}
	}
}

// Node returns the current node.
func (m *FairMix) Node() *Node {
	return m.cur
}

// nextFromAny is used when there are no sources or when the 'fair' choice
// doesn't turn up a node quickly enough.
func (m *FairMix) nextFromAny() bool {
	n, ok := <-m.fromAny
	if ok {
		m.cur = n
	}
	return ok
}

// pickSource chooses the next source to read from, cycling through them in order.
func (m *FairMix) pickSource() *mixSource {
	m.mu.Lock()
	defer m.mu.Unlock()

	if len(m.sources) == 0 {
		return nil
	}
	m.last = (m.last + 1) % len(m.sources)
	return m.sources[m.last]
}

// deleteSource deletes a source.
func (m *FairMix) deleteSource(s *mixSource) {
	m.mu.Lock()
	defer m.mu.Unlock()

	for i := range m.sources {
		if m.sources[i] == s {
			copy(m.sources[i:], m.sources[i+1:])
			m.sources[len(m.sources)-1] = nil
			m.sources = m.sources[:len(m.sources)-1]
			break
		}
	}
}

// runSource reads a single source in a loop.
func (m *FairMix) runSource(closed chan struct{}, s *mixSource) {
	defer m.wg.Done()
	defer close(s.next)

	for s.it.Next() {
		n := s.it.Node()
		// Offer the node on the per-source channel (read by the fair pick in
		// Next) and on the shared fromAny channel (read by nextFromAny);
		// whichever the mixer happens to be waiting on receives it.
		select {
		case s.next <- n:
		case m.fromAny <- n:
		case <-closed:
			return
		}
	}
}