2ce00adb55
* focus on performance improvement in many aspects:
  1. Do BlockBody verification concurrently;
  2. Do calculation of intermediate root concurrently;
  3. Preload accounts before processing blocks;
  4. Make the snapshot layers configurable;
  5. Reuse some objects to reduce GC.

* rlp: improve decoder stream implementation (#22858)

  This commit makes various cleanup changes to rlp.Stream.

  * rlp: shrink Stream struct

    This removes a lot of unused padding space in Stream by reordering the
    fields. The size of Stream changes from 120 bytes to 88 bytes. Stream
    instances are internally cached and reused using sync.Pool, so this does
    not improve performance.

  * rlp: simplify list stack

    The list stack kept track of the size of the current list context as
    well as the current offset into it. The size had to be stored in the
    stack in order to subtract it from the remaining bytes of any enclosing
    list in ListEnd. It seems that this can be implemented in a simpler way:
    just subtract the size from the enclosing list context in List instead.

* rlp: use atomic.Value for type cache (#22902)

  All encoding/decoding operations read the type cache to find the
  writer/decoder function responsible for a type. When analyzing CPU
  profiles of geth during sync, I found that the use of sync.RWMutex in
  cache lookups appears in the profiles. It seems we are running into CPU
  cache contention problems when package rlp is heavily used on all CPU
  cores during sync.

  This change makes it use atomic.Value + a writer lock instead of
  sync.RWMutex. In the common case where the typeinfo entry is present in
  the cache, we simply fetch the map and look up the type.

* rlp: optimize byte array handling (#22924)

  This change improves the performance of encoding/decoding [N]byte.

  name                     old time/op    new time/op    delta
  DecodeByteArrayStruct-8     336ns ± 0%     246ns ± 0%  -26.98%  (p=0.000 n=9+10)
  EncodeByteArrayStruct-8     225ns ± 1%     148ns ± 1%  -34.12%  (p=0.000 n=10+10)

  name                     old alloc/op   new alloc/op   delta
  DecodeByteArrayStruct-8      120B ± 0%       48B ± 0%  -60.00%  (p=0.000 n=10+10)
  EncodeByteArrayStruct-8     0.00B          0.00B          ~     (all equal)

* rlp: optimize big.Int decoding for size <= 32 bytes (#22927)

  This change grows the static integer buffer in Stream to 32 bytes, making
  it possible to decode 256-bit integers without allocating a temporary
  buffer. In the recent commit 088da24, the Stream struct size decreased
  from 120 bytes down to 88 bytes. This commit grows the struct to 112 bytes
  again, but the size change will not degrade performance because Stream
  instances are internally cached in sync.Pool.

  name              old time/op    new time/op    delta
  DecodeBigInts-8   12.2µs ± 0%     8.6µs ± 4%   -29.58%  (p=0.000 n=9+10)

  name              old speed      new speed      delta
  DecodeBigInts-8   230MB/s ± 0%   326MB/s ± 4%  +42.04%  (p=0.000 n=9+10)

* eth/protocols/eth, les: avoid Raw() when decoding HashOrNumber (#22841)

  Getting the raw value is not necessary to decode this type, and decoding
  it directly from the stream is faster.

* fix testcase
* debug no lazy
* fix can not repair
* address comments

Co-authored-by: Felix Lange <fjl@twurst.com>
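The atomic.Value change in #22902 boils down to a copy-on-write cache. The sketch below is illustrative only, not the actual rlp package internals: typeinfo is reduced to a stub and all names are simplified stand-ins. Readers perform one atomic load plus one map lookup with no lock at all; writers copy the map under a plain mutex and publish the new version atomically.

package rlpcache

import (
	"reflect"
	"sync"
	"sync/atomic"
)

// typeinfo is a stub standing in for the per-type writer/decoder functions.
type typeinfo struct{}

type typeCache struct {
	cur atomic.Value // holds the current map[reflect.Type]*typeinfo
	mu  sync.Mutex   // serializes writers only; readers never take it
}

func newTypeCache() *typeCache {
	c := new(typeCache)
	c.cur.Store(make(map[reflect.Type]*typeinfo))
	return c
}

// info is the hot path: one atomic load and one map lookup.
func (c *typeCache) info(t reflect.Type) *typeinfo {
	m := c.cur.Load().(map[reflect.Type]*typeinfo)
	if info := m[t]; info != nil {
		return info
	}
	return c.generate(t)
}

// generate is the slow path: copy the map, add the entry, publish the copy.
func (c *typeCache) generate(t reflect.Type) *typeinfo {
	c.mu.Lock()
	defer c.mu.Unlock()

	old := c.cur.Load().(map[reflect.Type]*typeinfo)
	if info := old[t]; info != nil {
		return info // another goroutine generated it while we waited
	}
	next := make(map[reflect.Type]*typeinfo, len(old)+1)
	for k, v := range old {
		next[k] = v
	}
	info := new(typeinfo) // the real code builds the writer/decoder here
	next[t] = info
	c.cur.Store(next)
	return info
}

The copy on each write is affordable because the set of encoded types is small and stabilizes early in a run, while the lock-free read path is exercised on every encode and decode.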
188 lines
5.7 KiB
Go
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bloombits

import (
	"sync"

	"github.com/ethereum/go-ethereum/common/gopool"
)

// request represents a bloom retrieval task to prioritize and pull from the local
// database or remotely from the network.
type request struct {
	section uint64 // Section index to retrieve the bit-vector from
	bit     uint   // Bit index within the section to retrieve the vector of
}

// response represents the state of a requested bit-vector through a scheduler.
type response struct {
	cached []byte        // Cached bits to dedup multiple requests
	done   chan struct{} // Channel to allow waiting for completion
}

// scheduler handles the scheduling of bloom-filter retrieval operations for
// entire section-batches belonging to a single bloom bit. Besides scheduling the
// retrieval operations, this struct also deduplicates the requests and caches
// the results to minimize network/database overhead even in complex filtering
// scenarios.
type scheduler struct {
	bit       uint                 // Index of the bit in the bloom filter this scheduler is responsible for
	responses map[uint64]*response // Currently pending retrieval requests or already cached responses
	lock      sync.Mutex           // Lock protecting the responses from concurrent access
}

// newScheduler creates a new bloom-filter retrieval scheduler for a specific
// bit index.
func newScheduler(idx uint) *scheduler {
	return &scheduler{
		bit:       idx,
		responses: make(map[uint64]*response),
	}
}

// run creates a retrieval pipeline, receiving section indexes from sections and
// returning the results in the same order through the done channel. Concurrent
// runs of the same scheduler are allowed, leading to retrieval task deduplication.
func (s *scheduler) run(sections chan uint64, dist chan *request, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
	// Create a forwarder channel between requests and responses of the same size as
	// the distribution channel (since that will block the pipeline anyway).
	pend := make(chan uint64, cap(dist))

	// Start the pipeline schedulers to forward between user -> distributor -> user
	wg.Add(2)
	gopool.Submit(func() {
		s.scheduleRequests(sections, dist, pend, quit, wg)
	})
	gopool.Submit(func() {
		s.scheduleDeliveries(pend, done, quit, wg)
	})
}

// reset cleans up any leftovers from previous runs. This is required before a
// restart to ensure that no previously requested but never delivered state can
// cause a lockup.
func (s *scheduler) reset() {
	s.lock.Lock()
	defer s.lock.Unlock()

	for section, res := range s.responses {
		if res.cached == nil {
			delete(s.responses, section)
		}
	}
}

// scheduleRequests reads section retrieval requests from the input channel,
// deduplicates the stream and pushes unique retrieval tasks into the distribution
// channel for a database or network layer to honour.
func (s *scheduler) scheduleRequests(reqs chan uint64, dist chan *request, pend chan uint64, quit chan struct{}, wg *sync.WaitGroup) {
	// Clean up the goroutine and pipeline when done
	defer wg.Done()
	defer close(pend)

	// Keep reading and scheduling section requests
	for {
		select {
		case <-quit:
			return

		case section, ok := <-reqs:
			// New section retrieval requested
			if !ok {
				return
			}
			// Deduplicate retrieval requests
			unique := false

			s.lock.Lock()
			if s.responses[section] == nil {
				s.responses[section] = &response{
					done: make(chan struct{}),
				}
				unique = true
			}
			s.lock.Unlock()

			// Schedule the section for retrieval and notify the deliverer to expect this section
			if unique {
				select {
				case <-quit:
					return
				case dist <- &request{bit: s.bit, section: section}:
				}
			}
			select {
			case <-quit:
				return
			case pend <- section:
			}
		}
	}
}

// scheduleDeliveries reads section acceptance notifications and waits for them
// to be delivered, pushing them into the output data buffer.
func (s *scheduler) scheduleDeliveries(pend chan uint64, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
	// Clean up the goroutine and pipeline when done
	defer wg.Done()
	defer close(done)

	// Keep reading notifications and scheduling deliveries
	for {
		select {
		case <-quit:
			return

		case idx, ok := <-pend:
			// New section retrieval pending
			if !ok {
				return
			}
			// Wait until the request is honoured
			s.lock.Lock()
			res := s.responses[idx]
			s.lock.Unlock()

			select {
			case <-quit:
				return
			case <-res.done:
			}
			// Deliver the result
			select {
			case <-quit:
				return
			case done <- res.cached:
			}
		}
	}
}

// deliver is called by the request distributor when a reply to a request arrives.
func (s *scheduler) deliver(sections []uint64, data [][]byte) {
	s.lock.Lock()
	defer s.lock.Unlock()

	for i, section := range sections {
		if res := s.responses[section]; res != nil && res.cached == nil { // Avoid non-requests and double deliveries
			res.cached = data[i]
			close(res.done)
		}
	}
}
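
To make the pipeline concrete, here is a minimal sketch of how a caller could drive a scheduler. It is not part of the original file: exampleRun and the retrieve callback are hypothetical, with retrieve standing in for whatever database or network lookup serves the bit-vectors. Results arrive on done in the same order the sections were fed in.

// exampleRun is an illustrative sketch only. The retrieve callback is assumed
// to return a non-nil bit-vector for every (bit, section) pair.
func exampleRun(retrieve func(bit uint, section uint64) []byte) {
	var (
		s        = newScheduler(0)
		sections = make(chan uint64)
		dist     = make(chan *request, 16)
		done     = make(chan []byte)
		quit     = make(chan struct{})
		wg       sync.WaitGroup
	)
	s.run(sections, dist, done, quit, &wg)

	// Distributor: honour retrieval requests as the scheduler emits them.
	go func() {
		for req := range dist {
			s.deliver([]uint64{req.section}, [][]byte{retrieve(req.bit, req.section)})
		}
	}()
	// Feed a few section indexes; closing the input shuts the pipeline down.
	go func() {
		for i := uint64(0); i < 4; i++ {
			sections <- i
		}
		close(sections)
	}()
	for res := range done {
		_ = res // consume the ordered bit-vectors
	}
	wg.Wait()
	close(dist) // the scheduler goroutines have exited; release the distributor
	close(quit) // quit is only needed to abort a run early
}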