// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package state

import (
	"errors"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
)

var (
	// triePrefetchMetricsPrefix is the prefix under which to publish the metrics.
	triePrefetchMetricsPrefix = "trie/prefetch/"

	// errTerminated is returned if an operation is attempted on a fetcher after
	// it has already terminated.
	errTerminated = errors.New("fetcher is already terminated")
)

// triePrefetcher is an active prefetcher, which receives accounts or storage
// items and pre-loads the trie paths for them. The goal is to get as much
// useful content into the caches as possible.
//
// Note: the prefetcher's API is not thread-safe.
type triePrefetcher struct {
	db       Database               // Database to fetch trie nodes through
	root     common.Hash            // Root hash of the account trie for metrics
	fetchers map[string]*subfetcher // Subfetchers for each trie
	term     chan struct{}          // Channel to signal interruption
	noreads  bool                   // Whether to ignore state-read-only prefetch requests

	deliveryMissMeter metrics.Meter

	accountLoadReadMeter  metrics.Meter
	accountLoadWriteMeter metrics.Meter
	accountDupReadMeter   metrics.Meter
	accountDupWriteMeter  metrics.Meter
	accountDupCrossMeter  metrics.Meter
	accountWasteMeter     metrics.Meter

	storageLoadReadMeter  metrics.Meter
	storageLoadWriteMeter metrics.Meter
	storageDupReadMeter   metrics.Meter
	storageDupWriteMeter  metrics.Meter
	storageDupCrossMeter  metrics.Meter
	storageWasteMeter     metrics.Meter
}

// newTriePrefetcher creates a prefetcher for the given state root, registering
// its metrics under the given namespace. If noreads is set, read-only prefetch
// requests are ignored.
func newTriePrefetcher(db Database, root common.Hash, namespace string, noreads bool) *triePrefetcher {
	prefix := triePrefetchMetricsPrefix + namespace
	return &triePrefetcher{
		db:       db,
		root:     root,
		fetchers: make(map[string]*subfetcher), // Active prefetchers use the fetchers map
		term:     make(chan struct{}),
		noreads:  noreads,

		deliveryMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss", nil),

		accountLoadReadMeter:  metrics.GetOrRegisterMeter(prefix+"/account/load/read", nil),
		accountLoadWriteMeter: metrics.GetOrRegisterMeter(prefix+"/account/load/write", nil),
		accountDupReadMeter:   metrics.GetOrRegisterMeter(prefix+"/account/dup/read", nil),
		accountDupWriteMeter:  metrics.GetOrRegisterMeter(prefix+"/account/dup/write", nil),
		accountDupCrossMeter:  metrics.GetOrRegisterMeter(prefix+"/account/dup/cross", nil),
		accountWasteMeter:     metrics.GetOrRegisterMeter(prefix+"/account/waste", nil),

		storageLoadReadMeter:  metrics.GetOrRegisterMeter(prefix+"/storage/load/read", nil),
		storageLoadWriteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/load/write", nil),
		storageDupReadMeter:   metrics.GetOrRegisterMeter(prefix+"/storage/dup/read", nil),
		storageDupWriteMeter:  metrics.GetOrRegisterMeter(prefix+"/storage/dup/write", nil),
		storageDupCrossMeter:  metrics.GetOrRegisterMeter(prefix+"/storage/dup/cross", nil),
		storageWasteMeter:     metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil),
	}
}
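
// Illustrative lifecycle sketch (not part of the package API; in go-ethereum
// the StateDB is what drives these calls). It assumes a ready Database db, an
// account-trie root and a keys [][]byte batch of account addresses:
//
//	p := newTriePrefetcher(db, root, "example", false)
//
//	// Schedule some account addresses on the account trie (empty owner hash).
//	if err := p.prefetch(common.Hash{}, root, common.Address{}, keys, false); err != nil {
//		// Already terminated; fall back to loading on demand.
//	}
//	// Stop accepting new work and wait for pending loads to finish.
//	p.terminate(false)
//
//	tr := p.trie(common.Hash{}, root) // retrieve the warmed-up trie
//	p.used(common.Hash{}, root, keys) // mark which keys were actually useful
//	p.report()                        // publish load/dup/waste metrics
//	_ = tr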

// terminate iterates over all the subfetchers and issues a termination request
// to all of them. Depending on the async parameter, the method will either block
// until all subfetchers spin down, or return immediately.
func (p *triePrefetcher) terminate(async bool) {
	// Short circuit if the fetcher is already closed
	select {
	case <-p.term:
		return
	default:
	}
	// Terminate all sub-fetchers, sync or async, depending on the request
	for _, fetcher := range p.fetchers {
		fetcher.terminate(async)
	}
	close(p.term)
}

// report aggregates the pre-fetching and usage metrics and reports them.
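//
// Load meters count entries that were prefetched, dup meters count keys that
// were scheduled more than once, and waste meters count entries that were
// prefetched but never reported back via used.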
func (p *triePrefetcher) report() {
	if !metrics.Enabled {
		return
	}
	for _, fetcher := range p.fetchers {
		fetcher.wait() // ensure the fetcher's idle before poking in its internals

		if fetcher.root == p.root {
			p.accountLoadReadMeter.Mark(int64(len(fetcher.seenRead)))
			p.accountLoadWriteMeter.Mark(int64(len(fetcher.seenWrite)))

			p.accountDupReadMeter.Mark(int64(fetcher.dupsRead))
			p.accountDupWriteMeter.Mark(int64(fetcher.dupsWrite))
			p.accountDupCrossMeter.Mark(int64(fetcher.dupsCross))

			for _, key := range fetcher.used {
				delete(fetcher.seenRead, string(key))
				delete(fetcher.seenWrite, string(key))
			}
			p.accountWasteMeter.Mark(int64(len(fetcher.seenRead) + len(fetcher.seenWrite)))
		} else {
			p.storageLoadReadMeter.Mark(int64(len(fetcher.seenRead)))
			p.storageLoadWriteMeter.Mark(int64(len(fetcher.seenWrite)))

			p.storageDupReadMeter.Mark(int64(fetcher.dupsRead))
			p.storageDupWriteMeter.Mark(int64(fetcher.dupsWrite))
			p.storageDupCrossMeter.Mark(int64(fetcher.dupsCross))

			for _, key := range fetcher.used {
				delete(fetcher.seenRead, string(key))
				delete(fetcher.seenWrite, string(key))
			}
			p.storageWasteMeter.Mark(int64(len(fetcher.seenRead) + len(fetcher.seenWrite)))
		}
	}
}

// prefetch schedules a batch of trie items to prefetch. Once the prefetcher is
// closed, any tasks scheduled afterwards will not be executed and an error will
// be returned.
//
// prefetch is called from two locations:
//
//  1. Finalize of the state-objects storage roots. This happens at the end
//     of every transaction, meaning that if several transactions touch
//     upon the same contract, the parameters invoking this method may be
//     repeated.
//  2. Finalize of the main account trie. This happens only once per block.
func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte, read bool) error {
	// If the state item is only being read, but reads are disabled, return
	if read && p.noreads {
		return nil
	}
	// Ensure the subfetcher is still alive
	select {
	case <-p.term:
		return errTerminated
	default:
	}
	id := p.trieID(owner, root)
	fetcher := p.fetchers[id]
	if fetcher == nil {
		fetcher = newSubfetcher(p.db, p.root, owner, root, addr)
		p.fetchers[id] = fetcher
	}
	return fetcher.schedule(keys, read)
}
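
// Call-shape sketch (illustrative; mirrors how the StateDB schedules work; the
// names owner, storageRoot, addr and slot are placeholders):
//
//	// Account trie: zero owner hash, keys are plain account addresses.
//	_ = p.prefetch(common.Hash{}, p.root, common.Address{}, [][]byte{addr.Bytes()}, true)
//
//	// Storage trie: owner is the account's hash, keys are the storage slot keys.
//	_ = p.prefetch(owner, storageRoot, addr, [][]byte{slot.Bytes()}, false)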

// trie returns the trie matching the root hash, blocking until the fetcher of
// the given trie terminates. If no fetcher exists for the request, nil will be
// returned.
func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie {
	// Bail if no trie was prefetched for this root
	fetcher := p.fetchers[p.trieID(owner, root)]
	if fetcher == nil {
		log.Error("Prefetcher missed loading trie", "owner", owner, "root", root)
		p.deliveryMissMeter.Mark(1)
		return nil
	}
	// Subfetcher exists, retrieve its trie
	return fetcher.peek()
}

// used marks a batch of state items as used, allowing statistics to be
// gathered on how useful or wasteful the prefetcher is.
func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) {
	if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil {
		fetcher.wait() // ensure the fetcher's idle before poking in its internals
		fetcher.used = used
	}
}

// trieID returns a unique trie identifier consisting of the trie owner and
// root hash.
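//
// A minimal sketch of the resulting key (assumption: both hashes are 32 bytes,
// so the identifier is a 64-byte string):
//
//	id := p.trieID(owner, root) // id == string(owner[:]) + string(root[:])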
func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string {
	trieID := make([]byte, common.HashLength*2)
	copy(trieID, owner.Bytes())
	copy(trieID[common.HashLength:], root.Bytes())
	return string(trieID)
}

// subfetcher is a trie fetcher goroutine responsible for pulling entries for a
// single trie. It is spawned when a new root is encountered and lives until the
// main prefetcher is paused and either all requested items are processed or the
// trie being worked on is retrieved from the prefetcher.
type subfetcher struct {
	db    Database       // Database to load trie nodes through
	state common.Hash    // Root hash of the state to prefetch
	owner common.Hash    // Owner of the trie, usually account hash
	root  common.Hash    // Root hash of the trie to prefetch
	addr  common.Address // Address of the account that the trie belongs to
	trie  Trie           // Trie being populated with nodes

	tasks []*subfetcherTask // Items queued up for retrieval
	lock  sync.Mutex        // Lock protecting the task queue

	wake chan struct{} // Wake channel if a new task is scheduled
	stop chan struct{} // Channel to interrupt processing
	term chan struct{} // Channel to signal interruption

	seenRead  map[string]struct{} // Tracks the entries already loaded via read operations
	seenWrite map[string]struct{} // Tracks the entries already loaded via write operations

	dupsRead  int // Number of duplicate preload tasks via reads only
	dupsWrite int // Number of duplicate preload tasks via writes only
	dupsCross int // Number of duplicate preload tasks via read-write-crosses

	used [][]byte // Tracks the entries used in the end
}

// subfetcherTask is a trie path to prefetch, tagged with whether it originates
// from a read or a write request.
type subfetcherTask struct {
	read bool
	key  []byte
}
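
// Duplicate handling note: each key is only loaded once per subfetcher. A
// repeated key is counted as dupsRead (read after read), dupsWrite (write
// after write) or dupsCross (mixed read/write), which feeds the dup meters
// published by report().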

// newSubfetcher creates a goroutine to prefetch state items belonging to a
// particular root hash.
func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash, addr common.Address) *subfetcher {
	sf := &subfetcher{
		db:        db,
		state:     state,
		owner:     owner,
		root:      root,
		addr:      addr,
		wake:      make(chan struct{}, 1),
		stop:      make(chan struct{}),
		term:      make(chan struct{}),
		seenRead:  make(map[string]struct{}),
		seenWrite: make(map[string]struct{}),
	}
	go sf.loop()
	return sf
}

// schedule adds a batch of trie keys to the queue to prefetch.
func (sf *subfetcher) schedule(keys [][]byte, read bool) error {
	// Ensure the subfetcher is still alive
	select {
	case <-sf.term:
		return errTerminated
	default:
	}
	// Append the tasks to the current queue
	sf.lock.Lock()
	for _, key := range keys {
		key := key // closure for the append below
		sf.tasks = append(sf.tasks, &subfetcherTask{read: read, key: key})
	}
	sf.lock.Unlock()

	// Notify the background thread to execute scheduled tasks
	select {
	case sf.wake <- struct{}{}:
		// Wake signal sent
	default:
		// Wake signal not sent as a previous one is already queued
	}
	return nil
}

// wait blocks until the subfetcher terminates. This method is used to block on
// an async termination before accessing internal fields from the fetcher.
func (sf *subfetcher) wait() {
	<-sf.term
}

// peek retrieves the fetcher's trie, populated with any pre-fetched data. The
// returned trie will be a shallow copy, so modifying it will break subsequent
// peeks for the original data. The method will block until all the scheduled
// data has been loaded and the fetcher terminated.
func (sf *subfetcher) peek() Trie {
	// Block until the fetcher terminates, then retrieve the trie
	sf.wait()
	return sf.trie
}

// terminate requests the subfetcher to stop accepting new tasks and spin down
// as soon as everything is loaded. Depending on the async parameter, the method
// will either block until all disk loads finish or return immediately.
func (sf *subfetcher) terminate(async bool) {
	select {
	case <-sf.stop:
	default:
		close(sf.stop)
	}
	if async {
		return
	}
	<-sf.term
}

// loop waits for new trie tasks to be scheduled and loads them as they arrive,
// stopping when requested.
func (sf *subfetcher) loop() {
	// No matter how the loop stops, signal anyone waiting that it's terminated
	defer close(sf.term)

	// Start by opening the trie and stop processing if it fails
	if sf.owner == (common.Hash{}) {
		trie, err := sf.db.OpenTrie(sf.root)
		if err != nil {
			log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
			return
		}
		sf.trie = trie
	} else {
		trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, nil)
		if err != nil {
			log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
			return
		}
		sf.trie = trie
	}
	// Trie opened successfully, keep prefetching items
	for {
		select {
		case <-sf.wake:
			// Execute all remaining tasks in a single run
			sf.lock.Lock()
			tasks := sf.tasks
			sf.tasks = nil
			sf.lock.Unlock()

			for _, task := range tasks {
				key := string(task.key)
				if task.read {
					if _, ok := sf.seenRead[key]; ok {
						sf.dupsRead++
						continue
					}
					if _, ok := sf.seenWrite[key]; ok {
						sf.dupsCross++
						continue
					}
				} else {
					if _, ok := sf.seenRead[key]; ok {
						sf.dupsCross++
						continue
					}
					if _, ok := sf.seenWrite[key]; ok {
						sf.dupsWrite++
						continue
					}
				}
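				// Keys of address length target the account trie; everything
				// else is treated as a storage slot key within this trie.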
				if len(task.key) == common.AddressLength {
					sf.trie.GetAccount(common.BytesToAddress(task.key))
				} else {
					sf.trie.GetStorage(sf.addr, task.key)
				}
				if task.read {
					sf.seenRead[key] = struct{}{}
				} else {
					sf.seenWrite[key] = struct{}{}
				}
			}

		case <-sf.stop:
			// Termination is requested, abort if no more tasks are pending. If
			// there are some, exhaust them first.
			sf.lock.Lock()
			done := sf.tasks == nil
			sf.lock.Unlock()

			if done {
				return
			}
			// Some tasks are pending, loop and pick them up (the wake branch
			// will be selected eventually and, since stop remains closed, this
			// branch will run again afterwards).
		}
	}
}