2015-07-07 03:54:22 +03:00
|
|
|
// Copyright 2015 The go-ethereum Authors
|
2015-07-22 19:48:40 +03:00
|
|
|
// This file is part of the go-ethereum library.
|
2015-07-07 03:54:22 +03:00
|
|
|
//
|
2015-07-23 19:35:11 +03:00
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
2015-07-07 03:54:22 +03:00
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
2015-07-22 19:48:40 +03:00
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
2015-07-07 03:54:22 +03:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
2015-07-22 19:48:40 +03:00
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2015-07-07 03:54:22 +03:00
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
2015-07-22 19:48:40 +03:00
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
2015-07-07 03:54:22 +03:00
|
|
|
|
2015-02-04 15:52:59 +02:00
|
|
|
package miner
|
|
|
|
|
|
|
|
import (
|
2018-08-28 16:48:20 +03:00
|
|
|
"errors"
|
2022-01-24 10:19:52 +03:00
|
|
|
"fmt"
|
2022-10-28 07:01:18 +03:00
|
|
|
"math/big"
|
2015-02-15 17:16:27 +02:00
|
|
|
"sync"
|
2015-03-26 18:45:03 +02:00
|
|
|
"sync/atomic"
|
2015-06-15 12:33:08 +03:00
|
|
|
"time"
|
2015-02-04 15:52:59 +02:00
|
|
|
|
2024-04-11 10:15:46 +03:00
|
|
|
mapset "github.com/deckarep/golang-set/v2"
|
2024-03-08 06:15:35 +03:00
|
|
|
lru "github.com/hashicorp/golang-lru"
|
|
|
|
"github.com/holiman/uint256"
|
|
|
|
|
2015-03-18 14:00:01 +02:00
|
|
|
"github.com/ethereum/go-ethereum/common"
|
2017-04-05 01:16:29 +03:00
|
|
|
"github.com/ethereum/go-ethereum/consensus"
|
2023-08-01 12:58:45 +03:00
|
|
|
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
|
2023-08-24 00:16:14 +03:00
|
|
|
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
|
2020-05-20 06:46:45 +03:00
|
|
|
"github.com/ethereum/go-ethereum/consensus/parlia"
|
2015-02-04 15:52:59 +02:00
|
|
|
"github.com/ethereum/go-ethereum/core"
|
2015-03-23 19:27:05 +02:00
|
|
|
"github.com/ethereum/go-ethereum/core/state"
|
2022-07-20 12:33:51 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/systemcontracts"
|
2023-07-27 13:45:35 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/txpool"
|
2015-02-04 15:52:59 +02:00
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
2023-08-26 05:52:12 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/vm"
|
2015-02-04 15:52:59 +02:00
|
|
|
"github.com/ethereum/go-ethereum/event"
|
2017-02-22 15:10:07 +03:00
|
|
|
"github.com/ethereum/go-ethereum/log"
|
2022-07-14 12:09:44 +03:00
|
|
|
"github.com/ethereum/go-ethereum/metrics"
|
2016-07-08 13:00:37 +03:00
|
|
|
"github.com/ethereum/go-ethereum/params"
|
2020-08-21 15:10:40 +03:00
|
|
|
"github.com/ethereum/go-ethereum/trie"
|
2015-02-04 15:52:59 +02:00
|
|
|
)
|
|
|
|
|
2015-07-11 21:45:59 +03:00
|
|
|
const (
	// resultQueueSize is the size of channel listening to sealing result.
	resultQueueSize = 10

	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096

	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// minRecommitInterval is the minimal time interval to recreate the sealing block with
	// any newly arrived transactions.
	minRecommitInterval = 1 * time.Second

	// staleThreshold is the maximum depth of the acceptable stale block.
	staleThreshold = 11

	// recentMinedCacheLimit bounds the LRU of recently mined blocks. The
	// concurrent mining loops could otherwise race to mine blocks at the
	// same height; remembering recent ones guards against double signing.
	recentMinedCacheLimit = 20

	// waitMEVMinerEndTimeLimit is the default time to wait for the mev miner to finish.
	waitMEVMinerEndTimeLimit = 50 * time.Millisecond

	// blockReserveSize reserves block size for the following 3 components:
	// a. System transactions at the end of the block
	// b. Seal in the block header
	// c. Overhead from RLP encoding
	blockReserveSize = 100 * 1024
)
|
2015-05-11 22:47:34 +03:00
|
|
|
|
2022-07-14 12:09:44 +03:00
|
|
|
var (
	// Timers tracking how long block write and finalization take.
	writeBlockTimer    = metrics.NewRegisteredTimer("worker/writeblock", nil)
	finalizeBlockTimer = metrics.NewRegisteredTimer("worker/finalizeblock", nil)

	// Sentinel errors reporting why an in-flight block build was aborted.
	errBlockInterruptedByNewHead   = errors.New("new head arrived while building block")
	errBlockInterruptedByRecommit  = errors.New("recommit interrupt while building block")
	errBlockInterruptedByTimeout   = errors.New("timeout while building block")
	errBlockInterruptedByOutOfGas  = errors.New("out of gas while building block")
	errBlockInterruptedByBetterBid = errors.New("better bid arrived while building block")
)
|
|
|
|
|
2022-01-24 10:19:52 +03:00
|
|
|
// environment is the worker's current environment and holds all
// information of the sealing block generation.
type environment struct {
	signer types.Signer // signer used to recover transaction senders

	state    *state.StateDB // apply state changes here
	tcount   int            // tx count in cycle
	size     uint32         // almost accurate block size used for the blockReserveSize budget
	gasPool  *core.GasPool  // available gas used to pack transactions
	coinbase common.Address // address receiving the block rewards/fees

	header   *types.Header
	txs      []*types.Transaction
	receipts []*types.Receipt
	sidecars types.BlobSidecars // sidecars collected for included blob transactions
	blobs    int                // number of blobs included so far
}
|
|
|
|
|
|
|
|
// copy creates a deep copy of environment.
|
|
|
|
func (env *environment) copy() *environment {
|
|
|
|
cpy := &environment{
|
2023-05-31 10:09:49 +03:00
|
|
|
signer: env.signer,
|
|
|
|
state: env.state.Copy(),
|
|
|
|
tcount: env.tcount,
|
2024-09-10 11:24:29 +03:00
|
|
|
size: env.size,
|
2023-05-31 10:09:49 +03:00
|
|
|
coinbase: env.coinbase,
|
|
|
|
header: types.CopyHeader(env.header),
|
|
|
|
receipts: copyReceipts(env.receipts),
|
2022-01-24 10:19:52 +03:00
|
|
|
}
|
|
|
|
if env.gasPool != nil {
|
|
|
|
gasPool := *env.gasPool
|
|
|
|
cpy.gasPool = &gasPool
|
|
|
|
}
|
|
|
|
cpy.txs = make([]*types.Transaction, len(env.txs))
|
|
|
|
copy(cpy.txs, env.txs)
|
2023-08-24 00:16:14 +03:00
|
|
|
|
2024-03-22 17:37:47 +03:00
|
|
|
if env.sidecars != nil {
|
|
|
|
cpy.sidecars = make(types.BlobSidecars, len(env.sidecars))
|
|
|
|
copy(cpy.sidecars, env.sidecars)
|
2024-04-27 01:37:42 +03:00
|
|
|
cpy.blobs = env.blobs
|
2024-03-22 17:37:47 +03:00
|
|
|
}
|
2023-08-24 00:16:14 +03:00
|
|
|
|
2022-01-24 10:19:52 +03:00
|
|
|
return cpy
|
|
|
|
}
|
|
|
|
|
|
|
|
// discard terminates the background prefetcher go-routine. It should
|
|
|
|
// always be called for all created environment instances otherwise
|
|
|
|
// the go-routine leak can happen.
|
|
|
|
func (env *environment) discard() {
|
|
|
|
if env.state == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
env.state.StopPrefetcher()
|
2018-08-14 18:34:33 +03:00
|
|
|
}
|
2015-07-11 21:45:59 +03:00
|
|
|
|
2018-08-14 18:34:33 +03:00
|
|
|
// task contains all information for consensus engine sealing and result submitting.
type task struct {
	receipts  []*types.Receipt // receipts produced while building the block
	state     *state.StateDB   // state after applying the block's transactions
	block     *types.Block     // the assembled block handed to the engine for sealing
	createdAt time.Time        // creation time, useful for judging staleness
}
|
|
|
|
|
2018-08-16 14:14:33 +03:00
|
|
|
// Reasons used to interrupt an in-flight commit (see newWorkLoop's interruptCh).
const (
	commitInterruptNone int32 = iota // no interrupt pending
	commitInterruptNewHead           // a new chain head arrived
	commitInterruptResubmit          // periodic recommit fired
	commitInterruptTimeout           // payload build hit newpayloadTimeout
	commitInterruptOutOfGas          // block gas pool exhausted
	commitInterruptBetterBid         // a better MEV bid arrived
)
|
|
|
|
|
2018-08-21 22:56:54 +03:00
|
|
|
// newWorkReq represents a request for new sealing work submitting with relative interrupt notifier.
type newWorkReq struct {
	interruptCh chan int32 // carries one commitInterrupt* reason, then is closed
	timestamp   int64      // unix timestamp for this round of sealing
}
|
|
|
|
|
2023-08-24 00:16:14 +03:00
|
|
|
// newPayloadResult is the result of payload generation.
type newPayloadResult struct {
	err      error              // non-nil if payload generation failed
	block    *types.Block       // the generated block
	fees     *big.Int           // total block fees
	sidecars types.BlobSidecars // collected blobs of blob transactions
}
|
|
|
|
|
2022-01-24 10:19:52 +03:00
|
|
|
// getWorkReq represents a request for getting a new sealing work with provided parameters.
type getWorkReq struct {
	params *generateParams        // build parameters supplied by the caller
	result chan *newPayloadResult // non-blocking channel
}
|
|
|
|
|
2024-03-08 06:15:35 +03:00
|
|
|
// bidFetcher exposes the MEV bid pool queried while sealing on top of a parent block.
type bidFetcher interface {
	// GetBestBid returns the best known bid built on the given parent hash.
	GetBestBid(parentHash common.Hash) *BidRuntime
	// GetSimulatingBid returns the bid currently being simulated for the
	// given previous block hash, if any.
	GetSimulatingBid(prevBlockHash common.Hash) *BidRuntime
}
|
|
|
|
|
2018-08-14 18:34:33 +03:00
|
|
|
// worker is the main object which takes care of submitting new work to consensus engine
// and gathering the sealing result.
type worker struct {
	bidFetcher  bidFetcher      // source of external MEV bids (may be nil until set)
	prefetcher  core.Prefetcher // state prefetcher warming caches for tx execution
	config      *Config
	chainConfig *params.ChainConfig
	engine      consensus.Engine
	eth         Backend
	chain       *core.BlockChain

	// Feeds
	pendingLogsFeed event.Feed

	// Subscriptions
	mux          *event.TypeMux
	chainHeadCh  chan core.ChainHeadEvent
	chainHeadSub event.Subscription

	// Channels
	newWorkCh          chan *newWorkReq
	getWorkCh          chan *getWorkReq
	taskCh             chan *task
	resultCh           chan *types.Block
	startCh            chan struct{}
	exitCh             chan struct{}
	resubmitIntervalCh chan time.Duration

	wg sync.WaitGroup // tracks the four background loops

	current *environment // An environment for current running cycle.

	mu       sync.RWMutex // The lock used to protect the coinbase, extra and tip fields
	coinbase common.Address
	extra    []byte
	tip      *uint256.Int // Minimum tip needed for non-local transaction to include them

	pendingMu    sync.RWMutex // protects pendingTasks
	pendingTasks map[common.Hash]*task

	snapshotMu       sync.RWMutex // The lock used to protect the snapshots below
	snapshotBlock    *types.Block
	snapshotReceipts types.Receipts
	snapshotState    *state.StateDB

	// atomic status counters
	running atomic.Bool // The indicator whether the consensus engine is running or not.
	syncing atomic.Bool // The indicator whether the node is still syncing.

	// newpayloadTimeout is the maximum timeout allowance for creating payload.
	// The default value is 2 seconds but node operator can set it to arbitrary
	// large value. A large timeout allowance may cause Geth to fail creating
	// a non-empty payload within the specified time and eventually miss the slot
	// in case there are some computation expensive transactions in txpool.
	newpayloadTimeout time.Duration

	// recommit is the time interval to re-create sealing work or to re-build
	// payload in proof-of-stake stage.
	recommit time.Duration

	// External functions
	isLocalBlock func(header *types.Header) bool // Function used to determine whether the specified block is mined by local miner.

	// Test hooks
	newTaskHook  func(*task)                        // Method to call upon receiving a new sealing task.
	skipSealHook func(*task) bool                   // Method to decide whether skipping the sealing.
	fullTaskHook func()                             // Method to call before pushing the full sealing task.
	resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.

	// recentMinedBlocks remembers recently mined blocks to avoid double
	// signing at the same height (see recentMinedCacheLimit).
	recentMinedBlocks *lru.Cache
}
|
|
|
|
|
2022-01-24 10:19:52 +03:00
|
|
|
// newWorker creates a worker, wires up its chain-head subscription, sanitizes
// the recommit interval and payload timeout, and spawns the four background
// loops (main, new-work, result, task). When init is true, an initial work
// submission is queued so the pending state gets populated immediately.
func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool) *worker {
	// Error ignored: lru.New only fails for a non-positive size and
	// recentMinedCacheLimit is a positive constant.
	recentMinedBlocks, _ := lru.New(recentMinedCacheLimit)
	worker := &worker{
		prefetcher:         core.NewStatePrefetcher(chainConfig, eth.BlockChain(), engine),
		config:             config,
		chainConfig:        chainConfig,
		engine:             engine,
		eth:                eth,
		chain:              eth.BlockChain(),
		mux:                mux,
		isLocalBlock:       isLocalBlock,
		coinbase:           config.Etherbase,
		extra:              config.ExtraData,
		tip:                uint256.MustFromBig(config.GasPrice),
		pendingTasks:       make(map[common.Hash]*task),
		chainHeadCh:        make(chan core.ChainHeadEvent, chainHeadChanSize),
		newWorkCh:          make(chan *newWorkReq),
		getWorkCh:          make(chan *getWorkReq),
		taskCh:             make(chan *task),
		resultCh:           make(chan *types.Block, resultQueueSize),
		startCh:            make(chan struct{}, 1),
		exitCh:             make(chan struct{}),
		resubmitIntervalCh: make(chan time.Duration),
		recentMinedBlocks:  recentMinedBlocks,
	}
	// Subscribe events for blockchain
	worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)

	// Sanitize recommit interval if the user-specified one is too short.
	recommit := worker.config.Recommit
	if recommit < minRecommitInterval {
		log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval)
		recommit = minRecommitInterval
	}
	worker.recommit = recommit

	// Sanitize the timeout config for creating payload.
	newpayloadTimeout := worker.config.NewPayloadTimeout
	if newpayloadTimeout == 0 {
		// Warning intentionally silenced; zero simply means "use default".
		// log.Warn("Sanitizing new payload timeout to default", "provided", newpayloadTimeout, "updated", DefaultConfig.NewPayloadTimeout)
		newpayloadTimeout = DefaultConfig.NewPayloadTimeout
	}
	if newpayloadTimeout < time.Millisecond*100 {
		log.Warn("Low payload timeout may cause high amount of non-full blocks", "provided", newpayloadTimeout, "default", DefaultConfig.NewPayloadTimeout)
	}
	worker.newpayloadTimeout = newpayloadTimeout

	worker.wg.Add(4)
	go worker.mainLoop()
	go worker.newWorkLoop(recommit)
	go worker.resultLoop()
	go worker.taskLoop()

	// Submit first work to initialize pending state.
	if init {
		worker.startCh <- struct{}{}
	}

	return worker
}
|
|
|
|
|
2024-03-08 06:15:35 +03:00
|
|
|
func (w *worker) setBestBidFetcher(fetcher bidFetcher) {
|
|
|
|
w.bidFetcher = fetcher
|
|
|
|
}
|
|
|
|
|
2018-08-14 18:34:33 +03:00
|
|
|
// setEtherbase sets the etherbase used to initialize the block coinbase field.
|
|
|
|
func (w *worker) setEtherbase(addr common.Address) {
|
|
|
|
w.mu.Lock()
|
|
|
|
defer w.mu.Unlock()
|
|
|
|
w.coinbase = addr
|
2015-07-07 11:58:47 +03:00
|
|
|
}
|
|
|
|
|
2023-01-20 19:26:01 +03:00
|
|
|
// etherbase retrieves the configured etherbase address.
|
|
|
|
func (w *worker) etherbase() common.Address {
|
|
|
|
w.mu.RLock()
|
|
|
|
defer w.mu.RUnlock()
|
|
|
|
return w.coinbase
|
|
|
|
}
|
|
|
|
|
2021-07-06 11:35:39 +03:00
|
|
|
func (w *worker) setGasCeil(ceil uint64) {
|
|
|
|
w.mu.Lock()
|
|
|
|
defer w.mu.Unlock()
|
|
|
|
w.config.GasCeil = ceil
|
|
|
|
}
|
|
|
|
|
2024-05-09 10:54:31 +03:00
|
|
|
func (w *worker) getGasCeil() uint64 {
|
|
|
|
w.mu.Lock()
|
|
|
|
defer w.mu.Unlock()
|
|
|
|
return w.config.GasCeil
|
|
|
|
}
|
|
|
|
|
2018-08-14 18:34:33 +03:00
|
|
|
// setExtra sets the content used to initialize the block extra field.
|
|
|
|
func (w *worker) setExtra(extra []byte) {
|
|
|
|
w.mu.Lock()
|
|
|
|
defer w.mu.Unlock()
|
|
|
|
w.extra = extra
|
2016-12-13 15:03:18 +03:00
|
|
|
}
|
|
|
|
|
2024-02-06 11:59:24 +03:00
|
|
|
// setGasTip sets the minimum miner tip needed to include a non-local transaction.
|
|
|
|
func (w *worker) setGasTip(tip *big.Int) {
|
|
|
|
w.mu.Lock()
|
|
|
|
defer w.mu.Unlock()
|
2024-02-19 16:59:40 +03:00
|
|
|
w.tip = uint256.MustFromBig(tip)
|
2024-02-06 11:59:24 +03:00
|
|
|
}
|
|
|
|
|
2018-08-21 22:56:54 +03:00
|
|
|
// setRecommitInterval updates the interval for miner sealing work recommitting.
|
|
|
|
func (w *worker) setRecommitInterval(interval time.Duration) {
|
2022-01-24 10:19:52 +03:00
|
|
|
select {
|
|
|
|
case w.resubmitIntervalCh <- interval:
|
|
|
|
case <-w.exitCh:
|
|
|
|
}
|
2018-08-21 22:56:54 +03:00
|
|
|
}
|
|
|
|
|
2023-05-31 10:09:49 +03:00
|
|
|
// pending returns the pending state and corresponding block. The returned
|
|
|
|
// values can be nil in case the pending block is not initialized.
|
2018-08-14 18:34:33 +03:00
|
|
|
func (w *worker) pending() (*types.Block, *state.StateDB) {
|
|
|
|
w.snapshotMu.RLock()
|
|
|
|
defer w.snapshotMu.RUnlock()
|
|
|
|
if w.snapshotState == nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
return w.snapshotBlock, w.snapshotState.Copy()
|
2018-04-16 10:56:20 +03:00
|
|
|
}
|
2016-11-30 12:48:48 +03:00
|
|
|
|
2023-05-31 10:09:49 +03:00
|
|
|
// pendingBlock returns pending block. The returned block can be nil in case the
|
|
|
|
// pending block is not initialized.
|
2018-08-14 18:34:33 +03:00
|
|
|
func (w *worker) pendingBlock() *types.Block {
|
|
|
|
w.snapshotMu.RLock()
|
|
|
|
defer w.snapshotMu.RUnlock()
|
|
|
|
return w.snapshotBlock
|
2016-11-30 12:48:48 +03:00
|
|
|
}
|
|
|
|
|
2021-06-28 17:16:32 +03:00
|
|
|
// pendingBlockAndReceipts returns pending block and corresponding receipts.
|
2023-05-31 10:09:49 +03:00
|
|
|
// The returned values can be nil in case the pending block is not initialized.
|
2021-06-28 17:16:32 +03:00
|
|
|
func (w *worker) pendingBlockAndReceipts() (*types.Block, types.Receipts) {
|
|
|
|
w.snapshotMu.RLock()
|
|
|
|
defer w.snapshotMu.RUnlock()
|
|
|
|
return w.snapshotBlock, w.snapshotReceipts
|
|
|
|
}
|
|
|
|
|
2018-08-14 18:34:33 +03:00
|
|
|
// start marks the worker as running and triggers new work submitting
// via startCh (buffered with size 1, so this cannot block newWorkLoop).
func (w *worker) start() {
	w.running.Store(true)
	w.startCh <- struct{}{}
}
|
|
|
|
|
2018-08-14 18:34:33 +03:00
|
|
|
// stop marks the worker as not running; in-flight loops observe this via isRunning.
func (w *worker) stop() {
	w.running.Store(false)
}
|
|
|
|
|
2018-08-14 18:34:33 +03:00
|
|
|
// isRunning returns an indicator whether worker is running or not.
func (w *worker) isRunning() bool {
	return w.running.Load()
}
|
|
|
|
|
2018-08-28 16:59:05 +03:00
|
|
|
// close terminates all background threads maintained by the worker.
// Note the worker does not support being closed multiple times
// (exitCh would be closed twice and panic).
func (w *worker) close() {
	w.running.Store(false)
	// Closing exitCh signals every loop to return ...
	close(w.exitCh)
	// ... and the WaitGroup blocks until all four loops have done so.
	w.wg.Wait()
}
|
|
|
|
|
2022-01-24 10:19:52 +03:00
|
|
|
// newWorkLoop is a standalone goroutine to submit new sealing work upon received events.
func (w *worker) newWorkLoop(recommit time.Duration) {
	defer w.wg.Done()
	var (
		interruptCh chan int32 // interrupt channel of the most recently submitted work
		minRecommit = recommit // minimal resubmit interval specified by user.
		timestamp   int64      // timestamp for each round of sealing.
	)

	timer := time.NewTimer(0)
	defer timer.Stop()
	<-timer.C // discard the initial tick

	// commit aborts in-flight transaction execution with given signal and resubmits a new one.
	commit := func(reason int32) {
		if interruptCh != nil {
			// each commit work will have its own interruptCh to stop work with a reason
			interruptCh <- reason
			close(interruptCh)
		}
		interruptCh = make(chan int32, 1)
		// Race the submission against exitCh so a closed worker cannot
		// block this goroutine forever on an unbuffered newWorkCh.
		select {
		case w.newWorkCh <- &newWorkReq{interruptCh: interruptCh, timestamp: timestamp}:
		case <-w.exitCh:
			return
		}
		timer.Reset(recommit)
	}
	// clearPending cleans the stale pending tasks.
	clearPending := func(number uint64) {
		w.pendingMu.Lock()
		for h, t := range w.pendingTasks {
			if t.block.NumberU64()+staleThreshold <= number {
				delete(w.pendingTasks, h)
			}
		}
		w.pendingMu.Unlock()
	}

	for {
		select {
		case <-w.startCh:
			// Explicit (re)start: drop stale tasks and kick off a fresh round.
			clearPending(w.chain.CurrentBlock().Number.Uint64())
			timestamp = time.Now().Unix()
			commit(commitInterruptNewHead)

		case head := <-w.chainHeadCh:
			if !w.isRunning() {
				continue
			}
			clearPending(head.Block.NumberU64())
			timestamp = time.Now().Unix()
			// Under parlia, skip sealing if this validator signed too
			// recently — sealing now would risk a double-sign slashing.
			if p, ok := w.engine.(*parlia.Parlia); ok {
				signedRecent, err := p.SignRecently(w.chain, head.Block)
				if err != nil {
					log.Debug("Not allowed to propose block", "err", err)
					continue
				}
				if signedRecent {
					log.Info("Signed recently, must wait")
					continue
				}
			}
			commit(commitInterruptNewHead)

		case <-timer.C:
			// If sealing is running resubmit a new work cycle periodically to pull in
			// higher priced transactions. Disable this overhead for pending blocks.
			if w.isRunning() && ((w.chainConfig.Clique != nil &&
				w.chainConfig.Clique.Period > 0) || (w.chainConfig.Parlia != nil && w.chainConfig.Parlia.Period > 0)) {
				// Short circuit if no new transaction arrives.
				commit(commitInterruptResubmit)
			}

		case interval := <-w.resubmitIntervalCh:
			// Adjust resubmit interval explicitly by user.
			if interval < minRecommitInterval {
				log.Warn("Sanitizing miner recommit interval", "provided", interval, "updated", minRecommitInterval)
				interval = minRecommitInterval
			}
			log.Info("Miner recommit interval update", "from", minRecommit, "to", interval)
			minRecommit, recommit = interval, interval

			if w.resubmitHook != nil {
				w.resubmitHook(minRecommit, recommit)
			}

		case <-w.exitCh:
			return
		}
	}
}
|
|
|
|
|
2022-01-24 10:19:52 +03:00
|
|
|
// mainLoop is responsible for generating and submitting sealing work based on
// the received event. It can support two modes: automatically generate task and
// submit it or return task according to given parameters for various proposes.
func (w *worker) mainLoop() {
	defer w.wg.Done()
	defer w.chainHeadSub.Unsubscribe()
	// Release the prefetcher goroutine of the last environment on exit.
	defer func() {
		if w.current != nil {
			w.current.discard()
		}
	}()

	for {
		select {
		case req := <-w.newWorkCh:
			// Automatic mode: build and submit a sealing task.
			w.commitWork(req.interruptCh, req.timestamp)

		case req := <-w.getWorkCh:
			// On-demand mode: build a payload and hand it back to the requester.
			req.result <- w.generateWork(req.params)

		// System stopped
		case <-w.exitCh:
			return
		case <-w.chainHeadSub.Err():
			return
		}
	}
}
|
|
|
|
|
2018-08-14 18:34:33 +03:00
|
|
|
// taskLoop is a standalone goroutine to fetch sealing task from the generator and
// push them to consensus engine.
func (w *worker) taskLoop() {
	defer w.wg.Done()
	var (
		stopCh chan struct{} // stop signal for the in-flight Seal call
		prev   common.Hash   // seal hash of the last submitted task
	)

	// interrupt aborts the in-flight sealing task.
	interrupt := func() {
		if stopCh != nil {
			close(stopCh)
			stopCh = nil
		}
	}
	for {
		select {
		case task := <-w.taskCh:
			if w.newTaskHook != nil {
				w.newTaskHook(task)
			}
			// Reject duplicate sealing work due to resubmitting.
			sealHash := w.engine.SealHash(task.block.Header())
			if sealHash == prev {
				continue
			}
			// Interrupt previous sealing operation
			interrupt()
			stopCh, prev = make(chan struct{}), sealHash

			if w.skipSealHook != nil && w.skipSealHook(task) {
				continue
			}
			// Register the task before sealing so resultLoop can find it by seal hash.
			w.pendingMu.Lock()
			w.pendingTasks[sealHash] = task
			w.pendingMu.Unlock()

			if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil {
				log.Warn("Block sealing failed", "err", err)
				// Seal never started; drop the registration again.
				w.pendingMu.Lock()
				delete(w.pendingTasks, sealHash)
				w.pendingMu.Unlock()
			}
		case <-w.exitCh:
			interrupt()
			return
		}
	}
}
|
2015-03-26 18:45:03 +02:00
|
|
|
|
2018-08-14 18:34:33 +03:00
|
|
|
// resultLoop is a standalone goroutine to handle sealing result submitting
// and flush relative data to the database.
func (w *worker) resultLoop() {
	defer w.wg.Done()
	for {
		select {
		case block := <-w.resultCh:
			// Short circuit when receiving empty result.
			if block == nil {
				continue
			}
			// Short circuit when receiving duplicate result caused by resubmitting.
			if w.chain.HasBlock(block.Hash(), block.NumberU64()) {
				continue
			}
			var (
				sealhash = w.engine.SealHash(block.Header())
				hash     = block.Hash()
			)
			// Look up the task that produced this block; it was registered
			// under its seal hash in taskLoop before sealing started.
			w.pendingMu.RLock()
			task, exist := w.pendingTasks[sealhash]
			w.pendingMu.RUnlock()
			if !exist {
				log.Error("Block found but no relative pending task", "number", block.Number(), "sealhash", sealhash, "hash", hash)
				continue
			}
			// Different block could share same sealhash, deep copy here to prevent write-write conflict.
			var (
				receipts = make([]*types.Receipt, len(task.receipts))
				logs     []*types.Log
			)
			for i, taskReceipt := range task.receipts {
				receipt := new(types.Receipt)
				receipts[i] = receipt
				*receipt = *taskReceipt

				// add block location fields
				receipt.BlockHash = hash
				receipt.BlockNumber = block.Number()
				receipt.TransactionIndex = uint(i)

				// Update the block hash in all logs since it is now available and not when the
				// receipt/log of individual transactions were created.
				receipt.Logs = make([]*types.Log, len(taskReceipt.Logs))
				for i, taskLog := range taskReceipt.Logs {
					log := new(types.Log)
					receipt.Logs[i] = log
					*log = *taskLog
					log.BlockHash = hash
				}
				logs = append(logs, receipt.Logs...)
			}

			// Double-sign protection: refuse to import a second self-mined
			// block at the same height that builds on the same parent.
			if prev, ok := w.recentMinedBlocks.Get(block.NumberU64()); ok {
				doubleSign := false
				prevParents, _ := prev.([]common.Hash)
				for _, prevParent := range prevParents {
					if prevParent == block.ParentHash() {
						log.Error("Reject Double Sign!!", "block", block.NumberU64(),
							"hash", block.Hash(),
							"root", block.Root(),
							"ParentHash", block.ParentHash())
						doubleSign = true
						break
					}
				}
				if doubleSign {
					continue
				}
				prevParents = append(prevParents, block.ParentHash())
				w.recentMinedBlocks.Add(block.NumberU64(), prevParents)
			} else {
				// Add() will call removeOldest internally to remove the oldest element
				// if the LRU Cache is full
				w.recentMinedBlocks.Add(block.NumberU64(), []common.Hash{block.ParentHash()})
			}

			// Commit block and state to database.
			task.state.SetExpectedStateRoot(block.Root())
			start := time.Now()
			status, err := w.chain.WriteBlockAndSetHead(block, receipts, logs, task.state, true)
			if status != core.CanonStatTy {
				if err != nil {
					log.Error("Failed writing block to chain", "err", err, "status", status)
				} else {
					log.Info("Written block as SideChain and avoid broadcasting", "status", status)
				}
				continue
			}
			writeBlockTimer.UpdateSince(start)
			log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash,
				"elapsed", common.PrettyDuration(time.Since(task.createdAt)))
			// Broadcast the block to the network via the event mux.
			w.mux.Post(core.NewMinedBlockEvent{Block: block})

		case <-w.exitCh:
			return
		}
	}
}
|
|
|
|
|
2022-01-24 10:19:52 +03:00
|
|
|
// makeEnv creates a new environment for the sealing block.
//
// parent is the header the new block builds on, header is the (already
// prepared) header of the block being sealed, and coinbase is the fee
// recipient. If prevEnv is non-nil, the new environment inherits the
// previous work's prefetcher instead of starting a fresh one.
func (w *worker) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address,
	prevEnv *environment) (*environment, error) {
	// Retrieve the parent state to execute on top and start a prefetcher for
	// the miner to speed block sealing up a bit
	state, err := w.chain.StateAt(parent.Root)
	if err != nil {
		return nil, err
	}
	if prevEnv == nil {
		state.StartPrefetcher("miner")
	} else {
		state.TransferPrefetcher(prevEnv.state)
	}

	// Note the passed coinbase may be different with header.Coinbase.
	env := &environment{
		signer:   types.MakeSigner(w.chainConfig, header.Number, header.Time),
		state:    state,
		coinbase: coinbase,
		header:   header,
	}
	// Keep track of transactions which return errors so they can be removed
	env.tcount = 0
	return env, nil
}
|
|
|
|
|
2022-01-24 10:19:52 +03:00
|
|
|
// updateSnapshot updates pending snapshot block, receipts and state.
|
|
|
|
func (w *worker) updateSnapshot(env *environment) {
|
2018-08-14 18:34:33 +03:00
|
|
|
w.snapshotMu.Lock()
|
|
|
|
defer w.snapshotMu.Unlock()
|
|
|
|
|
|
|
|
w.snapshotBlock = types.NewBlock(
|
2022-01-24 10:19:52 +03:00
|
|
|
env.header,
|
|
|
|
env.txs,
|
2023-05-31 10:09:49 +03:00
|
|
|
nil,
|
2022-01-24 10:19:52 +03:00
|
|
|
env.receipts,
|
2021-02-02 15:09:23 +03:00
|
|
|
trie.NewStackTrie(nil),
|
2018-08-14 18:34:33 +03:00
|
|
|
)
|
2022-01-24 10:19:52 +03:00
|
|
|
w.snapshotReceipts = copyReceipts(env.receipts)
|
|
|
|
w.snapshotState = env.state.Copy()
|
2018-08-14 18:34:33 +03:00
|
|
|
}
|
|
|
|
|
2024-02-02 10:43:33 +03:00
|
|
|
func (w *worker) commitTransaction(env *environment, tx *types.Transaction, receiptProcessors ...core.ReceiptProcessor) ([]*types.Log, error) {
|
2023-08-26 05:52:12 +03:00
|
|
|
if tx.Type() == types.BlobTxType {
|
2024-02-02 10:43:33 +03:00
|
|
|
return w.commitBlobTransaction(env, tx, receiptProcessors...)
|
2023-08-26 05:52:12 +03:00
|
|
|
}
|
2024-02-02 10:43:33 +03:00
|
|
|
|
|
|
|
receipt, err := w.applyTransaction(env, tx, receiptProcessors...)
|
2023-08-26 05:52:12 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
env.txs = append(env.txs, tx)
|
|
|
|
env.receipts = append(env.receipts, receipt)
|
|
|
|
return receipt.Logs, nil
|
|
|
|
}
|
|
|
|
|
2024-02-02 10:43:33 +03:00
|
|
|
// commitBlobTransaction executes a blob (EIP-4844) transaction, enforces the
// per-block blob gas limit, and records the transaction (without its
// sidecar), its receipt, and the sidecar in the environment. The header's
// BlobGasUsed counter is advanced by the receipt's blob gas.
func (w *worker) commitBlobTransaction(env *environment, tx *types.Transaction, receiptProcessors ...core.ReceiptProcessor) ([]*types.Log, error) {
	sc := types.NewBlobSidecarFromTx(tx)
	if sc == nil {
		// Programmer error: the pool must never hand the miner a blob tx
		// stripped of its sidecar.
		panic("blob transaction without blobs in miner")
	}
	// Checking against blob gas limit: It's kind of ugly to perform this check here, but there
	// isn't really a better place right now. The blob gas limit is checked at block validation time
	// and not during execution. This means core.ApplyTransaction will not return an error if the
	// tx has too many blobs. So we have to explicitly check it here.
	if (env.blobs+len(sc.Blobs))*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock {
		return nil, errors.New("max data blobs reached")
	}

	receipt, err := w.applyTransaction(env, tx, receiptProcessors...)
	if err != nil {
		return nil, err
	}
	// Record the sidecar's position before appending: the tx index equals
	// the current length of env.txs.
	sc.TxIndex = uint64(len(env.txs))
	// The sidecar is stored separately; the block body carries the tx only.
	env.txs = append(env.txs, tx.WithoutBlobTxSidecar())
	env.receipts = append(env.receipts, receipt)
	env.sidecars = append(env.sidecars, sc)
	env.blobs += len(sc.Blobs)
	*env.header.BlobGasUsed += receipt.BlobGasUsed
	return receipt.Logs, nil
}
|
2018-08-16 14:14:33 +03:00
|
|
|
|
2023-08-26 05:52:12 +03:00
|
|
|
// applyTransaction runs the transaction. If execution fails, state and gas pool are reverted.
|
2024-02-02 10:43:33 +03:00
|
|
|
func (w *worker) applyTransaction(env *environment, tx *types.Transaction, receiptProcessors ...core.ReceiptProcessor) (*types.Receipt, error) {
|
2023-03-07 13:23:52 +03:00
|
|
|
var (
|
|
|
|
snap = env.state.Snapshot()
|
|
|
|
gp = env.gasPool.Gas()
|
|
|
|
)
|
2018-08-16 14:14:33 +03:00
|
|
|
|
2024-02-02 10:43:33 +03:00
|
|
|
receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig(), receiptProcessors...)
|
2018-08-16 14:14:33 +03:00
|
|
|
if err != nil {
|
2022-01-24 10:19:52 +03:00
|
|
|
env.state.RevertToSnapshot(snap)
|
2023-03-07 13:23:52 +03:00
|
|
|
env.gasPool.SetGas(gp)
|
2018-08-16 14:14:33 +03:00
|
|
|
}
|
2023-08-26 05:52:12 +03:00
|
|
|
return receipt, err
|
2018-08-16 14:14:33 +03:00
|
|
|
}
|
|
|
|
|
2024-03-08 10:36:25 +03:00
|
|
|
// commitTransactions drains the two price-and-nonce-sorted transaction lists
// (plain and blob) into the sealing environment until one of the stop
// conditions fires: an interrupt signal, gas exhaustion, blob-space
// exhaustion, the stop timer, or both lists running empty. The returned
// error reflects the stop signal (nil for a normal drain; see signalToErr).
func (w *worker) commitTransactions(env *environment, plainTxs, blobTxs *transactionsByPriceAndNonce,
	interruptCh chan int32, stopTimer *time.Timer) error {
	gasLimit := env.header.GasLimit
	if env.gasPool == nil {
		env.gasPool = new(core.GasPool).AddGas(gasLimit)
		// Reserve gas for the system transactions appended at block finalization.
		env.gasPool.SubGas(params.SystemTxsGas)
	}

	var coalescedLogs []*types.Log
	// initialize bloom processors
	processorCapacity := 100
	if plainTxs.CurrentSize() < processorCapacity {
		processorCapacity = plainTxs.CurrentSize()
	}
	bloomProcessors := core.NewAsyncReceiptBloomGenerator(processorCapacity)

	// Signal channel to stop the background state prefetcher when this
	// function returns.
	stopPrefetchCh := make(chan struct{})
	defer close(stopPrefetchCh)
	// prefetch plainTxs txs, don't bother to prefetch a few blobTxs
	txsPrefetch := plainTxs.Copy()
	tx := txsPrefetch.PeekWithUnwrap()
	if tx != nil {
		txCurr := &tx
		w.prefetcher.PrefetchMining(txsPrefetch, env.header, env.gasPool.Gas(), env.state.CopyDoPrefetch(), *w.chain.GetVMConfig(), stopPrefetchCh, txCurr)
	}

	signal := commitInterruptNone
LOOP:
	for {
		// In the following three cases, we will interrupt the execution of the transaction.
		// (1) new head block event arrival, the reason is 1
		// (2) worker start or restart, the reason is 1
		// (3) worker recreate the sealing block with any newly arrived transactions, the reason is 2.
		// For the first two cases, the semi-finished work will be discarded.
		// For the third case, the semi-finished work will be submitted to the consensus engine.
		if interruptCh != nil {
			select {
			case signal, ok := <-interruptCh:
				if !ok {
					// should never be here, since interruptCh should not be read before
					log.Warn("commit transactions stopped unknown")
				}
				return signalToErr(signal)
			default:
			}
		}
		// If we don't have enough gas for any further transactions then we're done.
		if env.gasPool.Gas() < params.TxGas {
			log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas)
			signal = commitInterruptOutOfGas
			break
		}
		// Stop packing when the time budget for this block has been used up.
		if stopTimer != nil {
			select {
			case <-stopTimer.C:
				log.Info("Not enough time for further transactions", "txs", len(env.txs))
				stopTimer.Reset(0) // re-active the timer, in case it will be used later.
				signal = commitInterruptTimeout
				break LOOP
			default:
			}
		}

		// If we don't have enough blob space for any further blob transactions,
		// skip that list altogether
		if !blobTxs.Empty() && env.blobs*params.BlobTxBlobGasPerBlob >= params.MaxBlobGasPerBlock {
			log.Trace("Not enough blob space for further blob transactions")
			blobTxs.Clear()
			// Fall though to pick up any plain txs
		}
		// Retrieve the next transaction and abort if all done. The two lists
		// are merged by picking whichever head pays the higher tip.
		var (
			ltx *txpool.LazyTransaction
			txs *transactionsByPriceAndNonce
		)
		pltx, ptip := plainTxs.Peek()
		bltx, btip := blobTxs.Peek()

		switch {
		case pltx == nil:
			txs, ltx = blobTxs, bltx
		case bltx == nil:
			txs, ltx = plainTxs, pltx
		default:
			if ptip.Lt(btip) {
				txs, ltx = blobTxs, bltx
			} else {
				txs, ltx = plainTxs, pltx
			}
		}
		if ltx == nil {
			break
		}

		// If we don't have enough space for the next transaction, skip the account.
		if env.gasPool.Gas() < ltx.Gas {
			log.Trace("Not enough gas left for transaction", "hash", ltx.Hash, "left", env.gasPool.Gas(), "needed", ltx.Gas)
			txs.Pop()
			continue
		}
		if left := uint64(params.MaxBlobGasPerBlock - env.blobs*params.BlobTxBlobGasPerBlob); left < ltx.BlobGas {
			log.Trace("Not enough blob gas left for transaction", "hash", ltx.Hash, "left", left, "needed", ltx.BlobGas)
			txs.Pop()
			continue
		}
		// Transaction seems to fit, pull it up from the pool
		tx := ltx.Resolve()
		if tx == nil {
			log.Trace("Ignoring evicted transaction", "hash", ltx.Hash)
			txs.Pop()
			continue
		}
		// If we don't have enough size left for the next transaction, skip it.
		if env.size+uint32(tx.Size())+blockReserveSize > params.MaxMessageSize {
			log.Trace("Not enough size left for transaction", "hash", ltx.Hash,
				"env.size", env.size, "needed", uint32(tx.Size()))
			txs.Pop()
			continue
		}
		// Error may be ignored here. The error has already been checked
		// during transaction acceptance is the transaction pool.
		from, _ := types.Sender(env.signer, tx)

		// Check whether the tx is replay protected. If we're not in the EIP155 hf
		// phase, start ignoring the sender until we do.
		if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
			log.Trace("Ignoring replay protected transaction", "hash", ltx.Hash, "eip155", w.chainConfig.EIP155Block)
			txs.Pop()
			continue
		}
		// Start executing the transaction
		env.state.SetTxContext(tx.Hash(), env.tcount)

		logs, err := w.commitTransaction(env, tx, bloomProcessors)
		switch {
		case errors.Is(err, core.ErrNonceTooLow):
			// New head notification data race between the transaction pool and miner, shift
			log.Trace("Skipping transaction with low nonce", "hash", ltx.Hash, "sender", from, "nonce", tx.Nonce())
			txs.Shift()

		case errors.Is(err, nil):
			// Everything ok, collect the logs and shift in the next transaction from the same account
			coalescedLogs = append(coalescedLogs, logs...)
			env.tcount++
			env.size += uint32(tx.Size()) // size of BlobTxSidecar included
			txs.Shift()

		default:
			// Transaction is regarded as invalid, drop all consecutive transactions from
			// the same sender because of `nonce-too-high` clause.
			log.Debug("Transaction failed, account skipped", "hash", ltx.Hash, "err", err)
			txs.Pop()
		}
	}
	bloomProcessors.Close()
	if !w.isRunning() && len(coalescedLogs) > 0 {
		// We don't push the pendingLogsEvent while we are sealing. The reason is that
		// when we are sealing, the worker will regenerate a sealing block every 3 seconds.
		// In order to avoid pushing the repeated pendingLog, we disable the pending log pushing.

		// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
		// logs by filling in the block hash when the block was mined by the local miner. This can
		// cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
		cpy := make([]*types.Log, len(coalescedLogs))
		for i, l := range coalescedLogs {
			cpy[i] = new(types.Log)
			*cpy[i] = *l
		}
		w.pendingLogsFeed.Send(cpy)
	}
	return signalToErr(signal)
}
|
|
|
|
|
2022-01-24 10:19:52 +03:00
|
|
|
// generateParams wraps various of settings for generating sealing task.
type generateParams struct {
	timestamp   uint64            // The timestamp for sealing task
	forceTime   bool              // Flag whether the given timestamp is immutable or not
	parentHash  common.Hash       // Parent block hash, empty means the latest chain head
	coinbase    common.Address    // The fee recipient address for including transaction
	random      common.Hash       // The randomness generated by beacon chain, empty before the merge
	withdrawals types.Withdrawals // List of withdrawals to include in block.
	prevWork    *environment      // Previous sealing environment; if set, its state prefetcher is inherited (see makeEnv)
	beaconRoot  *common.Hash      // The beacon root (cancun field).
	noTxs       bool              // Flag whether an empty block without any transaction is expected
}
|
|
|
|
|
|
|
|
// prepareWork constructs the sealing task according to the given parameters,
// either based on the last chain head or specified parent. In this function
// the pending transactions are not filled yet, only the empty task returned.
func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
	w.mu.RLock()
	defer w.mu.RUnlock()

	// Find the parent block for sealing task
	parent := w.chain.CurrentBlock()
	if genParams.parentHash != (common.Hash{}) {
		block := w.chain.GetBlockByHash(genParams.parentHash)
		if block == nil {
			return nil, errors.New("missing parent")
		}
		parent = block.Header()
	}
	// Sanity check the timestamp correctness, recap the timestamp
	// to parent+1 if the mutation is allowed.
	timestamp := genParams.timestamp
	if parent.Time >= timestamp {
		if genParams.forceTime {
			return nil, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time, timestamp)
		}
		timestamp = parent.Time + 1
	}
	// Construct the sealing block header.
	header := &types.Header{
		ParentHash: parent.Hash(),
		Number:     new(big.Int).Add(parent.Number, common.Big1),
		GasLimit:   core.CalcGasLimit(parent.GasLimit, w.config.GasCeil),
		Time:       timestamp,
		Coinbase:   genParams.coinbase,
	}
	// Set the extra field.
	if len(w.extra) != 0 {
		header.Extra = w.extra
	}
	// Set the randomness field from the beacon chain if it's available.
	if genParams.random != (common.Hash{}) {
		header.MixDigest = genParams.random
	}
	// Set baseFee and GasLimit if we are on an EIP-1559 chain
	if w.chainConfig.IsLondon(header.Number) {
		header.BaseFee = eip1559.CalcBaseFee(w.chainConfig, parent)
		// At the London transition block (non-Parlia chains only), the gas
		// limit is bumped by the elasticity multiplier per EIP-1559.
		if w.chainConfig.Parlia == nil && !w.chainConfig.IsLondon(parent.Number) {
			parentGasLimit := parent.GasLimit * w.chainConfig.ElasticityMultiplier()
			header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil)
		}
	}
	// Run the consensus preparation with the default or customized consensus engine.
	// Note that the `header.Time` may be changed.
	if err := w.engine.Prepare(w.chain, header); err != nil {
		log.Error("Failed to prepare header for sealing", "err", err)
		return nil, err
	}
	// Apply EIP-4844, EIP-4788.
	if w.chainConfig.IsCancun(header.Number, header.Time) {
		var excessBlobGas uint64
		if w.chainConfig.IsCancun(parent.Number, parent.Time) {
			excessBlobGas = eip4844.CalcExcessBlobGas(*parent.ExcessBlobGas, *parent.BlobGasUsed)
		} else {
			// For the first post-fork block, both parent.data_gas_used and parent.excess_data_gas are evaluated as 0
			excessBlobGas = eip4844.CalcExcessBlobGas(0, 0)
		}
		header.BlobGasUsed = new(uint64)
		header.ExcessBlobGas = &excessBlobGas
		if w.chainConfig.Parlia != nil {
			header.WithdrawalsHash = &types.EmptyWithdrawalsHash
		}
		// Non-Parlia chains take the beacon root from the request; Parlia
		// chains use a zero root starting from the Bohr hard fork.
		if w.chainConfig.Parlia == nil {
			header.ParentBeaconRoot = genParams.beaconRoot
		} else if w.chainConfig.IsBohr(header.Number, header.Time) {
			header.ParentBeaconRoot = new(common.Hash)
		}
	}
	// Could potentially happen if starting to mine in an odd state.
	// Note genParams.coinbase can be different with header.Coinbase
	// since clique algorithm can modify the coinbase field in header.
	env, err := w.makeEnv(parent, header, genParams.coinbase, genParams.prevWork)
	if err != nil {
		log.Error("Failed to create sealing context", "err", err)
		return nil, err
	}

	if !w.chainConfig.IsFeynman(header.Number, header.Time) {
		// Handle upgrade build-in system contract code
		systemcontracts.UpgradeBuildInSystemContract(w.chainConfig, header.Number, parent.Time, header.Time, env.state)
	}

	// EIP-4788: record the parent beacon block root in state before any
	// user transactions execute.
	if header.ParentBeaconRoot != nil {
		context := core.NewEVMBlockContext(header, w.chain, nil)
		vmenv := vm.NewEVM(context, vm.TxContext{}, env.state, w.chainConfig, vm.Config{})
		core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, vmenv, env.state)
	}

	// Seed the running block-size estimate with the header's encoded size.
	env.size = uint32(env.header.Size())

	return env, nil
}
|
2018-08-03 11:33:37 +03:00
|
|
|
|
2022-01-24 10:19:52 +03:00
|
|
|
// fillTransactions retrieves the pending transactions from the txpool and fills them
// into the given sealing block. Plain (EIP-1559) and blob (EIP-4844) transactions are
// fetched separately, split into local and remote senders, and committed with locals
// first. When bidTxs is non-nil, transactions already packed by a MEV bid are pruned
// from the pending set so they are not included twice. The transaction selection and
// ordering strategy can be customized with the plugin in the future.
func (w *worker) fillTransactions(interruptCh chan int32, env *environment, stopTimer *time.Timer, bidTxs mapset.Set[common.Hash]) (err error) {
	// Snapshot the configured minimum tip under the read lock; w.tip may be
	// updated concurrently.
	w.mu.RLock()
	tip := w.tip
	w.mu.RUnlock()

	// Retrieve the pending transactions pre-filtered by the 1559/4844 dynamic fees
	filter := txpool.PendingFilter{
		MinTip: tip,
	}
	if env.header.BaseFee != nil {
		filter.BaseFee = uint256.MustFromBig(env.header.BaseFee)
	}
	if env.header.ExcessBlobGas != nil {
		filter.BlobFee = uint256.MustFromBig(eip4844.CalcBlobFee(*env.header.ExcessBlobGas))
	}
	// Query the pool twice with the same fee filter: once for plain txs, once for
	// blob txs, so the two classes can be priced/committed independently below.
	filter.OnlyPlainTxs, filter.OnlyBlobTxs = true, false
	pendingPlainTxs := w.eth.TxPool().Pending(filter)

	filter.OnlyPlainTxs, filter.OnlyBlobTxs = false, true
	pendingBlobTxs := w.eth.TxPool().Pending(filter)

	if bidTxs != nil {
		// For every account, scan its (nonce-ordered) pending list from the end and
		// drop everything up to and including the highest-index tx that is part of
		// the bid — presumably because the bid already consumed that lower-nonce
		// prefix. If the last tx of an account is in the bid, the whole account is
		// removed from the map.
		filterBidTxs := func(commonTxs map[common.Address][]*txpool.LazyTransaction) {
			for acc, txs := range commonTxs {
				for i := len(txs) - 1; i >= 0; i-- {
					if bidTxs.Contains(txs[i].Hash) {
						if i == len(txs)-1 {
							delete(commonTxs, acc)
						} else {
							commonTxs[acc] = txs[i+1:]
						}
						break
					}
				}
			}
		}
		filterBidTxs(pendingPlainTxs)
		filterBidTxs(pendingBlobTxs)
	}

	// Split the pending transactions into locals and remotes.
	localPlainTxs, remotePlainTxs := make(map[common.Address][]*txpool.LazyTransaction), pendingPlainTxs
	localBlobTxs, remoteBlobTxs := make(map[common.Address][]*txpool.LazyTransaction), pendingBlobTxs

	for _, account := range w.eth.TxPool().Locals() {
		if txs := remotePlainTxs[account]; len(txs) > 0 {
			delete(remotePlainTxs, account)
			localPlainTxs[account] = txs
		}
		if txs := remoteBlobTxs[account]; len(txs) > 0 {
			delete(remoteBlobTxs, account)
			localBlobTxs[account] = txs
		}
	}

	// Fill the block with all available pending transactions.
	// we will abort when:
	//   1. new block was imported
	//   2. out of Gas, no more transaction can be added.
	//   3. the mining timer has expired, stop adding transactions.
	//   4. interrupted resubmit timer, which is by default 10s.
	//      resubmit is for PoW only, can be deleted for PoS consensus later
	if len(localPlainTxs) > 0 || len(localBlobTxs) > 0 {
		plainTxs := newTransactionsByPriceAndNonce(env.signer, localPlainTxs, env.header.BaseFee)
		blobTxs := newTransactionsByPriceAndNonce(env.signer, localBlobTxs, env.header.BaseFee)

		if err := w.commitTransactions(env, plainTxs, blobTxs, interruptCh, stopTimer); err != nil {
			return err
		}
	}
	if len(remotePlainTxs) > 0 || len(remoteBlobTxs) > 0 {
		plainTxs := newTransactionsByPriceAndNonce(env.signer, remotePlainTxs, env.header.BaseFee)
		blobTxs := newTransactionsByPriceAndNonce(env.signer, remoteBlobTxs, env.header.BaseFee)

		if err := w.commitTransactions(env, plainTxs, blobTxs, interruptCh, stopTimer); err != nil {
			return err
		}
	}

	return nil
}
|
|
|
|
|
2022-01-24 10:19:52 +03:00
|
|
|
// generateWork generates a sealing block based on the given parameters.
// It prepares a fresh work environment, optionally fills it with pending
// transactions, asks the consensus engine to finalize and assemble the block,
// and returns the payload result (block, fees and blob sidecars) or an error.
func (w *worker) generateWork(params *generateParams) *newPayloadResult {
	work, err := w.prepareWork(params)
	if err != nil {
		return &newPayloadResult{err: err}
	}
	// The environment is only needed to assemble this single payload; release
	// it when done.
	defer work.discard()

	if !params.noTxs {
		// One-shot fill: no interrupt channel, no stop timer, no bid tx set.
		err := w.fillTransactions(nil, work, nil, nil)
		if errors.Is(err, errBlockInterruptedByTimeout) {
			log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(w.newpayloadTimeout))
		}
	}
	// Accumulated tx fees are held on the system address; snapshot the balance
	// before FinalizeAndAssemble runs (which may further mutate state —
	// NOTE(review): confirm ordering assumption against the engine implementation).
	fees := work.state.GetBalance(consensus.SystemAddress)
	block, _, err := w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, nil, work.receipts, params.withdrawals)
	if err != nil {
		return &newPayloadResult{err: err}
	}

	return &newPayloadResult{
		block:    block,
		fees:     fees.ToBig(),
		sidecars: work.sidecars,
	}
}
|
|
|
|
|
|
|
|
// commitWork generates several new sealing tasks based on the parent block
// and submits them to the sealer. As long as there is time left before the
// header timestamp (minus DelayLeftOver), it repeatedly re-fills a candidate
// block with newly arrived transactions, keeps the most profitable local
// candidate, optionally compares it against the best external MEV bid, and
// finally commits the winner.
func (w *worker) commitWork(interruptCh chan int32, timestamp int64) {
	// Abort committing if node is still syncing
	if w.syncing.Load() {
		return
	}
	start := time.Now()

	// Set the coinbase if the worker is running or it's required
	var coinbase common.Address
	if w.isRunning() {
		coinbase = w.etherbase()
		if coinbase == (common.Address{}) {
			log.Error("Refusing to mine without etherbase")
			return
		}
	}

	// Both timers are created expired and armed later via Reset; drain the
	// initial tick so a stale fire cannot trigger the select below.
	stopTimer := time.NewTimer(0)
	defer stopTimer.Stop()
	<-stopTimer.C // discard the initial tick

	stopWaitTimer := time.NewTimer(0)
	defer stopWaitTimer.Stop()
	<-stopWaitTimer.C // discard the initial tick

	// validator can try several times to get the most profitable block,
	// as long as the timestamp is not reached.
	workList := make([]*environment, 0, 10)
	var prevWork *environment
	// workList clean up: discard every candidate except the one promoted to
	// w.current at the end of this function.
	defer func() {
		for _, wk := range workList {
			// only keep the best work, discard others.
			if wk == w.current {
				continue
			}
			wk.discard()
		}
	}()

LOOP:
	for {
		work, err := w.prepareWork(&generateParams{
			timestamp: uint64(timestamp),
			coinbase:  coinbase,
			prevWork:  prevWork,
		})
		if err != nil {
			return
		}
		prevWork = work
		workList = append(workList, work)

		// Ask the engine how much time is left for filling this block.
		delay := w.engine.Delay(w.chain, work.header, &w.config.DelayLeftOver)
		if delay == nil {
			log.Warn("commitWork delay is nil, something is wrong")
			// A nil stopTimer forces the single-attempt exit after the fill below.
			stopTimer = nil
		} else if *delay <= 0 {
			log.Debug("Not enough time for commitWork")
			break
		} else {
			log.Debug("commitWork stopTimer", "block", work.header.Number,
				"header time", time.Until(time.Unix(int64(work.header.Time), 0)),
				"commit delay", *delay, "DelayLeftOver", w.config.DelayLeftOver)
			stopTimer.Reset(*delay)
		}

		// subscribe before fillTransactions
		txsCh := make(chan core.NewTxsEvent, txChanSize)
		// Subscribe for transaction insertion events (whether from network or resurrects)
		sub := w.eth.TxPool().SubscribeTransactions(txsCh, true)
		// if TxPool has been stopped, `sub` would be nil, it could happen on shutdown.
		if sub == nil {
			log.Info("commitWork SubscribeTransactions return nil")
		} else {
			// NOTE(review): this defer sits inside the retry loop, so one deferred
			// Unsubscribe accumulates per iteration until commitWork returns.
			// Harmless if Unsubscribe is re-entrant (it is also called manually at
			// the end of each iteration), but worth confirming.
			defer sub.Unsubscribe()
		}

		// Fill pending transactions from the txpool into the block.
		fillStart := time.Now()
		err = w.fillTransactions(interruptCh, work, stopTimer, nil)
		fillDuration := time.Since(fillStart)
		switch {
		case errors.Is(err, errBlockInterruptedByNewHead):
			// work.discard()
			log.Debug("commitWork abort", "err", err)
			return
		case errors.Is(err, errBlockInterruptedByRecommit):
			fallthrough
		case errors.Is(err, errBlockInterruptedByTimeout):
			fallthrough
		case errors.Is(err, errBlockInterruptedByOutOfGas):
			// break the loop to get the best work
			log.Debug("commitWork finish", "reason", err)
			break LOOP
		}

		if interruptCh == nil || stopTimer == nil {
			// it is single commit work, no need to try several times.
			log.Info("commitWork interruptCh or stopTimer is nil")
			break
		}

		newTxsNum := 0
		// stopTimer was the maximum delay for each fillTransactions
		// but now it is used to wait until (head.Time - DelayLeftOver) is reached.
		stopTimer.Reset(time.Until(time.Unix(int64(work.header.Time), 0)) - w.config.DelayLeftOver)
	LOOP_WAIT:
		for {
			select {
			case <-stopTimer.C:
				log.Debug("commitWork stopTimer expired")
				break LOOP
			case <-interruptCh:
				log.Debug("commitWork interruptCh closed, new block imported or resubmit triggered")
				return
			case ev := <-txsCh:
				// Re-evaluate the remaining sealing time.
				// NOTE(review): delay is dereferenced below without a nil check;
				// engine.Delay returning nil here would panic — confirm it cannot
				// once a header has already been prepared.
				delay := w.engine.Delay(w.chain, work.header, &w.config.DelayLeftOver)
				log.Debug("commitWork txsCh arrived", "fillDuration", fillDuration.String(),
					"delay", delay.String(), "work.tcount", work.tcount,
					"newTxsNum", newTxsNum, "len(ev.Txs)", len(ev.Txs))
				if *delay < fillDuration {
					// There may not be enough time for another fillTransactions.
					break LOOP
				} else if *delay < fillDuration*2 {
					// We can schedule another fillTransactions, but the time is limited,
					// probably it is the last chance, schedule it immediately.
					break LOOP_WAIT
				} else {
					// There is still plenty of time left.
					// We can wait a while to collect more transactions before
					// schedule another fillTransaction to reduce CPU cost.
					// There will be 2 cases to schedule another fillTransactions:
					//   1. newTxsNum >= work.tcount
					//   2. not much time left, have to schedule it immediately.
					newTxsNum = newTxsNum + len(ev.Txs)
					if newTxsNum >= work.tcount {
						break LOOP_WAIT
					}
					stopWaitTimer.Reset(*delay - fillDuration*2)
				}
			case <-stopWaitTimer.C:
				if newTxsNum > 0 {
					break LOOP_WAIT
				}
			}
		}
		// if sub's channel is full, it will block other NewTxsEvent subscribers,
		// so unsubscribe ASAP; Unsubscribe() is re-enterable, safe to call several times.
		if sub != nil {
			sub.Unsubscribe()
		}
	}
	// get the most profitable work: the fee income of a candidate is the
	// balance accumulated on the system address in its state.
	bestWork := workList[0]
	bestReward := new(uint256.Int)
	for i, wk := range workList {
		balance := wk.state.GetBalance(consensus.SystemAddress)
		log.Debug("Get the most profitable work", "index", i, "balance", balance, "bestReward", bestReward)
		if balance.Cmp(bestReward) > 0 {
			bestWork = wk
			bestReward = balance
		}
	}

	// when out-turn, use bestWork to prevent bundle leakage.
	// when in-turn, compare with remote work.
	from := bestWork.coinbase
	if w.bidFetcher != nil && bestWork.header.Difficulty.Cmp(diffInTurn) == 0 {
		// Give an in-flight bid simulation a bounded chance to finish before
		// picking the best bid.
		if pendingBid := w.bidFetcher.GetSimulatingBid(bestWork.header.ParentHash); pendingBid != nil {
			waitBidTimer := time.NewTimer(waitMEVMinerEndTimeLimit)
			defer waitBidTimer.Stop()
			select {
			case <-waitBidTimer.C:
			case <-pendingBid.finished:
			}
		}

		bestBid := w.bidFetcher.GetBestBid(bestWork.header.ParentHash)

		if bestBid != nil {
			log.Debug("BidSimulator: final compare", "block", bestWork.header.Number.Uint64(),
				"localBlockReward", bestReward.String(),
				"bidBlockReward", bestBid.packedBlockReward.String())
		}

		if bestBid != nil && bestReward.CmpBig(bestBid.packedBlockReward) < 0 {
			// localValidatorReward is the reward for the validator self by the local block.
			// ValidatorCommission is in basis points, hence the division by 10000.
			localValidatorReward := new(uint256.Int).Mul(bestReward, uint256.NewInt(w.config.Mev.ValidatorCommission))
			localValidatorReward.Div(localValidatorReward, uint256.NewInt(10000))

			log.Debug("BidSimulator: final compare", "block", bestWork.header.Number.Uint64(),
				"localValidatorReward", localValidatorReward.String(),
				"bidValidatorReward", bestBid.packedValidatorReward.String())

			// blockReward(benefits delegators) and validatorReward(benefits the validator) are both optimal
			if localValidatorReward.CmpBig(bestBid.packedValidatorReward) < 0 {
				bestWork = bestBid.env
				from = bestBid.bid.Builder

				log.Info("[BUILDER BLOCK]",
					"block", bestWork.header.Number.Uint64(),
					"builder", from,
					"blockReward", weiToEtherStringF6(bestBid.packedBlockReward),
					"validatorReward", weiToEtherStringF6(bestBid.packedValidatorReward),
					"bid", bestBid.bid.Hash().TerminalString(),
				)
			}
		}
	}

	// Track the origin (local coinbase or external builder) of the chosen block.
	metrics.GetOrRegisterCounter(fmt.Sprintf("block/from/%v", from), nil).Inc(1)

	w.commit(bestWork, w.fullTaskHook, true, start)

	// Swap out the old work with the new one, terminating any leftover
	// prefetcher processes in the mean time and starting a new one.
	if w.current != nil {
		w.current.discard()
	}
	w.current = bestWork
}
|
2022-07-05 06:14:21 +03:00
|
|
|
|
2024-03-08 06:15:35 +03:00
|
|
|
// inTurn return true if the current worker is in turn.
|
|
|
|
func (w *worker) inTurn() bool {
|
|
|
|
validator, _ := w.engine.NextInTurnValidator(w.chain, w.chain.CurrentBlock())
|
|
|
|
return validator != common.Address{} && validator == w.etherbase()
|
|
|
|
}
|
|
|
|
|
2022-01-24 10:19:52 +03:00
|
|
|
// commit runs any post-transaction state modifications, assembles the final block
// and commits new work if consensus engine is running.
// Note the assumption is held that the mutation is allowed to the passed env, do
// the deep copy first.
func (w *worker) commit(env *environment, interval func(), update bool, start time.Time) error {
	if w.isRunning() {
		// interval is an instrumentation hook (e.g. w.fullTaskHook) invoked just
		// before finalization.
		if interval != nil {
			interval()
		}
		/*
			err := env.state.WaitPipeVerification()
			if err != nil {
				return err
			}
			env.state.CorrectAccountsRoot(w.chain.CurrentBlock().Root)
		*/
		// Accumulated tx fees live on the system address; converted to ether
		// purely for the log line below.
		fees := env.state.GetBalance(consensus.SystemAddress).ToBig()
		feesInEther := new(big.Float).Quo(new(big.Float).SetInt(fees), big.NewFloat(params.Ether))
		// Withdrawals are set to nil here, because this is only called in PoW.
		finalizeStart := time.Now()
		// The header is copied before handing it to the engine — presumably so
		// finalization cannot mutate env.header in place.
		block, receipts, err := w.engine.FinalizeAndAssemble(w.chain, types.CopyHeader(env.header), env.state, env.txs, nil, env.receipts, nil)
		if err != nil {
			return err
		}
		// env.receipts = receipts
		finalizeBlockTimer.UpdateSince(finalizeStart)

		// Normalize an empty-withdrawals header to an empty (non-nil) list so the
		// assembled block carries a consistent representation.
		if block.Header().EmptyWithdrawalsHash() {
			block = block.WithWithdrawals(make([]*types.Withdrawal, 0))
		}

		// If Cancun enabled, sidecars can't be nil then.
		if w.chainConfig.IsCancun(env.header.Number, env.header.Time) && env.sidecars == nil {
			env.sidecars = make(types.BlobSidecars, 0)
		}
		// Create a local environment copy, avoid the data race with snapshot state.
		// https://github.com/ethereum/go-ethereum/issues/24299
		// NOTE: this deliberately shadows the incoming env for the rest of this branch.
		env := env.copy()

		block = block.WithSidecars(env.sidecars)

		// If we're post merge, just ignore
		if !w.isTTDReached(block.Header()) {
			select {
			case w.taskCh <- &task{receipts: receipts, state: env.state, block: block, createdAt: time.Now()}:
				log.Info("Commit new sealing work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
					"txs", env.tcount, "blobs", env.blobs, "gas", block.GasUsed(), "fees", feesInEther, "elapsed", common.PrettyDuration(time.Since(start)))

			case <-w.exitCh:
				log.Info("Worker has exited")
			}
		}
	}
	if update {
		// Note: this uses the ORIGINAL env argument — the copy made above is
		// scoped to the isRunning branch.
		w.updateSnapshot(env)
	}
	return nil
}
|
2019-11-29 16:22:08 +03:00
|
|
|
|
2022-01-24 10:19:52 +03:00
|
|
|
// getSealingBlock generates the sealing block based on the given parameters.
|
2022-05-18 17:33:37 +03:00
|
|
|
// The generation result will be passed back via the given channel no matter
|
|
|
|
// the generation itself succeeds or not.
|
2023-08-24 01:28:38 +03:00
|
|
|
func (w *worker) getSealingBlock(params *generateParams) *newPayloadResult {
|
2022-01-24 10:19:52 +03:00
|
|
|
req := &getWorkReq{
|
2023-08-24 01:28:38 +03:00
|
|
|
params: params,
|
2022-11-02 12:32:20 +03:00
|
|
|
result: make(chan *newPayloadResult, 1),
|
2022-01-24 10:19:52 +03:00
|
|
|
}
|
|
|
|
select {
|
|
|
|
case w.getWorkCh <- req:
|
2023-08-24 00:16:14 +03:00
|
|
|
return <-req.result
|
2022-01-24 10:19:52 +03:00
|
|
|
case <-w.exitCh:
|
2023-08-24 00:16:14 +03:00
|
|
|
return &newPayloadResult{err: errors.New("miner closed")}
|
2022-01-24 10:19:52 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// isTTDReached returns the indicator if the given block has reached the total
|
|
|
|
// terminal difficulty for The Merge transition.
|
|
|
|
func (w *worker) isTTDReached(header *types.Header) bool {
|
|
|
|
td, ttd := w.chain.GetTd(header.ParentHash, header.Number.Uint64()-1), w.chain.Config().TerminalTotalDifficulty
|
|
|
|
return td != nil && ttd != nil && td.Cmp(ttd) >= 0
|
|
|
|
}
|
|
|
|
|
2020-07-28 19:16:49 +03:00
|
|
|
// copyReceipts makes a deep copy of the given receipts.
|
|
|
|
func copyReceipts(receipts []*types.Receipt) []*types.Receipt {
|
|
|
|
result := make([]*types.Receipt, len(receipts))
|
|
|
|
for i, l := range receipts {
|
|
|
|
cpy := *l
|
|
|
|
result[i] = &cpy
|
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2022-10-28 07:01:18 +03:00
|
|
|
// signalToErr converts the interruption signal to a concrete error type for return.
|
|
|
|
// The given signal must be a valid interruption signal.
|
|
|
|
func signalToErr(signal int32) error {
|
|
|
|
switch signal {
|
|
|
|
case commitInterruptNone:
|
|
|
|
return nil
|
|
|
|
case commitInterruptNewHead:
|
|
|
|
return errBlockInterruptedByNewHead
|
|
|
|
case commitInterruptResubmit:
|
|
|
|
return errBlockInterruptedByRecommit
|
|
|
|
case commitInterruptTimeout:
|
|
|
|
return errBlockInterruptedByTimeout
|
|
|
|
case commitInterruptOutOfGas:
|
|
|
|
return errBlockInterruptedByOutOfGas
|
2024-04-28 06:05:09 +03:00
|
|
|
case commitInterruptBetterBid:
|
|
|
|
return errBlockInterruptedByBetterBid
|
2022-10-28 07:01:18 +03:00
|
|
|
default:
|
|
|
|
panic(fmt.Errorf("undefined signal %d", signal))
|
|
|
|
}
|
|
|
|
}
|