// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package txpool

import (
	"errors"
	"fmt"
	"math/big"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/holiman/uint256"
)

// TxStatus is the current status of a transaction as seen by the pool.
type TxStatus uint

const (
	TxStatusUnknown TxStatus = iota
	TxStatusQueued
	TxStatusPending
	TxStatusIncluded
)

var (
	// reservationsGaugeName is the prefix of a per-subpool address reservation
	// metric.
	//
	// This is mostly a sanity metric to ensure there's no bug that would make
	// some subpool hog all the reservations due to mis-accounting.
	reservationsGaugeName = "txpool/reservations"
)

// BlockChain defines the minimal set of methods needed to back a tx pool with
// a chain. Exists to allow mocking the live chain out of tests.
type BlockChain interface {
	// CurrentBlock returns the current head of the chain.
	CurrentBlock() *types.Header

	// SubscribeChainHeadEvent subscribes to new blocks being added to the chain.
	SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
}
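
// A minimal test double satisfying BlockChain could look like the sketch below.
// The mockChain type is hypothetical and shown only to illustrate how the live
// chain can be mocked out of tests; it is not part of this package's API:
//
//	type mockChain struct {
//		head *types.Header
//		feed event.Feed
//	}
//
//	func (c *mockChain) CurrentBlock() *types.Header { return c.head }
//
//	func (c *mockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
//		return c.feed.Subscribe(ch)
//	}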

// TxPool is an aggregator for various transaction specific pools, collectively
// tracking all the transactions deemed interesting by the node. Transactions
// enter the pool when they are received from the network or submitted locally.
// They exit the pool when they are included in the blockchain or evicted due to
// resource constraints.
type TxPool struct {
	subpools []SubPool // List of subpools for specialized transaction handling

	reservations map[common.Address]SubPool // Map with the account to pool reservations
	reserveLock  sync.Mutex                 // Lock protecting the account reservations

	subs event.SubscriptionScope // Subscription scope to unsubscribe all on shutdown
	quit chan chan error         // Quit channel to tear down the head updater
	term chan struct{}           // Termination channel to detect a closed pool

	sync chan chan error // Testing / simulator channel to block until internal reset is done
}

// New creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func New(gasTip uint64, chain BlockChain, subpools []SubPool) (*TxPool, error) {
	// Retrieve the current head so that all subpools and this main coordinator
	// pool will have the same starting state, even if the chain moves forward
	// during initialization.
	head := chain.CurrentBlock()

	pool := &TxPool{
		subpools:     subpools,
		reservations: make(map[common.Address]SubPool),
		quit:         make(chan chan error),
		term:         make(chan struct{}),
		sync:         make(chan chan error),
	}
	for i, subpool := range subpools {
		if err := subpool.Init(gasTip, head, pool.reserver(i, subpool)); err != nil {
			for j := i - 1; j >= 0; j-- {
				subpools[j].Close()
			}
			return nil, err
		}
	}
	go pool.loop(head, chain)
	return pool, nil
}
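
// A minimal construction sketch (hypothetical caller code, assuming a concrete
// SubPool implementation such as the legacy pool and a live *core.BlockChain
// that satisfies both this package's BlockChain and the subpool's own chain
// interface):
//
//	legacy := legacypool.New(legacypool.DefaultConfig, chain)
//	pool, err := txpool.New(gasTip, chain, []txpool.SubPool{legacy})
//	if err != nil {
//		return err
//	}
//	defer pool.Close()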

// reserver is a method to create an address reservation callback to exclusively
// assign/deassign addresses to/from subpools. This can ensure that at any point
// in time, only a single subpool is able to manage an account, avoiding cross
// subpool eviction issues and nonce conflicts.
func (p *TxPool) reserver(id int, subpool SubPool) AddressReserver {
	return func(addr common.Address, reserve bool) error {
		p.reserveLock.Lock()
		defer p.reserveLock.Unlock()

		owner, exists := p.reservations[addr]
		if reserve {
			// Double reservations are forbidden even from the same pool to
			// avoid subtle bugs in the long term.
			if exists {
				if owner == subpool {
					log.Error("pool attempted to reserve already-owned address", "address", addr)
					return nil // Ignore fault to give the pool a chance to recover while the bug gets fixed
				}
				return errors.New("address already reserved")
			}
			p.reservations[addr] = subpool
			if metrics.Enabled {
				m := fmt.Sprintf("%s/%d", reservationsGaugeName, id)
				metrics.GetOrRegisterGauge(m, nil).Inc(1)
			}
			return nil
		}
		// Ensure subpools only attempt to unreserve their own owned addresses,
		// otherwise flag as a programming error.
		if !exists {
			log.Error("pool attempted to unreserve non-reserved address", "address", addr)
			return errors.New("address not reserved")
		}
		if subpool != owner {
			log.Error("pool attempted to unreserve non-owned address", "address", addr)
			return errors.New("address not owned")
		}
		delete(p.reservations, addr)
		if metrics.Enabled {
			m := fmt.Sprintf("%s/%d", reservationsGaugeName, id)
			metrics.GetOrRegisterGauge(m, nil).Dec(1)
		}
		return nil
	}
}
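
// Sketch of how a subpool is expected to use the AddressReserver handed to it
// via Init (hypothetical subpool-side code, shown only to illustrate the
// reserve/release contract; "reserve" is the stored callback):
//
//	if err := reserve(addr, true); err != nil {
//		return err // the account is already owned by another subpool
//	}
//	// ... track transactions from addr ...
//	reserve(addr, false) // release the account once it has no transactions left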

// Close terminates the transaction pool and all its subpools.
func (p *TxPool) Close() error {
	var errs []error

	// Terminate the reset loop and wait for it to finish
	errc := make(chan error)
	p.quit <- errc
	if err := <-errc; err != nil {
		errs = append(errs, err)
	}
	// Terminate each subpool
	for _, subpool := range p.subpools {
		if err := subpool.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	// Unsubscribe anyone still listening for tx events
	p.subs.Close()

	if len(errs) > 0 {
		return fmt.Errorf("subpool close errors: %v", errs)
	}
	return nil
}

// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
func (p *TxPool) loop(head *types.Header, chain BlockChain) {
	// Close the termination marker when the pool stops
	defer close(p.term)

	// Subscribe to chain head events to trigger subpool resets
	var (
		newHeadCh  = make(chan core.ChainHeadEvent)
		newHeadSub = chain.SubscribeChainHeadEvent(newHeadCh)
	)
	defer newHeadSub.Unsubscribe()

	// Track the previous and current head to feed to an idle reset
	var (
		oldHead = head
		newHead = oldHead
	)
	// Consume chain head events and start resets when none is running
	var (
		resetBusy = make(chan struct{}, 1) // Allow 1 reset to run concurrently
		resetDone = make(chan *types.Header)

		resetForced bool       // Whether a forced reset was requested, only used in simulator mode
		resetWaiter chan error // Channel waiting on a forced reset, only used in simulator mode
	)
	// Notify the live reset waiter to not block if the txpool is closed.
	defer func() {
		if resetWaiter != nil {
			resetWaiter <- errors.New("pool already terminated")
			resetWaiter = nil
		}
	}()
	var errc chan error
	for errc == nil {
		// Something interesting might have happened, run a reset if there is
		// one needed but none is running. The resetter will run on its own
		// goroutine to allow chain head events to be consumed contiguously.
		if newHead != oldHead || resetForced {
			// Try to inject a busy marker and start a reset if successful
			select {
			case resetBusy <- struct{}{}:
				// Busy marker injected, start a new subpool reset
				go func(oldHead, newHead *types.Header) {
					for _, subpool := range p.subpools {
						subpool.Reset(oldHead, newHead)
					}
					resetDone <- newHead
				}(oldHead, newHead)

				// If the reset operation was explicitly requested, consider it
				// being fulfilled and drop the request marker. If it was not,
				// this is a noop.
				resetForced = false

			default:
				// Reset already running, wait until it finishes.
				//
				// Note, this will not drop any forced reset request. If a forced
				// reset was requested, but we were busy, then when the currently
				// running reset finishes, a new one will be spun up.
			}
		}
		// Wait for the next chain head event or a previous reset finish
		select {
		case event := <-newHeadCh:
			// Chain moved forward, store the head for later consumption
			newHead = event.Block.Header()

		case head := <-resetDone:
			// Previous reset finished, update the old head and allow a new reset
			oldHead = head
			<-resetBusy

			// If someone is waiting for a reset to finish, notify them, unless
			// the forced op is still pending. In that case, wait another round
			// of resets.
			if resetWaiter != nil && !resetForced {
				resetWaiter <- nil
				resetWaiter = nil
			}

		case errc = <-p.quit:
			// Termination requested, break out on the next loop round

		case syncc := <-p.sync:
			// Transaction pool is running inside a simulator, and we are about
			// to create a new block. Request a forced sync operation to ensure
			// that any running reset operation finishes to make block imports
			// deterministic. On top of that, run a new reset operation to make
			// transaction insertions deterministic instead of being stuck in a
			// queue waiting for a reset.
			resetForced = true
			resetWaiter = syncc
		}
	}
	// Notify the closer of termination (no error possible for now)
	errc <- nil
}

// SetGasTip updates the minimum gas tip required by the transaction pool for a
// new transaction, and drops all transactions below this threshold.
func (p *TxPool) SetGasTip(tip *big.Int) {
	for _, subpool := range p.subpools {
		subpool.SetGasTip(tip)
	}
}

// Has returns an indicator whether the pool has a transaction cached with the
// given hash.
func (p *TxPool) Has(hash common.Hash) bool {
	for _, subpool := range p.subpools {
		if subpool.Has(hash) {
			return true
		}
	}
	return false
}

// Get returns a transaction if it is contained in the pool, or nil otherwise.
func (p *TxPool) Get(hash common.Hash) *types.Transaction {
	for _, subpool := range p.subpools {
		if tx := subpool.Get(hash); tx != nil {
			return tx
		}
	}
	return nil
}

// Add enqueues a batch of transactions into the pool if they are valid. Due
// to the large transaction churn, add may postpone fully integrating the tx
// to a later point to batch multiple ones together.
func (p *TxPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
	// Split the input transactions between the subpools. It shouldn't really
	// happen that we receive merged batches, but better graceful than strange
	// errors.
	//
	// We also need to track how the transactions were split across the subpools,
	// so we can piece back the returned errors into the original order.
	txsets := make([][]*types.Transaction, len(p.subpools))
	splits := make([]int, len(txs))

	for i, tx := range txs {
		// Mark this transaction belonging to no-subpool
		splits[i] = -1

		// Try to find a subpool that accepts the transaction
		for j, subpool := range p.subpools {
			if subpool.Filter(tx) {
				txsets[j] = append(txsets[j], tx)
				splits[i] = j
				break
			}
		}
	}
	// Add the transactions split apart to the individual subpools and piece
	// back the errors into the original sort order.
	errsets := make([][]error, len(p.subpools))
	for i := 0; i < len(p.subpools); i++ {
		errsets[i] = p.subpools[i].Add(txsets[i], local, sync)
	}
	errs := make([]error, len(txs))
	for i, split := range splits {
		// If the transaction was rejected by all subpools, mark it unsupported
		if split == -1 {
			errs[i] = core.ErrTxTypeNotSupported
			continue
		}
		// Find which subpool handled it and pull in the corresponding error
		errs[i] = errsets[split][0]
		errsets[split] = errsets[split][1:]
	}
	return errs
}
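
// Usage sketch (hypothetical caller code): the returned errors line up with the
// input slice by index, so rejections can be attributed to specific transactions:
//
//	errs := pool.Add(txs, false, false)
//	for i, err := range errs {
//		if err != nil {
//			log.Warn("Transaction rejected", "hash", txs[i].Hash(), "err", err)
//		}
//	}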

// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce.
//
// The transactions can also be pre-filtered by the dynamic fee components to
// reduce allocations and load on downstream subsystems.
func (p *TxPool) Pending(minTip *uint256.Int, baseFee *uint256.Int, blobFee *uint256.Int) map[common.Address][]*LazyTransaction {
	txs := make(map[common.Address][]*LazyTransaction)
	for _, subpool := range p.subpools {
		for addr, set := range subpool.Pending(minTip, baseFee, blobFee) {
			txs[addr] = set
		}
	}
	return txs
}
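
// Filtering sketch (hypothetical caller code; head is assumed to be the current
// chain head): passing a minimum tip and the current base fee prunes underpriced
// transactions before they ever reach the caller:
//
//	pending := pool.Pending(uint256.NewInt(1), uint256.MustFromBig(head.BaseFee), nil)
//	for _, lazies := range pending {
//		for _, lazy := range lazies {
//			if tx := lazy.Resolve(); tx != nil {
//				// process the fully resolved transaction
//			}
//		}
//	}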

// SubscribeTransactions registers a subscription for new transaction events,
// supporting feeding only newly seen or also resurrected transactions.
func (p *TxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
	subs := make([]event.Subscription, len(p.subpools))
	for i, subpool := range p.subpools {
		subs[i] = subpool.SubscribeTransactions(ch, reorgs)
	}
	return p.subs.Track(event.JoinSubscriptions(subs...))
}
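
// Subscription sketch (hypothetical caller code): reorgs=false delivers only
// newly observed transactions, while reorgs=true also includes ones resurrected
// during chain reorganisations:
//
//	ch := make(chan core.NewTxsEvent, 128)
//	sub := pool.SubscribeTransactions(ch, false)
//	defer sub.Unsubscribe()
//	for {
//		select {
//		case ev := <-ch:
//			_ = ev.Txs // announced transactions, ready for processing
//		case <-sub.Err():
//			return
//		}
//	}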

// Nonce returns the next nonce of an account, with all transactions executable
// by the pool already applied on top.
func (p *TxPool) Nonce(addr common.Address) uint64 {
	// Since (for now) accounts are unique to subpools, only one pool will have
	// (at max) a non-state nonce. To avoid stateful lookups, just return the
	// highest nonce for now.
	var nonce uint64
	for _, subpool := range p.subpools {
		if next := subpool.Nonce(addr); nonce < next {
			nonce = next
		}
	}
	return nonce
}

// Stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (p *TxPool) Stats() (int, int) {
	var runnable, blocked int
	for _, subpool := range p.subpools {
		run, block := subpool.Stats()

		runnable += run
		blocked += block
	}
	return runnable, blocked
}

// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and sorted by nonce.
func (p *TxPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
	var (
		runnable = make(map[common.Address][]*types.Transaction)
		blocked  = make(map[common.Address][]*types.Transaction)
	)
	for _, subpool := range p.subpools {
		run, block := subpool.Content()

		for addr, txs := range run {
			runnable[addr] = txs
		}
		for addr, txs := range block {
			blocked[addr] = txs
		}
	}
	return runnable, blocked
}

// ContentFrom retrieves the data content of the transaction pool, returning the
// pending as well as queued transactions of this address, grouped by nonce.
func (p *TxPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
	for _, subpool := range p.subpools {
		run, block := subpool.ContentFrom(addr)
		if len(run) != 0 || len(block) != 0 {
			return run, block
		}
	}
	return []*types.Transaction{}, []*types.Transaction{}
}

// Locals retrieves the accounts currently considered local by the pool.
func (p *TxPool) Locals() []common.Address {
	// Retrieve the locals from each subpool and deduplicate them
	locals := make(map[common.Address]struct{})
	for _, subpool := range p.subpools {
		for _, local := range subpool.Locals() {
			locals[local] = struct{}{}
		}
	}
	// Flatten and return the deduplicated local set
	flat := make([]common.Address, 0, len(locals))
	for local := range locals {
		flat = append(flat, local)
	}
	return flat
}

// Status returns the known status (unknown/pending/queued) of a transaction
// identified by its hash.
func (p *TxPool) Status(hash common.Hash) TxStatus {
	for _, subpool := range p.subpools {
		if status := subpool.Status(hash); status != TxStatusUnknown {
			return status
		}
	}
	return TxStatusUnknown
}

// Sync is a helper method for unit tests or simulator runs where the chain events
// are arriving in quick succession, without any time in between them to run the
// internal background reset operations. This method will run an explicit reset
// operation to ensure the pool stabilises, thus avoiding flakey behavior.
//
// Note, do not use this in production / live code. In live code, the pool is
// meant to reset on a separate thread to avoid DoS vectors.
func (p *TxPool) Sync() error {
	sync := make(chan error)
	select {
	case p.sync <- sync:
		return <-sync
	case <-p.term:
		return errors.New("pool already terminated")
	}
}
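
// Test-side sketch (hypothetical test code, assuming a *testing.T named t):
// after injecting transactions or advancing the simulated chain, Sync blocks
// until the pool has processed the pending reset, making later assertions
// deterministic:
//
//	pool.Add(txs, true, false)
//	if err := pool.Sync(); err != nil {
//		t.Fatalf("failed to sync the pool: %v", err)
//	}
//	pending, queued := pool.Stats()
//	_, _ = pending, queued // assert on the stabilised pool contents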