go-ethereum/light/txpool.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package light

import (
	"context"
	"fmt"
	"math/big"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
)

const (
	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10
)

// txPermanent is the number of mined blocks after which a mined transaction is
// considered permanent and no rollback is expected
var txPermanent = uint64(500)

// TxPool implements the transaction pool for light clients, which keeps track
// of the status of locally created transactions, detecting if they are included
// in a block (mined) or rolled back. There are no queued transactions since we
// always receive all locally signed transactions in the same order as they are
// created.
type TxPool struct {
	config       *params.ChainConfig
	signer       types.Signer
	quit         chan bool
	txFeed       event.Feed
	scope        event.SubscriptionScope
	chainHeadCh  chan core.ChainHeadEvent
	chainHeadSub event.Subscription
	mu           sync.RWMutex
	chain        *LightChain
	odr          OdrBackend
	chainDb      ethdb.Database
	relay        TxRelayBackend
	head         common.Hash
	nonce        map[common.Address]uint64            // "pending" nonce
	pending      map[common.Hash]*types.Transaction   // pending transactions by tx hash
	mined        map[common.Hash][]*types.Transaction // mined transactions by block hash
	clearIdx     uint64                               // earliest block nr that can contain mined tx info

	istanbul bool // Fork indicator whether we are in the istanbul stage.
}

// TxRelayBackend provides an interface to the mechanism that forwards transactions
// to the ETH network. The implementations of the functions should be non-blocking.
//
// Send instructs the backend to forward new transactions.
// NewHead notifies the backend about a new head after it has been processed by the
// tx pool, including the mined and rolled back transactions since the last event.
// Discard notifies the backend about transactions that should be discarded, either
// because they have been replaced by a re-send or because they have been mined
// long ago and no rollback is expected.
type TxRelayBackend interface {
	Send(txs types.Transactions)
	NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash)
	Discard(hashes []common.Hash)
}
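
// The sketch below is illustrative only and not part of the original file: a
// minimal no-op TxRelayBackend, assuming callers only need the non-blocking
// contract described above (e.g. as a placeholder in tests).
var _ TxRelayBackend = nopTxRelay{}

type nopTxRelay struct{}

// Send would normally hand the transactions to the network layer; here it drops them.
func (nopTxRelay) Send(txs types.Transactions) {}

// NewHead would normally record the head transition and the mined/rolled back hashes.
func (nopTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) {}

// Discard would normally stop tracking the given transaction hashes.
func (nopTxRelay) Discard(hashes []common.Hash) {}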

// NewTxPool creates a new light transaction pool
func NewTxPool(config *params.ChainConfig, chain *LightChain, relay TxRelayBackend) *TxPool {
	pool := &TxPool{
		config:      config,
		signer:      types.NewEIP155Signer(config.ChainID),
		nonce:       make(map[common.Address]uint64),
		pending:     make(map[common.Hash]*types.Transaction),
		mined:       make(map[common.Hash][]*types.Transaction),
		quit:        make(chan bool),
		chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
		chain:       chain,
		relay:       relay,
		odr:         chain.Odr(),
		chainDb:     chain.Odr().Database(),
		head:        chain.CurrentHeader().Hash(),
		clearIdx:    chain.CurrentHeader().Number.Uint64(),
	}
	// Subscribe events from blockchain
	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
	go pool.eventLoop()

	return pool
}
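
// Illustrative only, not in the original file: wiring a pool to the no-op relay
// sketch above. The chain config and LightChain are assumed to be supplied by
// the caller.
func exampleNewTxPool(config *params.ChainConfig, chain *LightChain) *TxPool {
	return NewTxPool(config, chain, nopTxRelay{})
}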

// currentState returns the light state of the current head header
func (pool *TxPool) currentState(ctx context.Context) *state.StateDB {
	return NewState(ctx, pool.chain.CurrentHeader(), pool.odr)
}

// GetNonce returns the "pending" nonce of a given address. It always queries
// the nonce belonging to the latest header too in order to detect if another
// client using the same key sent a transaction.
func (pool *TxPool) GetNonce(ctx context.Context, addr common.Address) (uint64, error) {
	state := pool.currentState(ctx)
	nonce := state.GetNonce(addr)
	if state.Error() != nil {
		return 0, state.Error()
	}
	sn, ok := pool.nonce[addr]
	if ok && sn > nonce {
		nonce = sn
	}
	if !ok || sn < nonce {
		pool.nonce[addr] = nonce
	}
	return nonce, nil
}
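
// Illustrative only, not in the original file: reading the "pending" nonce
// under a deadline, since the state lookup may go through ODR. The 5 second
// timeout is an assumption for demonstration.
func exampleGetNonce(pool *TxPool, addr common.Address) (uint64, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return pool.GetNonce(ctx, addr)
}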

// txStateChanges stores the recent changes between pending/mined states of
// transactions. True means mined, false means rolled back, no entry means no change
type txStateChanges map[common.Hash]bool

// setState sets the status of a tx to either recently mined or recently rolled back
func (txc txStateChanges) setState(txHash common.Hash, mined bool) {
	val, ent := txc[txHash]
	if ent && (val != mined) {
		delete(txc, txHash)
	} else {
		txc[txHash] = mined
	}
}

// getLists creates lists of mined and rolled back tx hashes
func (txc txStateChanges) getLists() (mined []common.Hash, rollback []common.Hash) {
	for hash, val := range txc {
		if val {
			mined = append(mined, hash)
		} else {
			rollback = append(rollback, hash)
		}
	}
	return
}
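
// Illustrative only, not in the original file: opposite transitions for the
// same hash cancel out in txStateChanges, so a transaction that is mined and
// then rolled back within the same head update is reported in neither list.
func exampleTxStateChanges(hash common.Hash) (mined, rollback []common.Hash) {
	txc := make(txStateChanges)
	txc.setState(hash, true)  // recently mined
	txc.setState(hash, false) // opposite state: the entry is deleted again
	return txc.getLists()     // both lists come back empty for this hash
}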

// checkMinedTxs checks newly added blocks for the currently pending transactions
// and marks them as mined if necessary. It also stores block position in the db
// and adds them to the received txStateChanges map.
func (pool *TxPool) checkMinedTxs(ctx context.Context, hash common.Hash, number uint64, txc txStateChanges) error {
	// If no transactions are pending, we don't care about anything
	if len(pool.pending) == 0 {
		return nil
	}
	block, err := GetBlock(ctx, pool.odr, hash, number)
	if err != nil {
		return err
	}
	// Gather all the local transactions mined in this block
	list := pool.mined[hash]
	for _, tx := range block.Transactions() {
		if _, ok := pool.pending[tx.Hash()]; ok {
			list = append(list, tx)
		}
	}
	// If some transactions have been mined, write the needed data to disk and update
	if list != nil {
		// Retrieve all the receipts belonging to this block and write the lookup table
		if _, err := GetBlockReceipts(ctx, pool.odr, hash, number); err != nil { // ODR caches, ignore results
			return err
		}
		rawdb.WriteTxLookupEntriesByBlock(pool.chainDb, block)

		// Update the transaction pool's state
		for _, tx := range list {
			delete(pool.pending, tx.Hash())
			txc.setState(tx.Hash(), true)
		}
		pool.mined[hash] = list
	}
	return nil
}

// rollbackTxs marks the transactions contained in recently rolled back blocks
// as rolled back. It also removes any positional lookup entries.
func (pool *TxPool) rollbackTxs(hash common.Hash, txc txStateChanges) {
	batch := pool.chainDb.NewBatch()
	if list, ok := pool.mined[hash]; ok {
		for _, tx := range list {
			txHash := tx.Hash()
			rawdb.DeleteTxLookupEntry(batch, txHash)
			pool.pending[txHash] = tx
			txc.setState(txHash, false)
		}
		delete(pool.mined, hash)
	}
	batch.Write()
}

// reorgOnNewHead sets a new head header, processing (and rolling back if necessary)
// the blocks since the last known head and returns a txStateChanges map containing
// the recently mined and rolled back transaction hashes. If an error (context
// timeout) occurs during checking new blocks, it leaves the locally known head
// at the latest checked block and still returns a valid txStateChanges, making it
// possible to continue checking the missing blocks at the next chain head event
func (pool *TxPool) reorgOnNewHead(ctx context.Context, newHeader *types.Header) (txStateChanges, error) {
	txc := make(txStateChanges)
	oldh := pool.chain.GetHeaderByHash(pool.head)
	newh := newHeader
	// find common ancestor, create list of rolled back and new block hashes
	var oldHashes, newHashes []common.Hash
	for oldh.Hash() != newh.Hash() {
		if oldh.Number.Uint64() >= newh.Number.Uint64() {
			oldHashes = append(oldHashes, oldh.Hash())
			oldh = pool.chain.GetHeader(oldh.ParentHash, oldh.Number.Uint64()-1)
		}
		if oldh.Number.Uint64() < newh.Number.Uint64() {
			newHashes = append(newHashes, newh.Hash())
			newh = pool.chain.GetHeader(newh.ParentHash, newh.Number.Uint64()-1)
			if newh == nil {
				// happens when CHT syncing, nothing to do
				newh = oldh
			}
		}
	}
	if oldh.Number.Uint64() < pool.clearIdx {
		pool.clearIdx = oldh.Number.Uint64()
	}
	// roll back old blocks
	for _, hash := range oldHashes {
		pool.rollbackTxs(hash, txc)
	}
	pool.head = oldh.Hash()
	// check mined txs of new blocks (array is in reversed order)
	for i := len(newHashes) - 1; i >= 0; i-- {
		hash := newHashes[i]
		if err := pool.checkMinedTxs(ctx, hash, newHeader.Number.Uint64()-uint64(i), txc); err != nil {
			return txc, err
		}
		pool.head = hash
	}
	// clear old mined tx entries of old blocks
	if idx := newHeader.Number.Uint64(); idx > pool.clearIdx+txPermanent {
		idx2 := idx - txPermanent
		if len(pool.mined) > 0 {
			for i := pool.clearIdx; i < idx2; i++ {
				hash := rawdb.ReadCanonicalHash(pool.chainDb, i)
				if list, ok := pool.mined[hash]; ok {
					hashes := make([]common.Hash, len(list))
					for i, tx := range list {
						hashes[i] = tx.Hash()
					}
					pool.relay.Discard(hashes)
					delete(pool.mined, hash)
				}
			}
		}
		pool.clearIdx = idx2
	}
	return txc, nil
}

// blockCheckTimeout is the time limit for checking new blocks for mined
// transactions. Checking resumes at the next chain head event if timed out.
const blockCheckTimeout = time.Second * 3

// eventLoop processes chain head events and also notifies the tx relay backend
// about the new head hash and tx state changes
func (pool *TxPool) eventLoop() {
	for {
		select {
		case ev := <-pool.chainHeadCh:
			pool.setNewHead(ev.Block.Header())
			// hack in order to avoid hogging the lock; this part will
			// be replaced by a subsequent PR.
			time.Sleep(time.Millisecond)

		// System stopped
		case <-pool.chainHeadSub.Err():
			return
		}
	}
}

func (pool *TxPool) setNewHead(head *types.Header) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	ctx, cancel := context.WithTimeout(context.Background(), blockCheckTimeout)
	defer cancel()

	txc, _ := pool.reorgOnNewHead(ctx, head)
	m, r := txc.getLists()
	pool.relay.NewHead(pool.head, m, r)

	// Update fork indicator by next pending block number
	next := new(big.Int).Add(head.Number, big.NewInt(1))
	pool.istanbul = pool.config.IsIstanbul(next)
}

// Stop stops the light transaction pool
func (pool *TxPool) Stop() {
	// Unsubscribe all subscriptions registered from txpool
	pool.scope.Close()
	// Unsubscribe subscriptions registered from blockchain
	pool.chainHeadSub.Unsubscribe()
	close(pool.quit)
	log.Info("Transaction pool stopped")
}

// SubscribeNewTxsEvent registers a subscription of core.NewTxsEvent and
// starts sending events to the given channel.
func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
	return pool.scope.Track(pool.txFeed.Subscribe(ch))
}
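
// Illustrative only, not in the original file: a minimal consumer of
// SubscribeNewTxsEvent. The channel buffer size and the logging are assumptions
// for demonstration; a real consumer would do its own processing.
func exampleWatchNewTxs(pool *TxPool) {
	ch := make(chan core.NewTxsEvent, 16)
	sub := pool.SubscribeNewTxsEvent(ch)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-ch:
			log.Debug("Observed pooled transactions", "count", len(ev.Txs))
		case <-sub.Err():
			// Subscription closed (e.g. the pool was stopped), stop watching.
			return
		}
	}
}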

// Stats returns the number of currently pending (locally created) transactions
func (pool *TxPool) Stats() (pending int) {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	pending = len(pool.pending)
	return
}

// validateTx checks whether a transaction is valid according to the consensus rules.
func (pool *TxPool) validateTx(ctx context.Context, tx *types.Transaction) error {
	// Validate sender
	var (
		from common.Address
		err  error
	)

	// Validate the transaction sender and its signature. Throw
	// if the from field is invalid.
	if from, err = types.Sender(pool.signer, tx); err != nil {
		return core.ErrInvalidSender
	}
	// Last but not least check for nonce errors
	currentState := pool.currentState(ctx)
	if n := currentState.GetNonce(from); n > tx.Nonce() {
		return core.ErrNonceTooLow
	}

	// Check the transaction doesn't exceed the current
	// block limit gas.
	header := pool.chain.GetHeaderByHash(pool.head)
	if header.GasLimit < tx.Gas() {
		return core.ErrGasLimit
	}

	// Transactions can't be negative. This may never happen
	// using RLP decoded transactions but may occur if you create
	// a transaction using the RPC for example.
	if tx.Value().Sign() < 0 {
		return core.ErrNegativeValue
	}

	// Transactor should have enough funds to cover the costs
	// cost == V + GP * GL
	if b := currentState.GetBalance(from); b.Cmp(tx.Cost()) < 0 {
		return core.ErrInsufficientFunds
	}

	// Should supply enough intrinsic gas
	gas, err := core.IntrinsicGas(tx.Data(), tx.To() == nil, true, pool.istanbul)
	if err != nil {
		return err
	}
	if tx.Gas() < gas {
		return core.ErrIntrinsicGas
	}
	return currentState.Error()
}

// add validates a new transaction and sets its state pending if processable.
// It also updates the locally stored nonce if necessary.
func (pool *TxPool) add(ctx context.Context, tx *types.Transaction) error {
	hash := tx.Hash()

	if pool.pending[hash] != nil {
		return fmt.Errorf("Known transaction (%x)", hash[:4])
	}
	err := pool.validateTx(ctx, tx)
	if err != nil {
		return err
	}

	if _, ok := pool.pending[hash]; !ok {
		pool.pending[hash] = tx

		nonce := tx.Nonce() + 1

		addr, _ := types.Sender(pool.signer, tx)
		if nonce > pool.nonce[addr] {
			pool.nonce[addr] = nonce
		}

		// Notify the subscribers. This event is posted in a goroutine
		// because it's possible that somewhere during the post "Remove transaction"
		// gets called which will then wait for the global tx pool lock and deadlock.
		go pool.txFeed.Send(core.NewTxsEvent{Txs: types.Transactions{tx}})
	}

	// Print a log message if low enough level is set
	log.Debug("Pooled new transaction", "hash", hash, "from", log.Lazy{Fn: func() common.Address { from, _ := types.Sender(pool.signer, tx); return from }}, "to", tx.To())
	return nil
}

// Add adds a transaction to the pool if valid and passes it to the tx relay
// backend
func (pool *TxPool) Add(ctx context.Context, tx *types.Transaction) error {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	data, err := rlp.EncodeToBytes(tx)
	if err != nil {
		return err
	}

	if err := pool.add(ctx, tx); err != nil {
		return err
	}
	//fmt.Println("Send", tx.Hash())
	pool.relay.Send(types.Transactions{tx})

	pool.chainDb.Put(tx.Hash().Bytes(), data)
	return nil
}
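
// Illustrative only, not in the original file: submitting a signed transaction
// with a deadline, so that ODR-backed validation cannot block indefinitely. The
// 10 second timeout is an assumption for demonstration.
func exampleAdd(pool *TxPool, signedTx *types.Transaction) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return pool.Add(ctx, signedTx)
}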

// AddBatch adds all valid transactions to the pool and passes them to
// the tx relay backend
func (pool *TxPool) AddBatch(ctx context.Context, txs []*types.Transaction) {
	pool.mu.Lock()
	defer pool.mu.Unlock()
	var sendTx types.Transactions

	for _, tx := range txs {
		if err := pool.add(ctx, tx); err == nil {
			sendTx = append(sendTx, tx)
		}
	}
	if len(sendTx) > 0 {
		pool.relay.Send(sendTx)
	}
}

// GetTransaction returns a transaction if it is contained in the pool
// and nil otherwise.
func (pool *TxPool) GetTransaction(hash common.Hash) *types.Transaction {
	// check the txs first
	if tx, ok := pool.pending[hash]; ok {
		return tx
	}
	return nil
}

// GetTransactions returns all currently processable transactions.
// The returned slice may be modified by the caller.
func (pool *TxPool) GetTransactions() (txs types.Transactions, err error) {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	txs = make(types.Transactions, len(pool.pending))
	i := 0
	for _, tx := range pool.pending {
		txs[i] = tx
		i++
	}
	return txs, nil
}

// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and nonce.
func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	// Retrieve all the pending transactions and sort by account and by nonce
	pending := make(map[common.Address]types.Transactions)
	for _, tx := range pool.pending {
		account, _ := types.Sender(pool.signer, tx)
		pending[account] = append(pending[account], tx)
	}
	// There are no queued transactions in a light pool, just return an empty map
	queued := make(map[common.Address]types.Transactions)
	return pending, queued
}

// RemoveTransactions removes all given transactions from the pool.
func (pool *TxPool) RemoveTransactions(txs types.Transactions) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	var hashes []common.Hash
	batch := pool.chainDb.NewBatch()
	for _, tx := range txs {
		hash := tx.Hash()
		delete(pool.pending, hash)
		batch.Delete(hash.Bytes())
		hashes = append(hashes, hash)
	}
	batch.Write()
	pool.relay.Discard(hashes)
}

// RemoveTx removes the transaction with the given hash from the pool.
func (pool *TxPool) RemoveTx(hash common.Hash) {
	pool.mu.Lock()
	defer pool.mu.Unlock()
	// delete from pending pool
	delete(pool.pending, hash)
	pool.chainDb.Delete(hash[:])
	pool.relay.Discard([]common.Hash{hash})
}