core: move TxPool reorg and events to background goroutine (#19705)

* core: move TxPool reorg and events to background goroutine

This change moves internal queue re-shuffling work in TxPool to a
background goroutine, TxPool.runReorg. Requests to execute runReorg are
accumulated by the new scheduleReorgLoop. The new loop also accumulates
transaction events.

The motivation for this change is to make sends to txFeed synchronous
instead of sending them in one-off goroutines launched by 'add' and
'promoteExecutables'. If a downstream consumer of txFeed is blocked for
a while, reorg requests and events will queue up.
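
For illustration, a minimal, self-contained sketch of that request/scheduler
pattern (hypothetical names, heavily simplified: the real scheduleReorgLoop
also batches reset requests, dirty-account sets and the accumulated
transaction events):

    package main

    import (
        "fmt"
        "time"
    )

    type pool struct {
        reqCh chan chan struct{}
    }

    // request enqueues one reorg request and returns a channel that is
    // closed once a background run covering the request has finished.
    func (p *pool) request() chan struct{} {
        done := make(chan struct{})
        p.reqCh <- done
        return done
    }

    // scheduleLoop accumulates requests and keeps at most one background
    // run active; requests arriving mid-run are served by the next run.
    func (p *pool) scheduleLoop() {
        var (
            curDone chan struct{}   // closed by the active run, nil when idle
            waiting []chan struct{} // requests batched for the next run
        )
        for {
            // Launch the next run if idle and there is batched work.
            if curDone == nil && len(waiting) > 0 {
                batch := waiting
                waiting = nil
                curDone = make(chan struct{})
                go func(done chan struct{}, batch []chan struct{}) {
                    time.Sleep(10 * time.Millisecond) // stand-in for runReorg
                    for _, w := range batch {
                        close(w) // wake every caller covered by this run
                    }
                    close(done)
                }(curDone, batch)
            }
            select {
            case done := <-p.reqCh:
                waiting = append(waiting, done)
            case <-curDone: // a nil channel blocks, so this is inert while idle
                curDone = nil
            }
        }
    }

    func main() {
        p := &pool{reqCh: make(chan chan struct{})}
        go p.scheduleLoop()
        <-p.request() // synchronous from the caller's point of view
        fmt.Println("reorg done")
    }

This is why a stalled txFeed consumer can no longer leak goroutines: the
backlog lives in one slice inside the scheduler instead of one goroutine
per send.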

* core: remove homestead check in TxPool

This change removes tracking of the homestead block number from TxPool.
The homestead field was used to enforce a minimum gas of 53000 for
contract creations after the homestead fork, but not before it. Since
nobody would want to configure a non-homestead chain nowadays, and
contract creations usually take more than 53000 gas anyway, the extra
correctness is redundant and can be removed.
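
(For reference, the rule in question is the intrinsic-gas floor. A
simplified sketch with hypothetical names, using the well-known protocol
constants but omitting the per-byte calldata charge that the real
core.IntrinsicGas also adds; after this change the pool behaves as if
homestead were always true:)

    const (
        txGas                 = 21000 // base cost of any transaction
        txGasContractCreation = 53000 // base cost of a create, homestead onwards
    )

    // intrinsicGasFloor returns the minimum gas a transaction must carry
    // before any calldata costs are counted.
    func intrinsicGasFloor(contractCreation, homestead bool) uint64 {
        if contractCreation && homestead {
            return txGasContractCreation
        }
        return txGas
    }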

* core: fixes for review comments

* core: remove BenchmarkPoolInsert

This is useless now because there is no separate code path for
individual transactions anymore.

* core: fix pending counter metric

* core: fix pool tests

* core: dedup txpool announced events, discard stales
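
The dedup keeps at most one announced transaction per account and nonce, so
a replacement overwrites its stale predecessor before the event is sent. A
minimal sketch of that bookkeeping (hypothetical types; the pool itself uses
per-account sorted maps keyed the same way):

    type tx struct {
        sender string
        nonce  uint64
        price  int64
    }

    // queuedEvents holds at most one pending announcement per sender and
    // nonce; announcing a replacement discards the stale transaction.
    type queuedEvents map[string]map[uint64]tx

    func (q queuedEvents) announce(t tx) {
        if q[t.sender] == nil {
            q[t.sender] = make(map[uint64]tx)
        }
        q[t.sender][t.nonce] = t // same nonce: the newer tx wins
    }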

* core: reorg tx promotion/demotion to avoid weird pending gaps

Authored by Felix Lange on 2019-06-21 10:29:14 +02:00; committed by Péter Szilágyi
parent 25c3282cf1
commit 60c062e17d
2 changed files with 546 additions and 456 deletions

File diff suppressed because it is too large

@@ -200,7 +200,7 @@ func TestStateChangeDuringTransactionPoolReset(t *testing.T) {
 		t.Fatalf("Invalid nonce, want 0, got %d", nonce)
 	}
-	pool.AddRemotes(types.Transactions{tx0, tx1})
+	pool.addRemotesSync([]*types.Transaction{tx0, tx1})
 	nonce = pool.State().GetNonce(address)
 	if nonce != 2 {
@@ -209,8 +209,7 @@ func TestStateChangeDuringTransactionPoolReset(t *testing.T) {
 	// trigger state change in the background
 	trigger = true
-	pool.lockedReset(nil, nil)
-
+	<-pool.requestReset(nil, nil)
 	_, err := pool.Pending()
 	if err != nil {
@@ -268,10 +267,10 @@ func TestTransactionQueue(t *testing.T) {
 	tx := transaction(0, 100, key)
 	from, _ := deriveSender(tx)
 
 	pool.currentState.AddBalance(from, big.NewInt(1000))
-	pool.lockedReset(nil, nil)
-	pool.enqueueTx(tx.Hash(), tx)
-	pool.promoteExecutables([]common.Address{from})
+	<-pool.requestReset(nil, nil)
+	pool.enqueueTx(tx.Hash(), tx)
+	<-pool.requestPromoteExecutables(newAccountSet(pool.signer, from))
 	if len(pool.pending) != 1 {
 		t.Error("expected valid txs to be 1 is", len(pool.pending))
 	}
@@ -280,33 +279,36 @@ func TestTransactionQueue(t *testing.T) {
 	from, _ = deriveSender(tx)
 	pool.currentState.SetNonce(from, 2)
 	pool.enqueueTx(tx.Hash(), tx)
-	pool.promoteExecutables([]common.Address{from})
+	<-pool.requestPromoteExecutables(newAccountSet(pool.signer, from))
 	if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok {
 		t.Error("expected transaction to be in tx pool")
 	}
 	if len(pool.queue) > 0 {
 		t.Error("expected transaction queue to be empty. is", len(pool.queue))
 	}
+}
 
-	pool, key = setupTxPool()
+func TestTransactionQueue2(t *testing.T) {
+	t.Parallel()
+
+	pool, key := setupTxPool()
 	defer pool.Stop()
 
 	tx1 := transaction(0, 100, key)
 	tx2 := transaction(10, 100, key)
 	tx3 := transaction(11, 100, key)
-	from, _ = deriveSender(tx1)
+	from, _ := deriveSender(tx1)
 	pool.currentState.AddBalance(from, big.NewInt(1000))
-	pool.lockedReset(nil, nil)
+	pool.reset(nil, nil)
 
 	pool.enqueueTx(tx1.Hash(), tx1)
 	pool.enqueueTx(tx2.Hash(), tx2)
 	pool.enqueueTx(tx3.Hash(), tx3)
 
 	pool.promoteExecutables([]common.Address{from})
 	if len(pool.pending) != 1 {
-		t.Error("expected tx pool to be 1, got", len(pool.pending))
+		t.Error("expected pending length to be 1, got", len(pool.pending))
 	}
 	if pool.queue[from].Len() != 2 {
 		t.Error("expected len(queue) == 2, got", pool.queue[from].Len())
@@ -339,7 +341,7 @@ func TestTransactionChainFork(t *testing.T) {
 		statedb.AddBalance(addr, big.NewInt(100000000000000))
 
 		pool.chain = &testBlockChain{statedb, 1000000, new(event.Feed)}
-		pool.lockedReset(nil, nil)
+		<-pool.requestReset(nil, nil)
 	}
 	resetState()
@@ -368,7 +370,7 @@ func TestTransactionDoubleNonce(t *testing.T) {
 		statedb.AddBalance(addr, big.NewInt(100000000000000))
 
 		pool.chain = &testBlockChain{statedb, 1000000, new(event.Feed)}
-		pool.lockedReset(nil, nil)
+		<-pool.requestReset(nil, nil)
 	}
 	resetState()
@@ -384,16 +386,17 @@ func TestTransactionDoubleNonce(t *testing.T) {
 	if replace, err := pool.add(tx2, false); err != nil || !replace {
 		t.Errorf("second transaction insert failed (%v) or not reported replacement (%v)", err, replace)
 	}
-	pool.promoteExecutables([]common.Address{addr})
+	<-pool.requestPromoteExecutables(newAccountSet(signer, addr))
 	if pool.pending[addr].Len() != 1 {
 		t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
 	}
 	if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() {
 		t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash())
 	}
+
 	// Add the third transaction and ensure it's not saved (smaller price)
 	pool.add(tx3, false)
-	pool.promoteExecutables([]common.Address{addr})
+	<-pool.requestPromoteExecutables(newAccountSet(signer, addr))
 	if pool.pending[addr].Len() != 1 {
 		t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
 	}
@@ -439,7 +442,7 @@ func TestTransactionNonceRecovery(t *testing.T) {
 	addr := crypto.PubkeyToAddress(key.PublicKey)
 	pool.currentState.SetNonce(addr, n)
 	pool.currentState.AddBalance(addr, big.NewInt(100000000000000))
-	pool.lockedReset(nil, nil)
+	<-pool.requestReset(nil, nil)
 
 	tx := transaction(n, 100000, key)
 	if err := pool.AddRemote(tx); err != nil {
@@ -447,7 +450,7 @@ func TestTransactionNonceRecovery(t *testing.T) {
 	}
 	// simulate some weird re-order of transactions and missing nonce(s)
 	pool.currentState.SetNonce(addr, n-1)
-	pool.lockedReset(nil, nil)
+	<-pool.requestReset(nil, nil)
 	if fn := pool.pendingState.GetNonce(addr); fn != n-1 {
 		t.Errorf("expected nonce to be %d, got %d", n-1, fn)
 	}
@@ -491,7 +494,7 @@ func TestTransactionDropping(t *testing.T) {
 	if pool.all.Count() != 6 {
 		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6)
 	}
-	pool.lockedReset(nil, nil)
+	<-pool.requestReset(nil, nil)
 	if pool.pending[account].Len() != 3 {
 		t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
 	}
@@ -503,7 +506,7 @@ func TestTransactionDropping(t *testing.T) {
 	}
 	// Reduce the balance of the account, and check that invalidated transactions are dropped
 	pool.currentState.AddBalance(account, big.NewInt(-650))
-	pool.lockedReset(nil, nil)
+	<-pool.requestReset(nil, nil)
 
 	if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
 		t.Errorf("funded pending transaction missing: %v", tx0)
@@ -528,7 +531,7 @@ func TestTransactionDropping(t *testing.T) {
 	}
 	// Reduce the block gas limit, check that invalidated transactions are dropped
 	pool.chain.(*testBlockChain).gasLimit = 100
-	pool.lockedReset(nil, nil)
+	<-pool.requestReset(nil, nil)
 
 	if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
 		t.Errorf("funded pending transaction missing: %v", tx0)
@@ -584,7 +587,7 @@ func TestTransactionPostponing(t *testing.T) {
 			txs = append(txs, tx)
 		}
 	}
-	for i, err := range pool.AddRemotes(txs) {
+	for i, err := range pool.addRemotesSync(txs) {
 		if err != nil {
 			t.Fatalf("tx %d: failed to add transactions: %v", i, err)
 		}
@@ -599,7 +602,7 @@ func TestTransactionPostponing(t *testing.T) {
 	if pool.all.Count() != len(txs) {
 		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs))
 	}
-	pool.lockedReset(nil, nil)
+	<-pool.requestReset(nil, nil)
 	if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
 		t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs))
 	}
@@ -613,7 +616,7 @@ func TestTransactionPostponing(t *testing.T) {
 	for _, addr := range accs {
 		pool.currentState.AddBalance(addr, big.NewInt(-1))
 	}
-	pool.lockedReset(nil, nil)
+	<-pool.requestReset(nil, nil)
 
 	// The first account's first transaction remains valid, check that subsequent
 	// ones are either filtered out, or queued up for later.
@@ -680,12 +683,10 @@ func TestTransactionGapFilling(t *testing.T) {
 	defer sub.Unsubscribe()
 
 	// Create a pending and a queued transaction with a nonce-gap in between
-	if err := pool.AddRemote(transaction(0, 100000, key)); err != nil {
-		t.Fatalf("failed to add pending transaction: %v", err)
-	}
-	if err := pool.AddRemote(transaction(2, 100000, key)); err != nil {
-		t.Fatalf("failed to add queued transaction: %v", err)
-	}
+	pool.addRemotesSync([]*types.Transaction{
+		transaction(0, 100000, key),
+		transaction(2, 100000, key),
+	})
 	pending, queued := pool.Stats()
 	if pending != 1 {
 		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1)
@@ -700,7 +701,7 @@ func TestTransactionGapFilling(t *testing.T) {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
 	// Fill the nonce gap and ensure all transactions become pending
-	if err := pool.AddRemote(transaction(1, 100000, key)); err != nil {
+	if err := pool.addRemoteSync(transaction(1, 100000, key)); err != nil {
 		t.Fatalf("failed to add gapped transaction: %v", err)
 	}
 	pending, queued = pool.Stats()
@@ -732,7 +733,7 @@ func TestTransactionQueueAccountLimiting(t *testing.T) {
 
 	// Keep queuing up transactions and make sure all above a limit are dropped
 	for i := uint64(1); i <= testTxPoolConfig.AccountQueue+5; i++ {
-		if err := pool.AddRemote(transaction(i, 100000, key)); err != nil {
+		if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil {
 			t.Fatalf("tx %d: failed to add transaction: %v", i, err)
 		}
 		if len(pool.pending) != 0 {
@@ -799,7 +800,7 @@ func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) {
 		nonces[addr]++
 	}
 	// Import the batch and verify that limits have been enforced
-	pool.AddRemotes(txs)
+	pool.addRemotesSync(txs)
 
 	queued := 0
 	for addr, list := range pool.queue {
@@ -932,7 +933,7 @@ func TestTransactionPendingLimiting(t *testing.T) {
 
 	// Keep queuing up transactions and make sure all above a limit are dropped
 	for i := uint64(0); i < testTxPoolConfig.AccountQueue+5; i++ {
-		if err := pool.AddRemote(transaction(i, 100000, key)); err != nil {
+		if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil {
 			t.Fatalf("tx %d: failed to add transaction: %v", i, err)
 		}
 		if pool.pending[account].Len() != int(i)+1 {
@@ -953,57 +954,6 @@ func TestTransactionPendingLimiting(t *testing.T) {
 	}
 }
 
-// Tests that the transaction limits are enforced the same way irrelevant whether
-// the transactions are added one by one or in batches.
-func TestTransactionQueueLimitingEquivalency(t *testing.T)   { testTransactionLimitingEquivalency(t, 1) }
-func TestTransactionPendingLimitingEquivalency(t *testing.T) { testTransactionLimitingEquivalency(t, 0) }
-
-func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
-	t.Parallel()
-
-	// Add a batch of transactions to a pool one by one
-	pool1, key1 := setupTxPool()
-	defer pool1.Stop()
-
-	account1, _ := deriveSender(transaction(0, 0, key1))
-	pool1.currentState.AddBalance(account1, big.NewInt(1000000))
-	for i := uint64(0); i < testTxPoolConfig.AccountQueue+5; i++ {
-		if err := pool1.AddRemote(transaction(origin+i, 100000, key1)); err != nil {
-			t.Fatalf("tx %d: failed to add transaction: %v", i, err)
-		}
-	}
-	// Add a batch of transactions to a pool in one big batch
-	pool2, key2 := setupTxPool()
-	defer pool2.Stop()
-
-	account2, _ := deriveSender(transaction(0, 0, key2))
-	pool2.currentState.AddBalance(account2, big.NewInt(1000000))
-
-	txs := []*types.Transaction{}
-	for i := uint64(0); i < testTxPoolConfig.AccountQueue+5; i++ {
-		txs = append(txs, transaction(origin+i, 100000, key2))
-	}
-	pool2.AddRemotes(txs)
-
-	// Ensure the batch optimization honors the same pool mechanics
-	if len(pool1.pending) != len(pool2.pending) {
-		t.Errorf("pending transaction count mismatch: one-by-one algo: %d, batch algo: %d", len(pool1.pending), len(pool2.pending))
-	}
-	if len(pool1.queue) != len(pool2.queue) {
-		t.Errorf("queued transaction count mismatch: one-by-one algo: %d, batch algo: %d", len(pool1.queue), len(pool2.queue))
-	}
-	if pool1.all.Count() != pool2.all.Count() {
-		t.Errorf("total transaction count mismatch: one-by-one algo %d, batch algo %d", pool1.all.Count(), pool2.all.Count())
-	}
-	if err := validateTxPoolInternals(pool1); err != nil {
-		t.Errorf("pool 1 internal state corrupted: %v", err)
-	}
-	if err := validateTxPoolInternals(pool2); err != nil {
-		t.Errorf("pool 2 internal state corrupted: %v", err)
-	}
-}
-
 // Tests that if the transaction count belonging to multiple accounts go above
 // some hard threshold, the higher transactions are dropped to prevent DOS
 // attacks.
@@ -1038,7 +988,7 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
 		}
 	}
 	// Import the batch and verify that limits have been enforced
-	pool.AddRemotes(txs)
+	pool.addRemotesSync(txs)
 
 	pending := 0
 	for _, list := range pool.pending {
@@ -1118,7 +1068,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
 		}
 	}
 	// Import the batch and verify that limits have been enforced
-	pool.AddRemotes(txs)
+	pool.addRemotesSync(txs)
 
 	for addr, list := range pool.pending {
 		if list.Len() != int(config.AccountSlots) {
@@ -1174,7 +1124,7 @@ func TestTransactionPoolRepricing(t *testing.T) {
 	ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[3])
 
 	// Import the batch and that both pending and queued transactions match up
-	pool.AddRemotes(txs)
+	pool.addRemotesSync(txs)
 	pool.AddLocal(ltx)
 
 	pending, queued := pool.Stats()
@@ -1454,7 +1404,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) {
 	for i := uint64(0); i < config.GlobalSlots; i++ {
 		txs = append(txs, pricedTransaction(i, 100000, big.NewInt(1), keys[0]))
 	}
-	pool.AddRemotes(txs)
+	pool.addRemotesSync(txs)
 
 	pending, queued := pool.Stats()
 	if pending != int(config.GlobalSlots) {
@@ -1470,7 +1420,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
 	// Ensure that adding high priced transactions drops a cheap, but doesn't produce a gap
-	if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil {
+	if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil {
 		t.Fatalf("failed to add well priced transaction: %v", err)
 	}
 	pending, queued = pool.Stats()
@@ -1513,7 +1463,7 @@ func TestTransactionReplacement(t *testing.T) {
 	price := int64(100)
 	threshold := (price * (100 + int64(testTxPoolConfig.PriceBump))) / 100
 
-	if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil {
+	if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil {
 		t.Fatalf("failed to add original cheap pending transaction: %v", err)
 	}
 	if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced {
@@ -1526,7 +1476,7 @@ func TestTransactionReplacement(t *testing.T) {
 		t.Fatalf("cheap replacement event firing failed: %v", err)
 	}
 
-	if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil {
+	if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil {
 		t.Fatalf("failed to add original proper pending transaction: %v", err)
 	}
 	if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced {
@@ -1538,6 +1488,7 @@ func TestTransactionReplacement(t *testing.T) {
 	if err := validateEvents(events, 2); err != nil {
 		t.Fatalf("proper replacement event firing failed: %v", err)
 	}
+
 	// Add queued transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too)
 	if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil {
 		t.Fatalf("failed to add original cheap queued transaction: %v", err)
@@ -1615,7 +1566,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) {
 	if err := pool.AddLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
 		t.Fatalf("failed to add local transaction: %v", err)
 	}
-	if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), remote)); err != nil {
+	if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), remote)); err != nil {
 		t.Fatalf("failed to add remote transaction: %v", err)
 	}
 	pending, queued := pool.Stats()
@@ -1653,7 +1604,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) {
 	}
 	// Bump the nonce temporarily and ensure the newly invalidated transaction is removed
 	statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2)
-	pool.lockedReset(nil, nil)
+	<-pool.requestReset(nil, nil)
 	time.Sleep(2 * config.Rejournal)
 	pool.Stop()
@@ -1707,7 +1658,7 @@ func TestTransactionStatusCheck(t *testing.T) {
 	txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2])) // Queued only
 
 	// Import the transaction and ensure they are correctly added
-	pool.AddRemotes(txs)
+	pool.addRemotesSync(txs)
 
 	pending, queued := pool.Stats()
 	if pending != 2 {
@@ -1786,26 +1737,6 @@ func benchmarkFuturePromotion(b *testing.B, size int) {
 	}
 }
 
-// Benchmarks the speed of iterative transaction insertion.
-func BenchmarkPoolInsert(b *testing.B) {
-	// Generate a batch of transactions to enqueue into the pool
-	pool, key := setupTxPool()
-	defer pool.Stop()
-
-	account, _ := deriveSender(transaction(0, 0, key))
-	pool.currentState.AddBalance(account, big.NewInt(1000000))
-
-	txs := make(types.Transactions, b.N)
-	for i := 0; i < b.N; i++ {
-		txs[i] = transaction(uint64(i), 100000, key)
-	}
-	// Benchmark importing the transactions into the queue
-	b.ResetTimer()
-	for _, tx := range txs {
-		pool.AddRemote(tx)
-	}
-}
-
 // Benchmarks the speed of batched transaction insertion.
 func BenchmarkPoolBatchInsert100(b *testing.B)  { benchmarkPoolBatchInsert(b, 100) }
 func BenchmarkPoolBatchInsert1000(b *testing.B) { benchmarkPoolBatchInsert(b, 1000) }