Compare commits
v1.4.9 ... no-discard (4 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 405c236884 |  |
|  | 6778d297b5 |  |
|  | eec121633c |  |
|  | 4b262b78f0 |  |
CHANGELOG.md (22 changed lines)
@@ -1,26 +1,4 @@
 # Changelog
-
-## v1.4.9
-
-### FEATURE
-* [\#2463](https://github.com/bnb-chain/bsc/pull/2463) utils: add check_blobtx.js
-* [\#2470](https://github.com/bnb-chain/bsc/pull/2470) jsutils: faucet successful requests within blocks
-* [\#2467](https://github.com/bnb-chain/bsc/pull/2467) internal/ethapi: add optional parameter for blobSidecars
-
-### IMPROVEMENT
-* [\#2462](https://github.com/bnb-chain/bsc/pull/2462) cmd/utils: add a flag to change breathe block interval for testing
-* [\#2497](https://github.com/bnb-chain/bsc/pull/2497) params/config: add Bohr hardfork
-* [\#2479](https://github.com/bnb-chain/bsc/pull/2479) dev: ensure consistency in BPS bundle result
-
-### BUGFIX
-* [\#2461](https://github.com/bnb-chain/bsc/pull/2461) eth/handler: check lists in body before broadcast blocks
-* [\#2455](https://github.com/bnb-chain/bsc/pull/2455) cmd: fix memory leak when big dataset
-* [\#2466](https://github.com/bnb-chain/bsc/pull/2466) sync: fix some sync issues caused by prune-block.
-* [\#2475](https://github.com/bnb-chain/bsc/pull/2475) fix: move mev op to MinerAPI & add command to console
-* [\#2473](https://github.com/bnb-chain/bsc/pull/2473) fix: limit the gas price of the mev bid
-* [\#2484](https://github.com/bnb-chain/bsc/pull/2484) fix: fix inspect database error
-* [\#2481](https://github.com/bnb-chain/bsc/pull/2481) fix: keep 9W blocks in ancient db when prune block
-* [\#2495](https://github.com/bnb-chain/bsc/pull/2495) fix: add an empty freeze db
-* [\#2507](https://github.com/bnb-chain/bsc/pull/2507) fix: waiting for the last simulation before pick best bid
 
 ## v1.4.8
 
 ### FEATURE
 * [\#2483](https://github.com/bnb-chain/bsc/pull/2483) core/vm: add secp256r1 into PrecompiledContractsHaber
@@ -406,7 +406,7 @@ func inspectTrie(ctx *cli.Context) error {
 			var err error
 			blockNumber, err = strconv.ParseUint(ctx.Args().Get(0), 10, 64)
 			if err != nil {
-				return fmt.Errorf("failed to parse blocknum, Args[0]: %v, err: %v", ctx.Args().Get(0), err)
+				return fmt.Errorf("failed to Parse blocknum, Args[0]: %v, err: %v", ctx.Args().Get(0), err)
 			}
 		}
@@ -417,26 +417,26 @@ func inspectTrie(ctx *cli.Context) error {
 			var err error
 			jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
 			if err != nil {
-				return fmt.Errorf("failed to parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
+				return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
 			}
 			topN = 10
 		} else {
 			var err error
 			jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
 			if err != nil {
-				return fmt.Errorf("failed to parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
+				return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
 			}
 
 			topN, err = strconv.ParseUint(ctx.Args().Get(2), 10, 64)
 			if err != nil {
-				return fmt.Errorf("failed to parse topn, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
+				return fmt.Errorf("failed to Parse topn, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
 			}
 		}
 
 		if blockNumber != math.MaxUint64 {
 			headerBlockHash = rawdb.ReadCanonicalHash(db, blockNumber)
 			if headerBlockHash == (common.Hash{}) {
-				return errors.New("ReadHeadBlockHash empty hash")
+				return errors.New("ReadHeadBlockHash empry hash")
 			}
 			blockHeader := rawdb.ReadHeader(db, headerBlockHash, blockNumber)
 			trieRootHash = blockHeader.Root
@@ -508,7 +508,7 @@ func ancientInspect(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack, true, false)
+	db := utils.MakeChainDatabase(ctx, stack, true, true)
 	defer db.Close()
 	return rawdb.AncientInspect(db)
 }
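The inspectTrie hunks above all follow the same positional-argument pattern: read `ctx.Args().Get(i)`, convert with `strconv.ParseUint`, and wrap failures in a descriptive error. A minimal standalone sketch of that pattern, assuming plain `os.Args` instead of the cli framework (the helper name and wiring are hypothetical, not from the diff):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// parseUintArg converts one positional argument to uint64, mirroring the
// strconv.ParseUint error-wrapping style used in inspectTrie above.
func parseUintArg(args []string, idx int, name string) (uint64, error) {
	if idx >= len(args) {
		return 0, fmt.Errorf("missing argument %q at position %d", name, idx)
	}
	v, err := strconv.ParseUint(args[idx], 10, 64)
	if err != nil {
		return 0, fmt.Errorf("failed to parse %s, Args[%d]: %v, err: %v", name, idx, args[idx], err)
	}
	return v, nil
}

func main() {
	blockNumber, err := parseUintArg(os.Args[1:], 0, "blocknum")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("block number:", blockNumber)
}
```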
@@ -193,7 +193,7 @@ func (db *nofreezedb) Ancients() (uint64, error) {
 	return 0, errNotSupported
 }
 
-// ItemAmountInAncient returns an error as we don't have a backing chain freezer.
+// Ancients returns an error as we don't have a backing chain freezer.
 func (db *nofreezedb) ItemAmountInAncient() (uint64, error) {
 	return 0, errNotSupported
 }
@@ -331,110 +331,6 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
 	return &nofreezedb{KeyValueStore: db}
 }
 
-type emptyfreezedb struct {
-	ethdb.KeyValueStore
-}
-
-// HasAncient returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) HasAncient(kind string, number uint64) (bool, error) {
-	return false, nil
-}
-
-// Ancient returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) Ancient(kind string, number uint64) ([]byte, error) {
-	return nil, nil
-}
-
-// AncientRange returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
-	return nil, nil
-}
-
-// Ancients returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) Ancients() (uint64, error) {
-	return 0, nil
-}
-
-// ItemAmountInAncient returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) ItemAmountInAncient() (uint64, error) {
-	return 0, nil
-}
-
-// Tail returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) Tail() (uint64, error) {
-	return 0, nil
-}
-
-// AncientSize returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) AncientSize(kind string) (uint64, error) {
-	return 0, nil
-}
-
-// ModifyAncients returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
-	return 0, nil
-}
-
-// TruncateHead returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) TruncateHead(items uint64) (uint64, error) {
-	return 0, nil
-}
-
-// TruncateTail returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) TruncateTail(items uint64) (uint64, error) {
-	return 0, nil
-}
-
-// TruncateTableTail returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) TruncateTableTail(kind string, tail uint64) (uint64, error) {
-	return 0, nil
-}
-
-// ResetTable returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) ResetTable(kind string, startAt uint64, onlyEmpty bool) error {
-	return nil
-}
-
-// Sync returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) Sync() error {
-	return nil
-}
-
-func (db *emptyfreezedb) DiffStore() ethdb.KeyValueStore     { return db }
-func (db *emptyfreezedb) SetDiffStore(diff ethdb.KeyValueStore) {}
-func (db *emptyfreezedb) StateStore() ethdb.Database         { return db }
-func (db *emptyfreezedb) SetStateStore(state ethdb.Database) {}
-func (db *emptyfreezedb) StateStoreReader() ethdb.Reader     { return db }
-func (db *emptyfreezedb) BlockStore() ethdb.Database         { return db }
-func (db *emptyfreezedb) SetBlockStore(block ethdb.Database) {}
-func (db *emptyfreezedb) HasSeparateBlockStore() bool        { return false }
-func (db *emptyfreezedb) BlockStoreReader() ethdb.Reader     { return db }
-func (db *emptyfreezedb) BlockStoreWriter() ethdb.Writer     { return db }
-func (db *emptyfreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
-	return nil
-}
-func (db *emptyfreezedb) AncientOffSet() uint64 { return 0 }
-
-// MigrateTable returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
-	return nil
-}
-
-// AncientDatadir returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) AncientDatadir() (string, error) {
-	return "", nil
-}
-func (db *emptyfreezedb) SetupFreezerEnv(env *ethdb.FreezerEnv) error {
-	return nil
-}
-
-// NewEmptyFreezeDB is used for CLI such as `geth db inspect` in pruned db that we don't
-// have a backing chain freezer.
-// WARNING: it must be only used in the above case.
-func NewEmptyFreezeDB(db ethdb.KeyValueStore) ethdb.Database {
-	return &emptyfreezedb{KeyValueStore: db}
-}
-
 // NewFreezerDb only create a freezer without statedb.
 func NewFreezerDb(db ethdb.KeyValueStore, frz, namespace string, readonly bool, newOffSet uint64) (*Freezer, error) {
 	// Create the idle freezer instance, this operation should be atomic to avoid mismatch between offset and acientDB.
@@ -484,12 +380,6 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
 		offset = ReadOffSetOfCurrentAncientFreezer(db)
 	}
 
-	// This case is used for someone who wants to execute geth db inspect CLI in a pruned db
-	if !disableFreeze && readonly && ReadAncientType(db) == PruneFreezerType {
-		log.Warn("Disk db is pruned, using an empty freezer db for CLI")
-		return NewEmptyFreezeDB(db), nil
-	}
-
 	if pruneAncientData && !disableFreeze && !readonly {
 		frdb, err := newPrunedFreezer(resolveChainFreezerDir(ancient), db, offset)
 		if err != nil {
@@ -744,7 +634,7 @@ func Open(o OpenOptions) (ethdb.Database, error) {
 	}
 	if ReadAncientType(kvdb) == PruneFreezerType {
 		if !o.PruneAncientData {
-			log.Warn("NOTICE: You're opening a pruned disk db!")
+			log.Warn("Disk db is pruned")
 		}
 	}
 	if len(o.AncientsDirectory) == 0 {
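The emptyfreezedb hunks above revert PR #2495's null-object trick: an ancient store whose every method returns a zero value and a nil error, so read-only tooling like `geth db inspect` can open a pruned database that has no backing chain freezer. A minimal self-contained sketch of the same pattern, assuming a deliberately trimmed stand-in interface (AncientReader below is hypothetical, not the real ethdb interface):

```go
package main

import "fmt"

// AncientReader is a trimmed stand-in for the real ethdb ancient-store
// interface, just to illustrate the null-object pattern.
type AncientReader interface {
	HasAncient(kind string, number uint64) (bool, error)
	Ancients() (uint64, error)
}

// emptyFreezer satisfies AncientReader with zero values and nil errors,
// so read-only tooling can run against a pruned database without erroring.
type emptyFreezer struct{}

func (emptyFreezer) HasAncient(kind string, number uint64) (bool, error) { return false, nil }
func (emptyFreezer) Ancients() (uint64, error)                           { return 0, nil }

// inspect works unmodified whether it gets a real freezer or the stub.
func inspect(r AncientReader) {
	n, _ := r.Ancients()
	fmt.Println("ancient items:", n) // prints 0 on a pruned db instead of failing
}

func main() {
	inspect(emptyFreezer{})
}
```

The design choice the real code makes is the same: the caller's code path stays identical, and only the constructor (NewEmptyFreezeDB vs. a real freezer) decides which behavior is wired in.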
@@ -55,6 +55,8 @@ const (
 	// txReannoMaxNum is the maximum number of transactions a reannounce action can include.
 	txReannoMaxNum = 1024
+
+	maxBufferSize = 1000 // maximum size of tx buffer
 )
 
 var (
@@ -244,6 +246,10 @@ type LegacyPool struct {
 	initDoneCh chan struct{} // is closed once the pool is initialized (for tests)
 
 	changesSinceReorg int // A counter for how many drops we've performed in-between reorg.
+
+	// A buffer to store transactions that would otherwise be discarded
+	buffer     []*types.Transaction
+	bufferLock sync.Mutex
 }
 
 type txpoolResetRequest struct {
@@ -355,11 +361,13 @@ func (pool *LegacyPool) loop() {
 		evict      = time.NewTicker(evictionInterval)
 		reannounce = time.NewTicker(reannounceInterval)
 		journal    = time.NewTicker(pool.config.Rejournal)
+		readd      = time.NewTicker(time.Minute) // ticker to re-add buffered transactions periodically
 	)
 	defer report.Stop()
 	defer evict.Stop()
 	defer reannounce.Stop()
 	defer journal.Stop()
+	defer readd.Stop() // Stop the ticker when the loop exits
 
 	// Notify tests that the init phase is done
 	close(pool.initDoneCh)

@@ -436,6 +444,9 @@ func (pool *LegacyPool) loop() {
 			}
 			pool.mu.Unlock()
 		}
+		// Handle re-adding buffered transactions
+		case <-readd.C:
+			pool.readdBufferedTransactions()
 		}
 	}
 }
@@ -781,12 +792,21 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e
 		}()
 	}
-	// If the transaction pool is full, discard underpriced transactions
+	// If the transaction pool is full, buffer underpriced transactions
 	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
-		// If the new transaction is underpriced, don't accept it
+		// If the new transaction is underpriced, buffer it
 		if !isLocal && pool.priced.Underpriced(tx) {
-			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+			log.Trace("Buffering underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
 			underpricedTxMeter.Mark(1)
+
+			pool.bufferLock.Lock()
+			if len(pool.buffer) < maxBufferSize {
+				pool.buffer = append(pool.buffer, tx)
+			} else {
+				log.Warn("Buffer is full, discarding transaction", "hash", hash)
+			}
+			pool.bufferLock.Unlock()
+
 			return false, txpool.ErrUnderpriced
 		}
@@ -804,6 +824,16 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e
 		// Otherwise if we can't make enough room for new one, abort the operation.
 		drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)
 
+		// Add dropped transactions to the buffer
+		pool.bufferLock.Lock()
+		availableSpace := maxBufferSize - len(pool.buffer)
+		// Determine how many elements to take from drop
+		if availableSpace > len(drop) {
+			availableSpace = len(drop)
+		}
+		pool.buffer = append(pool.buffer, drop[:availableSpace]...)
+		pool.bufferLock.Unlock()
+
 		// Special case, we still can't make the room for the new remote one.
 		if !isLocal && !success {
 			log.Trace("Discarding overflown transaction", "hash", hash)
@@ -1779,6 +1809,51 @@ func (pool *LegacyPool) SetMaxGas(maxGas uint64) {
 	pool.maxGas.Store(maxGas)
 }
 
+func (pool *LegacyPool) readdBufferedTransactions() {
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	// Check if there is space in the pool
+	if uint64(pool.all.Slots()) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
+		return // No space available, skip re-adding
+	}
+
+	var readded []*types.Transaction
+
+	pool.bufferLock.Lock()
+	for _, tx := range pool.buffer {
+		// Check if adding this transaction will exceed the pool capacity
+		if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
+			break // Stop if adding the transaction will exceed the pool capacity
+		}
+
+		if _, err := pool.add(tx, false); err == nil {
+			readded = append(readded, tx)
+		}
+	}
+	pool.bufferLock.Unlock()
+
+	// Remove successfully re-added transactions from the buffer
+	if len(readded) > 0 {
+		remaining := pool.buffer[:0]
+		for _, tx := range pool.buffer {
+			if !containsTransaction(readded, tx) {
+				remaining = append(remaining, tx)
+			}
+		}
+		pool.buffer = remaining
+	}
+}
+
+func containsTransaction(txs []*types.Transaction, tx *types.Transaction) bool {
+	for _, t := range txs {
+		if t.Hash() == tx.Hash() {
+			return true
+		}
+	}
+	return false
+}
+
 // addressByHeartbeat is an account address tagged with its last activity timestamp.
 type addressByHeartbeat struct {
 	address common.Address
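Taken together, the legacypool hunks implement a bounded buffer-and-retry policy: transactions that would otherwise be dropped (underpriced arrivals, or victims evicted to make room) are parked in a fixed-size buffer, and a one-minute ticker tries to re-add them once slots free up. A minimal standalone sketch of that policy, assuming a toy pool type in place of LegacyPool (the names below are illustrative, not the real API):

```go
package main

import (
	"fmt"
	"sync"
)

const maxBufferSize = 1000 // cap on parked transactions, as in the diff

// pool is a toy stand-in for LegacyPool: a capacity-limited set plus a
// buffer of transactions that did not fit.
type pool struct {
	mu       sync.Mutex
	capacity int
	active   []string // accepted tx hashes
	buffer   []string // parked tx hashes awaiting retry
}

// add accepts tx if there is room, otherwise parks it in the buffer,
// dropping it outright only when the buffer itself is full.
func (p *pool) add(tx string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if len(p.active) < p.capacity {
		p.active = append(p.active, tx)
		return
	}
	if len(p.buffer) < maxBufferSize {
		p.buffer = append(p.buffer, tx)
	}
}

// readdBuffered mirrors readdBufferedTransactions: move parked txs back
// into the pool while capacity allows, keeping the rest parked.
func (p *pool) readdBuffered() {
	p.mu.Lock()
	defer p.mu.Unlock()
	remaining := p.buffer[:0] // in-place filter over the buffer
	for _, tx := range p.buffer {
		if len(p.active) < p.capacity {
			p.active = append(p.active, tx)
		} else {
			remaining = append(remaining, tx)
		}
	}
	p.buffer = remaining
}

func main() {
	p := &pool{capacity: 2}
	for _, tx := range []string{"a", "b", "c", "d"} {
		p.add(tx)
	}
	fmt.Println(p.active, p.buffer) // [a b] [c d]
	p.active = p.active[:1]         // simulate a slot freeing up
	p.readdBuffered()
	fmt.Println(p.active, p.buffer) // [a c] [d]
}
```

The sketch collapses everything into one mutex for simplicity; the diff keeps a dedicated bufferLock alongside the pool's main lock, which is why readdBufferedTransactions acquires both before retrying.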
@@ -158,7 +158,7 @@ func (f *fetcherTester) chainFinalizedHeight() uint64 {
 	return f.blocks[f.hashes[len(f.hashes)-3]].NumberU64()
 }
 
-// insertHeaders injects a new headers into the simulated chain.
+// insertChain injects a new headers into the simulated chain.
 func (f *fetcherTester) insertHeaders(headers []*types.Header) (int, error) {
 	f.lock.Lock()
 	defer f.lock.Unlock()
@@ -29,6 +29,11 @@ import (
 const (
 	// maxBidPerBuilderPerBlock is the max bid number per builder
 	maxBidPerBuilderPerBlock = 3
+
+	// leftOverTimeRate is the rate of left over time to simulate a bid
+	leftOverTimeRate = 11
+	// leftOverTimeScale is the scale of left over time to simulate a bid
+	leftOverTimeScale = 10
 )
 
 var (
@@ -313,6 +318,18 @@ func (b *bidSimulator) newBidLoop() {
 
 	// commit aborts in-flight bid execution with given signal and resubmits a new one.
 	commit := func(reason int32, bidRuntime *BidRuntime) {
+		// if the left time is not enough to do simulation, return
+		var simDuration time.Duration
+		if lastBid := b.GetBestBid(bidRuntime.bid.ParentHash); lastBid != nil && lastBid.duration != 0 {
+			simDuration = lastBid.duration
+		}
+
+		if time.Until(b.bidMustBefore(bidRuntime.bid.ParentHash)) <= simDuration*leftOverTimeRate/leftOverTimeScale {
+			log.Debug("BidSimulator: abort commit, not enough time to simulate",
+				"builder", bidRuntime.bid.Builder, "bidHash", bidRuntime.bid.Hash().Hex())
+			return
+		}
+
 		if interruptCh != nil {
 			// each commit work will have its own interruptCh to stop work with a reason
 			interruptCh <- reason
@@ -353,7 +370,6 @@ func (b *bidSimulator) newBidLoop() {
 			expectedValidatorReward: expectedValidatorReward,
 			packedBlockReward:       big.NewInt(0),
 			packedValidatorReward:   big.NewInt(0),
-			finished:                make(chan struct{}),
 		}
 
 		simulatingBid := b.GetSimulatingBid(newBid.ParentHash)
@@ -394,6 +410,11 @@ func (b *bidSimulator) newBidLoop() {
 		}
 	}
 }
 
+func (b *bidSimulator) bidMustBefore(parentHash common.Hash) time.Time {
+	parentHeader := b.chain.GetHeaderByHash(parentHash)
+	return bidutil.BidMustBefore(parentHeader, b.chainConfig.Parlia.Period, b.delayLeftOver)
+}
+
 func (b *bidSimulator) bidBetterBefore(parentHash common.Hash) time.Time {
 	parentHeader := b.chain.GetHeaderByHash(parentHash)
 	return bidutil.BidBetterBefore(parentHeader, b.chainConfig.Parlia.Period, b.delayLeftOver, b.config.BidSimulationLeftOver)
@@ -509,6 +530,7 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
 
 	// ensure simulation exited then start next simulation
 	b.SetSimulatingBid(parentHash, bidRuntime)
+	start := time.Now()
 
 	defer func(simStart time.Time) {
 		logCtx := []any{
@@ -534,11 +556,10 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
 		}
 
 		b.RemoveSimulatingBid(parentHash)
-		close(bidRuntime.finished)
+		bidSimTimer.UpdateSince(start)
 
 		if success {
 			bidRuntime.duration = time.Since(simStart)
-			bidSimTimer.UpdateSince(simStart)
 
 			// only recommit self bid when newBidCh is empty
 			if len(b.newBidCh) > 0 {
@@ -562,14 +583,6 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
 		return
 	}
 
-	// if the left time is not enough to do simulation, return
-	delay := b.engine.Delay(b.chain, bidRuntime.env.header, &b.delayLeftOver)
-	if delay == nil || *delay <= 0 {
-		log.Info("BidSimulator: abort commit, not enough time to simulate",
-			"builder", bidRuntime.bid.Builder, "bidHash", bidRuntime.bid.Hash().Hex())
-		return
-	}
-
 	gasLimit := bidRuntime.env.header.GasLimit
 	if bidRuntime.env.gasPool == nil {
 		bidRuntime.env.gasPool = new(core.GasPool).AddGas(gasLimit)
@@ -637,12 +650,14 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
 	if b.config.GreedyMergeTx {
 		delay := b.engine.Delay(b.chain, bidRuntime.env.header, &b.delayLeftOver)
 		if delay != nil && *delay > 0 {
+			stopTimer := time.NewTimer(*delay)
+
 			bidTxsSet := mapset.NewSet[common.Hash]()
 			for _, tx := range bidRuntime.bid.Txs {
 				bidTxsSet.Add(tx.Hash())
 			}
 
-			fillErr := b.bidWorker.fillTransactions(interruptCh, bidRuntime.env, nil, bidTxsSet)
+			fillErr := b.bidWorker.fillTransactions(interruptCh, bidRuntime.env, stopTimer, bidTxsSet)
 			log.Trace("BidSimulator: greedy merge stopped", "block", bidRuntime.env.header.Number,
 				"builder", bidRuntime.bid.Builder, "tx count", bidRuntime.env.tcount-bidTxLen+1, "err", fillErr)
@@ -718,7 +733,6 @@ type BidRuntime struct {
 	packedBlockReward     *big.Int
 	packedValidatorReward *big.Int
 
-	finished chan struct{}
 	duration time.Duration
 }
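The bid-simulator hunks restore a time-budget heuristic: before committing a bid for simulation, compare the time left until the bid deadline against the last observed simulation duration, padded by leftOverTimeRate/leftOverTimeScale (i.e. 1.1x). A self-contained sketch of that check, with illustrative deadline and duration values (the helper name is mine, not from the diff):

```go
package main

import (
	"fmt"
	"time"
)

const (
	leftOverTimeRate  = 11 // numerator of the safety padding, as in the diff
	leftOverTimeScale = 10 // denominator: 11/10 = 1.1x the last sim duration
)

// enoughTimeToSimulate reports whether a new simulation is worth starting:
// the remaining time before the deadline must exceed the last simulation's
// duration scaled by 1.1, mirroring the check in newBidLoop's commit closure.
func enoughTimeToSimulate(deadline time.Time, lastSimDuration time.Duration) bool {
	return time.Until(deadline) > lastSimDuration*leftOverTimeRate/leftOverTimeScale
}

func main() {
	deadline := time.Now().Add(200 * time.Millisecond)
	fmt.Println(enoughTimeToSimulate(deadline, 150*time.Millisecond)) // true: ~200ms left > 165ms needed
	fmt.Println(enoughTimeToSimulate(deadline, 190*time.Millisecond)) // false: ~200ms left <= 209ms needed
}
```

This is the approach the head branch keeps; v1.4.9 (PR #2507) replaced it with a `finished` channel plus an explicit wait in the worker, which is why those pieces show up as removals in the worker.go hunks below.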
@@ -67,9 +67,6 @@ const (
 	// the current 4 mining loops could have asynchronous risk of mining block with
 	// save height, keep recently mined blocks to avoid double sign for safety,
 	recentMinedCacheLimit = 20
-
-	// the default to wait for the mev miner to finish
-	waitMEVMinerEndTimeLimit = 50 * time.Millisecond
 )
 
 var (
@@ -174,7 +171,6 @@ type getWorkReq struct {
 
 type bidFetcher interface {
 	GetBestBid(parentHash common.Hash) *BidRuntime
-	GetSimulatingBid(prevBlockHash common.Hash) *BidRuntime
 }
 
 // worker is the main object which takes care of submitting new work to consensus engine
@@ -1340,15 +1336,6 @@ LOOP:
 	// when in-turn, compare with remote work.
 	from := bestWork.coinbase
 	if w.bidFetcher != nil && bestWork.header.Difficulty.Cmp(diffInTurn) == 0 {
-		if pendingBid := w.bidFetcher.GetSimulatingBid(bestWork.header.ParentHash); pendingBid != nil {
-			waitBidTimer := time.NewTimer(waitMEVMinerEndTimeLimit)
-			defer waitBidTimer.Stop()
-			select {
-			case <-waitBidTimer.C:
-			case <-pendingBid.finished:
-			}
-		}
-
 		bestBid := w.bidFetcher.GetBestBid(bestWork.header.ParentHash)
 
 		if bestBid != nil {
@@ -23,7 +23,7 @@ import (
 const (
 	VersionMajor = 1 // Major version component of the current release
 	VersionMinor = 4 // Minor version component of the current release
-	VersionPatch = 9 // Patch version component of the current release
+	VersionPatch = 8 // Patch version component of the current release
 	VersionMeta  = "" // Version metadata to append to the version string
 )
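The final hunk pins the head branch back to 1.4.8: the no-discard branch predates the v1.4.9 tag, so the version constants roll back with it. For context, a sketch of how constants like these typically compose into a version string (the version helper below is illustrative, not the actual params package code):

```go
package main

import "fmt"

const (
	VersionMajor = 1  // Major version component of the current release
	VersionMinor = 4  // Minor version component of the current release
	VersionPatch = 8  // Patch version component of the current release
	VersionMeta  = "" // Version metadata to append to the version string
)

// version joins the components, appending metadata only when present.
func version() string {
	v := fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch)
	if VersionMeta != "" {
		v += "-" + VersionMeta
	}
	return v
}

func main() {
	fmt.Println(version()) // 1.4.8
}
```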