Merge branch 'develop' into cancun_code_merge_v1.13.12_v1.13.14

commit 270793f13a
Author: buddh0
Date:   2024-03-11 15:16:24 +08:00

75 changed files with 1915 additions and 150 deletions

@@ -5,6 +5,7 @@ package main
import (
"bytes"
"encoding/hex"
+ "errors"
"flag"
"fmt"
"os"
@@ -78,7 +79,7 @@ func parseExtra(hexData string) (*Extra, error) {
// decode hex into bytes
data, err := hex.DecodeString(strings.TrimPrefix(hexData, "0x"))
if err != nil {
- return nil, fmt.Errorf("invalid hex data")
+ return nil, errors.New("invalid hex data")
}
// parse ExtraVanity and ExtraSeal
@@ -99,7 +100,7 @@ func parseExtra(hexData string) (*Extra, error) {
validatorNum := int(data[0])
validatorBytesTotalLength := validatorNumberSize + validatorNum*validatorBytesLength
if dataLength < validatorBytesTotalLength {
- return nil, fmt.Errorf("parse validators failed")
+ return nil, errors.New("parse validators failed")
}
extra.ValidatorSize = uint8(validatorNum)
data = data[validatorNumberSize:]
@@ -117,7 +118,7 @@ func parseExtra(hexData string) (*Extra, error) {
// parse Vote Attestation
if dataLength > 0 {
if err := rlp.Decode(bytes.NewReader(data), &extra.VoteAttestation); err != nil {
- return nil, fmt.Errorf("parse voteAttestation failed")
+ return nil, errors.New("parse voteAttestation failed")
}
if extra.ValidatorSize > 0 {
validatorsBitSet := bitset.From([]uint64{uint64(extra.VoteAddressSet)})

@@ -266,6 +266,15 @@ func initGenesis(ctx *cli.Context) error {
}
defer chaindb.Close()
+ // if the trie data dir has been set, new trie db with a new state database
+ if ctx.IsSet(utils.SeparateDBFlag.Name) {
+ statediskdb, dbErr := stack.OpenDatabaseWithFreezer(name+"/state", 0, 0, "", "", false, false, false, false)
+ if dbErr != nil {
+ utils.Fatalf("Failed to open separate trie database: %v", dbErr)
+ }
+ chaindb.SetStateStore(statediskdb)
+ }
triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
defer triedb.Close()
@@ -717,7 +726,6 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth
}
db := utils.MakeChainDatabase(ctx, stack, true, false)
- defer db.Close()
scheme, err := rawdb.ParseStateScheme(ctx.String(utils.StateSchemeFlag.Name), db)
if err != nil {
return nil, nil, common.Hash{}, err
@@ -726,7 +734,7 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth
fmt.Println("You are using geth dump in path mode, please use `geth dump-roothash` command to get all available blocks.")
}
- var header *types.Header
+ header := &types.Header{}
if ctx.NArg() == 1 {
arg := ctx.Args().First()
if hashish(arg) {
@@ -755,7 +763,7 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth
if stateRoot := triedb.Head(); stateRoot != (common.Hash{}) {
header.Root = stateRoot
} else {
- return nil, nil, common.Hash{}, fmt.Errorf("no top state root hash in path db")
+ return nil, nil, common.Hash{}, errors.New("no top state root hash in path db")
}
} else {
header = rawdb.ReadHeadHeader(db)

@@ -203,6 +203,9 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
v := ctx.Uint64(utils.OverrideFeynman.Name)
cfg.Eth.OverrideFeynman = &v
}
+ if ctx.IsSet(utils.SeparateDBFlag.Name) && !stack.IsSeparatedDB() {
+ utils.Fatalf("Failed to locate separate database subdirectory when separatedb parameter has been set")
+ }
backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
// Create gauge with geth system and build information

@@ -30,7 +30,7 @@ import (
)
const (
- ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 miner:1.0 net:1.0 parlia:1.0 rpc:1.0 txpool:1.0 web3:1.0"
+ ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 mev:1.0 miner:1.0 net:1.0 parlia:1.0 rpc:1.0 txpool:1.0 web3:1.0"
httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0"
)

@@ -18,6 +18,7 @@ package main
import (
"bytes"
+ "errors"
"fmt"
"math"
"os"
@@ -382,7 +383,6 @@ func inspectTrie(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true, false)
defer db.Close()
var headerBlockHash common.Hash
if ctx.NArg() >= 1 {
if ctx.Args().Get(0) == "latest" {
@@ -412,7 +412,7 @@ func inspectTrie(ctx *cli.Context) error {
if blockNumber != math.MaxUint64 {
headerBlockHash = rawdb.ReadCanonicalHash(db, blockNumber)
if headerBlockHash == (common.Hash{}) {
- return fmt.Errorf("ReadHeadBlockHash empry hash")
+ return errors.New("ReadHeadBlockHash empry hash")
}
blockHeader := rawdb.ReadHeader(db, headerBlockHash, blockNumber)
trieRootHash = blockHeader.Root
@@ -563,6 +563,11 @@ func dbStats(ctx *cli.Context) error {
defer db.Close()
showLeveldbStats(db)
+ if db.StateStore() != nil {
+ fmt.Println("show stats of state store")
+ showLeveldbStats(db.StateStore())
+ }
return nil
}
@@ -576,13 +581,31 @@ func dbCompact(ctx *cli.Context) error {
log.Info("Stats before compaction")
showLeveldbStats(db)
+ statediskdb := db.StateStore()
+ if statediskdb != nil {
+ fmt.Println("show stats of state store")
+ showLeveldbStats(statediskdb)
+ }
log.Info("Triggering compaction")
if err := db.Compact(nil, nil); err != nil {
- log.Info("Compact err", "error", err)
+ log.Error("Compact err", "error", err)
return err
}
+ if statediskdb != nil {
+ if err := statediskdb.Compact(nil, nil); err != nil {
+ log.Error("Compact err", "error", err)
+ return err
+ }
+ }
log.Info("Stats after compaction")
showLeveldbStats(db)
+ if statediskdb != nil {
+ fmt.Println("show stats of state store after compaction")
+ showLeveldbStats(statediskdb)
+ }
return nil
}
@@ -603,8 +626,17 @@ func dbGet(ctx *cli.Context) error {
return err
}
+ statediskdb := db.StateStore()
data, err := db.Get(key)
if err != nil {
+ // if separate trie db exist, try to get it from separate db
+ if statediskdb != nil {
+ statedata, dberr := statediskdb.Get(key)
+ if dberr == nil {
+ fmt.Printf("key %#x: %#x\n", key, statedata)
+ return nil
+ }
+ }
log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
return err
}
@@ -620,8 +652,14 @@ func dbTrieGet(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, false, false)
- defer db.Close()
+ var db ethdb.Database
+ chaindb := utils.MakeChainDatabase(ctx, stack, true, false)
+ if chaindb.StateStore() != nil {
+ db = chaindb.StateStore()
+ } else {
+ db = chaindb
+ }
+ defer chaindb.Close()
scheme := ctx.String(utils.StateSchemeFlag.Name)
if scheme == "" {
@@ -686,8 +724,14 @@ func dbTrieDelete(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, false, false)
- defer db.Close()
+ var db ethdb.Database
+ chaindb := utils.MakeChainDatabase(ctx, stack, true, false)
+ if chaindb.StateStore() != nil {
+ db = chaindb.StateStore()
+ } else {
+ db = chaindb
+ }
+ defer chaindb.Close()
scheme := ctx.String(utils.StateSchemeFlag.Name)
if scheme == "" {
@@ -1077,10 +1121,16 @@ func hbss2pbss(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, false, false)
db.Sync()
+ stateDiskDb := db.StateStore()
defer db.Close()
// convert hbss trie node to pbss trie node
- lastStateID := rawdb.ReadPersistentStateID(db)
+ var lastStateID uint64
+ if stateDiskDb != nil {
+ lastStateID = rawdb.ReadPersistentStateID(stateDiskDb)
+ } else {
+ lastStateID = rawdb.ReadPersistentStateID(db)
+ }
if lastStateID == 0 || force {
config := triedb.HashDefaults
triedb := triedb.NewDatabase(db, config)
@@ -1103,7 +1153,7 @@ func hbss2pbss(ctx *cli.Context) error {
if *blockNumber != math.MaxUint64 {
headerBlockHash = rawdb.ReadCanonicalHash(db, *blockNumber)
if headerBlockHash == (common.Hash{}) {
- return fmt.Errorf("ReadHeadBlockHash empty hash")
+ return errors.New("ReadHeadBlockHash empty hash")
}
blockHeader := rawdb.ReadHeader(db, headerBlockHash, *blockNumber)
trieRootHash = blockHeader.Root
@@ -1111,7 +1161,7 @@ func hbss2pbss(ctx *cli.Context) error {
}
if (trieRootHash == common.Hash{}) {
log.Error("Empty root hash")
- return fmt.Errorf("Empty root hash.")
+ return errors.New("Empty root hash.")
}
id := trie.StateTrieID(trieRootHash)
@@ -1132,18 +1182,34 @@ func hbss2pbss(ctx *cli.Context) error {
}
// repair state ancient offset
- lastStateID = rawdb.ReadPersistentStateID(db)
+ if stateDiskDb != nil {
+ lastStateID = rawdb.ReadPersistentStateID(stateDiskDb)
+ } else {
+ lastStateID = rawdb.ReadPersistentStateID(db)
+ }
if lastStateID == 0 {
log.Error("Convert hbss to pbss trie node error. The last state id is still 0")
}
- ancient := stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name))
+ var ancient string
+ if db.StateStore() != nil {
+ dirName := filepath.Join(stack.ResolvePath("chaindata"), "state")
+ ancient = filepath.Join(dirName, "ancient")
+ } else {
+ ancient = stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name))
+ }
err = rawdb.ResetStateFreezerTableOffset(ancient, lastStateID)
if err != nil {
log.Error("Reset state freezer table offset failed", "error", err)
return err
}
// prune hbss trie node
- err = rawdb.PruneHashTrieNodeInDataBase(db)
+ if stateDiskDb != nil {
+ err = rawdb.PruneHashTrieNodeInDataBase(stateDiskDb)
+ } else {
+ err = rawdb.PruneHashTrieNodeInDataBase(db)
+ }
if err != nil {
log.Error("Prune Hash trie node in database failed", "error", err)
return err

cmd/geth/geth (new executable file)

Binary file not shown.

@@ -437,13 +437,15 @@ func pruneState(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, false, false)
defer chaindb.Close()
+ if rawdb.ReadStateScheme(chaindb) != rawdb.HashScheme {
+ log.Crit("Offline pruning is not required for path scheme")
+ }
prunerconfig := pruner.Config{
Datadir: stack.ResolvePath(""),
BloomSize: ctx.Uint64(utils.BloomFilterSizeFlag.Name),
}
- if rawdb.ReadStateScheme(chaindb) != rawdb.HashScheme {
- log.Crit("Offline pruning is not required for path scheme")
- }
pruner, err := pruner.NewPruner(chaindb, prunerconfig, ctx.Uint64(utils.TriesInMemoryFlag.Name))
if err != nil {
log.Error("Failed to open snapshot tree", "err", err)

@@ -93,6 +93,12 @@ var (
Value: flags.DirectoryString(node.DefaultDataDir()),
Category: flags.EthCategory,
}
+ SeparateDBFlag = &cli.BoolFlag{
+ Name: "separatedb",
+ Usage: "Enable a separated trie database, it will be created within a subdirectory called state, " +
+ "Users can copy this state directory to another directory or disk, and then create a symbolic link to the state directory under the chaindata",
+ Category: flags.EthCategory,
+ }
DirectBroadcastFlag = &cli.BoolFlag{
Name: "directbroadcast",
Usage: "Enable directly broadcast mined block to all peers",
@@ -1112,6 +1118,7 @@ var (
DBEngineFlag,
StateSchemeFlag,
HttpHeaderFlag,
+ SeparateDBFlag,
}
)
@@ -2314,6 +2321,11 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly, disableFree
chainDb, err = stack.OpenDatabase("lightchaindata", cache, handles, "", readonly)
default:
chainDb, err = stack.OpenDatabaseWithFreezer("chaindata", cache, handles, ctx.String(AncientFlag.Name), "", readonly, disableFreeze, false, false)
+ // set the separate state database
+ if stack.IsSeparatedDB() && err == nil {
+ stateDiskDb := MakeStateDataBase(ctx, stack, readonly, false)
+ chainDb.SetStateStore(stateDiskDb)
+ }
}
if err != nil {
Fatalf("Could not open database: %v", err)
@@ -2321,6 +2333,17 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly, disableFree
return chainDb
}
+ // MakeStateDataBase open a separate state database using the flags passed to the client and will hard crash if it fails.
+ func MakeStateDataBase(ctx *cli.Context, stack *node.Node, readonly, disableFreeze bool) ethdb.Database {
+ cache := ctx.Int(CacheFlag.Name) * ctx.Int(CacheDatabaseFlag.Name) / 100
+ handles := MakeDatabaseHandles(ctx.Int(FDLimitFlag.Name)) / 2
+ statediskdb, err := stack.OpenDatabaseWithFreezer("chaindata/state", cache, handles, "", "", readonly, disableFreeze, false, false)
+ if err != nil {
+ Fatalf("Failed to open separate trie database: %v", err)
+ }
+ return statediskdb
+ }
// tryMakeReadOnlyDatabase try to open the chain database in read-only mode,
// or fallback to write mode if the database is not initialized.
//
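The two hunks above wire the --separatedb flag into database construction: when the node reports a separated layout, a second freezer-backed database is opened under chaindata/state and attached with SetStateStore. Below is a minimal sketch of that wiring using only the node/ethdb calls that appear in this diff; the package and function name are illustrative, not part of the change.

package dbsetup

import (
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/node"
)

// openChainWithSeparateState mirrors MakeChainDatabase/MakeStateDataBase above:
// open the chain database, then attach a separate state store when enabled.
func openChainWithSeparateState(stack *node.Node, cache, handles int) (ethdb.Database, error) {
	chainDb, err := stack.OpenDatabaseWithFreezer("chaindata", cache, handles, "", "", false, false, false, false)
	if err != nil {
		return nil, err
	}
	if stack.IsSeparatedDB() {
		// as in MakeStateDataBase, the separate trie store gets half of the handle budget
		stateDb, err := stack.OpenDatabaseWithFreezer("chaindata/state", cache, handles/2, "", "", false, false, false, false)
		if err != nil {
			return nil, err
		}
		chainDb.SetStateStore(stateDb)
	}
	return chainDb, nil
}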

common/bidutil/bidutil.go (new file, 23 lines)

@@ -0,0 +1,23 @@
package bidutil
import (
"time"
"github.com/ethereum/go-ethereum/core/types"
)
// BidBetterBefore returns the time when the next bid better be received, considering the delay and bid simulation.
// BidBetterBefore is earlier than BidMustBefore.
func BidBetterBefore(parentHeader *types.Header, blockPeriod uint64, delayLeftOver, simulationLeftOver time.Duration) time.Time {
nextHeaderTime := BidMustBefore(parentHeader, blockPeriod, delayLeftOver)
nextHeaderTime = nextHeaderTime.Add(-simulationLeftOver)
return nextHeaderTime
}
// BidMustBefore returns the time when the next bid must be received,
// only considering the consensus delay but not bid simulation duration.
func BidMustBefore(parentHeader *types.Header, blockPeriod uint64, delayLeftOver time.Duration) time.Time {
nextHeaderTime := time.Unix(int64(parentHeader.Time+blockPeriod), 0)
nextHeaderTime = nextHeaderTime.Add(-delayLeftOver)
return nextHeaderTime
}
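An illustrative use of the two helpers above (the values and the assumption that the module path is github.com/ethereum/go-ethereum are mine, not from the change): with a 3-second block period, 50ms reserved for consensus delay and 100ms reserved for bid simulation, BidBetterBefore yields the latest useful arrival time for a bid.

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common/bidutil"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// pretend the parent block was just sealed
	parent := &types.Header{Time: uint64(time.Now().Unix())}
	deadline := bidutil.BidBetterBefore(parent, 3, 50*time.Millisecond, 100*time.Millisecond)
	fmt.Println("bids should arrive before:", deadline)
}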

@@ -333,6 +333,11 @@ func (beacon *Beacon) verifyHeaders(chain consensus.ChainHeaderReader, headers [
return abort, results
}
+ // NextInTurnValidator return the next in-turn validator for header
+ func (beacon *Beacon) NextInTurnValidator(chain consensus.ChainHeaderReader, header *types.Header) (common.Address, error) {
+ return common.Address{}, errors.New("not implemented")
+ }
// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the beacon protocol. The changes are done inline.
func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {

@@ -511,6 +511,11 @@ func (c *Clique) verifySeal(snap *Snapshot, header *types.Header, parents []*typ
return nil
}
+ // NextInTurnValidator return the next in-turn validator for header
+ func (c *Clique) NextInTurnValidator(chain consensus.ChainHeaderReader, header *types.Header) (common.Address, error) {
+ return common.Address{}, errors.New("not implemented")
+ }
// Prepare implements consensus.Engine, preparing all the consensus fields of the
// header for running the transactions on top.
func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {

@@ -94,6 +94,9 @@ type Engine interface {
// rules of a given engine.
VerifyUncles(chain ChainReader, block *types.Block) error
+ // NextInTurnValidator return the next in-turn validator for header
+ NextInTurnValidator(chain ChainHeaderReader, header *types.Header) (common.Address, error)
// Prepare initializes the consensus fields of a block header according to the
// rules of a particular engine. The changes are executed inline.
Prepare(chain ChainHeaderReader, header *types.Header) error

@@ -489,6 +489,11 @@ var FrontierDifficultyCalculator = calcDifficultyFrontier
var HomesteadDifficultyCalculator = calcDifficultyHomestead
var DynamicDifficultyCalculator = makeDifficultyCalculator
+ // NextInTurnValidator return the next in-turn validator for header
+ func (ethash *Ethash) NextInTurnValidator(chain consensus.ChainHeaderReader, header *types.Header) (common.Address, error) {
+ return common.Address{}, errors.New("not implemented")
+ }
// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the ethash protocol. The changes are done inline.
func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {

@@ -3,7 +3,7 @@ package parlia
import (
"container/heap"
"context"
- "fmt"
+ "errors"
"math"
"math/big"
@@ -159,7 +159,7 @@ func (p *Parlia) getValidatorElectionInfo(blockNr rpc.BlockNumberOrHash) ([]Vali
return nil, err
}
if totalLength.Int64() != int64(len(validators)) || totalLength.Int64() != int64(len(votingPowers)) || totalLength.Int64() != int64(len(voteAddrs)) {
- return nil, fmt.Errorf("validator length not match")
+ return nil, errors.New("validator length not match")
}
validatorItems := make([]ValidatorItem, len(validators))
@@ -435,7 +435,7 @@ func (p *Parlia) verifyVoteAttestation(chain consensus.ChainHeaderReader, header
return nil
}
if attestation.Data == nil {
- return fmt.Errorf("invalid attestation, vote data is nil")
+ return errors.New("invalid attestation, vote data is nil")
}
if len(attestation.Extra) > types.MaxAttestationExtraLength {
return fmt.Errorf("invalid attestation, too large extra length: %d", len(attestation.Extra))
@@ -464,7 +464,7 @@ func (p *Parlia) verifyVoteAttestation(chain consensus.ChainHeaderReader, header
}
justifiedBlockNumber, justifiedBlockHash, err := p.GetJustifiedNumberAndHash(chain, headers)
if err != nil {
- return fmt.Errorf("unexpected error when getting the highest justified number and hash")
+ return errors.New("unexpected error when getting the highest justified number and hash")
}
if sourceNumber != justifiedBlockNumber || sourceHash != justifiedBlockHash {
return fmt.Errorf("invalid attestation, source mismatch, expected block: %d, hash: %s; real block: %d, hash: %s",
@@ -486,7 +486,7 @@ func (p *Parlia) verifyVoteAttestation(chain consensus.ChainHeaderReader, header
validators := snap.validators()
validatorsBitSet := bitset.From([]uint64{uint64(attestation.VoteAddressSet)})
if validatorsBitSet.Count() > uint(len(validators)) {
- return fmt.Errorf("invalid attestation, vote number larger than validators number")
+ return errors.New("invalid attestation, vote number larger than validators number")
}
votedAddrs := make([]bls.PublicKey, 0, validatorsBitSet.Count())
for index, val := range validators {
@@ -503,7 +503,7 @@ func (p *Parlia) verifyVoteAttestation(chain consensus.ChainHeaderReader, header
// The valid voted validators should be no less than 2/3 validators.
if len(votedAddrs) < cmath.CeilDiv(len(snap.Validators)*2, 3) {
- return fmt.Errorf("invalid attestation, not enough validators voted")
+ return errors.New("invalid attestation, not enough validators voted")
}
// Verify the aggregated signature.
@@ -512,7 +512,7 @@ func (p *Parlia) verifyVoteAttestation(chain consensus.ChainHeaderReader, header
return fmt.Errorf("BLS signature converts failed: %v", err)
}
if !aggSig.FastAggregateVerify(votedAddrs, attestation.Data.Hash()) {
- return fmt.Errorf("invalid attestation, signature verify failed")
+ return errors.New("invalid attestation, signature verify failed")
}
return nil
@@ -605,18 +605,6 @@ func (p *Parlia) verifyHeader(chain consensus.ChainHeaderReader, header *types.H
}
}
- if !cancun && header.ExcessBlobGas != nil {
- return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas)
- }
- if !cancun && header.BlobGasUsed != nil {
- return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed)
- }
- if cancun {
- if err := eip4844.VerifyEIP4844Header(parent, header); err != nil {
- return err
- }
- }
// All basic checks passed, verify cascading fields
return p.verifyCascadingFields(chain, header, parents)
}
@@ -904,7 +892,7 @@ func (p *Parlia) assembleVoteAttestation(chain consensus.ChainHeaderReader, head
// Prepare vote data
justifiedBlockNumber, justifiedBlockHash, err := p.GetJustifiedNumberAndHash(chain, []*types.Header{parent})
if err != nil {
- return fmt.Errorf("unexpected error when getting the highest justified number and hash")
+ return errors.New("unexpected error when getting the highest justified number and hash")
}
attestation := &types.VoteAttestation{
Data: &types.VoteData{
@@ -941,7 +929,7 @@ func (p *Parlia) assembleVoteAttestation(chain consensus.ChainHeaderReader, head
validatorsBitSet := bitset.From([]uint64{uint64(attestation.VoteAddressSet)})
if validatorsBitSet.Count() < uint(len(signatures)) {
log.Warn(fmt.Sprintf("assembleVoteAttestation, check VoteAddress Set failed, expected:%d, real:%d", len(signatures), validatorsBitSet.Count()))
- return fmt.Errorf("invalid attestation, check VoteAddress Set failed")
+ return errors.New("invalid attestation, check VoteAddress Set failed")
}
// Append attestation to header extra field.
@@ -960,6 +948,16 @@ func (p *Parlia) assembleVoteAttestation(chain consensus.ChainHeaderReader, head
return nil
}
+ // NextInTurnValidator return the next in-turn validator for header
+ func (p *Parlia) NextInTurnValidator(chain consensus.ChainHeaderReader, header *types.Header) (common.Address, error) {
+ snap, err := p.snapshot(chain, header.Number.Uint64(), header.Hash(), nil)
+ if err != nil {
+ return common.Address{}, err
+ }
+ return snap.inturnValidator(), nil
+ }
// Prepare implements consensus.Engine, preparing all the consensus fields of the
// header for running the transactions on top.
func (p *Parlia) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
@@ -1344,27 +1342,27 @@ func (p *Parlia) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteE
header := chain.GetHeaderByHash(targetHash)
if header == nil {
log.Warn("BlockHeader at current voteBlockNumber is nil", "targetNumber", targetNumber, "targetHash", targetHash)
- return fmt.Errorf("BlockHeader at current voteBlockNumber is nil")
+ return errors.New("BlockHeader at current voteBlockNumber is nil")
}
if header.Number.Uint64() != targetNumber {
log.Warn("unexpected target number", "expect", header.Number.Uint64(), "real", targetNumber)
- return fmt.Errorf("target number mismatch")
+ return errors.New("target number mismatch")
}
justifiedBlockNumber, justifiedBlockHash, err := p.GetJustifiedNumberAndHash(chain, []*types.Header{header})
if err != nil {
log.Error("failed to get the highest justified number and hash", "headerNumber", header.Number, "headerHash", header.Hash())
- return fmt.Errorf("unexpected error when getting the highest justified number and hash")
+ return errors.New("unexpected error when getting the highest justified number and hash")
}
if vote.Data.SourceNumber != justifiedBlockNumber || vote.Data.SourceHash != justifiedBlockHash {
- return fmt.Errorf("vote source block mismatch")
+ return errors.New("vote source block mismatch")
}
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
log.Error("failed to get the snapshot from consensus", "error", err)
- return fmt.Errorf("failed to get the snapshot from consensus")
+ return errors.New("failed to get the snapshot from consensus")
}
validators := snap.Validators
@@ -1379,7 +1377,7 @@ func (p *Parlia) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteE
}
}
- return fmt.Errorf("vote verification failed")
+ return errors.New("vote verification failed")
}
// Authorize injects a private key into the consensus engine to mint new blocks
@@ -1838,7 +1836,7 @@ func (p *Parlia) applyTransaction(
// within the branch including `headers` and utilizing the latest element as the head.
func (p *Parlia) GetJustifiedNumberAndHash(chain consensus.ChainHeaderReader, headers []*types.Header) (uint64, common.Hash, error) {
if chain == nil || len(headers) == 0 || headers[len(headers)-1] == nil {
- return 0, common.Hash{}, fmt.Errorf("illegal chain or header")
+ return 0, common.Hash{}, errors.New("illegal chain or header")
}
head := headers[len(headers)-1]
snap, err := p.snapshot(chain, head.Number.Uint64(), head.Hash(), headers)

@@ -338,6 +338,13 @@ func (s *Snapshot) inturn(validator common.Address) bool {
return validators[offset] == validator
}
+ // inturnValidator returns the validator at a given block height.
+ func (s *Snapshot) inturnValidator() common.Address {
+ validators := s.validators()
+ offset := (s.Number + 1) % uint64(len(validators))
+ return validators[offset]
+ }
func (s *Snapshot) enoughDistance(validator common.Address, header *types.Header) bool {
idx := s.indexOfVal(validator)
if idx < 0 {
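A standalone sketch of the selection rule used by inturnValidator above: the proposer of block s.Number+1 is validators[(s.Number+1) % len(validators)], assuming the slice is ordered the same way Snapshot.validators() orders it. The addresses and height below are illustrative.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	validators := []common.Address{
		common.HexToAddress("0x01"),
		common.HexToAddress("0x02"),
		common.HexToAddress("0x03"),
	}
	snapshotNumber := uint64(100)
	// (100+1) % 3 == 2, so the third validator is in turn for the next block
	next := validators[(snapshotNumber+1)%uint64(len(validators))]
	fmt.Println("next in-turn validator:", next)
}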

@@ -2961,7 +2961,7 @@ func (bc *BlockChain) GetTrustedDiffLayer(blockHash common.Hash) *types.DiffLaye
func CalculateDiffHash(d *types.DiffLayer) (common.Hash, error) {
if d == nil {
- return common.Hash{}, fmt.Errorf("nil diff layer")
+ return common.Hash{}, errors.New("nil diff layer")
}
diff := &types.ExtDiffLayer{

@@ -456,7 +456,9 @@ func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engi
excessBlobGas := eip4844.CalcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed)
header.ExcessBlobGas = &excessBlobGas
header.BlobGasUsed = new(uint64)
- header.ParentBeaconRoot = new(common.Hash)
+ if cm.config.Parlia == nil {
+ header.ParentBeaconRoot = new(common.Hash)
+ }
}
return header
}

@@ -452,7 +452,10 @@ func (g *Genesis) ToBlock() *types.Block {
// EIP-4788: The parentBeaconBlockRoot of the genesis block is always
// the zero hash. This is because the genesis block does not have a parent
// by definition.
- head.ParentBeaconRoot = new(common.Hash)
+ if conf.Parlia == nil {
+ head.ParentBeaconRoot = new(common.Hash)
+ }
// EIP-4844 fields
head.ExcessBlobGas = g.ExcessBlobGas
head.BlobGasUsed = g.BlobGasUsed

@@ -288,13 +288,13 @@ func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, has
// if the state is not present in database.
func ReadStateScheme(db ethdb.Reader) string {
// Check if state in path-based scheme is present
- blob, _ := ReadAccountTrieNode(db, nil)
+ blob, _ := ReadAccountTrieNode(db.StateStoreReader(), nil)
if len(blob) != 0 {
return PathScheme
}
// The root node might be deleted during the initial snap sync, check
// the persistent state id then.
- if id := ReadPersistentStateID(db); id != 0 {
+ if id := ReadPersistentStateID(db.StateStoreReader()); id != 0 {
return PathScheme
}
// In a hash-based scheme, the genesis state is consistently stored
@@ -304,7 +304,7 @@ func ReadStateScheme(db ethdb.Reader) string {
if header == nil {
return "" // empty datadir
}
- blob = ReadLegacyTrieNode(db, header.Root)
+ blob = ReadLegacyTrieNode(db.StateStoreReader(), header.Root)
if len(blob) == 0 {
return "" // no state in disk
}

@@ -91,7 +91,7 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
infos = append(infos, info)
case StateFreezerName:
- if ReadStateScheme(db) != PathScheme {
+ if ReadStateScheme(db) != PathScheme || db.StateStore() != nil {
continue
}
datadir, err := db.AncientDatadir()

@@ -41,7 +41,15 @@ type freezerdb struct {
ancientRoot string
ethdb.KeyValueStore
ethdb.AncientStore
diffStore ethdb.KeyValueStore
+ stateStore ethdb.Database
+ }
+ func (frdb *freezerdb) StateStoreReader() ethdb.Reader {
+ if frdb.stateStore == nil {
+ return frdb
+ }
+ return frdb.stateStore
}
// AncientDatadir returns the path of root ancient directory.
@@ -64,6 +72,11 @@ func (frdb *freezerdb) Close() error {
errs = append(errs, err)
}
}
+ if frdb.stateStore != nil {
+ if err := frdb.stateStore.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
if len(errs) != 0 {
return fmt.Errorf("%v", errs)
}
@@ -81,6 +94,17 @@ func (frdb *freezerdb) SetDiffStore(diff ethdb.KeyValueStore) {
frdb.diffStore = diff
}
+ func (frdb *freezerdb) StateStore() ethdb.Database {
+ return frdb.stateStore
+ }
+ func (frdb *freezerdb) SetStateStore(state ethdb.Database) {
+ if frdb.stateStore != nil {
+ frdb.stateStore.Close()
+ }
+ frdb.stateStore = state
+ }
// Freeze is a helper method used for external testing to trigger and block until
// a freeze cycle completes, without having to sleep for a minute to trigger the
// automatic background run.
@@ -104,7 +128,8 @@ func (frdb *freezerdb) Freeze(threshold uint64) error {
// nofreezedb is a database wrapper that disables freezer data retrievals.
type nofreezedb struct {
ethdb.KeyValueStore
diffStore ethdb.KeyValueStore
+ stateStore ethdb.Database
}
// HasAncient returns an error as we don't have a backing chain freezer.
@@ -170,6 +195,21 @@ func (db *nofreezedb) SetDiffStore(diff ethdb.KeyValueStore) {
db.diffStore = diff
}
+ func (db *nofreezedb) StateStore() ethdb.Database {
+ return db.stateStore
+ }
+ func (db *nofreezedb) SetStateStore(state ethdb.Database) {
+ db.stateStore = state
+ }
+ func (db *nofreezedb) StateStoreReader() ethdb.Reader {
+ if db.stateStore != nil {
+ return db.stateStore
+ }
+ return db
+ }
func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
// Unlike other ancient-related methods, this method does not return
// errNotSupported when invoked.
@@ -609,6 +649,11 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
it := db.NewIterator(keyPrefix, keyStart)
defer it.Release()
+ var trieIter ethdb.Iterator
+ if db.StateStore() != nil {
+ trieIter = db.StateStore().NewIterator(keyPrefix, nil)
+ defer trieIter.Release()
+ }
var (
count int64
start = time.Now()
@@ -659,14 +704,14 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
bodies.Add(size)
case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
receipts.Add(size)
- case IsLegacyTrieNode(key, it.Value()):
- legacyTries.Add(size)
case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
tds.Add(size)
case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
numHashPairings.Add(size)
case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
hashNumPairings.Add(size)
+ case IsLegacyTrieNode(key, it.Value()):
+ legacyTries.Add(size)
case bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength:
stateLookups.Add(size)
case IsAccountTrieNode(key):
@@ -728,6 +773,46 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
logged = time.Now()
}
}
+ // inspect separate trie db
+ if trieIter != nil {
+ count = 0
+ logged = time.Now()
+ for trieIter.Next() {
+ var (
+ key = trieIter.Key()
+ value = trieIter.Value()
+ size = common.StorageSize(len(key) + len(value))
+ )
+ switch {
+ case IsLegacyTrieNode(key, value):
+ legacyTries.Add(size)
+ case bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength:
+ stateLookups.Add(size)
+ case IsAccountTrieNode(key):
+ accountTries.Add(size)
+ case IsStorageTrieNode(key):
+ storageTries.Add(size)
+ default:
+ var accounted bool
+ for _, meta := range [][]byte{
+ fastTrieProgressKey, persistentStateIDKey, trieJournalKey} {
+ if bytes.Equal(key, meta) {
+ metadata.Add(size)
+ break
+ }
+ }
+ if !accounted {
+ unaccounted.Add(size)
+ }
+ }
+ count++
+ if count%1000 == 0 && time.Since(logged) > 8*time.Second {
+ log.Info("Inspecting separate state database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ }
// Display the database statistic of key-value store.
stats := [][]string{
{"Key-Value store", "Headers", headers.Size(), headers.Count()},
@@ -768,6 +853,28 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
}
total += ancient.size()
}
+ // inspect ancient state in separate trie db if exist
+ if trieIter != nil {
+ stateAncients, err := inspectFreezers(db.StateStore())
+ if err != nil {
+ return err
+ }
+ for _, ancient := range stateAncients {
+ for _, table := range ancient.sizes {
+ if ancient.name == "chain" {
+ break
+ }
+ stats = append(stats, []string{
+ fmt.Sprintf("Ancient store (%s)", strings.Title(ancient.name)),
+ strings.Title(table.name),
+ table.size.String(),
+ fmt.Sprintf("%d", ancient.count()),
+ })
+ }
+ total += ancient.size()
+ }
+ }
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Database", "Category", "Size", "Items"})
table.SetFooter([]string{"", "Total", total.String(), " "})

@@ -1004,7 +1004,7 @@ func (t *freezerTable) ResetItemsOffset(virtualTail uint64) error {
}
if stat.Size() == 0 {
- return fmt.Errorf("Stat size is zero when ResetVirtualTail.")
+ return errors.New("Stat size is zero when ResetVirtualTail.")
}
var firstIndex indexEntry

@@ -213,6 +213,18 @@ func (t *table) SetDiffStore(diff ethdb.KeyValueStore) {
panic("not implement")
}
+ func (t *table) StateStore() ethdb.Database {
+ return nil
+ }
+ func (t *table) SetStateStore(state ethdb.Database) {
+ panic("not implement")
+ }
+ func (t *table) StateStoreReader() ethdb.Reader {
+ return nil
+ }
// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
func (t *table) NewBatchWithSize(size int) ethdb.Batch {
return &tableBatch{t.db.NewBatchWithSize(size), t.prefix}

@@ -159,13 +159,19 @@ func (p *Pruner) PruneAll(genesis *core.Genesis) error {
}
func pruneAll(maindb ethdb.Database, g *core.Genesis) error {
+ var pruneDB ethdb.Database
+ if maindb != nil && maindb.StateStore() != nil {
+ pruneDB = maindb.StateStore()
+ } else {
+ pruneDB = maindb
+ }
var (
count int
size common.StorageSize
pstart = time.Now()
logged = time.Now()
- batch = maindb.NewBatch()
+ batch = pruneDB.NewBatch()
- iter = maindb.NewIterator(nil, nil)
+ iter = pruneDB.NewIterator(nil, nil)
)
start := time.Now()
for iter.Next() {
@@ -195,7 +201,7 @@ func pruneAll(maindb ethdb.Database, g *core.Genesis) error {
batch.Reset()
iter.Release()
- iter = maindb.NewIterator(nil, key)
+ iter = pruneDB.NewIterator(nil, key)
}
}
}
@@ -219,7 +225,7 @@ func pruneAll(maindb ethdb.Database, g *core.Genesis) error {
end = nil
}
log.Info("Compacting database", "range", fmt.Sprintf("%#x-%#x", start, end), "elapsed", common.PrettyDuration(time.Since(cstart)))
- if err := maindb.Compact(start, end); err != nil {
+ if err := pruneDB.Compact(start, end); err != nil {
log.Error("Database compaction failed", "error", err)
return err
}
@@ -250,13 +256,19 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
// that the false-positive is low enough(~0.05%). The probability of the
// dangling node is the state root is super low. So the dangling nodes in
// theory will never ever be visited again.
+ var pruneDB ethdb.Database
+ if maindb != nil && maindb.StateStore() != nil {
+ pruneDB = maindb.StateStore()
+ } else {
+ pruneDB = maindb
+ }
var (
skipped, count int
size common.StorageSize
pstart = time.Now()
logged = time.Now()
- batch = maindb.NewBatch()
+ batch = pruneDB.NewBatch()
- iter = maindb.NewIterator(nil, nil)
+ iter = pruneDB.NewIterator(nil, nil)
)
for iter.Next() {
key := iter.Key()
@@ -303,7 +315,7 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
batch.Reset()
iter.Release()
- iter = maindb.NewIterator(nil, key)
+ iter = pruneDB.NewIterator(nil, key)
}
}
}
@@ -348,7 +360,7 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
end = nil
}
log.Info("Compacting database", "range", fmt.Sprintf("%#x-%#x", start, end), "elapsed", common.PrettyDuration(time.Since(cstart)))
- if err := maindb.Compact(start, end); err != nil {
+ if err := pruneDB.Compact(start, end); err != nil {
log.Error("Database compaction failed", "error", err)
return err
}
@@ -586,10 +598,17 @@ func (p *Pruner) Prune(root common.Hash) error {
// Use the bottom-most diff layer as the target
root = layers[len(layers)-1].Root()
}
+ // if the separated state db has been set, use this db to prune data
+ var trienodedb ethdb.Database
+ if p.db != nil && p.db.StateStore() != nil {
+ trienodedb = p.db.StateStore()
+ } else {
+ trienodedb = p.db
+ }
// Ensure the root is really present. The weak assumption
// is the presence of root can indicate the presence of the
// entire trie.
- if !rawdb.HasLegacyTrieNode(p.db, root) {
+ if !rawdb.HasLegacyTrieNode(trienodedb, root) {
// The special case is for clique based networks(goerli
// and some other private networks), it's possible that two
// consecutive blocks will have same root. In this case snapshot
@@ -603,7 +622,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// as the pruning target.
var found bool
for i := len(layers) - 2; i >= 2; i-- {
- if rawdb.HasLegacyTrieNode(p.db, layers[i].Root()) {
+ if rawdb.HasLegacyTrieNode(trienodedb, layers[i].Root()) {
root = layers[i].Root()
found = true
log.Info("Selecting middle-layer as the pruning target", "root", root, "depth", i)
@@ -611,7 +630,7 @@ func (p *Pruner) Prune(root common.Hash) error {
}
}
if !found {
- if blob := rawdb.ReadLegacyTrieNode(p.db, p.snaptree.DiskRoot()); len(blob) != 0 {
+ if blob := rawdb.ReadLegacyTrieNode(trienodedb, p.snaptree.DiskRoot()); len(blob) != 0 {
root = p.snaptree.DiskRoot()
found = true
log.Info("Selecting disk-layer as the pruning target", "root", root)

@@ -6,7 +6,7 @@ import (
"github.com/ethereum/go-ethereum/common"
)
- // sharedPool is used to store maps of originStorage of stateObjects
+ // StoragePool is used to store maps of originStorage of stateObjects
type StoragePool struct {
sync.RWMutex
sharedMap map[common.Address]*sync.Map

@@ -18,6 +18,7 @@
package state
import (
+ "errors"
"fmt"
"runtime"
"sort"
@@ -747,7 +748,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
if s.trie == nil {
tr, err := s.db.OpenTrie(s.originalRoot)
if err != nil {
- s.setError(fmt.Errorf("failed to open trie tree"))
+ s.setError(errors.New("failed to open trie tree"))
return nil
}
s.trie = tr
@@ -1003,7 +1004,7 @@ func (s *StateDB) WaitPipeVerification() error {
// Need to wait for the parent trie to commit
if s.snap != nil {
if valid := s.snap.WaitAndGetVerifyRes(); !valid {
- return fmt.Errorf("verification on parent snap failed")
+ return errors.New("verification on parent snap failed")
}
}
return nil

@@ -80,7 +80,7 @@ func CheckNoGoroutines(key, value string) error {
var pb bytes.Buffer
profiler := pprof.Lookup("goroutine")
if profiler == nil {
- return fmt.Errorf("unable to find profile")
+ return errors.New("unable to find profile")
}
err := profiler.WriteTo(&pb, 0)
if err != nil {

@@ -76,7 +76,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
lastBlock := p.bc.GetBlockByHash(block.ParentHash())
if lastBlock == nil {
- return statedb, nil, nil, 0, fmt.Errorf("could not get parent block")
+ return statedb, nil, nil, 0, errors.New("could not get parent block")
}
if !p.config.IsFeynman(block.Number(), block.Time()) {
// Handle upgrade build-in system contract code

@@ -419,7 +419,9 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
header.BlobGasUsed = &used
beaconRoot := common.HexToHash("0xbeac00")
- header.ParentBeaconRoot = &beaconRoot
+ if config.Parlia == nil {
+ header.ParentBeaconRoot = &beaconRoot
+ }
}
// Assemble and return the final block for sealing
if config.IsShanghai(header.Number, header.Time) {

@@ -17,6 +17,7 @@
package core
import (
+ "errors"
"fmt"
"math"
"math/big"
@@ -397,10 +398,10 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
if st.evm.ChainConfig().IsNano(st.evm.Context.BlockNumber) {
for _, blackListAddr := range types.NanoBlackList {
if blackListAddr == msg.From {
- return nil, fmt.Errorf("block blacklist account")
+ return nil, errors.New("block blacklist account")
}
if msg.To != nil && *msg.To == blackListAddr {
- return nil, fmt.Errorf("block blacklist account")
+ return nil, errors.New("block blacklist account")
}
}
}

@@ -18,6 +18,7 @@ package txpool
import (
"crypto/sha256"
+ "errors"
"fmt"
"math/big"
@@ -126,7 +127,7 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types
// data match up before doing any expensive validations
hashes := tx.BlobHashes()
if len(hashes) == 0 {
- return fmt.Errorf("blobless blob transaction")
+ return errors.New("blobless blob transaction")
}
if len(hashes) > params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob {
return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob)

core/types/bid.go (new file, 184 lines)

@@ -0,0 +1,184 @@
package types
import (
"fmt"
"math/big"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
)
const TxDecodeConcurrencyForPerBid = 5
// BidArgs represents the arguments to submit a bid.
type BidArgs struct {
// RawBid from builder directly
RawBid *RawBid
// Signature of the bid from builder
Signature hexutil.Bytes `json:"signature"`
// PayBidTx is a payment tx to builder from sentry, which is optional
PayBidTx hexutil.Bytes `json:"payBidTx"`
PayBidTxGasUsed uint64 `json:"payBidTxGasUsed"`
}
func (b *BidArgs) EcrecoverSender() (common.Address, error) {
pk, err := crypto.SigToPub(b.RawBid.Hash().Bytes(), b.Signature)
if err != nil {
return common.Address{}, err
}
return crypto.PubkeyToAddress(*pk), nil
}
func (b *BidArgs) ToBid(builder common.Address, signer Signer) (*Bid, error) {
txs, err := b.RawBid.DecodeTxs(signer)
if err != nil {
return nil, err
}
if len(b.PayBidTx) != 0 {
var payBidTx = new(Transaction)
err = payBidTx.UnmarshalBinary(b.PayBidTx)
if err != nil {
return nil, err
}
txs = append(txs, payBidTx)
}
bid := &Bid{
Builder: builder,
BlockNumber: b.RawBid.BlockNumber,
ParentHash: b.RawBid.ParentHash,
Txs: txs,
GasUsed: b.RawBid.GasUsed + b.PayBidTxGasUsed,
GasFee: b.RawBid.GasFee,
BuilderFee: b.RawBid.BuilderFee,
rawBid: *b.RawBid,
}
if bid.BuilderFee == nil {
bid.BuilderFee = big.NewInt(0)
}
return bid, nil
}
// RawBid represents a raw bid from builder directly.
type RawBid struct {
BlockNumber uint64 `json:"blockNumber"`
ParentHash common.Hash `json:"parentHash"`
Txs []hexutil.Bytes `json:"txs"`
GasUsed uint64 `json:"gasUsed"`
GasFee *big.Int `json:"gasFee"`
BuilderFee *big.Int `json:"builderFee"`
hash atomic.Value
}
func (b *RawBid) DecodeTxs(signer Signer) ([]*Transaction, error) {
if len(b.Txs) == 0 {
return []*Transaction{}, nil
}
txChan := make(chan int, len(b.Txs))
bidTxs := make([]*Transaction, len(b.Txs))
decode := func(txBytes hexutil.Bytes) (*Transaction, error) {
tx := new(Transaction)
err := tx.UnmarshalBinary(txBytes)
if err != nil {
return nil, err
}
_, err = Sender(signer, tx)
if err != nil {
return nil, err
}
return tx, nil
}
errChan := make(chan error, TxDecodeConcurrencyForPerBid)
for i := 0; i < TxDecodeConcurrencyForPerBid; i++ {
go func() {
for {
txIndex, ok := <-txChan
if !ok {
errChan <- nil
return
}
txBytes := b.Txs[txIndex]
tx, err := decode(txBytes)
if err != nil {
errChan <- err
return
}
bidTxs[txIndex] = tx
}
}()
}
for i := 0; i < len(b.Txs); i++ {
txChan <- i
}
close(txChan)
for i := 0; i < TxDecodeConcurrencyForPerBid; i++ {
err := <-errChan
if err != nil {
return nil, fmt.Errorf("failed to decode tx, %v", err)
}
}
return bidTxs, nil
}
// Hash returns the hash of the bid.
func (b *RawBid) Hash() common.Hash {
if hash := b.hash.Load(); hash != nil {
return hash.(common.Hash)
}
h := rlpHash(b)
b.hash.Store(h)
return h
}
// Bid represents a bid.
type Bid struct {
Builder common.Address
BlockNumber uint64
ParentHash common.Hash
Txs Transactions
GasUsed uint64
GasFee *big.Int
BuilderFee *big.Int
rawBid RawBid
}
// Hash returns the bid hash.
func (b *Bid) Hash() common.Hash {
return b.rawBid.Hash()
}
// BidIssue represents a bid issue.
type BidIssue struct {
Validator common.Address
Builder common.Address
BidHash common.Hash
Message string
}
type MevParams struct {
ValidatorCommission uint64 // 100 means 1%
BidSimulationLeftOver time.Duration
}
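For context, a minimal sketch (not part of the diff) of how a builder could produce a BidArgs that EcrecoverSender will attribute to it: sign the RLP hash of the RawBid with the builder's key. The key, block number and fee figures below are placeholders.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Placeholder builder key; a real builder uses the key registered with the validator.
	builderKey, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}

	rawBid := &types.RawBid{
		BlockNumber: 1001,          // placeholder: must be current head + 1
		ParentHash:  common.Hash{}, // placeholder: must be the current head hash
		GasUsed:     21000,
		GasFee:      big.NewInt(1e15),
		BuilderFee:  big.NewInt(0),
	}

	// Sign the RLP hash of the raw bid; EcrecoverSender recovers the builder address from it.
	sig, err := crypto.Sign(rawBid.Hash().Bytes(), builderKey)
	if err != nil {
		panic(err)
	}

	args := &types.BidArgs{RawBid: rawBid, Signature: sig}

	sender, err := args.EcrecoverSender()
	if err != nil {
		panic(err)
	}
	fmt.Println(sender == crypto.PubkeyToAddress(builderKey.PublicKey)) // true
}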

45
core/types/bid_error.go Normal file

@ -0,0 +1,45 @@
package types
import "errors"
const (
InvalidBidParamError = -38001
InvalidPayBidTxError = -38002
MevNotRunningError = -38003
MevBusyError = -38004
MevNotInTurnError = -38005
)
var (
ErrMevNotRunning = newBidError(errors.New("the validator stops accepting bids for now, try again later"), MevNotRunningError)
ErrMevBusy = newBidError(errors.New("the validator is working on too many bids, try again later"), MevBusyError)
ErrMevNotInTurn = newBidError(errors.New("the validator is not in-turn to propose currently, try again later"), MevNotInTurnError)
)
// bidError is an API error that encompasses an invalid bid with JSON error
// code and a binary data blob.
type bidError struct {
error
code int
}
// ErrorCode returns the JSON error code for an invalid bid.
// See: https://github.com/ethereum/wiki/wiki/JSON-RPC-Error-Codes-Improvement-Proposal
func (e *bidError) ErrorCode() int {
return e.code
}
func NewInvalidBidError(message string) *bidError {
return newBidError(errors.New(message), InvalidBidParamError)
}
func NewInvalidPayBidTxError(message string) *bidError {
return newBidError(errors.New(message), InvalidPayBidTxError)
}
func newBidError(err error, code int) *bidError {
return &bidError{
error: err,
code: code,
}
}
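A small sketch (not from the diff) of how a caller could branch on these codes; it assumes the error surfaces through an interface exposing ErrorCode() int, as the go-ethereum rpc package's rpc.Error does.

package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
)

// classifyBidError is a hypothetical helper deciding whether a failed
// mev_sendBid call is worth retrying, based on the codes defined above.
func classifyBidError(err error) string {
	var rpcErr rpc.Error
	if errors.As(err, &rpcErr) {
		switch rpcErr.ErrorCode() {
		case types.MevNotRunningError, types.MevBusyError, types.MevNotInTurnError:
			return "retry later"
		case types.InvalidBidParamError, types.InvalidPayBidTxError:
			return "fix the bid before resubmitting"
		}
	}
	return "unknown failure"
}

func main() {
	fmt.Println(classifyBidError(types.ErrMevBusy)) // retry later
}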

@ -2,6 +2,7 @@ package vm
import ( import (
"encoding/binary" "encoding/binary"
"errors"
"fmt" "fmt"
"net/url" "net/url"
"strings" "strings"
@ -68,7 +69,7 @@ func (c *tmHeaderValidate) Run(input []byte) (result []byte, err error) {
}() }()
if uint64(len(input)) <= precompileContractInputMetaDataLength { if uint64(len(input)) <= precompileContractInputMetaDataLength {
return nil, fmt.Errorf("invalid input") return nil, errors.New("invalid input")
} }
payloadLength := binary.BigEndian.Uint64(input[precompileContractInputMetaDataLength-uint64TypeLength : precompileContractInputMetaDataLength]) payloadLength := binary.BigEndian.Uint64(input[precompileContractInputMetaDataLength-uint64TypeLength : precompileContractInputMetaDataLength])
@ -124,7 +125,7 @@ func (c *iavlMerkleProofValidate) Run(input []byte) (result []byte, err error) {
return c.basicIavlMerkleProofValidate.Run(input) return c.basicIavlMerkleProofValidate.Run(input)
} }
// tmHeaderValidate implemented as a native contract. // tmHeaderValidateNano implemented as a native contract.
type tmHeaderValidateNano struct{} type tmHeaderValidateNano struct{}
func (c *tmHeaderValidateNano) RequiredGas(input []byte) uint64 { func (c *tmHeaderValidateNano) RequiredGas(input []byte) uint64 {
@ -132,7 +133,7 @@ func (c *tmHeaderValidateNano) RequiredGas(input []byte) uint64 {
} }
func (c *tmHeaderValidateNano) Run(input []byte) (result []byte, err error) { func (c *tmHeaderValidateNano) Run(input []byte) (result []byte, err error) {
return nil, fmt.Errorf("suspend") return nil, errors.New("suspend")
} }
type iavlMerkleProofValidateNano struct{} type iavlMerkleProofValidateNano struct{}
@ -142,7 +143,7 @@ func (c *iavlMerkleProofValidateNano) RequiredGas(_ []byte) uint64 {
} }
func (c *iavlMerkleProofValidateNano) Run(_ []byte) (result []byte, err error) { func (c *iavlMerkleProofValidateNano) Run(_ []byte) (result []byte, err error) {
return nil, fmt.Errorf("suspend") return nil, errors.New("suspend")
} }
// ------------------------------------------------------------------------------------------------------------------------------------------------ // ------------------------------------------------------------------------------------------------------------------------------------------------
@ -250,7 +251,7 @@ func (c *basicIavlMerkleProofValidate) Run(input []byte) (result []byte, err err
valid := kvmp.Validate() valid := kvmp.Validate()
if !valid { if !valid {
return nil, fmt.Errorf("invalid merkle proof") return nil, errors.New("invalid merkle proof")
} }
return successfulMerkleResult(), nil return successfulMerkleResult(), nil
@ -418,7 +419,7 @@ const (
// | 33 bytes | 64 bytes | 32 bytes | // | 33 bytes | 64 bytes | 32 bytes |
func (c *secp256k1SignatureRecover) Run(input []byte) (result []byte, err error) { func (c *secp256k1SignatureRecover) Run(input []byte) (result []byte, err error) {
if len(input) != int(secp256k1PubKeyLength)+int(secp256k1SignatureLength)+int(secp256k1SignatureMsgHashLength) { if len(input) != int(secp256k1PubKeyLength)+int(secp256k1SignatureLength)+int(secp256k1SignatureMsgHashLength) {
return nil, fmt.Errorf("invalid input") return nil, errors.New("invalid input")
} }
return c.runTMSecp256k1Signature( return c.runTMSecp256k1Signature(
@ -432,7 +433,7 @@ func (c *secp256k1SignatureRecover) runTMSecp256k1Signature(pubkey, signatureStr
tmPubKey := secp256k1.PubKeySecp256k1(pubkey) tmPubKey := secp256k1.PubKeySecp256k1(pubkey)
ok := tmPubKey.VerifyBytesWithMsgHash(msgHash, signatureStr) ok := tmPubKey.VerifyBytesWithMsgHash(msgHash, signatureStr)
if !ok { if !ok {
return nil, fmt.Errorf("invalid signature") return nil, errors.New("invalid signature")
} }
return tmPubKey.Address().Bytes(), nil return tmPubKey.Address().Bytes(), nil
} }

@ -1,6 +1,7 @@
package v1 package v1
import ( import (
"errors"
"fmt" "fmt"
"github.com/bnb-chain/ics23" "github.com/bnb-chain/ics23"
@ -71,7 +72,7 @@ func (op CommitmentOp) GetKey() []byte {
// in the CommitmentOp and return the CommitmentRoot of the proof. // in the CommitmentOp and return the CommitmentRoot of the proof.
func (op CommitmentOp) Run(args [][]byte) ([][]byte, error) { func (op CommitmentOp) Run(args [][]byte) ([][]byte, error) {
if _, ok := op.Proof.Proof.(*ics23.CommitmentProof_Exist); !ok { if _, ok := op.Proof.Proof.(*ics23.CommitmentProof_Exist); !ok {
return nil, fmt.Errorf("only exist proof supported") return nil, errors.New("only exist proof supported")
} }
// calculate root from proof // calculate root from proof

@ -3,6 +3,7 @@ package v1
import ( import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"errors"
"fmt" "fmt"
"github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/ed25519"
@ -97,7 +98,7 @@ func (cs ConsensusState) EncodeConsensusState() ([]byte, error) {
pos := uint64(0) pos := uint64(0)
if uint64(len(cs.ChainID)) > chainIDLength { if uint64(len(cs.ChainID)) > chainIDLength {
return nil, fmt.Errorf("chainID length should be no more than 32") return nil, errors.New("chainID length should be no more than 32")
} }
copy(encodingBytes[pos:pos+chainIDLength], cs.ChainID) copy(encodingBytes[pos:pos+chainIDLength], cs.ChainID)
pos += chainIDLength pos += chainIDLength
@ -115,7 +116,7 @@ func (cs ConsensusState) EncodeConsensusState() ([]byte, error) {
validator := cs.NextValidatorSet.Validators[index] validator := cs.NextValidatorSet.Validators[index]
pubkey, ok := validator.PubKey.(ed25519.PubKeyEd25519) pubkey, ok := validator.PubKey.(ed25519.PubKeyEd25519)
if !ok { if !ok {
return nil, fmt.Errorf("invalid pubkey type") return nil, errors.New("invalid pubkey type")
} }
copy(encodingBytes[pos:pos+validatorPubkeyLength], pubkey[:]) copy(encodingBytes[pos:pos+validatorPubkeyLength], pubkey[:])
@ -177,16 +178,16 @@ func (h *Header) Validate(chainID string) error {
return err return err
} }
if h.ValidatorSet == nil { if h.ValidatorSet == nil {
return fmt.Errorf("invalid header: validator set is nil") return errors.New("invalid header: validator set is nil")
} }
if h.NextValidatorSet == nil { if h.NextValidatorSet == nil {
return fmt.Errorf("invalid header: next validator set is nil") return errors.New("invalid header: next validator set is nil")
} }
if !bytes.Equal(h.ValidatorsHash, h.ValidatorSet.Hash()) { if !bytes.Equal(h.ValidatorsHash, h.ValidatorSet.Hash()) {
return fmt.Errorf("invalid header: validator set does not match hash") return errors.New("invalid header: validator set does not match hash")
} }
if !bytes.Equal(h.NextValidatorsHash, h.NextValidatorSet.Hash()) { if !bytes.Equal(h.NextValidatorsHash, h.NextValidatorSet.Hash()) {
return fmt.Errorf("invalid header: next validator set does not match hash") return errors.New("invalid header: next validator set does not match hash")
} }
return nil return nil
} }

@ -4,6 +4,7 @@ package v2
import ( import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"errors"
"fmt" "fmt"
"github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/crypto/ed25519"
@ -49,7 +50,7 @@ func (cs ConsensusState) EncodeConsensusState() ([]byte, error) {
pos := uint64(0) pos := uint64(0)
if uint64(len(cs.ChainID)) > chainIDLength { if uint64(len(cs.ChainID)) > chainIDLength {
return nil, fmt.Errorf("chainID length should be no more than 32") return nil, errors.New("chainID length should be no more than 32")
} }
copy(encodingBytes[pos:pos+chainIDLength], cs.ChainID) copy(encodingBytes[pos:pos+chainIDLength], cs.ChainID)
pos += chainIDLength pos += chainIDLength
@ -197,7 +198,7 @@ func DecodeConsensusState(input []byte) (ConsensusState, error) {
// 32 bytes | | | // 32 bytes | | |
func DecodeLightBlockValidationInput(input []byte) (*ConsensusState, *types.LightBlock, error) { func DecodeLightBlockValidationInput(input []byte) (*ConsensusState, *types.LightBlock, error) {
if uint64(len(input)) <= consensusStateLengthBytesLength { if uint64(len(input)) <= consensusStateLengthBytesLength {
return nil, nil, fmt.Errorf("invalid input") return nil, nil, errors.New("invalid input")
} }
csLen := binary.BigEndian.Uint64(input[consensusStateLengthBytesLength-uint64TypeLength : consensusStateLengthBytesLength]) csLen := binary.BigEndian.Uint64(input[consensusStateLengthBytesLength-uint64TypeLength : consensusStateLengthBytesLength])

@ -20,6 +20,7 @@ import (
"container/heap" "container/heap"
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"math/big" "math/big"
"os" "os"
@ -81,13 +82,13 @@ func (b *testBackend) EventMux() *event.TypeMux { return b.eventMux }
func (p *mockPOSA) GetJustifiedNumberAndHash(chain consensus.ChainHeaderReader, headers []*types.Header) (uint64, common.Hash, error) { func (p *mockPOSA) GetJustifiedNumberAndHash(chain consensus.ChainHeaderReader, headers []*types.Header) (uint64, common.Hash, error) {
parentHeader := chain.GetHeaderByHash(headers[len(headers)-1].ParentHash) parentHeader := chain.GetHeaderByHash(headers[len(headers)-1].ParentHash)
if parentHeader == nil { if parentHeader == nil {
return 0, common.Hash{}, fmt.Errorf("unexpected error") return 0, common.Hash{}, errors.New("unexpected error")
} }
return parentHeader.Number.Uint64(), parentHeader.Hash(), nil return parentHeader.Number.Uint64(), parentHeader.Hash(), nil
} }
func (p *mockInvalidPOSA) GetJustifiedNumberAndHash(chain consensus.ChainHeaderReader, headers []*types.Header) (uint64, common.Hash, error) { func (p *mockInvalidPOSA) GetJustifiedNumberAndHash(chain consensus.ChainHeaderReader, headers []*types.Header) (uint64, common.Hash, error) {
return 0, common.Hash{}, fmt.Errorf("not supported") return 0, common.Hash{}, errors.New("not supported")
} }
func (m *mockPOSA) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteEnvelope) error { func (m *mockPOSA) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteEnvelope) error {

@ -2,7 +2,6 @@ package vote
import ( import (
"context" "context"
"fmt"
"os" "os"
"time" "time"
@ -38,7 +37,7 @@ func NewVoteSigner(blsPasswordPath, blsWalletPath string) (*VoteSigner, error) {
} }
if !dirExists { if !dirExists {
log.Error("BLS wallet did not exists.") log.Error("BLS wallet did not exists.")
return nil, fmt.Errorf("BLS wallet did not exists") return nil, errors.New("BLS wallet did not exists")
} }
walletPassword, err := os.ReadFile(blsPasswordPath) walletPassword, err := os.ReadFile(blsPasswordPath)

@ -24,6 +24,7 @@ import (
"os" "os"
"strings" "strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
@ -141,3 +142,31 @@ func (api *AdminAPI) ImportChain(file string) (bool, error) {
} }
return true, nil return true, nil
} }
// MevRunning returns true if the validator accept bids from builder
func (api *AdminAPI) MevRunning() bool {
return api.eth.APIBackend.MevRunning()
}
// StartMev starts mev. It notifies the miner to start to receive bids.
func (api *AdminAPI) StartMev() {
api.eth.APIBackend.StartMev()
}
// StopMev stops mev. It notifies the miner to stop receiving bids from this moment,
// but bids received before this moment will still be taken into consideration by mev.
func (api *AdminAPI) StopMev() {
api.eth.APIBackend.StopMev()
}
// AddBuilder adds a builder to the bid simulator.
// url is the endpoint of the builder, for example, "https://mev-builder.amazonaws.com",
// if the validator is equipped with a sentry, the url is ignored.
func (api *AdminAPI) AddBuilder(builder common.Address, url string) error {
return api.eth.APIBackend.AddBuilder(builder, url)
}
// RemoveBuilder removes a builder from the bid simulator.
func (api *AdminAPI) RemoveBuilder(builder common.Address) error {
return api.eth.APIBackend.RemoveBuilder(builder)
}
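These methods live in the admin namespace, so a validator operator could register a builder and enable bid receiving from a small client like the sketch below (the IPC path, builder address and URL are placeholders, not values from the diff).

package main

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder IPC path; the admin namespace is usually only exposed over IPC.
	client, err := rpc.DialIPC(context.Background(), "/data/geth/geth.ipc")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	builder := common.HexToAddress("0x00000000000000000000000000000000deadbeef") // placeholder builder
	if err := client.Call(nil, "admin_addBuilder", builder, "https://mev-builder.example.org"); err != nil {
		log.Fatal(err)
	}
	if err := client.Call(nil, "admin_startMev"); err != nil {
		log.Fatal(err)
	}

	var running bool
	if err := client.Call(&running, "admin_mevRunning"); err != nil {
		log.Fatal(err)
	}
	log.Println("mev running:", running)
}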

@ -456,3 +456,39 @@ func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, re
func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) { func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) {
return b.eth.stateAtTransaction(ctx, block, txIndex, reexec) return b.eth.stateAtTransaction(ctx, block, txIndex, reexec)
} }
func (b *EthAPIBackend) MevRunning() bool {
return b.Miner().MevRunning()
}
func (b *EthAPIBackend) MevParams() *types.MevParams {
return b.Miner().MevParams()
}
func (b *EthAPIBackend) StartMev() {
b.Miner().StartMev()
}
func (b *EthAPIBackend) StopMev() {
b.Miner().StopMev()
}
func (b *EthAPIBackend) AddBuilder(builder common.Address, url string) error {
return b.Miner().AddBuilder(builder, url)
}
func (b *EthAPIBackend) RemoveBuilder(builder common.Address) error {
return b.Miner().RemoveBuilder(builder)
}
func (b *EthAPIBackend) SendBid(ctx context.Context, bid *types.BidArgs) (common.Hash, error) {
return b.Miner().SendBid(ctx, bid)
}
func (b *EthAPIBackend) BestBidGasFee(parentHash common.Hash) *big.Int {
return b.Miner().BestPackedBlockReward(parentHash)
}
func (b *EthAPIBackend) MinerInTurn() bool {
return b.Miner().InTurn()
}

@ -67,6 +67,10 @@ import (
"github.com/ethereum/go-ethereum/triedb/pathdb" "github.com/ethereum/go-ethereum/triedb/pathdb"
) )
const (
ChainDBNamespace = "eth/db/chaindata/"
)
// Config contains the configuration options of the ETH protocol. // Config contains the configuration options of the ETH protocol.
// Deprecated: use ethconfig.Config instead. // Deprecated: use ethconfig.Config instead.
type Config = ethconfig.Config type Config = ethconfig.Config
@ -134,7 +138,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
// Assemble the Ethereum object // Assemble the Ethereum object
chainDb, err := stack.OpenAndMergeDatabase("chaindata", config.DatabaseCache, config.DatabaseHandles, chainDb, err := stack.OpenAndMergeDatabase("chaindata", config.DatabaseCache, config.DatabaseHandles,
config.DatabaseFreezer, config.DatabaseDiff, "eth/db/chaindata/", false, config.PersistDiff, config.PruneAncientData) config.DatabaseFreezer, config.DatabaseDiff, ChainDBNamespace, false, config.PersistDiff, config.PruneAncientData)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -327,7 +331,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
parlia.VotePool = votePool parlia.VotePool = votePool
} }
} else { } else {
return nil, fmt.Errorf("Engine is not Parlia type") return nil, errors.New("Engine is not Parlia type")
} }
log.Info("Create votePool successfully") log.Info("Create votePool successfully")
eth.handler.votepool = votePool eth.handler.votepool = votePool

@ -1,6 +1,7 @@
package eth package eth
import ( import (
"errors"
"fmt" "fmt"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
@ -44,7 +45,7 @@ func (h *trustHandler) Handle(peer *trust.Peer, packet trust.Packet) error {
vm.HandleRootResponse(verifyResult, peer.ID()) vm.HandleRootResponse(verifyResult, peer.ID())
return nil return nil
} }
return fmt.Errorf("verify manager is nil which is unexpected") return errors.New("verify manager is nil which is unexpected")
default: default:
return fmt.Errorf("unexpected trust packet type: %T", packet) return fmt.Errorf("unexpected trust packet type: %T", packet)

@ -52,6 +52,16 @@ func DialContext(ctx context.Context, rawurl string) (*Client, error) {
return NewClient(c), nil return NewClient(c), nil
} }
// DialOptions creates a new RPC client for the given URL. You can supply any of the
// pre-defined client options to configure the underlying transport.
func DialOptions(ctx context.Context, rawurl string, opts ...rpc.ClientOption) (*Client, error) {
c, err := rpc.DialOptions(ctx, rawurl, opts...)
if err != nil {
return nil, err
}
return NewClient(c), nil
}
// NewClient creates a client that uses the given RPC client. // NewClient creates a client that uses the given RPC client.
func NewClient(c *rpc.Client) *Client { func NewClient(c *rpc.Client) *Client {
return &Client{c} return &Client{c}
@ -715,6 +725,43 @@ func (ec *Client) SendTransactionConditional(ctx context.Context, tx *types.Tran
return ec.c.CallContext(ctx, nil, "eth_sendRawTransactionConditional", hexutil.Encode(data), opts) return ec.c.CallContext(ctx, nil, "eth_sendRawTransactionConditional", hexutil.Encode(data), opts)
} }
// MevRunning returns whether MEV is running
func (ec *Client) MevRunning(ctx context.Context) (bool, error) {
var result bool
err := ec.c.CallContext(ctx, &result, "mev_running")
return result, err
}
// SendBid sends a bid
func (ec *Client) SendBid(ctx context.Context, args types.BidArgs) (common.Hash, error) {
var hash common.Hash
err := ec.c.CallContext(ctx, &hash, "mev_sendBid", args)
if err != nil {
return common.Hash{}, err
}
return hash, nil
}
// BestBidGasFee returns the gas fee of the best bid for the given parent hash.
func (ec *Client) BestBidGasFee(ctx context.Context, parentHash common.Hash) (*big.Int, error) {
var fee *big.Int
err := ec.c.CallContext(ctx, &fee, "mev_bestBidGasFee", parentHash)
if err != nil {
return nil, err
}
return fee, nil
}
// MevParams returns the static params of mev
func (ec *Client) MevParams(ctx context.Context) (*types.MevParams, error) {
var params types.MevParams
err := ec.c.CallContext(ctx, &params, "mev_params")
if err != nil {
return nil, err
}
return &params, err
}
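A usage sketch for these client helpers, assuming a node built from this branch with the mev namespace enabled over HTTP; the endpoint is a placeholder.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	ec, err := ethclient.Dial("http://127.0.0.1:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer ec.Close()

	ctx := context.Background()

	running, err := ec.MevRunning(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mev running:", running)

	// Bids are keyed by parent hash, so query the best bid for the current head.
	head, err := ec.HeaderByNumber(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	fee, err := ec.BestBidGasFee(ctx, head.Hash())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("best bid gas fee:", fee)
}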
func toBlockNumArg(number *big.Int) string { func toBlockNumArg(number *big.Int) string {
if number == nil { if number == nil {
return "latest" return "latest"

@ -155,11 +155,16 @@ type AncientStater interface {
AncientDatadir() (string, error) AncientDatadir() (string, error)
} }
type StateStoreReader interface {
StateStoreReader() Reader
}
// Reader contains the methods required to read data from both key-value as well as // Reader contains the methods required to read data from both key-value as well as
// immutable ancient data. // immutable ancient data.
type Reader interface { type Reader interface {
KeyValueReader KeyValueReader
AncientReader AncientReader
StateStoreReader
} }
// Writer contains the methods required to write data to both key-value as well as // Writer contains the methods required to write data to both key-value as well as
@ -189,12 +194,18 @@ type DiffStore interface {
SetDiffStore(diff KeyValueStore) SetDiffStore(diff KeyValueStore)
} }
type StateStore interface {
StateStore() Database
SetStateStore(state Database)
}
// Database contains all the methods required by the high level database to not // Database contains all the methods required by the high level database to not
// only access the key-value data store but also the chain freezer. // only access the key-value data store but also the chain freezer.
type Database interface { type Database interface {
Reader Reader
Writer Writer
DiffStore DiffStore
StateStore
Batcher Batcher
Iteratee Iteratee
Stater Stater

@ -94,6 +94,18 @@ func (db *Database) SetDiffStore(diff ethdb.KeyValueStore) {
panic("not supported") panic("not supported")
} }
func (db *Database) StateStore() ethdb.Database {
panic("not supported")
}
func (db *Database) SetStateStore(state ethdb.Database) {
panic("not supported")
}
func (db *Database) StateStoreReader() ethdb.Reader {
return db
}
func (db *Database) ReadAncients(fn func(op ethdb.AncientReaderOp) error) (err error) { func (db *Database) ReadAncients(fn func(op ethdb.AncientReaderOp) error) (err error) {
return fn(db) return fn(db)
} }

4
go.mod

@ -35,7 +35,7 @@ require (
github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46
github.com/gofrs/flock v0.8.1 github.com/gofrs/flock v0.8.1
github.com/golang-jwt/jwt/v4 v4.5.0 github.com/golang-jwt/jwt/v4 v4.5.0
github.com/golang/protobuf v1.5.3 github.com/golang/protobuf v1.5.4
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/google/gofuzz v1.2.0 github.com/google/gofuzz v1.2.0
github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b
@ -282,7 +282,7 @@ require (
google.golang.org/appengine v1.6.7 // indirect google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/grpc v1.56.3 // indirect google.golang.org/grpc v1.56.3 // indirect
google.golang.org/protobuf v1.30.0 // indirect google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apimachinery v0.20.0 // indirect k8s.io/apimachinery v0.20.0 // indirect

8
go.sum

@ -609,8 +609,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@ -2348,8 +2348,8 @@ google.golang.org/protobuf v1.25.1-0.20201208041424-160c7477e0e8/go.mod h1:hFxJC
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610= gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610=
gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI= gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI=

@ -1424,7 +1424,7 @@ func (s *BlockChainAPI) replay(ctx context.Context, block *types.Block, accounts
// GetDiffAccountsWithScope returns detailed changes of some interested accounts in a specific block number. // GetDiffAccountsWithScope returns detailed changes of some interested accounts in a specific block number.
func (s *BlockChainAPI) GetDiffAccountsWithScope(ctx context.Context, blockNr rpc.BlockNumber, accounts []common.Address) (*types.DiffAccountsInBlock, error) { func (s *BlockChainAPI) GetDiffAccountsWithScope(ctx context.Context, blockNr rpc.BlockNumber, accounts []common.Address) (*types.DiffAccountsInBlock, error) {
if s.b.Chain() == nil { if s.b.Chain() == nil {
return nil, fmt.Errorf("blockchain not support get diff accounts") return nil, errors.New("blockchain not support get diff accounts")
} }
block, err := s.b.BlockByNumber(ctx, blockNr) block, err := s.b.BlockByNumber(ctx, blockNr)
@ -1937,7 +1937,7 @@ func (s *TransactionAPI) GetTransactionReceiptsByBlockNumber(ctx context.Context
} }
txs := block.Transactions() txs := block.Transactions()
if len(txs) != len(receipts) { if len(txs) != len(receipts) {
return nil, fmt.Errorf("txs length doesn't equal to receipts' length") return nil, errors.New("txs length doesn't equal to receipts' length")
} }
txReceipts := make([]map[string]interface{}, 0, len(txs)) txReceipts := make([]map[string]interface{}, 0, len(txs))

111
internal/ethapi/api_mev.go Normal file

@ -0,0 +1,111 @@
package ethapi
import (
"context"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
const (
TransferTxGasLimit = 25000
)
// MevAPI implements the interfaces that defined in the BEP-322.
// It offers methods for the interaction between builders and validators.
type MevAPI struct {
b Backend
}
// NewMevAPI creates a new MevAPI.
func NewMevAPI(b Backend) *MevAPI {
return &MevAPI{b}
}
// SendBid receives bid from the builders.
// If mev is not running or bid is invalid, return error.
// Otherwise, it creates a builder bid for the given arguments and submits it to the miner.
func (m *MevAPI) SendBid(ctx context.Context, args *types.BidArgs) (common.Hash, error) {
if !m.b.MevRunning() {
return common.Hash{}, types.ErrMevNotRunning
}
if !m.b.MinerInTurn() {
return common.Hash{}, types.ErrMevNotInTurn
}
var (
rawBid = args.RawBid
currentHeader = m.b.CurrentHeader()
)
if rawBid == nil {
return common.Hash{}, types.NewInvalidBidError("rawBid should not be nil")
}
// only support bidding for the next block, not for future blocks
if rawBid.BlockNumber != currentHeader.Number.Uint64()+1 {
return common.Hash{}, types.NewInvalidBidError("stale block number or block in future")
}
if rawBid.ParentHash != currentHeader.Hash() {
return common.Hash{}, types.NewInvalidBidError(
fmt.Sprintf("non-aligned parent hash: %v", currentHeader.Hash()))
}
if rawBid.GasFee == nil || rawBid.GasFee.Cmp(common.Big0) == 0 || rawBid.GasUsed == 0 {
return common.Hash{}, types.NewInvalidBidError("empty gasFee or empty gasUsed")
}
if rawBid.BuilderFee != nil {
builderFee := rawBid.BuilderFee
if builderFee.Cmp(common.Big0) < 0 {
return common.Hash{}, types.NewInvalidBidError("builder fee should not be less than 0")
}
if builderFee.Cmp(common.Big0) == 0 {
if len(args.PayBidTx) != 0 || args.PayBidTxGasUsed != 0 {
return common.Hash{}, types.NewInvalidPayBidTxError("payBidTx should be nil when builder fee is 0")
}
}
if builderFee.Cmp(rawBid.GasFee) >= 0 {
return common.Hash{}, types.NewInvalidBidError("builder fee must be less than gas fee")
}
if builderFee.Cmp(common.Big0) > 0 {
// payBidTx can be nil when validator and builder take some other settlement
if args.PayBidTxGasUsed > TransferTxGasLimit {
return common.Hash{}, types.NewInvalidBidError(
fmt.Sprintf("transfer tx gas used must be no more than %v", TransferTxGasLimit))
}
if (len(args.PayBidTx) == 0 && args.PayBidTxGasUsed != 0) ||
(len(args.PayBidTx) != 0 && args.PayBidTxGasUsed == 0) {
return common.Hash{}, types.NewInvalidPayBidTxError("non-aligned payBidTx and payBidTxGasUsed")
}
}
} else {
if len(args.PayBidTx) != 0 || args.PayBidTxGasUsed != 0 {
return common.Hash{}, types.NewInvalidPayBidTxError("payBidTx should be nil when builder fee is nil")
}
}
return m.b.SendBid(ctx, args)
}
func (m *MevAPI) BestBidGasFee(_ context.Context, parentHash common.Hash) *big.Int {
return m.b.BestBidGasFee(parentHash)
}
func (m *MevAPI) Params() *types.MevParams {
return m.b.MevParams()
}
// Running returns true if mev is running
func (m *MevAPI) Running() bool {
return m.b.MevRunning()
}

@ -31,6 +31,10 @@ import (
"testing" "testing"
"time" "time"
"github.com/holiman/uint256"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices"
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/accounts/keystore"
@ -52,9 +56,6 @@ import (
"github.com/ethereum/go-ethereum/internal/blocktest" "github.com/ethereum/go-ethereum/internal/blocktest"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/holiman/uint256"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices"
) )
func testTransactionMarshal(t *testing.T, tests []txData, config *params.ChainConfig) { func testTransactionMarshal(t *testing.T, tests []txData, config *params.ChainConfig) {
@ -635,6 +636,23 @@ func (b testBackend) ServiceFilter(ctx context.Context, session *bloombits.Match
panic("implement me") panic("implement me")
} }
func (b *testBackend) MevRunning() bool { return false }
func (b *testBackend) MevParams() *types.MevParams {
return &types.MevParams{}
}
func (b *testBackend) StartMev() {}
func (b *testBackend) StopMev() {}
func (b *testBackend) AddBuilder(builder common.Address, builderUrl string) error { return nil }
func (b *testBackend) RemoveBuilder(builder common.Address) error { return nil }
func (b *testBackend) SendBid(ctx context.Context, bid *types.BidArgs) (common.Hash, error) {
panic("implement me")
}
func (b *testBackend) MinerInTurn() bool { return false }
func (b *testBackend) BestBidGasFee(parentHash common.Hash) *big.Int {
//TODO implement me
panic("implement me")
}
func TestEstimateGas(t *testing.T) { func TestEstimateGas(t *testing.T) {
t.Parallel() t.Parallel()
// Initialize test accounts // Initialize test accounts

@ -101,6 +101,25 @@ type Backend interface {
ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)
SubscribeFinalizedHeaderEvent(ch chan<- core.FinalizedHeaderEvent) event.Subscription SubscribeFinalizedHeaderEvent(ch chan<- core.FinalizedHeaderEvent) event.Subscription
SubscribeNewVoteEvent(chan<- core.NewVoteEvent) event.Subscription SubscribeNewVoteEvent(chan<- core.NewVoteEvent) event.Subscription
// MevRunning returns true if mev is running
MevRunning() bool
// MevParams returns the static params of mev
MevParams() *types.MevParams
// StartMev starts mev
StartMev()
// StopMev stops mev
StopMev()
// AddBuilder adds a builder to the bid simulator.
AddBuilder(builder common.Address, builderUrl string) error
// RemoveBuilder removes a builder from the bid simulator.
RemoveBuilder(builder common.Address) error
// SendBid receives bid from the builders.
SendBid(ctx context.Context, bid *types.BidArgs) (common.Hash, error)
// BestBidGasFee returns the gas fee of the best bid for the given parent hash.
BestBidGasFee(parentHash common.Hash) *big.Int
// MinerInTurn returns true if the validator is in turn to propose the block.
MinerInTurn() bool
} }
func GetAPIs(apiBackend Backend) []rpc.API { func GetAPIs(apiBackend Backend) []rpc.API {
@ -127,6 +146,9 @@ func GetAPIs(apiBackend Backend) []rpc.API {
}, { }, {
Namespace: "personal", Namespace: "personal",
Service: NewPersonalAccountAPI(apiBackend, nonceLock), Service: NewPersonalAccountAPI(apiBackend, nonceLock),
}, {
Namespace: "mev",
Service: NewMevAPI(apiBackend),
}, },
} }
} }

@ -412,3 +412,19 @@ func (b *backendMock) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent)
} }
func (b *backendMock) Engine() consensus.Engine { return nil } func (b *backendMock) Engine() consensus.Engine { return nil }
func (b *backendMock) MevRunning() bool { return false }
func (b *backendMock) MevParams() *types.MevParams {
return &types.MevParams{}
}
func (b *backendMock) StartMev() {}
func (b *backendMock) StopMev() {}
func (b *backendMock) AddBuilder(builder common.Address, builderUrl string) error { return nil }
func (b *backendMock) RemoveBuilder(builder common.Address) error { return nil }
func (b *backendMock) SendBid(ctx context.Context, bid *types.BidArgs) (common.Hash, error) {
panic("implement me")
}
func (b *backendMock) MinerInTurn() bool { return false }
func (b *backendMock) BestBidGasFee(parentHash common.Hash) *big.Int {
panic("implement me")
}

688
miner/bid_simulator.go Normal file

@ -0,0 +1,688 @@
package miner
import (
"context"
"errors"
"fmt"
"math/big"
"net"
"net/http"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/bidutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/miner/builderclient"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
)
const (
// maxBidPerBuilderPerBlock is the max bid number per builder
maxBidPerBuilderPerBlock = 3
commitInterruptBetterBid = 1
// leftOverTimeRate is the rate of left over time to simulate a bid
leftOverTimeRate = 11
// leftOverTimeScale is the scale of left over time to simulate a bid
leftOverTimeScale = 10
)
var (
diffInTurn = big.NewInt(2) // the difficulty of a block that proposed by an in-turn validator
)
var (
dialer = &net.Dialer{
Timeout: time.Second,
KeepAlive: 60 * time.Second,
}
transport = &http.Transport{
DialContext: dialer.DialContext,
MaxIdleConnsPerHost: 50,
MaxConnsPerHost: 50,
IdleConnTimeout: 90 * time.Second,
}
client = &http.Client{
Timeout: 5 * time.Second,
Transport: transport,
}
)
type WorkPreparer interface {
prepareWork(params *generateParams) (*environment, error)
etherbase() common.Address
}
// simBidReq is the request for simulating a bid
type simBidReq struct {
bid *BidRuntime
interruptCh chan int32
}
// bidSimulator is in charge of receiving bids from builders and reporting issues back to them.
// It also takes care of bid simulation, reward computation and best-bid maintenance.
type bidSimulator struct {
config *MevConfig
delayLeftOver time.Duration
chain *core.BlockChain
chainConfig *params.ChainConfig
workPreparer WorkPreparer
running atomic.Bool // controlled by miner
exitCh chan struct{}
bidReceiving atomic.Bool // controlled by config and eth.AdminAPI
chainHeadCh chan core.ChainHeadEvent
chainHeadSub event.Subscription
sentryCli *builderclient.Client
// builder info (warning: only keep status in memory!)
buildersMu sync.RWMutex
builders map[common.Address]*builderclient.Client
// channels
simBidCh chan *simBidReq
newBidCh chan *types.Bid
pendingMu sync.RWMutex
pending map[uint64]map[common.Address]map[common.Hash]struct{} // blockNumber -> builder -> bidHash -> struct{}
bestBidMu sync.RWMutex
bestBid map[common.Hash]*BidRuntime // prevBlockHash -> bidRuntime
simBidMu sync.RWMutex
simulatingBid map[common.Hash]*BidRuntime // prevBlockHash -> bidRuntime, in the process of simulation
}
func newBidSimulator(
config *MevConfig,
delayLeftOver time.Duration,
chainConfig *params.ChainConfig,
chain *core.BlockChain,
workPreparer WorkPreparer,
) *bidSimulator {
b := &bidSimulator{
config: config,
delayLeftOver: delayLeftOver,
chainConfig: chainConfig,
chain: chain,
workPreparer: workPreparer,
exitCh: make(chan struct{}),
chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
builders: make(map[common.Address]*builderclient.Client),
simBidCh: make(chan *simBidReq),
newBidCh: make(chan *types.Bid, 100),
pending: make(map[uint64]map[common.Address]map[common.Hash]struct{}),
bestBid: make(map[common.Hash]*BidRuntime),
simulatingBid: make(map[common.Hash]*BidRuntime),
}
b.chainHeadSub = chain.SubscribeChainHeadEvent(b.chainHeadCh)
if config.Enabled {
b.bidReceiving.Store(true)
b.dialSentryAndBuilders()
if len(b.builders) == 0 {
log.Warn("BidSimulator: no valid builders")
}
}
go b.clearLoop()
go b.mainLoop()
go b.newBidLoop()
return b
}
func (b *bidSimulator) dialSentryAndBuilders() {
var sentryCli *builderclient.Client
var err error
if b.config.SentryURL != "" {
sentryCli, err = builderclient.DialOptions(context.Background(), b.config.SentryURL, rpc.WithHTTPClient(client))
if err != nil {
log.Error("BidSimulator: failed to dial sentry", "url", b.config.SentryURL, "err", err)
}
}
b.sentryCli = sentryCli
for _, v := range b.config.Builders {
_ = b.AddBuilder(v.Address, v.URL)
}
}
func (b *bidSimulator) start() {
b.running.Store(true)
}
func (b *bidSimulator) stop() {
b.running.Store(false)
}
func (b *bidSimulator) close() {
b.running.Store(false)
close(b.exitCh)
}
func (b *bidSimulator) isRunning() bool {
return b.running.Load()
}
func (b *bidSimulator) receivingBid() bool {
return b.bidReceiving.Load()
}
func (b *bidSimulator) startReceivingBid() {
b.dialSentryAndBuilders()
b.bidReceiving.Store(true)
}
func (b *bidSimulator) stopReceivingBid() {
b.bidReceiving.Store(false)
}
func (b *bidSimulator) AddBuilder(builder common.Address, url string) error {
b.buildersMu.Lock()
defer b.buildersMu.Unlock()
if b.sentryCli != nil {
b.builders[builder] = b.sentryCli
} else {
var builderCli *builderclient.Client
if url != "" {
var err error
builderCli, err = builderclient.DialOptions(context.Background(), url, rpc.WithHTTPClient(client))
if err != nil {
log.Error("BidSimulator: failed to dial builder", "url", url, "err", err)
return err
}
}
b.builders[builder] = builderCli
}
return nil
}
func (b *bidSimulator) RemoveBuilder(builder common.Address) error {
b.buildersMu.Lock()
defer b.buildersMu.Unlock()
delete(b.builders, builder)
return nil
}
func (b *bidSimulator) ExistBuilder(builder common.Address) bool {
b.buildersMu.RLock()
defer b.buildersMu.RUnlock()
_, ok := b.builders[builder]
return ok
}
func (b *bidSimulator) SetBestBid(prevBlockHash common.Hash, bid *BidRuntime) {
b.bestBidMu.Lock()
defer b.bestBidMu.Unlock()
b.bestBid[prevBlockHash] = bid
}
func (b *bidSimulator) GetBestBid(prevBlockHash common.Hash) *BidRuntime {
b.bestBidMu.RLock()
defer b.bestBidMu.RUnlock()
return b.bestBid[prevBlockHash]
}
func (b *bidSimulator) SetSimulatingBid(prevBlockHash common.Hash, bid *BidRuntime) {
b.simBidMu.Lock()
defer b.simBidMu.Unlock()
b.simulatingBid[prevBlockHash] = bid
}
func (b *bidSimulator) GetSimulatingBid(prevBlockHash common.Hash) *BidRuntime {
b.simBidMu.RLock()
defer b.simBidMu.RUnlock()
return b.simulatingBid[prevBlockHash]
}
func (b *bidSimulator) RemoveSimulatingBid(prevBlockHash common.Hash) {
b.simBidMu.Lock()
defer b.simBidMu.Unlock()
delete(b.simulatingBid, prevBlockHash)
}
func (b *bidSimulator) mainLoop() {
defer b.chainHeadSub.Unsubscribe()
for {
select {
case req := <-b.simBidCh:
if !b.isRunning() {
continue
}
b.simBid(req.interruptCh, req.bid)
// System stopped
case <-b.exitCh:
return
case <-b.chainHeadSub.Err():
return
}
}
}
func (b *bidSimulator) newBidLoop() {
var (
interruptCh chan int32
)
// commit aborts in-flight bid execution with given signal and resubmits a new one.
commit := func(reason int32, bidRuntime *BidRuntime) {
// if the left time is not enough to do simulation, return
var simDuration time.Duration
if lastBid := b.GetBestBid(bidRuntime.bid.ParentHash); lastBid != nil && lastBid.duration != 0 {
simDuration = lastBid.duration
}
if time.Until(b.bidMustBefore(bidRuntime.bid.ParentHash)) <= simDuration*leftOverTimeRate/leftOverTimeScale {
return
}
if interruptCh != nil {
// each commit work will have its own interruptCh to stop work with a reason
interruptCh <- reason
close(interruptCh)
}
interruptCh = make(chan int32, 1)
select {
case b.simBidCh <- &simBidReq{interruptCh: interruptCh, bid: bidRuntime}:
case <-b.exitCh:
return
}
}
for {
select {
case newBid := <-b.newBidCh:
if !b.isRunning() {
continue
}
// check the block reward and validator reward of the newBid
expectedBlockReward := newBid.GasFee
expectedValidatorReward := new(big.Int).Mul(expectedBlockReward, big.NewInt(int64(b.config.ValidatorCommission)))
expectedValidatorReward.Div(expectedValidatorReward, big.NewInt(10000))
expectedValidatorReward.Sub(expectedValidatorReward, newBid.BuilderFee)
if expectedValidatorReward.Cmp(big.NewInt(0)) < 0 {
// damage self profit, ignore
continue
}
bidRuntime := &BidRuntime{
bid: newBid,
expectedBlockReward: expectedBlockReward,
expectedValidatorReward: expectedValidatorReward,
packedBlockReward: big.NewInt(0),
packedValidatorReward: big.NewInt(0),
}
// TODO(renee-) optimize bid comparison
simulatingBid := b.GetSimulatingBid(newBid.ParentHash)
// simulatingBid is nil means there is no bid in simulation
if simulatingBid == nil {
// bestBid is nil means bid is the first bid
bestBid := b.GetBestBid(newBid.ParentHash)
if bestBid == nil {
commit(commitInterruptBetterBid, bidRuntime)
continue
}
// if bestBid is not nil, check if newBid is better than bestBid
if bidRuntime.expectedBlockReward.Cmp(bestBid.expectedBlockReward) > 0 &&
bidRuntime.expectedValidatorReward.Cmp(bestBid.expectedValidatorReward) > 0 {
// if both reward are better than last simulating newBid, commit for simulation
commit(commitInterruptBetterBid, bidRuntime)
continue
}
continue
}
// simulatingBid must be better than bestBid, if newBid is better than simulatingBid, commit for simulation
if bidRuntime.expectedBlockReward.Cmp(simulatingBid.expectedBlockReward) > 0 &&
bidRuntime.expectedValidatorReward.Cmp(simulatingBid.expectedValidatorReward) > 0 {
// if both reward are better than last simulating newBid, commit for simulation
commit(commitInterruptBetterBid, bidRuntime)
continue
}
case <-b.exitCh:
return
}
}
}
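For concreteness, a worked sketch (illustrative numbers only, not from the diff) of the reward arithmetic used when filtering new bids: the expected validator reward is gasFee * ValidatorCommission / 10000 minus the builder fee, and a negative result causes the bid to be ignored.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	gasFee := big.NewInt(1e18)     // assume 1 BNB of gas fees carried by the bid
	builderFee := big.NewInt(5e15) // assume the builder asks for 0.005 BNB
	commission := int64(100)       // default config: 100 => 1%

	reward := new(big.Int).Mul(gasFee, big.NewInt(commission))
	reward.Div(reward, big.NewInt(10000))
	reward.Sub(reward, builderFee)

	// 1e18 * 100 / 10000 = 1e16, minus 5e15 leaves 5e15 wei for the validator.
	// A builder fee above 1e16 would make this negative and the bid would be dropped.
	fmt.Println(reward)
}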
func (b *bidSimulator) bidMustBefore(parentHash common.Hash) time.Time {
parentHeader := b.chain.GetHeaderByHash(parentHash)
return bidutil.BidMustBefore(parentHeader, b.chainConfig.Parlia.Period, b.delayLeftOver)
}
func (b *bidSimulator) bidBetterBefore(parentHash common.Hash) time.Time {
parentHeader := b.chain.GetHeaderByHash(parentHash)
return bidutil.BidBetterBefore(parentHeader, b.chainConfig.Parlia.Period, b.delayLeftOver, b.config.BidSimulationLeftOver)
}
func (b *bidSimulator) clearLoop() {
clearFn := func(parentHash common.Hash, blockNumber uint64) {
b.pendingMu.Lock()
delete(b.pending, blockNumber)
b.pendingMu.Unlock()
b.bestBidMu.Lock()
if bid, ok := b.bestBid[parentHash]; ok {
bid.env.discard()
}
delete(b.bestBid, parentHash)
for k, v := range b.bestBid {
if v.bid.BlockNumber <= blockNumber-core.TriesInMemory {
v.env.discard()
delete(b.bestBid, k)
}
}
b.bestBidMu.Unlock()
b.simBidMu.Lock()
if bid, ok := b.simulatingBid[parentHash]; ok {
bid.env.discard()
}
delete(b.simulatingBid, parentHash)
for k, v := range b.simulatingBid {
if v.bid.BlockNumber <= blockNumber-core.TriesInMemory {
v.env.discard()
delete(b.simulatingBid, k)
}
}
b.simBidMu.Unlock()
}
for head := range b.chainHeadCh {
if !b.isRunning() {
continue
}
clearFn(head.Block.ParentHash(), head.Block.NumberU64())
}
}
// sendBid checks whether the bid already exists or the builder has sent too many bids;
// if so it returns an error, otherwise it adds the bid to the newBid channel to have its profit judged.
func (b *bidSimulator) sendBid(_ context.Context, bid *types.Bid) error {
timer := time.NewTimer(1 * time.Second)
defer timer.Stop()
select {
case b.newBidCh <- bid:
b.AddPending(bid.BlockNumber, bid.Builder, bid.Hash())
return nil
case <-timer.C:
return types.ErrMevBusy
}
}
func (b *bidSimulator) CheckPending(blockNumber uint64, builder common.Address, bidHash common.Hash) error {
b.pendingMu.Lock()
defer b.pendingMu.Unlock()
// check if bid exists or if builder sends too many bids
if _, ok := b.pending[blockNumber]; !ok {
b.pending[blockNumber] = make(map[common.Address]map[common.Hash]struct{})
}
if _, ok := b.pending[blockNumber][builder]; !ok {
b.pending[blockNumber][builder] = make(map[common.Hash]struct{})
}
if _, ok := b.pending[blockNumber][builder][bidHash]; ok {
return errors.New("bid already exists")
}
if len(b.pending[blockNumber][builder]) >= maxBidPerBuilderPerBlock {
return errors.New("too many bids")
}
return nil
}
func (b *bidSimulator) AddPending(blockNumber uint64, builder common.Address, bidHash common.Hash) {
b.pendingMu.Lock()
defer b.pendingMu.Unlock()
b.pending[blockNumber][builder][bidHash] = struct{}{}
}
// simBid simulates a newBid with txs.
// simBid does not enable state prefetching when committing transactions.
func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
// guard against a stop happening in the interval between sendBid and simBid
if !b.isRunning() || !b.receivingBid() {
return
}
var (
blockNumber = bidRuntime.bid.BlockNumber
parentHash = bidRuntime.bid.ParentHash
builder = bidRuntime.bid.Builder
err error
success bool
)
// ensure simulation exited then start next simulation
b.SetSimulatingBid(parentHash, bidRuntime)
defer func(simStart time.Time) {
logCtx := []any{
"blockNumber", blockNumber,
"parentHash", parentHash,
"builder", builder,
"gasUsed", bidRuntime.bid.GasUsed,
}
if bidRuntime.env != nil {
logCtx = append(logCtx, "gasLimit", bidRuntime.env.header.GasLimit)
if err != nil || !success {
bidRuntime.env.discard()
}
}
if err != nil {
logCtx = append(logCtx, "err", err)
log.Debug("bid simulation failed", logCtx...)
go b.reportIssue(bidRuntime, err)
}
if success {
bidRuntime.duration = time.Since(simStart)
}
b.RemoveSimulatingBid(parentHash)
}(time.Now())
// prepareWork will configure header with a suitable time according to consensus
// prepareWork will start trie prefetching
if bidRuntime.env, err = b.workPreparer.prepareWork(&generateParams{
parentHash: bidRuntime.bid.ParentHash,
coinbase: b.workPreparer.etherbase(),
}); err != nil {
return
}
gasLimit := bidRuntime.env.header.GasLimit
if bidRuntime.env.gasPool == nil {
bidRuntime.env.gasPool = new(core.GasPool).AddGas(gasLimit)
bidRuntime.env.gasPool.SubGas(params.SystemTxsGas)
}
if bidRuntime.bid.GasUsed > bidRuntime.env.gasPool.Gas() {
err = errors.New("gas used exceeds gas limit")
return
}
for _, tx := range bidRuntime.bid.Txs {
select {
case <-interruptCh:
err = errors.New("simulation abort due to better bid arrived")
return
case <-b.exitCh:
err = errors.New("miner exit")
return
default:
}
// Start executing the transaction
bidRuntime.env.state.SetTxContext(tx.Hash(), bidRuntime.env.tcount)
err = bidRuntime.commitTransaction(b.chain, b.chainConfig, tx)
if err != nil {
log.Error("BidSimulator: failed to commit tx", "bidHash", bidRuntime.bid.Hash(), "tx", tx.Hash(), "err", err)
err = fmt.Errorf("invalid tx in bid, %v", err)
return
}
bidRuntime.env.tcount++
}
bidRuntime.packReward(b.config.ValidatorCommission)
// return if the bid is invalid; report the issue to mev-sentry/builder if the simulation is fully done
if !bidRuntime.validReward() {
err = errors.New("reward does not achieve the expectation")
return
}
bestBid := b.GetBestBid(parentHash)
if bestBid == nil {
b.SetBestBid(bidRuntime.bid.ParentHash, bidRuntime)
success = true
return
}
// this is the simplest strategy: best for all the delegators.
if bidRuntime.packedBlockReward.Cmp(bestBid.packedBlockReward) > 0 {
b.SetBestBid(bidRuntime.bid.ParentHash, bidRuntime)
success = true
return
}
}
// reportIssue reports the issue to the mev-sentry
func (b *bidSimulator) reportIssue(bidRuntime *BidRuntime, err error) {
cli := b.builders[bidRuntime.bid.Builder]
if cli != nil {
cli.ReportIssue(context.Background(), &types.BidIssue{
Validator: bidRuntime.env.header.Coinbase,
Builder: bidRuntime.bid.Builder,
Message: err.Error(),
})
}
}
type BidRuntime struct {
bid *types.Bid
env *environment
expectedBlockReward *big.Int
expectedValidatorReward *big.Int
packedBlockReward *big.Int
packedValidatorReward *big.Int
duration time.Duration
}
func (r *BidRuntime) validReward() bool {
return r.packedBlockReward.Cmp(r.expectedBlockReward) >= 0 &&
r.packedValidatorReward.Cmp(r.expectedValidatorReward) >= 0
}
// packReward calculates packedBlockReward and packedValidatorReward
func (r *BidRuntime) packReward(validatorCommission uint64) {
r.packedBlockReward = r.env.state.GetBalance(consensus.SystemAddress).ToBig()
r.packedValidatorReward = new(big.Int).Mul(r.packedBlockReward, big.NewInt(int64(validatorCommission)))
r.packedValidatorReward.Div(r.packedValidatorReward, big.NewInt(10000))
r.packedValidatorReward.Sub(r.packedValidatorReward, r.bid.BuilderFee)
}
func (r *BidRuntime) commitTransaction(chain *core.BlockChain, chainConfig *params.ChainConfig, tx *types.Transaction) error {
var (
env = r.env
snap = env.state.Snapshot()
gp = env.gasPool.Gas()
sc *types.BlobTxSidecar
)
if tx.Type() == types.BlobTxType {
sc = tx.BlobTxSidecar() // assign to the outer sc so the sidecar appended below is not a shadowed nil copy
if sc == nil {
return errors.New("blob transaction without blobs in miner")
}
// Checking against blob gas limit: It's kind of ugly to perform this check here, but there
// isn't really a better place right now. The blob gas limit is checked at block validation time
// and not during execution. This means core.ApplyTransaction will not return an error if the
// tx has too many blobs. So we have to explicitly check it here.
if (env.blobs+len(sc.Blobs))*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock {
return errors.New("max data blobs reached")
}
}
receipt, err := core.ApplyTransaction(chainConfig, chain, &env.coinbase, env.gasPool, env.state, env.header, tx,
&env.header.GasUsed, *chain.GetVMConfig(), core.NewReceiptBloomGenerator())
if err != nil {
env.state.RevertToSnapshot(snap)
env.gasPool.SetGas(gp)
return err
}
if tx.Type() == types.BlobTxType {
env.txs = append(env.txs, tx.WithoutBlobTxSidecar())
env.receipts = append(env.receipts, receipt)
env.sidecars = append(env.sidecars, sc)
env.blobs += len(sc.Blobs)
*env.header.BlobGasUsed += receipt.BlobGasUsed
} else {
env.txs = append(env.txs, tx)
env.receipts = append(env.receipts, receipt)
}
return nil
}

@ -0,0 +1,33 @@
package builderclient
import (
"context"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rpc"
)
// Client defines typed wrappers for the Ethereum RPC API.
type Client struct {
c *rpc.Client
}
// DialOptions creates a new RPC client for the given URL. You can supply any of the
// pre-defined client options to configure the underlying transport.
func DialOptions(ctx context.Context, rawurl string, opts ...rpc.ClientOption) (*Client, error) {
c, err := rpc.DialOptions(ctx, rawurl, opts...)
if err != nil {
return nil, err
}
return newClient(c), nil
}
// newClient creates a client that uses the given RPC client.
func newClient(c *rpc.Client) *Client {
return &Client{c}
}
// ReportIssue reports an issue
func (ec *Client) ReportIssue(ctx context.Context, args *types.BidIssue) error {
return ec.c.CallContext(ctx, nil, "mev_reportIssue", args)
}

@ -56,6 +56,8 @@ type Config struct {
NewPayloadTimeout time.Duration // The maximum time allowance for creating a new payload NewPayloadTimeout time.Duration // The maximum time allowance for creating a new payload
DisableVoteAttestation bool // Whether to skip assembling vote attestation DisableVoteAttestation bool // Whether to skip assembling vote attestation
Mev MevConfig // Mev configuration
} }
// DefaultConfig contains default settings for miner. // DefaultConfig contains default settings for miner.
@ -70,6 +72,8 @@ var DefaultConfig = Config{
Recommit: 3 * time.Second, Recommit: 3 * time.Second,
NewPayloadTimeout: 2 * time.Second, NewPayloadTimeout: 2 * time.Second,
DelayLeftOver: 50 * time.Millisecond, DelayLeftOver: 50 * time.Millisecond,
Mev: DefaultMevConfig,
} }
// Miner creates blocks and searches for proof-of-work values. // Miner creates blocks and searches for proof-of-work values.
@ -82,6 +86,8 @@ type Miner struct {
stopCh chan struct{} stopCh chan struct{}
worker *worker worker *worker
bidSimulator *bidSimulator
wg sync.WaitGroup wg sync.WaitGroup
} }
@ -95,6 +101,10 @@ func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *even
stopCh: make(chan struct{}), stopCh: make(chan struct{}),
worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, false), worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, false),
} }
miner.bidSimulator = newBidSimulator(&config.Mev, config.DelayLeftOver, chainConfig, eth.BlockChain(), miner.worker)
miner.worker.setBestBidFetcher(miner.bidSimulator)
miner.wg.Add(1) miner.wg.Add(1)
go miner.update() go miner.update()
return miner return miner
@ -129,6 +139,7 @@ func (miner *Miner) update() {
case downloader.StartEvent: case downloader.StartEvent:
wasMining := miner.Mining() wasMining := miner.Mining()
miner.worker.stop() miner.worker.stop()
miner.bidSimulator.stop()
canStart = false canStart = false
if wasMining { if wasMining {
// Resume mining after sync was finished // Resume mining after sync was finished
@ -141,6 +152,7 @@ func (miner *Miner) update() {
canStart = true canStart = true
if shouldStart { if shouldStart {
miner.worker.start() miner.worker.start()
miner.bidSimulator.start()
} }
miner.worker.syncing.Store(false) miner.worker.syncing.Store(false)
@ -148,6 +160,7 @@ func (miner *Miner) update() {
canStart = true canStart = true
if shouldStart { if shouldStart {
miner.worker.start() miner.worker.start()
miner.bidSimulator.start()
} }
miner.worker.syncing.Store(false) miner.worker.syncing.Store(false)
@ -157,13 +170,16 @@ func (miner *Miner) update() {
case <-miner.startCh: case <-miner.startCh:
if canStart { if canStart {
miner.worker.start() miner.worker.start()
miner.bidSimulator.start()
} }
shouldStart = true shouldStart = true
case <-miner.stopCh: case <-miner.stopCh:
shouldStart = false shouldStart = false
miner.worker.stop() miner.worker.stop()
miner.bidSimulator.stop()
case <-miner.exitCh: case <-miner.exitCh:
miner.worker.close() miner.worker.close()
miner.bidSimulator.close()
return return
} }
} }
@ -186,6 +202,10 @@ func (miner *Miner) Mining() bool {
return miner.worker.isRunning() return miner.worker.isRunning()
} }
func (miner *Miner) InTurn() bool {
return miner.worker.inTurn()
}
func (miner *Miner) Hashrate() uint64 { func (miner *Miner) Hashrate() uint64 {
if pow, ok := miner.engine.(consensus.PoW); ok { if pow, ok := miner.engine.(consensus.PoW); ok {
return uint64(pow.Hashrate()) return uint64(pow.Hashrate())

111
miner/miner_mev.go Normal file

@ -0,0 +1,111 @@
package miner
import (
"context"
"fmt"
"math/big"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
type BuilderConfig struct {
Address common.Address
URL string
}
type MevConfig struct {
Enabled bool // Whether to enable Mev or not
SentryURL string // The url of Mev sentry
Builders []BuilderConfig // The list of builders
ValidatorCommission uint64 // commission in basis points: 100 means 1%
BidSimulationLeftOver time.Duration
}
var DefaultMevConfig = MevConfig{
Enabled: false,
SentryURL: "",
Builders: nil,
ValidatorCommission: 100,
BidSimulationLeftOver: 50 * time.Millisecond,
}
// MevRunning returns true if MEV is running.
func (miner *Miner) MevRunning() bool {
return miner.bidSimulator.isRunning() && miner.bidSimulator.receivingBid()
}
// StartMev starts mev.
func (miner *Miner) StartMev() {
miner.bidSimulator.startReceivingBid()
}
// StopMev stops mev.
func (miner *Miner) StopMev() {
miner.bidSimulator.stopReceivingBid()
}
// AddBuilder adds a builder to the bid simulator.
func (miner *Miner) AddBuilder(builder common.Address, url string) error {
return miner.bidSimulator.AddBuilder(builder, url)
}
// RemoveBuilder removes a builder from the bid simulator.
func (miner *Miner) RemoveBuilder(builderAddr common.Address) error {
return miner.bidSimulator.RemoveBuilder(builderAddr)
}
func (miner *Miner) SendBid(ctx context.Context, bidArgs *types.BidArgs) (common.Hash, error) {
builder, err := bidArgs.EcrecoverSender()
if err != nil {
return common.Hash{}, types.NewInvalidBidError(fmt.Sprintf("invalid signature:%v", err))
}
if !miner.bidSimulator.ExistBuilder(builder) {
return common.Hash{}, types.NewInvalidBidError("builder is not registered")
}
err = miner.bidSimulator.CheckPending(bidArgs.RawBid.BlockNumber, builder, bidArgs.RawBid.Hash())
if err != nil {
return common.Hash{}, err
}
signer := types.MakeSigner(miner.worker.chainConfig, big.NewInt(int64(bidArgs.RawBid.BlockNumber)), uint64(time.Now().Unix()))
bid, err := bidArgs.ToBid(builder, signer)
if err != nil {
return common.Hash{}, types.NewInvalidBidError(fmt.Sprintf("fail to convert bidArgs to bid, %v", err))
}
bidBetterBefore := miner.bidSimulator.bidBetterBefore(bidArgs.RawBid.ParentHash)
timeout := time.Until(bidBetterBefore)
if timeout <= 0 {
return common.Hash{}, fmt.Errorf("too late, expected befor %s, appeared %s later", bidBetterBefore,
common.PrettyDuration(-timeout))
}
err = miner.bidSimulator.sendBid(ctx, bid)
if err != nil {
return common.Hash{}, err
}
return bid.Hash(), nil
}
func (miner *Miner) BestPackedBlockReward(parentHash common.Hash) *big.Int {
bidRuntime := miner.bidSimulator.GetBestBid(parentHash)
if bidRuntime == nil {
return big.NewInt(0)
}
return bidRuntime.packedBlockReward
}
func (miner *Miner) MevParams() *types.MevParams {
return &types.MevParams{
ValidatorCommission: miner.worker.config.Mev.ValidatorCommission,
BidSimulationLeftOver: miner.worker.config.Mev.BidSimulationLeftOver,
}
}
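To make the commission arithmetic concrete, here is a hedged configuration sketch: ValidatorCommission is expressed in basis points (100 = 1%), and the endpoint URLs and builder address below are placeholders rather than values taken from this diff.

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/miner"
)

func main() {
	cfg := miner.DefaultConfig // start from the stock miner settings
	cfg.Mev = miner.MevConfig{
		Enabled:   true,
		SentryURL: "http://mev-sentry.example.org", // placeholder sentry endpoint
		Builders: []miner.BuilderConfig{
			{
				Address: common.HexToAddress("0x0000000000000000000000000000000000000042"), // placeholder builder
				URL:     "http://builder.example.org",
			},
		},
		ValidatorCommission:   100,                   // 100 basis points: the validator keeps 1% of the block reward
		BidSimulationLeftOver: 50 * time.Millisecond, // same value as DefaultMevConfig
	}
	fmt.Printf("validator keeps %.2f%% of bid rewards\n", float64(cfg.Mev.ValidatorCommission)/100)
}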

@ -183,7 +183,6 @@ func makeGenesis(faucets []*ecdsa.PrivateKey, sealers []*ecdsa.PrivateKey) *core
func makeSealer(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) { func makeSealer(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) {
// Define the basic configurations for the Ethereum node // Define the basic configurations for the Ethereum node
datadir, _ := os.MkdirTemp("", "") datadir, _ := os.MkdirTemp("", "")
config := &node.Config{ config := &node.Config{
Name: "geth", Name: "geth",
Version: params.Version, Version: params.Version,

@ -24,6 +24,9 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
lru "github.com/hashicorp/golang-lru"
"github.com/holiman/uint256"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/consensus/misc/eip1559"
@ -40,8 +43,6 @@ import (
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
lru "github.com/hashicorp/golang-lru"
"github.com/holiman/uint256"
) )
const ( const (
@ -162,9 +163,14 @@ type getWorkReq struct {
result chan *newPayloadResult // non-blocking channel result chan *newPayloadResult // non-blocking channel
} }
type bidFetcher interface {
GetBestBid(parentHash common.Hash) *BidRuntime
}
// worker is the main object which takes care of submitting new work to consensus engine // worker is the main object which takes care of submitting new work to consensus engine
// and gathering the sealing result. // and gathering the sealing result.
type worker struct { type worker struct {
bidFetcher bidFetcher
prefetcher core.Prefetcher prefetcher core.Prefetcher
config *Config config *Config
chainConfig *params.ChainConfig chainConfig *params.ChainConfig
@ -289,9 +295,14 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus
if init { if init {
worker.startCh <- struct{}{} worker.startCh <- struct{}{}
} }
return worker return worker
} }
func (w *worker) setBestBidFetcher(fetcher bidFetcher) {
w.bidFetcher = fetcher
}
// setEtherbase sets the etherbase used to initialize the block coinbase field. // setEtherbase sets the etherbase used to initialize the block coinbase field.
func (w *worker) setEtherbase(addr common.Address) { func (w *worker) setEtherbase(addr common.Address) {
w.mu.Lock() w.mu.Lock()
@ -946,7 +957,7 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
if genParams.parentHash != (common.Hash{}) { if genParams.parentHash != (common.Hash{}) {
block := w.chain.GetBlockByHash(genParams.parentHash) block := w.chain.GetBlockByHash(genParams.parentHash)
if block == nil { if block == nil {
return nil, fmt.Errorf("missing parent") return nil, errors.New("missing parent")
} }
parent = block.Header() parent = block.Header()
} }
@ -994,7 +1005,9 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
} }
header.BlobGasUsed = new(uint64) header.BlobGasUsed = new(uint64)
header.ExcessBlobGas = &excessBlobGas header.ExcessBlobGas = &excessBlobGas
header.ParentBeaconRoot = genParams.beaconRoot if w.chainConfig.Parlia == nil {
header.ParentBeaconRoot = genParams.beaconRoot
}
} }
// Run the consensus preparation with the default or customized consensus engine. // Run the consensus preparation with the default or customized consensus engine.
if err := w.engine.Prepare(w.chain, header); err != nil { if err := w.engine.Prepare(w.chain, header); err != nil {
@ -1280,6 +1293,24 @@ LOOP:
bestReward = balance bestReward = balance
} }
} }
// When out of turn, keep the locally built bestWork so bundles do not leak.
// When in turn, compare the local block against the best remote bid.
if w.bidFetcher != nil && bestWork.header.Difficulty.Cmp(diffInTurn) == 0 {
bestBid := w.bidFetcher.GetBestBid(bestWork.header.ParentHash)
if bestBid != nil && bestReward.CmpBig(bestBid.packedBlockReward) < 0 {
// localValidatorReward is the validator's own share of the reward from the locally built block.
localValidatorReward := new(uint256.Int).Mul(bestReward, uint256.NewInt(w.config.Mev.ValidatorCommission))
localValidatorReward.Div(localValidatorReward, uint256.NewInt(10000))
// Switch to the remote bid only if both the total block reward (which benefits delegators) and the validator's own reward are higher.
if localValidatorReward.CmpBig(bestBid.packedValidatorReward) < 0 {
bestWork = bestBid.env
}
}
}
w.commit(bestWork, w.fullTaskHook, true, start) w.commit(bestWork, w.fullTaskHook, true, start)
// Swap out the old work with the new one, terminating any leftover // Swap out the old work with the new one, terminating any leftover
@ -1290,6 +1321,12 @@ LOOP:
w.current = bestWork w.current = bestWork
} }
// inTurn reports whether the worker's etherbase is the next in-turn validator.
func (w *worker) inTurn() bool {
validator, _ := w.engine.NextInTurnValidator(w.chain, w.chain.CurrentBlock())
return validator != common.Address{} && validator == w.etherbase()
}
// commit runs any post-transaction state modifications, assembles the final block // commit runs any post-transaction state modifications, assembles the final block
// and commits new work if consensus engine is running. // and commits new work if consensus engine is running.
// Note the assumption is held that the mutation is allowed to the passed env, do // Note the assumption is held that the mutation is allowed to the passed env, do

@ -786,10 +786,30 @@ func (n *Node) OpenAndMergeDatabase(name string, cache, handles int, freezer, di
if persistDiff { if persistDiff {
chainDataHandles = handles * chainDataHandlesPercentage / 100 chainDataHandles = handles * chainDataHandlesPercentage / 100
} }
var statediskdb ethdb.Database
var err error
// Open the separated state database if the state directory exists
if n.IsSeparatedDB() {
// Allocate half of the handles and cache to the separate state database
statediskdb, err = n.OpenDatabaseWithFreezer(name+"/state", cache/2, chainDataHandles/2, "", "eth/db/statedata/", readonly, false, false, pruneAncientData)
if err != nil {
return nil, err
}
// Trim the cache and handles kept by the chain database, since trie data is no longer stored in it.
cache = int(float64(cache) * 0.6)
chainDataHandles = int(float64(chainDataHandles) * 0.6)
}
chainDB, err := n.OpenDatabaseWithFreezer(name, cache, chainDataHandles, freezer, namespace, readonly, false, false, pruneAncientData) chainDB, err := n.OpenDatabaseWithFreezer(name, cache, chainDataHandles, freezer, namespace, readonly, false, false, pruneAncientData)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if statediskdb != nil {
chainDB.SetStateStore(statediskdb)
}
if persistDiff { if persistDiff {
diffStore, err := n.OpenDiffDatabase(name, handles-chainDataHandles, diff, namespace, readonly) diffStore, err := n.OpenDiffDatabase(name, handles-chainDataHandles, diff, namespace, readonly)
if err != nil { if err != nil {
@ -837,6 +857,16 @@ func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient,
return db, err return db, err
} }
// IsSeparatedDB reports whether the chaindata directory contains a separate state subdirectory, i.e. trie data lives in its own database
func (n *Node) IsSeparatedDB() bool {
separateDir := filepath.Join(n.ResolvePath("chaindata"), "state")
fileInfo, err := os.Stat(separateDir)
if err != nil {
// Covers the not-exist case as well as any other stat failure, so fileInfo is never dereferenced when nil.
return false
}
return fileInfo.IsDir()
}
func (n *Node) OpenDiffDatabase(name string, handles int, diff, namespace string, readonly bool) (*leveldb.Database, error) { func (n *Node) OpenDiffDatabase(name string, handles int, diff, namespace string, readonly bool) (*leveldb.Database, error) {
n.lock.Lock() n.lock.Lock()
defer n.lock.Unlock() defer n.lock.Unlock()
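Putting the pieces of this hunk together, a hedged sketch of how the separated layout gets wired: the state store is opened from the chaindata/state subdirectory with half of the cache and handles, the chain database keeps 60% of the budget, and the two are linked with SetStateStore. The helper name and the namespace strings are assumptions (the latter follow the conventional geth metrics namespaces and may differ).

package main

import (
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/node"
)

// openSeparatedChainDB is a hypothetical helper mirroring the branch of
// OpenAndMergeDatabase above that runs when chaindata/state exists.
func openSeparatedChainDB(stack *node.Node, cache, handles int) (ethdb.Database, error) {
	// Half of the budget goes to the dedicated state database.
	stateDB, err := stack.OpenDatabaseWithFreezer("chaindata/state", cache/2, handles/2, "", "eth/db/statedata/", false, false, false, false)
	if err != nil {
		return nil, err
	}
	// The chain database keeps 60% of the original budget, since it no
	// longer stores trie data.
	chainDB, err := stack.OpenDatabaseWithFreezer("chaindata", int(float64(cache)*0.6), int(float64(handles)*0.6), "", "eth/db/chaindata/", false, false, false, false)
	if err != nil {
		return nil, err
	}
	// Attach the state store so trie reads and writes are routed to it.
	chainDB.SetStateStore(stateDB)
	return chainDB, nil
}

func main() {
	// Wiring requires a live node.Node, so this sketch only defines the helper.
}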

@ -19,6 +19,7 @@ package node
import ( import (
"compress/gzip" "compress/gzip"
"context" "context"
"errors"
"fmt" "fmt"
"io" "io"
"net" "net"
@ -299,7 +300,7 @@ func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig) error {
defer h.mu.Unlock() defer h.mu.Unlock()
if h.rpcAllowed() { if h.rpcAllowed() {
return fmt.Errorf("JSON-RPC over HTTP is already enabled") return errors.New("JSON-RPC over HTTP is already enabled")
} }
// Create RPC server and handler. // Create RPC server and handler.

@ -44,7 +44,7 @@ func init() {
} }
} }
// meteredConn is a wrapper around a net.UDPConn that meters both the // meteredUdpConn is a wrapper around a net.UDPConn that meters both the
// inbound and outbound network traffic. // inbound and outbound network traffic.
type meteredUdpConn struct { type meteredUdpConn struct {
UDPConn UDPConn

@ -26,6 +26,7 @@ import (
"context" "context"
crand "crypto/rand" crand "crypto/rand"
"encoding/binary" "encoding/binary"
"errors"
"fmt" "fmt"
mrand "math/rand" mrand "math/rand"
"net" "net"
@ -383,7 +384,7 @@ func (tab *Table) doRevalidate(done chan<- struct{}) {
if tab.enrFilter != nil { if tab.enrFilter != nil {
if !tab.enrFilter(n.Record()) { if !tab.enrFilter(n.Record()) {
tab.log.Trace("ENR record filter out", "id", last.ID(), "addr", last.addr()) tab.log.Trace("ENR record filter out", "id", last.ID(), "addr", last.addr())
err = fmt.Errorf("filtered node") err = errors.New("filtered node")
} }
} }
last = &node{Node: *n, addedAt: last.addedAt, livenessChecks: last.livenessChecks} last = &node{Node: *n, addedAt: last.addedAt, livenessChecks: last.livenessChecks}

@ -365,7 +365,7 @@ func (t *UDPv4) RequestENR(n *enode.Node) (*enode.Node, error) {
return nil, err return nil, err
} }
if respN.ID() != n.ID() { if respN.ID() != n.ID() {
return nil, fmt.Errorf("invalid ID in response record") return nil, errors.New("invalid ID in response record")
} }
if respN.Seq() < n.Seq() { if respN.Seq() < n.Seq() {
return n, nil // response record is older return n, nil // response record is older

@ -442,7 +442,7 @@ func (t *UDPv5) verifyResponseNode(c *callV5, r *enr.Record, distances []uint, s
} }
} }
if _, ok := seen[node.ID()]; ok { if _, ok := seen[node.ID()]; ok {
return nil, fmt.Errorf("duplicate record") return nil, errors.New("duplicate record")
} }
seen[node.ID()] = struct{}{} seen[node.ID()] = struct{}{}
return node, nil return node, nil

@ -367,11 +367,11 @@ func (c *Codec) makeHandshakeAuth(toID enode.ID, addr string, challenge *Whoarey
// key is part of the ID nonce signature. // key is part of the ID nonce signature.
var remotePubkey = new(ecdsa.PublicKey) var remotePubkey = new(ecdsa.PublicKey)
if err := challenge.Node.Load((*enode.Secp256k1)(remotePubkey)); err != nil { if err := challenge.Node.Load((*enode.Secp256k1)(remotePubkey)); err != nil {
return nil, nil, fmt.Errorf("can't find secp256k1 key for recipient") return nil, nil, errors.New("can't find secp256k1 key for recipient")
} }
ephkey, err := c.sc.ephemeralKeyGen() ephkey, err := c.sc.ephemeralKeyGen()
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("can't generate ephemeral key") return nil, nil, errors.New("can't generate ephemeral key")
} }
ephpubkey := EncodePubkey(&ephkey.PublicKey) ephpubkey := EncodePubkey(&ephkey.PublicKey)
auth.pubkey = ephpubkey[:] auth.pubkey = ephpubkey[:]
@ -395,7 +395,7 @@ func (c *Codec) makeHandshakeAuth(toID enode.ID, addr string, challenge *Whoarey
// Create session keys. // Create session keys.
sec := deriveKeys(sha256.New, ephkey, remotePubkey, c.localnode.ID(), challenge.Node.ID(), cdata) sec := deriveKeys(sha256.New, ephkey, remotePubkey, c.localnode.ID(), challenge.Node.ID(), cdata)
if sec == nil { if sec == nil {
return nil, nil, fmt.Errorf("key derivation failed") return nil, nil, errors.New("key derivation failed")
} }
return auth, sec, err return auth, sec, err
} }

@ -191,7 +191,7 @@ func (c *Client) resolveEntry(ctx context.Context, domain, hash string) (entry,
func (c *Client) doResolveEntry(ctx context.Context, domain, hash string) (entry, error) { func (c *Client) doResolveEntry(ctx context.Context, domain, hash string) (entry, error) {
wantHash, err := b32format.DecodeString(hash) wantHash, err := b32format.DecodeString(hash)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid base32 hash") return nil, errors.New("invalid base32 hash")
} }
name := hash + "." + domain name := hash + "." + domain
txts, err := c.cfg.Resolver.LookupTXT(ctx, hash+"."+domain) txts, err := c.cfg.Resolver.LookupTXT(ctx, hash+"."+domain)

@ -21,6 +21,7 @@ import (
"crypto/ecdsa" "crypto/ecdsa"
"encoding/base32" "encoding/base32"
"encoding/base64" "encoding/base64"
"errors"
"fmt" "fmt"
"io" "io"
"strings" "strings"
@ -341,7 +342,7 @@ func parseLinkEntry(e string) (entry, error) {
func parseLink(e string) (*linkEntry, error) { func parseLink(e string) (*linkEntry, error) {
if !strings.HasPrefix(e, linkPrefix) { if !strings.HasPrefix(e, linkPrefix) {
return nil, fmt.Errorf("wrong/missing scheme 'enrtree' in URL") return nil, errors.New("wrong/missing scheme 'enrtree' in URL")
} }
e = e[len(linkPrefix):] e = e[len(linkPrefix):]

@ -18,7 +18,7 @@ package enode
import ( import (
"crypto/ecdsa" "crypto/ecdsa"
"fmt" "errors"
"io" "io"
"github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/math"
@ -67,7 +67,7 @@ func (V4ID) Verify(r *enr.Record, sig []byte) error {
if err := r.Load(&entry); err != nil { if err := r.Load(&entry); err != nil {
return err return err
} else if len(entry) != 33 { } else if len(entry) != 33 {
return fmt.Errorf("invalid public key") return errors.New("invalid public key")
} }
h := sha3.NewLegacyKeccak256() h := sha3.NewLegacyKeccak256()

@ -17,6 +17,7 @@
package nat package nat
import ( import (
"errors"
"fmt" "fmt"
"net" "net"
"strings" "strings"
@ -47,7 +48,7 @@ func (n *pmp) ExternalIP() (net.IP, error) {
func (n *pmp) AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) (uint16, error) { func (n *pmp) AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) (uint16, error) {
if lifetime <= 0 { if lifetime <= 0 {
return 0, fmt.Errorf("lifetime must not be <= 0") return 0, errors.New("lifetime must not be <= 0")
} }
// Note order of port arguments is switched between our // Note order of port arguments is switched between our
// AddMapping and the client's AddPortMapping. // AddMapping and the client's AddPortMapping.

@ -712,7 +712,7 @@ func formatPrimitiveValue(encType string, encValue interface{}) (string, error)
func (t Types) validate() error { func (t Types) validate() error {
for typeKey, typeArr := range t { for typeKey, typeArr := range t {
if len(typeKey) == 0 { if len(typeKey) == 0 {
return fmt.Errorf("empty type key") return errors.New("empty type key")
} }
for i, typeObj := range typeArr { for i, typeObj := range typeArr {
if len(typeObj.Type) == 0 { if len(typeObj.Type) == 0 {

@ -556,7 +556,7 @@ func runRandTest(rt randTest) error {
checktr.MustUpdate(it.Key, it.Value) checktr.MustUpdate(it.Key, it.Value)
} }
if tr.Hash() != checktr.Hash() { if tr.Hash() != checktr.Hash() {
rt[i].err = fmt.Errorf("hash mismatch in opItercheckhash") rt[i].err = errors.New("hash mismatch in opItercheckhash")
} }
case opNodeDiff: case opNodeDiff:
var ( var (
@ -594,19 +594,19 @@ func runRandTest(rt randTest) error {
} }
} }
if len(insertExp) != len(tr.tracer.inserts) { if len(insertExp) != len(tr.tracer.inserts) {
rt[i].err = fmt.Errorf("insert set mismatch") rt[i].err = errors.New("insert set mismatch")
} }
if len(deleteExp) != len(tr.tracer.deletes) { if len(deleteExp) != len(tr.tracer.deletes) {
rt[i].err = fmt.Errorf("delete set mismatch") rt[i].err = errors.New("delete set mismatch")
} }
for insert := range tr.tracer.inserts { for insert := range tr.tracer.inserts {
if _, present := insertExp[insert]; !present { if _, present := insertExp[insert]; !present {
rt[i].err = fmt.Errorf("missing inserted node") rt[i].err = errors.New("missing inserted node")
} }
} }
for del := range tr.tracer.deletes { for del := range tr.tracer.deletes {
if _, present := deleteExp[del]; !present { if _, present := deleteExp[del]; !present {
rt[i].err = fmt.Errorf("missing deleted node") rt[i].err = errors.New("missing deleted node")
} }
} }
} }

@ -93,6 +93,12 @@ type Database struct {
// the legacy hash-based scheme is used by default. // the legacy hash-based scheme is used by default.
func NewDatabase(diskdb ethdb.Database, config *Config) *Database { func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
// Sanitize the config and use the default one if it's not specified. // Sanitize the config and use the default one if it's not specified.
var triediskdb ethdb.Database
if diskdb != nil && diskdb.StateStore() != nil {
triediskdb = diskdb.StateStore()
} else {
triediskdb = diskdb
}
dbScheme := rawdb.ReadStateScheme(diskdb) dbScheme := rawdb.ReadStateScheme(diskdb)
if config == nil { if config == nil {
if dbScheme == rawdb.PathScheme { if dbScheme == rawdb.PathScheme {
@ -112,11 +118,11 @@ func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
} }
var preimages *preimageStore var preimages *preimageStore
if config.Preimages { if config.Preimages {
preimages = newPreimageStore(diskdb) preimages = newPreimageStore(triediskdb)
} }
db := &Database{ db := &Database{
config: config, config: config,
diskdb: diskdb, diskdb: triediskdb,
preimages: preimages, preimages: preimages,
} }
/* /*
@ -125,20 +131,20 @@ func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
* 3. Last, use the default scheme, namely hash scheme * 3. Last, use the default scheme, namely hash scheme
*/ */
if config.HashDB != nil { if config.HashDB != nil {
if rawdb.ReadStateScheme(diskdb) == rawdb.PathScheme { if rawdb.ReadStateScheme(triediskdb) == rawdb.PathScheme {
log.Warn("incompatible state scheme", "old", rawdb.PathScheme, "new", rawdb.HashScheme) log.Warn("incompatible state scheme", "old", rawdb.PathScheme, "new", rawdb.HashScheme)
} }
db.backend = hashdb.New(diskdb, config.HashDB, trie.MerkleResolver{}) db.backend = hashdb.New(triediskdb, config.HashDB, trie.MerkleResolver{})
} else if config.PathDB != nil { } else if config.PathDB != nil {
if rawdb.ReadStateScheme(diskdb) == rawdb.HashScheme { if rawdb.ReadStateScheme(triediskdb) == rawdb.HashScheme {
log.Warn("incompatible state scheme", "old", rawdb.HashScheme, "new", rawdb.PathScheme) log.Warn("incompatible state scheme", "old", rawdb.HashScheme, "new", rawdb.PathScheme)
} }
db.backend = pathdb.New(diskdb, config.PathDB) db.backend = pathdb.New(triediskdb, config.PathDB)
} else if strings.Compare(dbScheme, rawdb.PathScheme) == 0 { } else if strings.Compare(dbScheme, rawdb.PathScheme) == 0 {
if config.PathDB == nil { if config.PathDB == nil {
config.PathDB = pathdb.Defaults config.PathDB = pathdb.Defaults
} }
db.backend = pathdb.New(diskdb, config.PathDB) db.backend = pathdb.New(triediskdb, config.PathDB)
} else { } else {
var resolver hashdb.ChildResolver var resolver hashdb.ChildResolver
if config.IsVerkle { if config.IsVerkle {
@ -150,7 +156,7 @@ func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
if config.HashDB == nil { if config.HashDB == nil {
config.HashDB = hashdb.Defaults config.HashDB = hashdb.Defaults
} }
db.backend = hashdb.New(diskdb, config.HashDB, resolver) db.backend = hashdb.New(triediskdb, config.HashDB, resolver)
} }
return db return db
} }
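The net effect of this hunk is a simple preference rule that could be read as a small helper (the package and function names below are hypothetical, for illustration only): use the attached state store for trie nodes and preimages when one exists, otherwise fall back to the main database.

package triedbutil

import "github.com/ethereum/go-ethereum/ethdb"

// resolveTrieDiskDB mirrors the selection at the top of NewDatabase above:
// prefer the separated state store, otherwise use the main database.
func resolveTrieDiskDB(diskdb ethdb.Database) ethdb.Database {
	if diskdb != nil && diskdb.StateStore() != nil {
		return diskdb.StateStore()
	}
	return diskdb
}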

@ -215,7 +215,7 @@ func (m *meta) encode() []byte {
// decode unpacks the meta object from byte stream. // decode unpacks the meta object from byte stream.
func (m *meta) decode(blob []byte) error { func (m *meta) decode(blob []byte) error {
if len(blob) < 1 { if len(blob) < 1 {
return fmt.Errorf("no version tag") return errors.New("no version tag")
} }
switch blob[0] { switch blob[0] {
case stateHistoryVersion: case stateHistoryVersion: