Compare commits

...

24 Commits

Author SHA1 Message Date
Roshan
65baf18031 chore: update the contracts' byte code of Feynman upgrade (#2175)
* chore: update the contracts' byte code of Feynman upgrade

* add `OverrideFeynman`
2024-01-22 13:01:16 +08:00
Roshan
13f17f2970 fix: adjusted the timing of UpgradeBuildInSystemContract (#2166)
* fix: adjusted the timing of `UpgradeBuildInSystemContract`

* fix review comments

* fix review comments

* fix review comments

* fix review comments

* add `UpgradeBuildInSystemContract` to `traceBlock`

* add `UpgradeBuildInSystemContract` to `traceBlock`

* add `UpgradeBuildInSystemContract` to all trace functions
2024-01-22 12:15:05 +08:00
Roshan
7ade1d2a5d fix: compare headers' number in Big.Int rather than Uint64 (#2158) 2024-01-15 10:54:54 +08:00
Roshan
e2e2ac750c chore: fix lint issue (#2139) 2024-01-08 12:06:27 +08:00
zjubfd
85c6750592 Merge pull request #2138 from Pythonberg1997/bc-fusion
chore: merge with develop branch
2024-01-08 11:49:07 +08:00
Roshan
7466f0a075 Merge branch 'develop' into bc-fusion
# Conflicts:
#	core/systemcontracts/upgrade.go
#	params/config.go
2024-01-08 11:37:58 +08:00
Roshan
64a57a3e2a chore: update abi and bytecode (#2137) 2024-01-08 11:15:14 +08:00
Roshan
e5822640c6 chore: add SystemTxsGas for Feynman upgrade (#2133) 2024-01-04 20:18:45 +08:00
Roshan
432085ea62 fix: avoid update validators on Feynman block (#2102) 2023-12-25 11:20:50 +08:00
zjubfd
aab5ad94b8 Merge pull request #2096 from Pythonberg1997/bc-fusion
chore: merge with develop branch
2023-12-21 16:30:35 +08:00
Roshan
c0df5e7000 chore: add new byte code of TokenHub and GovHub to Feynman upgrade 2023-12-21 16:15:10 +08:00
Roshan
167da21801 Merge branch 'develop' into merge
# Conflicts:
#	params/config.go
2023-12-21 16:14:09 +08:00
Roshan
08f75ca23f chore: update system contracts' byte code (#2093) 2023-12-20 17:17:43 +08:00
Roshan
6685f68995 chore: fix CI issues (#2085) 2023-12-19 09:46:15 +08:00
Roshan
d4f7313760 chore: remove unused code (#2061) 2023-12-13 15:53:36 +08:00
Roshan
94b68156c8 chore: update logic of determining breathe block and returned data of verifyDoubleSignEvidence (#2056) 2023-12-12 17:24:43 +08:00
Roshan
ab8793ae9f fix: bug in determining breathe block (#2055) 2023-12-12 14:21:30 +08:00
Roshan
6744d7c15f chore: modify breath block interval for test (#2054) 2023-12-11 17:12:42 +08:00
zjubfd
3414e5672a Merge pull request #2048 from Pythonberg1997/bc-fusion
chore: merge with develop branch
2023-12-08 16:26:32 +08:00
Roshan
8f3c525adc chore: resolve merge conflicts and fix review comments 2023-12-08 16:20:30 +08:00
Roshan
3e9e6423c0 Merge remote-tracking branch 'BNBChain/develop' into bc-fusion
# Conflicts:
#	cmd/geth/blsaccountcmd.go
#	params/config.go
2023-12-07 18:25:45 +08:00
Roshan
5743b067ba feat: add generate-proof to geth cmd (#2028)
* feat: add `generate-proof` to geth cmd

* chore: rename variable
2023-12-05 11:46:54 +08:00
Roshan
d3f882d799 chore: update contracts code (#2024) 2023-12-04 10:43:23 +08:00
Roshan
030e41607e feat: add new fork block and precompile contract for BEP294 and BEP299 (#1874) 2023-11-30 19:36:54 +08:00
34 changed files with 5520 additions and 1854 deletions

View File

@@ -7,7 +7,7 @@ on:
- develop
pull_request:
branches:
branches:
- master
- develop
@@ -47,5 +47,3 @@ jobs:
run: |
go mod download
make geth

View File

@@ -7,7 +7,7 @@ on:
- develop
pull_request:
branches:
branches:
- master
- develop

View File

@@ -7,7 +7,7 @@ on:
- develop
pull_request:
branches:
branches:
- master
- develop

View File

@@ -7,7 +7,7 @@ on:
- develop
pull_request:
branches:
branches:
- master
- develop
@@ -44,7 +44,7 @@ jobs:
${{ runner.os }}-go-
- run: |
go mod download
go mod tidy
- name: golangci-lint
uses: golangci/golangci-lint-action@v3

View File

@@ -7,7 +7,7 @@ on:
- develop
pull_request:
branches:
branches:
- master
- develop
@@ -52,4 +52,3 @@ jobs:
git submodule update --init --depth 1 --recursive
go mod download
make test

View File

@@ -2127,7 +2127,7 @@ func TestGolangBindings(t *testing.T) {
t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out)
}
replacer = exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/tendermint/tendermint@v0.0.0", "-replace", "github.com/tendermint/tendermint=github.com/bnb-chain/tendermint@v0.31.15") // Repo root
replacer = exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/tendermint/tendermint@v0.0.0", "-replace", "github.com/tendermint/tendermint=github.com/bnb-chain/tendermint@v0.31.16") // Repo root
replacer.Dir = pkg
if out, err := replacer.CombinedOutput(); err != nil {
t.Fatalf("failed to replace tendermint dependency to bnb-chain source: %v\n%s", err, out)

View File

@@ -5,6 +5,7 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"math/big"
"os"
"path/filepath"
"strings"
@@ -15,6 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/io/prompt"
"github.com/prysmaticlabs/prysm/v4/proto/eth/service"
validatorpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/validator-client"
"github.com/prysmaticlabs/prysm/v4/validator/accounts"
"github.com/prysmaticlabs/prysm/v4/validator/accounts/iface"
"github.com/prysmaticlabs/prysm/v4/validator/accounts/petnames"
@@ -26,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/signer/core"
)
@@ -47,6 +50,10 @@ var (
Usage: "Password file path for the imported BLS account , which contains the password to get the private key by decrypting the keystore file",
Category: flags.AccountCategory,
}
chainIdFlag = &cli.Int64Flag{
Name: "chain-id",
Usage: "The chain id of the network that the validator will be created at",
}
)
var (
@@ -189,6 +196,22 @@ Print summary of existing BLS accounts in the current BLS wallet.`,
Delete the selected BLS account from the BLS wallet.`,
},
{
Name: "generate-proof",
Usage: "Generate ownership proof for the selected BLS account from the BLS wallet",
Action: blsAccountGenerateProof,
ArgsUsage: "<BLS pubkey>",
Category: "BLS ACCOUNT COMMANDS",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.BLSPasswordFileFlag,
chainIdFlag,
},
Description: `
geth bls account generate-proof
Generate ownership proof for the selected BLS account from the BLS wallet. The proof is used to prove ownership of the BLS account when creating a validator on BSC after the Feynman upgrade.`,
},
},
},
},
@@ -608,3 +631,79 @@ func blsAccountDelete(ctx *cli.Context) error {
return nil
}
// blsAccountGenerateProof generates an ownership proof for the selected BLS account.
func blsAccountGenerateProof(ctx *cli.Context) error {
if ctx.Args().Len() == 0 {
utils.Fatalf("No BLS account specified.")
}
var filteredPubKeys []bls.PublicKey
for _, str := range ctx.Args().Slice() {
pkString := str
if strings.Contains(pkString, "0x") {
pkString = pkString[2:]
}
pubKeyBytes, err := hex.DecodeString(pkString)
if err != nil {
utils.Fatalf("Could not decode string %s as hex.", pkString)
}
blsPublicKey, err := bls.PublicKeyFromBytes(pubKeyBytes)
if err != nil {
utils.Fatalf("%#x is not a valid BLS public key.", pubKeyBytes)
}
filteredPubKeys = append(filteredPubKeys, blsPublicKey)
}
if len(filteredPubKeys) > 1 {
utils.Fatalf("Only support one BLS account specified.")
}
pubkeyBz := filteredPubKeys[0].Marshal()
cfg := gethConfig{Node: defaultNodeConfig()}
// Load config file.
if file := ctx.String(configFileFlag.Name); file != "" {
if err := loadConfig(file, &cfg); err != nil {
utils.Fatalf("%v", err)
}
}
utils.SetNodeConfig(ctx, &cfg.Node)
walletDir := filepath.Join(cfg.Node.DataDir, BLSWalletPath)
dirExists, err := wallet.Exists(walletDir)
if err != nil || !dirExists {
utils.Fatalf("BLS wallet not exists.")
}
walletPassword := utils.GetPassPhraseWithList("Enter the password for your BLS wallet.", false, 0, utils.MakePasswordListFromPath(ctx.String(utils.BLSPasswordFileFlag.Name)))
w, err := wallet.OpenWallet(context.Background(), &wallet.Config{
WalletDir: walletDir,
WalletPassword: walletPassword,
})
if err != nil {
utils.Fatalf("Open BLS wallet failed: %v.", err)
}
km, err := w.InitializeKeymanager(context.Background(), iface.InitKeymanagerConfig{ListenForChanges: false})
if err != nil {
utils.Fatalf("Initialize key manager failed: %v.", err)
}
chainIdInt64 := ctx.Int64(chainIdFlag.Name)
if chainIdInt64 == 0 {
utils.Fatalf("Chain id is required.")
}
chainId := new(big.Int).SetInt64(chainIdInt64)
paddedChainIdBytes := make([]byte, 32)
copy(paddedChainIdBytes[32-len(chainId.Bytes()):], chainId.Bytes())
msgHash := crypto.Keccak256(append(pubkeyBz, paddedChainIdBytes...))
req := &validatorpb.SignRequest{
PublicKey: pubkeyBz,
SigningRoot: msgHash,
}
sig, err := km.Sign(context.Background(), req)
if err != nil {
utils.Fatalf("Generate signature failed: %v.", err)
}
fmt.Printf("Proof: %#x\n", sig.Marshal())
return nil
}
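
For reference, the command defined above is invoked as "geth bls account generate-proof --chain-id <id> <BLS pubkey>", and the message it signs is keccak256 of the BLS pubkey concatenated with the chain id left-padded to 32 bytes. A minimal sketch of that hashing step (not part of the diff; the pubkey bytes are hypothetical placeholders), assuming go-ethereum's crypto package:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/crypto"
)

// proofMessageHash mirrors the hashing in blsAccountGenerateProof:
// keccak256(blsPubkey || chainId left-padded to 32 bytes).
func proofMessageHash(blsPubkey []byte, chainId *big.Int) []byte {
	padded := make([]byte, 32)
	copy(padded[32-len(chainId.Bytes()):], chainId.Bytes())
	return crypto.Keccak256(append(blsPubkey, padded...))
}

func main() {
	pubkey := make([]byte, 48) // placeholder 48-byte BLS pubkey, zeroed for illustration
	fmt.Printf("message hash: %#x\n", proofMessageHash(pubkey, big.NewInt(56)))
}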

View File

@@ -198,6 +198,10 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
v := ctx.Uint64(utils.OverrideVerkle.Name)
cfg.Eth.OverrideVerkle = &v
}
if ctx.IsSet(utils.OverrideFeynman.Name) {
v := ctx.Uint64(utils.OverrideFeynman.Name)
cfg.Eth.OverrideFeynman = &v
}
backend, _ := utils.RegisterEthService(stack, &cfg.Eth)
// Configure log filter RPC API.

View File

@@ -74,6 +74,7 @@ var (
utils.OverrideKepler,
utils.OverrideCancun,
utils.OverrideVerkle,
utils.OverrideFeynman,
utils.EnablePersonal,
utils.TxPoolLocalsFlag,
utils.TxPoolNoLocalsFlag,

View File

@@ -315,6 +315,11 @@ var (
Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting",
Category: flags.EthCategory,
}
OverrideFeynman = &cli.Uint64Flag{
Name: "override.feynman",
Usage: "Manually specify the Feynman fork timestamp, overriding the bundled setting",
Category: flags.EthCategory,
}
SyncModeFlag = &flags.TextMarshalerFlag{
Name: "syncmode",
Usage: `Blockchain sync mode ("snap" or "full")`,
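
For context, the new flag is passed like the other override flags, e.g. "geth --override.feynman 1713000000", where the value is the desired Feynman fork time as a Unix timestamp (the number here is a hypothetical example).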

File diff suppressed because it is too large

View File

@@ -0,0 +1,237 @@
package parlia
import (
"container/heap"
"context"
"fmt"
"math"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/systemcontracts"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
)
// TODO: SecondsPerDay represents the number of seconds in a day; it should be 86400
// It is set to 60 for testing purposes and will be changed back to 86400 before launch
// const SecondsPerDay uint64 = 86400
const SecondsPerDay uint64 = 60
// the params are the timestamps of two blocks
func sameDayInUTC(first, second uint64) bool {
return first/SecondsPerDay == second/SecondsPerDay
}
func isBreatheBlock(lastBlockTime, blockTime uint64) bool {
return lastBlockTime != 0 && !sameDayInUTC(lastBlockTime, blockTime)
}
// initializeFeynmanContract initializes the new contracts introduced by the Feynman fork
func (p *Parlia) initializeFeynmanContract(state *state.StateDB, header *types.Header, chain core.ChainContext,
txs *[]*types.Transaction, receipts *[]*types.Receipt, receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool,
) error {
// method
method := "initialize"
// initialize contracts
contracts := []string{
systemcontracts.StakeHubContract,
systemcontracts.GovernorContract,
systemcontracts.GovTokenContract,
systemcontracts.TimelockContract,
}
// get packed data
data, err := p.stakeHubABI.Pack(method)
if err != nil {
log.Error("Unable to pack tx for initialize feynman contracts", "error", err)
return err
}
for _, c := range contracts {
msg := p.getSystemMessage(header.Coinbase, common.HexToAddress(c), data, common.Big0)
// apply message
log.Info("initialize feynman contract", "block number", header.Number.Uint64(), "contract", c)
err = p.applyTransaction(msg, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
if err != nil {
return err
}
}
return nil
}
type ValidatorItem struct {
address common.Address
votingPower *big.Int
voteAddress []byte
}
// A ValidatorHeap is a max-heap of validators ordered by votingPower.
type ValidatorHeap []ValidatorItem
func (h *ValidatorHeap) Len() int { return len(*h) }
func (h *ValidatorHeap) Less(i, j int) bool {
// We want topK validators with max voting power, so we need a max-heap
if (*h)[i].votingPower.Cmp((*h)[j].votingPower) == 0 {
return (*h)[i].address.Hex() < (*h)[j].address.Hex()
} else {
return (*h)[i].votingPower.Cmp((*h)[j].votingPower) == 1
}
}
func (h *ValidatorHeap) Swap(i, j int) { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] }
func (h *ValidatorHeap) Push(x interface{}) {
*h = append(*h, x.(ValidatorItem))
}
func (h *ValidatorHeap) Pop() interface{} {
old := *h
n := len(old)
x := old[n-1]
*h = old[0 : n-1]
return x
}
func (p *Parlia) updateValidatorSetV2(state *state.StateDB, header *types.Header, chain core.ChainContext,
txs *[]*types.Transaction, receipts *[]*types.Receipt, receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool,
) error {
// 1. get all validators and their voting power
blockNr := rpc.BlockNumberOrHashWithHash(header.ParentHash, false)
validatorItems, err := p.getValidatorElectionInfo(blockNr)
if err != nil {
return err
}
maxElectedValidators, err := p.getMaxElectedValidators(blockNr)
if err != nil {
return err
}
// 2. sort by voting power
eValidators, eVotingPowers, eVoteAddrs := getTopValidatorsByVotingPower(validatorItems, maxElectedValidators)
// 3. update validator set to system contract
method := "updateValidatorSetV2"
data, err := p.validatorSetABI.Pack(method, eValidators, eVotingPowers, eVoteAddrs)
if err != nil {
log.Error("Unable to pack tx for updateValidatorSetV2", "error", err)
return err
}
// get system message
msg := p.getSystemMessage(header.Coinbase, common.HexToAddress(systemcontracts.ValidatorContract), data, common.Big0)
// apply message
return p.applyTransaction(msg, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
}
func (p *Parlia) getValidatorElectionInfo(blockNr rpc.BlockNumberOrHash) ([]ValidatorItem, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
method := "getValidatorElectionInfo"
toAddress := common.HexToAddress(systemcontracts.StakeHubContract)
gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2))
data, err := p.stakeHubABI.Pack(method, big.NewInt(0), big.NewInt(0))
if err != nil {
log.Error("Unable to pack tx for getValidatorElectionInfo", "error", err)
return nil, err
}
msgData := (hexutil.Bytes)(data)
result, err := p.ethAPI.Call(ctx, ethapi.TransactionArgs{
Gas: &gas,
To: &toAddress,
Data: &msgData,
}, blockNr, nil, nil)
if err != nil {
return nil, err
}
var validators []common.Address
var votingPowers []*big.Int
var voteAddrs [][]byte
var totalLength *big.Int
if err := p.stakeHubABI.UnpackIntoInterface(&[]interface{}{&validators, &votingPowers, &voteAddrs, &totalLength}, method, result); err != nil {
return nil, err
}
if totalLength.Int64() != int64(len(validators)) || totalLength.Int64() != int64(len(votingPowers)) || totalLength.Int64() != int64(len(voteAddrs)) {
return nil, fmt.Errorf("validator length not match")
}
validatorItems := make([]ValidatorItem, len(validators))
for i := 0; i < len(validators); i++ {
validatorItems[i] = ValidatorItem{
address: validators[i],
votingPower: votingPowers[i],
voteAddress: voteAddrs[i],
}
}
return validatorItems, nil
}
func (p *Parlia) getMaxElectedValidators(blockNr rpc.BlockNumberOrHash) (maxElectedValidators *big.Int, err error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
method := "maxElectedValidators"
toAddress := common.HexToAddress(systemcontracts.StakeHubContract)
gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2))
data, err := p.stakeHubABI.Pack(method)
if err != nil {
log.Error("Unable to pack tx for maxElectedValidators", "error", err)
return nil, err
}
msgData := (hexutil.Bytes)(data)
result, err := p.ethAPI.Call(ctx, ethapi.TransactionArgs{
Gas: &gas,
To: &toAddress,
Data: &msgData,
}, blockNr, nil, nil)
if err != nil {
return nil, err
}
if err := p.stakeHubABI.UnpackIntoInterface(&maxElectedValidators, method, result); err != nil {
return nil, err
}
return maxElectedValidators, nil
}
func getTopValidatorsByVotingPower(validatorItems []ValidatorItem, maxElectedValidators *big.Int) ([]common.Address, []uint64, [][]byte) {
var validatorHeap ValidatorHeap
for i := 0; i < len(validatorItems); i++ {
// only keep validators with voting power > 0
if validatorItems[i].votingPower.Cmp(big.NewInt(0)) == 1 {
validatorHeap = append(validatorHeap, validatorItems[i])
}
}
hp := &validatorHeap
heap.Init(hp)
topN := int(maxElectedValidators.Int64())
if topN > len(validatorHeap) {
topN = len(validatorHeap)
}
eValidators := make([]common.Address, topN)
eVotingPowers := make([]uint64, topN)
eVoteAddrs := make([][]byte, topN)
for i := 0; i < topN; i++ {
item := heap.Pop(hp).(ValidatorItem)
eValidators[i] = item.address
// as voting power has 1e8 decimals on the BNB Beacon Chain and 1e18 on the BNB Smart Chain, divide by 1e10
eVotingPowers[i] = new(big.Int).Div(item.votingPower, big.NewInt(1e10)).Uint64()
eVoteAddrs[i] = item.voteAddress
}
return eValidators, eVotingPowers, eVoteAddrs
}
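
As a quick illustration of the breathe-block rule above (a sketch, not part of the diff; it uses the production day length of 86400 seconds and hypothetical timestamps):

package main

import "fmt"

const secondsPerDay uint64 = 86400 // production value; the diff temporarily sets it to 60 for testing

func sameDayInUTC(first, second uint64) bool {
	return first/secondsPerDay == second/secondsPerDay
}

func isBreatheBlock(lastBlockTime, blockTime uint64) bool {
	return lastBlockTime != 0 && !sameDayInUTC(lastBlockTime, blockTime)
}

func main() {
	fmt.Println(isBreatheBlock(1700006399, 1700006400)) // true: parent at 23:59:59 UTC, block at 00:00:00 the next day
	fmt.Println(isBreatheBlock(1700006400, 1700006403)) // false: both timestamps fall on the same UTC day
	fmt.Println(isBreatheBlock(0, 1700006400))          // false: a zero parent time never triggers a breathe block
}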

View File

@@ -0,0 +1,166 @@
package parlia
import (
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
)
func TestValidatorHeap(t *testing.T) {
testCases := []struct {
description string
k int64
validators []ValidatorItem
expected []common.Address
}{
{
description: "normal case",
k: 2,
validators: []ValidatorItem{
{
address: common.HexToAddress("0x1"),
votingPower: new(big.Int).Mul(big.NewInt(300), big.NewInt(1e10)),
voteAddress: []byte("0x1"),
},
{
address: common.HexToAddress("0x2"),
votingPower: new(big.Int).Mul(big.NewInt(200), big.NewInt(1e10)),
voteAddress: []byte("0x2"),
},
{
address: common.HexToAddress("0x3"),
votingPower: new(big.Int).Mul(big.NewInt(100), big.NewInt(1e10)),
voteAddress: []byte("0x3"),
},
},
expected: []common.Address{
common.HexToAddress("0x1"),
common.HexToAddress("0x2"),
},
},
{
description: "same voting power",
k: 2,
validators: []ValidatorItem{
{
address: common.HexToAddress("0x1"),
votingPower: new(big.Int).Mul(big.NewInt(300), big.NewInt(1e10)),
voteAddress: []byte("0x1"),
},
{
address: common.HexToAddress("0x2"),
votingPower: new(big.Int).Mul(big.NewInt(100), big.NewInt(1e10)),
voteAddress: []byte("0x2"),
},
{
address: common.HexToAddress("0x3"),
votingPower: new(big.Int).Mul(big.NewInt(100), big.NewInt(1e10)),
voteAddress: []byte("0x3"),
},
},
expected: []common.Address{
common.HexToAddress("0x1"),
common.HexToAddress("0x2"),
},
},
{
description: "zero voting power and k > len(validators)",
k: 5,
validators: []ValidatorItem{
{
address: common.HexToAddress("0x1"),
votingPower: new(big.Int).Mul(big.NewInt(300), big.NewInt(1e10)),
voteAddress: []byte("0x1"),
},
{
address: common.HexToAddress("0x2"),
votingPower: big.NewInt(0),
voteAddress: []byte("0x2"),
},
{
address: common.HexToAddress("0x3"),
votingPower: big.NewInt(0),
voteAddress: []byte("0x3"),
},
{
address: common.HexToAddress("0x4"),
votingPower: big.NewInt(0),
voteAddress: []byte("0x4"),
},
},
expected: []common.Address{
common.HexToAddress("0x1"),
},
},
{
description: "zero voting power and k < len(validators)",
k: 2,
validators: []ValidatorItem{
{
address: common.HexToAddress("0x1"),
votingPower: new(big.Int).Mul(big.NewInt(300), big.NewInt(1e10)),
voteAddress: []byte("0x1"),
},
{
address: common.HexToAddress("0x2"),
votingPower: big.NewInt(0),
voteAddress: []byte("0x2"),
},
{
address: common.HexToAddress("0x3"),
votingPower: big.NewInt(0),
voteAddress: []byte("0x3"),
},
{
address: common.HexToAddress("0x4"),
votingPower: big.NewInt(0),
voteAddress: []byte("0x4"),
},
},
expected: []common.Address{
common.HexToAddress("0x1"),
},
},
{
description: "all zero voting power",
k: 2,
validators: []ValidatorItem{
{
address: common.HexToAddress("0x1"),
votingPower: big.NewInt(0),
voteAddress: []byte("0x1"),
},
{
address: common.HexToAddress("0x2"),
votingPower: big.NewInt(0),
voteAddress: []byte("0x2"),
},
{
address: common.HexToAddress("0x3"),
votingPower: big.NewInt(0),
voteAddress: []byte("0x3"),
},
{
address: common.HexToAddress("0x4"),
votingPower: big.NewInt(0),
voteAddress: []byte("0x4"),
},
},
expected: []common.Address{},
},
}
for _, tc := range testCases {
eligibleValidators, _, _ := getTopValidatorsByVotingPower(tc.validators, big.NewInt(tc.k))
// check
if len(eligibleValidators) != len(tc.expected) {
t.Errorf("expected %d, got %d", len(tc.expected), len(eligibleValidators))
}
for i := 0; i < len(tc.expected); i++ {
if eligibleValidators[i] != tc.expected[i] {
t.Errorf("expected %s, got %s", tc.expected[i].Hex(), eligibleValidators[i].Hex())
}
}
}
}

View File

@@ -6,7 +6,6 @@ import (
"encoding/hex"
"errors"
"fmt"
"io"
"math"
"math/big"
"math/rand"
@@ -91,6 +90,10 @@ var (
common.HexToAddress(systemcontracts.TokenHubContract): true,
common.HexToAddress(systemcontracts.RelayerIncentivizeContract): true,
common.HexToAddress(systemcontracts.CrossChainContract): true,
common.HexToAddress(systemcontracts.StakeHubContract): true,
common.HexToAddress(systemcontracts.GovernorContract): true,
common.HexToAddress(systemcontracts.GovTokenContract): true,
common.HexToAddress(systemcontracts.TimelockContract): true,
}
)
@@ -180,7 +183,7 @@ func ecrecover(header *types.Header, sigCache *lru.ARCCache, chainId *big.Int) (
signature := header.Extra[len(header.Extra)-extraSeal:]
// Recover the public key and the Ethereum address
pubkey, err := crypto.Ecrecover(SealHash(header, chainId).Bytes(), signature)
pubkey, err := crypto.Ecrecover(types.SealHash(header, chainId).Bytes(), signature)
if err != nil {
return common.Address{}, err
}
@@ -200,7 +203,7 @@ func ecrecover(header *types.Header, sigCache *lru.ARCCache, chainId *big.Int) (
// or not), which could be abused to produce different hashes for the same header.
func ParliaRLP(header *types.Header, chainId *big.Int) []byte {
b := new(bytes.Buffer)
encodeSigHeader(b, header, chainId)
types.EncodeSigHeader(b, header, chainId)
return b.Bytes()
}
@@ -227,6 +230,7 @@ type Parlia struct {
validatorSetABIBeforeLuban abi.ABI
validatorSetABI abi.ABI
slashABI abi.ABI
stakeHubABI abi.ABI
// The fields below are for testing only
fakeDiff bool // Skip difficulty verifications
@@ -269,6 +273,10 @@ func New(
if err != nil {
panic(err)
}
stABI, err := abi.JSON(strings.NewReader(stakeABI))
if err != nil {
panic(err)
}
c := &Parlia{
chainConfig: chainConfig,
config: parliaConfig,
@@ -280,6 +288,7 @@ func New(
validatorSetABIBeforeLuban: vABIBeforeLuban,
validatorSetABI: vABI,
slashABI: sABI,
stakeHubABI: stABI,
signer: types.LatestSigner(chainConfig),
}
@@ -908,7 +917,7 @@ func (p *Parlia) assembleVoteAttestation(chain consensus.ChainHeaderReader, head
// Prepare vote address bitset.
for _, valInfo := range snap.Validators {
if _, ok := voteAddrSet[valInfo.VoteAddress]; ok {
attestation.VoteAddressSet |= 1 << (valInfo.Index - 1) //Index is offset by 1
attestation.VoteAddressSet |= 1 << (valInfo.Index - 1) // Index is offset by 1
}
}
validatorsBitSet := bitset.From([]uint64{uint64(attestation.VoteAddressSet)})
@@ -1117,6 +1126,15 @@ func (p *Parlia) Finalize(chain consensus.ChainHeaderReader, header *types.Heade
cx := chainContext{Chain: chain, parlia: p}
parent := chain.GetHeaderByHash(header.ParentHash)
if parent == nil {
return errors.New("parent not found")
}
if p.chainConfig.IsFeynman(header.Number, header.Time) {
systemcontracts.UpgradeBuildInSystemContract(p.chainConfig, header.Number, parent.Time, header.Time, state)
}
// No block rewards in PoA, so the state remains as is and uncles are dropped
if header.Number.Cmp(common.Big1) == 0 {
err := p.initContract(state, header, cx, txs, receipts, systemTxs, usedGas, false)
@@ -1158,6 +1176,24 @@ func (p *Parlia) Finalize(chain consensus.ChainHeaderReader, header *types.Heade
return err
}
}
if p.chainConfig.IsOnFeynman(header.Number, parent.Time, header.Time) {
err := p.initializeFeynmanContract(state, header, cx, txs, receipts, systemTxs, usedGas, false)
if err != nil {
log.Error("init feynman contract failed", "error", err)
}
}
// update validators every day
if p.chainConfig.IsFeynman(header.Number, header.Time) && isBreatheBlock(parent.Time, header.Time) {
// avoid updating validators in the Feynman upgrade block itself
if !p.chainConfig.IsOnFeynman(header.Number, parent.Time, header.Time) {
if err := p.updateValidatorSetV2(state, header, cx, txs, receipts, systemTxs, usedGas, false); err != nil {
return err
}
}
}
if len(*systemTxs) > 0 {
return errors.New("the length of systemTxs do not match")
}
@@ -1176,6 +1212,16 @@ func (p *Parlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
if receipts == nil {
receipts = make([]*types.Receipt, 0)
}
parent := chain.GetHeaderByHash(header.ParentHash)
if parent == nil {
return nil, nil, errors.New("parent not found")
}
if p.chainConfig.IsFeynman(header.Number, header.Time) {
systemcontracts.UpgradeBuildInSystemContract(p.chainConfig, header.Number, parent.Time, header.Time, state)
}
if header.Number.Cmp(common.Big1) == 0 {
err := p.initContract(state, header, cx, &txs, &receipts, nil, &header.GasUsed, true)
if err != nil {
@@ -1220,6 +1266,23 @@ func (p *Parlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
}
}
if p.chainConfig.IsOnFeynman(header.Number, parent.Time, header.Time) {
err := p.initializeFeynmanContract(state, header, cx, &txs, &receipts, nil, &header.GasUsed, true)
if err != nil {
log.Error("init feynman contract failed", "error", err)
}
}
// update validators every day
if p.chainConfig.IsFeynman(header.Number, header.Time) && isBreatheBlock(parent.Time, header.Time) {
// avoid updating validators in the Feynman upgrade block itself
if !p.chainConfig.IsOnFeynman(header.Number, parent.Time, header.Time) {
if err := p.updateValidatorSetV2(state, header, cx, &txs, &receipts, nil, &header.GasUsed, true); err != nil {
return nil, nil, err
}
}
}
// should not happen; if it does, stopping the node is better than broadcasting the block
if header.GasLimit < header.GasUsed {
return nil, nil, errors.New("gas consumption of system txs exceed the gas limit")
@@ -1422,7 +1485,7 @@ func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
select {
case results <- block.WithSeal(header):
default:
log.Warn("Sealing result is not read by miner", "sealhash", SealHash(header, p.chainConfig.ChainID))
log.Warn("Sealing result is not read by miner", "sealhash", types.SealHash(header, p.chainConfig.ChainID))
}
}()
@@ -1496,7 +1559,7 @@ func CalcDifficulty(snap *Snapshot, signer common.Address) *big.Int {
// So it's not the real hash of a block, just used as unique id to distinguish task
func (p *Parlia) SealHash(header *types.Header) (hash common.Hash) {
hasher := sha3.NewLegacyKeccak256()
encodeSigHeaderWithoutVoteAttestation(hasher, header, p.chainConfig.ChainID)
types.EncodeSigHeaderWithoutVoteAttestation(hasher, header, p.chainConfig.ChainID)
hasher.Sum(hash[:0])
return hash
}
@@ -1554,16 +1617,15 @@ func (p *Parlia) getCurrentValidators(blockHash common.Hash, blockNum *big.Int)
var valSet []common.Address
var voteAddrSet []types.BLSPublicKey
if err := p.validatorSetABI.UnpackIntoInterface(&[]interface{}{&valSet, &voteAddrSet}, method, result); err != nil {
return nil, nil, err
}
voteAddrmap := make(map[common.Address]*types.BLSPublicKey, len(valSet))
voteAddrMap := make(map[common.Address]*types.BLSPublicKey, len(valSet))
for i := 0; i < len(valSet); i++ {
voteAddrmap[valSet[i]] = &(voteAddrSet)[i]
voteAddrMap[valSet[i]] = &(voteAddrSet)[i]
}
return valSet, voteAddrmap, nil
return valSet, voteAddrMap, nil
}
// slash spoiled validators
@@ -1580,7 +1642,7 @@ func (p *Parlia) distributeIncoming(val common.Address, state *state.StateDB, he
doDistributeSysReward := !p.chainConfig.IsKepler(header.Number, header.Time) &&
state.GetBalance(common.HexToAddress(systemcontracts.SystemRewardContract)).Cmp(maxSystemBalance) < 0
if doDistributeSysReward {
var rewards = new(big.Int)
rewards := new(big.Int)
rewards = rewards.Rsh(balance, systemRewardPercent)
if rewards.Cmp(common.Big0) > 0 {
err := p.distributeToSystem(rewards, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
@@ -1802,62 +1864,6 @@ func (p *Parlia) GetFinalizedHeader(chain consensus.ChainHeaderReader, header *t
}
// =========================== utility function ==========================
// SealHash returns the hash of a block prior to it being sealed.
func SealHash(header *types.Header, chainId *big.Int) (hash common.Hash) {
hasher := sha3.NewLegacyKeccak256()
encodeSigHeader(hasher, header, chainId)
hasher.Sum(hash[:0])
return hash
}
func encodeSigHeader(w io.Writer, header *types.Header, chainId *big.Int) {
err := rlp.Encode(w, []interface{}{
chainId,
header.ParentHash,
header.UncleHash,
header.Coinbase,
header.Root,
header.TxHash,
header.ReceiptHash,
header.Bloom,
header.Difficulty,
header.Number,
header.GasLimit,
header.GasUsed,
header.Time,
header.Extra[:len(header.Extra)-extraSeal], // this will panic if extra is too short, should check before calling encodeSigHeader
header.MixDigest,
header.Nonce,
})
if err != nil {
panic("can't encode: " + err.Error())
}
}
func encodeSigHeaderWithoutVoteAttestation(w io.Writer, header *types.Header, chainId *big.Int) {
err := rlp.Encode(w, []interface{}{
chainId,
header.ParentHash,
header.UncleHash,
header.Coinbase,
header.Root,
header.TxHash,
header.ReceiptHash,
header.Bloom,
header.Difficulty,
header.Number,
header.GasLimit,
header.GasUsed,
header.Time,
header.Extra[:extraVanity], // this will panic if extra is too short, should check before calling encodeSigHeaderWithoutVoteAttestation
header.MixDigest,
header.Nonce,
})
if err != nil {
panic("can't encode: " + err.Error())
}
}
func (p *Parlia) backOffTime(snap *Snapshot, header *types.Header, val common.Address) uint64 {
if snap.inturn(val) {
return 0

View File

@@ -308,7 +308,11 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(b.header.Number) == 0 {
misc.ApplyDAOHardFork(statedb)
}
systemcontracts.UpgradeBuildInSystemContract(config, b.header.Number, parent.Time(), b.header.Time, statedb)
if !config.IsFeynman(b.header.Number, b.header.Time) {
systemcontracts.UpgradeBuildInSystemContract(config, b.header.Number, parent.Time(), b.header.Time, statedb)
}
// Execute any user modifications to the block
if gen != nil {
gen(i, b)

View File

@@ -280,6 +280,7 @@ type ChainOverrides struct {
OverrideKepler *uint64
OverrideCancun *uint64
OverrideVerkle *uint64
OverrideFeynman *uint64
}
// SetupGenesisBlock writes or updates the genesis block in db.
@@ -317,6 +318,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen
if overrides != nil && overrides.OverrideVerkle != nil {
config.VerkleTime = overrides.OverrideVerkle
}
if overrides != nil && overrides.OverrideFeynman != nil {
config.FeynmanTime = overrides.OverrideFeynman
}
}
}
// Just commit the new block if there is no stored genesis block.

View File

@@ -73,12 +73,15 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 {
misc.ApplyDAOHardFork(statedb)
}
// Handle upgrading the built-in system contract code
lastBlock := p.bc.GetBlockByHash(block.ParentHash())
if lastBlock == nil {
return statedb, nil, nil, 0, fmt.Errorf("could not get parent block")
}
systemcontracts.UpgradeBuildInSystemContract(p.config, blockNumber, lastBlock.Time(), block.Time(), statedb)
if !p.config.IsFeynman(block.Number(), block.Time()) {
// Handle upgrading the built-in system contract code
systemcontracts.UpgradeBuildInSystemContract(p.config, blockNumber, lastBlock.Time(), block.Time(), statedb)
}
var (
context = NewEVMBlockContext(header, p.bc, nil)

View File

@@ -13,4 +13,10 @@ const (
TokenManagerContract = "0x0000000000000000000000000000000000001008"
CrossChainContract = "0x0000000000000000000000000000000000002000"
StakingContract = "0x0000000000000000000000000000000000002001"
StakeHubContract = "0x0000000000000000000000000000000000002002"
StakeCreditContract = "0x0000000000000000000000000000000000002003"
GovernorContract = "0x0000000000000000000000000000000000002004"
GovTokenContract = "0x0000000000000000000000000000000000002005"
TimelockContract = "0x0000000000000000000000000000000000002006"
TokenRecoverPortalContract = "0x0000000000000000000000000000000000003000"
)

File diff suppressed because one or more lines are too long

View File

@@ -26,6 +26,8 @@ import (
"sync/atomic"
"time"
"golang.org/x/crypto/sha3"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp"
@@ -560,8 +562,7 @@ func (d *DiffLayer) DecodeRLP(s *rlp.Stream) error {
if err := s.Decode(&ed); err != nil {
return err
}
d.BlockHash, d.Number, d.Codes, d.Destructs, d.Accounts, d.Storages =
ed.BlockHash, ed.Number, ed.Codes, ed.Destructs, ed.Accounts, ed.Storages
d.BlockHash, d.Number, d.Codes, d.Destructs, d.Accounts, d.Storages = ed.BlockHash, ed.Number, ed.Codes, ed.Destructs, ed.Accounts, ed.Storages
d.Receipts = make([]*Receipt, len(ed.Receipts))
for i, storageReceipt := range ed.Receipts {
@@ -608,6 +609,7 @@ func (storage *DiffStorage) Swap(i, j int) {
storage.Keys[i], storage.Keys[j] = storage.Keys[j], storage.Keys[i]
storage.Vals[i], storage.Vals[j] = storage.Vals[j], storage.Vals[i]
}
func (storage *DiffStorage) Less(i, j int) bool {
return string(storage.Keys[i][:]) < string(storage.Keys[j][:])
}
@@ -622,3 +624,64 @@ type DiffAccountsInBlock struct {
BlockHash common.Hash
Transactions []DiffAccountsInTx
}
var (
extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal
)
// SealHash returns the hash of a block prior to it being sealed.
func SealHash(header *Header, chainId *big.Int) (hash common.Hash) {
hasher := sha3.NewLegacyKeccak256()
EncodeSigHeader(hasher, header, chainId)
hasher.Sum(hash[:0])
return hash
}
func EncodeSigHeader(w io.Writer, header *Header, chainId *big.Int) {
err := rlp.Encode(w, []interface{}{
chainId,
header.ParentHash,
header.UncleHash,
header.Coinbase,
header.Root,
header.TxHash,
header.ReceiptHash,
header.Bloom,
header.Difficulty,
header.Number,
header.GasLimit,
header.GasUsed,
header.Time,
header.Extra[:len(header.Extra)-extraSeal], // this will panic if Extra is too short; callers should check before calling EncodeSigHeader
header.MixDigest,
header.Nonce,
})
if err != nil {
panic("can't encode: " + err.Error())
}
}
func EncodeSigHeaderWithoutVoteAttestation(w io.Writer, header *Header, chainId *big.Int) {
err := rlp.Encode(w, []interface{}{
chainId,
header.ParentHash,
header.UncleHash,
header.Coinbase,
header.Root,
header.TxHash,
header.ReceiptHash,
header.Bloom,
header.Difficulty,
header.Number,
header.GasLimit,
header.GasUsed,
header.Time,
header.Extra[:extraVanity], // this will panic if Extra is too short; callers should check before calling EncodeSigHeaderWithoutVoteAttestation
header.MixDigest,
header.Nonce,
})
if err != nil {
panic("can't encode: " + err.Error())
}
}
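
A minimal sketch (not part of the diff) of calling the now-exported helper; the header fields are hypothetical placeholders, with Extra sized so the seal slicing above is safe:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	header := &types.Header{
		Number:     big.NewInt(1),
		Difficulty: big.NewInt(2),
		Extra:      make([]byte, 32+65), // vanity + seal, so Extra[:len(Extra)-extraSeal] is valid
	}
	// The same hash is used by parlia when sealing and by the new
	// double-sign evidence precompile for signature recovery.
	fmt.Printf("seal hash: %#x\n", types.SealHash(header, big.NewInt(56)))
}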

View File

@@ -17,23 +17,28 @@
package vm
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"errors"
"fmt"
"math/big"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"golang.org/x/crypto/ripemd160"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/blake2b"
"github.com/ethereum/go-ethereum/crypto/bls12381"
"github.com/ethereum/go-ethereum/crypto/bn256"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"golang.org/x/crypto/ripemd160"
"github.com/ethereum/go-ethereum/rlp"
)
// PrecompiledContract is the basic interface for native Go contracts. The implementation
@@ -219,6 +224,27 @@ var PrecompiledContractsCancun = map[common.Address]PrecompiledContract{
common.BytesToAddress([]byte{103}): &cometBFTLightBlockValidate{},
}
// PrecompiledContractsFeynman contains the default set of pre-compiled Ethereum
// contracts used in the Feynman release.
var PrecompiledContractsFeynman = map[common.Address]PrecompiledContract{
common.BytesToAddress([]byte{1}): &ecrecover{},
common.BytesToAddress([]byte{2}): &sha256hash{},
common.BytesToAddress([]byte{3}): &ripemd160hash{},
common.BytesToAddress([]byte{4}): &dataCopy{},
common.BytesToAddress([]byte{5}): &bigModExp{},
common.BytesToAddress([]byte{6}): &bn256AddIstanbul{},
common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{},
common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{},
common.BytesToAddress([]byte{9}): &blake2F{},
common.BytesToAddress([]byte{100}): &tmHeaderValidate{},
common.BytesToAddress([]byte{101}): &iavlMerkleProofValidatePlato{},
common.BytesToAddress([]byte{102}): &blsSignatureVerify{},
common.BytesToAddress([]byte{103}): &cometBFTLightBlockValidate{},
common.BytesToAddress([]byte{104}): &verifyDoubleSignEvidence{},
common.BytesToAddress([]byte{105}): &secp256k1SignatureRecover{},
}
// PrecompiledContractsBLS contains the set of pre-compiled Ethereum
// contracts specified in EIP-2537. These are exported for testing purposes.
var PrecompiledContractsBLS = map[common.Address]PrecompiledContract{
@@ -245,6 +271,7 @@ var (
PrecompiledAddressesIstanbul []common.Address
PrecompiledAddressesByzantium []common.Address
PrecompiledAddressesHomestead []common.Address
PrecompiledAddressesFeynman []common.Address
)
func init() {
@@ -281,11 +308,16 @@ func init() {
for k := range PrecompiledContractsCancun {
PrecompiledAddressesCancun = append(PrecompiledAddressesCancun, k)
}
for k := range PrecompiledContractsFeynman {
PrecompiledAddressesFeynman = append(PrecompiledAddressesFeynman, k)
}
}
// ActivePrecompiles returns the precompiles enabled with the current configuration.
func ActivePrecompiles(rules params.Rules) []common.Address {
switch {
case rules.IsFeynman:
return PrecompiledAddressesFeynman
case rules.IsCancun:
return PrecompiledAddressesCancun
case rules.IsHertz:
@@ -561,7 +593,7 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) {
// Modulo 0 is undefined, return zero
return common.LeftPadBytes([]byte{}, int(modLen)), nil
case base.BitLen() == 1: // a bit length of 1 means it's 1 (or -1).
//If base == 1, then we can just return base % mod (if mod >= 1, which it is)
// If base == 1, then we can just return base % mod (if mod >= 1, which it is)
v = base.Mod(base, mod).Bytes()
default:
v = base.Exp(base, exp, mod).Bytes()
@@ -1355,3 +1387,88 @@ func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash {
return h
}
// verifyDoubleSignEvidence implements bsc header verification precompile.
type verifyDoubleSignEvidence struct{}
// RequiredGas returns the gas required to execute the pre-compiled contract.
func (c *verifyDoubleSignEvidence) RequiredGas(input []byte) uint64 {
return params.DoubleSignEvidenceVerifyGas
}
var (
extraSeal = 65
)
type DoubleSignEvidence struct {
ChainId *big.Int
HeaderBytes1 []byte
HeaderBytes2 []byte
}
// Run input: rlp encoded DoubleSignEvidence
// return:
// signer address| evidence height|
// 20 bytes | 32 bytes |
func (c *verifyDoubleSignEvidence) Run(input []byte) ([]byte, error) {
evidence := &DoubleSignEvidence{}
err := rlp.DecodeBytes(input, evidence)
if err != nil {
return nil, ErrExecutionReverted
}
header1 := &types.Header{}
err = rlp.DecodeBytes(evidence.HeaderBytes1, header1)
if err != nil {
return nil, ErrExecutionReverted
}
header2 := &types.Header{}
err = rlp.DecodeBytes(evidence.HeaderBytes2, header2)
if err != nil {
return nil, ErrExecutionReverted
}
// basic check
if header1.Number.Cmp(header2.Number) != 0 {
return nil, ErrExecutionReverted
}
if header1.ParentHash != header2.ParentHash {
return nil, ErrExecutionReverted
}
if len(header1.Extra) < extraSeal || len(header2.Extra) < extraSeal {
return nil, ErrExecutionReverted
}
sig1 := header1.Extra[len(header1.Extra)-extraSeal:]
sig2 := header2.Extra[len(header2.Extra)-extraSeal:]
if bytes.Equal(sig1, sig2) {
return nil, ErrExecutionReverted
}
// check sig
msgHash1 := types.SealHash(header1, evidence.ChainId)
msgHash2 := types.SealHash(header2, evidence.ChainId)
if bytes.Equal(msgHash1.Bytes(), msgHash2.Bytes()) {
return nil, ErrExecutionReverted
}
pubkey1, err := secp256k1.RecoverPubkey(msgHash1.Bytes(), sig1)
if err != nil {
return nil, ErrExecutionReverted
}
pubkey2, err := secp256k1.RecoverPubkey(msgHash2.Bytes(), sig2)
if err != nil {
return nil, ErrExecutionReverted
}
if !bytes.Equal(pubkey1, pubkey2) {
return nil, ErrExecutionReverted
}
returnBz := make([]byte, 52) // 20 + 32
signerAddr := crypto.Keccak256(pubkey1[1:])[12:]
evidenceHeightBz := header1.Number.Bytes()
copy(returnBz[:20], signerAddr)
copy(returnBz[52-len(evidenceHeightBz):], evidenceHeightBz)
return returnBz, nil
}
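
For callers of the new precompile at address 0x68, a sketch (not part of the diff) of how the evidence input is assembled; the header byte strings are hypothetical placeholders for two RLP-encoded headers sharing the same number and parent hash but carrying different seals:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/rlp"
)

// DoubleSignEvidence mirrors the struct the precompile decodes.
type DoubleSignEvidence struct {
	ChainId      *big.Int
	HeaderBytes1 []byte
	HeaderBytes2 []byte
}

func main() {
	input, err := rlp.EncodeToBytes(&DoubleSignEvidence{
		ChainId:      big.NewInt(56),
		HeaderBytes1: []byte{0x01}, // placeholder; real evidence would be rlp.EncodeToBytes(header1)
		HeaderBytes2: []byte{0x02}, // placeholder; real evidence would be rlp.EncodeToBytes(header2)
	})
	if err != nil {
		panic(err)
	}
	// On success the precompile returns 52 bytes:
	// a 20-byte signer address followed by the 32-byte evidence block number.
	fmt.Printf("precompile input: %#x\n", input)
}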

View File

@@ -8,8 +8,10 @@ import (
"github.com/tendermint/iavl"
"github.com/tendermint/tendermint/crypto/merkle"
"github.com/tendermint/tendermint/crypto/secp256k1"
cmn "github.com/tendermint/tendermint/libs/common"
//nolint:staticcheck
v1 "github.com/ethereum/go-ethereum/core/vm/lightclient/v1"
v2 "github.com/ethereum/go-ethereum/core/vm/lightclient/v2"
"github.com/ethereum/go-ethereum/params"
@@ -104,7 +106,7 @@ func (c *tmHeaderValidate) Run(input []byte) (result []byte, err error) {
return result, nil
}
//------------------------------------------------------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------------------------------------------------------
// iavlMerkleProofValidate implemented as a native contract.
type iavlMerkleProofValidate struct {
@@ -397,3 +399,40 @@ type cometBFTLightBlockValidateHertz struct {
func (c *cometBFTLightBlockValidateHertz) Run(input []byte) (result []byte, err error) {
return c.run(input, true)
}
// secp256k1SignatureRecover implemented as a native contract.
type secp256k1SignatureRecover struct{}
func (c *secp256k1SignatureRecover) RequiredGas(input []byte) uint64 {
return params.EcrecoverGas
}
const (
tmPubKeyLength uint8 = 33
tmSignatureLength uint8 = 64
tmSignatureMsgHashLength uint8 = 32
)
// input:
// | tmPubKey | tmSignature | tmSignatureMsgHash |
// | 33 bytes | 64 bytes | 32 bytes |
func (c *secp256k1SignatureRecover) Run(input []byte) (result []byte, err error) {
if len(input) != int(tmPubKeyLength)+int(tmSignatureLength)+int(tmSignatureMsgHashLength) {
return nil, fmt.Errorf("invalid input")
}
return c.runTMSecp256k1Signature(
input[:tmPubKeyLength],
input[tmPubKeyLength:tmPubKeyLength+tmSignatureLength],
input[tmPubKeyLength+tmSignatureLength:],
)
}
func (c *secp256k1SignatureRecover) runTMSecp256k1Signature(pubkey, signatureStr, msgHash []byte) (result []byte, err error) {
tmPubKey := secp256k1.PubKeySecp256k1(pubkey)
ok := tmPubKey.VerifyBytesWithMsgHash(msgHash, signatureStr)
if !ok {
return nil, fmt.Errorf("invalid signature")
}
return tmPubKey.Address().Bytes(), nil
}

View File

@@ -373,3 +373,42 @@ func TestCometBFTLightBlockValidateHertz(t *testing.T) {
require.NoError(t, err)
require.Equal(t, expectOutputStr, hex.EncodeToString(res))
}
func TestSecp256k1SignatureRecover(t *testing.T) {
// local key
{
pubKey, err := hex.DecodeString("0278caa4d6321aa856d6341dd3e8bcdfe0b55901548871c63c3f5cec43c2ae88a9")
require.NoError(t, err)
sig, err := hex.DecodeString("0cb78be0d8eaeab991907b06c61240c04f4ca83f54b7799ce77cf029b837988038c4b3b7f5df231695b0d14499b716e1fd6504860eb3c9244ecb4e569d44c062")
require.NoError(t, err)
msghash, err := hex.DecodeString("b6ac827edff4bbbf23579720782dbef40b65780af292cc66849e7e5944f1230f")
require.NoError(t, err)
expectedAddr, err := hex.DecodeString("fa3B227adFf8EA1706098928715076D76959Ae6c")
require.NoError(t, err)
input := append(append(pubKey, sig...), msghash...)
contract := &secp256k1SignatureRecover{}
res, err := contract.Run(input)
require.NoError(t, err)
require.Equal(t, expectedAddr, res)
}
// ledger
{
pubKey, err := hex.DecodeString("02d63ee39adb1779353b4393dd5ea9d6d2b6df63b71d168571803cc7b9a0a20e98")
require.NoError(t, err)
sig, err := hex.DecodeString("66bdb5d381b2773c0f569858c7ee143959522d7c1f46dc656c325cb7353ec40c28ec22dff3650b34c096c5b12e702d7237d409f1ebaaa6dd1128a8f2d401fd5b")
require.NoError(t, err)
msghash, err := hex.DecodeString("c45e8f0dc7c054c31912beeffd6f10f1c585606d61e252e97968cd66661c2571")
require.NoError(t, err)
expectedAddr, err := hex.DecodeString("65a284146b84210a01add088954bb52d88b230af")
require.NoError(t, err)
input := append(append(pubKey, sig...), msghash...)
contract := &secp256k1SignatureRecover{}
res, err := contract.Run(input)
require.NoError(t, err)
require.Equal(t, expectedAddr, res)
}
}

View File

@@ -67,6 +67,7 @@ var allPrecompiles = map[common.Address]PrecompiledContract{
common.BytesToAddress([]byte{0x0f, 0x11}): &bls12381MapG1{},
common.BytesToAddress([]byte{0x0f, 0x12}): &bls12381MapG2{},
common.BytesToAddress([]byte{102}): &blsSignatureVerify{},
common.BytesToAddress([]byte{104}): &verifyDoubleSignEvidence{},
}
// EIP-152 test vectors
@@ -405,3 +406,14 @@ func BenchmarkPrecompiledBLS12381G2MultiExpWorstCase(b *testing.B) {
}
benchmarkPrecompiled("0f", testcase, b)
}
func TestDoubleSignSlash(t *testing.T) {
tc := precompiledTest{
Input: "f906278202cab9030ff9030ca01062d3d5015b9242bc193a9b0769f3d3780ecb55f97f40a752ae26d0b68cd0d8a0fae1a05fcb14bfd9b8a9f2b65007a9b6c2000de0627a73be644dd993d32342c494976ea74026e726554db657fa54763abd0c3a0aa9a0f385cc58ed297ff0d66eb5580b02853d3478ba418b1819ac659ee05df49b9794a0bf88464af369ed6b8cf02db00f0b9556ffa8d49cd491b00952a7f83431446638a00a6d0870e586a76278fbfdcedf76ef6679af18fc1f9137cfad495f434974ea81b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001820cdf830f4240830f4240846555fa64b90111d983010301846765746888676f312e32302e378664617277696e00007abd731ef8ae07b86091cb8836d58f5444b883422a18825d899035d3e6ea39ad1a50069bf0b86da8b5573dde1cb4a0a34f19ce94e0ef78ff7518c80265b8a3ca56e3c60167523590d4e8dcc324900559465fc0fa403774096614e135de280949b58a45cc96f2ba9e17f848820d41a08429d0d8b33ee72a84f750fefea846cbca54e487129c7961c680bb72309ca888820d42a08c9db14d938b19f9e2261bbeca2679945462be2b58103dfff73665d0d150fb8a804ae755e0fe64b59753f4db6308a1f679747bce186aa2c62b95fa6eeff3fbd08f3b0667e45428a54ade15bad19f49641c499b431b36f65803ea71b379e6b61de501a0232c9ba2d41b40d36ed794c306747bcbc49bf61a0f37409c18bfe2b5bef26a2d880000000000000000b9030ff9030ca01062d3d5015b9242bc193a9b0769f3d3780ecb55f97f40a752ae26d0b68cd0d8a0b2789a5357827ed838335283e15c4dcc42b9bebcbf2919a18613246787e2f96094976ea74026e726554db657fa54763abd0c3a0aa9a071ce4c09ee275206013f0063761bc19c93c13990582f918cc57333634c94ce89a00e095703e5c9b149f253fe89697230029e32484a410b4b1f2c61442d73c3095aa0d317ae19ede7c8a2d3ac9ef98735b049bcb7278d12f48c42b924538b60a25e12b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001820cdf830f4240830f4240846555fa64b90111d983010301846765746888676f312e32302e378664617277696e00007abd731ef8ae07b86091cb8836d58f5444b883422a18825d899035d3e6ea39ad1a50069bf0b86da8b5573dde1cb4a0a34f19ce94e0ef78ff7518c80265b8a3ca56e3c60167523590d4e8dcc324900559465fc0fa403774096614e135de280949b58a45cc96f2ba9e17f848820d41a08429d0d8b33ee72a84f750fefea846cbca54e487129c7961c680bb72309ca888820d42a08c9db14d938b19f9e2261bbeca2679945462be2b58103dfff73665d0d150fb8a80c0b17bfe88534296ff064cb7156548f6deba2d6310d5044ed6485f087dc6ef232e051c28e1909c2b50a3b4f29345d66681c319bef653e52e5d746480d5a3983b00a0b56228685be711834d0f154292d07826dea42a0fad3e4f56c31470b7fbfbea26880000000000000000",
Expected: "15d34aaf54267db7d7c367839aaf71a00a2c6a650000000000000000000000000000000000000000000000000000000000000cdf",
Gas: 1000,
Name: "",
}
testPrecompiled("68", tc, t)
}

View File

@@ -48,6 +48,8 @@ type (
func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) {
var precompiles map[common.Address]PrecompiledContract
switch {
case evm.chainRules.IsFeynman:
precompiles = PrecompiledContractsFeynman
case evm.chainRules.IsCancun:
precompiles = PrecompiledContractsCancun
case evm.chainRules.IsHertz:

View File

@@ -183,6 +183,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
chainConfig.VerkleTime = config.OverrideVerkle
overrides.OverrideVerkle = config.OverrideVerkle
}
if config.OverrideFeynman != nil {
chainConfig.FeynmanTime = config.OverrideFeynman
overrides.OverrideFeynman = config.OverrideFeynman
}
eth := &Ethereum{
config: config,

View File

@@ -193,6 +193,9 @@ type Config struct {
// OverrideVerkle (TODO: remove after the fork)
OverrideVerkle *uint64 `toml:",omitempty"`
// OverrideFeynman (TODO: remove after the fork)
OverrideFeynman *uint64 `toml:",omitempty"`
}
// CreateConsensusEngine creates a consensus engine for the given chain config.

View File

@@ -36,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/systemcontracts"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
@@ -522,12 +523,18 @@ func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config
}
defer release()
// upgrade built-in system contracts before tracing non-system txs if Feynman is not enabled
if !api.backend.ChainConfig().IsFeynman(block.Number(), block.Time()) {
systemcontracts.UpgradeBuildInSystemContract(api.backend.ChainConfig(), block.Number(), parent.Time(), block.Time(), statedb)
}
var (
roots []common.Hash
signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time())
chainConfig = api.backend.ChainConfig()
vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
deleteEmptyObjects = chainConfig.IsEIP158(block.Number())
beforeSystemTx = true
)
for i, tx := range block.Transactions() {
if err := ctx.Err(); err != nil {
@@ -546,6 +553,11 @@ func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config
statedb.SetBalance(consensus.SystemAddress, big.NewInt(0))
statedb.AddBalance(vmctx.Coinbase, balance)
}
if beforeSystemTx && api.backend.ChainConfig().IsFeynman(block.Number(), block.Time()) {
systemcontracts.UpgradeBuildInSystemContract(api.backend.ChainConfig(), block.Number(), parent.Time(), block.Time(), statedb)
beforeSystemTx = false
}
}
}
@@ -602,6 +614,11 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
}
defer release()
// upgrade built-in system contracts before tracing non-system txs if Feynman is not enabled
if !api.backend.ChainConfig().IsFeynman(block.Number(), block.Time()) {
systemcontracts.UpgradeBuildInSystemContract(api.backend.ChainConfig(), block.Number(), parent.Time(), block.Time(), statedb)
}
// JS tracers have high overhead. In this case run a parallel
// process that generates states in one thread and traces txes
// in separate worker threads.
@@ -610,18 +627,33 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
return api.traceBlockParallel(ctx, block, statedb, config)
}
}
// Native tracers have low overhead
var (
txs = block.Transactions()
blockHash = block.Hash()
is158 = api.backend.ChainConfig().IsEIP158(block.Number())
blockCtx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time())
results = make([]*txTraceResult, len(txs))
txs = block.Transactions()
blockHash = block.Hash()
is158 = api.backend.ChainConfig().IsEIP158(block.Number())
blockCtx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time())
results = make([]*txTraceResult, len(txs))
beforeSystemTx = true
)
for i, tx := range txs {
// upgrade built-in system contracts before tracing system txs if Feynman is enabled
if posa, ok := api.backend.Engine().(consensus.PoSA); ok {
if isSystem, _ := posa.IsSystemTransaction(tx, block.Header()); isSystem {
if beforeSystemTx {
if api.backend.ChainConfig().IsFeynman(block.Number(), block.Time()) {
systemcontracts.UpgradeBuildInSystemContract(api.backend.ChainConfig(), block.Number(), parent.Time(), block.Time(), statedb)
}
beforeSystemTx = false
}
}
}
// Generate the next state snapshot fast without tracing
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
txctx := &Context{
BlockHash: blockHash,
BlockNumber: block.Number(),
@@ -682,10 +714,36 @@ func (api *API) traceBlockParallel(ctx context.Context, block *types.Block, stat
})
}
// upgrade built-in system contracts before tracing non-system txs if Feynman is not enabled
parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash())
if err != nil {
return nil, err
}
if !api.backend.ChainConfig().IsFeynman(block.Number(), block.Time()) {
systemcontracts.UpgradeBuildInSystemContract(api.backend.ChainConfig(), block.Number(), parent.Time(), block.Time(), statedb)
}
// Feed the transactions into the tracers and return
var failed error
var (
failed error
beforeSystemTx = true
)
txloop:
for i, tx := range txs {
var isSystem bool
// upgrade built-in system contracts before tracing system txs if Feynman is enabled
if posa, ok := api.backend.Engine().(consensus.PoSA); ok {
isSystem, _ = posa.IsSystemTransaction(tx, block.Header())
if isSystem {
if beforeSystemTx {
if api.backend.ChainConfig().IsFeynman(block.Number(), block.Time()) {
systemcontracts.UpgradeBuildInSystemContract(api.backend.ChainConfig(), block.Number(), parent.Time(), block.Time(), statedb)
}
beforeSystemTx = false
}
}
}
// Send the trace task over for execution
task := &txTraceTask{statedb: statedb.Copy(), index: i}
select {
@@ -697,13 +755,11 @@ txloop:
// Generate the next state snapshot fast without tracing
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
if posa, ok := api.backend.Engine().(consensus.PoSA); ok {
if isSystem, _ := posa.IsSystemTransaction(tx, block.Header()); isSystem {
balance := statedb.GetBalance(consensus.SystemAddress)
if balance.Cmp(common.Big0) > 0 {
statedb.SetBalance(consensus.SystemAddress, big.NewInt(0))
statedb.AddBalance(block.Header().Coinbase, balance)
}
if isSystem {
balance := statedb.GetBalance(consensus.SystemAddress)
if balance.Cmp(common.Big0) > 0 {
statedb.SetBalance(consensus.SystemAddress, big.NewInt(0))
statedb.AddBalance(block.Header().Coinbase, balance)
}
}
statedb.SetTxContext(tx.Hash(), i)
@@ -754,6 +810,11 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
}
defer release()
// upgrade built-in system contracts before tracing non-system txs if Feynman is not enabled
if !api.backend.ChainConfig().IsFeynman(block.Number(), block.Time()) {
systemcontracts.UpgradeBuildInSystemContract(api.backend.ChainConfig(), block.Number(), parent.Time(), block.Time(), statedb)
}
// Retrieve the tracing configurations, or use default values
var (
logConfig logger.Config
@@ -782,7 +843,23 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
// Note: This copies the config, to not screw up the main config
chainConfig, canon = overrideConfig(chainConfig, config.Overrides)
}
beforeSystemTx := true
for i, tx := range block.Transactions() {
// upgrade built-in system contracts before tracing system txs if Feynman is enabled
var isSystem bool
if posa, ok := api.backend.Engine().(consensus.PoSA); ok {
isSystem, _ = posa.IsSystemTransaction(tx, block.Header())
if isSystem {
if beforeSystemTx {
if api.backend.ChainConfig().IsFeynman(block.Number(), block.Time()) {
systemcontracts.UpgradeBuildInSystemContract(api.backend.ChainConfig(), block.Number(), parent.Time(), block.Time(), statedb)
}
beforeSystemTx = false
}
}
}
// Prepare the transaction for un-traced execution
var (
msg, _ = core.TransactionToMessage(tx, signer, block.BaseFee())
@@ -814,13 +891,11 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
}
// Execute the transaction and flush any traces to disk
vmenv := vm.NewEVM(vmctx, txContext, statedb, chainConfig, vmConf)
if posa, ok := api.backend.Engine().(consensus.PoSA); ok {
if isSystem, _ := posa.IsSystemTransaction(tx, block.Header()); isSystem {
balance := statedb.GetBalance(consensus.SystemAddress)
if balance.Cmp(common.Big0) > 0 {
statedb.SetBalance(consensus.SystemAddress, big.NewInt(0))
statedb.AddBalance(vmctx.Coinbase, balance)
}
if isSystem {
balance := statedb.GetBalance(consensus.SystemAddress)
if balance.Cmp(common.Big0) > 0 {
statedb.SetBalance(consensus.SystemAddress, big.NewInt(0))
statedb.AddBalance(vmctx.Coinbase, balance)
}
}
statedb.SetTxContext(tx.Hash(), i)
@@ -887,6 +962,22 @@ func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config *
}
defer release()
parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash())
if err != nil {
return nil, err
}
if !api.backend.ChainConfig().IsFeynman(block.Number(), block.Time()) {
// upgrade build-in system contract before trace if Feynman is not enabled
systemcontracts.UpgradeBuildInSystemContract(api.backend.ChainConfig(), block.Number(), parent.Time(), block.Time(), statedb)
} else {
// upgrade build-in system contract before trace system tx if Feynman is enabled
if posa, ok := api.backend.Engine().(consensus.PoSA); ok {
if isSystem, _ := posa.IsSystemTransaction(tx, block.Header()); isSystem {
systemcontracts.UpgradeBuildInSystemContract(api.backend.ChainConfig(), block.Number(), parent.Time(), block.Time(), statedb)
}
}
}
txctx := &Context{
BlockHash: blockHash,
BlockNumber: block.Number(),
@@ -934,6 +1025,15 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc
}
defer release()
// upgrade build-in system contract before tracing if Feynman is not enabled
parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash())
if err != nil {
return nil, err
}
if !api.backend.ChainConfig().IsFeynman(block.Number(), block.Time()) {
systemcontracts.UpgradeBuildInSystemContract(api.backend.ChainConfig(), block.Number(), parent.Time(), block.Time(), statedb)
}
vmctx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
// Apply the customization rules if required.
if config != nil {
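Taken together, the tracing changes above follow one ordering rule: before Feynman, the built-in system contracts are upgraded once at the start of the block, ahead of any traced transaction; from Feynman on, the upgrade is deferred until just before the first system transaction of the block. A rough sketch of that control flow, where isSystemTx and upgradeContracts are hypothetical stand-ins for the PoSA system-transaction check and systemcontracts.UpgradeBuildInSystemContract:

package main

import "fmt"

// traceBlockSketch shows the ordering only; the tx loop body is elided.
func traceBlockSketch(txCount int, isFeynman bool,
	isSystemTx func(i int) bool, upgradeContracts func()) {

	if !isFeynman {
		upgradeContracts() // pre-Feynman: upgrade once, before any traced tx
	}
	upgraded := false
	for i := 0; i < txCount; i++ {
		if isFeynman && !upgraded && isSystemTx(i) {
			upgradeContracts() // Feynman: upgrade right before the first system tx
			upgraded = true
		}
		// ... trace transaction i ...
	}
}

func main() {
	traceBlockSketch(3, true,
		func(i int) bool { return i == 2 }, // last tx is the system tx
		func() { fmt.Println("upgrade built-in system contracts") })
}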

go.mod

@@ -295,5 +295,5 @@ replace (
github.com/cometbft/cometbft => github.com/bnb-chain/greenfield-tendermint v0.0.0-20230417032003-4cda1f296fb2
github.com/grpc-ecosystem/grpc-gateway/v2 => github.com/prysmaticlabs/grpc-gateway/v2 v2.3.1-0.20210702154020-550e1cd83ec1
github.com/syndtr/goleveldb v1.0.1 => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/tendermint/tendermint => github.com/bnb-chain/tendermint v0.31.15
github.com/tendermint/tendermint => github.com/bnb-chain/tendermint v0.31.16
)

go.sum

@@ -188,8 +188,8 @@ github.com/bnb-chain/greenfield-tendermint v0.0.0-20230417032003-4cda1f296fb2 h1
github.com/bnb-chain/greenfield-tendermint v0.0.0-20230417032003-4cda1f296fb2/go.mod h1:9q11eHNRY9FDwFH+4pompzPNGv//Z3VcfvkELaHJPMs=
github.com/bnb-chain/ics23 v0.1.0 h1:DvjGOts2FBfbxB48384CYD1LbcrfjThFz8kowY/7KxU=
github.com/bnb-chain/ics23 v0.1.0/go.mod h1:cU6lTGolbbLFsGCgceNB2AzplH1xecLp6+KXvxM32nI=
github.com/bnb-chain/tendermint v0.31.15 h1:Xyn/Hifb/7X4E1zSuMdnZdMSoM2Fx6cZuKCNnqIxbNU=
github.com/bnb-chain/tendermint v0.31.15/go.mod h1:cmt8HHmQUSVaWQ/hoTefRxsh5X3ERaM1zCUIR0DPbFU=
github.com/bnb-chain/tendermint v0.31.16 h1:rOO6WG61JDAuRCCL8TKnGhorJftQDVygq0mqR7A0ck4=
github.com/bnb-chain/tendermint v0.31.16/go.mod h1:cmt8HHmQUSVaWQ/hoTefRxsh5X3ERaM1zCUIR0DPbFU=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/bradfitz/gomemcache v0.0.0-20170208213004-1952afaa557d/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60=


@@ -711,10 +711,10 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn
gasLimit := env.header.GasLimit
if env.gasPool == nil {
env.gasPool = new(core.GasPool).AddGas(gasLimit)
if w.chain.Config().IsEuler(env.header.Number) {
env.gasPool.SubGas(params.SystemTxsGas * 3)
if w.chain.Config().IsFeynman(env.header.Number, env.header.Time) {
env.gasPool.SubGas(params.SystemTxsGas * 40) // 20,000,000
} else {
env.gasPool.SubGas(params.SystemTxsGas)
env.gasPool.SubGas(params.SystemTxsGas * 5)
}
}
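The reservation above is sized for the larger post-Feynman system-transaction set. Assuming params.SystemTxsGas is 500,000, which is what the inline "20,000,000" comment implies, the miner now withholds 40 * 500,000 gas from the block gas pool on Feynman blocks and 5 * 500,000 gas otherwise. A quick sketch of that arithmetic (the gas-limit figure is only an example):

package main

import "fmt"

const systemTxsGas = 500_000 // assumed value of params.SystemTxsGas

// reservedGas mirrors the branch added above: a larger reservation once Feynman is live.
func reservedGas(isFeynman bool) uint64 {
	if isFeynman {
		return systemTxsGas * 40 // 20,000,000 gas kept back for system txs
	}
	return systemTxsGas * 5 // 2,500,000 gas kept back
}

func main() {
	gasLimit := uint64(140_000_000) // example block gas limit, not a mainnet figure
	fmt.Println(gasLimit - reservedGas(true))  // 120000000 left for user transactions
	fmt.Println(gasLimit - reservedGas(false)) // 137500000 left for user transactions
}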
@@ -728,7 +728,7 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn
stopPrefetchCh := make(chan struct{})
defer close(stopPrefetchCh)
//prefetch txs from all pending txs
// prefetch txs from all pending txs
txsPrefetch := txs.Copy()
tx := txsPrefetch.PeekWithUnwrap()
if tx != nil {
@@ -913,8 +913,10 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
return nil, err
}
// Handle upgrade build-in system contract code
systemcontracts.UpgradeBuildInSystemContract(w.chainConfig, header.Number, parent.Time, header.Time, env.state)
if !w.chainConfig.IsFeynman(header.Number, header.Time) {
// Handle upgrade build-in system contract code
systemcontracts.UpgradeBuildInSystemContract(w.chainConfig, header.Number, parent.Time, header.Time, env.state)
}
return env, nil
}


@@ -148,6 +148,9 @@ var (
ShanghaiTime: newUint64(1705996800),
KeplerTime: newUint64(1705996800),
// TODO
FeynmanTime: nil,
Parlia: &ParliaConfig{
Period: 3,
Epoch: 200,
@@ -184,6 +187,9 @@ var (
ShanghaiTime: newUint64(1702972800),
KeplerTime: newUint64(1702972800),
// TODO
FeynmanTime: nil,
Parlia: &ParliaConfig{
Period: 3,
Epoch: 200,
@@ -219,6 +225,7 @@ var (
HertzfixBlock: big.NewInt(8),
ShanghaiTime: newUint64(0),
KeplerTime: newUint64(0),
FeynmanTime: newUint64(0),
Parlia: &ParliaConfig{
Period: 3,
@@ -454,11 +461,12 @@ type ChainConfig struct {
// Fork scheduling was switched from blocks to timestamps here
ShanghaiTime *uint64 `json:"shanghaiTime,omitempty" ` // Shanghai switch time (nil = no fork, 0 = already on shanghai)
KeplerTime *uint64 `json:"keplerTime,omitempty"` // Kepler switch time (nil = no fork, 0 = already activated)
CancunTime *uint64 `json:"cancunTime,omitempty" ` // Cancun switch time (nil = no fork, 0 = already on cancun)
PragueTime *uint64 `json:"pragueTime,omitempty" ` // Prague switch time (nil = no fork, 0 = already on prague)
VerkleTime *uint64 `json:"verkleTime,omitempty" ` // Verkle switch time (nil = no fork, 0 = already on verkle)
ShanghaiTime *uint64 `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai)
KeplerTime *uint64 `json:"keplerTime,omitempty"` // Kepler switch time (nil = no fork, 0 = already activated)
FeynmanTime *uint64 `json:"feynmanTime,omitempty"` // Feynman switch time (nil = no fork, 0 = already activated)
CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun)
PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague)
VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle)
// TerminalTotalDifficulty is the amount of total difficulty reached by
// the network that triggers the consensus upgrade.
@@ -548,7 +556,12 @@ func (c *ChainConfig) String() string {
KeplerTime = big.NewInt(0).SetUint64(*c.KeplerTime)
}
return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, Engine: %v}",
var FeynmanTime *big.Int
if c.FeynmanTime != nil {
FeynmanTime = big.NewInt(0).SetUint64(*c.FeynmanTime)
}
return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, Engine: %v}",
c.ChainID,
c.HomesteadBlock,
c.DAOForkBlock,
@@ -582,6 +595,7 @@ func (c *ChainConfig) String() string {
c.HertzfixBlock,
ShanghaiTime,
KeplerTime,
FeynmanTime,
engine,
)
}
@@ -816,6 +830,20 @@ func (c *ChainConfig) IsOnKepler(currentBlockNumber *big.Int, lastBlockTime uint
return !c.IsKepler(lastBlockNumber, lastBlockTime) && c.IsKepler(currentBlockNumber, currentBlockTime)
}
// IsFeynman returns whether time is either equal to the Feynman fork time or greater.
func (c *ChainConfig) IsFeynman(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.FeynmanTime, time)
}
// IsOnFeynman returns whether currentBlockTime is either equal to the Feynman fork time or greater firstly.
func (c *ChainConfig) IsOnFeynman(currentBlockNumber *big.Int, lastBlockTime uint64, currentBlockTime uint64) bool {
lastBlockNumber := new(big.Int)
if currentBlockNumber.Cmp(big.NewInt(1)) >= 0 {
lastBlockNumber.Sub(currentBlockNumber, big.NewInt(1))
}
return !c.IsFeynman(lastBlockNumber, lastBlockTime) && c.IsFeynman(currentBlockNumber, currentBlockTime)
}
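IsFeynman is a plain timestamp check (additionally gated on London), while IsOnFeynman is true only for the first block whose timestamp crosses the fork time. A small sketch of the same transition logic on bare timestamps; feynmanTime stands in for the configured fork time and the London/block-number checks are omitted:

package main

import "fmt"

// isFeynman mirrors the timestamp half of ChainConfig.IsFeynman.
func isFeynman(feynmanTime *uint64, blockTime uint64) bool {
	return feynmanTime != nil && blockTime >= *feynmanTime
}

// isOnFeynman mirrors IsOnFeynman: the parent is still pre-fork,
// the current block is at or past the fork time.
func isOnFeynman(feynmanTime *uint64, parentTime, blockTime uint64) bool {
	return !isFeynman(feynmanTime, parentTime) && isFeynman(feynmanTime, blockTime)
}

func main() {
	fork := uint64(1_700_000_000) // example fork timestamp
	fmt.Println(isOnFeynman(&fork, fork-3, fork)) // true: first block past the fork
	fmt.Println(isOnFeynman(&fork, fork, fork+3)) // false: fork already active
}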
// IsCancun returns whether num is either equal to the Cancun fork time or greater.
func (c *ChainConfig) IsCancun(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.CancunTime, time)
@@ -881,6 +909,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
{name: "hertzBlock", block: c.HertzBlock},
{name: "hertzfixBlock", block: c.HertzfixBlock},
{name: "keplerTime", timestamp: c.KeplerTime},
{name: "feynmanTime", timestamp: c.FeynmanTime},
{name: "cancunTime", timestamp: c.CancunTime, optional: true},
{name: "pragueTime", timestamp: c.PragueTime, optional: true},
{name: "verkleTime", timestamp: c.VerkleTime, optional: true},
@@ -1020,6 +1049,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, headNumber *big.Int,
if isForkTimestampIncompatible(c.KeplerTime, newcfg.KeplerTime, headTimestamp) {
return newTimestampCompatError("Kepler fork timestamp", c.KeplerTime, newcfg.KeplerTime)
}
if isForkTimestampIncompatible(c.FeynmanTime, newcfg.FeynmanTime, headTimestamp) {
return newTimestampCompatError("Feynman fork timestamp", c.FeynmanTime, newcfg.FeynmanTime)
}
if isForkTimestampIncompatible(c.CancunTime, newcfg.CancunTime, headTimestamp) {
return newTimestampCompatError("Cancun fork timestamp", c.CancunTime, newcfg.CancunTime)
}
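The compatibility check only rejects a changed Feynman timestamp when the change would rewrite history the node has already processed: if the head block's time is still below both the stored and the updated value, the config can be swapped freely. A compact sketch following the shape of isForkTimestampIncompatible (the helper names here are illustrative):

package main

import "fmt"

// timestampForked reports whether a fork scheduled at t is active at head.
func timestampForked(t *uint64, head uint64) bool {
	return t != nil && *t <= head
}

// incompatible: changing a fork time is only a problem if either the old or
// the new value already lies in the node's past, and the two values differ.
func incompatible(stored, updated *uint64, head uint64) bool {
	equal := (stored == nil && updated == nil) ||
		(stored != nil && updated != nil && *stored == *updated)
	return (timestampForked(stored, head) || timestampForked(updated, head)) && !equal
}

func main() {
	oldT, newT := uint64(2_000), uint64(3_000)
	fmt.Println(incompatible(&oldT, &newT, 1_500)) // false: both still in the future
	fmt.Println(incompatible(&oldT, &newT, 2_500)) // true: the old time has passed
}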
@@ -1181,7 +1213,7 @@ type Rules struct {
IsPlato bool
IsHertz bool
IsHertzfix bool
IsShanghai, IsKepler, IsCancun, IsPrague bool
IsShanghai, IsKepler, IsFeynman, IsCancun, IsPrague bool
IsVerkle bool
}
@@ -1213,6 +1245,7 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules
IsHertzfix: c.IsHertzfix(num),
IsShanghai: c.IsShanghai(num, timestamp),
IsKepler: c.IsKepler(num, timestamp),
IsFeynman: c.IsFeynman(num, timestamp),
IsCancun: c.IsCancun(num, timestamp),
IsPrague: c.IsPrague(num, timestamp),
IsVerkle: c.IsVerkle(num, timestamp),


@@ -144,6 +144,7 @@ const (
IdentityPerWordGas uint64 = 3 // Per-work price for a data copy operation
BlsSignatureVerifyBaseGas uint64 = 1000 // base price for a BLS signature verify operation
BlsSignatureVerifyPerKeyGas uint64 = 3500 // Per-key price for a BLS signature verify operation
DoubleSignEvidenceVerifyGas uint64 = 1000 // Gas for verify double sign evidence
Bn256AddGasByzantium uint64 = 500 // Byzantium gas needed for an elliptic curve addition
Bn256AddGasIstanbul uint64 = 150 // Gas needed for an elliptic curve addition
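DoubleSignEvidenceVerifyGas is the flat cost charged by the new double-sign-evidence precompile added for BEP-294/BEP-299. In the EVM's precompile model a contract reports its cost through RequiredGas before Run executes; below is a minimal sketch of a flat-priced precompile in that style, where the doubleSignVerify type and its body are hypothetical and only the interface shape and the 1000-gas figure come from the source:

package main

import (
	"errors"
	"fmt"
)

const doubleSignEvidenceVerifyGas uint64 = 1000 // mirrors params.DoubleSignEvidenceVerifyGas

// PrecompiledContract is the interface every precompile implements in the EVM.
type PrecompiledContract interface {
	RequiredGas(input []byte) uint64
	Run(input []byte) ([]byte, error)
}

// doubleSignVerify is a hypothetical stand-in for the real evidence precompile.
type doubleSignVerify struct{}

func (doubleSignVerify) RequiredGas(input []byte) uint64 { return doubleSignEvidenceVerifyGas }

func (doubleSignVerify) Run(input []byte) ([]byte, error) {
	if len(input) == 0 {
		return nil, errors.New("empty evidence")
	}
	// Real verification would decode two headers, check they share the same
	// height and parent but carry different seal hashes, and recover the signer.
	return []byte{1}, nil
}

func main() {
	var p PrecompiledContract = doubleSignVerify{}
	fmt.Println(p.RequiredGas(nil)) // 1000
}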


@@ -262,7 +262,7 @@ func parliaHeaderHashAndRlp(header *types.Header, chainId *big.Int) (hash, rlp [
return
}
rlp = parlia.ParliaRLP(header, chainId)
hash = parlia.SealHash(header, chainId).Bytes()
hash = types.SealHash(header, chainId).Bytes()
return hash, rlp, err
}