rename: change the next upgrade name to Planck (#1339)

Larry 2023-03-07 09:59:03 +08:00 committed by GitHub
parent a956064629
commit a476e315f2
7 changed files with 42 additions and 42 deletions

@@ -328,10 +328,10 @@ func (p *Parlia) verifyHeader(chain consensus.ChainHeaderReader, header *types.H
 	}
 	number := header.Number.Uint64()
-	// According to BEP188, after Bohr fork, an in-turn validator is allowed to broadcast
+	// According to BEP188, after Planck fork, an in-turn validator is allowed to broadcast
 	// a mined block earlier but not earlier than its parent's timestamp when the block is ready .
 	if header.Time > uint64(time.Now().Unix()) {
-		if !chain.Config().IsBohr(header.Number) || header.Difficulty.Cmp(diffInTurn) != 0 {
+		if !chain.Config().IsPlanck(header.Number) || header.Difficulty.Cmp(diffInTurn) != 0 {
 			return consensus.ErrFutureBlock
 		}
 	}
 	var parent *types.Header
@@ -883,7 +883,7 @@ func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
 	}
 	// BEP-188 allows an in-turn validator to broadcast the mined block earlier
-	// but not earlier than its parent's timestamp after Bohr fork.
+	// but not earlier than its parent's timestamp after Planck fork.
 	// At the same time, small block which means gas used rate is less than
 	// gasUsedRateDemarcation does not broadcast early to avoid an upcoming fat block.
 	delay := time.Duration(0)
@@ -891,7 +891,7 @@ func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
 	if header.GasLimit != 0 {
 		gasUsedRate = header.GasUsed * 100 / header.GasLimit
 	}
-	if p.chainConfig.IsBohr(header.Number) && header.Difficulty.Cmp(diffInTurn) == 0 && gasUsedRate >= gasUsedRateDemarcation {
+	if p.chainConfig.IsPlanck(header.Number) && header.Difficulty.Cmp(diffInTurn) == 0 && gasUsedRate >= gasUsedRateDemarcation {
 		parent := chain.GetHeader(header.ParentHash, number-1)
 		if parent == nil || parent.Number.Uint64() != number-1 || parent.Hash() != header.ParentHash {
 			return consensus.ErrUnknownAncestor
@@ -1323,7 +1323,7 @@ func (p *Parlia) backOffTime(snap *Snapshot, header *types.Header, val common.Ad
 	} else {
 		delay := initialBackOffTime
 		validators := snap.validators()
-		if p.chainConfig.IsBohr(header.Number) {
+		if p.chainConfig.IsPlanck(header.Number) {
 			// reverse the key/value of snap.Recents to get recentsMap
 			recentsMap := make(map[common.Address]uint64, len(snap.Recents))
 			for seen, recent := range snap.Recents {

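A note for readers skimming the Parlia hunks above: only the fork gate is renamed, the BEP-188 behaviour itself is unchanged. As a rough standalone illustration of the future-block rule (a sketch, not the Parlia code; isPlanck and inTurn stand in for chain.Config().IsPlanck(header.Number) and the diffInTurn difficulty comparison):

package main

import (
	"fmt"
	"time"
)

// mayAcceptEarlyBlock mirrors the intent of the verifyHeader change: a block
// carrying a future timestamp is only tolerated once the Planck fork is
// active and the sealer is the in-turn validator; otherwise it is rejected,
// as ErrFutureBlock is returned in the real code.
func mayAcceptEarlyBlock(isPlanck, inTurn bool, headerTime, now uint64) bool {
	if headerTime > now {
		if !isPlanck || !inTurn {
			return false
		}
	}
	return true
}

func main() {
	now := uint64(time.Now().Unix())
	fmt.Println(mayAcceptEarlyBlock(false, true, now+2, now)) // false: fork not active yet
	fmt.Println(mayAcceptEarlyBlock(true, true, now+2, now))  // true: Planck + in-turn
}
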
@@ -50,7 +50,7 @@ var (
 	moranUpgrade  = make(map[string]*Upgrade)
-	bohrUpgrade   = make(map[string]*Upgrade)
+	planckUpgrade = make(map[string]*Upgrade)
 )
 func init() {
@@ -483,9 +483,9 @@ func init() {
 		},
 	}
-	// TODO: update the commit url of bohr upgrade
-	bohrUpgrade[mainNet] = &Upgrade{
-		UpgradeName: "bohr",
+	// TODO: update the commit url of planck upgrade
+	planckUpgrade[mainNet] = &Upgrade{
+		UpgradeName: "planck",
 		Configs: []*UpgradeConfig{
 			{
 				ContractAddr: common.HexToAddress(SlashContract),
@@ -505,8 +505,8 @@ func init() {
 		},
 	}
-	bohrUpgrade[chapelNet] = &Upgrade{
-		UpgradeName: "bohr",
+	planckUpgrade[chapelNet] = &Upgrade{
+		UpgradeName: "planck",
 		Configs: []*UpgradeConfig{
 			{
 				ContractAddr: common.HexToAddress(SlashContract),
@@ -531,8 +531,8 @@ func init() {
 		},
 	}
-	bohrUpgrade[rialtoNet] = &Upgrade{
-		UpgradeName: "bohr",
+	planckUpgrade[rialtoNet] = &Upgrade{
+		UpgradeName: "planck",
 		Configs: []*UpgradeConfig{
 			{
 				ContractAddr: common.HexToAddress(SlashContract),
@@ -600,8 +600,8 @@ func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.I
 		applySystemContractUpgrade(moranUpgrade[network], blockNumber, statedb, logger)
 	}
-	if config.IsOnBohr(blockNumber) {
-		applySystemContractUpgrade(bohrUpgrade[network], blockNumber, statedb, logger)
+	if config.IsOnPlanck(blockNumber) {
+		applySystemContractUpgrade(planckUpgrade[network], blockNumber, statedb, logger)
 	}
 	/*

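For context on the hunks above: planckUpgrade is keyed by network name and is applied exactly once, at the fork height, because UpgradeBuildInSystemContract gates it on IsOnPlanck rather than IsPlanck. A small sketch of that dispatch pattern, using hypothetical stand-in types and a literal network key instead of the mainNet constant:

package main

import (
	"fmt"
	"math/big"
)

// Upgrade is a stand-in for the real systemcontracts.Upgrade type.
type Upgrade struct{ UpgradeName string }

// planckUpgrade mirrors the per-network registry populated in init().
var planckUpgrade = map[string]*Upgrade{
	"mainnet": {UpgradeName: "planck"},
}

// applyAtForkBlock applies the network's upgrade only when the chain is
// exactly at the configured fork height, mirroring the IsOnPlanck gate.
func applyAtForkBlock(planckBlock, blockNumber *big.Int, network string) {
	if planckBlock == nil || planckBlock.Cmp(blockNumber) != 0 {
		return
	}
	if up := planckUpgrade[network]; up != nil {
		fmt.Println("applying system contract upgrade:", up.UpgradeName)
	}
}

func main() {
	applyAtForkBlock(big.NewInt(100), big.NewInt(100), "mainnet") // applies once
	applyAtForkBlock(big.NewInt(100), big.NewInt(101), "mainnet") // no-op afterwards
}
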
@@ -111,7 +111,7 @@ var PrecompiledContractsMoran = map[common.Address]PrecompiledContract{
 	common.BytesToAddress([]byte{101}): &iavlMerkleProofValidateMoran{},
 }
-var PrecompiledContractsBohr = map[common.Address]PrecompiledContract{
+var PrecompiledContractsPlanck = map[common.Address]PrecompiledContract{
 	common.BytesToAddress([]byte{1}): &ecrecover{},
 	common.BytesToAddress([]byte{2}): &sha256hash{},
 	common.BytesToAddress([]byte{3}): &ripemd160hash{},
@@ -123,7 +123,7 @@ var PrecompiledContractsBohr = map[common.Address]PrecompiledContract{
 	common.BytesToAddress([]byte{9}):   &blake2F{},
 	common.BytesToAddress([]byte{100}): &tmHeaderValidate{},
-	common.BytesToAddress([]byte{101}): &iavlMerkleProofValidateBohr{},
+	common.BytesToAddress([]byte{101}): &iavlMerkleProofValidatePlanck{},
 }
 // PrecompiledContractsBerlin contains the default set of pre-compiled Ethereum
@@ -155,7 +155,7 @@ var PrecompiledContractsBLS = map[common.Address]PrecompiledContract{
 }
 var (
-	PrecompiledAddressesBohr   []common.Address
+	PrecompiledAddressesPlanck []common.Address
 	PrecompiledAddressesMoran  []common.Address
 	PrecompiledAddressesNano   []common.Address
 	PrecompiledAddressesBerlin []common.Address
@@ -183,16 +183,16 @@ func init() {
 	for k := range PrecompiledContractsMoran {
 		PrecompiledAddressesMoran = append(PrecompiledAddressesMoran, k)
 	}
-	for k := range PrecompiledContractsBohr {
-		PrecompiledAddressesBohr = append(PrecompiledAddressesBohr, k)
+	for k := range PrecompiledContractsPlanck {
+		PrecompiledAddressesPlanck = append(PrecompiledAddressesPlanck, k)
 	}
 }
 // ActivePrecompiles returns the precompiles enabled with the current configuration.
 func ActivePrecompiles(rules params.Rules) []common.Address {
 	switch {
-	case rules.IsBohr:
-		return PrecompiledAddressesBohr
+	case rules.IsPlanck:
+		return PrecompiledAddressesPlanck
 	case rules.IsMoran:
 		return PrecompiledAddressesMoran
 	case rules.IsNano:

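The switch in ActivePrecompiles (and the matching one in evm.go below) tests the newest fork first, so the Planck case has to stay above Moran and Nano even though a Planck chain also satisfies the older checks. A simplified sketch of that selection pattern, with a cut-down stand-in for params.Rules:

package main

import "fmt"

// rules is a cut-down stand-in for params.Rules.
type rules struct {
	IsPlanck, IsMoran, IsNano bool
}

// activePrecompileSet picks the set for the newest active fork, mirroring
// the case ordering in ActivePrecompiles and evm.precompile.
func activePrecompileSet(r rules) string {
	switch {
	case r.IsPlanck:
		return "planck"
	case r.IsMoran:
		return "moran"
	case r.IsNano:
		return "nano"
	default:
		return "berlin"
	}
}

func main() {
	// On a Planck chain the older flags are also true, but the first case wins.
	fmt.Println(activePrecompileSet(rules{IsPlanck: true, IsMoran: true, IsNano: true}))
}
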
@@ -155,15 +155,15 @@ func (c *iavlMerkleProofValidateMoran) Run(input []byte) (result []byte, err err
 	return c.basicIavlMerkleProofValidate.Run(input)
 }
-type iavlMerkleProofValidateBohr struct {
+type iavlMerkleProofValidatePlanck struct {
 	basicIavlMerkleProofValidate
 }
-func (c *iavlMerkleProofValidateBohr) RequiredGas(_ []byte) uint64 {
+func (c *iavlMerkleProofValidatePlanck) RequiredGas(_ []byte) uint64 {
 	return params.IAVLMerkleProofValidateGas
 }
-func (c *iavlMerkleProofValidateBohr) Run(input []byte) (result []byte, err error) {
+func (c *iavlMerkleProofValidatePlanck) Run(input []byte) (result []byte, err error) {
 	c.basicIavlMerkleProofValidate.proofRuntime = lightclient.Ics23CompatibleProofRuntime()
 	c.basicIavlMerkleProofValidate.verifiers = []merkle.ProofOpVerifier{
 		forbiddenAbsenceOpVerifier,

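The new iavlMerkleProofValidatePlanck type keeps the structure of its Moran counterpart: it embeds the shared validator and only overrides Run to install its proof runtime and verifier list before delegating. A generic sketch of that embedding pattern (the types and strings below are hypothetical, not the lightclient code):

package main

import "fmt"

// baseValidator is a hypothetical stand-in for basicIavlMerkleProofValidate.
type baseValidator struct {
	runtime string
}

func (b *baseValidator) validate(input []byte) string {
	return fmt.Sprintf("validated %d bytes with %s runtime", len(input), b.runtime)
}

// planckValidator embeds the base and only customises its configuration
// before delegating, the way iavlMerkleProofValidatePlanck installs the
// ics23-compatible proof runtime in Run.
type planckValidator struct {
	baseValidator
}

func (p *planckValidator) Run(input []byte) string {
	p.runtime = "ics23-compatible"
	return p.baseValidator.validate(input)
}

func main() {
	v := planckValidator{}
	fmt.Println(v.Run([]byte{0x01, 0x02}))
}
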
@@ -146,7 +146,7 @@ func TestIcs23Proof(t *testing.T) {
 	input := append(totalLengthPrefix, merkleProofInput...)
-	validator := iavlMerkleProofValidateBohr{}
+	validator := iavlMerkleProofValidatePlanck{}
 	success, err := validator.Run(input)
 	require.NoError(t, err)
 	expectedResult := make([]byte, 32)

@@ -52,8 +52,8 @@ type (
 func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) {
 	var precompiles map[common.Address]PrecompiledContract
 	switch {
-	case evm.chainRules.IsBohr:
-		precompiles = PrecompiledContractsBohr
+	case evm.chainRules.IsPlanck:
+		precompiles = PrecompiledContractsPlanck
 	case evm.chainRules.IsMoran:
 		precompiles = PrecompiledContractsMoran
 	case evm.chainRules.IsNano:

@@ -114,7 +114,7 @@ var (
 		NanoBlock:           big.NewInt(21962149),
 		MoranBlock:          big.NewInt(22107423),
 		GibbsBlock:          big.NewInt(23846001),
-		BohrBlock:           nil, // todo: TBD
+		PlanckBlock:         nil, // todo: TBD
 		Parlia: &ParliaConfig{
 			Period: 3,
@@ -141,7 +141,7 @@ var (
 		GibbsBlock:          big.NewInt(22800220),
 		NanoBlock:           big.NewInt(23482428),
 		MoranBlock:          big.NewInt(23603940),
-		BohrBlock:           nil, // todo: TBD
+		PlanckBlock:         nil, // todo: TBD
 		Parlia: &ParliaConfig{
 			Period: 3,
 			Epoch:  200,
@@ -167,7 +167,7 @@ var (
 		GibbsBlock:  big.NewInt(400),
 		NanoBlock:   nil,
 		MoranBlock:  nil,
-		BohrBlock:   nil,
+		PlanckBlock: nil,
 		Parlia: &ParliaConfig{
 			Period: 3,
@@ -285,7 +285,7 @@ type ChainConfig struct {
 	GibbsBlock  *big.Int `json:"gibbsBlock,omitempty" toml:",omitempty"`  // gibbsBlock switch block (nil = no fork, 0 = already activated)
 	NanoBlock   *big.Int `json:"nanoBlock,omitempty" toml:",omitempty"`   // nanoBlock switch block (nil = no fork, 0 = already activated)
 	MoranBlock  *big.Int `json:"moranBlock,omitempty" toml:",omitempty"`  // moranBlock switch block (nil = no fork, 0 = already activated)
-	BohrBlock   *big.Int `json:"bohrBlock,omitempty" toml:",omitempty"`   // bohrBlock switch block (nil = no fork, 0 = already activated)
+	PlanckBlock *big.Int `json:"planckBlock,omitempty" toml:",omitempty"` // planckBlock switch block (nil = no fork, 0 = already activated)
 	// Various consensus engines
 	Ethash *EthashConfig `json:"ethash,omitempty" toml:",omitempty"`
@@ -336,7 +336,7 @@ func (c *ChainConfig) String() string {
 	default:
 		engine = "unknown"
 	}
-	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Bohr: %v, Engine: %v}",
+	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v, Engine: %v}",
 		c.ChainID,
 		c.HomesteadBlock,
 		c.DAOForkBlock,
@@ -363,7 +363,7 @@ func (c *ChainConfig) String() string {
 		c.GibbsBlock,
 		c.NanoBlock,
 		c.MoranBlock,
-		c.BohrBlock,
+		c.PlanckBlock,
 		engine,
 	)
 }
@@ -519,12 +519,12 @@ func (c *ChainConfig) IsOnMoran(num *big.Int) bool {
 	return configNumEqual(c.MoranBlock, num)
 }
-func (c *ChainConfig) IsBohr(num *big.Int) bool {
-	return isForked(c.BohrBlock, num)
+func (c *ChainConfig) IsPlanck(num *big.Int) bool {
+	return isForked(c.PlanckBlock, num)
 }
-func (c *ChainConfig) IsOnBohr(num *big.Int) bool {
-	return configNumEqual(c.BohrBlock, num)
+func (c *ChainConfig) IsOnPlanck(num *big.Int) bool {
+	return configNumEqual(c.PlanckBlock, num)
 }
 // CheckCompatible checks whether scheduled fork transitions have been imported
@@ -655,8 +655,8 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi
 	if isForkIncompatible(c.MoranBlock, newcfg.MoranBlock, head) {
 		return newCompatError("moran fork block", c.MoranBlock, newcfg.MoranBlock)
 	}
-	if isForkIncompatible(c.BohrBlock, newcfg.BohrBlock, head) {
-		return newCompatError("bohr fork block", c.BohrBlock, newcfg.BohrBlock)
+	if isForkIncompatible(c.PlanckBlock, newcfg.PlanckBlock, head) {
+		return newCompatError("planck fork block", c.PlanckBlock, newcfg.PlanckBlock)
 	}
 	return nil
 }
@@ -729,7 +729,7 @@ type Rules struct {
 	IsMerge bool
 	IsNano  bool
 	IsMoran bool
-	IsBohr  bool
+	IsPlanck bool
 }
 // Rules ensures c's ChainID is not nil.
@@ -753,6 +753,6 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool) Rules {
 		IsMerge: isMerge,
 		IsNano:  c.IsNano(num),
 		IsMoran: c.IsMoran(num),
-		IsBohr:  c.IsBohr(num),
+		IsPlanck: c.IsPlanck(num),
 	}
 }
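The two accessors introduced above differ in scope: IsPlanck wraps isForked, so it stays true for every block from the fork height onward, while IsOnPlanck wraps configNumEqual and is true only at the fork block itself, which is why UpgradeBuildInSystemContract uses it for the one-shot system contract upgrade. A self-contained paraphrase of those semantics, assuming the "nil = no fork, 0 = already activated" convention from the field comments (the real helpers in params/config.go may differ in detail):

package main

import (
	"fmt"
	"math/big"
)

// isForkedSketch: true from the fork height onward; nil means no fork scheduled.
func isForkedSketch(forkBlock, head *big.Int) bool {
	if forkBlock == nil || head == nil {
		return false
	}
	return forkBlock.Cmp(head) <= 0
}

// isOnForkSketch: true only at the fork height itself, the exact-match check
// used when applying one-shot system contract upgrades.
func isOnForkSketch(forkBlock, head *big.Int) bool {
	if forkBlock == nil || head == nil {
		return forkBlock == nil && head == nil
	}
	return forkBlock.Cmp(head) == 0
}

func main() {
	planck := big.NewInt(1000)
	fmt.Println(isForkedSketch(planck, big.NewInt(999)))  // false
	fmt.Println(isForkedSketch(planck, big.NewInt(1500))) // true
	fmt.Println(isOnForkSketch(planck, big.NewInt(1000))) // true
	fmt.Println(isOnForkSketch(planck, big.NewInt(1500))) // false
}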