bsc/consensus/parlia/parlia.go

package parlia
import (
"bytes"
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"math"
"math/big"
"math/rand"
"sort"
"strings"
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
"github.com/holiman/uint256"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/willf/bitset"
"golang.org/x/crypto/sha3"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/hexutil"
cmath "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/systemcontracts"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
)
const (
inMemorySnapshots = 256 // Number of recent snapshots to keep in memory
inMemorySignatures = 4096 // Number of recent block signatures to keep in memory
inMemoryHeaders = 86400 // Number of recent headers to keep in memory for double-sign detection
checkpointInterval = 1024 // Number of blocks after which to save the snapshot to the database
defaultEpochLength = uint64(200) // Default number of blocks between checkpoints at which the validatorSet is updated from the contract
defaultTurnLength = uint8(1) // Default consecutive number of blocks a validator receives priority for block production
extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal
nextForkHashSize = 4 // Fixed number of extra-data suffix bytes reserved for nextForkHash.
turnLengthSize = 1 // Fixed number of extra-data suffix bytes reserved for turnLength
validatorBytesLengthBeforeLuban = common.AddressLength
validatorBytesLength = common.AddressLength + types.BLSPublicKeyLength
validatorNumberSize = 1 // Fixed number of extra prefix bytes reserved for validator number after Luban
wiggleTime = uint64(1) // second, Random delay (per signer) to allow concurrent signers
initialBackOffTime = uint64(1) // second
systemRewardPercent = 4 // 1/2^4 = 1/16 of incoming gas fees is distributed to the system reward contract
collectAdditionalVotesRewardRatio = 100 // ratio of additional reward for collecting more votes than needed, the denominator is 100
)
var (
uncleHash = types.CalcUncleHash(nil) // Always Keccak256(RLP([])) as uncles are meaningless outside of PoW.
diffInTurn = big.NewInt(2) // Block difficulty for in-turn signatures
diffNoTurn = big.NewInt(1) // Block difficulty for out-of-turn signatures
// 100 native tokens
maxSystemBalance = new(uint256.Int).Mul(uint256.NewInt(100), uint256.NewInt(params.Ether))
verifyVoteAttestationErrorCounter = metrics.NewRegisteredCounter("parlia/verifyVoteAttestation/error", nil)
updateAttestationErrorCounter = metrics.NewRegisteredCounter("parlia/updateAttestation/error", nil)
validVotesfromSelfCounter = metrics.NewRegisteredCounter("parlia/VerifyVote/self", nil)
doubleSignCounter = metrics.NewRegisteredCounter("parlia/doublesign", nil)
systemContracts = map[common.Address]bool{
common.HexToAddress(systemcontracts.ValidatorContract): true,
common.HexToAddress(systemcontracts.SlashContract): true,
common.HexToAddress(systemcontracts.SystemRewardContract): true,
common.HexToAddress(systemcontracts.LightClientContract): true,
common.HexToAddress(systemcontracts.RelayerHubContract): true,
common.HexToAddress(systemcontracts.GovHubContract): true,
common.HexToAddress(systemcontracts.TokenHubContract): true,
common.HexToAddress(systemcontracts.RelayerIncentivizeContract): true,
common.HexToAddress(systemcontracts.CrossChainContract): true,
common.HexToAddress(systemcontracts.StakeHubContract): true,
common.HexToAddress(systemcontracts.GovernorContract): true,
common.HexToAddress(systemcontracts.GovTokenContract): true,
common.HexToAddress(systemcontracts.TimelockContract): true,
common.HexToAddress(systemcontracts.TokenRecoverPortalContract): true,
}
)
// Various error messages to mark blocks invalid. These should be private to
// prevent engine specific errors from being referenced in the remainder of the
// codebase, inherently breaking if the engine is swapped out. Please put common
// error types into the consensus package.
var (
// errUnknownBlock is returned when the list of validators is requested for a block
// that is not part of the local blockchain.
errUnknownBlock = errors.New("unknown block")
// errMissingVanity is returned if a block's extra-data section is shorter than
// 32 bytes, which is required to store the signer vanity.
errMissingVanity = errors.New("extra-data 32 byte vanity prefix missing")
// errMissingSignature is returned if a block's extra-data section doesn't seem
// to contain a 65 byte secp256k1 signature.
errMissingSignature = errors.New("extra-data 65 byte signature suffix missing")
// errExtraValidators is returned if a non-sprint-end block contains validator data in
// its extra-data field.
errExtraValidators = errors.New("non-sprint-end block contains extra validator list")
// errInvalidSpanValidators is returned if a block contains an
// invalid list of validators (i.e. its length is not divisible by 20 bytes).
errInvalidSpanValidators = errors.New("invalid validator list on sprint end block")
// errInvalidTurnLength is returned if a block contains an
// invalid turn length (i.e. no data left after parsing the validators).
errInvalidTurnLength = errors.New("invalid turnLength")
// errInvalidMixDigest is returned if a block's mix digest is non-zero.
errInvalidMixDigest = errors.New("non-zero mix digest")
// errInvalidUncleHash is returned if a block contains a non-empty uncle list.
errInvalidUncleHash = errors.New("non empty uncle hash")
// errMismatchingEpochValidators is returned if a sprint block contains a
// list of validators different than the one the local node calculated.
errMismatchingEpochValidators = errors.New("mismatching validator list on epoch block")
// errMismatchingEpochTurnLength is returned if a sprint block contains a
// turn length different than the one the local node calculated.
errMismatchingEpochTurnLength = errors.New("mismatching turn length on epoch block")
// errInvalidDifficulty is returned if the difficulty of a block is missing.
errInvalidDifficulty = errors.New("invalid difficulty")
// errWrongDifficulty is returned if the difficulty of a block doesn't match the
// turn of the signer.
errWrongDifficulty = errors.New("wrong difficulty")
// errOutOfRangeChain is returned if an authorization list is attempted to
// be modified via out-of-range or non-contiguous headers.
errOutOfRangeChain = errors.New("out of range or non-contiguous chain")
// errBlockHashInconsistent is returned if an authorization list is attempted to
// insert an inconsistent block.
errBlockHashInconsistent = errors.New("the block hash is inconsistent")
// errUnauthorizedValidator is returned if a header is signed by a non-authorized entity.
errUnauthorizedValidator = func(val string) error {
return errors.New("unauthorized validator: " + val)
}
// errCoinBaseMisMatch is returned if a header's coinbase does not match the signature.
errCoinBaseMisMatch = errors.New("coinbase do not match with signature")
// errRecentlySigned is returned if a header is signed by an authorized entity
// that already signed a header recently, thus is temporarily not allowed to.
errRecentlySigned = errors.New("recently signed")
)
// SignerFn is a signer callback function to request a header to be signed by a
// backing account.
type SignerFn func(accounts.Account, string, []byte) ([]byte, error)
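// SignerTxFn is a signer callback function to request a transaction to be signed by a
// backing account.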
type SignerTxFn func(accounts.Account, *types.Transaction, *big.Int) (*types.Transaction, error)
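// isToSystemContract reports whether the destination address is one of the predefined system contracts.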
func isToSystemContract(to common.Address) bool {
return systemContracts[to]
}
// ecrecover extracts the Ethereum account address from a signed header.
func ecrecover(header *types.Header, sigCache *lru.ARCCache, chainId *big.Int) (common.Address, error) {
// If the signature's already cached, return that
hash := header.Hash()
if address, known := sigCache.Get(hash); known {
return address.(common.Address), nil
}
// Retrieve the signature from the header extra-data
if len(header.Extra) < extraSeal {
return common.Address{}, errMissingSignature
}
signature := header.Extra[len(header.Extra)-extraSeal:]
// Recover the public key and the Ethereum address
pubkey, err := crypto.Ecrecover(types.SealHash(header, chainId).Bytes(), signature)
if err != nil {
return common.Address{}, err
}
var signer common.Address
copy(signer[:], crypto.Keccak256(pubkey[1:])[12:])
sigCache.Add(hash, signer)
return signer, nil
}
// ParliaRLP returns the RLP bytes which need to be signed for the parlia
// sealing. The RLP to sign consists of the entire header apart from the 65 byte signature
// contained at the end of the extra data.
//
// Note, the method requires the extra data to be at least 65 bytes, otherwise it
// panics. This is done to avoid accidentally using both forms (signature present
// or not), which could be abused to produce different hashes for the same header.
func ParliaRLP(header *types.Header, chainId *big.Int) []byte {
b := new(bytes.Buffer)
types.EncodeSigHeader(b, header, chainId)
return b.Bytes()
}
// Parlia is the consensus engine of BSC
type Parlia struct {
chainConfig *params.ChainConfig // Chain config
config *params.ParliaConfig // Consensus engine configuration parameters for parlia consensus
genesisHash common.Hash
db ethdb.Database // Database to store and retrieve snapshot checkpoints
recentSnaps *lru.ARCCache // Snapshots for recent block to speed up
signatures *lru.ARCCache // Signatures of recent blocks to speed up mining
recentHeaders *lru.ARCCache
// Recent headers to check for double signing: the key includes the block number and the miner; the value is the header hash.
// If the same key already maps to a different header hash, a double sign is detected.
signer types.Signer
val common.Address // Ethereum address of the signing key
signFn SignerFn // Signer function to authorize hashes with
signTxFn SignerTxFn
lock sync.RWMutex // Protects the signer fields
ethAPI *ethapi.BlockChainAPI
VotePool consensus.VotePool
validatorSetABIBeforeLuban abi.ABI
validatorSetABI abi.ABI
slashABI abi.ABI
stakeHubABI abi.ABI
// The fields below are for testing only
fakeDiff bool // Skip difficulty verifications
}
// New creates a Parlia consensus engine.
func New(
chainConfig *params.ChainConfig,
db ethdb.Database,
ethAPI *ethapi.BlockChainAPI,
genesisHash common.Hash,
) *Parlia {
// get parlia config
parliaConfig := chainConfig.Parlia
log.Info("Parlia", "chainConfig", chainConfig)
// Set any missing consensus parameters to their defaults
if parliaConfig != nil && parliaConfig.Epoch == 0 {
parliaConfig.Epoch = defaultEpochLength
}
// Allocate the snapshot caches and create the engine
recentSnaps, err := lru.NewARC(inMemorySnapshots)
if err != nil {
panic(err)
}
signatures, err := lru.NewARC(inMemorySignatures)
if err != nil {
panic(err)
}
recentHeaders, err := lru.NewARC(inMemoryHeaders)
if err != nil {
panic(err)
}
vABIBeforeLuban, err := abi.JSON(strings.NewReader(validatorSetABIBeforeLuban))
if err != nil {
panic(err)
}
vABI, err := abi.JSON(strings.NewReader(validatorSetABI))
if err != nil {
panic(err)
}
sABI, err := abi.JSON(strings.NewReader(slashABI))
if err != nil {
panic(err)
}
stABI, err := abi.JSON(strings.NewReader(stakeABI))
if err != nil {
panic(err)
}
c := &Parlia{
chainConfig: chainConfig,
config: parliaConfig,
genesisHash: genesisHash,
db: db,
ethAPI: ethAPI,
recentSnaps: recentSnaps,
recentHeaders: recentHeaders,
signatures: signatures,
validatorSetABIBeforeLuban: vABIBeforeLuban,
validatorSetABI: vABI,
slashABI: sABI,
stakeHubABI: stABI,
signer: types.LatestSigner(chainConfig),
}
return c
}
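// Period returns the block period configured for the parlia engine.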
func (p *Parlia) Period() uint64 {
return p.config.Period
}
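// IsSystemTransaction reports whether tx is a system transaction: a zero-gas-price call
// sent by the block's coinbase to one of the system contracts.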
func (p *Parlia) IsSystemTransaction(tx *types.Transaction, header *types.Header) (bool, error) {
// deploy a contract
if tx.To() == nil {
return false, nil
}
sender, err := types.Sender(p.signer, tx)
if err != nil {
return false, errors.New("UnAuthorized transaction")
}
if sender == header.Coinbase && isToSystemContract(*tx.To()) && tx.GasPrice().Cmp(big.NewInt(0)) == 0 {
return true, nil
}
return false, nil
}
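// IsSystemContract reports whether the given destination address is a system contract.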
func (p *Parlia) IsSystemContract(to *common.Address) bool {
if to == nil {
return false
}
return isToSystemContract(*to)
}
// Author implements consensus.Engine, returning the header's coinbase as the block author.
func (p *Parlia) Author(header *types.Header) (common.Address, error) {
return header.Coinbase, nil
}
// VerifyHeader checks whether a header conforms to the consensus rules.
func (p *Parlia) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error {
return p.verifyHeader(chain, header, nil)
}
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers. The
// method returns a quit channel to abort the operations and a results channel to
// retrieve the async verifications (the order is that of the input slice).
func (p *Parlia) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) {
abort := make(chan struct{})
results := make(chan error, len(headers))
gopool.Submit(func() {
for i, header := range headers {
err := p.verifyHeader(chain, header, headers[:i])
select {
case <-abort:
return
case results <- err:
}
}
})
return abort, results
}
// getValidatorBytesFromHeader returns the validator bytes extracted from the header's extra field, if present.
// Validator bytes are only contained in epoch block headers, and each validator's byte length is fixed.
// Since the Luban fork, a vote attestation is also carried in the header's extra field, so the extra format differs from before.
// Before luban fork: |---Extra Vanity---|---Validators Bytes (or Empty)---|---Extra Seal---|
// After luban fork: |---Extra Vanity---|---Validators Number and Validators Bytes (or Empty)---|---Vote Attestation (or Empty)---|---Extra Seal---|
// After bohr fork: |---Extra Vanity---|---Validators Number and Validators Bytes (or Empty)---|---Turn Length (or Empty)---|---Vote Attestation (or Empty)---|---Extra Seal---|
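// For example, after Luban (and assuming common.AddressLength is 20 bytes and types.BLSPublicKeyLength is 48 bytes),
// an epoch header carrying N validators needs at least extraVanity + validatorNumberSize + N*(20+48) + extraSeal
// = 32 + 1 + N*68 + 65 bytes of extra-data, plus one more byte for the turn length after Bohr and any optional vote attestation.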
func getValidatorBytesFromHeader(header *types.Header, chainConfig *params.ChainConfig, parliaConfig *params.ParliaConfig) []byte {
if len(header.Extra) <= extraVanity+extraSeal {
return nil
}
if !chainConfig.IsLuban(header.Number) {
if header.Number.Uint64()%parliaConfig.Epoch == 0 && (len(header.Extra)-extraSeal-extraVanity)%validatorBytesLengthBeforeLuban != 0 {
return nil
}
return header.Extra[extraVanity : len(header.Extra)-extraSeal]
}
if header.Number.Uint64()%parliaConfig.Epoch != 0 {
return nil
}
num := int(header.Extra[extraVanity])
start := extraVanity + validatorNumberSize
end := start + num*validatorBytesLength
extraMinLen := end + extraSeal
if chainConfig.IsBohr(header.Number, header.Time) {
extraMinLen += turnLengthSize
}
if num == 0 || len(header.Extra) < extraMinLen {
return nil
}
return header.Extra[start:end]
}
// getVoteAttestationFromHeader returns the vote attestation extracted from the header's extra field, if present.
func getVoteAttestationFromHeader(header *types.Header, chainConfig *params.ChainConfig, parliaConfig *params.ParliaConfig) (*types.VoteAttestation, error) {
if len(header.Extra) <= extraVanity+extraSeal {
return nil, nil
}
if !chainConfig.IsLuban(header.Number) {
return nil, nil
}
var attestationBytes []byte
if header.Number.Uint64()%parliaConfig.Epoch != 0 {
attestationBytes = header.Extra[extraVanity : len(header.Extra)-extraSeal]
} else {
num := int(header.Extra[extraVanity])
start := extraVanity + validatorNumberSize + num*validatorBytesLength
if chainConfig.IsBohr(header.Number, header.Time) {
start += turnLengthSize
}
end := len(header.Extra) - extraSeal
if end <= start {
return nil, nil
}
attestationBytes = header.Extra[start:end]
}
var attestation types.VoteAttestation
if err := rlp.Decode(bytes.NewReader(attestationBytes), &attestation); err != nil {
return nil, fmt.Errorf("block %d has vote attestation info, decode err: %s", header.Number.Uint64(), err)
}
return &attestation, nil
}
// getParent returns the parent of a given block.
func (p *Parlia) getParent(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) (*types.Header, error) {
var parent *types.Header
number := header.Number.Uint64()
if len(parents) > 0 {
parent = parents[len(parents)-1]
} else {
parent = chain.GetHeader(header.ParentHash, number-1)
}
if parent == nil || parent.Number.Uint64() != number-1 || parent.Hash() != header.ParentHash {
return nil, consensus.ErrUnknownAncestor
}
return parent, nil
}
// verifyVoteAttestation checks whether the vote attestation in the header is valid.
func (p *Parlia) verifyVoteAttestation(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
attestation, err := getVoteAttestationFromHeader(header, p.chainConfig, p.config)
if err != nil {
return err
}
if attestation == nil {
return nil
}
if attestation.Data == nil {
return errors.New("invalid attestation, vote data is nil")
}
if len(attestation.Extra) > types.MaxAttestationExtraLength {
return fmt.Errorf("invalid attestation, too large extra length: %d", len(attestation.Extra))
}
// Get parent block
parent, err := p.getParent(chain, header, parents)
if err != nil {
return err
}
// The target block should be direct parent.
targetNumber := attestation.Data.TargetNumber
targetHash := attestation.Data.TargetHash
if targetNumber != parent.Number.Uint64() || targetHash != parent.Hash() {
return fmt.Errorf("invalid attestation, target mismatch, expected block: %d, hash: %s; real block: %d, hash: %s",
parent.Number.Uint64(), parent.Hash(), targetNumber, targetHash)
}
// The source block should be the highest justified block.
sourceNumber := attestation.Data.SourceNumber
sourceHash := attestation.Data.SourceHash
headers := []*types.Header{parent}
if len(parents) > 0 {
headers = parents
}
justifiedBlockNumber, justifiedBlockHash, err := p.GetJustifiedNumberAndHash(chain, headers)
if err != nil {
return errors.New("unexpected error when getting the highest justified number and hash")
}
if sourceNumber != justifiedBlockNumber || sourceHash != justifiedBlockHash {
return fmt.Errorf("invalid attestation, source mismatch, expected block: %d, hash: %s; real block: %d, hash: %s",
justifiedBlockNumber, justifiedBlockHash, sourceNumber, sourceHash)
}
// The snapshot should be the targetNumber-1 block's snapshot.
if len(parents) > 1 {
parents = parents[:len(parents)-1]
} else {
parents = nil
}
snap, err := p.snapshot(chain, parent.Number.Uint64()-1, parent.ParentHash, parents)
if err != nil {
return err
}
// Filter out the valid validators from the attestation.
validators := snap.validators()
validatorsBitSet := bitset.From([]uint64{uint64(attestation.VoteAddressSet)})
if validatorsBitSet.Count() > uint(len(validators)) {
return errors.New("invalid attestation, vote number larger than validators number")
}
votedAddrs := make([]bls.PublicKey, 0, validatorsBitSet.Count())
for index, val := range validators {
if !validatorsBitSet.Test(uint(index)) {
continue
}
voteAddr, err := bls.PublicKeyFromBytes(snap.Validators[val].VoteAddress[:])
if err != nil {
return fmt.Errorf("BLS public key converts failed: %v", err)
}
votedAddrs = append(votedAddrs, voteAddr)
}
// The number of valid voting validators should be no less than 2/3 of all validators.
if len(votedAddrs) < cmath.CeilDiv(len(snap.Validators)*2, 3) {
return errors.New("invalid attestation, not enough validators voted")
}
// Verify the aggregated signature.
aggSig, err := bls.SignatureFromBytes(attestation.AggSignature[:])
if err != nil {
return fmt.Errorf("BLS signature converts failed: %v", err)
}
if !aggSig.FastAggregateVerify(votedAddrs, attestation.Data.Hash()) {
return errors.New("invalid attestation, signature verify failed")
}
return nil
}
// verifyHeader checks whether a header conforms to the consensus rules. The
// caller may optionally pass in a batch of parents (ascending order) to avoid
// looking those up from the database. This is useful for concurrently verifying
// a batch of new headers.
func (p *Parlia) verifyHeader(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
if header.Number == nil {
return errUnknownBlock
}
// Don't waste time checking blocks from the future
if header.Time > uint64(time.Now().Unix()) {
return consensus.ErrFutureBlock
}
// Check that the extra-data contains the vanity, validators and signature.
if len(header.Extra) < extraVanity {
return errMissingVanity
}
if len(header.Extra) < extraVanity+extraSeal {
return errMissingSignature
}
// check extra data
number := header.Number.Uint64()
isEpoch := number%p.config.Epoch == 0
// Ensure that the extra-data contains a signer list on checkpoint, but none otherwise
signersBytes := getValidatorBytesFromHeader(header, p.chainConfig, p.config)
if !isEpoch && len(signersBytes) != 0 {
return errExtraValidators
}
if isEpoch && len(signersBytes) == 0 {
return errInvalidSpanValidators
}
// Ensure that the mix digest is zero as we don't have fork protection currently
if header.MixDigest != (common.Hash{}) {
return errInvalidMixDigest
}
// Ensure that the block doesn't contain any uncles which are meaningless in PoA
if header.UncleHash != uncleHash {
return errInvalidUncleHash
}
// Ensure that the block's difficulty is meaningful (may not be correct at this point)
if number > 0 {
if header.Difficulty == nil {
return errInvalidDifficulty
}
}
parent, err := p.getParent(chain, header, parents)
if err != nil {
return err
}
// Verify the block's gas usage and (if applicable) verify the base fee.
if !chain.Config().IsLondon(header.Number) {
// Verify BaseFee not present before EIP-1559 fork.
if header.BaseFee != nil {
return fmt.Errorf("invalid baseFee before fork: have %d, expected 'nil'", header.BaseFee)
}
} else if err := eip1559.VerifyEIP1559Header(chain.Config(), parent, header); err != nil {
// Verify the header's EIP-1559 attributes.
return err
}
cancun := chain.Config().IsCancun(header.Number, header.Time)
if !cancun {
switch {
case header.ExcessBlobGas != nil:
return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas)
case header.BlobGasUsed != nil:
return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed)
case header.WithdrawalsHash != nil:
return fmt.Errorf("invalid WithdrawalsHash, have %#x, expected nil", header.WithdrawalsHash)
}
} else {
switch {
case !header.EmptyWithdrawalsHash():
return errors.New("header has wrong WithdrawalsHash")
}
if err := eip4844.VerifyEIP4844Header(parent, header); err != nil {
return err
}
}
bohr := chain.Config().IsBohr(header.Number, header.Time)
if !bohr {
if header.ParentBeaconRoot != nil {
return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", header.ParentBeaconRoot)
}
} else {
if header.ParentBeaconRoot == nil || *header.ParentBeaconRoot != (common.Hash{}) {
return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected zero hash", header.ParentBeaconRoot)
}
}
// All basic checks passed, verify cascading fields
return p.verifyCascadingFields(chain, header, parents)
}
// verifyCascadingFields verifies all the header fields that are not standalone,
// rather depend on a batch of previous headers. The caller may optionally pass
// in a batch of parents (ascending order) to avoid looking those up from the
// database. This is useful for concurrently verifying a batch of new headers.
func (p *Parlia) verifyCascadingFields(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
// The genesis block is the always valid dead-end
number := header.Number.Uint64()
if number == 0 {
return nil
}
parent, err := p.getParent(chain, header, parents)
if err != nil {
return err
}
snap, err := p.snapshot(chain, number-1, header.ParentHash, parents)
if err != nil {
return err
}
err = p.blockTimeVerifyForRamanujanFork(snap, header, parent)
if err != nil {
return err
}
// Verify that the gas limit is <= 2^63-1
capacity := uint64(0x7fffffffffffffff)
if header.GasLimit > capacity {
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, capacity)
}
// Verify that the gasUsed is <= gasLimit
if header.GasUsed > header.GasLimit {
return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit)
}
// Verify that the gas limit remains within allowed bounds
diff := int64(parent.GasLimit) - int64(header.GasLimit)
if diff < 0 {
diff *= -1
}
limit := parent.GasLimit / params.GasLimitBoundDivisor
if uint64(diff) >= limit || header.GasLimit < params.MinGasLimit {
return fmt.Errorf("invalid gas limit: have %d, want %d += %d", header.GasLimit, parent.GasLimit, limit-1)
}
// Verify vote attestation for fast finality.
if err := p.verifyVoteAttestation(chain, header, parents); err != nil {
log.Warn("Verify vote attestation failed", "error", err, "hash", header.Hash(), "number", header.Number,
"parent", header.ParentHash, "coinbase", header.Coinbase, "extra", common.Bytes2Hex(header.Extra))
verifyVoteAttestationErrorCounter.Inc(1)
if chain.Config().IsPlato(header.Number) {
return err
}
}
// All basic checks passed, verify the seal and return
return p.verifySeal(chain, header, parents)
}
// snapshot retrieves the authorization snapshot at a given point in time.
// !!! Be careful: the block with `number` and `hash` is just the last element of `parents`;
// unlike other interfaces such as verifyCascadingFields, here `parents` are the real parents.
func (p *Parlia) snapshot(chain consensus.ChainHeaderReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) {
// Search for a snapshot in memory or on disk for checkpoints
var (
headers []*types.Header
snap *Snapshot
)
for snap == nil {
// If an in-memory snapshot was found, use that
if s, ok := p.recentSnaps.Get(hash); ok {
snap = s.(*Snapshot)
break
}
// If an on-disk checkpoint snapshot can be found, use that
if number%checkpointInterval == 0 {
if s, err := loadSnapshot(p.config, p.signatures, p.db, hash, p.ethAPI); err == nil {
log.Trace("Loaded snapshot from disk", "number", number, "hash", hash)
snap = s
break
}
}
// If we're at the genesis, snapshot the initial state. Alternatively if we have
// piled up more headers than allowed to be reorged (chain reinit from a freezer),
// consider the checkpoint trusted and snapshot it.
// An offset `p.config.Epoch - 1` can ensure getting the right validators.
if number == 0 || ((number+1)%p.config.Epoch == 0 && (len(headers) > int(params.FullImmutabilityThreshold))) {
var (
checkpoint *types.Header
blockHash common.Hash
)
if number == 0 {
checkpoint = chain.GetHeaderByNumber(0)
if checkpoint != nil {
blockHash = checkpoint.Hash()
}
} else {
checkpoint = chain.GetHeaderByNumber(number + 1 - p.config.Epoch)
blockHeader := chain.GetHeaderByNumber(number)
if blockHeader != nil {
blockHash = blockHeader.Hash()
}
}
if checkpoint != nil && blockHash != (common.Hash{}) {
// get validators from headers
validators, voteAddrs, err := parseValidators(checkpoint, p.chainConfig, p.config)
if err != nil {
return nil, err
}
// new snapshot
snap = newSnapshot(p.config, p.signatures, number, blockHash, validators, voteAddrs, p.ethAPI)
// get turnLength from headers and use that for new turnLength
turnLength, err := parseTurnLength(checkpoint, p.chainConfig, p.config)
if err != nil {
return nil, err
}
if turnLength != nil {
snap.TurnLength = *turnLength
}
// snap.Recents is currently empty, which affects the following:
// a. The function SignRecently - This is acceptable since an empty snap.Recents results in a more lenient check.
// b. The function blockTimeVerifyForRamanujanFork - This is also acceptable as it won't be invoked during `snap.apply`.
// c. This may cause a mismatch in the slash systemtx, but the transaction list is not verified during `snap.apply`.
// snap.Attestation is nil, but Snapshot.updateAttestation will handle it correctly.
if err := snap.store(p.db); err != nil {
return nil, err
}
log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", blockHash)
break
}
}
// No snapshot for this header, gather the header and move backward
var header *types.Header
if len(parents) > 0 {
// If we have explicit parents, pick from there (enforced)
header = parents[len(parents)-1]
if header.Hash() != hash || header.Number.Uint64() != number {
return nil, consensus.ErrUnknownAncestor
}
parents = parents[:len(parents)-1]
} else {
// No explicit parents (or no more left), reach out to the database
header = chain.GetHeader(hash, number)
if header == nil {
return nil, consensus.ErrUnknownAncestor
}
}
headers = append(headers, header)
number, hash = number-1, header.ParentHash
}
// check if snapshot is nil
if snap == nil {
return nil, fmt.Errorf("unknown error while retrieving snapshot at block number %v", number)
}
// Previous snapshot found, apply any pending headers on top of it
for i := 0; i < len(headers)/2; i++ {
headers[i], headers[len(headers)-1-i] = headers[len(headers)-1-i], headers[i]
}
snap, err := snap.apply(headers, chain, parents, p.chainConfig)
if err != nil {
return nil, err
}
p.recentSnaps.Add(snap.Hash, snap)
// If we've generated a new checkpoint snapshot, save to disk
if snap.Number%checkpointInterval == 0 && len(headers) > 0 {
if err = snap.store(p.db); err != nil {
return nil, err
}
log.Trace("Stored snapshot to disk", "number", snap.Number, "hash", snap.Hash)
}
return snap, err
}
// VerifyUncles implements consensus.Engine, always returning an error for any
// uncles as this consensus mechanism doesn't permit uncles.
func (p *Parlia) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
if len(block.Uncles()) > 0 {
return errors.New("uncles not allowed")
}
return nil
}
// VerifySeal implements consensus.Engine, checking whether the signature contained
// in the header satisfies the consensus protocol requirements.
func (p *Parlia) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
return p.verifySeal(chain, header, nil)
}
// verifySeal checks whether the signature contained in the header satisfies the
// consensus protocol requirements. The method accepts an optional list of parent
// headers that aren't yet part of the local blockchain to generate the snapshots
// from.
func (p *Parlia) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
// Verifying the genesis block is not supported
number := header.Number.Uint64()
if number == 0 {
return errUnknownBlock
}
// Retrieve the snapshot needed to verify this header and cache it
snap, err := p.snapshot(chain, number-1, header.ParentHash, parents)
if err != nil {
return err
}
// Resolve the authorization key and check against validators
signer, err := ecrecover(header, p.signatures, p.chainConfig.ChainID)
if err != nil {
return err
}
if signer != header.Coinbase {
return errCoinBaseMisMatch
}
// check for double sign & add to cache
key := proposalKey(*header)
preHash, ok := p.recentHeaders.Get(key)
if ok && preHash != header.Hash() {
doubleSignCounter.Inc(1)
log.Warn("DoubleSign detected", " block", header.Number, " miner", header.Coinbase,
"hash1", preHash.(common.Hash), "hash2", header.Hash())
} else {
p.recentHeaders.Add(key, header.Hash())
}
if _, ok := snap.Validators[signer]; !ok {
return errUnauthorizedValidator(signer.String())
}
if snap.SignRecently(signer) {
return errRecentlySigned
}
// Ensure that the difficulty corresponds to the turn-ness of the signer
if !p.fakeDiff {
inturn := snap.inturn(signer)
if inturn && header.Difficulty.Cmp(diffInTurn) != 0 {
return errWrongDifficulty
}
if !inturn && header.Difficulty.Cmp(diffNoTurn) != 0 {
return errWrongDifficulty
}
}
return nil
}
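// prepareValidators appends the sorted validator set (and, after the Luban fork, the validator
// count and BLS vote addresses) to the header's extra-data on epoch blocks.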
func (p *Parlia) prepareValidators(header *types.Header) error {
if header.Number.Uint64()%p.config.Epoch != 0 {
return nil
}
newValidators, voteAddressMap, err := p.getCurrentValidators(header.ParentHash, new(big.Int).Sub(header.Number, big.NewInt(1)))
if err != nil {
return err
}
// sort validator by address
sort.Sort(validatorsAscending(newValidators))
if !p.chainConfig.IsLuban(header.Number) {
for _, validator := range newValidators {
header.Extra = append(header.Extra, validator.Bytes()...)
}
} else {
header.Extra = append(header.Extra, byte(len(newValidators)))
if p.chainConfig.IsOnLuban(header.Number) {
voteAddressMap = make(map[common.Address]*types.BLSPublicKey, len(newValidators))
var zeroBlsKey types.BLSPublicKey
for _, validator := range newValidators {
voteAddressMap[validator] = &zeroBlsKey
}
}
for _, validator := range newValidators {
header.Extra = append(header.Extra, validator.Bytes()...)
header.Extra = append(header.Extra, voteAddressMap[validator].Bytes()...)
}
}
return nil
}
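// prepareTurnLength appends the turn length to the header's extra-data on epoch blocks after the Bohr fork.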
func (p *Parlia) prepareTurnLength(chain consensus.ChainHeaderReader, header *types.Header) error {
if header.Number.Uint64()%p.config.Epoch != 0 ||
!p.chainConfig.IsBohr(header.Number, header.Time) {
return nil
}
turnLength, err := p.getTurnLength(chain, header)
if err != nil {
return err
}
if turnLength != nil {
header.Extra = append(header.Extra, *turnLength)
}
return nil
}
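// assembleVoteAttestation fetches the direct parent's votes from the vote pool, aggregates their
// BLS signatures into a vote attestation, and inserts it into the header's extra-data just ahead
// of the extra seal. It is a no-op before the Luban fork or when the vote pool is not set.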
func (p *Parlia) assembleVoteAttestation(chain consensus.ChainHeaderReader, header *types.Header) error {
if !p.chainConfig.IsLuban(header.Number) || header.Number.Uint64() < 2 {
return nil
}
if p.VotePool == nil {
return nil
}
// Fetch direct parent's votes
parent := chain.GetHeaderByHash(header.ParentHash)
if parent == nil {
return errors.New("parent not found")
}
snap, err := p.snapshot(chain, parent.Number.Uint64()-1, parent.ParentHash, nil)
if err != nil {
return err
}
votes := p.VotePool.FetchVoteByBlockHash(parent.Hash())
if len(votes) < cmath.CeilDiv(len(snap.Validators)*2, 3) {
return nil
}
// Prepare vote attestation
// Prepare vote data
justifiedBlockNumber, justifiedBlockHash, err := p.GetJustifiedNumberAndHash(chain, []*types.Header{parent})
if err != nil {
return errors.New("unexpected error when getting the highest justified number and hash")
}
attestation := &types.VoteAttestation{
Data: &types.VoteData{
SourceNumber: justifiedBlockNumber,
SourceHash: justifiedBlockHash,
TargetNumber: parent.Number.Uint64(),
TargetHash: parent.Hash(),
},
}
// Check vote data from votes
for _, vote := range votes {
if vote.Data.Hash() != attestation.Data.Hash() {
return fmt.Errorf("vote check error, expected: %v, real: %v", attestation.Data, vote)
}
}
// Prepare aggregated vote signature
voteAddrSet := make(map[types.BLSPublicKey]struct{}, len(votes))
signatures := make([][]byte, 0, len(votes))
for _, vote := range votes {
voteAddrSet[vote.VoteAddress] = struct{}{}
signatures = append(signatures, vote.Signature[:])
}
sigs, err := bls.MultipleSignaturesFromBytes(signatures)
if err != nil {
return err
}
copy(attestation.AggSignature[:], bls.AggregateSignatures(sigs).Marshal())
// Prepare vote address bitset.
for _, valInfo := range snap.Validators {
if _, ok := voteAddrSet[valInfo.VoteAddress]; ok {
attestation.VoteAddressSet |= 1 << (valInfo.Index - 1) // Index is offset by 1
}
}
validatorsBitSet := bitset.From([]uint64{uint64(attestation.VoteAddressSet)})
if validatorsBitSet.Count() < uint(len(signatures)) {
log.Warn(fmt.Sprintf("assembleVoteAttestation, check VoteAddress Set failed, expected:%d, real:%d", len(signatures), validatorsBitSet.Count()))
return errors.New("invalid attestation, check VoteAddress Set failed")
}
// Append attestation to header extra field.
buf := new(bytes.Buffer)
err = rlp.Encode(buf, attestation)
if err != nil {
return err
}
// Insert vote attestation into header extra ahead extra seal.
extraSealStart := len(header.Extra) - extraSeal
extraSealBytes := header.Extra[extraSealStart:]
header.Extra = append(header.Extra[0:extraSealStart], buf.Bytes()...)
header.Extra = append(header.Extra, extraSealBytes...)
return nil
}
// NextInTurnValidator returns the next in-turn validator for the given header.
func (p *Parlia) NextInTurnValidator(chain consensus.ChainHeaderReader, header *types.Header) (common.Address, error) {
snap, err := p.snapshot(chain, header.Number.Uint64(), header.Hash(), nil)
if err != nil {
return common.Address{}, err
}
return snap.inturnValidator(), nil
}
// Prepare implements consensus.Engine, preparing all the consensus fields of the
// header for running the transactions on top.
func (p *Parlia) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
header.Coinbase = p.val
header.Nonce = types.BlockNonce{}
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
return err
}
// Set the correct difficulty
header.Difficulty = CalcDifficulty(snap, p.val)
// Ensure the extra data has all its components
if len(header.Extra) < extraVanity-nextForkHashSize {
header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, extraVanity-nextForkHashSize-len(header.Extra))...)
}
// Ensure the timestamp has the correct delay
parent := chain.GetHeader(header.ParentHash, number-1)
if parent == nil {
return consensus.ErrUnknownAncestor
}
header.Time = p.blockTimeForRamanujanFork(snap, header, parent)
if header.Time < uint64(time.Now().Unix()) {
header.Time = uint64(time.Now().Unix())
}
header.Extra = header.Extra[:extraVanity-nextForkHashSize]
nextForkHash := forkid.NextForkHash(p.chainConfig, p.genesisHash, chain.GenesisHeader().Time, number, header.Time)
header.Extra = append(header.Extra, nextForkHash[:]...)
if err := p.prepareValidators(header); err != nil {
return err
}
if err := p.prepareTurnLength(chain, header); err != nil {
return err
}
// add extra seal space
header.Extra = append(header.Extra, make([]byte, extraSeal)...)
// Mix digest is reserved for now, set to empty
header.MixDigest = common.Hash{}
return nil
}
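// verifyValidators checks that the validator bytes in an epoch block's extra-data match the
// validator set returned by getCurrentValidators at the parent block.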
func (p *Parlia) verifyValidators(header *types.Header) error {
if header.Number.Uint64()%p.config.Epoch != 0 {
return nil
}
newValidators, voteAddressMap, err := p.getCurrentValidators(header.ParentHash, new(big.Int).Sub(header.Number, big.NewInt(1)))
if err != nil {
return err
}
// sort validator by address
sort.Sort(validatorsAscending(newValidators))
var validatorsBytes []byte
validatorsNumber := len(newValidators)
if !p.chainConfig.IsLuban(header.Number) {
validatorsBytes = make([]byte, validatorsNumber*validatorBytesLengthBeforeLuban)
for i, validator := range newValidators {
copy(validatorsBytes[i*validatorBytesLengthBeforeLuban:], validator.Bytes())
}
} else {
if uint8(validatorsNumber) != header.Extra[extraVanity] {
return errMismatchingEpochValidators
}
validatorsBytes = make([]byte, validatorsNumber*validatorBytesLength)
if p.chainConfig.IsOnLuban(header.Number) {
voteAddressMap = make(map[common.Address]*types.BLSPublicKey, len(newValidators))
var zeroBlsKey types.BLSPublicKey
for _, validator := range newValidators {
voteAddressMap[validator] = &zeroBlsKey
}
}
for i, validator := range newValidators {
copy(validatorsBytes[i*validatorBytesLength:], validator.Bytes())
copy(validatorsBytes[i*validatorBytesLength+common.AddressLength:], voteAddressMap[validator].Bytes())
}
}
if !bytes.Equal(getValidatorBytesFromHeader(header, p.chainConfig, p.config), validatorsBytes) {
return errMismatchingEpochValidators
}
return nil
}
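// verifyTurnLength checks that the turn length in an epoch block's extra-data matches the
// expected turn length computed by the engine (Bohr fork only).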
func (p *Parlia) verifyTurnLength(chain consensus.ChainHeaderReader, header *types.Header) error {
if header.Number.Uint64()%p.config.Epoch != 0 ||
!p.chainConfig.IsBohr(header.Number, header.Time) {
return nil
}
turnLengthFromHeader, err := parseTurnLength(header, p.chainConfig, p.config)
if err != nil {
return err
}
if turnLengthFromHeader != nil {
turnLength, err := p.getTurnLength(chain, header)
if err != nil {
return err
}
if turnLength != nil && *turnLength == *turnLengthFromHeader {
log.Debug("verifyTurnLength", "turnLength", *turnLength)
return nil
}
}
return errMismatchingEpochTurnLength
}
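// distributeFinalityReward accumulates per-validator vote weights from the vote attestations of
// the past epoch (plus a bonus for proposers that collect more votes than the quorum), then packs
// and applies a distributeFinalityReward system transaction against the validator contract.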
func (p *Parlia) distributeFinalityReward(chain consensus.ChainHeaderReader, state *state.StateDB, header *types.Header,
cx core.ChainContext, txs *[]*types.Transaction, receipts *[]*types.Receipt, systemTxs *[]*types.Transaction,
usedGas *uint64, mining bool) error {
currentHeight := header.Number.Uint64()
epoch := p.config.Epoch
chainConfig := chain.Config()
if currentHeight%epoch != 0 {
return nil
}
head := header
accumulatedWeights := make(map[common.Address]uint64)
for height := currentHeight - 1; height+epoch >= currentHeight && height >= 1; height-- {
head = chain.GetHeaderByHash(head.ParentHash)
if head == nil {
return fmt.Errorf("header is nil at height %d", height)
}
voteAttestation, err := getVoteAttestationFromHeader(head, chainConfig, p.config)
if err != nil {
return err
}
if voteAttestation == nil {
continue
}
justifiedBlock := chain.GetHeaderByHash(voteAttestation.Data.TargetHash)
if justifiedBlock == nil {
log.Warn("justifiedBlock is nil at height %d", voteAttestation.Data.TargetNumber)
continue
}
snap, err := p.snapshot(chain, justifiedBlock.Number.Uint64()-1, justifiedBlock.ParentHash, nil)
if err != nil {
return err
}
validators := snap.validators()
validatorsBitSet := bitset.From([]uint64{uint64(voteAttestation.VoteAddressSet)})
if validatorsBitSet.Count() > uint(len(validators)) {
log.Error("invalid attestation, vote number larger than validators number")
continue
}
validVoteCount := 0
for index, val := range validators {
if validatorsBitSet.Test(uint(index)) {
accumulatedWeights[val] += 1
validVoteCount += 1
}
}
quorum := cmath.CeilDiv(len(snap.Validators)*2, 3)
if validVoteCount > quorum {
accumulatedWeights[head.Coinbase] += uint64((validVoteCount - quorum) * collectAdditionalVotesRewardRatio / 100)
}
}
validators := make([]common.Address, 0, len(accumulatedWeights))
weights := make([]*big.Int, 0, len(accumulatedWeights))
for val := range accumulatedWeights {
validators = append(validators, val)
}
sort.Sort(validatorsAscending(validators))
for _, val := range validators {
weights = append(weights, big.NewInt(int64(accumulatedWeights[val])))
}
// generate system transaction
method := "distributeFinalityReward"
data, err := p.validatorSetABI.Pack(method, validators, weights)
if err != nil {
log.Error("Unable to pack tx for distributeFinalityReward", "error", err)
return err
}
msg := p.getSystemMessage(header.Coinbase, common.HexToAddress(systemcontracts.ValidatorContract), data, common.Big0)
return p.applyTransaction(msg, state, header, cx, txs, receipts, systemTxs, usedGas, mining)
}
// Finalize implements consensus.Engine, ensuring no uncles are set, nor block
// rewards given.
func (p *Parlia) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs *[]*types.Transaction,
uncles []*types.Header, _ []*types.Withdrawal, receipts *[]*types.Receipt, systemTxs *[]*types.Transaction, usedGas *uint64) error {
// warn if not in majority fork
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
return err
}
nextForkHash := forkid.NextForkHash(p.chainConfig, p.genesisHash, chain.GenesisHeader().Time, number, header.Time)
if !snap.isMajorityFork(hex.EncodeToString(nextForkHash[:])) {
log.Debug("there is a possible fork, and your client is not the majority. Please check...", "nextForkHash", hex.EncodeToString(nextForkHash[:]))
}
// If the block is an epoch end block, verify the validator list
// The verification can only be done when the state is ready; it can't be done in VerifyHeader.
if err := p.verifyValidators(header); err != nil {
return err
}
if err := p.verifyTurnLength(chain, header); err != nil {
return err
}
cx := chainContext{Chain: chain, parlia: p}
parent := chain.GetHeaderByHash(header.ParentHash)
if parent == nil {
return errors.New("parent not found")
}
if p.chainConfig.IsFeynman(header.Number, header.Time) {
systemcontracts.UpgradeBuildInSystemContract(p.chainConfig, header.Number, parent.Time, header.Time, state)
}
if p.chainConfig.IsOnFeynman(header.Number, parent.Time, header.Time) {
err := p.initializeFeynmanContract(state, header, cx, txs, receipts, systemTxs, usedGas, false)
if err != nil {
log.Error("init feynman contract failed", "error", err)
}
}
// No block rewards in PoA, so the state remains as is and uncles are dropped
if header.Number.Cmp(common.Big1) == 0 {
err := p.initContract(state, header, cx, txs, receipts, systemTxs, usedGas, false)
if err != nil {
log.Error("init contract failed")
}
}
if header.Difficulty.Cmp(diffInTurn) != 0 {
spoiledVal := snap.inturnValidator()
signedRecently := false
if p.chainConfig.IsPlato(header.Number) {
signedRecently = snap.SignRecently(spoiledVal)
} else {
for _, recent := range snap.Recents {
if recent == spoiledVal {
signedRecently = true
break
}
}
}
if !signedRecently {
log.Trace("slash validator", "block hash", header.Hash(), "address", spoiledVal)
err = p.slash(spoiledVal, state, header, cx, txs, receipts, systemTxs, usedGas, false)
if err != nil {
// it is possible that slashing the validator failed because the slash channel is disabled.
log.Error("slash validator failed", "block hash", header.Hash(), "address", spoiledVal)
}
}
}
val := header.Coinbase
err = p.distributeIncoming(val, state, header, cx, txs, receipts, systemTxs, usedGas, false)
if err != nil {
return err
}
if p.chainConfig.IsPlato(header.Number) {
if err := p.distributeFinalityReward(chain, state, header, cx, txs, receipts, systemTxs, usedGas, false); err != nil {
return err
}
}
// update validators every day
if p.chainConfig.IsFeynman(header.Number, header.Time) && isBreatheBlock(parent.Time, header.Time) {
// we should avoid updating validators in the Feynman upgrade block
if !p.chainConfig.IsOnFeynman(header.Number, parent.Time, header.Time) {
if err := p.updateValidatorSetV2(state, header, cx, txs, receipts, systemTxs, usedGas, false); err != nil {
return err
}
}
}
if len(*systemTxs) > 0 {
return errors.New("the length of systemTxs do not match")
}
return nil
}
// FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set,
// nor block rewards given, and returns the final block.
func (p *Parlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB,
txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt, _ []*types.Withdrawal) (*types.Block, []*types.Receipt, error) {
// No block rewards in PoA, so the state remains as is and uncles are dropped
cx := chainContext{Chain: chain, parlia: p}
if txs == nil {
txs = make([]*types.Transaction, 0)
}
if receipts == nil {
receipts = make([]*types.Receipt, 0)
}
parent := chain.GetHeaderByHash(header.ParentHash)
if parent == nil {
return nil, nil, errors.New("parent not found")
}
if p.chainConfig.IsFeynman(header.Number, header.Time) {
systemcontracts.UpgradeBuildInSystemContract(p.chainConfig, header.Number, parent.Time, header.Time, state)
}
if p.chainConfig.IsOnFeynman(header.Number, parent.Time, header.Time) {
err := p.initializeFeynmanContract(state, header, cx, &txs, &receipts, nil, &header.GasUsed, true)
if err != nil {
log.Error("init feynman contract failed", "error", err)
}
}
if header.Number.Cmp(common.Big1) == 0 {
err := p.initContract(state, header, cx, &txs, &receipts, nil, &header.GasUsed, true)
if err != nil {
log.Error("init contract failed")
}
}
if header.Difficulty.Cmp(diffInTurn) != 0 {
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
return nil, nil, err
}
spoiledVal := snap.inturnValidator()
signedRecently := false
if p.chainConfig.IsPlato(header.Number) {
signedRecently = snap.SignRecently(spoiledVal)
} else {
for _, recent := range snap.Recents {
if recent == spoiledVal {
signedRecently = true
break
}
}
}
if !signedRecently {
err = p.slash(spoiledVal, state, header, cx, &txs, &receipts, nil, &header.GasUsed, true)
if err != nil {
// it is possible that slashing the validator failed because the slash channel is disabled.
log.Error("slash validator failed", "block hash", header.Hash(), "address", spoiledVal)
}
}
}
err := p.distributeIncoming(p.val, state, header, cx, &txs, &receipts, nil, &header.GasUsed, true)
if err != nil {
return nil, nil, err
}
if p.chainConfig.IsPlato(header.Number) {
if err := p.distributeFinalityReward(chain, state, header, cx, &txs, &receipts, nil, &header.GasUsed, true); err != nil {
return nil, nil, err
}
}
// update validators every day
if p.chainConfig.IsFeynman(header.Number, header.Time) && isBreatheBlock(parent.Time, header.Time) {
// we should avoid updating validators in the Feynman upgrade block
if !p.chainConfig.IsOnFeynman(header.Number, parent.Time, header.Time) {
if err := p.updateValidatorSetV2(state, header, cx, &txs, &receipts, nil, &header.GasUsed, true); err != nil {
return nil, nil, err
}
}
}
// should not happen. If it does, stopping the node is better than broadcasting the block
if header.GasLimit < header.GasUsed {
return nil, nil, errors.New("gas consumption of system txs exceed the gas limit")
}
header.UncleHash = types.CalcUncleHash(nil)
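// Compute the state root and assemble the block concurrently, then stitch the root into the assembled block.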
var blk *types.Block
var rootHash common.Hash
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
rootHash = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
wg.Done()
}()
go func() {
blk = types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil))
wg.Done()
}()
wg.Wait()
blk.SetRoot(rootHash)
// Assemble and return the final block for sealing
return blk, receipts, nil
}
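// IsActiveValidatorAt reports whether the local validator is in the validator set of the snapshot at the given
// header and, when checkVoteKeyFn is provided, whether its registered BLS vote address passes the check.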
func (p *Parlia) IsActiveValidatorAt(chain consensus.ChainHeaderReader, header *types.Header, checkVoteKeyFn func(bLSPublicKey *types.BLSPublicKey) bool) bool {
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
log.Error("failed to get the snapshot from consensus", "error", err)
return false
}
validators := snap.Validators
validatorInfo, ok := validators[p.val]
return ok && (checkVoteKeyFn == nil || (validatorInfo != nil && checkVoteKeyFn(&validatorInfo.VoteAddress)))
}
// VerifyVote verifies: 1. that the vote comes from a valid validator, and 2. that the vote's sourceNumber and sourceHash are correct
func (p *Parlia) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteEnvelope) error {
targetNumber := vote.Data.TargetNumber
targetHash := vote.Data.TargetHash
header := chain.GetVerifiedBlockByHash(targetHash)
if header == nil {
log.Warn("BlockHeader at current voteBlockNumber is nil", "targetNumber", targetNumber, "targetHash", targetHash)
return errors.New("BlockHeader at current voteBlockNumber is nil")
}
if header.Number.Uint64() != targetNumber {
log.Warn("unexpected target number", "expect", header.Number.Uint64(), "real", targetNumber)
return errors.New("target number mismatch")
}
justifiedBlockNumber, justifiedBlockHash, err := p.GetJustifiedNumberAndHash(chain, []*types.Header{header})
if err != nil {
log.Error("failed to get the highest justified number and hash", "headerNumber", header.Number, "headerHash", header.Hash())
return errors.New("unexpected error when getting the highest justified number and hash")
}
if vote.Data.SourceNumber != justifiedBlockNumber || vote.Data.SourceHash != justifiedBlockHash {
return errors.New("vote source block mismatch")
}
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
log.Error("failed to get the snapshot from consensus", "error", err)
return errors.New("failed to get the snapshot from consensus")
}
validators := snap.Validators
voteAddress := vote.VoteAddress
for addr, validator := range validators {
if validator.VoteAddress == voteAddress {
if addr == p.val {
validVotesfromSelfCounter.Inc(1)
}
metrics.GetOrRegisterCounter(fmt.Sprintf("parlia/VerifyVote/%s", addr.String()), nil).Inc(1)
return nil
}
}
return errors.New("vote verification failed")
}
// Authorize injects a private key into the consensus engine to mint new blocks
// with.
func (p *Parlia) Authorize(val common.Address, signFn SignerFn, signTxFn SignerTxFn) {
p.lock.Lock()
defer p.lock.Unlock()
p.val = val
p.signFn = signFn
p.signTxFn = signTxFn
}
// Delay returns how long to wait before sealing. The leftOver argument is the time reserved for block finalization (calculating the root, distributing income, ...).
func (p *Parlia) Delay(chain consensus.ChainReader, header *types.Header, leftOver *time.Duration) *time.Duration {
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
return nil
}
delay := p.delayForRamanujanFork(snap, header)
if *leftOver >= time.Duration(p.config.Period)*time.Second {
// ignore invalid leftOver
log.Error("Delay invalid argument", "leftOver", leftOver.String(), "Period", p.config.Period)
} else if *leftOver >= delay {
delay = time.Duration(0)
return &delay
} else {
delay = delay - *leftOver
}
// The blocking time should be no more than half of period when snap.TurnLength == 1
timeForMining := time.Duration(p.config.Period) * time.Second / 2
if !snap.lastBlockInOneTurn(header.Number.Uint64()) {
timeForMining = time.Duration(p.config.Period) * time.Second * 2 / 3
}
if delay > timeForMining {
delay = timeForMining
}
return &delay
}
// Seal implements consensus.Engine, attempting to create a sealed block using
// the local signing credentials.
func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
header := block.Header()
// Sealing the genesis block is not supported
number := header.Number.Uint64()
if number == 0 {
return errUnknownBlock
}
// For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing)
if p.config.Period == 0 && len(block.Transactions()) == 0 {
log.Info("Sealing paused, waiting for transactions")
return nil
}
// Don't hold the val fields for the entire sealing procedure
p.lock.RLock()
val, signFn := p.val, p.signFn
p.lock.RUnlock()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
return err
}
// Bail out if we're unauthorized to sign a block
if _, authorized := snap.Validators[val]; !authorized {
return errUnauthorizedValidator(val.String())
}
// If we're amongst the recent signers, wait for the next block
if snap.SignRecently(val) {
log.Info("Signed recently, must wait for others")
return nil
}
// Sweet, the protocol permits us to sign the block, wait for our time
delay := p.delayForRamanujanFork(snap, header)
log.Info("Sealing block with", "number", number, "delay", delay, "headerDifficulty", header.Difficulty, "val", val.Hex())
// Wait until sealing is terminated or delay timeout.
log.Trace("Waiting for slot to sign and propagate", "delay", common.PrettyDuration(delay))
go func() {
select {
case <-stop:
return
case <-time.After(delay):
}
err := p.assembleVoteAttestation(chain, header)
if err != nil {
/* If the vote attestation can't be assembled successfully, the chain won't reach
fast finality, but this can be tolerated, so just report the error here. */
log.Error("Assemble vote attestation failed when sealing", "err", err)
}
// Sign all the things!
sig, err := signFn(accounts.Account{Address: val}, accounts.MimetypeParlia, ParliaRLP(header, p.chainConfig.ChainID))
if err != nil {
log.Error("Sign for the block header failed when sealing", "err", err)
return
}
copy(header.Extra[len(header.Extra)-extraSeal:], sig)
if p.shouldWaitForCurrentBlockProcess(chain, header, snap) {
highestVerifiedHeader := chain.GetHighestVerifiedHeader()
// including time for writing and committing blocks
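// the estimate assumes the chain processes roughly 100M gas per second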
waitProcessEstimate := math.Ceil(float64(highestVerifiedHeader.GasUsed) / float64(100_000_000))
log.Info("Waiting for received in turn block to process", "waitProcessEstimate(Seconds)", waitProcessEstimate)
select {
case <-stop:
log.Info("Received block process finished, abort block seal")
return
case <-time.After(time.Duration(waitProcessEstimate) * time.Second):
if chain.CurrentHeader().Number.Uint64() >= header.Number.Uint64() {
log.Info("Process backoff time exhausted, and current header has updated to abort this seal")
return
}
log.Info("Process backoff time exhausted, start to seal block")
}
}
select {
case results <- block.WithSeal(header):
default:
log.Warn("Sealing result is not read by miner", "sealhash", types.SealHash(header, p.chainConfig.ChainID))
}
}()
return nil
}
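// shouldWaitForCurrentBlockProcess reports whether an out-of-turn sealer should wait for the in-turn block that
// is currently being processed: it returns true only when the header is out of turn and shares its parent with
// the highest verified header.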
func (p *Parlia) shouldWaitForCurrentBlockProcess(chain consensus.ChainHeaderReader, header *types.Header, snap *Snapshot) bool {
if header.Difficulty.Cmp(diffInTurn) == 0 {
return false
}
highestVerifiedHeader := chain.GetHighestVerifiedHeader()
if highestVerifiedHeader == nil {
return false
}
if header.ParentHash == highestVerifiedHeader.ParentHash {
return true
}
return false
}
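// EnoughDistance reports whether the local validator is far enough away, according to the snapshot, to seal on
// top of the given header; if the snapshot cannot be loaded it optimistically returns true.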
func (p *Parlia) EnoughDistance(chain consensus.ChainReader, header *types.Header) bool {
snap, err := p.snapshot(chain, header.Number.Uint64()-1, header.ParentHash, nil)
if err != nil {
return true
}
return snap.enoughDistance(p.val, header)
}
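// IsLocalBlock reports whether the given header was produced by the local validator.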
func (p *Parlia) IsLocalBlock(header *types.Header) bool {
return p.val == header.Coinbase
}
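// SignRecently reports whether the local validator has signed recently according to the snapshot at the given
// parent block; it also returns true when the snapshot cannot be loaded or the validator is not authorized.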
func (p *Parlia) SignRecently(chain consensus.ChainReader, parent *types.Block) (bool, error) {
snap, err := p.snapshot(chain, parent.NumberU64(), parent.Hash(), nil)
if err != nil {
return true, err
}
// Bail out if we're unauthorized to sign a block
if _, authorized := snap.Validators[p.val]; !authorized {
return true, errUnauthorizedValidator(p.val.String())
}
return snap.SignRecently(p.val), nil
}
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have based on the previous blocks in the chain and the
// current signer.
func (p *Parlia) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
snap, err := p.snapshot(chain, parent.Number.Uint64(), parent.Hash(), nil)
if err != nil {
return nil
}
return CalcDifficulty(snap, p.val)
}
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have based on the previous blocks in the chain and the
// current signer.
func CalcDifficulty(snap *Snapshot, signer common.Address) *big.Int {
if snap.inturn(signer) {
return new(big.Int).Set(diffInTurn)
}
return new(big.Int).Set(diffNoTurn)
}
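// encodeSigHeaderWithoutVoteAttestation RLP-encodes the header fields covered by the seal, keeping only the
// extraVanity prefix of the extra-data so that neither the vote attestation nor the seal is part of the payload.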
func encodeSigHeaderWithoutVoteAttestation(w io.Writer, header *types.Header, chainId *big.Int) {
err := rlp.Encode(w, []interface{}{
chainId,
header.ParentHash,
header.UncleHash,
header.Coinbase,
header.Root,
header.TxHash,
header.ReceiptHash,
header.Bloom,
header.Difficulty,
header.Number,
header.GasLimit,
header.GasUsed,
header.Time,
header.Extra[:extraVanity], // this will panic if Extra is shorter than extraVanity; callers must check before calling encodeSigHeaderWithoutVoteAttestation
header.MixDigest,
header.Nonce,
})
if err != nil {
panic("can't encode: " + err.Error())
}
}
// SealHash returns the hash of a block without the vote attestation, prior to it being sealed.
// It is not the real hash of the block, just a unique id used to distinguish sealing tasks.
func (p *Parlia) SealHash(header *types.Header) (hash common.Hash) {
hasher := sha3.NewLegacyKeccak256()
encodeSigHeaderWithoutVoteAttestation(hasher, header, p.chainConfig.ChainID)
hasher.Sum(hash[:0])
return hash
}
// APIs implements consensus.Engine, returning the user-facing RPC APIs to query the snapshot.
func (p *Parlia) APIs(chain consensus.ChainHeaderReader) []rpc.API {
return []rpc.API{{
Namespace: "parlia",
Version: "1.0",
Service: &API{chain: chain, parlia: p},
Public: false,
}}
}
// Close implements consensus.Engine. It's a noop for parlia as there are no background threads.
func (p *Parlia) Close() error {
return nil
}
// ========================== interaction with contract/account =========
// getCurrentValidators returns the current mining validators and, after the Luban fork, their BLS vote addresses
func (p *Parlia) getCurrentValidators(blockHash common.Hash, blockNum *big.Int) ([]common.Address, map[common.Address]*types.BLSPublicKey, error) {
// block
blockNr := rpc.BlockNumberOrHashWithHash(blockHash, false)
if !p.chainConfig.IsLuban(blockNum) {
validators, err := p.getCurrentValidatorsBeforeLuban(blockHash, blockNum)
return validators, nil, err
}
// method
method := "getMiningValidators"
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // cancel when we are finished with the call
data, err := p.validatorSetABI.Pack(method)
if err != nil {
log.Error("Unable to pack tx for getMiningValidators", "error", err)
return nil, nil, err
}
// call
msgData := (hexutil.Bytes)(data)
toAddress := common.HexToAddress(systemcontracts.ValidatorContract)
gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2))
result, err := p.ethAPI.Call(ctx, ethapi.TransactionArgs{
Gas: &gas,
To: &toAddress,
Data: &msgData,
}, &blockNr, nil, nil)
if err != nil {
return nil, nil, err
}
var valSet []common.Address
var voteAddrSet []types.BLSPublicKey
if err := p.validatorSetABI.UnpackIntoInterface(&[]interface{}{&valSet, &voteAddrSet}, method, result); err != nil {
return nil, nil, err
}
voteAddrMap := make(map[common.Address]*types.BLSPublicKey, len(valSet))
for i := 0; i < len(valSet); i++ {
voteAddrMap[valSet[i]] = &(voteAddrSet)[i]
}
return valSet, voteAddrMap, nil
}
// distributeIncoming distributes the block's system income (the balance accumulated on the system address)
func (p *Parlia) distributeIncoming(val common.Address, state *state.StateDB, header *types.Header, chain core.ChainContext,
txs *[]*types.Transaction, receipts *[]*types.Receipt, receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool) error {
coinbase := header.Coinbase
doDistributeSysReward := !p.chainConfig.IsKepler(header.Number, header.Time) &&
state.GetBalance(common.HexToAddress(systemcontracts.SystemRewardContract)).Cmp(maxSystemBalance) < 0
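// Before Kepler, divert 1/2^systemRewardPercent (1/16) of the incoming balance to the system reward contract
// until its balance reaches maxSystemBalance.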
if doDistributeSysReward {
balance := state.GetBalance(consensus.SystemAddress)
rewards := new(uint256.Int)
rewards = rewards.Rsh(balance, systemRewardPercent)
if rewards.Cmp(common.U2560) > 0 {
state.SetBalance(consensus.SystemAddress, balance.Sub(balance, rewards))
state.AddBalance(coinbase, rewards)
err := p.distributeToSystem(rewards.ToBig(), state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
if err != nil {
return err
}
log.Trace("distribute to system reward pool", "block hash", header.Hash(), "amount", rewards)
}
}
balance := state.GetBalance(consensus.SystemAddress)
if balance.Cmp(common.U2560) <= 0 {
return nil
}
state.SetBalance(consensus.SystemAddress, common.U2560)
state.AddBalance(coinbase, balance)
log.Trace("distribute to validator contract", "block hash", header.Hash(), "amount", balance)
return p.distributeToValidator(balance.ToBig(), val, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
}
// slash submits a system transaction to slash the spoiled validator
func (p *Parlia) slash(spoiledVal common.Address, state *state.StateDB, header *types.Header, chain core.ChainContext,
txs *[]*types.Transaction, receipts *[]*types.Receipt, receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool) error {
// method
method := "slash"
// get packed data
data, err := p.slashABI.Pack(method,
spoiledVal,
)
if err != nil {
log.Error("Unable to pack tx for slash", "error", err)
return err
}
// get system message
msg := p.getSystemMessage(header.Coinbase, common.HexToAddress(systemcontracts.SlashContract), data, common.Big0)
// apply message
return p.applyTransaction(msg, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
}
// initContract calls the init method on each genesis system contract
func (p *Parlia) initContract(state *state.StateDB, header *types.Header, chain core.ChainContext,
txs *[]*types.Transaction, receipts *[]*types.Receipt, receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool) error {
// method
method := "init"
// contracts
contracts := []string{
systemcontracts.ValidatorContract,
systemcontracts.SlashContract,
systemcontracts.LightClientContract,
systemcontracts.RelayerHubContract,
systemcontracts.TokenHubContract,
systemcontracts.RelayerIncentivizeContract,
systemcontracts.CrossChainContract,
}
// get packed data
data, err := p.validatorSetABI.Pack(method)
if err != nil {
log.Error("Unable to pack tx for init validator set", "error", err)
return err
}
for _, c := range contracts {
msg := p.getSystemMessage(header.Coinbase, common.HexToAddress(c), data, common.Big0)
// apply message
log.Trace("init contract", "block hash", header.Hash(), "contract", c)
err = p.applyTransaction(msg, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
if err != nil {
return err
}
}
return nil
}
func (p *Parlia) distributeToSystem(amount *big.Int, state *state.StateDB, header *types.Header, chain core.ChainContext,
txs *[]*types.Transaction, receipts *[]*types.Receipt, receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool) error {
// get system message
msg := p.getSystemMessage(header.Coinbase, common.HexToAddress(systemcontracts.SystemRewardContract), nil, amount)
// apply message
return p.applyTransaction(msg, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
}
// distributeToValidator deposits the validator reward into the validator set contract
func (p *Parlia) distributeToValidator(amount *big.Int, validator common.Address,
state *state.StateDB, header *types.Header, chain core.ChainContext,
txs *[]*types.Transaction, receipts *[]*types.Receipt, receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool) error {
// method
method := "deposit"
// get packed data
data, err := p.validatorSetABI.Pack(method,
validator,
)
if err != nil {
log.Error("Unable to pack tx for deposit", "error", err)
return err
}
// get system message
msg := p.getSystemMessage(header.Coinbase, common.HexToAddress(systemcontracts.ValidatorContract), data, amount)
// apply message
return p.applyTransaction(msg, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
}
// getSystemMessage builds the call message for a system transaction (zero gas price, huge gas limit)
func (p *Parlia) getSystemMessage(from, toAddress common.Address, data []byte, value *big.Int) callmsg {
return callmsg{
ethereum.CallMsg{
From: from,
Gas: math.MaxUint64 / 2,
GasPrice: big.NewInt(0),
Value: value,
To: &toAddress,
Data: data,
},
}
}
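// applyTransaction executes a system transaction. When mining, the expected transaction is built and signed
// locally; otherwise it must match the next transaction in receivedTxs (compared by signer hash). The resulting
// receipt is created and appended alongside the transaction.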
func (p *Parlia) applyTransaction(
msg callmsg,
state *state.StateDB,
header *types.Header,
chainContext core.ChainContext,
txs *[]*types.Transaction, receipts *[]*types.Receipt,
receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool,
) (err error) {
nonce := state.GetNonce(msg.From())
expectedTx := types.NewTransaction(nonce, *msg.To(), msg.Value(), msg.Gas(), msg.GasPrice(), msg.Data())
expectedHash := p.signer.Hash(expectedTx)
if msg.From() == p.val && mining {
expectedTx, err = p.signTxFn(accounts.Account{Address: msg.From()}, expectedTx, p.chainConfig.ChainID)
if err != nil {
return err
}
} else {
if receivedTxs == nil || len(*receivedTxs) == 0 || (*receivedTxs)[0] == nil {
return errors.New("supposed to get a actual transaction, but get none")
}
actualTx := (*receivedTxs)[0]
if !bytes.Equal(p.signer.Hash(actualTx).Bytes(), expectedHash.Bytes()) {
return fmt.Errorf("expected tx hash %v, get %v, nonce %d, to %s, value %s, gas %d, gasPrice %s, data %s", expectedHash.String(), actualTx.Hash().String(),
expectedTx.Nonce(),
expectedTx.To().String(),
expectedTx.Value().String(),
expectedTx.Gas(),
expectedTx.GasPrice().String(),
hex.EncodeToString(expectedTx.Data()),
)
}
expectedTx = actualTx
// move to next
*receivedTxs = (*receivedTxs)[1:]
}
state.SetTxContext(expectedTx.Hash(), len(*txs))
gasUsed, err := applyMessage(msg, state, header, p.chainConfig, chainContext)
if err != nil {
return err
}
*txs = append(*txs, expectedTx)
var root []byte
if p.chainConfig.IsByzantium(header.Number) {
state.Finalise(true)
} else {
root = state.IntermediateRoot(p.chainConfig.IsEIP158(header.Number)).Bytes()
}
*usedGas += gasUsed
receipt := types.NewReceipt(root, false, *usedGas)
receipt.TxHash = expectedTx.Hash()
receipt.GasUsed = gasUsed
// Set the receipt logs and create a bloom for filtering
receipt.Logs = state.GetLogs(expectedTx.Hash(), header.Number.Uint64(), header.Hash())
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
receipt.BlockHash = header.Hash()
receipt.BlockNumber = header.Number
receipt.TransactionIndex = uint(state.TxIndex())
*receipts = append(*receipts, receipt)
return nil
}
// GetJustifiedNumberAndHash retrieves the number and hash of the highest justified block
// within the branch including `headers` and utilizing the latest element as the head.
func (p *Parlia) GetJustifiedNumberAndHash(chain consensus.ChainHeaderReader, headers []*types.Header) (uint64, common.Hash, error) {
if chain == nil || len(headers) == 0 || headers[len(headers)-1] == nil {
return 0, common.Hash{}, errors.New("illegal chain or header")
}
head := headers[len(headers)-1]
snap, err := p.snapshot(chain, head.Number.Uint64(), head.Hash(), headers)
if err != nil {
log.Error("Unexpected error when getting snapshot",
"error", err, "blockNumber", head.Number.Uint64(), "blockHash", head.Hash())
return 0, common.Hash{}, err
}
if snap.Attestation == nil {
if p.chainConfig.IsLuban(head.Number) {
log.Debug("once one attestation generated, attestation of snap would not be nil forever basically")
}
return 0, chain.GetHeaderByNumber(0).Hash(), nil
}
return snap.Attestation.TargetNumber, snap.Attestation.TargetHash, nil
}
// GetFinalizedHeader returns highest finalized block header.
func (p *Parlia) GetFinalizedHeader(chain consensus.ChainHeaderReader, header *types.Header) *types.Header {
if chain == nil || header == nil {
return nil
}
if !chain.Config().IsPlato(header.Number) {
return chain.GetHeaderByNumber(0)
}
snap, err := p.snapshot(chain, header.Number.Uint64(), header.Hash(), nil)
if err != nil {
log.Error("Unexpected error when getting snapshot",
"error", err, "blockNumber", header.Number.Uint64(), "blockHash", header.Hash())
return nil
}
if snap.Attestation == nil {
return chain.GetHeaderByNumber(0) // keep consistent with GetJustifiedNumberAndHash
}
return chain.GetHeader(snap.Attestation.SourceHash, snap.Attestation.SourceNumber)
}
// =========================== utility function ==========================
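// backOffTime returns the extra delay (in seconds) an out-of-turn validator should wait before sealing.
// In-turn validators return 0; the others get initialBackOffTime plus a deterministically shuffled step,
// so all nodes agree on the backoff order without any extra communication.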
func (p *Parlia) backOffTime(snap *Snapshot, header *types.Header, val common.Address) uint64 {
if snap.inturn(val) {
log.Debug("backOffTime", "blockNumber", header.Number, "in turn validator", val)
return 0
} else {
delay := initialBackOffTime
validators := snap.validators()
if p.chainConfig.IsPlanck(header.Number) {
counts := snap.countRecents()
for addr, seenTimes := range counts {
log.Debug("backOffTime", "blockNumber", header.Number, "validator", addr, "seenTimes", seenTimes)
}
// The backOffTime does not matter when a validator has signed recently.
if snap.signRecentlyByCounts(val, counts) {
return 0
}
inTurnAddr := snap.inturnValidator()
if snap.signRecentlyByCounts(inTurnAddr, counts) {
log.Debug("in turn validator has recently signed, skip initialBackOffTime",
"inTurnAddr", inTurnAddr)
delay = 0
}
// Exclude the recently signed validators and the in turn validator
temp := make([]common.Address, 0, len(validators))
for _, addr := range validators {
if snap.signRecentlyByCounts(addr, counts) {
continue
}
if p.chainConfig.IsBohr(header.Number, header.Time) {
if addr == inTurnAddr {
continue
}
}
temp = append(temp, addr)
}
validators = temp
}
// get the index of current validator and its shuffled backoff time.
idx := -1
for index, itemAddr := range validators {
if val == itemAddr {
idx = index
}
}
if idx < 0 {
log.Debug("The validator is not authorized", "addr", val)
return 0
}
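// Seed the shuffle with the snapshot number; after Bohr, derive the seed from the turn index so the order
// stays stable for all blocks within the same turn.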
randSeed := snap.Number
if p.chainConfig.IsBohr(header.Number, header.Time) {
randSeed = header.Number.Uint64() / uint64(snap.TurnLength)
}
s := rand.NewSource(int64(randSeed))
r := rand.New(s)
n := len(validators)
backOffSteps := make([]uint64, 0, n)
for i := uint64(0); i < uint64(n); i++ {
backOffSteps = append(backOffSteps, i)
}
r.Shuffle(n, func(i, j int) {
backOffSteps[i], backOffSteps[j] = backOffSteps[j], backOffSteps[i]
})
delay += backOffSteps[idx] * wiggleTime
return delay
}
}
// chain context
type chainContext struct {
Chain consensus.ChainHeaderReader
parlia consensus.Engine
}
func (c chainContext) Engine() consensus.Engine {
return c.parlia
}
func (c chainContext) GetHeader(hash common.Hash, number uint64) *types.Header {
return c.Chain.GetHeader(hash, number)
}
// callmsg implements core.Message to allow passing it as a transaction simulator.
type callmsg struct {
ethereum.CallMsg
}
func (m callmsg) From() common.Address { return m.CallMsg.From }
func (m callmsg) Nonce() uint64 { return 0 }
func (m callmsg) CheckNonce() bool { return false }
func (m callmsg) To() *common.Address { return m.CallMsg.To }
func (m callmsg) GasPrice() *big.Int { return m.CallMsg.GasPrice }
func (m callmsg) Gas() uint64 { return m.CallMsg.Gas }
func (m callmsg) Value() *big.Int { return m.CallMsg.Value }
func (m callmsg) Data() []byte { return m.CallMsg.Data }
// applyMessage executes a system call directly against the EVM with a zero gas price and returns the gas used
func applyMessage(
msg callmsg,
state *state.StateDB,
header *types.Header,
chainConfig *params.ChainConfig,
chainContext core.ChainContext,
) (uint64, error) {
// Create a new context to be used in the EVM environment
context := core.NewEVMBlockContext(header, chainContext, nil)
// Create a new environment which holds all relevant information
// about the transaction and calling mechanisms.
vmenv := vm.NewEVM(context, vm.TxContext{Origin: msg.From(), GasPrice: big.NewInt(0)}, state, chainConfig, vm.Config{})
// Apply the transaction to the current state (included in the env)
if chainConfig.IsCancun(header.Number, header.Time) {
rules := vmenv.ChainConfig().Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil, vmenv.Context.Time)
state.Prepare(rules, msg.From(), vmenv.Context.Coinbase, msg.To(), vm.ActivePrecompiles(rules), msg.AccessList)
}
// Increment the nonce for the next transaction
state.SetNonce(msg.From(), state.GetNonce(msg.From())+1)
ret, returnGas, err := vmenv.Call(
vm.AccountRef(msg.From()),
*msg.To(),
msg.Data(),
msg.Gas(),
uint256.MustFromBig(msg.Value()),
)
if err != nil {
log.Error("apply message failed", "msg", string(ret), "err", err)
}
return msg.Gas() - returnGas, err
}
// proposalKey builds a key combining the parent hash and the proposer address.
func proposalKey(header types.Header) string {
return header.ParentHash.String() + header.Coinbase.String()
}