// bsc/consensus/parlia/parlia.go

package parlia
import (
"bytes"
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"math"
"math/big"
"math/rand"
"sort"
"strings"
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/willf/bitset"
"golang.org/x/crypto/sha3"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/hexutil"
cmath "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/systemcontracts"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
)
const (
inMemorySnapshots = 256 // Number of recent snapshots to keep in memory
inMemorySignatures = 4096 // Number of recent block signatures to keep in memory
checkpointInterval = 1024 // Number of blocks after which to save the snapshot to the database
defaultEpochLength = uint64(100) // Default epoch length in blocks, after which the validator set is updated from the contract
extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal
nextForkHashSize = 4 // Fixed number of extra-data suffix bytes reserved for nextForkHash.
validatorBytesLengthBeforeLuban = common.AddressLength
validatorBytesLength = common.AddressLength + types.BLSPublicKeyLength
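// Assuming the usual constant values (20-byte address, 48-byte BLS public key), this is 68 bytes per validator after Luban.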
validatorNumberSize = 1 // Fixed number of extra prefix bytes reserved for validator number after Luban
wiggleTime = uint64(1) // second, Random delay (per signer) to allow concurrent signers
initialBackOffTime = uint64(1) // second
processBackOffTime = uint64(1) // second
systemRewardPercent = 4 // means 1/2^4 = 1/16 of the incoming gas fees will be distributed to the system reward contract
collectAdditionalVotesRewardRatio = 100 // ratio (out of 100) of the additional reward for collecting more votes than needed
)
var (
uncleHash = types.CalcUncleHash(nil) // Always Keccak256(RLP([])) as uncles are meaningless outside of PoW.
diffInTurn = big.NewInt(2) // Block difficulty for in-turn signatures
diffNoTurn = big.NewInt(1) // Block difficulty for out-of-turn signatures
// 100 native token
maxSystemBalance = new(big.Int).Mul(big.NewInt(100), big.NewInt(params.Ether))
verifyVoteAttestationErrorCounter = metrics.NewRegisteredCounter("parlia/verifyVoteAttestation/error", nil)
updateAttestationErrorCounter = metrics.NewRegisteredCounter("parlia/updateAttestation/error", nil)
validVotesfromSelfCounter = metrics.NewRegisteredCounter("parlia/VerifyVote/self", nil)
systemContracts = map[common.Address]bool{
common.HexToAddress(systemcontracts.ValidatorContract): true,
common.HexToAddress(systemcontracts.SlashContract): true,
common.HexToAddress(systemcontracts.SystemRewardContract): true,
common.HexToAddress(systemcontracts.LightClientContract): true,
common.HexToAddress(systemcontracts.RelayerHubContract): true,
common.HexToAddress(systemcontracts.GovHubContract): true,
common.HexToAddress(systemcontracts.TokenHubContract): true,
common.HexToAddress(systemcontracts.RelayerIncentivizeContract): true,
common.HexToAddress(systemcontracts.CrossChainContract): true,
}
)
// Various error messages to mark blocks invalid. These should be private to
// prevent engine specific errors from being referenced in the remainder of the
// codebase, inherently breaking if the engine is swapped out. Please put common
// error types into the consensus package.
var (
// errUnknownBlock is returned when the list of validators is requested for a block
// that is not part of the local blockchain.
errUnknownBlock = errors.New("unknown block")
// errMissingVanity is returned if a block's extra-data section is shorter than
// 32 bytes, which is required to store the signer vanity.
errMissingVanity = errors.New("extra-data 32 byte vanity prefix missing")
// errMissingSignature is returned if a block's extra-data section doesn't seem
// to contain a 65 byte secp256k1 signature.
errMissingSignature = errors.New("extra-data 65 byte signature suffix missing")
// errExtraValidators is returned if non-sprint-end block contain validator data in
// their extra-data fields.
errExtraValidators = errors.New("non-sprint-end block contains extra validator list")
// errInvalidSpanValidators is returned if a block contains an
// invalid list of validators (i.e. not divisible by 20 bytes).
errInvalidSpanValidators = errors.New("invalid validator list on sprint end block")
// errInvalidMixDigest is returned if a block's mix digest is non-zero.
errInvalidMixDigest = errors.New("non-zero mix digest")
// errInvalidUncleHash is returned if a block contains a non-empty uncle list.
errInvalidUncleHash = errors.New("non empty uncle hash")
// errMismatchingEpochValidators is returned if a sprint block contains a
// list of validators different than the one the local node calculated.
errMismatchingEpochValidators = errors.New("mismatching validator list on epoch block")
// errInvalidDifficulty is returned if the difficulty of a block is missing.
errInvalidDifficulty = errors.New("invalid difficulty")
// errWrongDifficulty is returned if the difficulty of a block doesn't match the
// turn of the signer.
errWrongDifficulty = errors.New("wrong difficulty")
// errOutOfRangeChain is returned if an authorization list is attempted to
// be modified via out-of-range or non-contiguous headers.
errOutOfRangeChain = errors.New("out of range or non-contiguous chain")
// errBlockHashInconsistent is returned if an authorization list is attempted to
// insert an inconsistent block.
errBlockHashInconsistent = errors.New("the block hash is inconsistent")
// errUnauthorizedValidator is returned if a header is signed by a non-authorized entity.
errUnauthorizedValidator = func(val string) error {
return errors.New("unauthorized validator: " + val)
}
// errCoinBaseMisMatch is returned if a header's coinbase does not match the signer recovered from the signature
errCoinBaseMisMatch = errors.New("coinbase do not match with signature")
// errRecentlySigned is returned if a header is signed by an authorized entity
// that already signed a header recently, thus is temporarily not allowed to.
errRecentlySigned = errors.New("recently signed")
)
// SignerFn is a signer callback function to request a header to be signed by a
// backing account.
type SignerFn func(accounts.Account, string, []byte) ([]byte, error)
type SignerTxFn func(accounts.Account, *types.Transaction, *big.Int) (*types.Transaction, error)
func isToSystemContract(to common.Address) bool {
return systemContracts[to]
}
// ecrecover extracts the Ethereum account address from a signed header.
func ecrecover(header *types.Header, sigCache *lru.ARCCache, chainId *big.Int) (common.Address, error) {
// If the signature's already cached, return that
hash := header.Hash()
if address, known := sigCache.Get(hash); known {
return address.(common.Address), nil
}
// Retrieve the signature from the header extra-data
if len(header.Extra) < extraSeal {
return common.Address{}, errMissingSignature
}
signature := header.Extra[len(header.Extra)-extraSeal:]
// Recover the public key and the Ethereum address
pubkey, err := crypto.Ecrecover(SealHash(header, chainId).Bytes(), signature)
if err != nil {
return common.Address{}, err
}
var signer common.Address
copy(signer[:], crypto.Keccak256(pubkey[1:])[12:])
sigCache.Add(hash, signer)
return signer, nil
}
// ParliaRLP returns the rlp bytes which needs to be signed for the parlia
// sealing. The RLP to sign consists of the entire header apart from the 65 byte signature
// contained at the end of the extra data.
//
// Note, the method requires the extra data to be at least 65 bytes, otherwise it
// panics. This is done to avoid accidentally using both forms (signature present
// or not), which could be abused to produce different hashes for the same header.
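// Note: SealHash (used by ecrecover above) is expected to be the Keccak256 of these same bytes, since both
// presumably go through encodeSigHeader.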
func ParliaRLP(header *types.Header, chainId *big.Int) []byte {
b := new(bytes.Buffer)
encodeSigHeader(b, header, chainId)
return b.Bytes()
}
// Parlia is the consensus engine of BSC
type Parlia struct {
chainConfig *params.ChainConfig // Chain config
config *params.ParliaConfig // Consensus engine configuration parameters for parlia consensus
genesisHash common.Hash
db ethdb.Database // Database to store and retrieve snapshot checkpoints
recentSnaps *lru.ARCCache // Snapshots for recent block to speed up
signatures *lru.ARCCache // Signatures of recent blocks to speed up mining
signer types.Signer
val common.Address // Ethereum address of the signing key
signFn SignerFn // Signer function to authorize hashes with
signTxFn SignerTxFn
lock sync.RWMutex // Protects the signer fields
ethAPI *ethapi.BlockChainAPI
VotePool consensus.VotePool
validatorSetABIBeforeLuban abi.ABI
validatorSetABI abi.ABI
slashABI abi.ABI
// The fields below are for testing only
fakeDiff bool // Skip difficulty verifications
}
// New creates a Parlia consensus engine.
func New(
chainConfig *params.ChainConfig,
db ethdb.Database,
ethAPI *ethapi.BlockChainAPI,
genesisHash common.Hash,
) *Parlia {
// get parlia config
parliaConfig := chainConfig.Parlia
log.Info("Parlia", "chainConfig", chainConfig)
// Set any missing consensus parameters to their defaults
if parliaConfig != nil && parliaConfig.Epoch == 0 {
parliaConfig.Epoch = defaultEpochLength
}
// Allocate the snapshot caches and create the engine
recentSnaps, err := lru.NewARC(inMemorySnapshots)
if err != nil {
panic(err)
}
signatures, err := lru.NewARC(inMemorySignatures)
if err != nil {
panic(err)
}
vABIBeforeLuban, err := abi.JSON(strings.NewReader(validatorSetABIBeforeLuban))
if err != nil {
panic(err)
}
vABI, err := abi.JSON(strings.NewReader(validatorSetABI))
if err != nil {
panic(err)
}
sABI, err := abi.JSON(strings.NewReader(slashABI))
if err != nil {
panic(err)
}
c := &Parlia{
chainConfig: chainConfig,
config: parliaConfig,
genesisHash: genesisHash,
db: db,
ethAPI: ethAPI,
recentSnaps: recentSnaps,
signatures: signatures,
validatorSetABIBeforeLuban: vABIBeforeLuban,
validatorSetABI: vABI,
slashABI: sABI,
signer: types.LatestSigner(chainConfig),
}
return c
}
func (p *Parlia) IsSystemTransaction(tx *types.Transaction, header *types.Header) (bool, error) {
// deploy a contract
if tx.To() == nil {
return false, nil
}
sender, err := types.Sender(p.signer, tx)
if err != nil {
return false, errors.New("UnAuthorized transaction")
}
if sender == header.Coinbase && isToSystemContract(*tx.To()) && tx.GasPrice().Cmp(big.NewInt(0)) == 0 {
return true, nil
}
return false, nil
}
func (p *Parlia) IsSystemContract(to *common.Address) bool {
if to == nil {
return false
}
return isToSystemContract(*to)
}
// Author implements consensus.Engine, returning the header's coinbase as the proposer address
func (p *Parlia) Author(header *types.Header) (common.Address, error) {
return header.Coinbase, nil
}
// VerifyHeader checks whether a header conforms to the consensus rules.
func (p *Parlia) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error {
return p.verifyHeader(chain, header, nil)
}
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers. The
// method returns a quit channel to abort the operations and a results channel to
// retrieve the async verifications (the order is that of the input slice).
func (p *Parlia) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) {
abort := make(chan struct{})
results := make(chan error, len(headers))
gopool.Submit(func() {
for i, header := range headers {
err := p.verifyHeader(chain, header, headers[:i])
select {
case <-abort:
return
case results <- err:
}
}
})
return abort, results
}
// getValidatorBytesFromHeader returns the validator bytes extracted from the header's extra field, if present.
// The validator bytes are contained only in an epoch block's header, and each validator's byte segment has a fixed length.
// The Luban fork introduces the vote attestation into the header's extra field, so the extra format differs before and after it:
// Before Luban fork: |---Extra Vanity---|---Validators Bytes (or Empty)---|---Extra Seal---|
// After Luban fork:  |---Extra Vanity---|---Validators Number and Validators Bytes (or Empty)---|---Vote Attestation (or Empty)---|---Extra Seal---|
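// As a rough illustration (assuming a 20-byte address and a 48-byte BLS public key): a post-Luban epoch header with
// 4 validators carries 32 (vanity) + 1 (validator number) + 4*68 = 272 (validator bytes) bytes of extra data,
// followed by an optional RLP-encoded vote attestation and the 65-byte seal.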
func getValidatorBytesFromHeader(header *types.Header, chainConfig *params.ChainConfig, parliaConfig *params.ParliaConfig) []byte {
if len(header.Extra) <= extraVanity+extraSeal {
return nil
}
if !chainConfig.IsLuban(header.Number) {
if header.Number.Uint64()%parliaConfig.Epoch == 0 && (len(header.Extra)-extraSeal-extraVanity)%validatorBytesLengthBeforeLuban != 0 {
return nil
}
return header.Extra[extraVanity : len(header.Extra)-extraSeal]
}
if header.Number.Uint64()%parliaConfig.Epoch != 0 {
return nil
}
num := int(header.Extra[extraVanity])
if num == 0 || len(header.Extra) <= extraVanity+extraSeal+num*validatorBytesLength {
return nil
}
start := extraVanity + validatorNumberSize
end := start + num*validatorBytesLength
return header.Extra[start:end]
}
// getVoteAttestationFromHeader returns the vote attestation extracted from the header's extra field if exists.
func getVoteAttestationFromHeader(header *types.Header, chainConfig *params.ChainConfig, parliaConfig *params.ParliaConfig) (*types.VoteAttestation, error) {
if len(header.Extra) <= extraVanity+extraSeal {
return nil, nil
}
if !chainConfig.IsLuban(header.Number) {
return nil, nil
}
var attestationBytes []byte
if header.Number.Uint64()%parliaConfig.Epoch != 0 {
attestationBytes = header.Extra[extraVanity : len(header.Extra)-extraSeal]
} else {
num := int(header.Extra[extraVanity])
if len(header.Extra) <= extraVanity+extraSeal+validatorNumberSize+num*validatorBytesLength {
return nil, nil
}
start := extraVanity + validatorNumberSize + num*validatorBytesLength
end := len(header.Extra) - extraSeal
attestationBytes = header.Extra[start:end]
}
var attestation types.VoteAttestation
if err := rlp.Decode(bytes.NewReader(attestationBytes), &attestation); err != nil {
return nil, fmt.Errorf("block %d has vote attestation info, decode err: %s", header.Number.Uint64(), err)
}
return &attestation, nil
}
// getParent returns the parent of a given block.
func (p *Parlia) getParent(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) (*types.Header, error) {
var parent *types.Header
number := header.Number.Uint64()
if len(parents) > 0 {
parent = parents[len(parents)-1]
} else {
parent = chain.GetHeader(header.ParentHash, number-1)
}
if parent == nil || parent.Number.Uint64() != number-1 || parent.Hash() != header.ParentHash {
return nil, consensus.ErrUnknownAncestor
}
return parent, nil
}
// verifyVoteAttestation checks whether the vote attestation in the header is valid.
func (p *Parlia) verifyVoteAttestation(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
attestation, err := getVoteAttestationFromHeader(header, p.chainConfig, p.config)
if err != nil {
return err
}
if attestation == nil {
return nil
}
if attestation.Data == nil {
return fmt.Errorf("invalid attestation, vote data is nil")
}
if len(attestation.Extra) > types.MaxAttestationExtraLength {
return fmt.Errorf("invalid attestation, too large extra length: %d", len(attestation.Extra))
}
// Get parent block
parent, err := p.getParent(chain, header, parents)
if err != nil {
return err
}
// The target block should be direct parent.
targetNumber := attestation.Data.TargetNumber
targetHash := attestation.Data.TargetHash
if targetNumber != parent.Number.Uint64() || targetHash != parent.Hash() {
return fmt.Errorf("invalid attestation, target mismatch, expected block: %d, hash: %s; real block: %d, hash: %s",
parent.Number.Uint64(), parent.Hash(), targetNumber, targetHash)
}
// The source block should be the highest justified block.
sourceNumber := attestation.Data.SourceNumber
sourceHash := attestation.Data.SourceHash
justifiedBlockNumber, justifiedBlockHash, err := p.GetJustifiedNumberAndHash(chain, parent)
if err != nil {
return fmt.Errorf("unexpected error when getting the highest justified number and hash")
}
if sourceNumber != justifiedBlockNumber || sourceHash != justifiedBlockHash {
return fmt.Errorf("invalid attestation, source mismatch, expected block: %d, hash: %s; real block: %d, hash: %s",
justifiedBlockNumber, justifiedBlockHash, sourceNumber, sourceHash)
}
// The snapshot should be the targetNumber-1 block's snapshot.
if len(parents) > 1 {
parents = parents[:len(parents)-1]
} else {
parents = nil
}
snap, err := p.snapshot(chain, parent.Number.Uint64()-1, parent.ParentHash, parents)
if err != nil {
return err
}
// Filter out valid validator from attestation.
validators := snap.validators()
validatorsBitSet := bitset.From([]uint64{uint64(attestation.VoteAddressSet)})
if validatorsBitSet.Count() > uint(len(validators)) {
return fmt.Errorf("invalid attestation, vote number larger than validators number")
}
votedAddrs := make([]bls.PublicKey, 0, validatorsBitSet.Count())
for index, val := range validators {
if !validatorsBitSet.Test(uint(index)) {
continue
}
voteAddr, err := bls.PublicKeyFromBytes(snap.Validators[val].VoteAddress[:])
if err != nil {
return fmt.Errorf("BLS public key converts failed: %v", err)
}
votedAddrs = append(votedAddrs, voteAddr)
}
// The valid voted validators should be no less than 2/3 validators.
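// For example, with 21 validators the threshold is CeilDiv(21*2, 3) = 14 valid votes.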
if len(votedAddrs) < cmath.CeilDiv(len(snap.Validators)*2, 3) {
return fmt.Errorf("invalid attestation, not enough validators voted")
}
// Verify the aggregated signature.
aggSig, err := bls.SignatureFromBytes(attestation.AggSignature[:])
if err != nil {
return fmt.Errorf("BLS signature converts failed: %v", err)
}
if !aggSig.FastAggregateVerify(votedAddrs, attestation.Data.Hash()) {
return fmt.Errorf("invalid attestation, signature verify failed")
}
return nil
}
// verifyHeader checks whether a header conforms to the consensus rules. The
// caller may optionally pass in a batch of parents (ascending order) to avoid
// looking those up from the database. This is useful for concurrently verifying
// a batch of new headers.
func (p *Parlia) verifyHeader(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
if header.Number == nil {
return errUnknownBlock
}
// Don't waste time checking blocks from the future
if header.Time > uint64(time.Now().Unix()) {
return consensus.ErrFutureBlock
}
// Check that the extra-data contains the vanity, validators and signature.
if len(header.Extra) < extraVanity {
return errMissingVanity
}
if len(header.Extra) < extraVanity+extraSeal {
return errMissingSignature
}
// check extra data
number := header.Number.Uint64()
isEpoch := number%p.config.Epoch == 0
// Ensure that the extra-data contains a signer list on checkpoint, but none otherwise
signersBytes := getValidatorBytesFromHeader(header, p.chainConfig, p.config)
if !isEpoch && len(signersBytes) != 0 {
return errExtraValidators
}
if isEpoch && len(signersBytes) == 0 {
return errInvalidSpanValidators
}
// Ensure that the mix digest is zero as we don't have fork protection currently
if header.MixDigest != (common.Hash{}) {
return errInvalidMixDigest
}
// Ensure that the block doesn't contain any uncles which are meaningless in PoA
if header.UncleHash != uncleHash {
return errInvalidUncleHash
}
// Ensure that the block's difficulty is meaningful (may not be correct at this point)
if number > 0 {
if header.Difficulty == nil {
return errInvalidDifficulty
}
}
parent, err := p.getParent(chain, header, parents)
if err != nil {
return err
}
// Verify the block's gas usage and (if applicable) verify the base fee.
if !chain.Config().IsLondon(header.Number) {
// Verify BaseFee not present before EIP-1559 fork.
if header.BaseFee != nil {
return fmt.Errorf("invalid baseFee before fork: have %d, expected 'nil'", header.BaseFee)
}
} else if err := eip1559.VerifyEIP1559Header(chain.Config(), parent, header); err != nil {
// Verify the header's EIP-1559 attributes.
return err
}
// Verify existence / non-existence of withdrawalsHash.
if header.WithdrawalsHash != nil {
return fmt.Errorf("invalid withdrawalsHash: have %x, expected nil", header.WithdrawalsHash)
}
// Verify the existence / non-existence of excessBlobGas
cancun := chain.Config().IsCancun(header.Number, header.Time)
if !cancun && header.ExcessBlobGas != nil {
return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas)
}
if !cancun && header.BlobGasUsed != nil {
return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed)
}
if cancun {
if err := eip4844.VerifyEIP4844Header(parent, header); err != nil {
return err
}
}
// All basic checks passed, verify cascading fields
return p.verifyCascadingFields(chain, header, parents)
}
// verifyCascadingFields verifies all the header fields that are not standalone,
// rather depend on a batch of previous headers. The caller may optionally pass
// in a batch of parents (ascending order) to avoid looking those up from the
// database. This is useful for concurrently verifying a batch of new headers.
func (p *Parlia) verifyCascadingFields(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
// The genesis block is the always valid dead-end
number := header.Number.Uint64()
if number == 0 {
return nil
}
parent, err := p.getParent(chain, header, parents)
if err != nil {
return err
}
snap, err := p.snapshot(chain, number-1, header.ParentHash, parents)
if err != nil {
return err
}
err = p.blockTimeVerifyForRamanujanFork(snap, header, parent)
if err != nil {
return err
}
// Verify that the gas limit is <= 2^63-1
capacity := uint64(0x7fffffffffffffff)
if header.GasLimit > capacity {
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, capacity)
}
// Verify that the gasUsed is <= gasLimit
if header.GasUsed > header.GasLimit {
return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit)
}
// Verify that the gas limit remains within allowed bounds
diff := int64(parent.GasLimit) - int64(header.GasLimit)
if diff < 0 {
diff *= -1
}
limit := parent.GasLimit / params.GasLimitBoundDivisor
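// For example, with go-ethereum's GasLimitBoundDivisor of 1024, a parent gas limit of 140,000,000 allows the
// child's gas limit to differ by at most 136,717 (the diff must stay strictly below 140,000,000/1024 = 136,718).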
if uint64(diff) >= limit || header.GasLimit < params.MinGasLimit {
return fmt.Errorf("invalid gas limit: have %d, want %d += %d", header.GasLimit, parent.GasLimit, limit-1)
}
// Verify vote attestation for fast finality.
if err := p.verifyVoteAttestation(chain, header, parents); err != nil {
log.Warn("Verify vote attestation failed", "error", err, "hash", header.Hash(), "number", header.Number,
"parent", header.ParentHash, "coinbase", header.Coinbase, "extra", common.Bytes2Hex(header.Extra))
verifyVoteAttestationErrorCounter.Inc(1)
if chain.Config().IsPlato(header.Number) {
return err
}
}
// All basic checks passed, verify the seal and return
return p.verifySeal(chain, header, parents)
}
// snapshot retrieves the authorization snapshot at a given point in time.
// !!! be careful
// the block with `number` and `hash` is just the last element of `parents`,
// unlike other interfaces such as verifyCascadingFields, where `parents` are the real parent headers
func (p *Parlia) snapshot(chain consensus.ChainHeaderReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) {
// Search for a snapshot in memory or on disk for checkpoints
var (
headers []*types.Header
snap *Snapshot
)
for snap == nil {
// If an in-memory snapshot was found, use that
if s, ok := p.recentSnaps.Get(hash); ok {
snap = s.(*Snapshot)
break
}
// If an on-disk checkpoint snapshot can be found, use that
if number%checkpointInterval == 0 {
if s, err := loadSnapshot(p.config, p.signatures, p.db, hash, p.ethAPI); err == nil {
log.Trace("Loaded snapshot from disk", "number", number, "hash", hash)
snap = s
break
}
}
// If we're at the genesis, snapshot the initial state. Alternatively if we have
// piled up more headers than allowed to be reorged (chain reinit from a freezer),
// consider the checkpoint trusted and snapshot it.
if number == 0 || (number%p.config.Epoch == 0 && (len(headers) > params.FullImmutabilityThreshold/10)) {
checkpoint := chain.GetHeaderByNumber(number)
if checkpoint != nil {
// get checkpoint data
hash := checkpoint.Hash()
// get validators from headers
validators, voteAddrs, err := parseValidators(checkpoint, p.chainConfig, p.config)
if err != nil {
return nil, err
}
// new snapshot
snap = newSnapshot(p.config, p.signatures, number, hash, validators, voteAddrs, p.ethAPI)
if snap.Number%checkpointInterval == 0 { // snapshot will only be loaded when snap.Number%checkpointInterval == 0
if err := snap.store(p.db); err != nil {
return nil, err
}
log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", hash)
}
break
}
}
// No snapshot for this header, gather the header and move backward
var header *types.Header
if len(parents) > 0 {
// If we have explicit parents, pick from there (enforced)
header = parents[len(parents)-1]
if header.Hash() != hash || header.Number.Uint64() != number {
return nil, consensus.ErrUnknownAncestor
}
parents = parents[:len(parents)-1]
} else {
// No explicit parents (or no more left), reach out to the database
header = chain.GetHeader(hash, number)
if header == nil {
return nil, consensus.ErrUnknownAncestor
}
}
headers = append(headers, header)
number, hash = number-1, header.ParentHash
}
// check if snapshot is nil
if snap == nil {
return nil, fmt.Errorf("unknown error while retrieving snapshot at block number %v", number)
}
// Previous snapshot found, apply any pending headers on top of it
for i := 0; i < len(headers)/2; i++ {
headers[i], headers[len(headers)-1-i] = headers[len(headers)-1-i], headers[i]
}
snap, err := snap.apply(headers, chain, parents, p.chainConfig)
if err != nil {
return nil, err
}
p.recentSnaps.Add(snap.Hash, snap)
// If we've generated a new checkpoint snapshot, save to disk
if snap.Number%checkpointInterval == 0 && len(headers) > 0 {
if err = snap.store(p.db); err != nil {
return nil, err
}
log.Trace("Stored snapshot to disk", "number", snap.Number, "hash", snap.Hash)
}
return snap, err
}
// VerifyUncles implements consensus.Engine, always returning an error for any
// uncles as this consensus mechanism doesn't permit uncles.
func (p *Parlia) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
if len(block.Uncles()) > 0 {
return errors.New("uncles not allowed")
}
return nil
}
// VerifySeal implements consensus.Engine, checking whether the signature contained
// in the header satisfies the consensus protocol requirements.
func (p *Parlia) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
return p.verifySeal(chain, header, nil)
}
// verifySeal checks whether the signature contained in the header satisfies the
// consensus protocol requirements. The method accepts an optional list of parent
// headers that aren't yet part of the local blockchain to generate the snapshots
// from.
func (p *Parlia) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
// Verifying the genesis block is not supported
number := header.Number.Uint64()
if number == 0 {
return errUnknownBlock
}
// Retrieve the snapshot needed to verify this header and cache it
snap, err := p.snapshot(chain, number-1, header.ParentHash, parents)
if err != nil {
return err
}
// Resolve the authorization key and check against validators
signer, err := ecrecover(header, p.signatures, p.chainConfig.ChainID)
if err != nil {
return err
}
if signer != header.Coinbase {
return errCoinBaseMisMatch
}
if _, ok := snap.Validators[signer]; !ok {
return errUnauthorizedValidator(signer.String())
}
if snap.SignRecently(signer) {
return errRecentlySigned
}
// Ensure that the difficulty corresponds to the turn-ness of the signer
if !p.fakeDiff {
inturn := snap.inturn(signer)
if inturn && header.Difficulty.Cmp(diffInTurn) != 0 {
return errWrongDifficulty
}
if !inturn && header.Difficulty.Cmp(diffNoTurn) != 0 {
return errWrongDifficulty
}
}
return nil
}
func (p *Parlia) prepareValidators(header *types.Header) error {
if header.Number.Uint64()%p.config.Epoch != 0 {
return nil
}
newValidators, voteAddressMap, err := p.getCurrentValidators(header.ParentHash, new(big.Int).Sub(header.Number, big.NewInt(1)))
if err != nil {
return err
}
// sort validators by address
sort.Sort(validatorsAscending(newValidators))
if !p.chainConfig.IsLuban(header.Number) {
for _, validator := range newValidators {
header.Extra = append(header.Extra, validator.Bytes()...)
}
} else {
header.Extra = append(header.Extra, byte(len(newValidators)))
if p.chainConfig.IsOnLuban(header.Number) {
voteAddressMap = make(map[common.Address]*types.BLSPublicKey, len(newValidators))
var zeroBlsKey types.BLSPublicKey
for _, validator := range newValidators {
voteAddressMap[validator] = &zeroBlsKey
}
}
for _, validator := range newValidators {
header.Extra = append(header.Extra, validator.Bytes()...)
header.Extra = append(header.Extra, voteAddressMap[validator].Bytes()...)
}
}
return nil
}
func (p *Parlia) assembleVoteAttestation(chain consensus.ChainHeaderReader, header *types.Header) error {
if !p.chainConfig.IsLuban(header.Number) || header.Number.Uint64() < 2 {
return nil
}
if p.VotePool == nil {
return nil
}
// Fetch direct parent's votes
parent := chain.GetHeaderByHash(header.ParentHash)
if parent == nil {
return errors.New("parent not found")
}
snap, err := p.snapshot(chain, parent.Number.Uint64()-1, parent.ParentHash, nil)
if err != nil {
return err
}
votes := p.VotePool.FetchVoteByBlockHash(parent.Hash())
if len(votes) < cmath.CeilDiv(len(snap.Validators)*2, 3) {
return nil
}
// Prepare vote attestation
// Prepare vote data
justifiedBlockNumber, justifiedBlockHash, err := p.GetJustifiedNumberAndHash(chain, parent)
if err != nil {
return fmt.Errorf("unexpected error when getting the highest justified number and hash")
}
attestation := &types.VoteAttestation{
Data: &types.VoteData{
SourceNumber: justifiedBlockNumber,
SourceHash: justifiedBlockHash,
TargetNumber: parent.Number.Uint64(),
TargetHash: parent.Hash(),
},
}
// Check vote data from votes
for _, vote := range votes {
if vote.Data.Hash() != attestation.Data.Hash() {
return fmt.Errorf("vote check error, expected: %v, real: %v", attestation.Data, vote)
}
}
// Prepare aggregated vote signature
voteAddrSet := make(map[types.BLSPublicKey]struct{}, len(votes))
signatures := make([][]byte, 0, len(votes))
for _, vote := range votes {
voteAddrSet[vote.VoteAddress] = struct{}{}
signatures = append(signatures, vote.Signature[:])
}
sigs, err := bls.MultipleSignaturesFromBytes(signatures)
if err != nil {
return err
}
copy(attestation.AggSignature[:], bls.AggregateSignatures(sigs).Marshal())
// Prepare vote address bitset.
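// Validator Index is 1-based, so validator i sets bit i-1; e.g. votes from validators with Index 1, 3 and 4
// produce the bitset 0b1101 = 13.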
for _, valInfo := range snap.Validators {
if _, ok := voteAddrSet[valInfo.VoteAddress]; ok {
attestation.VoteAddressSet |= 1 << (valInfo.Index - 1) // Index is offset by 1
}
}
validatorsBitSet := bitset.From([]uint64{uint64(attestation.VoteAddressSet)})
if validatorsBitSet.Count() < uint(len(signatures)) {
log.Warn(fmt.Sprintf("assembleVoteAttestation, check VoteAddress Set failed, expected:%d, real:%d", len(signatures), validatorsBitSet.Count()))
return fmt.Errorf("invalid attestation, check VoteAddress Set failed")
}
// Append attestation to header extra field.
buf := new(bytes.Buffer)
err = rlp.Encode(buf, attestation)
if err != nil {
return err
}
// Insert the vote attestation into the header extra, ahead of the extra seal.
extraSealStart := len(header.Extra) - extraSeal
extraSealBytes := header.Extra[extraSealStart:]
header.Extra = append(header.Extra[0:extraSealStart], buf.Bytes()...)
header.Extra = append(header.Extra, extraSealBytes...)
return nil
}
// Prepare implements consensus.Engine, preparing all the consensus fields of the
// header for running the transactions on top.
func (p *Parlia) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
header.Coinbase = p.val
header.Nonce = types.BlockNonce{}
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
return err
}
// Set the correct difficulty
header.Difficulty = CalcDifficulty(snap, p.val)
// Ensure the extra data has all its components
if len(header.Extra) < extraVanity-nextForkHashSize {
header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, extraVanity-nextForkHashSize-len(header.Extra))...)
}
// Ensure the timestamp has the correct delay
parent := chain.GetHeader(header.ParentHash, number-1)
if parent == nil {
return consensus.ErrUnknownAncestor
}
header.Time = p.blockTimeForRamanujanFork(snap, header, parent)
if header.Time < uint64(time.Now().Unix()) {
header.Time = uint64(time.Now().Unix())
}
header.Extra = header.Extra[:extraVanity-nextForkHashSize]
nextForkHash := forkid.NextForkHash(p.chainConfig, p.genesisHash, number, header.Time)
header.Extra = append(header.Extra, nextForkHash[:]...)
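// The 32-byte vanity thus ends up as 28 bytes of (zero-padded) client vanity followed by the 4-byte nextForkHash.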
if err := p.prepareValidators(header); err != nil {
return err
}
// add extra seal space
header.Extra = append(header.Extra, make([]byte, extraSeal)...)
// Mix digest is reserved for now, set to empty
header.MixDigest = common.Hash{}
return nil
}
func (p *Parlia) verifyValidators(header *types.Header) error {
if header.Number.Uint64()%p.config.Epoch != 0 {
return nil
}
newValidators, voteAddressMap, err := p.getCurrentValidators(header.ParentHash, new(big.Int).Sub(header.Number, big.NewInt(1)))
if err != nil {
return err
}
// sort validators by address
sort.Sort(validatorsAscending(newValidators))
var validatorsBytes []byte
validatorsNumber := len(newValidators)
if !p.chainConfig.IsLuban(header.Number) {
validatorsBytes = make([]byte, validatorsNumber*validatorBytesLengthBeforeLuban)
for i, validator := range newValidators {
copy(validatorsBytes[i*validatorBytesLengthBeforeLuban:], validator.Bytes())
}
} else {
if uint8(validatorsNumber) != header.Extra[extraVanity] {
return errMismatchingEpochValidators
}
validatorsBytes = make([]byte, validatorsNumber*validatorBytesLength)
if p.chainConfig.IsOnLuban(header.Number) {
voteAddressMap = make(map[common.Address]*types.BLSPublicKey, len(newValidators))
var zeroBlsKey types.BLSPublicKey
for _, validator := range newValidators {
voteAddressMap[validator] = &zeroBlsKey
}
}
for i, validator := range newValidators {
copy(validatorsBytes[i*validatorBytesLength:], validator.Bytes())
copy(validatorsBytes[i*validatorBytesLength+common.AddressLength:], voteAddressMap[validator].Bytes())
}
}
if !bytes.Equal(getValidatorBytesFromHeader(header, p.chainConfig, p.config), validatorsBytes) {
return errMismatchingEpochValidators
}
return nil
}
func (p *Parlia) distributeFinalityReward(chain consensus.ChainHeaderReader, state *state.StateDB, header *types.Header,
cx core.ChainContext, txs *[]*types.Transaction, receipts *[]*types.Receipt, systemTxs *[]*types.Transaction,
usedGas *uint64, mining bool) error {
currentHeight := header.Number.Uint64()
epoch := p.config.Epoch
chainConfig := chain.Config()
if currentHeight%epoch != 0 {
return nil
}
head := header
accumulatedWeights := make(map[common.Address]uint64)
for height := currentHeight - 1; height+epoch >= currentHeight && height >= 1; height-- {
head = chain.GetHeaderByHash(head.ParentHash)
if head == nil {
return fmt.Errorf("header is nil at height %d", height)
}
voteAttestation, err := getVoteAttestationFromHeader(head, chainConfig, p.config)
if err != nil {
return err
}
if voteAttestation == nil {
continue
}
justifiedBlock := chain.GetHeaderByHash(voteAttestation.Data.TargetHash)
if justifiedBlock == nil {
log.Warn("justifiedBlock is nil at height %d", voteAttestation.Data.TargetNumber)
continue
}
snap, err := p.snapshot(chain, justifiedBlock.Number.Uint64()-1, justifiedBlock.ParentHash, nil)
if err != nil {
return err
}
validators := snap.validators()
validatorsBitSet := bitset.From([]uint64{uint64(voteAttestation.VoteAddressSet)})
if validatorsBitSet.Count() > uint(len(validators)) {
log.Error("invalid attestation, vote number larger than validators number")
continue
}
validVoteCount := 0
for index, val := range validators {
if validatorsBitSet.Test(uint(index)) {
accumulatedWeights[val] += 1
validVoteCount += 1
}
}
quorum := cmath.CeilDiv(len(snap.Validators)*2, 3)
if validVoteCount > quorum {
accumulatedWeights[head.Coinbase] += uint64((validVoteCount - quorum) * collectAdditionalVotesRewardRatio / 100)
}
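// e.g. with 21 validators (quorum 14) and 18 valid votes collected, the proposer of `head` earns an extra
// (18-14)*collectAdditionalVotesRewardRatio/100 = 4 weight on top of the per-vote weights accumulated above.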
}
validators := make([]common.Address, 0, len(accumulatedWeights))
weights := make([]*big.Int, 0, len(accumulatedWeights))
for val := range accumulatedWeights {
validators = append(validators, val)
}
sort.Sort(validatorsAscending(validators))
for _, val := range validators {
weights = append(weights, big.NewInt(int64(accumulatedWeights[val])))
}
// generate system transaction
method := "distributeFinalityReward"
data, err := p.validatorSetABI.Pack(method, validators, weights)
if err != nil {
log.Error("Unable to pack tx for distributeFinalityReward", "error", err)
return err
}
msg := p.getSystemMessage(header.Coinbase, common.HexToAddress(systemcontracts.ValidatorContract), data, common.Big0)
return p.applyTransaction(msg, state, header, cx, txs, receipts, systemTxs, usedGas, mining)
}
// Finalize implements consensus.Engine, ensuring no uncles are set, nor block
// rewards given.
func (p *Parlia) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs *[]*types.Transaction,
uncles []*types.Header, _ []*types.Withdrawal, receipts *[]*types.Receipt, systemTxs *[]*types.Transaction, usedGas *uint64) error {
// warn if not in majority fork
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
return err
}
nextForkHash := forkid.NextForkHash(p.chainConfig, p.genesisHash, number, header.Time)
if !snap.isMajorityFork(hex.EncodeToString(nextForkHash[:])) {
log.Debug("there is a possible fork, and your client is not the majority. Please check...", "nextForkHash", hex.EncodeToString(nextForkHash[:]))
}
// If the block is an epoch end block, verify the validator list
// The verification can only be done when the state is ready; it can't be done in VerifyHeader.
if err := p.verifyValidators(header); err != nil {
return err
}
cx := chainContext{Chain: chain, parlia: p}
// No block rewards in PoA, so the state remains as is and uncles are dropped
if header.Number.Cmp(common.Big1) == 0 {
err := p.initContract(state, header, cx, txs, receipts, systemTxs, usedGas, false)
if err != nil {
log.Error("init contract failed")
}
}
if header.Difficulty.Cmp(diffInTurn) != 0 {
spoiledVal := snap.supposeValidator()
signedRecently := false
if p.chainConfig.IsPlato(header.Number) {
signedRecently = snap.SignRecently(spoiledVal)
} else {
for _, recent := range snap.Recents {
if recent == spoiledVal {
signedRecently = true
break
}
}
}
if !signedRecently {
log.Trace("slash validator", "block hash", header.Hash(), "address", spoiledVal)
err = p.slash(spoiledVal, state, header, cx, txs, receipts, systemTxs, usedGas, false)
if err != nil {
// it is possible that slashing the validator failed because the slash channel is disabled
log.Error("slash validator failed", "block hash", header.Hash(), "address", spoiledVal)
}
}
}
val := header.Coinbase
err = p.distributeIncoming(val, state, header, cx, txs, receipts, systemTxs, usedGas, false)
if err != nil {
return err
}
if p.chainConfig.IsPlato(header.Number) {
if err := p.distributeFinalityReward(chain, state, header, cx, txs, receipts, systemTxs, usedGas, false); err != nil {
return err
}
}
if len(*systemTxs) > 0 {
return errors.New("the length of systemTxs do not match")
}
return nil
}
// FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set,
// nor block rewards given, and returns the final block.
func (p *Parlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB,
txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt, _ []*types.Withdrawal) (*types.Block, []*types.Receipt, error) {
// No block rewards in PoA, so the state remains as is and uncles are dropped
cx := chainContext{Chain: chain, parlia: p}
if txs == nil {
txs = make([]*types.Transaction, 0)
}
if receipts == nil {
receipts = make([]*types.Receipt, 0)
}
if header.Number.Cmp(common.Big1) == 0 {
err := p.initContract(state, header, cx, &txs, &receipts, nil, &header.GasUsed, true)
if err != nil {
log.Error("init contract failed")
}
}
if header.Difficulty.Cmp(diffInTurn) != 0 {
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
return nil, nil, err
}
spoiledVal := snap.supposeValidator()
signedRecently := false
if p.chainConfig.IsPlato(header.Number) {
signedRecently = snap.SignRecently(spoiledVal)
} else {
for _, recent := range snap.Recents {
if recent == spoiledVal {
signedRecently = true
break
}
}
}
if !signedRecently {
err = p.slash(spoiledVal, state, header, cx, &txs, &receipts, nil, &header.GasUsed, true)
if err != nil {
// it is possible that slashing the validator failed because the slash channel is disabled.
log.Error("slash validator failed", "block hash", header.Hash(), "address", spoiledVal)
}
}
}
err := p.distributeIncoming(p.val, state, header, cx, &txs, &receipts, nil, &header.GasUsed, true)
if err != nil {
return nil, nil, err
}
if p.chainConfig.IsPlato(header.Number) {
if err := p.distributeFinalityReward(chain, state, header, cx, &txs, &receipts, nil, &header.GasUsed, true); err != nil {
return nil, nil, err
}
}
// should not happen. If it does, stopping the node is better than broadcasting the block
if header.GasLimit < header.GasUsed {
return nil, nil, errors.New("gas consumption of system txs exceed the gas limit")
}
header.UncleHash = types.CalcUncleHash(nil)
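// Compute the state root and assemble the block body concurrently; the computed root is then set on the assembled block.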
var blk *types.Block
var rootHash common.Hash
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
rootHash = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
wg.Done()
}()
go func() {
blk = types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil))
wg.Done()
}()
wg.Wait()
blk.SetRoot(rootHash)
// Assemble and return the final block for sealing
return blk, receipts, nil
}
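// IsActiveValidatorAt reports whether this node's validator address is in the validator set of the snapshot
// at the parent of the given header; if checkVoteKeyFn is non-nil, the validator's registered vote address
// must also pass the check.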
func (p *Parlia) IsActiveValidatorAt(chain consensus.ChainHeaderReader, header *types.Header, checkVoteKeyFn func(bLSPublicKey *types.BLSPublicKey) bool) bool {
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
log.Error("failed to get the snapshot from consensus", "error", err)
return false
}
validators := snap.Validators
validatorInfo, ok := validators[p.val]
return ok && (checkVoteKeyFn == nil || (validatorInfo != nil && checkVoteKeyFn(&validatorInfo.VoteAddress)))
}
// VerifyVote will verify: 1. If the vote comes from valid validators 2. If the vote's sourceNumber and sourceHash are correct
func (p *Parlia) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteEnvelope) error {
targetNumber := vote.Data.TargetNumber
targetHash := vote.Data.TargetHash
header := chain.GetHeaderByHash(targetHash)
if header == nil {
log.Warn("BlockHeader at current voteBlockNumber is nil", "targetNumber", targetNumber, "targetHash", targetHash)
return fmt.Errorf("BlockHeader at current voteBlockNumber is nil")
}
if header.Number.Uint64() != targetNumber {
log.Warn("unexpected target number", "expect", header.Number.Uint64(), "real", targetNumber)
return fmt.Errorf("target number mismatch")
}
justifiedBlockNumber, justifiedBlockHash, err := p.GetJustifiedNumberAndHash(chain, header)
if err != nil {
log.Error("failed to get the highest justified number and hash", "headerNumber", header.Number, "headerHash", header.Hash())
return fmt.Errorf("unexpected error when getting the highest justified number and hash")
}
if vote.Data.SourceNumber != justifiedBlockNumber || vote.Data.SourceHash != justifiedBlockHash {
return fmt.Errorf("vote source block mismatch")
}
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
log.Error("failed to get the snapshot from consensus", "error", err)
return fmt.Errorf("failed to get the snapshot from consensus")
}
validators := snap.Validators
voteAddress := vote.VoteAddress
for addr, validator := range validators {
if validator.VoteAddress == voteAddress {
if addr == p.val {
validVotesfromSelfCounter.Inc(1)
}
metrics.GetOrRegisterCounter(fmt.Sprintf("parlia/VerifyVote/%s", addr.String()), nil).Inc(1)
return nil
}
}
return fmt.Errorf("vote verification failed")
}
// Authorize injects a private key into the consensus engine to mint new blocks
// with.
func (p *Parlia) Authorize(val common.Address, signFn SignerFn, signTxFn SignerTxFn) {
p.lock.Lock()
defer p.lock.Unlock()
p.val = val
p.signFn = signFn
p.signTxFn = signTxFn
}
// Delay returns the time to wait before sealing. The leftOver argument is the time reserved for block finalization (calculating the state root, distributing income, etc.).
func (p *Parlia) Delay(chain consensus.ChainReader, header *types.Header, leftOver *time.Duration) *time.Duration {
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
return nil
}
delay := p.delayForRamanujanFork(snap, header)
if *leftOver >= time.Duration(p.config.Period)*time.Second {
// ignore invalid leftOver
log.Error("Delay invalid argument", "leftOver", leftOver.String(), "Period", p.config.Period)
} else if *leftOver >= delay {
delay = time.Duration(0)
return &delay
} else {
delay = delay - *leftOver
}
// The blocking time should be no more than half of the period
half := time.Duration(p.config.Period) * time.Second / 2
if delay > half {
delay = half
}
return &delay
}
// Seal implements consensus.Engine, attempting to create a sealed block using
// the local signing credentials.
func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
header := block.Header()
// Sealing the genesis block is not supported
number := header.Number.Uint64()
if number == 0 {
return errUnknownBlock
}
// For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing)
if p.config.Period == 0 && len(block.Transactions()) == 0 {
log.Info("Sealing paused, waiting for transactions")
return nil
}
// Don't hold the val fields for the entire sealing procedure
p.lock.RLock()
val, signFn := p.val, p.signFn
p.lock.RUnlock()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
return err
}
// Bail out if we're unauthorized to sign a block
if _, authorized := snap.Validators[val]; !authorized {
return errUnauthorizedValidator(val.String())
}
// If we're amongst the recent signers, wait for the next block
if snap.SignRecently(val) {
log.Info("Signed recently, must wait for others")
return nil
}
// Sweet, the protocol permits us to sign the block, wait for our time
delay := p.delayForRamanujanFork(snap, header)
log.Info("Sealing block with", "number", number, "delay", delay, "headerDifficulty", header.Difficulty, "val", val.Hex())
// Wait until sealing is terminated or delay timeout.
log.Trace("Waiting for slot to sign and propagate", "delay", common.PrettyDuration(delay))
go func() {
select {
case <-stop:
return
case <-time.After(delay):
}
err := p.assembleVoteAttestation(chain, header)
if err != nil {
/* If the vote attestation can't be assembled successfully, the blockchain won't get
fast finalized, but it can be tolerated, so just report this error here. */
log.Error("Assemble vote attestation failed when sealing", "err", err)
}
// Sign all the things!
sig, err := signFn(accounts.Account{Address: val}, accounts.MimetypeParlia, ParliaRLP(header, p.chainConfig.ChainID))
if err != nil {
log.Error("Sign for the block header failed when sealing", "err", err)
return
}
copy(header.Extra[len(header.Extra)-extraSeal:], sig)
if p.shouldWaitForCurrentBlockProcess(chain, header, snap) {
log.Info("Waiting for received in turn block to process")
select {
case <-stop:
log.Info("Received block process finished, abort block seal")
return
case <-time.After(time.Duration(processBackOffTime) * time.Second):
if chain.CurrentHeader().Number.Uint64() >= header.Number.Uint64() {
log.Info("Process backoff time exhausted, and current header has updated to abort this seal")
return
}
log.Info("Process backoff time exhausted, start to seal block")
}
}
select {
case results <- block.WithSeal(header):
default:
log.Warn("Sealing result is not read by miner", "sealhash", SealHash(header, p.chainConfig.ChainID))
}
}()
return nil
}
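// shouldWaitForCurrentBlockProcess reports whether an out-of-turn sealer should wait because the highest
// verified header shares the same parent as the block being sealed, i.e. the in-turn block is still being processed.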
func (p *Parlia) shouldWaitForCurrentBlockProcess(chain consensus.ChainHeaderReader, header *types.Header, snap *Snapshot) bool {
if header.Difficulty.Cmp(diffInTurn) == 0 {
return false
}
highestVerifiedHeader := chain.GetHighestVerifiedHeader()
if highestVerifiedHeader == nil {
return false
}
if header.ParentHash == highestVerifiedHeader.ParentHash {
return true
}
return false
}
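// EnoughDistance reports whether, according to the snapshot's enoughDistance rule, this validator is far
// enough away to act on the given header; it defaults to true when the snapshot cannot be loaded.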
func (p *Parlia) EnoughDistance(chain consensus.ChainReader, header *types.Header) bool {
snap, err := p.snapshot(chain, header.Number.Uint64()-1, header.ParentHash, nil)
if err != nil {
return true
}
return snap.enoughDistance(p.val, header)
}
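// IsLocalBlock reports whether the given header was sealed by this node's validator address.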
func (p *Parlia) IsLocalBlock(header *types.Header) bool {
return p.val == header.Coinbase
}
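// SignRecently reports whether this validator has signed recently as of the given parent block;
// it also returns true when the snapshot cannot be loaded or the validator is not authorized.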
func (p *Parlia) SignRecently(chain consensus.ChainReader, parent *types.Block) (bool, error) {
snap, err := p.snapshot(chain, parent.NumberU64(), parent.Hash(), nil)
if err != nil {
return true, err
}
// Bail out if we're unauthorized to sign a block
if _, authorized := snap.Validators[p.val]; !authorized {
return true, errUnauthorizedValidator(p.val.String())
}
return snap.SignRecently(p.val), nil
}
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have based on the previous blocks in the chain and the
// current signer.
func (p *Parlia) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
snap, err := p.snapshot(chain, parent.Number.Uint64(), parent.Hash(), nil)
if err != nil {
return nil
}
return CalcDifficulty(snap, p.val)
}
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have based on the previous blocks in the chain and the
// current signer.
func CalcDifficulty(snap *Snapshot, signer common.Address) *big.Int {
if snap.inturn(signer) {
return new(big.Int).Set(diffInTurn)
}
return new(big.Int).Set(diffNoTurn)
}
// SealHash returns the hash of a block without the vote attestation, prior to it being sealed.
// It is not the real hash of the block; it is only used as a unique id to distinguish sealing tasks.
func (p *Parlia) SealHash(header *types.Header) (hash common.Hash) {
hasher := sha3.NewLegacyKeccak256()
encodeSigHeaderWithoutVoteAttestation(hasher, header, p.chainConfig.ChainID)
hasher.Sum(hash[:0])
return hash
}
// APIs implements consensus.Engine, returning the user-facing RPC APIs to query the snapshot.
func (p *Parlia) APIs(chain consensus.ChainHeaderReader) []rpc.API {
return []rpc.API{{
Namespace: "parlia",
Version: "1.0",
Service: &API{chain: chain, parlia: p},
Public: false,
}}
}
// Close implements consensus.Engine. It's a noop for parlia as there are no background threads.
func (p *Parlia) Close() error {
return nil
}
// ==================== interaction with contract/account ====================
// getCurrentValidators gets the current validator set along with their BLS vote addresses
func (p *Parlia) getCurrentValidators(blockHash common.Hash, blockNum *big.Int) ([]common.Address, map[common.Address]*types.BLSPublicKey, error) {
// block
blockNr := rpc.BlockNumberOrHashWithHash(blockHash, false)
if !p.chainConfig.IsLuban(blockNum) {
validators, err := p.getCurrentValidatorsBeforeLuban(blockHash, blockNum)
return validators, nil, err
}
// method
method := "getMiningValidators"
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // cancel when we are finished with the call
data, err := p.validatorSetABI.Pack(method)
if err != nil {
log.Error("Unable to pack tx for getMiningValidators", "error", err)
return nil, nil, err
}
// call
msgData := (hexutil.Bytes)(data)
toAddress := common.HexToAddress(systemcontracts.ValidatorContract)
gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2))
result, err := p.ethAPI.Call(ctx, ethapi.TransactionArgs{
Gas: &gas,
To: &toAddress,
Data: &msgData,
}, blockNr, nil, nil)
if err != nil {
return nil, nil, err
}
var valSet []common.Address
var voteAddrSet []types.BLSPublicKey
if err := p.validatorSetABI.UnpackIntoInterface(&[]interface{}{&valSet, &voteAddrSet}, method, result); err != nil {
return nil, nil, err
}
voteAddrmap := make(map[common.Address]*types.BLSPublicKey, len(valSet))
for i := 0; i < len(valSet); i++ {
voteAddrmap[valSet[i]] = &(voteAddrSet)[i]
}
return valSet, voteAddrmap, nil
}
// distributeIncoming distributes the gas fees accumulated on the system address to the system reward pool and the validator contract
func (p *Parlia) distributeIncoming(val common.Address, state *state.StateDB, header *types.Header, chain core.ChainContext,
txs *[]*types.Transaction, receipts *[]*types.Receipt, receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool) error {
coinbase := header.Coinbase
balance := state.GetBalance(consensus.SystemAddress)
if balance.Cmp(common.Big0) <= 0 {
return nil
}
state.SetBalance(consensus.SystemAddress, big.NewInt(0))
state.AddBalance(coinbase, balance)
doDistributeSysReward := !p.chainConfig.IsKepler(header.Number, header.Time) &&
state.GetBalance(common.HexToAddress(systemcontracts.SystemRewardContract)).Cmp(maxSystemBalance) < 0
if doDistributeSysReward {
var rewards = new(big.Int)
rewards = rewards.Rsh(balance, systemRewardPercent)
if rewards.Cmp(common.Big0) > 0 {
err := p.distributeToSystem(rewards, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
if err != nil {
return err
}
log.Trace("distribute to system reward pool", "block hash", header.Hash(), "amount", rewards)
balance = balance.Sub(balance, rewards)
}
}
log.Trace("distribute to validator contract", "block hash", header.Hash(), "amount", balance)
return p.distributeToValidator(balance, val, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
}
// slash spoiled validators
func (p *Parlia) slash(spoiledVal common.Address, state *state.StateDB, header *types.Header, chain core.ChainContext,
txs *[]*types.Transaction, receipts *[]*types.Receipt, receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool) error {
// method
method := "slash"
// get packed data
data, err := p.slashABI.Pack(method,
spoiledVal,
)
if err != nil {
log.Error("Unable to pack tx for slash", "error", err)
return err
}
// get system message
msg := p.getSystemMessage(header.Coinbase, common.HexToAddress(systemcontracts.SlashContract), data, common.Big0)
// apply message
return p.applyTransaction(msg, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
}
// init contract
func (p *Parlia) initContract(state *state.StateDB, header *types.Header, chain core.ChainContext,
txs *[]*types.Transaction, receipts *[]*types.Receipt, receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool) error {
// method
method := "init"
// contracts
contracts := []string{
systemcontracts.ValidatorContract,
systemcontracts.SlashContract,
systemcontracts.LightClientContract,
systemcontracts.RelayerHubContract,
systemcontracts.TokenHubContract,
systemcontracts.RelayerIncentivizeContract,
systemcontracts.CrossChainContract,
}
// get packed data
data, err := p.validatorSetABI.Pack(method)
if err != nil {
log.Error("Unable to pack tx for init validator set", "error", err)
return err
}
for _, c := range contracts {
msg := p.getSystemMessage(header.Coinbase, common.HexToAddress(c), data, common.Big0)
// apply message
log.Trace("init contract", "block hash", header.Hash(), "contract", c)
err = p.applyTransaction(msg, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
if err != nil {
return err
}
}
return nil
}
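// distributeToSystem transfers the given amount to the system reward contract via a system transaction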
func (p *Parlia) distributeToSystem(amount *big.Int, state *state.StateDB, header *types.Header, chain core.ChainContext,
txs *[]*types.Transaction, receipts *[]*types.Receipt, receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool) error {
// get system message
msg := p.getSystemMessage(header.Coinbase, common.HexToAddress(systemcontracts.SystemRewardContract), nil, amount)
// apply message
return p.applyTransaction(msg, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
}
// distributeToValidator deposits the given amount to the validator contract on behalf of the validator
func (p *Parlia) distributeToValidator(amount *big.Int, validator common.Address,
state *state.StateDB, header *types.Header, chain core.ChainContext,
txs *[]*types.Transaction, receipts *[]*types.Receipt, receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool) error {
// method
method := "deposit"
// get packed data
data, err := p.validatorSetABI.Pack(method,
validator,
)
if err != nil {
log.Error("Unable to pack tx for deposit", "error", err)
return err
}
// get system message
msg := p.getSystemMessage(header.Coinbase, common.HexToAddress(systemcontracts.ValidatorContract), data, amount)
// apply message
return p.applyTransaction(msg, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
}
// get system message
func (p *Parlia) getSystemMessage(from, toAddress common.Address, data []byte, value *big.Int) callmsg {
return callmsg{
ethereum.CallMsg{
From: from,
Gas: math.MaxUint64 / 2,
GasPrice: big.NewInt(0),
Value: value,
To: &toAddress,
Data: data,
},
}
}
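// applyTransaction executes a system transaction. When mining, the expected transaction is signed locally;
// otherwise it must match the next transaction in receivedTxs (same hash), which is then consumed.
// The resulting receipt is appended to receipts and the sender's nonce is bumped manually.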
func (p *Parlia) applyTransaction(
msg callmsg,
state *state.StateDB,
header *types.Header,
chainContext core.ChainContext,
txs *[]*types.Transaction, receipts *[]*types.Receipt,
receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool,
) (err error) {
nonce := state.GetNonce(msg.From())
expectedTx := types.NewTransaction(nonce, *msg.To(), msg.Value(), msg.Gas(), msg.GasPrice(), msg.Data())
expectedHash := p.signer.Hash(expectedTx)
if msg.From() == p.val && mining {
expectedTx, err = p.signTxFn(accounts.Account{Address: msg.From()}, expectedTx, p.chainConfig.ChainID)
if err != nil {
return err
}
} else {
if receivedTxs == nil || len(*receivedTxs) == 0 || (*receivedTxs)[0] == nil {
return errors.New("supposed to get a actual transaction, but get none")
}
actualTx := (*receivedTxs)[0]
if !bytes.Equal(p.signer.Hash(actualTx).Bytes(), expectedHash.Bytes()) {
return fmt.Errorf("expected tx hash %v, got %v, nonce %d, to %s, value %s, gas %d, gasPrice %s, data %s", expectedHash.String(), actualTx.Hash().String(),
expectedTx.Nonce(),
expectedTx.To().String(),
expectedTx.Value().String(),
expectedTx.Gas(),
expectedTx.GasPrice().String(),
hex.EncodeToString(expectedTx.Data()),
)
}
expectedTx = actualTx
// move to next
*receivedTxs = (*receivedTxs)[1:]
}
state.SetTxContext(expectedTx.Hash(), len(*txs))
gasUsed, err := applyMessage(msg, state, header, p.chainConfig, chainContext)
if err != nil {
return err
}
*txs = append(*txs, expectedTx)
var root []byte
if p.chainConfig.IsByzantium(header.Number) {
state.Finalise(true)
} else {
root = state.IntermediateRoot(p.chainConfig.IsEIP158(header.Number)).Bytes()
}
*usedGas += gasUsed
receipt := types.NewReceipt(root, false, *usedGas)
receipt.TxHash = expectedTx.Hash()
receipt.GasUsed = gasUsed
// Set the receipt logs and create a bloom for filtering
receipt.Logs = state.GetLogs(expectedTx.Hash(), header.Number.Uint64(), header.Hash())
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
receipt.BlockHash = header.Hash()
receipt.BlockNumber = header.Number
receipt.TransactionIndex = uint(state.TxIndex())
*receipts = append(*receipts, receipt)
state.SetNonce(msg.From(), nonce+1)
return nil
}
// GetJustifiedNumberAndHash returns the highest justified block's number and hash on the branch including and before `header`
func (p *Parlia) GetJustifiedNumberAndHash(chain consensus.ChainHeaderReader, header *types.Header) (uint64, common.Hash, error) {
if chain == nil || header == nil {
return 0, common.Hash{}, fmt.Errorf("illegal chain or header")
}
snap, err := p.snapshot(chain, header.Number.Uint64(), header.Hash(), nil)
if err != nil {
log.Error("Unexpected error when getting snapshot",
"error", err, "blockNumber", header.Number.Uint64(), "blockHash", header.Hash())
return 0, common.Hash{}, err
}
if snap.Attestation == nil {
if p.chainConfig.IsLuban(header.Number) {
log.Debug("once one attestation generated, attestation of snap would not be nil forever basically")
}
return 0, chain.GetHeaderByNumber(0).Hash(), nil
}
return snap.Attestation.TargetNumber, snap.Attestation.TargetHash, nil
}
// GetFinalizedHeader returns the highest finalized block header.
func (p *Parlia) GetFinalizedHeader(chain consensus.ChainHeaderReader, header *types.Header) *types.Header {
if chain == nil || header == nil {
return nil
}
if !chain.Config().IsPlato(header.Number) {
return chain.GetHeaderByNumber(0)
}
snap, err := p.snapshot(chain, header.Number.Uint64(), header.Hash(), nil)
if err != nil {
log.Error("Unexpected error when getting snapshot",
"error", err, "blockNumber", header.Number.Uint64(), "blockHash", header.Hash())
return nil
}
if snap.Attestation == nil {
return chain.GetHeaderByNumber(0) // keep consistent with GetJustifiedNumberAndHash
}
return chain.GetHeader(snap.Attestation.SourceHash, snap.Attestation.SourceNumber)
}
// =========================== utility function ==========================
// SealHash returns the hash of a block prior to it being sealed.
func SealHash(header *types.Header, chainId *big.Int) (hash common.Hash) {
hasher := sha3.NewLegacyKeccak256()
encodeSigHeader(hasher, header, chainId)
hasher.Sum(hash[:0])
return hash
}
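// encodeSigHeader RLP-encodes the chain id and the header fields, with the 65-byte seal stripped from
// extra-data; this encoding is what SealHash hashes.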
func encodeSigHeader(w io.Writer, header *types.Header, chainId *big.Int) {
err := rlp.Encode(w, []interface{}{
chainId,
header.ParentHash,
header.UncleHash,
header.Coinbase,
header.Root,
header.TxHash,
header.ReceiptHash,
header.Bloom,
header.Difficulty,
header.Number,
header.GasLimit,
header.GasUsed,
header.Time,
header.Extra[:len(header.Extra)-extraSeal], // this will panic if extra is too short, should check before calling encodeSigHeader
header.MixDigest,
header.Nonce,
})
if err != nil {
panic("can't encode: " + err.Error())
}
}
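// encodeSigHeaderWithoutVoteAttestation is like encodeSigHeader but only includes the vanity prefix of
// extra-data, dropping both the vote attestation and the seal.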
func encodeSigHeaderWithoutVoteAttestation(w io.Writer, header *types.Header, chainId *big.Int) {
err := rlp.Encode(w, []interface{}{
chainId,
header.ParentHash,
header.UncleHash,
header.Coinbase,
header.Root,
header.TxHash,
header.ReceiptHash,
header.Bloom,
header.Difficulty,
header.Number,
header.GasLimit,
header.GasUsed,
header.Time,
header.Extra[:extraVanity], // this will panic if extra is too short, should check before calling encodeSigHeaderWithoutVoteAttestation
header.MixDigest,
header.Nonce,
})
if err != nil {
panic("can't encode: " + err.Error())
}
}
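// backOffTime returns the extra delay (in seconds) an out-of-turn validator must wait before sealing.
// In-turn validators get 0; after Planck, recently-signed validators are excluded before the shuffled
// backoff steps are assigned.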
func (p *Parlia) backOffTime(snap *Snapshot, header *types.Header, val common.Address) uint64 {
if snap.inturn(val) {
return 0
} else {
delay := initialBackOffTime
validators := snap.validators()
if p.chainConfig.IsPlanck(header.Number) {
// reverse the key/value of snap.Recents to get recentsMap
recentsMap := make(map[common.Address]uint64, len(snap.Recents))
bound := uint64(0)
if n, limit := header.Number.Uint64(), uint64(len(validators)/2+1); n > limit {
bound = n - limit
}
for seen, recent := range snap.Recents {
if seen <= bound {
continue
}
recentsMap[recent] = seen
}
// The backOffTime does not matter when a validator has signed recently.
if _, ok := recentsMap[val]; ok {
return 0
}
inTurnAddr := validators[(snap.Number+1)%uint64(len(validators))]
if _, ok := recentsMap[inTurnAddr]; ok {
log.Debug("in turn validator has recently signed, skip initialBackOffTime",
"inTurnAddr", inTurnAddr)
delay = 0
}
// Exclude the recently signed validators
temp := make([]common.Address, 0, len(validators))
for _, addr := range validators {
if _, ok := recentsMap[addr]; ok {
continue
}
temp = append(temp, addr)
}
validators = temp
}
// get the index of current validator and its shuffled backoff time.
idx := -1
for index, itemAddr := range validators {
if val == itemAddr {
idx = index
}
}
if idx < 0 {
log.Debug("The validator is not authorized", "addr", val)
return 0
}
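// Deterministically shuffle the backoff steps, seeded by the snapshot number, so that every validator
// derives the same ordering for this block.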
s := rand.NewSource(int64(snap.Number))
r := rand.New(s)
n := len(validators)
backOffSteps := make([]uint64, 0, n)
for i := uint64(0); i < uint64(n); i++ {
backOffSteps = append(backOffSteps, i)
}
r.Shuffle(n, func(i, j int) {
backOffSteps[i], backOffSteps[j] = backOffSteps[j], backOffSteps[i]
})
delay += backOffSteps[idx] * wiggleTime
return delay
}
}
// chainContext provides a minimal core.ChainContext backed by the consensus engine, used when applying system transactions.
type chainContext struct {
Chain consensus.ChainHeaderReader
parlia consensus.Engine
}
func (c chainContext) Engine() consensus.Engine {
return c.parlia
}
func (c chainContext) GetHeader(hash common.Hash, number uint64) *types.Header {
return c.Chain.GetHeader(hash, number)
}
// callmsg implements core.Message to allow passing it as a transaction simulator.
type callmsg struct {
ethereum.CallMsg
}
func (m callmsg) From() common.Address { return m.CallMsg.From }
func (m callmsg) Nonce() uint64 { return 0 }
func (m callmsg) CheckNonce() bool { return false }
func (m callmsg) To() *common.Address { return m.CallMsg.To }
func (m callmsg) GasPrice() *big.Int { return m.CallMsg.GasPrice }
func (m callmsg) Gas() uint64 { return m.CallMsg.Gas }
func (m callmsg) Value() *big.Int { return m.CallMsg.Value }
func (m callmsg) Data() []byte { return m.CallMsg.Data }
// applyMessage executes a system-contract call directly on the EVM with gas price zero
func applyMessage(
msg callmsg,
state *state.StateDB,
header *types.Header,
chainConfig *params.ChainConfig,
chainContext core.ChainContext,
) (uint64, error) {
// TODO(Nathan): state.Prepare should be called here; for now the access-list related EIPs do not affect system txs
// EIP1153 may cause a critical issue in the future
// Create a new context to be used in the EVM environment
context := core.NewEVMBlockContext(header, chainContext, nil)
// Create a new environment which holds all relevant information
// about the transaction and calling mechanisms.
vmenv := vm.NewEVM(context, vm.TxContext{Origin: msg.From(), GasPrice: big.NewInt(0)}, state, chainConfig, vm.Config{})
// Apply the transaction to the current state (included in the env)
ret, returnGas, err := vmenv.Call(
vm.AccountRef(msg.From()),
*msg.To(),
msg.Data(),
msg.Gas(),
msg.Value(),
)
if err != nil {
log.Error("apply message failed", "msg", string(ret), "err", err)
}
return msg.Gas() - returnGas, err
}