// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ethash

import (
	"errors"
	"fmt"
	"math/big"
	"time"

	mapset "github.com/deckarep/golang-set/v2"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/misc"
	"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/tracing"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/holiman/uint256"
	"golang.org/x/crypto/sha3"
)

// Ethash proof-of-work protocol constants.
var (
	FrontierBlockReward           = uint256.NewInt(5e+18) // Block reward in wei for successfully mining a block
	ByzantiumBlockReward          = uint256.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium
	ConstantinopleBlockReward     = uint256.NewInt(2e+18) // Block reward in wei for successfully mining a block upward from Constantinople
	maxUncles                     = 2                     // Maximum number of uncles allowed in a single block
	allowedFutureBlockTimeSeconds = int64(15)             // Max seconds from current time allowed for blocks, before they're considered future blocks

	// calcDifficultyEip5133 is the difficulty adjustment algorithm as specified by EIP 5133.
	// It offsets the bomb a total of 11.4M blocks.
	// Specification EIP-5133: https://eips.ethereum.org/EIPS/eip-5133
	calcDifficultyEip5133 = makeDifficultyCalculator(big.NewInt(11_400_000))

	// calcDifficultyEip4345 is the difficulty adjustment algorithm as specified by EIP 4345.
	// It offsets the bomb a total of 10.7M blocks.
	// Specification EIP-4345: https://eips.ethereum.org/EIPS/eip-4345
	calcDifficultyEip4345 = makeDifficultyCalculator(big.NewInt(10_700_000))

	// calcDifficultyEip3554 is the difficulty adjustment algorithm as specified by EIP 3554.
	// It offsets the bomb a total of 9.7M blocks.
	// Specification EIP-3554: https://eips.ethereum.org/EIPS/eip-3554
	calcDifficultyEip3554 = makeDifficultyCalculator(big.NewInt(9700000))

	// calcDifficultyEip2384 is the difficulty adjustment algorithm as specified by EIP 2384.
	// It offsets the bomb 4M blocks from Constantinople, so in total 9M blocks.
	// Specification EIP-2384: https://eips.ethereum.org/EIPS/eip-2384
	calcDifficultyEip2384 = makeDifficultyCalculator(big.NewInt(9000000))

	// calcDifficultyConstantinople is the difficulty adjustment algorithm for Constantinople.
	// It returns the difficulty that a new block should have when created at time given the
	// parent block's time and difficulty. The calculation uses the Byzantium rules, but with
	// bomb offset 5M.
	// Specification EIP-1234: https://eips.ethereum.org/EIPS/eip-1234
	calcDifficultyConstantinople = makeDifficultyCalculator(big.NewInt(5000000))

	// calcDifficultyByzantium is the difficulty adjustment algorithm. It returns
	// the difficulty that a new block should have when created at time given the
	// parent block's time and difficulty. The calculation uses the Byzantium rules.
	// Specification EIP-649: https://eips.ethereum.org/EIPS/eip-649
	calcDifficultyByzantium = makeDifficultyCalculator(big.NewInt(3000000))
)

// Various error messages to mark blocks invalid. These should be private to
// prevent engine specific errors from being referenced in the remainder of the
// codebase, inherently breaking if the engine is swapped out. Please put common
// error types into the consensus package.
var (
	errOlderBlockTime  = errors.New("timestamp older than parent")
	errTooManyUncles   = errors.New("too many uncles")
	errDuplicateUncle  = errors.New("duplicate uncle")
	errUncleIsAncestor = errors.New("uncle is ancestor")
	errDanglingUncle   = errors.New("uncle's parent is not ancestor")
)

// Author implements consensus.Engine, returning the header's coinbase as the
// proof-of-work verified author of the block.
func (ethash *Ethash) Author(header *types.Header) (common.Address, error) {
	return header.Coinbase, nil
}

// VerifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum ethash engine.
func (ethash *Ethash) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error {
	// Short circuit if the header is already known, or if its parent is not
	number := header.Number.Uint64()
	if chain.GetHeader(header.Hash(), number) != nil {
		return nil
	}
	parent := chain.GetHeader(header.ParentHash, number-1)
	if parent == nil {
		return consensus.ErrUnknownAncestor
	}
	// Sanity checks passed, do a proper verification
	return ethash.verifyHeader(chain, header, parent, false, time.Now().Unix())
}

// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications.
func (ethash *Ethash) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) {
	// If we're running a full engine faking, accept any input as valid
	if ethash.fakeFull || len(headers) == 0 {
		abort, results := make(chan struct{}), make(chan error, len(headers))
		for i := 0; i < len(headers); i++ {
			results <- nil
		}
		return abort, results
	}
	abort := make(chan struct{})
	results := make(chan error, len(headers))
	unixNow := time.Now().Unix()

	go func() {
		for i, header := range headers {
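			// Resolve the parent: the first header is looked up from the chain,
			// later headers are expected to chain onto the previous entry in the batch.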
			var parent *types.Header
			if i == 0 {
				parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
			} else if headers[i-1].Hash() == headers[i].ParentHash {
				parent = headers[i-1]
			}
			var err error
			if parent == nil {
				err = consensus.ErrUnknownAncestor
			} else {
				err = ethash.verifyHeader(chain, header, parent, false, unixNow)
			}
			select {
			case <-abort:
				return
			case results <- err:
			}
		}
	}()
	return abort, results
}

// VerifyUncles verifies that the given block's uncles conform to the consensus
// rules of the stock Ethereum ethash engine.
func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
	// If we're running a full engine faking, accept any input as valid
	if ethash.fakeFull {
		return nil
	}
	// Verify that there are at most 2 uncles included in this block
	if len(block.Uncles()) > maxUncles {
		return errTooManyUncles
	}
	if len(block.Uncles()) == 0 {
		return nil
	}
	// Gather the set of past uncles and ancestors
	uncles, ancestors := mapset.NewSet[common.Hash](), make(map[common.Hash]*types.Header)

	number, parent := block.NumberU64()-1, block.ParentHash()
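	// Walk back the previous 7 generations: an uncle's parent must be among these
	// ancestors, so uncles can be at most 6 blocks older than the including block.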
	for i := 0; i < 7; i++ {
		ancestorHeader := chain.GetHeader(parent, number)
		if ancestorHeader == nil {
			break
		}
		ancestors[parent] = ancestorHeader
		// If the ancestor doesn't have any uncles, we don't have to iterate them
		if ancestorHeader.UncleHash != types.EmptyUncleHash {
			// Need to add those uncles to the banned list too
			ancestor := chain.GetBlock(parent, number)
			if ancestor == nil {
				break
			}
			for _, uncle := range ancestor.Uncles() {
				uncles.Add(uncle.Hash())
			}
		}
		parent, number = ancestorHeader.ParentHash, number-1
	}
	ancestors[block.Hash()] = block.Header()
	uncles.Add(block.Hash())

	// Verify each of the uncles: it must be recent, but not an ancestor
	for _, uncle := range block.Uncles() {
		// Make sure every uncle is rewarded only once
		hash := uncle.Hash()
		if uncles.Contains(hash) {
			return errDuplicateUncle
		}
		uncles.Add(hash)

		// Make sure the uncle has a valid ancestry
		if ancestors[hash] != nil {
			return errUncleIsAncestor
		}
		if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == block.ParentHash() {
			return errDanglingUncle
		}
		if err := ethash.verifyHeader(chain, uncle, ancestors[uncle.ParentHash], true, time.Now().Unix()); err != nil {
			return err
		}
	}
	return nil
}

// verifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum ethash engine.
// See YP section 4.3.4. "Block Header Validity"
func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, unixNow int64) error {
	// Ensure that the header's extra-data section is of a reasonable size
	if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
		return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
	}
	// Verify the header's timestamp
	if !uncle {
		if header.Time > uint64(unixNow+allowedFutureBlockTimeSeconds) {
			return consensus.ErrFutureBlock
		}
	}
	if header.Time <= parent.Time {
		return errOlderBlockTime
	}
	// Verify the block's difficulty based on its timestamp and parent's difficulty
	expected := ethash.CalcDifficulty(chain, header.Time, parent)

	if expected.Cmp(header.Difficulty) != 0 {
		return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
	}
	// Verify that the gas limit is <= 2^63-1
	if header.GasLimit > params.MaxGasLimit {
		return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit)
	}
	// Verify that the gasUsed is <= gasLimit
	if header.GasUsed > header.GasLimit {
		return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit)
	}
	// Verify the block's gas usage and (if applicable) verify the base fee.
	if !chain.Config().IsLondon(header.Number) {
		// Verify BaseFee not present before EIP-1559 fork.
		if header.BaseFee != nil {
			return fmt.Errorf("invalid baseFee before fork: have %d, expected 'nil'", header.BaseFee)
		}
		if err := misc.VerifyGaslimit(parent.GasLimit, header.GasLimit); err != nil {
			return err
		}
	} else if err := eip1559.VerifyEIP1559Header(chain.Config(), parent, header); err != nil {
		// Verify the header's EIP-1559 attributes.
		return err
	}
	// Verify that the block number is parent's +1
	if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(big.NewInt(1)) != 0 {
		return consensus.ErrInvalidNumber
	}
	if chain.Config().IsShanghai(header.Number, header.Time) {
		return errors.New("ethash does not support shanghai fork")
	}
	// Verify the non-existence of withdrawalsHash.
	if header.WithdrawalsHash != nil {
		return fmt.Errorf("invalid withdrawalsHash: have %x, expected nil", header.WithdrawalsHash)
	}
	if chain.Config().IsCancun(header.Number, header.Time) {
		return errors.New("ethash does not support cancun fork")
	}
	// Verify the non-existence of cancun-specific header fields
	switch {
	case header.ExcessBlobGas != nil:
		return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas)
	case header.BlobGasUsed != nil:
		return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed)
	case header.ParentBeaconRoot != nil:
		return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", header.ParentBeaconRoot)
	}
	// Add some fake checks for tests
	if ethash.fakeDelay != nil {
		time.Sleep(*ethash.fakeDelay)
	}
	if ethash.fakeFail != nil && *ethash.fakeFail == header.Number.Uint64() {
		return errors.New("invalid tester pow")
	}
	// If all checks passed, validate any special fields for hard forks
	if err := misc.VerifyDAOHeaderExtraData(chain.Config(), header); err != nil {
		return err
	}
	return nil
}

// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time
// given the parent block's time and difficulty.
func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
	return CalcDifficulty(chain.Config(), time, parent)
}

// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time
// given the parent block's time and difficulty.
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
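	// Fork activation is checked against the number of the block being created,
	// i.e. parent.Number + 1.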
	next := new(big.Int).Add(parent.Number, big1)
	switch {
	case config.IsGrayGlacier(next):
		return calcDifficultyEip5133(time, parent)
	case config.IsArrowGlacier(next):
		return calcDifficultyEip4345(time, parent)
	case config.IsLondon(next):
		return calcDifficultyEip3554(time, parent)
	case config.IsMuirGlacier(next):
		return calcDifficultyEip2384(time, parent)
	case config.IsConstantinople(next):
		return calcDifficultyConstantinople(time, parent)
	case config.IsByzantium(next):
		return calcDifficultyByzantium(time, parent)
	case config.IsHomestead(next):
		return calcDifficultyHomestead(time, parent)
	default:
		return calcDifficultyFrontier(time, parent)
	}
}

// Some weird constants to avoid constant memory allocs for them.
var (
	expDiffPeriod = big.NewInt(100000)
	big1          = big.NewInt(1)
	big2          = big.NewInt(2)
	big9          = big.NewInt(9)
	big10         = big.NewInt(10)
	bigMinus99    = big.NewInt(-99)
)

// makeDifficultyCalculator creates a difficultyCalculator with the given bomb-delay.
// The difficulty is calculated with Byzantium rules, which differ from Homestead in
// how uncles affect the calculation.
func makeDifficultyCalculator(bombDelay *big.Int) func(time uint64, parent *types.Header) *big.Int {
	// Note, the calculations below look at the parent number, which is 1 below
	// the block number. Thus we remove one from the given delay.
	bombDelayFromParent := new(big.Int).Sub(bombDelay, big1)
	return func(time uint64, parent *types.Header) *big.Int {
		// https://github.com/ethereum/EIPs/issues/100.
		// algorithm:
		// diff = (parent_diff +
		//         (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99))
		//        ) + 2^(periodCount - 2)

		bigTime := new(big.Int).SetUint64(time)
		bigParentTime := new(big.Int).SetUint64(parent.Time)

		// holds intermediate values to make the algo easier to read & audit
		x := new(big.Int)
		y := new(big.Int)

		// (2 if len(parent_uncles) else 1) - (block_timestamp - parent_timestamp) // 9
		x.Sub(bigTime, bigParentTime)
		x.Div(x, big9)
		if parent.UncleHash == types.EmptyUncleHash {
			x.Sub(big1, x)
		} else {
			x.Sub(big2, x)
		}
		// max((2 if len(parent_uncles) else 1) - (block_timestamp - parent_timestamp) // 9, -99)
		if x.Cmp(bigMinus99) < 0 {
			x.Set(bigMinus99)
		}
		// parent_diff + (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99))
		y.Div(parent.Difficulty, params.DifficultyBoundDivisor)
		x.Mul(y, x)
		x.Add(parent.Difficulty, x)

		// minimum difficulty can ever be (before exponential factor)
		if x.Cmp(params.MinimumDifficulty) < 0 {
			x.Set(params.MinimumDifficulty)
		}
		// calculate a fake block number for the ice-age delay
		// Specification: https://eips.ethereum.org/EIPS/eip-1234
		fakeBlockNumber := new(big.Int)
		if parent.Number.Cmp(bombDelayFromParent) >= 0 {
			fakeBlockNumber = fakeBlockNumber.Sub(parent.Number, bombDelayFromParent)
		}
		// for the exponential factor
		periodCount := fakeBlockNumber
		periodCount.Div(periodCount, expDiffPeriod)

		// the exponential factor, commonly referred to as "the bomb"
		// diff = diff + 2^(periodCount - 2)
		if periodCount.Cmp(big1) > 0 {
			y.Sub(periodCount, big2)
			y.Exp(big2, y, nil)
			x.Add(x, y)
		}
		return x
	}
}

// calcDifficultyHomestead is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time given the
// parent block's time and difficulty. The calculation uses the Homestead rules.
func calcDifficultyHomestead(time uint64, parent *types.Header) *big.Int {
	// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.md
	// algorithm:
	// diff = (parent_diff +
	//         (parent_diff / 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
	//        ) + 2^(periodCount - 2)

	bigTime := new(big.Int).SetUint64(time)
	bigParentTime := new(big.Int).SetUint64(parent.Time)

	// holds intermediate values to make the algo easier to read & audit
	x := new(big.Int)
	y := new(big.Int)

	// 1 - (block_timestamp - parent_timestamp) // 10
	x.Sub(bigTime, bigParentTime)
	x.Div(x, big10)
	x.Sub(big1, x)

	// max(1 - (block_timestamp - parent_timestamp) // 10, -99)
	if x.Cmp(bigMinus99) < 0 {
		x.Set(bigMinus99)
	}
	// (parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
	y.Div(parent.Difficulty, params.DifficultyBoundDivisor)
	x.Mul(y, x)
	x.Add(parent.Difficulty, x)

	// minimum difficulty can ever be (before exponential factor)
	if x.Cmp(params.MinimumDifficulty) < 0 {
		x.Set(params.MinimumDifficulty)
	}
	// for the exponential factor
	periodCount := new(big.Int).Add(parent.Number, big1)
	periodCount.Div(periodCount, expDiffPeriod)

	// the exponential factor, commonly referred to as "the bomb"
	// diff = diff + 2^(periodCount - 2)
	if periodCount.Cmp(big1) > 0 {
		y.Sub(periodCount, big2)
		y.Exp(big2, y, nil)
		x.Add(x, y)
	}
	return x
}

// calcDifficultyFrontier is the difficulty adjustment algorithm. It returns the
// difficulty that a new block should have when created at time given the parent
// block's time and difficulty. The calculation uses the Frontier rules.
func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
	diff := new(big.Int)
	adjust := new(big.Int).Div(parent.Difficulty, params.DifficultyBoundDivisor)
	bigTime := new(big.Int)
	bigParentTime := new(big.Int)

	bigTime.SetUint64(time)
	bigParentTime.SetUint64(parent.Time)
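
	// Frontier rule: blocks sealed within params.DurationLimit of their parent raise
	// the difficulty by parent_diff / 2048, slower blocks lower it by the same amount.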
	if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 {
		diff.Add(parent.Difficulty, adjust)
	} else {
		diff.Sub(parent.Difficulty, adjust)
	}
	if diff.Cmp(params.MinimumDifficulty) < 0 {
		diff.Set(params.MinimumDifficulty)
	}

	periodCount := new(big.Int).Add(parent.Number, big1)
	periodCount.Div(periodCount, expDiffPeriod)
	if periodCount.Cmp(big1) > 0 {
		// diff = diff + 2^(periodCount - 2)
		expDiff := periodCount.Sub(periodCount, big2)
		expDiff.Exp(big2, expDiff, nil)
		diff.Add(diff, expDiff)
		if diff.Cmp(params.MinimumDifficulty) < 0 {
			diff = params.MinimumDifficulty
		}
	}
	return diff
}

// Exported for fuzzing
var FrontierDifficultyCalculator = calcDifficultyFrontier
var HomesteadDifficultyCalculator = calcDifficultyHomestead
var DynamicDifficultyCalculator = makeDifficultyCalculator

// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the ethash protocol. The changes are done inline.
func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
	parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
	if parent == nil {
		return consensus.ErrUnknownAncestor
	}
	header.Difficulty = ethash.CalcDifficulty(chain, header.Time, parent)
	return nil
}

// Finalize implements consensus.Engine, accumulating the block and uncle rewards.
func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) {
	// Accumulate any block and uncle rewards
	accumulateRewards(chain.Config(), state, header, body.Uncles)
}

// FinalizeAndAssemble implements consensus.Engine, accumulating the block and
// uncle rewards, setting the final state and assembling the block.
func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error) {
	if len(body.Withdrawals) > 0 {
		return nil, errors.New("ethash does not support withdrawals")
	}
	// Finalize block
	ethash.Finalize(chain, header, state, body)

	// Assign the final state root to header.
	header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))

	// Header seems complete, assemble into a block and return
	return types.NewBlock(header, &types.Body{Transactions: body.Transactions, Uncles: body.Uncles}, receipts, trie.NewStackTrie(nil)), nil
}

// SealHash returns the hash of a block prior to it being sealed.
func (ethash *Ethash) SealHash(header *types.Header) (hash common.Hash) {
	hasher := sha3.NewLegacyKeccak256()

	enc := []interface{}{
		header.ParentHash,
		header.UncleHash,
		header.Coinbase,
		header.Root,
		header.TxHash,
		header.ReceiptHash,
		header.Bloom,
		header.Difficulty,
		header.Number,
		header.GasLimit,
		header.GasUsed,
		header.Time,
		header.Extra,
	}
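	// The EIP-1559 base fee is only hashed when present; fields introduced by the
	// Shanghai and Cancun forks must never be set on an ethash header and panic below.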
	if header.BaseFee != nil {
		enc = append(enc, header.BaseFee)
	}
	if header.WithdrawalsHash != nil {
		panic("withdrawal hash set on ethash")
	}
	if header.ExcessBlobGas != nil {
		panic("excess blob gas set on ethash")
	}
	if header.BlobGasUsed != nil {
		panic("blob gas used set on ethash")
	}
	if header.ParentBeaconRoot != nil {
		panic("parent beacon root set on ethash")
	}
	rlp.Encode(hasher, enc)
	hasher.Sum(hash[:0])
	return hash
}

// accumulateRewards credits the coinbase of the given block with the mining
// reward. The total reward consists of the static block reward and rewards for
// included uncles. The coinbase of each uncle block is also rewarded.
func accumulateRewards(config *params.ChainConfig, stateDB vm.StateDB, header *types.Header, uncles []*types.Header) {
	// Select the correct block reward based on chain progression
	blockReward := FrontierBlockReward
	if config.IsByzantium(header.Number) {
		blockReward = ByzantiumBlockReward
	}
	if config.IsConstantinople(header.Number) {
		blockReward = ConstantinopleBlockReward
	}
	// Accumulate the rewards for the miner and any included uncles
	reward := new(uint256.Int).Set(blockReward)
	r := new(uint256.Int)
	hNum, _ := uint256.FromBig(header.Number)
	for _, uncle := range uncles {
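		// Uncle miner reward: (uncleNumber + 8 - blockNumber) * blockReward / 8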
		uNum, _ := uint256.FromBig(uncle.Number)
		r.AddUint64(uNum, 8)
		r.Sub(r, hNum)
		r.Mul(r, blockReward)
		r.Rsh(r, 3)
		stateDB.AddBalance(uncle.Coinbase, r, tracing.BalanceIncreaseRewardMineUncle)
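
		// The including miner additionally earns blockReward / 32 per uncle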
		r.Rsh(blockReward, 5)
		reward.Add(reward, r)
	}
	stateDB.AddBalance(header.Coinbase, reward, tracing.BalanceIncreaseRewardMineBlock)
}