Compare commits

..

10 Commits

Author     SHA1        Message                                                                        Date
will-2012  f5b5805359  Merge pull request #2705 from will-2012/perf-pbssbsc (perf: add db metrics)   2024-09-19 11:16:10 +08:00
will@2012  34bc9e02aa  Merge remote-tracking branch 'bsc/versa_base' into perf-pbssbsc                2024-09-14 10:09:16 +08:00
will@2012  8a3d62e756  perf: add db metrics                                                           2024-09-14 10:06:38 +08:00
joeycli    d4879836c0  chore: add statedb get metrices                                                2024-09-06 14:06:12 +08:00
joeycli    4b00174821  chore: add snapshot metrices                                                   2024-09-06 11:04:37 +08:00
joeycli    3082da4e86  chore: add validate metrics                                                    2024-09-04 10:20:47 +08:00
joeycli    d334f520be  chore: add state trie metrics                                                  2024-09-04 09:39:08 +08:00
joeycli    cef6acec23  chore: add execute, verify and commit total metrics                            2024-09-02 09:08:55 +08:00
joeycli    092fbafa3c  chore: delete share mem pool                                                   2024-09-01 12:20:05 +08:00
joeycli    c4e8ec7fea  chore: add mgasps metrics                                                      2024-08-29 16:43:04 +08:00
104 changed files with 823 additions and 2384 deletions


@@ -82,28 +82,28 @@ jobs:
       # ==============================
       - name: Upload Linux Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'ubuntu-latest'
         with:
           name: linux
           path: ./build/bin/geth
       - name: Upload MacOS Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'macos-latest'
         with:
           name: macos
           path: ./build/bin/geth
       - name: Upload Windows Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'windows-latest'
         with:
           name: windows
           path: ./build/bin/geth.exe
       - name: Upload ARM-64 Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'ubuntu-latest'
         with:
           name: arm64
@@ -125,25 +125,25 @@ jobs:
       # ==============================
       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
         with:
           name: linux
           path: ./linux
       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
         with:
           name: macos
           path: ./macos
       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
        with:
           name: windows
           path: ./windows
       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
         with:
           name: arm64
           path: ./arm64


@@ -81,28 +81,28 @@ jobs:
       # ==============================
       - name: Upload Linux Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'ubuntu-latest'
         with:
           name: linux
           path: ./build/bin/geth
       - name: Upload MacOS Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'macos-latest'
         with:
           name: macos
           path: ./build/bin/geth
       - name: Upload Windows Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'windows-latest'
         with:
           name: windows
           path: ./build/bin/geth.exe
       - name: Upload ARM-64 Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'ubuntu-latest'
         with:
           name: arm64
@@ -124,25 +124,25 @@ jobs:
       # ==============================
       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
         with:
           name: linux
           path: ./linux
       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
         with:
           name: macos
           path: ./macos
       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
         with:
           name: windows
           path: ./windows
       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
         with:
           name: arm64
           path: ./arm64


@@ -1,3 +1,2 @@
 CVE-2024-34478 # "CWE-754: Improper Check for Unusual or Exceptional Conditions." This vulnerability is BTC only, BSC does not have the issue.
 CVE-2024-6104 # "CWE-532: Information Exposure Through Log Files" This is caused by the vulnerabilities go-retryablehttp@v0.7.4, it is only used in cmd devp2p, impact is limited. will upgrade to v0.7.7 later
-CVE-2024-8421 # "CWE-400: Uncontrolled Resource Consumption (Resource Exhaustion)" This vulnerability is caused by issues in the golang.org/x/net package. Even the latest version(v0.29.0) has not yet addressed it, but we will continue to monitor updates closely.


@@ -1,70 +1,4 @@
 # Changelog
-## v1.4.15
-### BUGFIX
-* [\#2680](https://github.com/bnb-chain/bsc/pull/2680) txpool: apply miner's gasceil to txpool
-* [\#2688](https://github.com/bnb-chain/bsc/pull/2688) txpool: set default GasCeil from 30M to 0
-* [\#2696](https://github.com/bnb-chain/bsc/pull/2696) miner: limit block size to eth protocol msg size
-* [\#2684](https://github.com/bnb-chain/bsc/pull/2684) eth: Add sidecars when available to broadcasted current block
-### FEATURE
-* [\#2672](https://github.com/bnb-chain/bsc/pull/2672) faucet: with mainnet balance check, 0.002BNB at least
-* [\#2678](https://github.com/bnb-chain/bsc/pull/2678) beaconserver: simulated beacon api server for op-stack
-* [\#2687](https://github.com/bnb-chain/bsc/pull/2687) faucet: support customized token
-* [\#2698](https://github.com/bnb-chain/bsc/pull/2698) faucet: add example for custimized token
-* [\#2706](https://github.com/bnb-chain/bsc/pull/2706) faucet: update DIN token faucet support
-### IMPROVEMENT
-* [\#2677](https://github.com/bnb-chain/bsc/pull/2677) log: add some p2p log
-* [\#2679](https://github.com/bnb-chain/bsc/pull/2679) build(deps): bump actions/download-artifact in /.github/workflows
-* [\#2662](https://github.com/bnb-chain/bsc/pull/2662) metrics: add some extra feature flags as node stats
-* [\#2675](https://github.com/bnb-chain/bsc/pull/2675) fetcher: Sleep after marking block as done when requeuing
-* [\#2695](https://github.com/bnb-chain/bsc/pull/2695) CI: nancy ignore CVE-2024-8421
-* [\#2689](https://github.com/bnb-chain/bsc/pull/2689) consensus/parlia: wait more time when processing huge blocks
-## v1.4.14
-### BUGFIX
-* [\#2643](https://github.com/bnb-chain/bsc/pull/2643) core: fix cache for receipts
-* [\#2656](https://github.com/bnb-chain/bsc/pull/2656) ethclient: fix BlobSidecars api
-* [\#2657](https://github.com/bnb-chain/bsc/pull/2657) fix: update prunefreezers offset when pruneancient and the dataset has pruned block
-### FEATURE
-* [\#2661](https://github.com/bnb-chain/bsc/pull/2661) config: setup Mainnet 2 hardfork date: HaberFix & Bohr
-### IMPROVEMENT
-* [\#2578](https://github.com/bnb-chain/bsc/pull/2578) core/systemcontracts: use vm.StateDB in UpgradeBuildInSystemContract
-* [\#2649](https://github.com/bnb-chain/bsc/pull/2649) internal/debug: remove memsize
-* [\#2655](https://github.com/bnb-chain/bsc/pull/2655) internal/ethapi: make GetFinalizedHeader monotonically increasing
-* [\#2658](https://github.com/bnb-chain/bsc/pull/2658) core: improve readability of the fork choice logic
-* [\#2665](https://github.com/bnb-chain/bsc/pull/2665) faucet: bump and resend faucet transaction if it has been pending for a while
-## v1.4.13
-### BUGFIX
-* [\#2602](https://github.com/bnb-chain/bsc/pull/2602) fix: prune-state when specify --triesInMemory 32
-* [\#2579](https://github.com/bnb-chain/bsc/pull/2579) fix: only take non-mempool tx to calculate bid price
-### FEATURE
-* [\#2634](https://github.com/bnb-chain/bsc/pull/2634) config: setup Testnet Bohr hardfork date
-* [\#2482](https://github.com/bnb-chain/bsc/pull/2482) BEP-341: Validators can produce consecutive blocks
-* [\#2502](https://github.com/bnb-chain/bsc/pull/2502) BEP-402: Complete Missing Fields in Block Header to Generate Signature
-* [\#2558](https://github.com/bnb-chain/bsc/pull/2558) BEP-404: Clear Miner History when Switching Validators Set
-* [\#2605](https://github.com/bnb-chain/bsc/pull/2605) feat: add bohr upgrade contracts bytecode
-* [\#2614](https://github.com/bnb-chain/bsc/pull/2614) fix: update stakehub bytecode after zero address agent issue fixed
-* [\#2608](https://github.com/bnb-chain/bsc/pull/2608) consensus/parlia: modify mining time for last block in one turn
-* [\#2618](https://github.com/bnb-chain/bsc/pull/2618) consensus/parlia: exclude inturn validator when calculate backoffTime
-* [\#2621](https://github.com/bnb-chain/bsc/pull/2621) core: not record zero hash beacon block root with Parlia engine
-### IMPROVEMENT
-* [\#2589](https://github.com/bnb-chain/bsc/pull/2589) core/vote: vote before committing state and writing block
-* [\#2596](https://github.com/bnb-chain/bsc/pull/2596) core: improve the network stability when double sign happens
-* [\#2600](https://github.com/bnb-chain/bsc/pull/2600) core: cache block after wroten into db
-* [\#2629](https://github.com/bnb-chain/bsc/pull/2629) utils: add GetTopAddr to analyse large traffic
-* [\#2591](https://github.com/bnb-chain/bsc/pull/2591) consensus/parlia: add GetJustifiedNumber and GetFinalizedNumber
-* [\#2611](https://github.com/bnb-chain/bsc/pull/2611) cmd/utils: add new flag OverridePassedForkTime
-* [\#2603](https://github.com/bnb-chain/bsc/pull/2603) faucet: rate limit initial implementation
-* [\#2622](https://github.com/bnb-chain/bsc/pull/2622) tests: fix evm-test CI
-* [\#2628](https://github.com/bnb-chain/bsc/pull/2628) Makefile: use docker compose v2 instead of v1
 ## v1.4.12
@@ -101,6 +35,7 @@
 * [\#2534](https://github.com/bnb-chain/bsc/pull/2534) fix: nil pointer when clear simulating bid
 * [\#2535](https://github.com/bnb-chain/bsc/pull/2535) upgrade: add HaberFix hardfork
 ## v1.4.10
 ### FEATURE
 NA


@@ -17,11 +17,6 @@ geth:
 	@echo "Done building."
 	@echo "Run \"$(GOBIN)/geth\" to launch geth."
-#? faucet: Build faucet
-faucet:
-	$(GORUN) build/ci.go install ./cmd/faucet
-	@echo "Done building faucet"
 #? all: Build all packages and executables
 all:
 	$(GORUN) build/ci.go install
@@ -34,11 +29,11 @@ truffle-test:
 	docker build . -f ./docker/Dockerfile --target bsc-genesis -t bsc-genesis
 	docker build . -f ./docker/Dockerfile --target bsc -t bsc
 	docker build . -f ./docker/Dockerfile.truffle -t truffle-test
-	docker compose -f ./tests/truffle/docker-compose.yml up genesis
-	docker compose -f ./tests/truffle/docker-compose.yml up -d bsc-rpc bsc-validator1
+	docker-compose -f ./tests/truffle/docker-compose.yml up genesis
+	docker-compose -f ./tests/truffle/docker-compose.yml up -d bsc-rpc bsc-validator1
 	sleep 30
-	docker compose -f ./tests/truffle/docker-compose.yml up --exit-code-from truffle-test truffle-test
-	docker compose -f ./tests/truffle/docker-compose.yml down
+	docker-compose -f ./tests/truffle/docker-compose.yml up --exit-code-from truffle-test truffle-test
+	docker-compose -f ./tests/truffle/docker-compose.yml down
 #? lint: Run certain pre-selected linters
 lint: ## Run linters.


@@ -11,13 +11,14 @@ https://pkg.go.dev/badge/github.com/ethereum/go-ethereum
 But from that baseline of EVM compatible, BNB Smart Chain introduces a system of 21 validators with Proof of Staked Authority (PoSA) consensus that can support short block time and lower fees. The most bonded validator candidates of staking will become validators and produce blocks. The double-sign detection and other slashing logic guarantee security, stability, and chain finality.
-**The BNB Smart Chain** will be:
+Cross-chain transfer and other communication are possible due to native support of interoperability. Relayers and on-chain contracts are developed to support that. BNB Beacon Chain DEX remains a liquid venue of the exchange of assets on both chains. This dual-chain architecture will be ideal for users to take advantage of the fast trading on one side and build their decentralized apps on the other side. **The BNB Smart Chain** will be:
 - **A self-sovereign blockchain**: Provides security and safety with elected validators.
 - **EVM-compatible**: Supports all the existing Ethereum tooling along with faster finality and cheaper transaction fees.
+- **Interoperable**: Comes with efficient native dual chain communication; Optimized for scaling high-performance dApps that require fast and smooth user experience.
 - **Distributed with on-chain governance**: Proof of Staked Authority brings in decentralization and community participants. As the native token, BNB will serve as both the gas of smart contract execution and tokens for staking.
-More details in [White Paper](https://github.com/bnb-chain/whitepaper/blob/master/WHITEPAPER.md).
+More details in [White Paper](https://www.bnbchain.org/en#smartChain).
 ## Key features
@@ -33,8 +34,18 @@ To combine DPoS and PoA for consensus, BNB Smart Chain implement a novel consens
 1. Blocks are produced by a limited set of validators.
 2. Validators take turns to produce blocks in a PoA manner, similar to Ethereum's Clique consensus engine.
-3. Validator set are elected in and out based on a staking based governance on BNB Smart Chain.
-4. Parlia consensus engine will interact with a set of [system contracts](https://docs.bnbchain.org/bnb-smart-chain/staking/overview/#system-contracts) to achieve liveness slash, revenue distributing and validator set renewing func.
+3. Validator set are elected in and out based on a staking based governance on BNB Beacon Chain.
+4. The validator set change is relayed via a cross-chain communication mechanism.
+5. Parlia consensus engine will interact with a set of [system contracts](https://docs.bnbchain.org/bnb-smart-chain/staking/overview/#system-contracts) to achieve liveness slash, revenue distributing and validator set renewing func.
+### Light Client of BNB Beacon Chain
+To achieve the cross-chain communication from BNB Beacon Chain to BNB Smart Chain, need introduce a on-chain light client verification algorithm.
+It contains two parts:
+1. [Stateless Precompiled contracts](https://github.com/bnb-chain/bsc/blob/master/core/vm/contracts_lightclient.go) to do tendermint header verification and Merkle Proof verification.
+2. [Stateful solidity contracts](https://github.com/bnb-chain/bsc-genesis-contract/blob/master/contracts/TendermintLightClient.sol) to store validator set and trusted appHash.
 ## Native Token
@@ -42,6 +53,7 @@ BNB will run on BNB Smart Chain in the same way as ETH runs on Ethereum so that
 BNB will be used to:
 1. pay `gas` to deploy or invoke Smart Contract on BSC
+2. perform cross-chain operations, such as transfer token assets across BNB Smart Chain and BNB Beacon Chain.
 ## Building the source
@@ -235,7 +247,9 @@ running web servers, so malicious web pages could try to subvert locally availab
 APIs!**
 ### Operating a private network
-- [BSC-Deploy](https://github.com/bnb-chain/node-deploy/): deploy tool for setting up BNB Smart Chain.
+- [BSC-Deploy](https://github.com/bnb-chain/node-deploy/): deploy tool for setting up both BNB Beacon Chain, BNB Smart Chain and the cross chain infrastructure between them.
+- [BSC-Docker](https://github.com/bnb-chain/bsc-docker): deploy tool for setting up local BSC cluster in container.
 ## Running a bootnode
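The "stateless precompiled contracts" referenced in the light-client section added above plug into the EVM the same way the built-in precompiles do. A minimal sketch of that shape, assuming go-ethereum's core/vm PrecompiledContract interface; the tmHeaderVerify type, its gas pricing, and the input encoding are illustrative stand-ins, not the actual contracts_lightclient.go implementation:

```go
// Sketch only: the real code lives in core/vm/contracts_lightclient.go (linked
// above); the type name and pricing below are hypothetical.
package vm

// A precompile is stateless: it sees only its input bytes and returns output
// bytes. That is exactly why the light client pairs it with a separate stateful
// Solidity contract that persists validator sets and trusted appHashes.
type tmHeaderVerify struct{}

// RequiredGas prices the call up front from the raw input length.
func (c *tmHeaderVerify) RequiredGas(input []byte) uint64 {
	return uint64(len(input)/32) * 100 // illustrative flat pricing, not the real schedule
}

// Run decodes a (trusted consensus state, new tendermint header) pair from the
// input, verifies the header's signatures, and returns the updated consensus
// state for the Solidity side to store.
func (c *tmHeaderVerify) Run(input []byte) ([]byte, error) {
	// ... tendermint header verification elided ...
	return nil, nil
}
```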


@@ -1,87 +0,0 @@
package fakebeacon
import (
"context"
"sort"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
)
type BlobSidecar struct {
Blob kzg4844.Blob `json:"blob"`
Index int `json:"index"`
KZGCommitment kzg4844.Commitment `json:"kzg_commitment"`
KZGProof kzg4844.Proof `json:"kzg_proof"`
}
type APIGetBlobSidecarsResponse struct {
Data []*BlobSidecar `json:"data"`
}
type ReducedGenesisData struct {
GenesisTime string `json:"genesis_time"`
}
type APIGenesisResponse struct {
Data ReducedGenesisData `json:"data"`
}
type ReducedConfigData struct {
SecondsPerSlot string `json:"SECONDS_PER_SLOT"`
}
type IndexedBlobHash struct {
Index int // absolute index in the block, a.k.a. position in sidecar blobs array
Hash common.Hash // hash of the blob, used for consistency checks
}
func configSpec() ReducedConfigData {
return ReducedConfigData{SecondsPerSlot: "1"}
}
func beaconGenesis() APIGenesisResponse {
return APIGenesisResponse{Data: ReducedGenesisData{GenesisTime: "0"}}
}
func beaconBlobSidecars(ctx context.Context, backend ethapi.Backend, slot uint64, indices []int) (APIGetBlobSidecarsResponse, error) {
var blockNrOrHash rpc.BlockNumberOrHash
header, err := fetchBlockNumberByTime(ctx, int64(slot), backend)
if err != nil {
log.Error("Error fetching block number", "slot", slot, "indices", indices)
return APIGetBlobSidecarsResponse{}, err
}
sideCars, err := backend.GetBlobSidecars(ctx, header.Hash())
if err != nil {
log.Error("Error fetching Sidecars", "blockNrOrHash", blockNrOrHash, "err", err)
return APIGetBlobSidecarsResponse{}, err
}
sort.Ints(indices)
fullBlob := len(indices) == 0
res := APIGetBlobSidecarsResponse{}
idx := 0
curIdx := 0
for _, sideCar := range sideCars {
for i := 0; i < len(sideCar.Blobs); i++ {
//hash := kZGToVersionedHash(sideCar.Commitments[i])
if !fullBlob && curIdx >= len(indices) {
break
}
if fullBlob || idx == indices[curIdx] {
res.Data = append(res.Data, &BlobSidecar{
Index: idx,
Blob: sideCar.Blobs[i],
KZGCommitment: sideCar.Commitments[i],
KZGProof: sideCar.Proofs[i],
})
curIdx++
}
idx++
}
}
return res, nil
}
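The whole fakebeacon package is removed by this compare, but the endpoint the code above served is easy to picture. A minimal client sketch, assuming the service defaults shown further down (localhost:8686) and the /eth/v1/beacon/blob_sidecars/{slot} route; note the slot value is effectively a block timestamp, because the fake beacon pins genesis_time=0 and SECONDS_PER_SLOT=1:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Mirrors the APIGetBlobSidecarsResponse shape above, trimmed to what we print;
// RawMessage sidesteps any assumption about the commitment's JSON encoding.
type sidecarsResp struct {
	Data []struct {
		Index         int             `json:"index"`
		KZGCommitment json.RawMessage `json:"kzg_commitment"`
	} `json:"data"`
}

func main() {
	// Example timestamp taken from the commented-out tests further below; any
	// sealed block's timestamp works, since slot == unix time with 1s slots.
	const slot = 1724055046
	url := fmt.Sprintf("http://localhost:8686/eth/v1/beacon/blob_sidecars/%d?indices=0&indices=1", slot)

	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out sidecarsResp
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	for _, sc := range out.Data {
		fmt.Printf("blob %d commitment %s\n", sc.Index, sc.KZGCommitment)
	}
}
```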


@@ -1,88 +0,0 @@
package fakebeacon
import (
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/network/httputil"
)
var (
versionMethod = "/eth/v1/node/version"
specMethod = "/eth/v1/config/spec"
genesisMethod = "/eth/v1/beacon/genesis"
sidecarsMethodPrefix = "/eth/v1/beacon/blob_sidecars/{slot}"
)
func VersionMethod(w http.ResponseWriter, r *http.Request) {
resp := &structs.GetVersionResponse{
Data: &structs.Version{
Version: "",
},
}
httputil.WriteJson(w, resp)
}
func SpecMethod(w http.ResponseWriter, r *http.Request) {
httputil.WriteJson(w, &structs.GetSpecResponse{Data: configSpec()})
}
func GenesisMethod(w http.ResponseWriter, r *http.Request) {
httputil.WriteJson(w, beaconGenesis())
}
func (s *Service) SidecarsMethod(w http.ResponseWriter, r *http.Request) {
indices, err := parseIndices(r.URL)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
segments := strings.Split(r.URL.Path, "/")
slot, err := strconv.ParseUint(segments[len(segments)-1], 10, 64)
if err != nil {
httputil.HandleError(w, "not a valid slot(timestamp)", http.StatusBadRequest)
return
}
resp, err := beaconBlobSidecars(r.Context(), s.backend, slot, indices)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
httputil.WriteJson(w, resp)
}
// parseIndices filters out invalid and duplicate blob indices
func parseIndices(url *url.URL) ([]int, error) {
rawIndices := url.Query()["indices"]
indices := make([]int, 0, field_params.MaxBlobsPerBlock)
invalidIndices := make([]string, 0)
loop:
for _, raw := range rawIndices {
ix, err := strconv.Atoi(raw)
if err != nil {
invalidIndices = append(invalidIndices, raw)
continue
}
if ix >= field_params.MaxBlobsPerBlock {
invalidIndices = append(invalidIndices, raw)
continue
}
for i := range indices {
if ix == indices[i] {
continue loop
}
}
indices = append(indices, ix)
}
if len(invalidIndices) > 0 {
return nil, fmt.Errorf("requested blob indices %v are invalid", invalidIndices)
}
return indices, nil
}
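A sketch of how parseIndices behaves at the boundaries, written as a test in the same package (this test is not part of the original code; it only exercises the function shown above):

```go
package fakebeacon

import (
	"net/url"
	"testing"
)

// TestParseIndicesSketch illustrates the dedup and bounds rules of parseIndices.
func TestParseIndicesSketch(t *testing.T) {
	// Duplicates collapse; first-appearance order is kept: [1 3].
	u, _ := url.Parse("http://localhost:8686/eth/v1/beacon/blob_sidecars/1?indices=1&indices=1&indices=3")
	got, err := parseIndices(u)
	if err != nil {
		t.Fatal(err)
	}
	if len(got) != 2 || got[0] != 1 || got[1] != 3 {
		t.Fatalf("unexpected indices: %v", got)
	}

	// Values >= MaxBlobsPerBlock are rejected wholesale with an error.
	u, _ = url.Parse("http://localhost:8686/eth/v1/beacon/blob_sidecars/1?indices=99")
	if _, err := parseIndices(u); err == nil {
		t.Fatal("expected error for out-of-range index")
	}

	// No indices parameter yields an empty slice, which callers treat as
	// "return every blob in the block".
	u, _ = url.Parse("http://localhost:8686/eth/v1/beacon/blob_sidecars/1")
	if got, _ := parseIndices(u); len(got) != 0 {
		t.Fatalf("expected empty indices, got %v", got)
	}
}
```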


@@ -1,97 +0,0 @@
package fakebeacon
import (
"net/http"
"strconv"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v5/api/server"
)
const (
DefaultAddr = "localhost"
DefaultPort = 8686
)
type Config struct {
Enable bool
Addr string
Port int
}
func defaultConfig() *Config {
return &Config{
Enable: false,
Addr: DefaultAddr,
Port: DefaultPort,
}
}
type Service struct {
cfg *Config
router *mux.Router
backend ethapi.Backend
}
func NewService(cfg *Config, backend ethapi.Backend) *Service {
cfgs := defaultConfig()
if cfg.Addr != "" {
cfgs.Addr = cfg.Addr
}
if cfg.Port > 0 {
cfgs.Port = cfg.Port
}
s := &Service{
cfg: cfgs,
backend: backend,
}
router := s.newRouter()
s.router = router
return s
}
func (s *Service) Run() {
_ = http.ListenAndServe(s.cfg.Addr+":"+strconv.Itoa(s.cfg.Port), s.router)
}
func (s *Service) newRouter() *mux.Router {
r := mux.NewRouter()
r.Use(server.NormalizeQueryValuesHandler)
for _, e := range s.endpoints() {
r.HandleFunc(e.path, e.handler).Methods(e.methods...)
}
return r
}
type endpoint struct {
path string
handler http.HandlerFunc
methods []string
}
func (s *Service) endpoints() []endpoint {
return []endpoint{
{
path: versionMethod,
handler: VersionMethod,
methods: []string{http.MethodGet},
},
{
path: specMethod,
handler: SpecMethod,
methods: []string{http.MethodGet},
},
{
path: genesisMethod,
handler: GenesisMethod,
methods: []string{http.MethodGet},
},
{
path: sidecarsMethodPrefix,
handler: s.SidecarsMethod,
methods: []string{http.MethodGet},
},
}
}
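For reference, a minimal sketch of wiring this service up inside the node, using only the constructor and defaults shown above; the import path and the apiBackend variable are assumptions (the compare view does not show file paths), so treat this as illustration rather than the actual startup code:

```go
// Hypothetical wiring inside node startup code; fakebeacon's real import path
// is not visible in this compare.
func startFakeBeacon(apiBackend ethapi.Backend) {
	cfg := &fakebeacon.Config{
		Enable: true,
		Addr:   fakebeacon.DefaultAddr, // "localhost"
		Port:   fakebeacon.DefaultPort, // 8686
	}
	svc := fakebeacon.NewService(cfg, apiBackend)
	go svc.Run() // Run blocks in http.ListenAndServe, so give it its own goroutine
}
```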


@@ -1,90 +0,0 @@
package fakebeacon
import (
"context"
"fmt"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
)
//
//func TestFetchBlockNumberByTime(t *testing.T) {
// blockNum, err := fetchBlockNumberByTime(context.Background(), 1724052941, client)
// assert.Nil(t, err)
// assert.Equal(t, uint64(41493946), blockNum)
//
// blockNum, err = fetchBlockNumberByTime(context.Background(), 1734052941, client)
// assert.Equal(t, err, errors.New("time too large"))
//
// blockNum, err = fetchBlockNumberByTime(context.Background(), 1600153618, client)
// assert.Nil(t, err)
// assert.Equal(t, uint64(493946), blockNum)
//}
//
//func TestBeaconBlobSidecars(t *testing.T) {
// indexBlobHash := []IndexedBlobHash{
// {Hash: common.HexToHash("0x01231952ecbaede62f8d0398b656072c072db36982c9ef106fbbc39ce14f983c"), Index: 0},
// {Hash: common.HexToHash("0x012c21a8284d2d707bb5318e874d2e1b97a53d028e96abb702b284a2cbb0f79c"), Index: 1},
// {Hash: common.HexToHash("0x011196c8d02536ede0382aa6e9fdba6c460169c0711b5f97fcd701bd8997aee3"), Index: 2},
// {Hash: common.HexToHash("0x019c86b46b27401fb978fd175d1eb7dadf4976d6919501b0c5280d13a5bab57b"), Index: 3},
// {Hash: common.HexToHash("0x01e00db7ee99176b3fd50aab45b4fae953292334bbf013707aac58c455d98596"), Index: 4},
// {Hash: common.HexToHash("0x0117d23b68123d578a98b3e1aa029661e0abda821a98444c21992eb1e5b7208f"), Index: 5},
// //{Hash: common.HexToHash("0x01e00db7ee99176b3fd50aab45b4fae953292334bbf013707aac58c455d98596"), Index: 1},
// }
//
// resp, err := beaconBlobSidecars(context.Background(), 1724055046, []int{0, 1, 2, 3, 4, 5}) // block: 41494647
// assert.Nil(t, err)
// assert.NotNil(t, resp)
// assert.NotEmpty(t, resp.Data)
// for i, sideCar := range resp.Data {
// assert.Equal(t, indexBlobHash[i].Index, sideCar.Index)
// assert.Equal(t, indexBlobHash[i].Hash, kZGToVersionedHash(sideCar.KZGCommitment))
// }
//
// apiscs := make([]*BlobSidecar, 0, len(indexBlobHash))
// // filter and order by hashes
// for _, h := range indexBlobHash {
// for _, apisc := range resp.Data {
// if h.Index == int(apisc.Index) {
// apiscs = append(apiscs, apisc)
// break
// }
// }
// }
//
// assert.Equal(t, len(apiscs), len(resp.Data))
// assert.Equal(t, len(apiscs), len(indexBlobHash))
//}
type TimeToSlotFn func(timestamp uint64) (uint64, error)
// GetTimeToSlotFn returns a function that converts a timestamp to a slot number.
func GetTimeToSlotFn(ctx context.Context) (TimeToSlotFn, error) {
genesis := beaconGenesis()
config := configSpec()
genesisTime, _ := strconv.ParseUint(genesis.Data.GenesisTime, 10, 64)
secondsPerSlot, _ := strconv.ParseUint(config.SecondsPerSlot, 10, 64)
if secondsPerSlot == 0 {
return nil, fmt.Errorf("got bad value for seconds per slot: %v", config.SecondsPerSlot)
}
timeToSlotFn := func(timestamp uint64) (uint64, error) {
if timestamp < genesisTime {
return 0, fmt.Errorf("provided timestamp (%v) precedes genesis time (%v)", timestamp, genesisTime)
}
return (timestamp - genesisTime) / secondsPerSlot, nil
}
return timeToSlotFn, nil
}
func TestAPI(t *testing.T) {
slotFn, err := GetTimeToSlotFn(context.Background())
assert.Nil(t, err)
expTx := uint64(123151345)
gotTx, err := slotFn(expTx)
assert.Nil(t, err)
assert.Equal(t, expTx, gotTx)
}


@@ -1,65 +0,0 @@
package fakebeacon
import (
"context"
"errors"
"fmt"
"math/rand"
"time"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/rpc"
)
func fetchBlockNumberByTime(ctx context.Context, ts int64, backend ethapi.Backend) (*types.Header, error) {
// calc the block number of the ts.
currentHeader := backend.CurrentHeader()
blockTime := int64(currentHeader.Time)
if ts > blockTime {
return nil, errors.New("time too large")
}
blockNum := currentHeader.Number.Uint64()
estimateEndNumber := int64(blockNum) - (blockTime-ts)/3
// find the end number
for {
header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(estimateEndNumber))
if err != nil {
time.Sleep(time.Duration(rand.Int()%180) * time.Millisecond)
continue
}
if header == nil {
estimateEndNumber -= 1
time.Sleep(time.Duration(rand.Int()%180) * time.Millisecond)
continue
}
headerTime := int64(header.Time)
if headerTime == ts {
return header, nil
}
// let the estimateEndNumber a little bigger than real value
if headerTime > ts+8 {
estimateEndNumber -= (headerTime - ts) / 3
} else if headerTime < ts {
estimateEndNumber += (ts-headerTime)/3 + 1
} else {
// search one by one
for headerTime >= ts {
header, err = backend.HeaderByNumber(ctx, rpc.BlockNumber(estimateEndNumber-1))
if err != nil {
time.Sleep(time.Duration(rand.Int()%180) * time.Millisecond)
continue
}
headerTime = int64(header.Time)
if headerTime == ts {
return header, nil
}
estimateEndNumber -= 1
if headerTime < ts { //found the real endNumber
return nil, fmt.Errorf("block not found by time %d", ts)
}
}
}
}
}
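The first guess fetchBlockNumberByTime makes leans on BSC's roughly 3-second block interval; the refinement loop above then walks that guess up or down until a header's Time matches exactly. A worked instance of the estimate arithmetic, with made-up numbers:

```go
package main

import "fmt"

func main() {
	// Hypothetical values, only to show the first-guess step:
	headNumber := int64(41_500_000)  // current head block number
	headTime := int64(1_724_055_346) // current head timestamp
	target := int64(1_724_055_046)   // requested timestamp, 300s in the past

	// estimateEndNumber = head - (headTime-target)/3  =>  41_500_000 - 100
	estimate := headNumber - (headTime-target)/3
	fmt.Println(estimate) // 41499900; the loop then refines in ~3s strides
}
```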


@@ -43,42 +43,7 @@ func TestExtraParse(t *testing.T) {
 		}
 	}
-	// case 3, |---Extra Vanity---|---Validators Number and Validators Bytes---|---Turn Length---|---Empty---|---Extra Seal---|
-	{
-		extraData := "0xd983010209846765746889676f312e31392e3131856c696e75780000a6bf97c1152465176c461afb316ebc773c61faee85a6515daa8a923564c6ffd37fb2fe9f118ef88092e8762c7addb526ab7eb1e772baef85181f892c731be0c1891a50e6b06262c816295e26495cef6f69dfa69911d9d8e4f3bbadb89b977cf58294f7239d515e15b24cfeb82494056cf691eaf729b165f32c9757c429dba5051155903067e56ebe3698678e912d4c407bbe49438ed859fe965b140dcf1aab71a993c1f7f6929d1fe2a17b4e14614ef9fc5bdc713d6631d675403fbeefac55611bf612700b1b65f4744861b80b0f7d6ab03f349bbafec1551819b8be1efea2fc46ca749aa184248a459464eec1a21e7fc7b71a053d9644e9bb8da4853b8f872cd7c1d6b324bf1922829830646ceadfb658d3de009a61dd481a114a2e761c554b641742c973867899d300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000069c77a677c40c7fbea129d4b171a39b7a8ddabfab2317f59d86abfaf690850223d90e9e7593d91a29331dfc2f84d5adecc75fc39ecab4632c1b4400a3dd1e1298835bcca70f657164e5b75689b64b7fd1fa275f334f28e1896a26afa1295da81418593bd12814463d9f6e45c36a0e47eb4cd3e5b6af29c41e2a3a5636430155a466e216585af3ba772b61c6014342d914470ec7ac2975be345796c2b81db0422a5fd08e40db1fc2368d2245e4b18b1d0b85c921aaaafd2e341760e29fc613edd39f71254614e2055c3287a517ae2f5b9e386cd1b50a4550696d957cb4900f03ab84f83ff2df44193496793b847f64e9d6db1b3953682bb95edd096eb1e69bbd357c200992ca78050d0cbe180cfaa018e8b6c8fd93d6f4cea42bbb345dbc6f0dfdb5bec73a8a257074e82b881cfa06ef3eb4efeca060c2531359abd0eab8af1e3edfa2025fca464ac9c3fd123f6c24a0d78869485a6f79b60359f141df90a0c745125b131caaffd12000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b218c5d6af1f979ac42bc68d98a5a0d796c6ab01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b4dd66d7c2c7e57f628210187192fb89d4b99dd4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000be807dddb074639cd9fa61b47676c064fc50d62cb1f2c71577def3144fabeb75a8a1c8cb5b51d1d1b4a05eec67988b8685008baa17459ec425dbaebc852f496dc92196cdcc8e6d00c17eb431350c6c50d8b8f05176b90b11b3a3d4feb825ae9702711566df5dbf38e82add4dd1b573b95d2466fa6501ccb81e9d26a352b96150ccbf7b697fd0a419d1d6bf74282782b0b3eb1413c901d6ecf02e8e28000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e2d3a739effcd3a99387d015e260eefac72ebea1956c470ddff48cb49300200b5f83497f3a3ccb3aeb83c5edd9818569038e61d197184f4aa6939ea5e9911e3e98ac6d21e9ae3261a475a27bb1028f140bc2a7c843318afd000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ea0a6e3c511bbd10f4519ece37dc24887e11b55db2d4c6283c44a1c7bd503aaba7666e9f0c830e0ff016c1c750a5e48757a713d0836b1cabfd5c281b1de3b77d1c192183ee226379db83cffc681495730c11fdde79ba4c0c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ef0274e31810c9df02f98fafde0f841f4e66a1cd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004e99f701bb14cb7dfb68b90bd3e6d1ca656964630de71beffc7f33f7f08ec99d336ec51ad9fad0ac84ae77ca2e8ad9512acc56e0d7c93f3c2ce7de1b69149a5a400"
-		extra, err := parseExtra(extraData)
-		assert.NoError(t, err)
-		{
-			var have = extra.ValidatorSize
-			var want = uint8(21)
-			if have != want {
-				t.Fatalf("extra.ValidatorSize mismatch, have %d, want %d", have, want)
-			}
-		}
-		{
-			var have = common.Bytes2Hex(extra.Validators[14].Address[:])
-			var want = "cc8e6d00c17eb431350c6c50d8b8f05176b90b11"
-			if have != want {
-				t.Fatalf("extra.Validators[14].Address mismatch, have %s, want %s", have, want)
-			}
-		}
-		{
-			var have = common.Bytes2Hex(extra.Validators[18].BLSPublicKey[:])
-			var want = "b2d4c6283c44a1c7bd503aaba7666e9f0c830e0ff016c1c750a5e48757a713d0836b1cabfd5c281b1de3b77d1c192183"
-			if have != want {
-				t.Fatalf("extra.Validators[18].BLSPublicKey mismatch, have %s, want %s", have, want)
-			}
-		}
-		{
-			var have = extra.TurnLength
-			var want = uint8(4)
-			if *have != want {
-				t.Fatalf("extra.TurnLength mismatch, have %d, want %d", *have, want)
-			}
-		}
-	}
-	// case 4, |---Extra Vanity---|---Empty---|---Vote Attestation---|---Extra Seal---|
+	// case 3, |---Extra Vanity---|---Empty---|---Vote Attestation---|---Extra Seal---|
 	{
 		extraData := "0xd883010205846765746888676f312e32302e35856c696e75780000002995c52af8b5830563efb86089cf168dcf4c5d3cb057926628ad1bf0f03ea67eef1458485578a4f8489afa8a853ecc7af45e2d145c21b70641c4b29f0febd2dd2c61fa1ba174be3fd47f1f5fa2ab9b5c318563d8b70ca58d0d51e79ee32b2fb721649e2cb9d36538361fba11f84c8401d14bb7a0fa67ddb3ba654d6006bf788710032247aa4d1be0707273e696b422b3ff72e9798401d14bbaa01225f505f5a0e1aefadcd2913b7aac9009fe4fb3d1bf57399e0b9dce5947f94280fe6d3647276c4127f437af59eb7c7985b2ae1ebe432619860695cb6106b80cc66c735bc1709afd11f233a2c97409d38ebaf7178aa53e895aea2fe0a229f71ec601"
 		extra, err := parseExtra(extraData)
@@ -99,9 +64,9 @@ func TestExtraParse(t *testing.T) {
 		}
 	}
-	// case 5, |---Extra Vanity---|---Validators Number and Validators Bytes---|---Vote Attestation---|---Extra Seal---|
+	// case 4, |---Extra Vanity---|---Validators Number and Validators Bytes---|---Vote Attestation---|---Extra Seal---|
 	{
-		extraData := "0xd883010209846765746888676f312e31392e38856c696e7578000000dc55905c071284214b9b9c85549ab3d2b972df0deef66ac2c98e82934ca974fdcd97f3309de967d3c9c43fa711a8d673af5d75465844bf8969c8d1948d903748ac7b8b1720fa64e50c35552c16704d214347f29fa77f77da6d75d7c752b742ad4855bae330426b823e742da31f816cc83bc16d69a9134be0cfb4a1d17ec34f1b5b32d5c20440b8536b1e88f0f247788386d0ed6c748e03a53160b4b30ed3748cc5000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000980a75ecd1309ea12fa2ed87a8744fbfc9b863d589037a9ace3b590165ea1c0c5ac72bf600b7c88c1e435f41932c1132aae1bfa0bb68e46b96ccb12c3415e4d82af717d8a2959d3f95eae5dc7d70144ce1b73b403b7eb6e0b973c2d38487e58fd6e145491b110080fb14ac915a0411fc78f19e09a399ddee0d20c63a75d8f930f1694544ad2dc01bb71b214cb885500844365e95cd9942c7276e7fd8a2750ec6dded3dcdc2f351782310b0eadc077db59abca0f0cd26776e2e7acb9f3bce40b1fa5221fd1561226c6263cc5ff474cf03cceff28abc65c9cbae594f725c80e12d96c9b86c3400e529bfe184056e257c07940bb664636f689e8d2027c834681f8f878b73445261034e946bb2d901b4b878f8b27bb8608c11016739b3f8a19e54ab8c7abacd936cfeba200f3645a98b65adb0dd3692b69ce0b3ae10e7176b9a4b0d83f04065b1042b4bcb646a34b75c550f92fc34b8b2b1db0fa0d3172db23ba92727c80bcd306320d0ff411bf858525fde13bc8e0370f84c8401e9c2e6a0820dc11d63176a0eb1b828bc5376867b275579112b7013358da40317e7bab6e98401e9c2e7a00edc71ce80105a3220a87bea2792fa340d66c59002f02b0a09349ed1ed28407080048b972fac2b9077a4dcb6fc37093799a652858016c99142b227500c844fa97ec22e3f9d3b1e982f14bcd999a7453e89ce5ef5c55f1c7f8f74ba904186cd67828200"
+		extraData := "0xd883010209846765746888676f312e31392e38856c696e7578000000dc55905c071284214b9b9c85549ab3d2b972df0deef66ac2c98e82934ca974fdcd97f3309de967d3c9c43fa711a8d673af5d75465844bf8969c8d1948d903748ac7b8b1720fa64e50c35552c16704d214347f29fa77f77da6d75d7c752b742ad4855bae330426b823e742da31f816cc83bc16d69a9134be0cfb4a1d17ec34f1b5b32d5c20440b8536b1e88f0f247788386d0ed6c748e03a53160b4b30ed3748cc5000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000980a75ecd1309ea12fa2ed87a8744fbfc9b863d589037a9ace3b590165ea1c0c5ac72bf600b7c88c1e435f41932c1132aae1bfa0bb68e46b96ccb12c3415e4d82af717d8a2959d3f95eae5dc7d70144ce1b73b403b7eb6e0b973c2d38487e58fd6e145491b110080fb14ac915a0411fc78f19e09a399ddee0d20c63a75d8f930f1694544ad2dc01bb71b214cb885500844365e95cd9942c7276e7fd8a2750ec6dded3dcdc2f351782310b0eadc077db59abca0f0cd26776e2e7acb9f3bce40b1fa5221fd1561226c6263cc5ff474cf03cceff28abc65c9cbae594f725c80e12d96c9b86c3400e529bfe184056e257c07940bb664636f689e8d2027c834681f8f878b73445261034e946bb2d901b4b878f8b27bb8608c11016739b3f8a19e54ab8c7abacd936cfeba200f3645a98b65adb0dd3692b69ce0b3ae10e7176b9a4b0d83f04065b1042b4bcb646a34b75c550f92fc34b8b2b1db0fa0d3172db23ba92727c80bcd306320d0ff411bf858525fde13bc8e0370f84c8401e9c2e6a0820dc11d63176a0eb1b828bc5376867b275579112b7013358da40317e7bab6e98401e9c2e7a00edc71ce80105a3220a87bea2792fa340d66c59002f02b0a09349ed1ed284070808b972fac2b9077a4dcb6fc37093799a652858016c99142b227500c844fa97ec22e3f9d3b1e982f14bcd999a7453e89ce5ef5c55f1c7f8f74ba904186cd67828200"
 		extra, err := parseExtra(extraData)
 		assert.NoError(t, err)
 		{
@@ -140,53 +105,4 @@ func TestExtraParse(t *testing.T) {
 			}
 		}
 	}
-	// case 6, |---Extra Vanity---|---Validators Number and Validators Bytes---|---Turn Length---|---Vote Attestation---|---Extra Seal---|
-	{
-		extraData := "0xd883010209846765746888676f312e31392e38856c696e7578000000dc55905c071284214b9b9c85549ab3d2b972df0deef66ac2c98e82934ca974fdcd97f3309de967d3c9c43fa711a8d673af5d75465844bf8969c8d1948d903748ac7b8b1720fa64e50c35552c16704d214347f29fa77f77da6d75d7c752b742ad4855bae330426b823e742da31f816cc83bc16d69a9134be0cfb4a1d17ec34f1b5b32d5c20440b8536b1e88f0f247788386d0ed6c748e03a53160b4b30ed3748cc5000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000980a75ecd1309ea12fa2ed87a8744fbfc9b863d589037a9ace3b590165ea1c0c5ac72bf600b7c88c1e435f41932c1132aae1bfa0bb68e46b96ccb12c3415e4d82af717d8a2959d3f95eae5dc7d70144ce1b73b403b7eb6e0b973c2d38487e58fd6e145491b110080fb14ac915a0411fc78f19e09a399ddee0d20c63a75d8f930f1694544ad2dc01bb71b214cb885500844365e95cd9942c7276e7fd8a2750ec6dded3dcdc2f351782310b0eadc077db59abca0f0cd26776e2e7acb9f3bce40b1fa5221fd1561226c6263cc5ff474cf03cceff28abc65c9cbae594f725c80e12d96c9b86c3400e529bfe184056e257c07940bb664636f689e8d2027c834681f8f878b73445261034e946bb2d901b4b87804f8b27bb8608c11016739b3f8a19e54ab8c7abacd936cfeba200f3645a98b65adb0dd3692b69ce0b3ae10e7176b9a4b0d83f04065b1042b4bcb646a34b75c550f92fc34b8b2b1db0fa0d3172db23ba92727c80bcd306320d0ff411bf858525fde13bc8e0370f84c8401e9c2e6a0820dc11d63176a0eb1b828bc5376867b275579112b7013358da40317e7bab6e98401e9c2e7a00edc71ce80105a3220a87bea2792fa340d66c59002f02b0a09349ed1ed28407080048b972fac2b9077a4dcb6fc37093799a652858016c99142b227500c844fa97ec22e3f9d3b1e982f14bcd999a7453e89ce5ef5c55f1c7f8f74ba904186cd67828200"
-		extra, err := parseExtra(extraData)
-		assert.NoError(t, err)
-		{
-			var have = common.Bytes2Hex(extra.Validators[0].Address[:])
-			var want = "1284214b9b9c85549ab3d2b972df0deef66ac2c9"
-			if have != want {
-				t.Fatalf("extra.Validators[0].Address mismatch, have %s, want %s", have, want)
-			}
-		}
-		{
-			var have = common.Bytes2Hex(extra.Validators[0].BLSPublicKey[:])
-			var want = "8e82934ca974fdcd97f3309de967d3c9c43fa711a8d673af5d75465844bf8969c8d1948d903748ac7b8b1720fa64e50c"
-			if have != want {
-				t.Fatalf("extra.Validators[0].BLSPublicKey mismatch, have %s, want %s", have, want)
-			}
-		}
-		{
-			var have = extra.Validators[0].VoteIncluded
-			var want = true
-			if have != want {
-				t.Fatalf("extra.Validators[0].VoteIncluded mismatch, have %t, want %t", have, want)
-			}
-		}
-		{
-			var have = common.Bytes2Hex(extra.Data.TargetHash[:])
-			var want = "0edc71ce80105a3220a87bea2792fa340d66c59002f02b0a09349ed1ed284070"
-			if have != want {
-				t.Fatalf("extra.Data.TargetHash mismatch, have %s, want %s", have, want)
-			}
-		}
-		{
-			var have = extra.Data.TargetNumber
-			var want = uint64(32096999)
-			if have != want {
-				t.Fatalf("extra.Data.TargetNumber mismatch, have %d, want %d", have, want)
-			}
-		}
-		{
-			var have = extra.TurnLength
-			var want = uint8(4)
-			if *have != want {
-				t.Fatalf("extra.TurnLength mismatch, have %d, want %d", *have, want)
-			}
-		}
-	}
 }


@@ -24,7 +24,7 @@ const (
 	BLSPublicKeyLength = 48
 	// follow order in extra field
-	// |---Extra Vanity---|---Validators Number and Validators Bytes (or Empty)---|---Turn Length (or Empty)---|---Vote Attestation (or Empty)---|---Extra Seal---|
+	// |---Extra Vanity---|---Validators Number and Validators Bytes (or Empty)---|---Vote Attestation (or Empty)---|---Extra Seal---|
 	extraVanityLength   = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
 	validatorNumberSize = 1  // Fixed number of extra prefix bytes reserved for validator number after Luban
 	validatorBytesLength = common.AddressLength + types.BLSPublicKeyLength
@@ -35,7 +35,6 @@ type Extra struct {
 	ExtraVanity   string
 	ValidatorSize uint8
 	Validators    validatorsAscending
-	TurnLength *uint8
 	*types.VoteAttestation
 	ExtraSeal []byte
 }
@@ -114,15 +113,6 @@ func parseExtra(hexData string) (*Extra, error) {
 		sort.Sort(extra.Validators)
 		data = data[validatorBytesTotalLength-validatorNumberSize:]
 		dataLength = len(data)
-		// parse TurnLength
-		if dataLength > 0 {
-			if data[0] != '\xf8' {
-				extra.TurnLength = &data[0]
-				data = data[1:]
-				dataLength = len(data)
-			}
-		}
 	}
 	// parse Vote Attestation
@@ -158,10 +148,6 @@ func prettyExtra(extra Extra) {
 		}
 	}
-	if extra.TurnLength != nil {
-		fmt.Printf("TurnLength : %d\n", *extra.TurnLength)
-	}
 	if extra.VoteAttestation != nil {
 		fmt.Printf("Attestation :\n")
 		fmt.Printf("\tVoteAddressSet : %b, %d\n", extra.VoteAddressSet, bitset.From([]uint64{uint64(extra.VoteAddressSet)}).Count())

Binary file not shown (before: 10 KiB image).


@@ -1,23 +0,0 @@
# 1.Background
This is to support some projects with customized tokens that they want to integrate into the BSC faucet tool.
## 1.1. How to Integrate Your Token
- Step 1: Fund the faucet address by sending a specific amount of your BEP-20 token to the faucet address (0xaa25aa7a19f9c426e07dee59b12f944f4d9f1dd3) on the BSC testnet.
- Step 2: Update this README.md file and create a Pull Request on [bsc github](https://github.com/bnb-chain/bsc) with relevant information.
We will review the request, and once it is approved, the faucet tool will start to support the customized token and list it on https://www.bnbchain.org/en/testnet-faucet.
# 2.Token List
## 2.1.DemoToken
- symbol: DEMO
- amount: 10000000000000000000
- icon: ./demotoken.png
- addr: https://testnet.bscscan.com/address/0xe15c158d768c306dae87b96430a94f884333e55d
- fundTx: [0xa499dc9aaf918aff0507538a8aa80a88d0af6ca15054e6acc57b69c651945280](https://testnet.bscscan.com/tx/0x2a3f334b6ca756b64331bdec9e6cf3207ac50a4839fda6379e909de4d9a194ca)
-
## 2.2.DIN token
- symbol: DIN
- amount: 10000000000000000000
- icon: ./DIN.png
- addr: https://testnet.bscscan.com/address/0xb8b40FcC5B4519Dba0E07Ac8821884CE90BdE677
- fundTx: [0x17fc4c1db133830c7c146a0d41ca1df31cb446989ec11b382d58bb6176d6fde3](https://testnet.bscscan.com/tx/0x17fc4c1db133830c7c146a0d41ca1df31cb446989ec11b382d58bb6176d6fde3)

Binary file not shown (before: 28 KiB image).


@@ -49,14 +49,12 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/gorilla/websocket"
-	"golang.org/x/time/rate"
 )
 var (
 	genesisFlag = flag.String("genesis", "", "Genesis json file to seed the chain with")
 	apiPortFlag = flag.Int("apiport", 8080, "Listener port for the HTTP API connection")
 	wsEndpoint  = flag.String("ws", "http://127.0.0.1:7777/", "Url to ws endpoint")
-	wsEndpointMainnet = flag.String("ws.mainnet", "", "Url to ws endpoint of BSC mainnet")
 	netnameFlag = flag.String("faucet.name", "", "Network name to assign to the faucet")
 	payoutFlag  = flag.Int("faucet.amount", 1, "Number of Ethers to pay out per user request")
@@ -78,12 +76,6 @@ var (
 	fixGasPrice        = flag.Int64("faucet.fixedprice", 0, "Will use fixed gas price if specified")
 	twitterTokenFlag   = flag.String("twitter.token", "", "Bearer token to authenticate with the v2 Twitter API")
 	twitterTokenV1Flag = flag.String("twitter.token.v1", "", "Bearer token to authenticate with the v1.1 Twitter API")
-	resendInterval    = 15 * time.Second
-	resendBatchSize   = 3
-	resendMaxGasPrice = big.NewInt(50 * params.GWei)
-	wsReadTimeout     = 5 * time.Minute
-	minMainnetBalance = big.NewInt(2 * 1e6 * params.GWei) // 0.002 bnb
 )
 var (
@@ -94,17 +86,11 @@ var (
 //go:embed faucet.html
 var websiteTmpl string
-func weiToEtherStringFx(wei *big.Int, prec int) string {
-	etherValue := new(big.Float).Quo(new(big.Float).SetInt(wei), big.NewFloat(params.Ether))
-	// Format the big.Float directly to a string with the specified precision
-	return etherValue.Text('f', prec)
-}
 func main() {
 	// Parse the flags and set up the logger to print everything requested
 	flag.Parse()
-	log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.FromLegacyLevel(*logFlag), false)))
-	log.Info("faucet started")
+	log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.FromLegacyLevel(*logFlag), true)))
 	// Construct the payout tiers
 	amounts := make([]string, *tiersFlag)
 	for i := 0; i < *tiersFlag; i++ {
@@ -183,7 +169,7 @@ func main() {
 		log.Crit("Failed to unlock faucet signer account", "err", err)
 	}
 	// Assemble and start the faucet light service
-	faucet, err := newFaucet(genesis, *wsEndpoint, *wsEndpointMainnet, ks, website.Bytes(), bep2eInfos)
+	faucet, err := newFaucet(genesis, *wsEndpoint, ks, website.Bytes(), bep2eInfos)
 	if err != nil {
 		log.Crit("Failed to start faucet", "err", err)
 	}
@@ -212,7 +198,6 @@ type bep2eInfo struct {
 type faucet struct {
 	config *params.ChainConfig // Chain configurations for signing
 	client *ethclient.Client   // Client connection to the Ethereum chain
-	clientMainnet *ethclient.Client // Client connection to BSC mainnet for balance check
 	index []byte // Index page to serve up on the web
 	keystore *keystore.KeyStore // Keystore containing the single signer
@@ -231,8 +216,6 @@ type faucet struct {
 	bep2eInfos map[string]bep2eInfo
 	bep2eAbi   abi.ABI
-	limiter *IPRateLimiter
 }
 // wsConn wraps a websocket connection with a write mutex as the underlying
@@ -242,7 +225,7 @@ type wsConn struct {
 	wlock sync.Mutex
 }
-func newFaucet(genesis *core.Genesis, url string, mainnetUrl string, ks *keystore.KeyStore, index []byte, bep2eInfos map[string]bep2eInfo) (*faucet, error) {
+func newFaucet(genesis *core.Genesis, url string, ks *keystore.KeyStore, index []byte, bep2eInfos map[string]bep2eInfo) (*faucet, error) {
 	bep2eAbi, err := abi.JSON(strings.NewReader(bep2eAbiJson))
 	if err != nil {
 		return nil, err
@@ -251,22 +234,10 @@ func newFaucet(genesis *core.Genesis, url string, mainnetUrl string, ks *keystor
 	if err != nil {
 		return nil, err
 	}
-	clientMainnet, err := ethclient.Dial(mainnetUrl)
-	if err != nil {
-		// skip mainnet balance check if it there is no available mainnet endpoint
-		log.Warn("dail mainnet endpoint failed", "mainnetUrl", mainnetUrl, "err", err)
-	}
-	// Allow 1 request per minute with burst of 5, and cache up to 1000 IPs
-	limiter, err := NewIPRateLimiter(rate.Limit(1.0), 5, 1000)
-	if err != nil {
-		return nil, err
-	}
 	return &faucet{
 		config: genesis.Config,
 		client: client,
-		clientMainnet: clientMainnet,
 		index:    index,
 		keystore: ks,
 		account:  ks.Accounts()[0],
@@ -274,7 +245,6 @@ func newFaucet(genesis *core.Genesis, url string, mainnetUrl string, ks *keystor
 		update:     make(chan struct{}, 1),
 		bep2eInfos: bep2eInfos,
 		bep2eAbi:   bep2eAbi,
-		limiter: limiter,
 	}, nil
 }
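The IPRateLimiter removed here is defined elsewhere in cmd/faucet and is not part of this compare. A minimal sketch of the shape its call sites imply — NewIPRateLimiter(limit, burst, size) plus GetLimiter(ip).Allow() — built on golang.org/x/time/rate, with a plain bounded map standing in for whatever cache the real implementation uses:

```go
package faucet

import (
	"sync"

	"golang.org/x/time/rate"
)

// IPRateLimiter hands out one token-bucket limiter per client IP.
// Sketch only: the real cmd/faucet implementation is not shown in this diff.
type IPRateLimiter struct {
	mu       sync.Mutex
	limiters map[string]*rate.Limiter
	limit    rate.Limit // refill rate, e.g. rate.Limit(1.0)
	burst    int        // bucket size, e.g. 5
	maxIPs   int        // cache bound, e.g. 1000 (eviction elided here)
}

func NewIPRateLimiter(limit rate.Limit, burst, maxIPs int) (*IPRateLimiter, error) {
	return &IPRateLimiter{
		limiters: make(map[string]*rate.Limiter, maxIPs),
		limit:    limit,
		burst:    burst,
		maxIPs:   maxIPs,
	}, nil
}

// GetLimiter returns the limiter for ip, creating it on first sight, so a
// handler can gate requests with GetLimiter(ip).Allow().
func (l *IPRateLimiter) GetLimiter(ip string) *rate.Limiter {
	l.mu.Lock()
	defer l.mu.Unlock()
	if lim, ok := l.limiters[ip]; ok {
		return lim
	}
	lim := rate.NewLimiter(l.limit, l.burst)
	if len(l.limiters) < l.maxIPs { // crude bound; the real code likely uses an LRU
		l.limiters[ip] = lim
	}
	return lim
}
```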
@ -302,20 +272,6 @@ func (f *faucet) webHandler(w http.ResponseWriter, r *http.Request) {
// apiHandler handles requests for Ether grants and transaction statuses. // apiHandler handles requests for Ether grants and transaction statuses.
func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
ip := r.RemoteAddr
if len(r.Header.Get("X-Forwarded-For")) > 0 {
ips := strings.Split(r.Header.Get("X-Forwarded-For"), ",")
if len(ips) > 0 {
ip = strings.TrimSpace(ips[len(ips)-1])
}
}
if !f.limiter.GetLimiter(ip).Allow() {
log.Warn("Too many requests from client: ", "client", ip)
http.Error(w, "Too many requests", http.StatusTooManyRequests)
return
}
upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }} upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }}
conn, err := upgrader.Upgrade(w, r, nil) conn, err := upgrader.Upgrade(w, r, nil)
if err != nil { if err != nil {
@ -398,11 +354,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
Captcha string `json:"captcha"` Captcha string `json:"captcha"`
Symbol string `json:"symbol"` Symbol string `json:"symbol"`
} }
// not sure if it helps or not, but set a read deadline could help prevent resource leakage
// if user did not give response for too long, then the routine will be stuck.
conn.SetReadDeadline(time.Now().Add(wsReadTimeout))
if err = conn.ReadJSON(&msg); err != nil { if err = conn.ReadJSON(&msg); err != nil {
log.Debug("read json message failed", "err", err, "ip", ip)
return return
} }
if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://twitter.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") { if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://twitter.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") {
@ -420,9 +372,9 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
} }
continue continue
} }
log.Info("Faucet funds requested", "url", msg.URL, "tier", msg.Tier, "ip", ip) log.Info("Faucet funds requested", "url", msg.URL, "tier", msg.Tier)
// check #1: captcha verifications to exclude robot // If captcha verifications are enabled, make sure we're not dealing with a robot
if *captchaToken != "" { if *captchaToken != "" {
form := url.Values{} form := url.Values{}
form.Add("secret", *captchaSecret) form.Add("secret", *captchaSecret)
@ -499,55 +451,24 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
} }
continue continue
} }
log.Info("Faucet request valid", "url", msg.URL, "tier", msg.Tier, "user", username, "address", address)
// check #2: check IP and ID(address) to ensure the user didn't request funds too frequently // Ensure the user didn't request funds too recently
f.lock.Lock() f.lock.Lock()
var (
fund bool
timeout time.Time
)
if ipTimeout := f.timeouts[ips[len(ips)-2]]; time.Now().Before(ipTimeout) { if ipTimeout := f.timeouts[ips[len(ips)-2]]; time.Now().Before(ipTimeout) {
f.lock.Unlock()
if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(ipTimeout)))); err != nil { // nolint: gosimple if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(ipTimeout)))); err != nil { // nolint: gosimple
log.Warn("Failed to send funding error to client", "err", err) log.Warn("Failed to send funding error to client", "err", err)
return
} }
log.Info("too frequent funding(ip)", "TimeLeft", common.PrettyDuration(time.Until(ipTimeout)), "ip", ips[len(ips)-2], "ipsStr", ipsStr)
continue
}
if idTimeout := f.timeouts[id]; time.Now().Before(idTimeout) {
f.lock.Unlock() f.lock.Unlock()
// Send an error if too frequent funding, otherwise a success
if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(idTimeout)))); err != nil { // nolint: gosimple
log.Warn("Failed to send funding error to client", "err", err)
return
}
log.Info("too frequent funding(id)", "TimeLeft", common.PrettyDuration(time.Until(idTimeout)), "id", id)
continue continue
} }
// check #3: minimum mainnet balance check, internal error will bypass the check to avoid blocking the faucet service
if f.clientMainnet != nil {
mainnetAddr := address
balanceMainnet, err := f.clientMainnet.BalanceAt(context.Background(), mainnetAddr, nil)
if err != nil {
log.Warn("check balance failed, call BalanceAt", "err", err)
} else if balanceMainnet == nil {
log.Warn("check balance failed, balanceMainnet is nil")
} else {
if balanceMainnet.Cmp(minMainnetBalance) < 0 {
f.lock.Unlock()
log.Warn("insufficient BNB on BSC mainnet", "address", mainnetAddr,
"balanceMainnet", balanceMainnet, "minMainnetBalance", minMainnetBalance)
// Send an error if failed to meet the minimum balance requirement
if err = sendError(wsconn, fmt.Errorf("insufficient BNB on BSC mainnet (require >=%sBNB)",
weiToEtherStringFx(minMainnetBalance, 3))); err != nil {
log.Warn("Failed to send mainnet minimum balance error to client", "err", err)
return
}
continue
}
}
}
log.Info("Faucet request valid", "url", msg.URL, "tier", msg.Tier, "user", username, "address", address, "ip", ip)
// now, it is ok to send tBNB or other tokens if timeout = f.timeouts[id]; time.Now().After(timeout) {
var tx *types.Transaction var tx *types.Transaction
if msg.Symbol == "BNB" { if msg.Symbol == "BNB" {
// User wasn't funded recently, create the funding transaction // User wasn't funded recently, create the funding transaction
@@ -595,12 +516,23 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
 				Time:    time.Now(),
 				Tx:      signed,
 			})
-			timeoutInt64 := time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute
-			grace := timeoutInt64 / 288 // 24h timeout => 5m grace
-			f.timeouts[id] = time.Now().Add(timeoutInt64 - grace)
-			f.timeouts[ips[len(ips)-2]] = time.Now().Add(timeoutInt64 - grace)
+			timeout := time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute
+			grace := timeout / 288 // 24h timeout => 5m grace
+			f.timeouts[id] = time.Now().Add(timeout - grace)
+			f.timeouts[ips[len(ips)-2]] = time.Now().Add(timeout - grace)
+			fund = true
+		}
 		f.lock.Unlock()
+		// Send an error if too frequent funding, otherwise a success
+		if !fund {
+			if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple
+				log.Warn("Failed to send funding error to client", "err", err)
+				return
+			}
+			continue
+		}
 		if err = sendSuccess(wsconn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil {
 			log.Warn("Failed to send funding success to client", "err", err)
 			return
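Both sides derive the funding window from the same arithmetic: the base timeout triples per tier, and a small grace is subtracted so clients polling on the exact boundary are not penalized. A standalone sketch of that computation (the 1440-minute base value is illustrative, not taken from this diff):

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	minutes := 1440 // assumed faucet "minutes" flag value: a 24h base window
	for tier := 0; tier < 3; tier++ {
		// timeout triples per tier, exactly as in the hunk above
		timeout := time.Duration(minutes*int(math.Pow(3, float64(tier)))) * time.Minute
		grace := timeout / 288 // 24h timeout => 5m grace
		fmt.Printf("tier %d: next allowance after %v\n", tier, timeout-grace)
	}
}
```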
@@ -649,52 +581,9 @@ func (f *faucet) refresh(head *types.Header) error {
 	f.lock.Lock()
 	f.head, f.balance = head, balance
 	f.price, f.nonce = price, nonce
-	if len(f.reqs) == 0 {
-		log.Debug("refresh len(f.reqs) == 0", "f.nonce", f.nonce)
-		f.lock.Unlock()
-		return nil
-	}
-	if f.reqs[0].Tx.Nonce() == f.nonce {
-		// if the next Tx failed to be included for a certain time(resendInterval), try to
-		// resend it with higher gasPrice, as it could be discarded in the network.
-		// Also resend extra following txs, as they could be discarded as well.
-		if time.Now().After(f.reqs[0].Time.Add(resendInterval)) {
-			for i, req := range f.reqs {
-				if i >= resendBatchSize {
-					break
-				}
-				prePrice := req.Tx.GasPrice()
-				// bump gas price 20% to replace the previous tx
-				newPrice := new(big.Int).Add(prePrice, new(big.Int).Div(prePrice, big.NewInt(5)))
-				if newPrice.Cmp(resendMaxGasPrice) >= 0 {
-					log.Info("resendMaxGasPrice reached", "newPrice", newPrice, "resendMaxGasPrice", resendMaxGasPrice, "nonce", req.Tx.Nonce())
-					break
-				}
-				newTx := types.NewTransaction(req.Tx.Nonce(), *req.Tx.To(), req.Tx.Value(), req.Tx.Gas(), newPrice, req.Tx.Data())
-				newSigned, err := f.keystore.SignTx(f.account, newTx, f.config.ChainID)
-				if err != nil {
-					log.Error("resend sign tx failed", "err", err)
-				}
-				log.Info("reqs[0] Tx has been stuck for a while, trigger resend",
-					"resendInterval", resendInterval, "resendTxSize", resendBatchSize,
-					"preHash", req.Tx.Hash().Hex(), "newHash", newSigned.Hash().Hex(),
-					"newPrice", newPrice, "nonce", req.Tx.Nonce(), "req.Tx.Gas()", req.Tx.Gas())
-				if err := f.client.SendTransaction(context.Background(), newSigned); err != nil {
-					log.Warn("resend tx failed", "err", err)
-					continue
-				}
-				req.Tx = newSigned
-			}
-		}
-	}
-	// it is abnormal that reqs[0] has larger nonce than next expected nonce.
-	// could be caused by reorg? reset it
-	if f.reqs[0].Tx.Nonce() > f.nonce {
-		log.Warn("reset due to nonce gap", "f.nonce", f.nonce, "f.reqs[0].Tx.Nonce()", f.reqs[0].Tx.Nonce())
+	if len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() > f.nonce {
 		f.reqs = f.reqs[:0]
 	}
-	// remove the reqs if they have smaller nonce, which means it is no longer valid,
-	// either has been accepted or replaced.
 	for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce {
 		f.reqs = f.reqs[1:]
 	}
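The resend path removed on the right bumps the replacement gas price by 20% (`prePrice + prePrice/5`) and stops once a cap is hit. Only the formula comes from the hunk; the concrete prices below are illustrative:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	prePrice := big.NewInt(3_000_000_000)           // 3 gwei, illustrative
	resendMaxGasPrice := big.NewInt(10_000_000_000) // assumed cap, 10 gwei
	// bump gas price 20% to replace the previous tx, as in the resend loop above
	newPrice := new(big.Int).Add(prePrice, new(big.Int).Div(prePrice, big.NewInt(5)))
	if newPrice.Cmp(resendMaxGasPrice) >= 0 {
		fmt.Println("resendMaxGasPrice reached, stop replacing")
		return
	}
	fmt.Println("replacement gas price:", newPrice) // 3600000000
}
```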
@@ -736,7 +625,6 @@ func (f *faucet) loop() {
 			balance := new(big.Int).Div(f.balance, ether)

 			for _, conn := range f.conns {
-				go func(conn *wsConn) {
 				if err := send(conn, map[string]interface{}{
 					"funds":  balance,
 					"funded": f.nonce,
@@ -744,14 +632,12 @@ func (f *faucet) loop() {
 				}, time.Second); err != nil {
 					log.Warn("Failed to send stats to client", "err", err)
 					conn.conn.Close()
-					return // Exit the goroutine if the first send fails
+					continue
 				}
 				if err := send(conn, head, time.Second); err != nil {
 					log.Warn("Failed to send header to client", "err", err)
 					conn.conn.Close()
 				}
-				}(conn)
 			}
 			f.lock.RUnlock()
 		}
@@ -770,12 +656,10 @@ func (f *faucet) loop() {
 			// Pending requests updated, stream to clients
 			f.lock.RLock()
 			for _, conn := range f.conns {
-				go func(conn *wsConn) {
 				if err := send(conn, map[string]interface{}{"requests": f.reqs}, time.Second); err != nil {
 					log.Warn("Failed to send requests to client", "err", err)
 					conn.conn.Close()
 				}
-				}(conn)
 			}
 			f.lock.RUnlock()
 		}


@@ -1,44 +0,0 @@
package main
import (
lru "github.com/hashicorp/golang-lru"
"golang.org/x/time/rate"
)
type IPRateLimiter struct {
ips *lru.Cache // LRU cache to store IP addresses and their associated rate limiters
r rate.Limit // the rate limit, e.g., 5 requests per second
b int // the burst size, e.g., allowing a burst of 10 requests at once; the rate
// limiter only kicks in after this number is exceeded
}
func NewIPRateLimiter(r rate.Limit, b int, size int) (*IPRateLimiter, error) {
cache, err := lru.New(size)
if err != nil {
return nil, err
}
i := &IPRateLimiter{
ips: cache,
r: r,
b: b,
}
return i, nil
}
func (i *IPRateLimiter) addIP(ip string) *rate.Limiter {
limiter := rate.NewLimiter(i.r, i.b)
i.ips.Add(ip, limiter)
return limiter
}
func (i *IPRateLimiter) GetLimiter(ip string) *rate.Limiter {
if limiter, exists := i.ips.Get(ip); exists {
return limiter.(*rate.Limiter)
}
return i.addIP(ip)
}
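For context, this deleted helper keeps one token bucket per client IP in an LRU cache so the tracking map cannot grow without bound. A self-contained sketch of the same pattern, using the same two libraries (the rate, burst, cache size, port, and `/api` route are all illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	lru "github.com/hashicorp/golang-lru"
	"golang.org/x/time/rate"
)

func main() {
	cache, _ := lru.New(1024) // track at most 1024 distinct IPs
	getLimiter := func(ip string) *rate.Limiter {
		if l, ok := cache.Get(ip); ok {
			return l.(*rate.Limiter)
		}
		l := rate.NewLimiter(rate.Limit(5), 10) // 5 req/s steady state, bursts of 10
		cache.Add(ip, l)
		return l
	}
	http.HandleFunc("/api", func(w http.ResponseWriter, r *http.Request) {
		if !getLimiter(r.RemoteAddr).Allow() {
			http.Error(w, "too many requests", http.StatusTooManyRequests)
			return
		}
		fmt.Fprintln(w, "ok")
	})
	_ = http.ListenAndServe(":8080", nil)
}
```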


@@ -62,7 +62,6 @@ var (
 	ArgsUsage: "<genesisPath>",
 	Flags: flags.Merge([]cli.Flag{
 		utils.CachePreimagesFlag,
-		utils.OverridePassedForkTime,
 		utils.OverrideBohr,
 		utils.OverrideVerkle,
 		utils.MultiDataBaseFlag,
@@ -254,10 +253,6 @@ func initGenesis(ctx *cli.Context) error {
 	defer stack.Close()

 	var overrides core.ChainOverrides
-	if ctx.IsSet(utils.OverridePassedForkTime.Name) {
-		v := ctx.Uint64(utils.OverridePassedForkTime.Name)
-		overrides.OverridePassedForkTime = &v
-	}
 	if ctx.IsSet(utils.OverrideBohr.Name) {
 		v := ctx.Uint64(utils.OverrideBohr.Name)
 		overrides.OverrideBohr = &v


@@ -33,7 +33,6 @@ import (
 	"github.com/ethereum/go-ethereum/accounts/keystore"
 	"github.com/ethereum/go-ethereum/accounts/scwallet"
 	"github.com/ethereum/go-ethereum/accounts/usbwallet"
-	"github.com/ethereum/go-ethereum/beacon/fakebeacon"
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
@@ -97,7 +96,6 @@ type gethConfig struct {
 	Node       node.Config
 	Ethstats   ethstatsConfig
 	Metrics    metrics.Config
-	FakeBeacon fakebeacon.Config
 }

 func loadConfig(file string, cfg *gethConfig) error {
@@ -187,10 +185,6 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 		params.RialtoGenesisHash = common.HexToHash(v)
 	}
-	if ctx.IsSet(utils.OverridePassedForkTime.Name) {
-		v := ctx.Uint64(utils.OverridePassedForkTime.Name)
-		cfg.Eth.OverridePassedForkTime = &v
-	}
 	if ctx.IsSet(utils.OverrideBohr.Name) {
 		v := ctx.Uint64(utils.OverrideBohr.Name)
 		cfg.Eth.OverrideBohr = &v
@@ -212,9 +206,6 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 	if ctx.IsSet(utils.OverrideBreatheBlockInterval.Name) {
 		params.BreatheBlockInterval = ctx.Uint64(utils.OverrideBreatheBlockInterval.Name)
 	}
-	if ctx.IsSet(utils.OverrideFixedTurnLength.Name) {
-		params.FixedTurnLength = ctx.Uint64(utils.OverrideFixedTurnLength.Name)
-	}

 	backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
@@ -244,22 +235,11 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 		utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL)
 	}
-	if ctx.IsSet(utils.FakeBeaconAddrFlag.Name) {
-		cfg.FakeBeacon.Addr = ctx.String(utils.FakeBeaconAddrFlag.Name)
-	}
-	if ctx.IsSet(utils.FakeBeaconPortFlag.Name) {
-		cfg.FakeBeacon.Port = ctx.Int(utils.FakeBeaconPortFlag.Name)
-	}
-	if cfg.FakeBeacon.Enable || ctx.IsSet(utils.FakeBeaconEnabledFlag.Name) {
-		go fakebeacon.NewService(&cfg.FakeBeacon, backend).Run()
-	}
 	git, _ := version.VCS()
 	utils.SetupMetrics(ctx,
 		utils.EnableBuildInfo(git.Commit, git.Date),
 		utils.EnableMinerInfo(ctx, &cfg.Eth.Miner),
 		utils.EnableNodeInfo(&cfg.Eth.TxPool, stack.Server().NodeInfo()),
-		utils.EnableNodeTrack(ctx, &cfg.Eth, stack),
 	)
 	return stack, backend
 }


@@ -72,14 +72,12 @@ var (
 	utils.USBFlag,
 	utils.SmartCardDaemonPathFlag,
 	utils.RialtoHash,
-	utils.OverridePassedForkTime,
 	utils.OverrideBohr,
 	utils.OverrideVerkle,
 	utils.OverrideFullImmutabilityThreshold,
 	utils.OverrideMinBlocksForBlobRequests,
 	utils.OverrideDefaultExtraReserveForBlobRequests,
 	utils.OverrideBreatheBlockInterval,
-	utils.OverrideFixedTurnLength,
 	utils.EnablePersonal,
 	utils.TxPoolLocalsFlag,
 	utils.TxPoolNoLocalsFlag,
@@ -232,12 +230,6 @@ var (
 	utils.MetricsInfluxDBBucketFlag,
 	utils.MetricsInfluxDBOrganizationFlag,
 }
-fakeBeaconFlags = []cli.Flag{
-	utils.FakeBeaconEnabledFlag,
-	utils.FakeBeaconAddrFlag,
-	utils.FakeBeaconPortFlag,
-}
 )

 var app = flags.NewApp("the go-ethereum command line interface")
@@ -292,7 +284,6 @@ func init() {
 		consoleFlags,
 		debug.Flags,
 		metricsFlags,
-		fakeBeaconFlags,
 	)
 	flags.AutoEnvVars(app.Flags, "GETH")
@@ -378,6 +369,8 @@ func geth(ctx *cli.Context) error {
 // it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
 // miner.
 func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isConsole bool) {
+	debug.Memsize.Add("node", stack)
+
 	// Start up the node itself
 	utils.StartNode(ctx, stack, isConsole)
@@ -450,23 +443,22 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isCon
 	}

 	// Start auxiliary services if enabled
-	ethBackend, ok := backend.(*eth.EthAPIBackend)
-	gasCeil := ethBackend.Miner().GasCeil()
-	if gasCeil > params.SystemTxsGas {
-		ethBackend.TxPool().SetMaxGas(gasCeil - params.SystemTxsGas)
-	}
 	if ctx.Bool(utils.MiningEnabledFlag.Name) {
 		// Mining only makes sense if a full Ethereum node is running
 		if ctx.String(utils.SyncModeFlag.Name) == "light" {
 			utils.Fatalf("Light clients do not support mining")
 		}
+		ethBackend, ok := backend.(*eth.EthAPIBackend)
 		if !ok {
 			utils.Fatalf("Ethereum service not running")
 		}
 		// Set the gas price to the limits from the CLI and start mining
 		gasprice := flags.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
 		ethBackend.TxPool().SetGasTip(gasprice)
+		gasCeil := ethBackend.Miner().GasCeil()
+		if gasCeil > params.SystemTxsGas {
+			ethBackend.TxPool().SetMaxGas(gasCeil - params.SystemTxsGas)
+		}
 		if err := ethBackend.StartMining(); err != nil {
 			utils.Fatalf("Failed to start mining: %v", err)
 		}

@@ -33,7 +33,7 @@ node get_perf.js --rpc ${url} --startNum ${start} --endNum ${end}
output as follows
```bash
Get the performance between [ 19470 , 19670 )
-txCountPerBlock = 3142.81 txCountTotal = 628562 BlockCount = 200 avgBlockTime = 3.005 inturnBlocksRatio = 0.975 justifiedBlocksRatio = 0.98
+txCountPerBlock = 3142.81 txCountTotal = 628562 BlockCount = 200 avgBlockTime = 3.005 inturnBlocksRatio = 0.975
txCountPerSecond = 1045.8602329450914 avgGasUsedPerBlock = 250.02062627 avgGasUsedPerSecond = 83.20153952412646
```


@@ -12,9 +12,6 @@ const main = async () => {
 	let txCountTotal = 0;
 	let gasUsedTotal = 0;
 	let inturnBlocks = 0;
-	let justifiedBlocks = 0;
-	let turnLength = await provider.send("parlia_getTurnLength", [
-		ethers.toQuantity(program.startNum)]);
 	for (let i = program.startNum; i < program.endNum; i++) {
 		let txCount = await provider.send("eth_getBlockTransactionCountByNumber", [
 			ethers.toQuantity(i)]);
@@ -29,15 +26,7 @@ const main = async () => {
 			inturnBlocks += 1
 		}
 		let timestamp = eval(eval(header.timestamp).toString(10))
-		let justifiedNumber = await provider.send("parlia_getJustifiedNumber", [
-			ethers.toQuantity(i)]);
-		if (justifiedNumber + 1 == i) {
-			justifiedBlocks += 1
-		} else {
-			console.log("justified unexpected", "BlockNumber =", i, "justifiedNumber", justifiedNumber)
-		}
-		console.log("BlockNumber =", i, "mod =", i%turnLength, "miner =", header.miner, "difficulty =", difficulty, "txCount =", ethers.toNumber(txCount), "gasUsed", gasUsed, "timestamp", timestamp)
+		console.log("BlockNumber =", i, "mod =", i%4, "miner =", header.miner, "difficulty =", difficulty, "txCount =", ethers.toNumber(txCount), "gasUsed", gasUsed, "timestamp", timestamp)
 	}
 	let blockCount = program.endNum - program.startNum
@@ -52,14 +41,13 @@ const main = async () => {
 	let timeCost = endTime - startTime
 	let avgBlockTime = timeCost/blockCount
 	let inturnBlocksRatio = inturnBlocks/blockCount
-	let justifiedBlocksRatio = justifiedBlocks/blockCount
 	let tps = txCountTotal/timeCost
 	let M = 1000000
 	let avgGasUsedPerBlock = gasUsedTotal/blockCount/M
 	let avgGasUsedPerSecond = gasUsedTotal/timeCost/M

 	console.log("Get the performance between [", program.startNum, ",", program.endNum, ")");
-	console.log("txCountPerBlock =", txCountPerBlock, "txCountTotal =", txCountTotal, "BlockCount =", blockCount, "avgBlockTime =", avgBlockTime, "inturnBlocksRatio =", inturnBlocksRatio, "justifiedBlocksRatio =", justifiedBlocksRatio);
+	console.log("txCountPerBlock =", txCountPerBlock, "txCountTotal =", txCountTotal, "BlockCount =", blockCount, "avgBlockTime =", avgBlockTime, "inturnBlocksRatio =", inturnBlocksRatio);
 	console.log("txCountPerSecond =", tps, "avgGasUsedPerBlock =", avgGasUsedPerBlock, "avgGasUsedPerSecond =", avgGasUsedPerSecond);
 };


@@ -1,164 +0,0 @@
import { ethers } from "ethers";
import program from "commander";
// Global Options:
program.option("--rpc <rpc>", "Rpc");
// GetTxCount Options:
program.option("--startNum <startNum>", "start num")
program.option("--endNum <endNum>", "end num")
program.option("--miner <miner>", "miner", "")
// GetVersion Options:
program.option("--num <Num>", "validator num", 21)
// GetTopAddr Options:
program.option("--topNum <Num>", "top num of address to be displayed", 20)
program.parse(process.argv);
const provider = new ethers.JsonRpcProvider(program.rpc)
function printUsage() {
console.log("Usage:");
console.log(" node getchainstatus.js --help");
console.log(" node getchainstatus.js [subcommand] [options]");
console.log("\nSubcommands:");
console.log(" GetTxCount: find the block with max tx size of a range");
console.log(" GetVersion: dump validators' binary version, based on Header.Extra");
console.log(" GetTopAddr: get hottest $topNum target address within a block range");
console.log("\nOptions:");
console.log(" --rpc specify the url of RPC endpoint");
console.log(" --startNum the start block number, for command GetTxCount");
console.log(" --endNum the end block number, for command GetTxCount");
console.log(" --miner the miner address, for command GetTxCount");
console.log(" --num the number of blocks to be checked, for command GetVersion");
console.log(" --topNum the topNum of blocks to be checked, for command GetVersion");
console.log("\nExample:");
// mainnet https://bsc-mainnet.nodereal.io/v1/454e504917db4f82b756bd0cf6317dce
console.log(" node getchainstatus.js GetTxCount --rpc https://bsc-testnet-dataseed.bnbchain.org --startNum 40000001 --endNum 40000005")
console.log(" node getchainstatus.js GetVersion --rpc https://bsc-testnet-dataseed.bnbchain.org --num 21")
console.log(" node getchainstatus.js GetTopAddr --rpc https://bsc-testnet-dataseed.bnbchain.org --startNum 40000001 --endNum 40000010 --topNum 10")
}
// 1.cmd: "GetTxCount", usage:
// node getchainstatus.js GetTxCount --rpc https://bsc-testnet-dataseed.bnbchain.org \
// --startNum 40000001 --endNum 40000005 \
// --miner(optional): specified: find the max txCounter from the specified validator,
// not specified: find the max txCounter from all validators
async function getTxCount() {
let txCount = 0;
let num = 0;
console.log("Find the max txs count between", program.startNum, "and", program.endNum);
for (let i = program.startNum; i < program.endNum; i++) {
if (program.miner !== "") {
let blockData = await provider.getBlock(Number(i))
if (program.miner !== blockData.miner) {
continue
}
}
let x = await provider.send("eth_getBlockTransactionCountByNumber", [
ethers.toQuantity(i)]);
let a = ethers.toNumber(x)
if (a > txCount) {
num = i;
txCount = a;
}
}
console.log("BlockNum = ", num, "TxCount =", txCount);
}
// 2.cmd: "GetVersion", usage:
// node getchainstatus.js GetVersion \
// --rpc https://bsc-testnet-dataseed.bnbchain.org \
// --num(optional): default 21, the number of blocks that will be checked
async function getBinaryVersion() {
const blockNum = await provider.getBlockNumber();
console.log(blockNum);
for (let i = 0; i < program.num; i++) {
let blockData = await provider.getBlock(blockNum - i);
// 1.get Geth client version
let major = ethers.toNumber(ethers.dataSlice(blockData.extraData, 2, 3))
let minor = ethers.toNumber(ethers.dataSlice(blockData.extraData, 3, 4))
let patch = ethers.toNumber(ethers.dataSlice(blockData.extraData, 4, 5))
// 2.get minimum txGasPrice based on the last non-zero-gasprice transaction
let lastGasPrice = 0
for (let txIndex = blockData.transactions.length - 1; txIndex >= 0; txIndex--) {
let txHash = blockData.transactions[txIndex]
let txData = await provider.getTransaction(txHash);
if (txData.gasPrice == 0) {
continue
}
lastGasPrice = txData.gasPrice
break
}
console.log(blockData.miner, "version =", major + "." + minor + "." + patch, " MinGasPrice = " + lastGasPrice)
}
};
// 3.cmd: "GetTopAddr", usage:
// node getchainstatus.js GetTopAddr \
// --rpc https://bsc-testnet-dataseed.bnbchain.org \
// --startNum 40000001 --endNum 40000005 \
// --topNum(optional): the top num of address to be displayed, default 20
function getTopKElements(map, k) {
let entries = Array.from(map.entries());
entries.sort((a, b) => b[1] - a[1]);
return entries.slice(0, k);
}
async function getTopAddr() {
let countMap = new Map();
let totalTxs = 0
console.log("Find the top target address, between", program.startNum, "and", program.endNum);
for (let i = program.startNum; i <= program.endNum; i++) {
let blockData = await provider.getBlock(Number(i), true)
totalTxs += blockData.transactions.length
for (let txIndex = blockData.transactions.length - 1; txIndex >= 0; txIndex--) {
let txData = await blockData.getTransaction(txIndex)
if (txData.to == null) {
console.log("Contract creation,txHash:", txData.hash)
continue
}
let toAddr = txData.to;
if (countMap.has(toAddr)) {
countMap.set(toAddr, countMap.get(toAddr) + 1);
} else {
countMap.set(toAddr, 1);
}
}
console.log("progress:", (program.endNum-i), "blocks left", "totalTxs", totalTxs)
}
let tops = getTopKElements(countMap, program.topNum)
tops.forEach((value, key) => {
// value: [ '0x40661F989826CC641Ce1601526Bb16a4221412c8', 71 ]
console.log(key+":", value[0], " ", value[1], " ", ((value[1]*100)/totalTxs).toFixed(2)+"%");
});
};
const main = async () => {
if (process.argv.length <= 2) {
console.error('invalid process.argv.length', process.argv.length);
printUsage()
return
}
const cmd = process.argv[2]
if (cmd === "--help") {
printUsage()
return
}
if (cmd === "GetTxCount") {
await getTxCount()
} else if (cmd === "GetVersion") {
await getBinaryVersion()
} else if (cmd === "GetTopAddr") {
await getTopAddr()
} else {
console.log("unsupported cmd", cmd);
printUsage()
}
}
main().then(() => process.exit(0))
.catch((error) => {
console.error(error);
process.exit(1);
});

cmd/jsutils/gettxcount.js (new file, 41 lines)

@@ -0,0 +1,41 @@
import { ethers } from "ethers";
import program from "commander";
program.option("--rpc <rpc>", "Rpc");
program.option("--startNum <startNum>", "start num")
program.option("--endNum <endNum>", "end num")
// --miner:
// specified: find the max txCounter from the specified validator
// not specified: find the max txCounter from all validators
program.option("--miner <miner>", "miner", "")
program.parse(process.argv);
const provider = new ethers.JsonRpcProvider(program.rpc)
const main = async () => {
let txCount = 0;
let num = 0;
console.log("Find the max txs count between", program.startNum, "and", program.endNum);
for (let i = program.startNum; i < program.endNum; i++) {
if (program.miner !== "") {
let blockData = await provider.getBlock(Number(i))
if (program.miner !== blockData.miner) {
continue
}
}
let x = await provider.send("eth_getBlockTransactionCountByNumber", [
ethers.toQuantity(i)]);
let a = ethers.toNumber(x)
if (a > txCount) {
num = i;
txCount = a;
}
}
console.log("BlockNum = ", num, "TxCount =", txCount);
};
main().then(() => process.exit(0))
.catch((error) => {
console.error(error);
process.exit(1);
});


@@ -0,0 +1,38 @@
import { ethers } from "ethers";
import program from "commander";
program.option("--Rpc <Rpc>", "Rpc");
program.option("--Num <Num>", "validator num", 21)
program.parse(process.argv);
const provider = new ethers.JsonRpcProvider(program.Rpc);
const main = async () => {
const blockNum = await provider.getBlockNumber();
console.log(blockNum);
for (let i = 0; i < program.Num; i++) {
let blockData = await provider.getBlock(blockNum - i);
// 1.get Geth client version
let major = ethers.toNumber(ethers.dataSlice(blockData.extraData, 2, 3))
let minor = ethers.toNumber(ethers.dataSlice(blockData.extraData, 3, 4))
let patch = ethers.toNumber(ethers.dataSlice(blockData.extraData, 4, 5))
// 2.get minimum txGasPrice based on the last non-zero-gasprice transaction
let lastGasPrice = 0
for (let txIndex = blockData.transactions.length - 1; txIndex >= 0; txIndex--) {
let txHash = blockData.transactions[txIndex]
let txData = await provider.getTransaction(txHash);
if (txData.gasPrice == 0) {
continue
}
lastGasPrice = txData.gasPrice
break
}
console.log(blockData.miner, "version =", major + "." + minor + "." + patch, " MinGasPrice = " + lastGasPrice)
}
};
main().then(() => process.exit(0))
.catch((error) => {
console.error(error);
process.exit(1);
});


@@ -35,11 +35,8 @@ import (
 	"strings"
 	"time"

-	"github.com/ethereum/go-ethereum/internal/version"
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/accounts/keystore"
-	"github.com/ethereum/go-ethereum/beacon/fakebeacon"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/fdlimit"
 	"github.com/ethereum/go-ethereum/core"
@@ -308,11 +305,6 @@ var (
 		Usage:    "Manually specify the Rialto Genesis Hash, to trigger builtin network logic",
 		Category: flags.EthCategory,
 	}
-	OverridePassedForkTime = &cli.Uint64Flag{
-		Name:     "override.passedforktime",
-		Usage:    "Manually specify the hard fork timestamp except the last one, overriding the bundled setting",
-		Category: flags.EthCategory,
-	}
 	OverrideBohr = &cli.Uint64Flag{
 		Name:     "override.bohr",
 		Usage:    "Manually specify the Bohr fork timestamp, overriding the bundled setting",
@@ -347,12 +339,6 @@ var (
 		Value:    params.BreatheBlockInterval,
 		Category: flags.EthCategory,
 	}
-	OverrideFixedTurnLength = &cli.Uint64Flag{
-		Name:     "override.fixedturnlength",
-		Usage:    "Use fixed or random values for the turn length instead of reading from the contract, for testing purposes only",
-		Value:    params.FixedTurnLength,
-		Category: flags.EthCategory,
-	}
 	SyncModeFlag = &flags.TextMarshalerFlag{
 		Name:  "syncmode",
 		Usage: `Blockchain sync mode ("snap" or "full")`,
@@ -1149,25 +1135,6 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
 		Value:    params.DefaultExtraReserveForBlobRequests,
 		Category: flags.MiscCategory,
 	}
// Fake beacon
FakeBeaconEnabledFlag = &cli.BoolFlag{
Name: "fake-beacon",
Usage: "Enable the HTTP-RPC server of fake-beacon",
Category: flags.APICategory,
}
FakeBeaconAddrFlag = &cli.StringFlag{
Name: "fake-beacon.addr",
Usage: "HTTP-RPC server listening addr of fake-beacon",
Value: fakebeacon.DefaultAddr,
Category: flags.APICategory,
}
FakeBeaconPortFlag = &cli.IntFlag{
Name: "fake-beacon.port",
Usage: "HTTP-RPC server listening port of fake-beacon",
Value: fakebeacon.DefaultPort,
Category: flags.APICategory,
}
 )

 var (
@@ -2319,67 +2286,6 @@ func EnableNodeInfo(poolConfig *legacypool.Config, nodeInfo *p2p.NodeInfo) Setup
 	}
 }
func EnableNodeTrack(ctx *cli.Context, cfg *ethconfig.Config, stack *node.Node) SetupMetricsOption {
nodeInfo := stack.Server().NodeInfo()
return func() {
// register node info into metrics
metrics.NewRegisteredLabel("node-stats", nil).Mark(map[string]interface{}{
"NodeType": parseNodeType(),
"ENR": nodeInfo.ENR,
"Mining": ctx.Bool(MiningEnabledFlag.Name),
"Etherbase": parseEtherbase(cfg),
"MiningFeatures": parseMiningFeatures(ctx, cfg),
"DBFeatures": parseDBFeatures(cfg, stack),
})
}
}
func parseEtherbase(cfg *ethconfig.Config) string {
if cfg.Miner.Etherbase == (common.Address{}) {
return ""
}
return cfg.Miner.Etherbase.String()
}
func parseNodeType() string {
git, _ := version.VCS()
version := []string{params.VersionWithMeta}
if len(git.Commit) >= 7 {
version = append(version, git.Commit[:7])
}
if git.Date != "" {
version = append(version, git.Date)
}
arch := []string{runtime.GOOS, runtime.GOARCH}
infos := []string{"BSC", strings.Join(version, "-"), strings.Join(arch, "-"), runtime.Version()}
return strings.Join(infos, "/")
}
func parseDBFeatures(cfg *ethconfig.Config, stack *node.Node) string {
var features []string
if cfg.StateScheme == rawdb.PathScheme {
features = append(features, "PBSS")
}
if stack.CheckIfMultiDataBase() {
features = append(features, "MultiDB")
}
return strings.Join(features, "|")
}
func parseMiningFeatures(ctx *cli.Context, cfg *ethconfig.Config) string {
if !ctx.Bool(MiningEnabledFlag.Name) {
return ""
}
var features []string
if cfg.Miner.Mev.Enabled {
features = append(features, "MEV")
}
if cfg.Miner.VoteEnable {
features = append(features, "FFVoting")
}
return strings.Join(features, "|")
}
 func SetupMetrics(ctx *cli.Context, options ...SetupMetricsOption) {
 	if metrics.Enabled {
 		log.Info("Enabling metrics collection")


@@ -59,9 +59,6 @@ type ChainHeaderReader interface {
 	// GetHighestVerifiedHeader retrieves the highest header verified.
 	GetHighestVerifiedHeader() *types.Header

-	// GetVerifiedBlockByHash retrieves the highest verified block.
-	GetVerifiedBlockByHash(hash common.Hash) *types.Header

 	// ChasingHead return the best chain head of peers.
 	ChasingHead() *types.Header
 }


@@ -2306,19 +2306,6 @@ const validatorSetABI = `
 		],
 		"stateMutability": "view"
 	},
-	{
-		"inputs": [],
-		"name": "getTurnLength",
-		"outputs": [
-			{
-				"internalType": "uint256",
-				"name": "",
-				"type": "uint256"
-			}
-		],
-		"stateMutability": "view",
-		"type": "function"
-	},
 	{
 		"type": "function",
 		"name": "getValidators",


@@ -31,7 +31,13 @@ type API struct {
 // GetSnapshot retrieves the state snapshot at a given block.
 func (api *API) GetSnapshot(number *rpc.BlockNumber) (*Snapshot, error) {
-	header := api.getHeader(number)
+	// Retrieve the requested block number (or current if none requested)
+	var header *types.Header
+	if number == nil || *number == rpc.LatestBlockNumber {
+		header = api.chain.CurrentHeader()
+	} else {
+		header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
+	}
 	// Ensure we have an actually valid block and return its snapshot
 	if header == nil {
 		return nil, errUnknownBlock
@@ -50,7 +56,13 @@ func (api *API) GetSnapshotAtHash(hash common.Hash) (*Snapshot, error) {
 // GetValidators retrieves the list of validators at the specified block.
 func (api *API) GetValidators(number *rpc.BlockNumber) ([]common.Address, error) {
-	header := api.getHeader(number)
+	// Retrieve the requested block number (or current if none requested)
+	var header *types.Header
+	if number == nil || *number == rpc.LatestBlockNumber {
+		header = api.chain.CurrentHeader()
+	} else {
+		header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
+	}
 	// Ensure we have an actually valid block and return the validators from its snapshot
 	if header == nil {
 		return nil, errUnknownBlock
@@ -74,65 +86,3 @@ func (api *API) GetValidatorsAtHash(hash common.Hash) ([]common.Address, error)
 	}
 	return snap.validators(), nil
 }
func (api *API) GetJustifiedNumber(number *rpc.BlockNumber) (uint64, error) {
header := api.getHeader(number)
// Ensure we have an actually valid block and return the validators from its snapshot
if header == nil {
return 0, errUnknownBlock
}
snap, err := api.parlia.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
if err != nil || snap.Attestation == nil {
return 0, err
}
return snap.Attestation.TargetNumber, nil
}
func (api *API) GetTurnLength(number *rpc.BlockNumber) (uint8, error) {
header := api.getHeader(number)
// Ensure we have an actually valid block and return the validators from its snapshot
if header == nil {
return 0, errUnknownBlock
}
snap, err := api.parlia.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
if err != nil || snap.TurnLength == 0 {
return 0, err
}
return snap.TurnLength, nil
}
func (api *API) GetFinalizedNumber(number *rpc.BlockNumber) (uint64, error) {
header := api.getHeader(number)
// Ensure we have an actually valid block and return the validators from its snapshot
if header == nil {
return 0, errUnknownBlock
}
snap, err := api.parlia.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
if err != nil || snap.Attestation == nil {
return 0, err
}
return snap.Attestation.SourceNumber, nil
}
func (api *API) getHeader(number *rpc.BlockNumber) (header *types.Header) {
currentHeader := api.chain.CurrentHeader()
if number == nil || *number == rpc.LatestBlockNumber {
header = currentHeader // current if none requested
} else if *number == rpc.SafeBlockNumber {
justifiedNumber, _, err := api.parlia.GetJustifiedNumberAndHash(api.chain, []*types.Header{currentHeader})
if err != nil {
return nil
}
header = api.chain.GetHeaderByNumber(justifiedNumber)
} else if *number == rpc.FinalizedBlockNumber {
header = api.parlia.GetFinalizedHeader(api.chain, currentHeader)
} else if *number == rpc.PendingBlockNumber {
return nil // no pending blocks on bsc
} else if *number == rpc.EarliestBlockNumber {
header = api.chain.GetHeaderByNumber(0)
} else {
header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
}
return
}
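These removed accessors back the `parlia_*` RPC methods that the jsutils scripts above call (`parlia_getJustifiedNumber`, `parlia_getTurnLength`). A minimal Go client sketch of such a call; it assumes an endpoint that still exposes the parlia namespace, and the URL is illustrative:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("https://bsc-testnet-dataseed.bnbchain.org") // assumed endpoint
	if err != nil {
		panic(err)
	}
	defer client.Close()

	var turnLength uint8
	// "latest" mirrors number == nil in the Go API above (current header)
	if err := client.Call(&turnLength, "parlia_getTurnLength", "latest"); err != nil {
		panic(err)
	}
	fmt.Println("turnLength =", turnLength)
}
```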


@@ -1,91 +0,0 @@
package parlia
import (
"context"
"errors"
"math/big"
mrand "math/rand"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/systemcontracts"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
)
func (p *Parlia) getTurnLength(chain consensus.ChainHeaderReader, header *types.Header) (*uint8, error) {
parent := chain.GetHeaderByHash(header.ParentHash)
if parent == nil {
return nil, errors.New("parent not found")
}
var turnLength uint8
if p.chainConfig.IsBohr(parent.Number, parent.Time) {
turnLengthFromContract, err := p.getTurnLengthFromContract(parent)
if err != nil {
return nil, err
}
if turnLengthFromContract == nil {
return nil, errors.New("unexpected error when getTurnLengthFromContract")
}
turnLength = uint8(turnLengthFromContract.Int64())
} else {
turnLength = defaultTurnLength
}
log.Debug("getTurnLength", "turnLength", turnLength)
return &turnLength, nil
}
func (p *Parlia) getTurnLengthFromContract(header *types.Header) (turnLength *big.Int, err error) {
// mock to get turnLength from the contract
if params.FixedTurnLength >= 1 && params.FixedTurnLength <= 9 {
if params.FixedTurnLength == 2 {
return p.getRandTurnLength(header)
}
return big.NewInt(int64(params.FixedTurnLength)), nil
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
method := "getTurnLength"
toAddress := common.HexToAddress(systemcontracts.ValidatorContract)
gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2))
data, err := p.validatorSetABI.Pack(method)
if err != nil {
log.Error("Unable to pack tx for getTurnLength", "error", err)
return nil, err
}
msgData := (hexutil.Bytes)(data)
blockNr := rpc.BlockNumberOrHashWithHash(header.Hash(), false)
result, err := p.ethAPI.Call(ctx, ethapi.TransactionArgs{
Gas: &gas,
To: &toAddress,
Data: &msgData,
}, &blockNr, nil, nil)
if err != nil {
return nil, err
}
if err := p.validatorSetABI.UnpackIntoInterface(&turnLength, method, result); err != nil {
return nil, err
}
return turnLength, nil
}
// getRandTurnLength returns a random valid value, used to test switching turn length
func (p *Parlia) getRandTurnLength(header *types.Header) (turnLength *big.Int, err error) {
turnLengths := [8]uint8{1, 3, 4, 5, 6, 7, 8, 9}
r := mrand.New(mrand.NewSource(int64(header.Time)))
lengthIndex := int(r.Int31n(int32(len(turnLengths))))
return big.NewInt(int64(turnLengths[lengthIndex])), nil
}
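Note that `getRandTurnLength` above is only pseudo-random: it seeds `math/rand` with `header.Time`, so every validator derives the same "random" turn length for the same block. A standalone sketch of that determinism (the timestamps are arbitrary):

```go
package main

import (
	"fmt"
	mrand "math/rand"
)

// Mirrors getRandTurnLength: a pure function of the header timestamp.
func randTurnLength(headerTime uint64) uint8 {
	turnLengths := [8]uint8{1, 3, 4, 5, 6, 7, 8, 9}
	r := mrand.New(mrand.NewSource(int64(headerTime)))
	return turnLengths[int(r.Int31n(int32(len(turnLengths))))]
}

func main() {
	for _, t := range []uint64{1724900000, 1724900003} {
		fmt.Println(t, "=>", randTurnLength(t)) // same input always yields the same length
	}
}
```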


@@ -6,7 +6,6 @@ import (
 	"encoding/hex"
 	"errors"
 	"fmt"
-	"io"
 	"math"
 	"math/big"
 	"math/rand"
@@ -54,13 +53,11 @@ const (
 	inMemoryHeaders = 86400 // Number of recent headers to keep in memory for double sign detection

 	checkpointInterval = 1024        // Number of blocks after which to save the snapshot to the database
-	defaultEpochLength = uint64(200) // Default number of blocks of checkpoint to update validatorSet from contract
-	defaultTurnLength  = uint8(1)    // Default consecutive number of blocks a validator receives priority for block production
+	defaultEpochLength = uint64(100) // Default number of blocks of checkpoint to update validatorSet from contract

 	extraVanity      = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
 	extraSeal        = 65 // Fixed number of extra-data suffix bytes reserved for signer seal
 	nextForkHashSize = 4  // Fixed number of extra-data suffix bytes reserved for nextForkHash.
-	turnLengthSize   = 1  // Fixed number of extra-data suffix bytes reserved for turnLength

 	validatorBytesLengthBeforeLuban = common.AddressLength
 	validatorBytesLength            = common.AddressLength + types.BLSPublicKeyLength
@@ -68,6 +65,7 @@ const (
 	wiggleTime         = uint64(1) // second, Random delay (per signer) to allow concurrent signers
 	initialBackOffTime = uint64(1) // second
+	processBackOffTime = uint64(1) // second

 	systemRewardPercent = 4 // it means 1/2^4 = 1/16 percentage of gas fee incoming will be distributed to system
@@ -128,10 +126,6 @@ var (
 	// invalid list of validators (i.e. non divisible by 20 bytes).
 	errInvalidSpanValidators = errors.New("invalid validator list on sprint end block")

-	// errInvalidTurnLength is returned if a block contains an
-	// invalid length of turn (i.e. no data left after parsing validators).
-	errInvalidTurnLength = errors.New("invalid turnLength")

 	// errInvalidMixDigest is returned if a block's mix digest is non-zero.
 	errInvalidMixDigest = errors.New("non-zero mix digest")
@@ -142,10 +136,6 @@ var (
 	// list of validators different than the one the local node calculated.
 	errMismatchingEpochValidators = errors.New("mismatching validator list on epoch block")

-	// errMismatchingEpochTurnLength is returned if a sprint block contains a
-	// turn length different than the one the local node calculated.
-	errMismatchingEpochTurnLength = errors.New("mismatching turn length on epoch block")

 	// errInvalidDifficulty is returned if the difficulty of a block is missing.
 	errInvalidDifficulty = errors.New("invalid difficulty")
@@ -379,7 +369,6 @@ func (p *Parlia) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*typ
 // On luban fork, we introduce vote attestation into the header's extra field, so extra format is different from before.
 // Before luban fork: |---Extra Vanity---|---Validators Bytes (or Empty)---|---Extra Seal---|
 // After luban fork:  |---Extra Vanity---|---Validators Number and Validators Bytes (or Empty)---|---Vote Attestation (or Empty)---|---Extra Seal---|
-// After bohr fork:   |---Extra Vanity---|---Validators Number and Validators Bytes (or Empty)---|---Turn Length (or Empty)---|---Vote Attestation (or Empty)---|---Extra Seal---|
 func getValidatorBytesFromHeader(header *types.Header, chainConfig *params.ChainConfig, parliaConfig *params.ParliaConfig) []byte {
 	if len(header.Extra) <= extraVanity+extraSeal {
 		return nil
@@ -396,15 +385,11 @@ func getValidatorBytesFromHeader(header *types.Header, chainConfig *params.Chain
 		return nil
 	}
 	num := int(header.Extra[extraVanity])
-	start := extraVanity + validatorNumberSize
-	end := start + num*validatorBytesLength
-	extraMinLen := end + extraSeal
-	if chainConfig.IsBohr(header.Number, header.Time) {
-		extraMinLen += turnLengthSize
-	}
-	if num == 0 || len(header.Extra) < extraMinLen {
+	if num == 0 || len(header.Extra) <= extraVanity+extraSeal+num*validatorBytesLength {
 		return nil
 	}
+	start := extraVanity + validatorNumberSize
+	end := start + num*validatorBytesLength
 	return header.Extra[start:end]
 }
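All the size checks in this hunk derive from the extra-data layout sketched in the comment above; the left side just reserves one extra byte for the turn length after Bohr. A sketch of the minimum valid `Extra` length under that layout (sizes copied from the const block; 20+48 is address plus BLS public key):

```go
package main

import "fmt"

const (
	extraVanity          = 32      // prefix bytes reserved for signer vanity
	extraSeal            = 65      // suffix bytes reserved for signer seal
	validatorNumberSize  = 1       // one byte for the validator count
	validatorBytesLength = 20 + 48 // address + BLS public key per validator
	turnLengthSize       = 1       // one byte for turnLength, Bohr only
)

// minExtraLen is the smallest valid Extra length for an epoch block with n
// validators; after Bohr one more byte is reserved for the turn length.
func minExtraLen(n int, bohr bool) int {
	l := extraVanity + validatorNumberSize + n*validatorBytesLength + extraSeal
	if bohr {
		l += turnLengthSize
	}
	return l
}

func main() {
	fmt.Println("21 validators, pre-Bohr: ", minExtraLen(21, false)) // 1526
	fmt.Println("21 validators, post-Bohr:", minExtraLen(21, true))  // 1527
}
```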
@@ -423,14 +408,11 @@ func getVoteAttestationFromHeader(header *types.Header, chainConfig *params.Chai
 		attestationBytes = header.Extra[extraVanity : len(header.Extra)-extraSeal]
 	} else {
 		num := int(header.Extra[extraVanity])
-		start := extraVanity + validatorNumberSize + num*validatorBytesLength
-		if chainConfig.IsBohr(header.Number, header.Time) {
-			start += turnLengthSize
-		}
-		end := len(header.Extra) - extraSeal
-		if end <= start {
+		if len(header.Extra) <= extraVanity+extraSeal+validatorNumberSize+num*validatorBytesLength {
 			return nil, nil
 		}
+		start := extraVanity + validatorNumberSize + num*validatorBytesLength
+		end := len(header.Extra) - extraSeal
 		attestationBytes = header.Extra[start:end]
 	}
@@ -622,11 +604,15 @@ func (p *Parlia) verifyHeader(chain consensus.ChainHeaderReader, header *types.H
 			return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas)
 		case header.BlobGasUsed != nil:
 			return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed)
+		case header.ParentBeaconRoot != nil:
+			return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", header.ParentBeaconRoot)
 		case header.WithdrawalsHash != nil:
 			return fmt.Errorf("invalid WithdrawalsHash, have %#x, expected nil", header.WithdrawalsHash)
 		}
 	} else {
 		switch {
+		case header.ParentBeaconRoot != nil:
+			return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", header.ParentBeaconRoot)
 		case !header.EmptyWithdrawalsHash():
 			return errors.New("header has wrong WithdrawalsHash")
 		}
@@ -635,17 +621,6 @@ func (p *Parlia) verifyHeader(chain consensus.ChainHeaderReader, header *types.H
 		}
 	}

-	bohr := chain.Config().IsBohr(header.Number, header.Time)
-	if !bohr {
-		if header.ParentBeaconRoot != nil {
-			return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", header.ParentBeaconRoot)
-		}
-	} else {
-		if header.ParentBeaconRoot == nil || *header.ParentBeaconRoot != (common.Hash{}) {
-			return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected zero hash", header.ParentBeaconRoot)
-		}
-	}
 	// All basic checks passed, verify cascading fields
 	return p.verifyCascadingFields(chain, header, parents)
 }
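Condensed, the removed Bohr branch enforces: `ParentBeaconRoot` must be absent before Bohr and must be present but equal to the zero hash afterwards. A sketch of that rule in isolation (the helper name is hypothetical, not from the diff):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// checkParentBeaconRoot condenses the two branches removed above.
func checkParentBeaconRoot(root *common.Hash, bohr bool) error {
	if !bohr {
		if root != nil {
			return errors.New("parentBeaconRoot must be nil before Bohr")
		}
		return nil
	}
	if root == nil || *root != (common.Hash{}) {
		return errors.New("parentBeaconRoot must be the zero hash after Bohr")
	}
	return nil
}

func main() {
	fmt.Println(checkParentBeaconRoot(nil, false))           // <nil>
	fmt.Println(checkParentBeaconRoot(&common.Hash{}, true)) // <nil>
}
```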
@@ -738,28 +713,13 @@ func (p *Parlia) snapshot(chain consensus.ChainHeaderReader, number uint64, hash
 			}
 		}

-		// If we're at the genesis, snapshot the initial state. Alternatively if we have
-		// piled up more headers than allowed to be reorged (chain reinit from a freezer),
-		// consider the checkpoint trusted and snapshot it.
-		// An offset `p.config.Epoch - 1` can ensure getting the right validators.
-		if number == 0 || ((number+1)%p.config.Epoch == 0 && (len(headers) > int(params.FullImmutabilityThreshold))) {
-			var (
-				checkpoint *types.Header
-				blockHash  common.Hash
-			)
-			if number == 0 {
-				checkpoint = chain.GetHeaderByNumber(0)
-				if checkpoint != nil {
-					blockHash = checkpoint.Hash()
-				}
-			} else {
-				checkpoint = chain.GetHeaderByNumber(number + 1 - p.config.Epoch)
-				blockHeader := chain.GetHeaderByNumber(number)
-				if blockHeader != nil {
-					blockHash = blockHeader.Hash()
-				}
-			}
-			if checkpoint != nil && blockHash != (common.Hash{}) {
+		// If we're at the genesis, snapshot the initial state.
+		if number == 0 {
+			checkpoint := chain.GetHeaderByNumber(number)
+			if checkpoint != nil {
+				// get checkpoint data
+				hash := checkpoint.Hash()

 				// get validators from headers
 				validators, voteAddrs, err := parseValidators(checkpoint, p.chainConfig, p.config)
 				if err != nil {
@@ -767,27 +727,11 @@ func (p *Parlia) snapshot(chain consensus.ChainHeaderReader, number uint64, hash
 				}

 				// new snapshot
-				snap = newSnapshot(p.config, p.signatures, number, blockHash, validators, voteAddrs, p.ethAPI)
-
-				// get turnLength from headers and use that for new turnLength
-				turnLength, err := parseTurnLength(checkpoint, p.chainConfig, p.config)
-				if err != nil {
-					return nil, err
-				}
-				if turnLength != nil {
-					snap.TurnLength = *turnLength
-				}
-
-				// snap.Recents is currently empty, which affects the following:
-				// a. The function SignRecently - This is acceptable since an empty snap.Recents results in a more lenient check.
-				// b. The function blockTimeVerifyForRamanujanFork - This is also acceptable as it won't be invoked during `snap.apply`.
-				// c. This may cause a mismatch in the slash systemtx, but the transaction list is not verified during `snap.apply`.
-				// snap.Attestation is nil, but Snapshot.updateAttestation will handle it correctly.
+				snap = newSnapshot(p.config, p.signatures, number, hash, validators, voteAddrs, p.ethAPI)
 				if err := snap.store(p.db); err != nil {
 					return nil, err
 				}
-				log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", blockHash)
+				log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", hash)
 				break
 			}
 		}
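On the left, the trusted-checkpoint path reads validators from the header at `number + 1 - Epoch`; the `Epoch - 1` offset mentioned in the removed comment is what lines an epoch-end snapshot up with the header that announced its validator set. A worked example of that offset (the epoch value is illustrative):

```go
package main

import "fmt"

// checkpointNumber mirrors the left side: for a snapshot at an epoch-end
// block ((number+1)%epoch == 0), validators come from the header that
// announced them, one full epoch earlier plus one.
func checkpointNumber(number, epoch uint64) uint64 {
	if number == 0 {
		return 0 // genesis snapshots read the genesis header itself
	}
	return number + 1 - epoch
}

func main() {
	// epoch = 200: the snapshot at 39999 uses the validator set from 39800
	fmt.Println(checkpointNumber(39999, 200)) // 39800
}
```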
@@ -944,24 +888,6 @@ func (p *Parlia) prepareValidators(header *types.Header) error {
 	return nil
 }
func (p *Parlia) prepareTurnLength(chain consensus.ChainHeaderReader, header *types.Header) error {
if header.Number.Uint64()%p.config.Epoch != 0 ||
!p.chainConfig.IsBohr(header.Number, header.Time) {
return nil
}
turnLength, err := p.getTurnLength(chain, header)
if err != nil {
return err
}
if turnLength != nil {
header.Extra = append(header.Extra, *turnLength)
}
return nil
}
 func (p *Parlia) assembleVoteAttestation(chain consensus.ChainHeaderReader, header *types.Header) error {
 	if !p.chainConfig.IsLuban(header.Number) || header.Number.Uint64() < 2 {
 		return nil
@@ -1093,9 +1019,6 @@ func (p *Parlia) Prepare(chain consensus.ChainHeaderReader, header *types.Header
 		return err
 	}
-	if err := p.prepareTurnLength(chain, header); err != nil {
-		return err
-	}
 	// add extra seal space
 	header.Extra = append(header.Extra, make([]byte, extraSeal)...)
@@ -1146,30 +1069,6 @@ func (p *Parlia) verifyValidators(header *types.Header) error {
 	return nil
 }
func (p *Parlia) verifyTurnLength(chain consensus.ChainHeaderReader, header *types.Header) error {
if header.Number.Uint64()%p.config.Epoch != 0 ||
!p.chainConfig.IsBohr(header.Number, header.Time) {
return nil
}
turnLengthFromHeader, err := parseTurnLength(header, p.chainConfig, p.config)
if err != nil {
return err
}
if turnLengthFromHeader != nil {
turnLength, err := p.getTurnLength(chain, header)
if err != nil {
return err
}
if turnLength != nil && *turnLength == *turnLengthFromHeader {
log.Debug("verifyTurnLength", "turnLength", *turnLength)
return nil
}
}
return errMismatchingEpochTurnLength
}
 func (p *Parlia) distributeFinalityReward(chain consensus.ChainHeaderReader, state *state.StateDB, header *types.Header,
 	cx core.ChainContext, txs *[]*types.Transaction, receipts *[]*types.Receipt, systemTxs *[]*types.Transaction,
 	usedGas *uint64, mining bool) error {
@@ -1264,10 +1163,6 @@ func (p *Parlia) Finalize(chain consensus.ChainHeaderReader, header *types.Heade
 		return err
 	}
-	if err := p.verifyTurnLength(chain, header); err != nil {
-		return err
-	}
 	cx := chainContext{Chain: chain, parlia: p}

 	parent := chain.GetHeaderByHash(header.ParentHash)
@@ -1294,7 +1189,7 @@ func (p *Parlia) Finalize(chain consensus.ChainHeaderReader, header *types.Heade
 		}
 	}
 	if header.Difficulty.Cmp(diffInTurn) != 0 {
-		spoiledVal := snap.inturnValidator()
+		spoiledVal := snap.supposeValidator()
 		signedRecently := false
 		if p.chainConfig.IsPlato(header.Number) {
 			signedRecently = snap.SignRecently(spoiledVal)
@@ -1385,7 +1280,7 @@ func (p *Parlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
 	if err != nil {
 		return nil, nil, err
 	}
-	spoiledVal := snap.inturnValidator()
+	spoiledVal := snap.supposeValidator()
 	signedRecently := false
 	if p.chainConfig.IsPlato(header.Number) {
 		signedRecently = snap.SignRecently(spoiledVal)
@@ -1467,7 +1362,7 @@ func (p *Parlia) IsActiveValidatorAt(chain consensus.ChainHeaderReader, header *
 func (p *Parlia) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteEnvelope) error {
 	targetNumber := vote.Data.TargetNumber
 	targetHash := vote.Data.TargetHash
-	header := chain.GetVerifiedBlockByHash(targetHash)
+	header := chain.GetHeaderByHash(targetHash)
 	if header == nil {
 		log.Warn("BlockHeader at current voteBlockNumber is nil", "targetNumber", targetNumber, "targetHash", targetHash)
 		return errors.New("BlockHeader at current voteBlockNumber is nil")
@@ -1538,13 +1433,10 @@ func (p *Parlia) Delay(chain consensus.ChainReader, header *types.Header, leftOv
 		delay = delay - *leftOver
 	}

-	// The blocking time should be no more than half of period when snap.TurnLength == 1
-	timeForMining := time.Duration(p.config.Period) * time.Second / 2
-	if !snap.lastBlockInOneTurn(header.Number.Uint64()) {
-		timeForMining = time.Duration(p.config.Period) * time.Second * 2 / 3
-	}
-	if delay > timeForMining {
-		delay = timeForMining
+	// The blocking time should be no more than half of period
+	half := time.Duration(p.config.Period) * time.Second / 2
+	if delay > half {
+		delay = half
 	}
 	return &delay
 }
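The left side caps the sealing delay at half the period only on the last block of a validator's turn, and at two thirds of it otherwise; the right side always uses half. A sketch of the left-side rule (a 3-second period is used for illustration):

```go
package main

import (
	"fmt"
	"time"
)

// capDelay reproduces the left side's rule: more headroom is left on
// non-final blocks of a multi-block turn.
func capDelay(delay time.Duration, period uint64, lastBlockInTurn bool) time.Duration {
	timeForMining := time.Duration(period) * time.Second / 2
	if !lastBlockInTurn {
		timeForMining = time.Duration(period) * time.Second * 2 / 3
	}
	if delay > timeForMining {
		delay = timeForMining
	}
	return delay
}

func main() {
	fmt.Println(capDelay(5*time.Second, 3, true))  // 1.5s
	fmt.Println(capDelay(5*time.Second, 3, false)) // 2s
}
```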
@@ -1615,15 +1507,12 @@ func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
 	copy(header.Extra[len(header.Extra)-extraSeal:], sig)

 	if p.shouldWaitForCurrentBlockProcess(chain, header, snap) {
-		highestVerifiedHeader := chain.GetHighestVerifiedHeader()
-		// including time for writing and committing blocks
-		waitProcessEstimate := math.Ceil(float64(highestVerifiedHeader.GasUsed) / float64(100_000_000))
-		log.Info("Waiting for received in turn block to process", "waitProcessEstimate(Seconds)", waitProcessEstimate)
+		log.Info("Waiting for received in turn block to process")
 		select {
 		case <-stop:
 			log.Info("Received block process finished, abort block seal")
 			return
-		case <-time.After(time.Duration(waitProcessEstimate) * time.Second):
+		case <-time.After(time.Duration(processBackOffTime) * time.Second):
 			if chain.CurrentHeader().Number.Uint64() >= header.Number.Uint64() {
 				log.Info("Process backoff time exhausted, and current header has updated to abort this seal")
 				return
@ -1705,35 +1594,11 @@ func CalcDifficulty(snap *Snapshot, signer common.Address) *big.Int {
return new(big.Int).Set(diffNoTurn) return new(big.Int).Set(diffNoTurn)
} }
func encodeSigHeaderWithoutVoteAttestation(w io.Writer, header *types.Header, chainId *big.Int) {
err := rlp.Encode(w, []interface{}{
chainId,
header.ParentHash,
header.UncleHash,
header.Coinbase,
header.Root,
header.TxHash,
header.ReceiptHash,
header.Bloom,
header.Difficulty,
header.Number,
header.GasLimit,
header.GasUsed,
header.Time,
header.Extra[:extraVanity], // this will panic if extra is too short, should check before calling encodeSigHeaderWithoutVoteAttestation
header.MixDigest,
header.Nonce,
})
if err != nil {
panic("can't encode: " + err.Error())
}
}
// SealHash returns the hash of a block without vote attestation prior to it being sealed. // SealHash returns the hash of a block without vote attestation prior to it being sealed.
// So it's not the real hash of a block, just used as a unique id to distinguish tasks // So it's not the real hash of a block, just used as a unique id to distinguish tasks
func (p *Parlia) SealHash(header *types.Header) (hash common.Hash) { func (p *Parlia) SealHash(header *types.Header) (hash common.Hash) {
hasher := sha3.NewLegacyKeccak256() hasher := sha3.NewLegacyKeccak256()
encodeSigHeaderWithoutVoteAttestation(hasher, header, p.chainConfig.ChainID) types.EncodeSigHeaderWithoutVoteAttestation(hasher, header, p.chainConfig.ChainID)
hasher.Sum(hash[:0]) hasher.Sum(hash[:0])
return hash return hash
} }
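Both sides of the SealHash hunk hash the same way; only the encoder moved (a package-local helper on one side, types.EncodeSigHeaderWithoutVoteAttestation on the other). The shared idiom, sketched below with a deliberately shortened field list, is to stream RLP straight into a legacy Keccak-256 hasher and let Sum write the digest into the zero-length slice of a fixed-size array:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
	"golang.org/x/crypto/sha3"
)

type digest [32]byte

// sealHash RLP-encodes the given fields into a Keccak-256 hasher. The real
// code encodes the full header field list; this sketch shortens it.
func sealHash(fields ...interface{}) (hash digest) {
	hasher := sha3.NewLegacyKeccak256()
	if err := rlp.Encode(hasher, fields); err != nil {
		panic("can't encode: " + err.Error())
	}
	hasher.Sum(hash[:0]) // writes into the array's backing storage, no copy
	return hash
}

func main() {
	fmt.Printf("%x\n", sealHash(uint64(56), "parentHash", uint64(1_000_000)))
}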
@ -2039,40 +1904,42 @@ func (p *Parlia) GetFinalizedHeader(chain consensus.ChainHeaderReader, header *t
// =========================== utility function ========================== // =========================== utility function ==========================
func (p *Parlia) backOffTime(snap *Snapshot, header *types.Header, val common.Address) uint64 { func (p *Parlia) backOffTime(snap *Snapshot, header *types.Header, val common.Address) uint64 {
if snap.inturn(val) { if snap.inturn(val) {
log.Debug("backOffTime", "blockNumber", header.Number, "in turn validator", val)
return 0 return 0
} else { } else {
delay := initialBackOffTime delay := initialBackOffTime
validators := snap.validators() validators := snap.validators()
if p.chainConfig.IsPlanck(header.Number) { if p.chainConfig.IsPlanck(header.Number) {
counts := snap.countRecents() // reverse the key/value of snap.Recents to get recentsMap
for addr, seenTimes := range counts { recentsMap := make(map[common.Address]uint64, len(snap.Recents))
log.Debug("backOffTime", "blockNumber", header.Number, "validator", addr, "seenTimes", seenTimes) bound := uint64(0)
if n, limit := header.Number.Uint64(), uint64(len(validators)/2+1); n > limit {
bound = n - limit
}
for seen, recent := range snap.Recents {
if seen <= bound {
continue
}
recentsMap[recent] = seen
} }
// The backOffTime does not matter when a validator has signed recently. // The backOffTime does not matter when a validator has signed recently.
if snap.signRecentlyByCounts(val, counts) { if _, ok := recentsMap[val]; ok {
return 0 return 0
} }
inTurnAddr := snap.inturnValidator() inTurnAddr := validators[(snap.Number+1)%uint64(len(validators))]
if snap.signRecentlyByCounts(inTurnAddr, counts) { if _, ok := recentsMap[inTurnAddr]; ok {
log.Debug("in turn validator has recently signed, skip initialBackOffTime", log.Debug("in turn validator has recently signed, skip initialBackOffTime",
"inTurnAddr", inTurnAddr) "inTurnAddr", inTurnAddr)
delay = 0 delay = 0
} }
// Exclude the recently signed validators and the in turn validator // Exclude the recently signed validators
temp := make([]common.Address, 0, len(validators)) temp := make([]common.Address, 0, len(validators))
for _, addr := range validators { for _, addr := range validators {
if snap.signRecentlyByCounts(addr, counts) { if _, ok := recentsMap[addr]; ok {
continue continue
} }
if p.chainConfig.IsBohr(header.Number, header.Time) {
if addr == inTurnAddr {
continue
}
}
temp = append(temp, addr) temp = append(temp, addr)
} }
validators = temp validators = temp
@ -2090,11 +1957,7 @@ func (p *Parlia) backOffTime(snap *Snapshot, header *types.Header, val common.Ad
return 0 return 0
} }
randSeed := snap.Number s := rand.NewSource(int64(snap.Number))
if p.chainConfig.IsBohr(header.Number, header.Time) {
randSeed = header.Number.Uint64() / uint64(snap.TurnLength)
}
s := rand.NewSource(int64(randSeed))
r := rand.New(s) r := rand.New(s)
n := len(validators) n := len(validators)
backOffSteps := make([]uint64, 0, n) backOffSteps := make([]uint64, 0, n)
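The diff is truncated here, but past this point both variants turn the seeded source into a permutation of the candidate back-off steps. Because every node seeds math/rand with the same value (the snapshot number on one side, the turn index header.Number/TurnLength on the other), all honest nodes derive the same schedule and no two waiting validators pick the same extra delay. A sketch of one way to complete the scheme, assuming a shuffle over the step list:

package main

import (
	"fmt"
	"math/rand"
)

// backOffSteps derives a deterministic permutation of 0..n-1 from the seed,
// so the i-th eligible validator can take the i-th shuffled step as delay.
func backOffSteps(seed int64, n int) []uint64 {
	r := rand.New(rand.NewSource(seed))
	steps := make([]uint64, n)
	for i := range steps {
		steps[i] = uint64(i)
	}
	r.Shuffle(n, func(i, j int) { steps[i], steps[j] = steps[j], steps[i] })
	return steps
}

func main() {
	// Identical seed => identical permutation on every node.
	fmt.Println(backOffSteps(1024, 5))
	fmt.Println(backOffSteps(1024, 5))
}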

View File

@ -22,44 +22,22 @@ func TestImpactOfValidatorOutOfService(t *testing.T) {
testCases := []struct { testCases := []struct {
totalValidators int totalValidators int
downValidators int downValidators int
turnLength int
}{ }{
{3, 1, 1}, {3, 1},
{5, 2, 1}, {5, 2},
{10, 1, 2}, {10, 1},
{10, 4, 2}, {10, 4},
{21, 1, 3}, {21, 1},
{21, 3, 3}, {21, 3},
{21, 5, 4}, {21, 5},
{21, 10, 5}, {21, 10},
} }
for _, tc := range testCases { for _, tc := range testCases {
simulateValidatorOutOfService(tc.totalValidators, tc.downValidators, tc.turnLength) simulateValidatorOutOfService(tc.totalValidators, tc.downValidators)
} }
} }
// refer Snapshot.SignRecently func simulateValidatorOutOfService(totalValidators int, downValidators int) {
func signRecently(idx int, recents map[uint64]int, turnLength int) bool {
recentSignTimes := 0
for _, signIdx := range recents {
if signIdx == idx {
recentSignTimes += 1
}
}
return recentSignTimes >= turnLength
}
// refer Snapshot.minerHistoryCheckLen
func minerHistoryCheckLen(totalValidators int, turnLength int) uint64 {
return uint64(totalValidators/2+1)*uint64(turnLength) - 1
}
// refer Snapshot.inturnValidator
func inturnValidator(totalValidators int, turnLength int, height int) int {
return height / turnLength % totalValidators
}
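The three helpers dropped from this test alongside turn-length support pin down the rotation rule: with turnLength consecutive blocks per validator, the proposer index advances once per turn rather than once per block. A quick check of the rotation:

package main

import "fmt"

// inturnValidator mirrors the removed test helper of the same name.
func inturnValidator(totalValidators, turnLength, height int) int {
	return height / turnLength % totalValidators
}

func main() {
	// 3 validators, turnLength 3: each keeps priority for three heights.
	for h := 0; h < 10; h++ {
		fmt.Print(inturnValidator(3, 3, h), " ") // prints 0 0 0 1 1 1 2 2 2 0
	}
	fmt.Println()
}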
func simulateValidatorOutOfService(totalValidators int, downValidators int, turnLength int) {
downBlocks := 10000 downBlocks := 10000
recoverBlocks := 10000 recoverBlocks := 10000
recents := make(map[uint64]int) recents := make(map[uint64]int)
@ -77,7 +55,12 @@ func simulateValidatorOutOfService(totalValidators int, downValidators int, turn
delete(validators, down[i]) delete(validators, down[i])
} }
isRecentSign := func(idx int) bool { isRecentSign := func(idx int) bool {
return signRecently(idx, recents, turnLength) for _, signIdx := range recents {
if signIdx == idx {
return true
}
}
return false
} }
isInService := func(idx int) bool { isInService := func(idx int) bool {
return validators[idx] return validators[idx]
@ -85,10 +68,10 @@ func simulateValidatorOutOfService(totalValidators int, downValidators int, turn
downDelay := uint64(0) downDelay := uint64(0)
for h := 1; h <= downBlocks; h++ { for h := 1; h <= downBlocks; h++ {
if limit := minerHistoryCheckLen(totalValidators, turnLength) + 1; uint64(h) >= limit { if limit := uint64(totalValidators/2 + 1); uint64(h) >= limit {
delete(recents, uint64(h)-limit) delete(recents, uint64(h)-limit)
} }
proposer := inturnValidator(totalValidators, turnLength, h) proposer := h % totalValidators
if !isInService(proposer) || isRecentSign(proposer) { if !isInService(proposer) || isRecentSign(proposer) {
candidates := make(map[int]bool, totalValidators/2) candidates := make(map[int]bool, totalValidators/2)
for v := range validators { for v := range validators {
@ -116,10 +99,10 @@ func simulateValidatorOutOfService(totalValidators int, downValidators int, turn
recoverDelay := uint64(0) recoverDelay := uint64(0)
lastseen := downBlocks lastseen := downBlocks
for h := downBlocks + 1; h <= downBlocks+recoverBlocks; h++ { for h := downBlocks + 1; h <= downBlocks+recoverBlocks; h++ {
if limit := minerHistoryCheckLen(totalValidators, turnLength) + 1; uint64(h) >= limit { if limit := uint64(totalValidators/2 + 1); uint64(h) >= limit {
delete(recents, uint64(h)-limit) delete(recents, uint64(h)-limit)
} }
proposer := inturnValidator(totalValidators, turnLength, h) proposer := h % totalValidators
if !isInService(proposer) || isRecentSign(proposer) { if !isInService(proposer) || isRecentSign(proposer) {
lastseen = h lastseen = h
candidates := make(map[int]bool, totalValidators/2) candidates := make(map[int]bool, totalValidators/2)

View File

@ -22,7 +22,6 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"math"
"sort" "sort"
lru "github.com/hashicorp/golang-lru" lru "github.com/hashicorp/golang-lru"
@ -44,7 +43,6 @@ type Snapshot struct {
Number uint64 `json:"number"` // Block number where the snapshot was created Number uint64 `json:"number"` // Block number where the snapshot was created
Hash common.Hash `json:"hash"` // Block hash where the snapshot was created Hash common.Hash `json:"hash"` // Block hash where the snapshot was created
TurnLength uint8 `json:"turn_length"` // Length of `turn`, meaning the consecutive number of blocks a validator receives priority for block production
Validators map[common.Address]*ValidatorInfo `json:"validators"` // Set of authorized validators at this moment Validators map[common.Address]*ValidatorInfo `json:"validators"` // Set of authorized validators at this moment
Recents map[uint64]common.Address `json:"recents"` // Set of recent validators for spam protections Recents map[uint64]common.Address `json:"recents"` // Set of recent validators for spam protections
RecentForkHashes map[uint64]string `json:"recent_fork_hashes"` // Set of recent forkHash RecentForkHashes map[uint64]string `json:"recent_fork_hashes"` // Set of recent forkHash
@ -74,7 +72,6 @@ func newSnapshot(
sigCache: sigCache, sigCache: sigCache,
Number: number, Number: number,
Hash: hash, Hash: hash,
TurnLength: defaultTurnLength,
Recents: make(map[uint64]common.Address), Recents: make(map[uint64]common.Address),
RecentForkHashes: make(map[uint64]string), RecentForkHashes: make(map[uint64]string),
Validators: make(map[common.Address]*ValidatorInfo), Validators: make(map[common.Address]*ValidatorInfo),
@ -117,10 +114,6 @@ func loadSnapshot(config *params.ParliaConfig, sigCache *lru.ARCCache, db ethdb.
if err := json.Unmarshal(blob, snap); err != nil { if err := json.Unmarshal(blob, snap); err != nil {
return nil, err return nil, err
} }
if snap.TurnLength == 0 { // no TurnLength field in old snapshots
snap.TurnLength = defaultTurnLength
}
snap.config = config snap.config = config
snap.sigCache = sigCache snap.sigCache = sigCache
snap.ethAPI = ethAPI snap.ethAPI = ethAPI
@ -145,7 +138,6 @@ func (s *Snapshot) copy() *Snapshot {
sigCache: s.sigCache, sigCache: s.sigCache,
Number: s.Number, Number: s.Number,
Hash: s.Hash, Hash: s.Hash,
TurnLength: s.TurnLength,
Validators: make(map[common.Address]*ValidatorInfo), Validators: make(map[common.Address]*ValidatorInfo),
Recents: make(map[uint64]common.Address), Recents: make(map[uint64]common.Address),
RecentForkHashes: make(map[uint64]string), RecentForkHashes: make(map[uint64]string),
@ -218,45 +210,17 @@ func (s *Snapshot) updateAttestation(header *types.Header, chainConfig *params.C
} }
} }
func (s *Snapshot) versionHistoryCheckLen() uint64 { func (s *Snapshot) SignRecently(validator common.Address) bool {
return uint64(len(s.Validators)) * uint64(s.TurnLength)
}
func (s *Snapshot) minerHistoryCheckLen() uint64 {
return (uint64(len(s.Validators))/2+1)*uint64(s.TurnLength) - 1
}
func (s *Snapshot) countRecents() map[common.Address]uint8 {
leftHistoryBound := uint64(0) // the bound is excluded
checkHistoryLength := s.minerHistoryCheckLen()
if s.Number > checkHistoryLength {
leftHistoryBound = s.Number - checkHistoryLength
}
counts := make(map[common.Address]uint8, len(s.Validators))
for seen, recent := range s.Recents { for seen, recent := range s.Recents {
if seen <= leftHistoryBound || recent == (common.Address{}) /*when seen == `epochKey`*/ { if recent == validator {
continue if limit := uint64(len(s.Validators)/2 + 1); s.Number+1 < limit || seen > s.Number+1-limit {
}
counts[recent] += 1
}
return counts
}
func (s *Snapshot) signRecentlyByCounts(validator common.Address, counts map[common.Address]uint8) bool {
if seenTimes, ok := counts[validator]; ok && seenTimes >= s.TurnLength {
if seenTimes > s.TurnLength {
log.Warn("produce more blocks than expected!", "validator", validator, "seenTimes", seenTimes)
}
return true return true
} }
}
}
return false return false
} }
func (s *Snapshot) SignRecently(validator common.Address) bool {
return s.signRecentlyByCounts(validator, s.countRecents())
}
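The counting scheme removed in this diff works in two steps: countRecents tallies how often each validator appears inside the trailing minerHistoryCheckLen blocks, and signRecentlyByCounts flags a validator once its tally reaches TurnLength. A self-contained sketch of the tally, using string stand-ins for addresses and the empty string in place of the epochKey sentinel:

package main

import "fmt"

// countRecents tallies signers inside the trailing history window; entries at
// or before the exclusive left bound, and the sentinel entry, are skipped.
func countRecents(recents map[uint64]string, number, window uint64) map[string]uint8 {
	bound := uint64(0)
	if number > window {
		bound = number - window
	}
	counts := make(map[string]uint8, len(recents))
	for seen, val := range recents {
		if seen <= bound || val == "" { // "" mimics the epochKey placeholder
			continue
		}
		counts[val]++
	}
	return counts
}

func main() {
	recents := map[uint64]string{8: "A", 9: "B", 10: "A"}
	counts := countRecents(recents, 10, 3) // window covers blocks 8..10
	turnLength := uint8(2)
	fmt.Println(counts["A"] >= turnLength) // true: A signed twice in the window
	fmt.Println(counts["B"] >= turnLength) // false
}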
func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderReader, parents []*types.Header, chainConfig *params.ChainConfig) (*Snapshot, error) { func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderReader, parents []*types.Header, chainConfig *params.ChainConfig) (*Snapshot, error) {
// Allow passing in no headers for cleaner code // Allow passing in no headers for cleaner code
if len(headers) == 0 { if len(headers) == 0 {
@ -283,10 +247,10 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea
for _, header := range headers { for _, header := range headers {
number := header.Number.Uint64() number := header.Number.Uint64()
// Delete the oldest validator from the recent list to allow it signing again // Delete the oldest validator from the recent list to allow it signing again
if limit := snap.minerHistoryCheckLen() + 1; number >= limit { if limit := uint64(len(snap.Validators)/2 + 1); number >= limit {
delete(snap.Recents, number-limit) delete(snap.Recents, number-limit)
} }
if limit := snap.versionHistoryCheckLen(); number >= limit { if limit := uint64(len(snap.Validators)); number >= limit {
delete(snap.RecentForkHashes, number-limit) delete(snap.RecentForkHashes, number-limit)
} }
// Resolve the authorization key and check against signers // Resolve the authorization key and check against signers
@ -297,47 +261,19 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea
if _, ok := snap.Validators[validator]; !ok { if _, ok := snap.Validators[validator]; !ok {
return nil, errUnauthorizedValidator(validator.String()) return nil, errUnauthorizedValidator(validator.String())
} }
if chainConfig.IsBohr(header.Number, header.Time) {
if snap.SignRecently(validator) {
return nil, errRecentlySigned
}
} else {
for _, recent := range snap.Recents { for _, recent := range snap.Recents {
if recent == validator { if recent == validator {
return nil, errRecentlySigned return nil, errRecentlySigned
} }
} }
}
snap.Recents[number] = validator snap.Recents[number] = validator
snap.RecentForkHashes[number] = hex.EncodeToString(header.Extra[extraVanity-nextForkHashSize : extraVanity])
snap.updateAttestation(header, chainConfig, s.config)
// change validator set // change validator set
if number > 0 && number%s.config.Epoch == snap.minerHistoryCheckLen() { if number > 0 && number%s.config.Epoch == uint64(len(snap.Validators)/2) {
epochKey := math.MaxUint64 - header.Number.Uint64()/s.config.Epoch // can never occur as a real block number checkpointHeader := FindAncientHeader(header, uint64(len(snap.Validators)/2), chain, parents)
if chainConfig.IsBohr(header.Number, header.Time) {
// after switching the validator set, snap.Validators may become larger,
// then the unexpected second switch will happen, just skip it.
if _, ok := snap.Recents[epochKey]; ok {
continue
}
}
checkpointHeader := FindAncientHeader(header, snap.minerHistoryCheckLen(), chain, parents)
if checkpointHeader == nil { if checkpointHeader == nil {
return nil, consensus.ErrUnknownAncestor return nil, consensus.ErrUnknownAncestor
} }
oldVersionsLen := snap.versionHistoryCheckLen()
// get turnLength from headers and use that for new turnLength
turnLength, err := parseTurnLength(checkpointHeader, chainConfig, s.config)
if err != nil {
return nil, err
}
if turnLength != nil {
snap.TurnLength = *turnLength
log.Debug("validator set switch", "turnLength", *turnLength)
}
// get validators from headers and use that for new validator set // get validators from headers and use that for new validator set
newValArr, voteAddrs, err := parseValidators(checkpointHeader, chainConfig, s.config) newValArr, voteAddrs, err := parseValidators(checkpointHeader, chainConfig, s.config)
if err != nil { if err != nil {
@ -353,12 +289,6 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea
} }
} }
} }
if chainConfig.IsBohr(header.Number, header.Time) {
// BEP-404: Clear Miner History when Switching Validators Set
snap.Recents = make(map[uint64]common.Address)
snap.Recents[epochKey] = common.Address{}
log.Debug("Recents are cleared up", "blockNumber", number)
} else {
oldLimit := len(snap.Validators)/2 + 1 oldLimit := len(snap.Validators)/2 + 1
newLimit := len(newVals)/2 + 1 newLimit := len(newVals)/2 + 1
if newLimit < oldLimit { if newLimit < oldLimit {
@ -366,6 +296,12 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea
delete(snap.Recents, number-uint64(newLimit)-uint64(i)) delete(snap.Recents, number-uint64(newLimit)-uint64(i))
} }
} }
oldLimit = len(snap.Validators)
newLimit = len(newVals)
if newLimit < oldLimit {
for i := 0; i < oldLimit-newLimit; i++ {
delete(snap.RecentForkHashes, number-uint64(newLimit)-uint64(i))
}
} }
snap.Validators = newVals snap.Validators = newVals
if chainConfig.IsLuban(header.Number) { if chainConfig.IsLuban(header.Number) {
@ -374,10 +310,11 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea
snap.Validators[val].Index = idx + 1 // offset by 1 snap.Validators[val].Index = idx + 1 // offset by 1
} }
} }
for i := snap.versionHistoryCheckLen(); i < oldVersionsLen; i++ {
delete(snap.RecentForkHashes, number-i)
}
} }
snap.updateAttestation(header, chainConfig, s.config)
snap.RecentForkHashes[number] = hex.EncodeToString(header.Extra[extraVanity-nextForkHashSize : extraVanity])
} }
snap.Number += uint64(len(headers)) snap.Number += uint64(len(headers))
snap.Hash = headers[len(headers)-1].Hash() snap.Hash = headers[len(headers)-1].Hash()
@ -394,20 +331,17 @@ func (s *Snapshot) validators() []common.Address {
return validators return validators
} }
// lastBlockInOneTurn returns whether the block at height `blockNumber` is the last block in the current turn.
func (s *Snapshot) lastBlockInOneTurn(blockNumber uint64) bool {
return (blockNumber+1)%uint64(s.TurnLength) == 0
}
// inturn returns if a validator at a given block height is in-turn or not. // inturn returns if a validator at a given block height is in-turn or not.
func (s *Snapshot) inturn(validator common.Address) bool { func (s *Snapshot) inturn(validator common.Address) bool {
return s.inturnValidator() == validator validators := s.validators()
offset := (s.Number + 1) % uint64(len(validators))
return validators[offset] == validator
} }
// inturnValidator returns the validator for the following block height. // inturnValidator returns the validator at a given block height.
func (s *Snapshot) inturnValidator() common.Address { func (s *Snapshot) inturnValidator() common.Address {
validators := s.validators() validators := s.validators()
offset := (s.Number + 1) / uint64(s.TurnLength) % uint64(len(validators)) offset := (s.Number + 1) % uint64(len(validators))
return validators[offset] return validators[offset]
} }
@ -445,6 +379,12 @@ func (s *Snapshot) indexOfVal(validator common.Address) int {
return -1 return -1
} }
func (s *Snapshot) supposeValidator() common.Address {
validators := s.validators()
index := (s.Number + 1) % uint64(len(validators))
return validators[index]
}
func parseValidators(header *types.Header, chainConfig *params.ChainConfig, parliaConfig *params.ParliaConfig) ([]common.Address, []types.BLSPublicKey, error) { func parseValidators(header *types.Header, chainConfig *params.ChainConfig, parliaConfig *params.ParliaConfig) ([]common.Address, []types.BLSPublicKey, error) {
validatorsBytes := getValidatorBytesFromHeader(header, chainConfig, parliaConfig) validatorsBytes := getValidatorBytesFromHeader(header, chainConfig, parliaConfig)
if len(validatorsBytes) == 0 { if len(validatorsBytes) == 0 {
@ -470,24 +410,6 @@ func parseValidators(header *types.Header, chainConfig *params.ChainConfig, parl
return cnsAddrs, voteAddrs, nil return cnsAddrs, voteAddrs, nil
} }
func parseTurnLength(header *types.Header, chainConfig *params.ChainConfig, parliaConfig *params.ParliaConfig) (*uint8, error) {
if header.Number.Uint64()%parliaConfig.Epoch != 0 ||
!chainConfig.IsBohr(header.Number, header.Time) {
return nil, nil
}
if len(header.Extra) <= extraVanity+extraSeal {
return nil, errInvalidSpanValidators
}
num := int(header.Extra[extraVanity])
pos := extraVanity + validatorNumberSize + num*validatorBytesLength
if len(header.Extra) <= pos {
return nil, errInvalidTurnLength
}
turnLength := header.Extra[pos]
return &turnLength, nil
}
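parseTurnLength, removed in this diff, reads a single byte sitting directly behind the validator table in header.Extra. A sketch of that layout arithmetic; the sizes are stated here as assumptions matching BSC after Luban (32-byte vanity, 1-byte validator count, 68 bytes per validator, 65-byte trailing seal):

package main

import (
	"errors"
	"fmt"
)

const (
	extraVanity          = 32 // fixed vanity prefix
	validatorNumberSize  = 1  // one byte holding the validator count
	validatorBytesLength = 68 // 20-byte address + 48-byte BLS public key
	extraSeal            = 65 // trailing secp256k1 signature
)

// parseTurnLength returns the turn-length byte placed after the validator table.
func parseTurnLength(extra []byte) (uint8, error) {
	if len(extra) <= extraVanity+extraSeal {
		return 0, errors.New("invalid extra data")
	}
	num := int(extra[extraVanity]) // validator count byte
	pos := extraVanity + validatorNumberSize + num*validatorBytesLength
	if len(extra) <= pos {
		return 0, errors.New("invalid turn length")
	}
	return extra[pos], nil
}

func main() {
	extra := make([]byte, extraVanity+validatorNumberSize+2*validatorBytesLength+1+extraSeal)
	extra[extraVanity] = 2                                            // two validators
	extra[extraVanity+validatorNumberSize+2*validatorBytesLength] = 4 // turnLength = 4
	fmt.Println(parseTurnLength(extra)) // 4 <nil>
}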
func FindAncientHeader(header *types.Header, ite uint64, chain consensus.ChainHeaderReader, candidateParents []*types.Header) *types.Header { func FindAncientHeader(header *types.Header, ite uint64, chain consensus.ChainHeaderReader, candidateParents []*types.Header) *types.Header {
ancient := header ancient := header
for i := uint64(1); i <= ite; i++ { for i := uint64(1); i <= ite; i++ {

View File

@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
) )
@ -40,6 +41,12 @@ func EnableRemoteVerifyManager(remoteValidator *remoteVerifyManager) BlockValida
} }
} }
var (
validateBloomTimer = metrics.NewRegisteredTimer("validate/bloom/time", nil)
validateReceiptTimer = metrics.NewRegisteredTimer("validate/receipt/time", nil)
validateRootTimer = metrics.NewRegisteredTimer("validate/root/time", nil)
)
// BlockValidator is responsible for validating block headers, uncles and // BlockValidator is responsible for validating block headers, uncles and
// processed state. // processed state.
// //
@ -66,6 +73,31 @@ func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engin
return validator return validator
} }
// ValidateListsInBody validates that UncleHash, TxHash, and WithdrawalsHash correspond to the lists in the block body, respectively.
func ValidateListsInBody(block *types.Block) error {
header := block.Header()
if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash)
}
if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
}
// Withdrawals are present after the Shanghai fork.
if header.WithdrawalsHash != nil {
// Withdrawals list must be present in body after Shanghai.
if block.Withdrawals() == nil {
return errors.New("missing withdrawals in block body")
}
if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash {
return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash)
}
} else if block.Withdrawals() != nil { // Withdrawals turn from nil into an empty list when the BlockBody has Sidecars
// Withdrawals are not allowed prior to the Shanghai fork
return errors.New("withdrawals present in block body")
}
return nil
}
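ValidateListsInBody, factored out here, bundles the three header-vs-body consistency checks so they can run as one unit. The withdrawals branch encodes a nil-vs-empty distinction that is easy to get wrong; a toy sketch of just that rule, with the hash derivation stubbed out:

package main

import (
	"errors"
	"fmt"
)

// checkWithdrawals mirrors the rule above: the header hash and the body list
// must be present together or absent together.
func checkWithdrawals(headerHasHash bool, bodyList []int) error {
	if headerHasHash {
		if bodyList == nil {
			return errors.New("missing withdrawals in block body")
		}
		return nil // the root hashes would be compared here
	}
	if bodyList != nil { // nil becomes an empty list once sidecars are attached
		return errors.New("withdrawals present in block body")
	}
	return nil
}

func main() {
	fmt.Println(checkWithdrawals(true, nil))      // missing withdrawals error
	fmt.Println(checkWithdrawals(false, []int{})) // unexpected withdrawals error
	fmt.Println(checkWithdrawals(true, []int{}))  // <nil>
}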
// ValidateBody validates the given block's uncles and verifies the block // ValidateBody validates the given block's uncles and verifies the block
// header's transaction and uncle roots. The headers are assumed to be already // header's transaction and uncle roots. The headers are assumed to be already
// validated at this point. // validated at this point.
@ -83,31 +115,12 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
if err := v.engine.VerifyUncles(v.bc, block); err != nil { if err := v.engine.VerifyUncles(v.bc, block); err != nil {
return err return err
} }
if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash)
}
validateFuns := []func() error{ validateFuns := []func() error{
func() error { func() error {
if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash { return ValidateListsInBody(block)
return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
}
return nil
}, },
func() error { func() error {
// Withdrawals are present after the Shanghai fork.
if header.WithdrawalsHash != nil {
// Withdrawals list must be present in body after Shanghai.
if block.Withdrawals() == nil {
return errors.New("missing withdrawals in block body")
}
if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash {
return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash)
}
} else if block.Withdrawals() != nil { // Withdrawals turn from nil into an empty list when the BlockBody has Sidecars
// Withdrawals are not allowed prior to the Shanghai fork
return errors.New("withdrawals present in block body")
}
// Blob transactions may be present after the Cancun fork. // Blob transactions may be present after the Cancun fork.
var blobs int var blobs int
for i, tx := range block.Transactions() { for i, tx := range block.Transactions() {
@ -178,6 +191,10 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
// For valid blocks this should always validate to true. // For valid blocks this should always validate to true.
validateFuns := []func() error{ validateFuns := []func() error{
func() error { func() error {
defer func(start time.Time) {
validateBloomTimer.UpdateSince(start)
}(time.Now())
rbloom := types.CreateBloom(receipts) rbloom := types.CreateBloom(receipts)
if rbloom != header.Bloom { if rbloom != header.Bloom {
return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom) return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom)
@ -185,6 +202,9 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
return nil return nil
}, },
func() error { func() error {
defer func(start time.Time) {
validateReceiptTimer.UpdateSince(start)
}(time.Now())
receiptSha := types.DeriveSha(receipts, trie.NewStackTrie(nil)) receiptSha := types.DeriveSha(receipts, trie.NewStackTrie(nil))
if receiptSha != header.ReceiptHash { if receiptSha != header.ReceiptHash {
return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha) return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha)
@ -203,6 +223,9 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
}) })
} else { } else {
validateFuns = append(validateFuns, func() error { validateFuns = append(validateFuns, func() error {
defer func(start time.Time) {
validateRootTimer.UpdateSince(start)
}(time.Now())
if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root { if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error()) return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error())
} }
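The timers threaded into ValidateState follow the usual go-metrics idiom: time.Now() is captured when the deferred closure's argument is evaluated, and UpdateSince records the elapsed time on every return path. A minimal runnable sketch against the same metrics package; note that go-ethereum keeps collection disabled unless metrics.Enabled is set:

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true // collection is off by default
	timer := metrics.NewRegisteredTimer("validate/root/time", nil)

	validate := func() error {
		defer func(start time.Time) {
			timer.UpdateSince(start) // fires on every return path
		}(time.Now())
		time.Sleep(10 * time.Millisecond) // stand-in for the root comparison
		return nil
	}

	_ = validate()
	fmt.Println(timer.Count()) // 1
}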

View File

@ -74,6 +74,7 @@ var (
blockInsertMgaspsGauge = metrics.NewRegisteredGauge("chain/insert/mgasps", nil) blockInsertMgaspsGauge = metrics.NewRegisteredGauge("chain/insert/mgasps", nil)
chainInfoGauge = metrics.NewRegisteredGaugeInfo("chain/info", nil) chainInfoGauge = metrics.NewRegisteredGaugeInfo("chain/info", nil)
mGasPsGauge = metrics.NewRegisteredGauge("chain/process/gas", nil)
accountReadTimer = metrics.NewRegisteredTimer("chain/account/reads", nil) accountReadTimer = metrics.NewRegisteredTimer("chain/account/reads", nil)
accountHashTimer = metrics.NewRegisteredTimer("chain/account/hashes", nil) accountHashTimer = metrics.NewRegisteredTimer("chain/account/hashes", nil)
@ -95,13 +96,14 @@ var (
blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil) blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil) blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil) blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
blockValidationTotalTimer = metrics.NewRegisteredTimer("chain/total/validation", nil)
blockExecutionTotalTimer = metrics.NewRegisteredTimer("chain/total/execution", nil)
blockWriteTotalTimer = metrics.NewRegisteredTimer("chain/total/write", nil)
blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil) blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil) blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil)
blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil) blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
blockRecvTimeDiffGauge = metrics.NewRegisteredGauge("chain/block/recvtimediff", nil)
errStateRootVerificationFailed = errors.New("state root verification failed") errStateRootVerificationFailed = errors.New("state root verification failed")
errInsertionInterrupted = errors.New("insertion is interrupted") errInsertionInterrupted = errors.New("insertion is interrupted")
errChainStopped = errors.New("blockchain is stopped") errChainStopped = errors.New("blockchain is stopped")
@ -270,7 +272,6 @@ type BlockChain struct {
logsFeed event.Feed logsFeed event.Feed
blockProcFeed event.Feed blockProcFeed event.Feed
finalizedHeaderFeed event.Feed finalizedHeaderFeed event.Feed
highestVerifiedBlockFeed event.Feed
scope event.SubscriptionScope scope event.SubscriptionScope
genesisBlock *types.Block genesisBlock *types.Block
@ -279,7 +280,6 @@ type BlockChain struct {
chainmu *syncx.ClosableMutex chainmu *syncx.ClosableMutex
highestVerifiedHeader atomic.Pointer[types.Header] highestVerifiedHeader atomic.Pointer[types.Header]
highestVerifiedBlock atomic.Pointer[types.Header]
currentBlock atomic.Pointer[types.Header] // Current head of the chain currentBlock atomic.Pointer[types.Header] // Current head of the chain
currentSnapBlock atomic.Pointer[types.Header] // Current head of snap-sync currentSnapBlock atomic.Pointer[types.Header] // Current head of snap-sync
currentFinalBlock atomic.Pointer[types.Header] // Latest (consensus) finalized block currentFinalBlock atomic.Pointer[types.Header] // Latest (consensus) finalized block
@ -404,7 +404,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
} }
bc.highestVerifiedHeader.Store(nil) bc.highestVerifiedHeader.Store(nil)
bc.highestVerifiedBlock.Store(nil)
bc.currentBlock.Store(nil) bc.currentBlock.Store(nil)
bc.currentSnapBlock.Store(nil) bc.currentSnapBlock.Store(nil)
bc.chasingHead.Store(nil) bc.chasingHead.Store(nil)
@ -1395,7 +1394,7 @@ func (bc *BlockChain) Stop() {
if !bc.cacheConfig.TrieDirtyDisabled { if !bc.cacheConfig.TrieDirtyDisabled {
triedb := bc.triedb triedb := bc.triedb
var once sync.Once var once sync.Once
for _, offset := range []uint64{0, 1, bc.TriesInMemory() - 1} { for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
if number := bc.CurrentBlock().Number.Uint64(); number > offset { if number := bc.CurrentBlock().Number.Uint64(); number > offset {
recent := bc.GetBlockByNumber(number - offset) recent := bc.GetBlockByNumber(number - offset)
log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
@ -1805,7 +1804,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
} }
bc.hc.tdCache.Add(block.Hash(), externTd) bc.hc.tdCache.Add(block.Hash(), externTd)
bc.blockCache.Add(block.Hash(), block) bc.blockCache.Add(block.Hash(), block)
bc.cacheReceipts(block.Hash(), receipts, block) bc.receiptsCache.Add(block.Hash(), receipts)
if bc.chainConfig.IsCancun(block.Number(), block.Time()) { if bc.chainConfig.IsCancun(block.Number(), block.Time()) {
bc.sidecarsCache.Add(block.Hash(), block.Sidecars()) bc.sidecarsCache.Add(block.Hash(), block.Sidecars())
} }
@ -1833,7 +1832,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
// Flush limits are not considered for the first TriesInMemory blocks. // Flush limits are not considered for the first TriesInMemory blocks.
current := block.NumberU64() current := block.NumberU64()
if current <= bc.TriesInMemory() { if current <= TriesInMemory {
return nil return nil
} }
// If we exceeded our memory allowance, flush matured singleton nodes to disk // If we exceeded our memory allowance, flush matured singleton nodes to disk
@ -1931,19 +1930,14 @@ func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types
// writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead. // writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead.
// This function expects the chain mutex to be held. // This function expects the chain mutex to be held.
func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
if err := bc.writeBlockWithState(block, receipts, state); err != nil {
return NonStatTy, err
}
currentBlock := bc.CurrentBlock() currentBlock := bc.CurrentBlock()
reorg, err := bc.forker.ReorgNeededWithFastFinality(currentBlock, block.Header()) reorg, err := bc.forker.ReorgNeededWithFastFinality(currentBlock, block.Header())
if err != nil { if err != nil {
return NonStatTy, err return NonStatTy, err
} }
if reorg {
bc.highestVerifiedBlock.Store(types.CopyHeader(block.Header()))
bc.highestVerifiedBlockFeed.Send(HighestVerifiedBlockEvent{Header: block.Header()})
}
if err := bc.writeBlockWithState(block, receipts, state); err != nil {
return NonStatTy, err
}
if reorg { if reorg {
// Reorganise the chain if the parent is not the head block // Reorganise the chain if the parent is not the head block
if block.ParentHash() != currentBlock.Hash() { if block.ParentHash() != currentBlock.Hash() {
@ -2057,9 +2051,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
return 0, nil return 0, nil
} }
if len(chain) > 0 {
blockRecvTimeDiffGauge.Update(time.Now().Unix() - int64(chain[0].Time()))
}
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
signer := types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()) signer := types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time())
go SenderCacher.RecoverFromBlocks(signer, chain) go SenderCacher.RecoverFromBlocks(signer, chain)
@ -2281,6 +2272,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
statedb.StopPrefetcher() statedb.StopPrefetcher()
return it.index, err return it.index, err
} }
blockExecutionTotalTimer.UpdateSince(pstart)
ptime := time.Since(pstart) ptime := time.Since(pstart)
// Validate the state using the default validator // Validate the state using the default validator
@ -2291,6 +2284,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
statedb.StopPrefetcher() statedb.StopPrefetcher()
return it.index, err return it.index, err
} }
blockValidationTotalTimer.UpdateSince(vstart)
vtime := time.Since(vstart) vtime := time.Since(vstart)
proctime := time.Since(start) // processing + validation proctime := time.Since(start) // processing + validation
@ -2324,6 +2319,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
if err != nil { if err != nil {
return it.index, err return it.index, err
} }
blockWriteTotalTimer.UpdateSince(wstart)
bc.cacheReceipts(block.Hash(), receipts, block)
// Update the metrics touched during block commit // Update the metrics touched during block commit
accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them

View File

@ -64,6 +64,7 @@ func (st *insertStats) report(chain []*types.Block, index int, snapDiffItems, sn
"blocks", st.processed, "txs", txs, "blobs", blobs, "mgas", float64(st.usedGas) / 1000000, "blocks", st.processed, "txs", txs, "blobs", blobs, "mgas", float64(st.usedGas) / 1000000,
"elapsed", common.PrettyDuration(elapsed), "mgasps", mgasps, "elapsed", common.PrettyDuration(elapsed), "mgasps", mgasps,
} }
mGasPsGauge.Update(int64(mgasps))
blockInsertMgaspsGauge.Update(int64(mgasps)) blockInsertMgaspsGauge.Update(int64(mgasps))
if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute { if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
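mgasps, fed into the gauges here, is simply millions of gas processed per wall-clock second over the measured insert window. A worked example of the arithmetic:

package main

import "fmt"

// mgasps: million gas per second, as reported by the insert stats logger.
func mgasps(usedGas uint64, elapsedSeconds float64) float64 {
	return float64(usedGas) / 1e6 / elapsedSeconds
}

func main() {
	// 140M gas executed in 2s of wall time comes out at 70 mgasps.
	fmt.Printf("%.1f mgasps\n", mgasps(140_000_000, 2.0))
}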

View File

@ -98,15 +98,6 @@ func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
return bc.hc.GetHeaderByHash(hash) return bc.hc.GetHeaderByHash(hash)
} }
// GetVerifiedBlockByHash retrieves the header of a verified block; it may exist only in memory.
func (bc *BlockChain) GetVerifiedBlockByHash(hash common.Hash) *types.Header {
highestVerifiedBlock := bc.highestVerifiedBlock.Load()
if highestVerifiedBlock != nil && highestVerifiedBlock.Hash() == hash {
return highestVerifiedBlock
}
return bc.hc.GetHeaderByHash(hash)
}
// GetHeaderByNumber retrieves a block header from the database by number, // GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found. // caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header { func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
@ -495,11 +486,6 @@ func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Su
return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch)) return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
} }
// SubscribeHighestVerifiedBlockEvent registers a subscription of HighestVerifiedBlockEvent.
func (bc *BlockChain) SubscribeHighestVerifiedHeaderEvent(ch chan<- HighestVerifiedBlockEvent) event.Subscription {
return bc.scope.Track(bc.highestVerifiedBlockFeed.Subscribe(ch))
}
// SubscribeChainBlockEvent registers a subscription of ChainBlockEvent. // SubscribeChainBlockEvent registers a subscription of ChainBlockEvent.
func (bc *BlockChain) SubscribeChainBlockEvent(ch chan<- ChainHeadEvent) event.Subscription { func (bc *BlockChain) SubscribeChainBlockEvent(ch chan<- ChainHeadEvent) event.Subscription {
return bc.scope.Track(bc.chainBlockFeed.Subscribe(ch)) return bc.scope.Track(bc.chainBlockFeed.Subscribe(ch))

View File

@ -486,7 +486,7 @@ func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engi
if cm.config.Parlia != nil { if cm.config.Parlia != nil {
header.WithdrawalsHash = &types.EmptyWithdrawalsHash header.WithdrawalsHash = &types.EmptyWithdrawalsHash
} }
if cm.config.Parlia == nil || cm.config.IsBohr(header.Number, header.Time) { if cm.config.Parlia == nil {
header.ParentBeaconRoot = new(common.Hash) header.ParentBeaconRoot = new(common.Hash)
} }
} }
@ -621,10 +621,6 @@ func (cm *chainMaker) GetHighestVerifiedHeader() *types.Header {
panic("not supported") panic("not supported")
} }
func (cm *chainMaker) GetVerifiedBlockByHash(hash common.Hash) *types.Header {
return cm.GetHeaderByHash(hash)
}
func (cm *chainMaker) ChasingHead() *types.Header { func (cm *chainMaker) ChasingHead() *types.Header {
panic("not supported") panic("not supported")
} }

View File

@ -365,10 +365,6 @@ func (r *mockDAHeaderReader) GetHighestVerifiedHeader() *types.Header {
panic("not supported") panic("not supported")
} }
func (r *mockDAHeaderReader) GetVerifiedBlockByHash(hash common.Hash) *types.Header {
panic("not supported")
}
func createMockDATx(config *params.ChainConfig, sidecar *types.BlobTxSidecar) *types.Transaction { func createMockDATx(config *params.ChainConfig, sidecar *types.BlobTxSidecar) *types.Transaction {
if sidecar == nil { if sidecar == nil {
tx := &types.DynamicFeeTx{ tx := &types.DynamicFeeTx{

View File

@ -50,5 +50,3 @@ type ChainSideEvent struct {
} }
type ChainHeadEvent struct{ Block *types.Block } type ChainHeadEvent struct{ Block *types.Block }
type HighestVerifiedBlockEvent struct{ Header *types.Header }

View File

@ -121,19 +121,9 @@ func (f *ForkChoice) ReorgNeeded(current *types.Header, extern *types.Header) (b
if f.preserve != nil { if f.preserve != nil {
currentPreserve, externPreserve = f.preserve(current), f.preserve(extern) currentPreserve, externPreserve = f.preserve(current), f.preserve(extern)
} }
choiceRules := func() bool { reorg = !currentPreserve && (externPreserve ||
if extern.Time == current.Time { extern.Time < current.Time ||
doubleSign := (extern.Coinbase == current.Coinbase) extern.Time == current.Time && f.rand.Float64() < 0.5)
if doubleSign {
return extern.Hash().Cmp(current.Hash()) < 0
} else {
return f.rand.Float64() < 0.5
}
} else {
return extern.Time < current.Time
}
}
reorg = !currentPreserve && (externPreserve || choiceRules())
} }
return reorg, nil return reorg, nil
} }
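The choiceRules closure removed in this diff refines the equal-difficulty tie-break: same timestamp plus same coinbase means a double-signed pair, which is settled deterministically by hash order instead of a coin flip. A sketch of the decision table with plain stand-in types:

package main

import (
	"fmt"
	"math/rand"
)

type hdr struct {
	time     uint64
	coinbase string
	hash     string // stand-in for common.Hash, compared lexicographically
}

// externPreferred reproduces the tie-break: an earlier timestamp wins; on a
// tie, a double-signed pair resolves by lower hash, otherwise by coin flip.
func externPreferred(extern, current hdr, r *rand.Rand) bool {
	if extern.time == current.time {
		if extern.coinbase == current.coinbase { // double sign
			return extern.hash < current.hash
		}
		return r.Float64() < 0.5
	}
	return extern.time < current.time
}

func main() {
	r := rand.New(rand.NewSource(1))
	a := hdr{time: 100, coinbase: "v1", hash: "0xaa"}
	b := hdr{time: 100, coinbase: "v1", hash: "0xbb"}
	fmt.Println(externPreferred(a, b, r)) // true: same signer, lower hash wins
}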

View File

@ -216,7 +216,6 @@ func (e *GenesisMismatchError) Error() string {
// ChainOverrides contains the changes to chain config // ChainOverrides contains the changes to chain config
// Typically, these modifications involve hardforks that are not enabled on the BSC mainnet, intended for testing purposes. // Typically, these modifications involve hardforks that are not enabled on the BSC mainnet, intended for testing purposes.
type ChainOverrides struct { type ChainOverrides struct {
OverridePassedForkTime *uint64
OverrideBohr *uint64 OverrideBohr *uint64
OverrideVerkle *uint64 OverrideVerkle *uint64
} }
@ -244,15 +243,6 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g
} }
applyOverrides := func(config *params.ChainConfig) { applyOverrides := func(config *params.ChainConfig) {
if config != nil { if config != nil {
if overrides != nil && overrides.OverridePassedForkTime != nil {
config.ShanghaiTime = overrides.OverridePassedForkTime
config.KeplerTime = overrides.OverridePassedForkTime
config.FeynmanTime = overrides.OverridePassedForkTime
config.FeynmanFixTime = overrides.OverridePassedForkTime
config.CancunTime = overrides.OverridePassedForkTime
config.HaberTime = overrides.OverridePassedForkTime
config.HaberFixTime = overrides.OverridePassedForkTime
}
if overrides != nil && overrides.OverrideBohr != nil { if overrides != nil && overrides.OverrideBohr != nil {
config.BohrTime = overrides.OverrideBohr config.BohrTime = overrides.OverrideBohr
} }
@ -454,7 +444,7 @@ func (g *Genesis) ToBlock() *types.Block {
// EIP-4788: The parentBeaconBlockRoot of the genesis block is always // EIP-4788: The parentBeaconBlockRoot of the genesis block is always
// the zero hash. This is because the genesis block does not have a parent // the zero hash. This is because the genesis block does not have a parent
// by definition. // by definition.
if conf.Parlia == nil || conf.IsBohr(num, g.Timestamp) { if conf.Parlia == nil {
head.ParentBeaconRoot = new(common.Hash) head.ParentBeaconRoot = new(common.Hash)
} }

View File

@ -436,10 +436,6 @@ func (hc *HeaderChain) GetHighestVerifiedHeader() *types.Header {
return nil return nil
} }
func (hc *HeaderChain) GetVerifiedBlockByHash(hash common.Hash) *types.Header {
return hc.GetHeaderByHash(hash)
}
func (hc *HeaderChain) ChasingHead() *types.Header { func (hc *HeaderChain) ChasingHead() *types.Header {
return nil return nil
} }

View File

@ -68,7 +68,6 @@ func newPrunedFreezer(datadir string, db ethdb.KeyValueStore, offset uint64) (*p
// repair initializes frozen; compatible with the disk ancientdb and the pruner-block-tool. // repair initializes frozen; compatible with the disk ancientdb and the pruner-block-tool.
func (f *prunedfreezer) repair(datadir string) error { func (f *prunedfreezer) repair(datadir string) error {
offset := atomic.LoadUint64(&f.frozen)
// compatible freezer // compatible freezer
minItems := uint64(math.MaxUint64) minItems := uint64(math.MaxUint64)
for name, disableSnappy := range chainFreezerNoSnappy { for name, disableSnappy := range chainFreezerNoSnappy {
@ -97,14 +96,19 @@ func (f *prunedfreezer) repair(datadir string) error {
table.Close() table.Close()
} }
// If the dataset has undergone a prune block, the offset is a non-zero value, otherwise the offset is a zero value. // If minItems is non-zero, it indicates that the chain freezer was previously enabled, and we should use minItems as the current frozen value.
// The minItems is the value relative to offset // If minItems is zero, it indicates that the pruneAncient was previously enabled, and we should continue using frozen
offset += minItems // (retrieved from CurrentAncientFreezer) as the current frozen value.
offset := minItems
if offset == 0 {
// no item in ancientDB, init `offset` to the `f.frozen`
offset = atomic.LoadUint64(&f.frozen)
}
log.Info("Read ancientdb item counts", "items", minItems, "offset", offset)
// FrozenOfAncientFreezer is the progress of the last prune-freezer freeze. // FrozenOfAncientFreezer is the progress of the last prune-freezer freeze.
frozenInDB := ReadFrozenOfAncientFreezer(f.db) frozenInDB := ReadFrozenOfAncientFreezer(f.db)
maxOffset := max(offset, frozenInDB) maxOffset := max(offset, frozenInDB)
log.Info("Read ancient db item counts", "items", minItems, "frozen", maxOffset)
atomic.StoreUint64(&f.frozen, maxOffset) atomic.StoreUint64(&f.frozen, maxOffset)
if err := f.Sync(); err != nil { if err := f.Sync(); err != nil {
@ -157,12 +161,12 @@ func (f *prunedfreezer) AncientOffSet() uint64 {
// MigrateTable processes the entries in a given table in sequence // MigrateTable processes the entries in a given table in sequence
// converting them to a new format if they're of an old format. // converting them to a new format if they're of an old format.
func (f *prunedfreezer) MigrateTable(kind string, convert convertLegacyFn) error { func (db *prunedfreezer) MigrateTable(kind string, convert convertLegacyFn) error {
return errNotSupported return errNotSupported
} }
// AncientDatadir returns an error as we don't have a backing chain freezer. // AncientDatadir returns an error as we don't have a backing chain freezer.
func (f *prunedfreezer) AncientDatadir() (string, error) { func (db *prunedfreezer) AncientDatadir() (string, error) {
return "", errNotSupported return "", errNotSupported
} }
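The repaired recovery picks the frozen counter in three steps: trust the item count found in the ancient tables when it is non-zero, fall back to the persisted frozen value when the tables are empty, and never move backwards past the progress recorded by the last prune-freezer run. The selection logic in isolation:

package main

import "fmt"

// recoverFrozen mirrors the selection above: minItems if the ancient tables
// hold data, else the persisted counter; clamped up by the last freeze mark.
func recoverFrozen(minItems, persistedFrozen, frozenInDB uint64) uint64 {
	offset := minItems
	if offset == 0 { // no items in the ancient db
		offset = persistedFrozen
	}
	if frozenInDB > offset {
		offset = frozenInDB
	}
	return offset
}

func main() {
	fmt.Println(recoverFrozen(0, 1200, 1150)) // 1200: empty tables, keep frozen
	fmt.Println(recoverFrozen(800, 0, 900))   // 900: last freeze progress wins
}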

View File

@ -34,4 +34,7 @@ var (
slotDeletionCount = metrics.NewRegisteredMeter("state/delete/storage/slot", nil) slotDeletionCount = metrics.NewRegisteredMeter("state/delete/storage/slot", nil)
slotDeletionSize = metrics.NewRegisteredMeter("state/delete/storage/size", nil) slotDeletionSize = metrics.NewRegisteredMeter("state/delete/storage/size", nil)
slotDeletionSkip = metrics.NewRegisteredGauge("state/delete/storage/skip", nil) slotDeletionSkip = metrics.NewRegisteredGauge("state/delete/storage/skip", nil)
accountIntermediateRootTimer = metrics.NewRegisteredTimer("state/account/intermediate/root/time", nil)
storageIntermediateRootTimer = metrics.NewRegisteredTimer("state/storage/intermediate/root/time", nil)
) )

View File

@ -286,6 +286,13 @@ func (dl *diffLayer) Stale() bool {
// Account directly retrieves the account associated with a particular hash in // Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format. // the snapshot slim data format.
func (dl *diffLayer) Account(hash common.Hash) (*types.SlimAccount, error) { func (dl *diffLayer) Account(hash common.Hash) (*types.SlimAccount, error) {
defer func(start time.Time) {
snapGetTimer.UpdateSince(start)
snapGetQPS.Mark(1)
snapGetAccountTimer.UpdateSince(start)
snapGetAccountQPS.Mark(1)
}(time.Now())
data, err := dl.AccountRLP(hash) data, err := dl.AccountRLP(hash)
if err != nil { if err != nil {
return nil, err return nil, err
@ -394,6 +401,13 @@ func (dl *diffLayer) accountRLP(hash common.Hash, depth int) ([]byte, error) {
// //
// Note the returned slot is not a copy, please don't modify it. // Note the returned slot is not a copy, please don't modify it.
func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) { func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
defer func(start time.Time) {
snapGetTimer.UpdateSince(start)
snapGetQPS.Mark(1)
snapGetStorageTimer.UpdateSince(start)
snapGetStorageQPS.Mark(1)
}(time.Now())
// Check the bloom filter first whether there's even a point in reaching into // Check the bloom filter first whether there's even a point in reaching into
// all the maps in all the layers below // all the maps in all the layers below
dl.lock.RLock() dl.lock.RLock()

View File

@ -19,6 +19,7 @@ package snapshot
import ( import (
"bytes" "bytes"
"sync" "sync"
"time"
"github.com/VictoriaMetrics/fastcache" "github.com/VictoriaMetrics/fastcache"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -136,7 +137,12 @@ func (dl *diskLayer) AccountRLP(hash common.Hash) ([]byte, error) {
return blob, nil return blob, nil
} }
// Cache doesn't contain account, pull from disk and cache for later // Cache doesn't contain account, pull from disk and cache for later
// TODO:
snapNodeQPS.Mark(1)
startLoadSnapNode := time.Now()
blob := rawdb.ReadAccountSnapshot(dl.diskdb, hash) blob := rawdb.ReadAccountSnapshot(dl.diskdb, hash)
snapNodeTime.Mark(time.Since(startLoadSnapNode).Nanoseconds())
dl.cache.Set(hash[:], blob) dl.cache.Set(hash[:], blob)
snapshotCleanAccountMissMeter.Mark(1) snapshotCleanAccountMissMeter.Mark(1)
@ -176,7 +182,11 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
return blob, nil return blob, nil
} }
// Cache doesn't contain storage slot, pull from disk and cache for later // Cache doesn't contain storage slot, pull from disk and cache for later
// TODO:
snapNodeQPS.Mark(1)
startLoadSnapNode := time.Now()
blob := rawdb.ReadStorageSnapshot(dl.diskdb, accountHash, storageHash) blob := rawdb.ReadStorageSnapshot(dl.diskdb, accountHash, storageHash)
snapNodeTime.Mark(time.Since(startLoadSnapNode).Nanoseconds())
dl.cache.Set(key, blob) dl.cache.Set(key, blob)
snapshotCleanStorageMissMeter.Mark(1) snapshotCleanStorageMissMeter.Mark(1)

View File

@ -50,4 +50,15 @@ var (
snapStorageWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/write", nil) snapStorageWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/write", nil)
// snapStorageCleanCounter measures time spent on deleting storages // snapStorageCleanCounter measures time spent on deleting storages
snapStorageCleanCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/clean", nil) snapStorageCleanCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/clean", nil)
snapNodeQPS = metrics.NewRegisteredMeter("pbss/snap/node/qps", nil)
snapNodeTime = metrics.NewRegisteredMeter("pbss/snap/node/time", nil)
snapGetTimer = metrics.NewRegisteredTimer("snap/get/time", nil)
snapGetQPS = metrics.NewRegisteredMeter("snap/get/qps", nil)
snapGetAccountTimer = metrics.NewRegisteredTimer("snap/account/get/time", nil)
snapGetAccountQPS = metrics.NewRegisteredMeter("snap/account/get/qps", nil)
snapGetStorageTimer = metrics.NewRegisteredTimer("snap/storage/get/time", nil)
snapGetStorageQPS = metrics.NewRegisteredMeter("snap/storage/get/qps", nil)
) )
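Each snapshot disk read is now bracketed by a QPS meter and a meter fed with nanoseconds, so throughput and cumulative latency can be charted from the same two series. A sketch of the wrapping, reusing the metric names registered above (the read function is a stand-in for rawdb.ReadAccountSnapshot):

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true // collection is off by default
	qps := metrics.NewRegisteredMeter("pbss/snap/node/qps", nil)
	lat := metrics.NewRegisteredMeter("pbss/snap/node/time", nil)

	timedRead := func(read func() []byte) []byte {
		qps.Mark(1)
		start := time.Now()
		blob := read()
		lat.Mark(time.Since(start).Nanoseconds())
		return blob
	}

	_ = timedRead(func() []byte {
		time.Sleep(time.Millisecond) // stand-in for the disk read
		return nil
	})
	fmt.Println(qps.Count(), lat.Count()) // 1 <total nanoseconds>
}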

View File

@ -223,6 +223,14 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
return common.Hash{} return common.Hash{}
} }
// If no live objects are available, attempt to use snapshots // If no live objects are available, attempt to use snapshots
defer func(start time.Time) {
stateDBGetTimer.UpdateSince(start)
stateDBGetQPS.Mark(1)
stateDBGetStorageTimer.UpdateSince(start)
stateDBGetStorageQPS.Mark(1)
}(time.Now())
var ( var (
enc []byte enc []byte
err error err error

View File

@ -54,6 +54,16 @@ type revision struct {
journalIndex int journalIndex int
} }
var (
stateDBGetTimer = metrics.NewRegisteredTimer("statedb/get/time", nil)
stateDBGetQPS = metrics.NewRegisteredMeter("statedb/get/qps", nil)
stateDBGetAccountTimer = metrics.NewRegisteredTimer("statedb/account/get/time", nil)
stateDBGetAccountQPS = metrics.NewRegisteredMeter("statedb/account/get/qps", nil)
stateDBGetStorageTimer = metrics.NewRegisteredTimer("statedb/storage/get/time", nil)
stateDBGetStorageQPS = metrics.NewRegisteredMeter("statedb/storage/get/qps", nil)
)
// StateDB structs within the ethereum protocol are used to store anything // StateDB structs within the ethereum protocol are used to store anything
// within the merkle trie. StateDBs take care of caching and storing // within the merkle trie. StateDBs take care of caching and storing
// nested states. It's the general query interface to retrieve: // nested states. It's the general query interface to retrieve:
@ -166,7 +176,7 @@ func NewWithSharedPool(root common.Hash, db Database, snaps *snapshot.Tree) (*St
if err != nil { if err != nil {
return nil, err return nil, err
} }
statedb.storagePool = NewStoragePool() //statedb.storagePool = NewStoragePool()
return statedb, nil return statedb, nil
} }
@ -716,6 +726,14 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
if obj := s.stateObjects[addr]; obj != nil { if obj := s.stateObjects[addr]; obj != nil {
return obj return obj
} }
defer func(start time.Time) {
stateDBGetTimer.UpdateSince(start)
stateDBGetQPS.Mark(1)
stateDBGetAccountTimer.UpdateSince(start)
stateDBGetAccountQPS.Mark(1)
}(time.Now())
// If no live objects are available, attempt to use snapshots // If no live objects are available, attempt to use snapshots
var data *types.StateAccount var data *types.StateAccount
if s.snap != nil { if s.snap != nil {
@ -1147,6 +1165,10 @@ func (s *StateDB) populateSnapStorage(obj *stateObject) bool {
} }
func (s *StateDB) AccountsIntermediateRoot() { func (s *StateDB) AccountsIntermediateRoot() {
defer func(start time.Time) {
storageIntermediateRootTimer.UpdateSince(start)
}(time.Now())
tasks := make(chan func()) tasks := make(chan func())
finishCh := make(chan struct{}) finishCh := make(chan struct{})
defer close(finishCh) defer close(finishCh)
@ -1191,6 +1213,9 @@ func (s *StateDB) AccountsIntermediateRoot() {
} }
func (s *StateDB) StateIntermediateRoot() common.Hash { func (s *StateDB) StateIntermediateRoot() common.Hash {
defer func(start time.Time) {
accountIntermediateRootTimer.UpdateSince(start)
}(time.Now())
// If there was a trie prefetcher operating, it gets aborted and irrevocably // If there was a trie prefetcher operating, it gets aborted and irrevocably
// modified after we start retrieving tries. Remove it from the statedb after // modified after we start retrieving tries. Remove it from the statedb after
// this round of use. // this round of use.

View File

@ -231,13 +231,6 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
// ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root // ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root
// contract. This method is exported to be used in tests. // contract. This method is exported to be used in tests.
func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *state.StateDB) { func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *state.StateDB) {
// Return immediately if beaconRoot equals the zero hash when using the Parlia engine.
if beaconRoot == (common.Hash{}) {
if chainConfig := vmenv.ChainConfig(); chainConfig != nil && chainConfig.Parlia != nil {
return
}
}
// If EIP-4788 is enabled, we need to invoke the beaconroot storage contract with // If EIP-4788 is enabled, we need to invoke the beaconroot storage contract with
// the new root // the new root
msg := &Message{ msg := &Message{

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -1,27 +0,0 @@
package bohr
import _ "embed"
// contract codes for Mainnet upgrade
var (
//go:embed mainnet/ValidatorContract
MainnetValidatorContract string
//go:embed mainnet/StakeHubContract
MainnetStakeHubContract string
)
// contract codes for Chapel upgrade
var (
//go:embed chapel/ValidatorContract
ChapelValidatorContract string
//go:embed chapel/StakeHubContract
ChapelStakeHubContract string
)
// contract codes for Rialto upgrade
var (
//go:embed rialto/ValidatorContract
RialtoValidatorContract string
//go:embed rialto/StakeHubContract
RialtoStakeHubContract string
)
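
The deleted package relied on Go's embed directive: each //go:embed line binds a contract-code file's contents to the string below it at build time, and the upgrade path later decodes that hex payload into deployable bytecode. A hedged sketch of that mechanism; it assumes a mainnet/ValidatorContract file sits beside the source, as in the deleted package, and the helper name is illustrative:

package main

import (
	_ "embed"
	"encoding/hex"
	"fmt"
)

//go:embed mainnet/ValidatorContract
var mainnetValidatorContract string // file contents bound at build time

// decodeContract turns the embedded hex payload into bytecode; a real
// upgrade would then install it with statedb.SetCode(addr, code).
func decodeContract(embedded string) ([]byte, error) {
	code, err := hex.DecodeString(embedded)
	if err != nil {
		return nil, fmt.Errorf("decode contract code: %w", err)
	}
	return code, nil
}

func main() {
	code, err := decodeContract(mainnetValidatorContract)
	fmt.Println(len(code), err)
}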

View File

@ -4,11 +4,10 @@ import (
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"math/big" "math/big"
"reflect"
"strings" "strings"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/systemcontracts/bohr" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/systemcontracts/bruno" "github.com/ethereum/go-ethereum/core/systemcontracts/bruno"
"github.com/ethereum/go-ethereum/core/systemcontracts/euler" "github.com/ethereum/go-ethereum/core/systemcontracts/euler"
"github.com/ethereum/go-ethereum/core/systemcontracts/feynman" "github.com/ethereum/go-ethereum/core/systemcontracts/feynman"
@ -23,7 +22,6 @@ import (
"github.com/ethereum/go-ethereum/core/systemcontracts/planck" "github.com/ethereum/go-ethereum/core/systemcontracts/planck"
"github.com/ethereum/go-ethereum/core/systemcontracts/plato" "github.com/ethereum/go-ethereum/core/systemcontracts/plato"
"github.com/ethereum/go-ethereum/core/systemcontracts/ramanujan" "github.com/ethereum/go-ethereum/core/systemcontracts/ramanujan"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
) )
@ -41,7 +39,7 @@ type Upgrade struct {
Configs []*UpgradeConfig Configs []*UpgradeConfig
} }
type upgradeHook func(blockNumber *big.Int, contractAddr common.Address, statedb vm.StateDB) error type upgradeHook func(blockNumber *big.Int, contractAddr common.Address, statedb *state.StateDB) error
const ( const (
mainNet = "Mainnet" mainNet = "Mainnet"
@ -80,8 +78,6 @@ var (
feynmanFixUpgrade = make(map[string]*Upgrade) feynmanFixUpgrade = make(map[string]*Upgrade)
haberFixUpgrade = make(map[string]*Upgrade) haberFixUpgrade = make(map[string]*Upgrade)
bohrUpgrade = make(map[string]*Upgrade)
) )
func init() { func init() {
@ -740,61 +736,12 @@ func init() {
}, },
}, },
} }
bohrUpgrade[mainNet] = &Upgrade{
UpgradeName: "bohr",
Configs: []*UpgradeConfig{
{
ContractAddr: common.HexToAddress(ValidatorContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
Code: bohr.MainnetValidatorContract,
},
{
ContractAddr: common.HexToAddress(StakeHubContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
Code: bohr.MainnetStakeHubContract,
},
},
}
bohrUpgrade[chapelNet] = &Upgrade{
UpgradeName: "bohr",
Configs: []*UpgradeConfig{
{
ContractAddr: common.HexToAddress(ValidatorContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
Code: bohr.ChapelValidatorContract,
},
{
ContractAddr: common.HexToAddress(StakeHubContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
Code: bohr.ChapelStakeHubContract,
},
},
}
bohrUpgrade[rialtoNet] = &Upgrade{
UpgradeName: "bohr",
Configs: []*UpgradeConfig{
{
ContractAddr: common.HexToAddress(ValidatorContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
Code: bohr.RialtoValidatorContract,
},
{
ContractAddr: common.HexToAddress(StakeHubContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
Code: bohr.RialtoStakeHubContract,
},
},
}
} }
func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.Int, lastBlockTime uint64, blockTime uint64, statedb vm.StateDB) { func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.Int, lastBlockTime uint64, blockTime uint64, statedb *state.StateDB) {
if config == nil || blockNumber == nil || statedb == nil || reflect.ValueOf(statedb).IsNil() { if config == nil || blockNumber == nil || statedb == nil {
return return
} }
var network string var network string
switch GenesisHash { switch GenesisHash {
/* Add mainnet genesis hash */ /* Add mainnet genesis hash */
@ -869,16 +816,12 @@ func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.I
applySystemContractUpgrade(haberFixUpgrade[network], blockNumber, statedb, logger) applySystemContractUpgrade(haberFixUpgrade[network], blockNumber, statedb, logger)
} }
if config.IsOnBohr(blockNumber, lastBlockTime, blockTime) {
applySystemContractUpgrade(bohrUpgrade[network], blockNumber, statedb, logger)
}
/* /*
apply other upgrades apply other upgrades
*/ */
} }
func applySystemContractUpgrade(upgrade *Upgrade, blockNumber *big.Int, statedb vm.StateDB, logger log.Logger) { func applySystemContractUpgrade(upgrade *Upgrade, blockNumber *big.Int, statedb *state.StateDB, logger log.Logger) {
if upgrade == nil { if upgrade == nil {
logger.Info("Empty upgrade config", "height", blockNumber.String()) logger.Info("Empty upgrade config", "height", blockNumber.String())
return return
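
The signature change in this file, from the vm.StateDB interface back to a concrete *state.StateDB, is what lets the reflect-based guard disappear: an interface holding a typed nil pointer does not compare equal to nil, so the interface-taking version needed reflect.ValueOf(statedb).IsNil() on top of the plain check. A self-contained sketch of that pitfall:

package main

import (
	"fmt"
	"reflect"
)

type StateDB interface{ Empty() bool }

type stateDB struct{}

func (s *stateDB) Empty() bool { return s == nil }

func main() {
	var p *stateDB    // typed nil pointer
	var i StateDB = p // interface now carries (type=*stateDB, value=nil)

	fmt.Println(i == nil)                   // false: the interface itself is non-nil
	fmt.Println(reflect.ValueOf(i).IsNil()) // true: the pointer inside it is nil
}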

View File

@ -2,13 +2,9 @@ package systemcontracts
import ( import (
"crypto/sha256" "crypto/sha256"
"math/big"
"testing" "testing"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -43,31 +39,3 @@ func TestAllCodesHash(t *testing.T) {
allCodeHash := sha256.Sum256(allCodes) allCodeHash := sha256.Sum256(allCodes)
require.Equal(t, allCodeHash[:], common.Hex2Bytes("833cc0fc87c46ad8a223e44ccfdc16a51a7e7383525136441bd0c730f06023df")) require.Equal(t, allCodeHash[:], common.Hex2Bytes("833cc0fc87c46ad8a223e44ccfdc16a51a7e7383525136441bd0c730f06023df"))
} }
func TestUpgradeBuildInSystemContractNilInterface(t *testing.T) {
var (
config = params.BSCChainConfig
blockNumber = big.NewInt(37959559)
lastBlockTime uint64 = 1713419337
blockTime uint64 = 1713419340
statedb vm.StateDB
)
GenesisHash = params.BSCGenesisHash
UpgradeBuildInSystemContract(config, blockNumber, lastBlockTime, blockTime, statedb)
}
func TestUpgradeBuildInSystemContractNilValue(t *testing.T) {
var (
config = params.BSCChainConfig
blockNumber = big.NewInt(37959559)
lastBlockTime uint64 = 1713419337
blockTime uint64 = 1713419340
statedb vm.StateDB = (*state.StateDB)(nil)
)
GenesisHash = params.BSCGenesisHash
UpgradeBuildInSystemContract(config, blockNumber, lastBlockTime, blockTime, statedb)
}

View File

@ -2,12 +2,10 @@ package types
import ( import (
"bytes" "bytes"
"encoding/json"
"errors" "errors"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
@ -55,40 +53,3 @@ func (s *BlobSidecar) SanityCheck(blockNumber *big.Int, blockHash common.Hash) e
} }
return nil return nil
} }
func (s *BlobSidecar) MarshalJSON() ([]byte, error) {
fields := map[string]interface{}{
"blockHash": s.BlockHash,
"blockNumber": hexutil.EncodeUint64(s.BlockNumber.Uint64()),
"txHash": s.TxHash,
"txIndex": hexutil.EncodeUint64(s.TxIndex),
}
fields["blobSidecar"] = s.BlobTxSidecar
return json.Marshal(fields)
}
func (s *BlobSidecar) UnmarshalJSON(input []byte) error {
type blobSidecar struct {
BlobSidecar BlobTxSidecar `json:"blobSidecar"`
BlockNumber *hexutil.Big `json:"blockNumber"`
BlockHash common.Hash `json:"blockHash"`
TxIndex *hexutil.Big `json:"txIndex"`
TxHash common.Hash `json:"txHash"`
}
var blob blobSidecar
if err := json.Unmarshal(input, &blob); err != nil {
return err
}
s.BlobTxSidecar = blob.BlobSidecar
if blob.BlockNumber == nil {
return errors.New("missing required field 'blockNumber' for BlobSidecar")
}
s.BlockNumber = blob.BlockNumber.ToInt()
s.BlockHash = blob.BlockHash
if blob.TxIndex == nil {
return errors.New("missing required field 'txIndex' for BlobSidecar")
}
s.TxIndex = blob.TxIndex.ToInt().Uint64()
s.TxHash = blob.TxHash
return nil
}

View File

@ -673,7 +673,10 @@ type DiffAccountsInBlock struct {
Transactions []DiffAccountsInTx Transactions []DiffAccountsInTx
} }
var extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal var (
extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal
)
// SealHash returns the hash of a block prior to it being sealed. // SealHash returns the hash of a block prior to it being sealed.
func SealHash(header *Header, chainId *big.Int) (hash common.Hash) { func SealHash(header *Header, chainId *big.Int) (hash common.Hash) {
@ -684,33 +687,7 @@ func SealHash(header *Header, chainId *big.Int) (hash common.Hash) {
} }
func EncodeSigHeader(w io.Writer, header *Header, chainId *big.Int) { func EncodeSigHeader(w io.Writer, header *Header, chainId *big.Int) {
var err error err := rlp.Encode(w, []interface{}{
if header.ParentBeaconRoot != nil && *header.ParentBeaconRoot == (common.Hash{}) {
err = rlp.Encode(w, []interface{}{
chainId,
header.ParentHash,
header.UncleHash,
header.Coinbase,
header.Root,
header.TxHash,
header.ReceiptHash,
header.Bloom,
header.Difficulty,
header.Number,
header.GasLimit,
header.GasUsed,
header.Time,
header.Extra[:len(header.Extra)-extraSeal], // this will panic if extra is too short, should check before calling encodeSigHeader
header.MixDigest,
header.Nonce,
header.BaseFee,
header.WithdrawalsHash,
header.BlobGasUsed,
header.ExcessBlobGas,
header.ParentBeaconRoot,
})
} else {
err = rlp.Encode(w, []interface{}{
chainId, chainId,
header.ParentHash, header.ParentHash,
header.UncleHash, header.UncleHash,
@ -728,7 +705,30 @@ func EncodeSigHeader(w io.Writer, header *Header, chainId *big.Int) {
header.MixDigest, header.MixDigest,
header.Nonce, header.Nonce,
}) })
} if err != nil {
panic("can't encode: " + err.Error())
}
}
func EncodeSigHeaderWithoutVoteAttestation(w io.Writer, header *Header, chainId *big.Int) {
err := rlp.Encode(w, []interface{}{
chainId,
header.ParentHash,
header.UncleHash,
header.Coinbase,
header.Root,
header.TxHash,
header.ReceiptHash,
header.Bloom,
header.Difficulty,
header.Number,
header.GasLimit,
header.GasUsed,
header.Time,
header.Extra[:extraVanity], // this will panic if extra is too short, should check before calling encodeSigHeaderWithoutVoteAttestation
header.MixDigest,
header.Nonce,
})
if err != nil { if err != nil {
panic("can't encode: " + err.Error()) panic("can't encode: " + err.Error())
} }
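
Both encoders slice header.Extra, and their comments warn that the slice expression panics when the extra-data is shorter than the reserved vanity and seal bytes. A minimal guard of the kind callers are expected to run first; this helper is hypothetical, not part of the diff:

package main

import "fmt"

const (
	extraVanity = 32 // extra-data prefix bytes reserved for signer vanity
	extraSeal   = 65 // extra-data suffix bytes reserved for the signer seal
)

// sealedPayload strips the seal, returning an error instead of panicking
// when the extra-data is too short to contain vanity plus seal.
func sealedPayload(extra []byte) ([]byte, error) {
	if len(extra) < extraVanity+extraSeal {
		return nil, fmt.Errorf("extra-data too short: %d < %d", len(extra), extraVanity+extraSeal)
	}
	return extra[:len(extra)-extraSeal], nil
}

func main() {
	_, err := sealedPayload(make([]byte, 10))
	fmt.Println(err) // extra-data too short: 10 < 97
}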

View File

@ -3,7 +3,6 @@ package vote
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"math/big"
"sync" "sync"
"time" "time"
@ -20,13 +19,7 @@ import (
const blocksNumberSinceMining = 5 // the number of blocks to wait before voting, counting from when the validator begins to mine const blocksNumberSinceMining = 5 // the number of blocks to wait before voting, counting from when the validator begins to mine
var diffInTurn = big.NewInt(2) // Block difficulty for in-turn signatures
var votesManagerCounter = metrics.NewRegisteredCounter("votesManager/local", nil) var votesManagerCounter = metrics.NewRegisteredCounter("votesManager/local", nil)
var notJustified = metrics.NewRegisteredCounter("votesManager/notJustified", nil)
var inTurnJustified = metrics.NewRegisteredCounter("votesManager/inTurnJustified", nil)
var notInTurnJustified = metrics.NewRegisteredCounter("votesManager/notInTurnJustified", nil)
var continuousJustified = metrics.NewRegisteredCounter("votesManager/continuousJustified", nil)
var notContinuousJustified = metrics.NewRegisteredCounter("votesManager/notContinuousJustified", nil)
// Backend wraps all methods required for voting. // Backend wraps all methods required for voting.
type Backend interface { type Backend interface {
@ -40,8 +33,8 @@ type VoteManager struct {
chain *core.BlockChain chain *core.BlockChain
highestVerifiedBlockCh chan core.HighestVerifiedBlockEvent chainHeadCh chan core.ChainHeadEvent
highestVerifiedBlockSub event.Subscription chainHeadSub event.Subscription
// used for backup validators to sync votes from corresponding mining validator // used for backup validators to sync votes from corresponding mining validator
syncVoteCh chan core.NewVoteEvent syncVoteCh chan core.NewVoteEvent
@ -58,7 +51,7 @@ func NewVoteManager(eth Backend, chain *core.BlockChain, pool *VotePool, journal
voteManager := &VoteManager{ voteManager := &VoteManager{
eth: eth, eth: eth,
chain: chain, chain: chain,
highestVerifiedBlockCh: make(chan core.HighestVerifiedBlockEvent, highestVerifiedBlockChanSize), chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
syncVoteCh: make(chan core.NewVoteEvent, voteBufferForPut), syncVoteCh: make(chan core.NewVoteEvent, voteBufferForPut),
pool: pool, pool: pool,
engine: engine, engine: engine,
@ -81,7 +74,7 @@ func NewVoteManager(eth Backend, chain *core.BlockChain, pool *VotePool, journal
voteManager.journal = voteJournal voteManager.journal = voteJournal
// Subscribe to chain head event. // Subscribe to chain head event.
voteManager.highestVerifiedBlockSub = voteManager.chain.SubscribeHighestVerifiedHeaderEvent(voteManager.highestVerifiedBlockCh) voteManager.chainHeadSub = voteManager.chain.SubscribeChainHeadEvent(voteManager.chainHeadCh)
voteManager.syncVoteSub = voteManager.pool.SubscribeNewVoteEvent(voteManager.syncVoteCh) voteManager.syncVoteSub = voteManager.pool.SubscribeNewVoteEvent(voteManager.syncVoteCh)
go voteManager.loop() go voteManager.loop()
@ -91,7 +84,7 @@ func NewVoteManager(eth Backend, chain *core.BlockChain, pool *VotePool, journal
func (voteManager *VoteManager) loop() { func (voteManager *VoteManager) loop() {
log.Debug("vote manager routine loop started") log.Debug("vote manager routine loop started")
defer voteManager.highestVerifiedBlockSub.Unsubscribe() defer voteManager.chainHeadSub.Unsubscribe()
defer voteManager.syncVoteSub.Unsubscribe() defer voteManager.syncVoteSub.Unsubscribe()
events := voteManager.eth.EventMux().Subscribe(downloader.StartEvent{}, downloader.DoneEvent{}, downloader.FailedEvent{}) events := voteManager.eth.EventMux().Subscribe(downloader.StartEvent{}, downloader.DoneEvent{}, downloader.FailedEvent{})
@ -126,7 +119,7 @@ func (voteManager *VoteManager) loop() {
log.Debug("downloader is in DoneEvent mode, set the startVote flag to true") log.Debug("downloader is in DoneEvent mode, set the startVote flag to true")
startVote = true startVote = true
} }
case cHead := <-voteManager.highestVerifiedBlockCh: case cHead := <-voteManager.chainHeadCh:
if !startVote { if !startVote {
log.Debug("startVote flag is false, continue") log.Debug("startVote flag is false, continue")
continue continue
@ -142,12 +135,12 @@ func (voteManager *VoteManager) loop() {
continue continue
} }
if cHead.Header == nil { if cHead.Block == nil {
log.Debug("cHead.Header is nil, continue") log.Debug("cHead.Block is nil, continue")
continue continue
} }
curHead := cHead.Header curHead := cHead.Block.Header()
if p, ok := voteManager.engine.(*parlia.Parlia); ok { if p, ok := voteManager.engine.(*parlia.Parlia); ok {
nextBlockMinedTime := time.Unix(int64((curHead.Time + p.Period())), 0) nextBlockMinedTime := time.Unix(int64((curHead.Time + p.Period())), 0)
timeForBroadcast := 50 * time.Millisecond // enough to broadcast a vote timeForBroadcast := 50 * time.Millisecond // enough to broadcast a vote
@ -162,7 +155,7 @@ func (voteManager *VoteManager) loop() {
func(bLSPublicKey *types.BLSPublicKey) bool { func(bLSPublicKey *types.BLSPublicKey) bool {
return bytes.Equal(voteManager.signer.PubKey[:], bLSPublicKey[:]) return bytes.Equal(voteManager.signer.PubKey[:], bLSPublicKey[:])
}) { }) {
log.Debug("local validator with voteKey is not within the validatorSet at curHead") log.Debug("cur validator is not within the validatorSet at curHead")
continue continue
} }
@ -209,36 +202,6 @@ func (voteManager *VoteManager) loop() {
voteManager.pool.PutVote(voteMessage) voteManager.pool.PutVote(voteMessage)
votesManagerCounter.Inc(1) votesManagerCounter.Inc(1)
} }
// check the latest justified block, which indicating the stability of the network
curJustifiedNumber, _, err := voteManager.engine.GetJustifiedNumberAndHash(voteManager.chain, []*types.Header{curHead})
if err == nil && curJustifiedNumber != 0 {
if curJustifiedNumber+1 != curHead.Number.Uint64() {
log.Debug("not justified", "blockNumber", curHead.Number.Uint64()-1)
notJustified.Inc(1)
} else {
parent := voteManager.chain.GetHeaderByHash(curHead.ParentHash)
if parent != nil {
if parent.Difficulty.Cmp(diffInTurn) == 0 {
inTurnJustified.Inc(1)
} else {
log.Debug("not in turn block justified", "blockNumber", parent.Number.Int64(), "blockHash", parent.Hash())
notInTurnJustified.Inc(1)
}
lastJustifiedNumber, _, err := voteManager.engine.GetJustifiedNumberAndHash(voteManager.chain, []*types.Header{parent})
if err == nil {
if lastJustifiedNumber == 0 || lastJustifiedNumber+1 == curJustifiedNumber {
continuousJustified.Inc(1)
} else {
log.Debug("not continuous block justified", "lastJustified", lastJustifiedNumber, "curJustified", curJustifiedNumber)
notContinuousJustified.Inc(1)
}
}
}
}
}
case event := <-voteManager.syncVoteCh: case event := <-voteManager.syncVoteCh:
voteMessage := event.Vote voteMessage := event.Vote
if voteManager.eth.IsMining() || !bytes.Equal(voteManager.signer.PubKey[:], voteMessage.VoteAddress[:]) { if voteManager.eth.IsMining() || !bytes.Equal(voteManager.signer.PubKey[:], voteMessage.VoteAddress[:]) {
@ -254,7 +217,7 @@ func (voteManager *VoteManager) loop() {
case <-voteManager.syncVoteSub.Err(): case <-voteManager.syncVoteSub.Err():
log.Debug("voteManager subscribed votes failed") log.Debug("voteManager subscribed votes failed")
return return
case <-voteManager.highestVerifiedBlockSub.Err(): case <-voteManager.chainHeadSub.Err():
log.Debug("voteManager subscribed chainHead failed") log.Debug("voteManager subscribed chainHead failed")
return return
} }
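
Whichever event feeds the manager, ChainHeadEvent on one side or HighestVerifiedBlockEvent on the other, the surrounding machinery is the standard go-ethereum subscribe-and-select loop: a buffered channel, an event.Subscription, and a select that exits once the subscription errors out. A stripped-down, runnable sketch with stand-in types:

package main

import (
	"fmt"
	"time"
)

type headEvent struct{ number uint64 }

type subscription interface{ Err() <-chan error }

type fakeSub struct{ errc chan error }

func (s fakeSub) Err() <-chan error { return s.errc }

// loop drains head events until the subscription is torn down.
func loop(events <-chan headEvent, sub subscription) {
	for {
		select {
		case ev := <-events:
			fmt.Println("new head", ev.number) // react to the event
		case <-sub.Err():
			return // subscription closed or failed; exit the goroutine
		}
	}
}

func main() {
	events := make(chan headEvent, 10)
	sub := fakeSub{errc: make(chan error)}
	go func() {
		events <- headEvent{number: 42}
		time.Sleep(10 * time.Millisecond)
		close(sub.errc) // simulate subscription teardown
	}()
	loop(events, sub)
}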

View File

@ -24,7 +24,7 @@ const (
lowerLimitOfVoteBlockNumber = 256 lowerLimitOfVoteBlockNumber = 256
upperLimitOfVoteBlockNumber = 11 // refer to fetcher.maxUncleDist upperLimitOfVoteBlockNumber = 11 // refer to fetcher.maxUncleDist
highestVerifiedBlockChanSize = 10 // highestVerifiedBlockChanSize is the size of channel listening to HighestVerifiedBlockEvent. chainHeadChanSize = 10 // chainHeadChanSize is the size of channel listening to ChainHeadEvent.
) )
var ( var (
@ -57,8 +57,8 @@ type VotePool struct {
curVotesPq *votesPriorityQueue curVotesPq *votesPriorityQueue
futureVotesPq *votesPriorityQueue futureVotesPq *votesPriorityQueue
highestVerifiedBlockCh chan core.HighestVerifiedBlockEvent chainHeadCh chan core.ChainHeadEvent
highestVerifiedBlockSub event.Subscription chainHeadSub event.Subscription
votesCh chan *types.VoteEnvelope votesCh chan *types.VoteEnvelope
@ -75,13 +75,13 @@ func NewVotePool(chain *core.BlockChain, engine consensus.PoSA) *VotePool {
futureVotes: make(map[common.Hash]*VoteBox), futureVotes: make(map[common.Hash]*VoteBox),
curVotesPq: &votesPriorityQueue{}, curVotesPq: &votesPriorityQueue{},
futureVotesPq: &votesPriorityQueue{}, futureVotesPq: &votesPriorityQueue{},
highestVerifiedBlockCh: make(chan core.HighestVerifiedBlockEvent, highestVerifiedBlockChanSize), chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
votesCh: make(chan *types.VoteEnvelope, voteBufferForPut), votesCh: make(chan *types.VoteEnvelope, voteBufferForPut),
engine: engine, engine: engine,
} }
// Subscribe events from blockchain and start the main event loop. // Subscribe events from blockchain and start the main event loop.
votePool.highestVerifiedBlockSub = votePool.chain.SubscribeHighestVerifiedHeaderEvent(votePool.highestVerifiedBlockCh) votePool.chainHeadSub = votePool.chain.SubscribeChainHeadEvent(votePool.chainHeadCh)
go votePool.loop() go votePool.loop()
return votePool return votePool
@ -89,18 +89,18 @@ func NewVotePool(chain *core.BlockChain, engine consensus.PoSA) *VotePool {
// loop is the vote pool's main event loop, waiting for and reacting to outside blockchain events and votes channel events. // loop is the vote pool's main event loop, waiting for and reacting to outside blockchain events and votes channel events.
func (pool *VotePool) loop() { func (pool *VotePool) loop() {
defer pool.highestVerifiedBlockSub.Unsubscribe() defer pool.chainHeadSub.Unsubscribe()
for { for {
select { select {
// Handle ChainHeadEvent. // Handle ChainHeadEvent.
case ev := <-pool.highestVerifiedBlockCh: case ev := <-pool.chainHeadCh:
if ev.Header != nil { if ev.Block != nil {
latestBlockNumber := ev.Header.Number.Uint64() latestBlockNumber := ev.Block.NumberU64()
pool.prune(latestBlockNumber) pool.prune(latestBlockNumber)
pool.transferVotesFromFutureToCur(ev.Header) pool.transferVotesFromFutureToCur(ev.Block.Header())
} }
case <-pool.highestVerifiedBlockSub.Err(): case <-pool.chainHeadSub.Err():
return return
// Handle votes channel and put the vote into vote pool. // Handle votes channel and put the vote into vote pool.
@ -135,7 +135,7 @@ func (pool *VotePool) putIntoVotePool(vote *types.VoteEnvelope) bool {
var votesPq *votesPriorityQueue var votesPq *votesPriorityQueue
isFutureVote := false isFutureVote := false
voteBlock := pool.chain.GetVerifiedBlockByHash(targetHash) voteBlock := pool.chain.GetHeaderByHash(targetHash)
if voteBlock == nil { if voteBlock == nil {
votes = pool.futureVotes votes = pool.futureVotes
votesPq = pool.futureVotesPq votesPq = pool.futureVotesPq
@ -226,7 +226,7 @@ func (pool *VotePool) transferVotesFromFutureToCur(latestBlockHeader *types.Head
futurePqBuffer := make([]*types.VoteData, 0) futurePqBuffer := make([]*types.VoteData, 0)
for futurePq.Len() > 0 && futurePq.Peek().TargetNumber <= latestBlockNumber { for futurePq.Len() > 0 && futurePq.Peek().TargetNumber <= latestBlockNumber {
blockHash := futurePq.Peek().TargetHash blockHash := futurePq.Peek().TargetHash
header := pool.chain.GetVerifiedBlockByHash(blockHash) header := pool.chain.GetHeaderByHash(blockHash)
if header == nil { if header == nil {
// Put into pq buffer used for later put again into futurePq // Put into pq buffer used for later put again into futurePq
futurePqBuffer = append(futurePqBuffer, heap.Pop(futurePq).(*types.VoteData)) futurePqBuffer = append(futurePqBuffer, heap.Pop(futurePq).(*types.VoteData))

View File

@ -1,7 +1,7 @@
package bn256 package bn256
// For details of the algorithms used, see "Multiplication and Squaring on // For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields", Devegili et al. // Pairing-Friendly Fields, Devegili et al.
// http://eprint.iacr.org/2006/471.pdf. // http://eprint.iacr.org/2006/471.pdf.
import ( import (

View File

@ -1,7 +1,7 @@
package bn256 package bn256
// For details of the algorithms used, see "Multiplication and Squaring on // For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields", Devegili et al. // Pairing-Friendly Fields, Devegili et al.
// http://eprint.iacr.org/2006/471.pdf. // http://eprint.iacr.org/2006/471.pdf.
// gfP2 implements a field of size p² as a quadratic extension of the base field // gfP2 implements a field of size p² as a quadratic extension of the base field

View File

@ -1,7 +1,7 @@
package bn256 package bn256
// For details of the algorithms used, see "Multiplication and Squaring on // For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields", Devegili et al. // Pairing-Friendly Fields, Devegili et al.
// http://eprint.iacr.org/2006/471.pdf. // http://eprint.iacr.org/2006/471.pdf.
// gfP6 implements the field of size p⁶ as a cubic extension of gfP2 where τ³=ξ // gfP6 implements the field of size p⁶ as a cubic extension of gfP2 where τ³=ξ

View File

@ -5,7 +5,7 @@
package bn256 package bn256
// For details of the algorithms used, see "Multiplication and Squaring on // For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields", Devegili et al. // Pairing-Friendly Fields, Devegili et al.
// http://eprint.iacr.org/2006/471.pdf. // http://eprint.iacr.org/2006/471.pdf.
import ( import (

View File

@ -5,7 +5,7 @@
package bn256 package bn256
// For details of the algorithms used, see "Multiplication and Squaring on // For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields", Devegili et al. // Pairing-Friendly Fields, Devegili et al.
// http://eprint.iacr.org/2006/471.pdf. // http://eprint.iacr.org/2006/471.pdf.
import ( import (

View File

@ -5,7 +5,7 @@
package bn256 package bn256
// For details of the algorithms used, see "Multiplication and Squaring on // For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields", Devegili et al. // Pairing-Friendly Fields, Devegili et al.
// http://eprint.iacr.org/2006/471.pdf. // http://eprint.iacr.org/2006/471.pdf.
import ( import (

View File

@ -26,7 +26,6 @@ import (
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/parlia"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits" "github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
@ -441,16 +440,6 @@ func (b *EthAPIBackend) Engine() consensus.Engine {
return b.eth.engine return b.eth.engine
} }
func (b *EthAPIBackend) CurrentValidators() ([]common.Address, error) {
if p, ok := b.eth.engine.(*parlia.Parlia); ok {
service := p.APIs(b.Chain())[0].Service
currentHead := rpc.LatestBlockNumber
return service.(*parlia.API).GetValidators(&currentHead)
}
return []common.Address{}, errors.New("not supported")
}
func (b *EthAPIBackend) CurrentHeader() *types.Header { func (b *EthAPIBackend) CurrentHeader() *types.Header {
return b.eth.blockchain.CurrentHeader() return b.eth.blockchain.CurrentHeader()
} }

View File

@ -73,7 +73,7 @@ func (api *MinerAPI) SetGasPrice(gasPrice hexutil.Big) bool {
// SetGasLimit sets the gaslimit to target towards during mining. // SetGasLimit sets the gaslimit to target towards during mining.
func (api *MinerAPI) SetGasLimit(gasLimit hexutil.Uint64) bool { func (api *MinerAPI) SetGasLimit(gasLimit hexutil.Uint64) bool {
api.e.Miner().SetGasCeil(uint64(gasLimit)) api.e.Miner().SetGasCeil(uint64(gasLimit))
if uint64(gasLimit) > params.SystemTxsGas { if api.e.Miner().Mining() && uint64(gasLimit) > params.SystemTxsGas {
api.e.TxPool().SetMaxGas(uint64(gasLimit) - params.SystemTxsGas) api.e.TxPool().SetMaxGas(uint64(gasLimit) - params.SystemTxsGas)
} }
return true return true
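
In both variants the transaction pool's ceiling is the miner's gas limit minus params.SystemTxsGas, reserving headroom for system transactions; the sides differ only in whether the adjustment is gated on the node actively mining. The arithmetic as a tiny pure function, with an illustrative reserve value:

package main

import "fmt"

// poolGasCeiling reserves systemTxsGas out of the block gas limit;
// regular transactions may use at most the remainder.
func poolGasCeiling(gasLimit, systemTxsGas uint64) uint64 {
	if gasLimit <= systemTxsGas {
		return 0 // nothing left for user transactions
	}
	return gasLimit - systemTxsGas
}

func main() {
	fmt.Println(poolGasCeiling(30_000_000, 5_000_000)) // 25000000
}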

View File

@ -185,16 +185,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
} }
// Override the chain config with provided settings. // Override the chain config with provided settings.
var overrides core.ChainOverrides var overrides core.ChainOverrides
if config.OverridePassedForkTime != nil {
chainConfig.ShanghaiTime = config.OverridePassedForkTime
chainConfig.KeplerTime = config.OverridePassedForkTime
chainConfig.FeynmanTime = config.OverridePassedForkTime
chainConfig.FeynmanFixTime = config.OverridePassedForkTime
chainConfig.CancunTime = config.OverridePassedForkTime
chainConfig.HaberTime = config.OverridePassedForkTime
chainConfig.HaberFixTime = config.OverridePassedForkTime
overrides.OverridePassedForkTime = config.OverridePassedForkTime
}
if config.OverrideBohr != nil { if config.OverrideBohr != nil {
chainConfig.BohrTime = config.OverrideBohr chainConfig.BohrTime = config.OverrideBohr
overrides.OverrideBohr = config.OverrideBohr overrides.OverrideBohr = config.OverrideBohr

View File

@ -327,7 +327,7 @@ func (d *Downloader) UnregisterPeer(id string) error {
// LegacySync tries to sync up our local blockchain with a remote peer, both // LegacySync tries to sync up our local blockchain with a remote peer, both
// adding various sanity checks and wrapping it with various log entries. // adding various sanity checks and wrapping it with various log entries.
func (d *Downloader) LegacySync(id string, head common.Hash, name string, td *big.Int, ttd *big.Int, mode SyncMode) error { func (d *Downloader) LegacySync(id string, head common.Hash, td *big.Int, ttd *big.Int, mode SyncMode) error {
err := d.synchronise(id, head, td, ttd, mode, false, nil) err := d.synchronise(id, head, td, ttd, mode, false, nil)
switch err { switch err {
@ -337,7 +337,7 @@ func (d *Downloader) LegacySync(id string, head common.Hash, name string, td *bi
if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) || if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) ||
errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) || errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) ||
errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) { errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) {
log.Warn("Synchronisation failed, dropping peer", "peer", id, "name", name, "td", td, "err", err) log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
if d.dropPeer == nil { if d.dropPeer == nil {
// The dropPeer method is nil when `--copydb` is used for a local copy. // The dropPeer method is nil when `--copydb` is used for a local copy.
// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored

View File

@ -902,7 +902,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
// Simulate a synchronisation and check the required result // Simulate a synchronisation and check the required result
tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result } tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), "", big.NewInt(1000), nil, FullSync) tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync)
if _, ok := tester.peers[id]; !ok != tt.drop { if _, ok := tester.peers[id]; !ok != tt.drop {
t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop) t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
} }

View File

@ -188,9 +188,6 @@ type Config struct {
// send-transaction variants. The unit is ether. // send-transaction variants. The unit is ether.
RPCTxFeeCap float64 RPCTxFeeCap float64
// OverridePassedForkTime
OverridePassedForkTime *uint64 `toml:",omitempty"`
// OverrideBohr (TODO: remove after the fork) // OverrideBohr (TODO: remove after the fork)
OverrideBohr *uint64 `toml:",omitempty"` OverrideBohr *uint64 `toml:",omitempty"`

View File

@ -70,7 +70,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
RPCGasCap uint64 RPCGasCap uint64
RPCEVMTimeout time.Duration RPCEVMTimeout time.Duration
RPCTxFeeCap float64 RPCTxFeeCap float64
OverridePassedForkTime *uint64 `toml:",omitempty"`
OverrideBohr *uint64 `toml:",omitempty"` OverrideBohr *uint64 `toml:",omitempty"`
OverrideVerkle *uint64 `toml:",omitempty"` OverrideVerkle *uint64 `toml:",omitempty"`
BlobExtraReserve uint64 BlobExtraReserve uint64
@ -129,7 +128,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.RPCGasCap = c.RPCGasCap enc.RPCGasCap = c.RPCGasCap
enc.RPCEVMTimeout = c.RPCEVMTimeout enc.RPCEVMTimeout = c.RPCEVMTimeout
enc.RPCTxFeeCap = c.RPCTxFeeCap enc.RPCTxFeeCap = c.RPCTxFeeCap
enc.OverridePassedForkTime = c.OverridePassedForkTime
enc.OverrideBohr = c.OverrideBohr enc.OverrideBohr = c.OverrideBohr
enc.OverrideVerkle = c.OverrideVerkle enc.OverrideVerkle = c.OverrideVerkle
enc.BlobExtraReserve = c.BlobExtraReserve enc.BlobExtraReserve = c.BlobExtraReserve
@ -192,7 +190,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
RPCGasCap *uint64 RPCGasCap *uint64
RPCEVMTimeout *time.Duration RPCEVMTimeout *time.Duration
RPCTxFeeCap *float64 RPCTxFeeCap *float64
OverridePassedForkTime *uint64 `toml:",omitempty"`
OverrideBohr *uint64 `toml:",omitempty"` OverrideBohr *uint64 `toml:",omitempty"`
OverrideVerkle *uint64 `toml:",omitempty"` OverrideVerkle *uint64 `toml:",omitempty"`
BlobExtraReserve *uint64 BlobExtraReserve *uint64
@ -360,9 +357,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.RPCTxFeeCap != nil { if dec.RPCTxFeeCap != nil {
c.RPCTxFeeCap = *dec.RPCTxFeeCap c.RPCTxFeeCap = *dec.RPCTxFeeCap
} }
if dec.OverridePassedForkTime != nil {
c.OverridePassedForkTime = dec.OverridePassedForkTime
}
if dec.OverrideBohr != nil { if dec.OverrideBohr != nil {
c.OverrideBohr = dec.OverrideBohr c.OverrideBohr = dec.OverrideBohr
} }

View File

@ -868,9 +868,9 @@ func (f *BlockFetcher) importHeaders(op *blockOrHeaderInject) {
parent := f.getHeader(header.ParentHash) parent := f.getHeader(header.ParentHash)
if parent == nil { if parent == nil {
log.Debug("Unknown parent of propagated header", "peer", peer, "number", header.Number, "hash", hash, "parent", header.ParentHash) log.Debug("Unknown parent of propagated header", "peer", peer, "number", header.Number, "hash", hash, "parent", header.ParentHash)
time.Sleep(reQueueBlockTimeout)
// forget block first, then re-queue // forget block first, then re-queue
f.done <- hash f.done <- hash
time.Sleep(reQueueBlockTimeout)
f.requeue <- op f.requeue <- op
return return
} }
@ -909,9 +909,9 @@ func (f *BlockFetcher) importBlocks(op *blockOrHeaderInject) {
parent := f.getBlock(block.ParentHash()) parent := f.getBlock(block.ParentHash())
if parent == nil { if parent == nil {
log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash()) log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
time.Sleep(reQueueBlockTimeout)
// forget block first, then re-queue // forget block first, then re-queue
f.done <- hash f.done <- hash
time.Sleep(reQueueBlockTimeout)
f.requeue <- op f.requeue <- op
return return
} }
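
The two sides run the same three steps, forget the block, back off, re-queue, just in different orders; what moves is whether the fetcher loop learns about the failure before or after the pause. A simplified sketch of the forget-first ordering, with channel payloads reduced to strings:

package main

import (
	"fmt"
	"time"
)

// requeueAfterBackoff tells the fetcher to forget the block immediately,
// then waits out the backoff before scheduling the retry.
func requeueAfterBackoff(done chan<- string, requeue chan<- string, hash string, backoff time.Duration) {
	done <- hash        // forget first, freeing the block's slot right away
	time.Sleep(backoff) // give the missing parent time to arrive
	requeue <- hash     // then schedule the retry
}

func main() {
	done := make(chan string, 1)
	requeue := make(chan string, 1)
	requeueAfterBackoff(done, requeue, "0xabc", 10*time.Millisecond)
	fmt.Println(<-done, <-requeue)
}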

View File

@ -436,7 +436,7 @@ func TestInvalidLogFilterCreation(t *testing.T) {
} }
} }
// TestInvalidGetLogsRequest tests invalid getLogs requests // TestLogFilterUninstall tests invalid getLogs requests
func TestInvalidGetLogsRequest(t *testing.T) { func TestInvalidGetLogsRequest(t *testing.T) {
t.Parallel() t.Parallel()

View File

@ -321,14 +321,21 @@ func newHandler(config *handlerConfig) (*handler, error) {
broadcastBlockWithCheck := func(block *types.Block, propagate bool) { broadcastBlockWithCheck := func(block *types.Block, propagate bool) {
if propagate { if propagate {
if !(block.Header().WithdrawalsHash == nil && block.Withdrawals() == nil) && checkErrs := make(chan error, 2)
!(block.Header().EmptyWithdrawalsHash() && block.Withdrawals() != nil && len(block.Withdrawals()) == 0) {
log.Error("Propagated block has invalid withdrawals") go func() {
checkErrs <- core.ValidateListsInBody(block)
}()
go func() {
checkErrs <- core.IsDataAvailable(h.chain, block)
}()
for i := 0; i < cap(checkErrs); i++ {
err := <-checkErrs
if err != nil {
log.Error("Propagating invalid block", "number", block.Number(), "hash", block.Hash(), "err", err)
return return
} }
if err := core.IsDataAvailable(h.chain, block); err != nil {
log.Error("Propagating block with invalid sidecars", "number", block.Number(), "hash", block.Hash(), "err", err)
return
} }
} }
h.BroadcastBlock(block, propagate) h.BroadcastBlock(block, propagate)
@ -476,13 +483,13 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
h.peersPerIP[remoteIP] = h.peersPerIP[remoteIP] + 1 h.peersPerIP[remoteIP] = h.peersPerIP[remoteIP] + 1
h.peerPerIPLock.Unlock() h.peerPerIPLock.Unlock()
} }
peer.Log().Debug("Ethereum peer connected", "name", peer.Name())
// Register the peer locally // Register the peer locally
if err := h.peers.registerPeer(peer, snap, trust, bsc); err != nil { if err := h.peers.registerPeer(peer, snap, trust, bsc); err != nil {
peer.Log().Error("Ethereum peer registration failed", "err", err) peer.Log().Error("Ethereum peer registration failed", "err", err)
return err return err
} }
peer.Log().Debug("Ethereum peer connected", "name", peer.Name(), "peers.len", h.peers.len())
defer h.unregisterPeer(peer.ID()) defer h.unregisterPeer(peer.ID())
p := h.peers.peer(peer.ID()) p := h.peers.peer(peer.ID())
@ -625,7 +632,7 @@ func (h *handler) runBscExtension(peer *bsc.Peer, handler bsc.Handler) error {
bsc.EgressRegistrationErrorMeter.Mark(1) bsc.EgressRegistrationErrorMeter.Mark(1)
} }
} }
peer.Log().Error("Bsc extension registration failed", "err", err, "name", peer.Name()) peer.Log().Error("Bsc extension registration failed", "err", err)
return err return err
} }
return handler(peer) return handler(peer)
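
The rewritten broadcast check runs its two validations concurrently and fails fast on the first error; the error channel is buffered to the number of checks so the slower goroutine can still deliver its result without blocking after an early return. The same pattern generalized into a hypothetical helper:

package main

import (
	"errors"
	"fmt"
)

// firstError runs all checks concurrently and returns the first failure.
// The buffered channel lets remaining goroutines finish without leaking.
func firstError(checks ...func() error) error {
	errs := make(chan error, len(checks))
	for _, check := range checks {
		go func(f func() error) { errs <- f() }(check)
	}
	for range checks {
		if err := <-errs; err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := firstError(
		func() error { return nil },                             // e.g. body-list validation
		func() error { return errors.New("invalid sidecars") },  // e.g. data availability
	)
	fmt.Println(err) // invalid sidecars
}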

View File

@ -136,7 +136,7 @@ func (p *Peer) dispatchRequest(req *Request) error {
} }
} }
// dispatchResponse fulfils a pending request and delivers it to the requested // dispatchRequest fulfils a pending request and delivers it to the requested
// sink. // sink.
func (p *Peer) dispatchResponse(res *Response, metadata func() interface{}) error { func (p *Peer) dispatchResponse(res *Response, metadata func() interface{}) error {
resOp := &response{ resOp := &response{

View File

@ -25,7 +25,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
@ -47,7 +46,7 @@ var ProtocolVersions = []uint{ETH68}
var protocolLengths = map[uint]uint64{ETH68: 17} var protocolLengths = map[uint]uint64{ETH68: 17}
// maxMessageSize is the maximum cap on the size of a protocol message. // maxMessageSize is the maximum cap on the size of a protocol message.
var maxMessageSize = params.MaxMessageSize const maxMessageSize = 10 * 1024 * 1024
const ( const (
StatusMsg = 0x00 StatusMsg = 0x00

View File

@ -233,7 +233,7 @@ func (cs *chainSyncer) startSync(op *chainSyncOp) {
// doSync synchronizes the local blockchain with a remote peer. // doSync synchronizes the local blockchain with a remote peer.
func (h *handler) doSync(op *chainSyncOp) error { func (h *handler) doSync(op *chainSyncOp) error {
// Run the sync cycle, and disable snap sync if we're past the pivot block // Run the sync cycle, and disable snap sync if we're past the pivot block
err := h.downloader.LegacySync(op.peer.ID(), op.head, op.peer.Name(), op.td, h.chain.Config().TerminalTotalDifficulty, op.mode) err := h.downloader.LegacySync(op.peer.ID(), op.head, op.td, h.chain.Config().TerminalTotalDifficulty, op.mode)
if err != nil { if err != nil {
return err return err
} }
@ -248,9 +248,6 @@ func (h *handler) doSync(op *chainSyncOp) error {
// degenerate connectivity, but it should be healthy for the mainnet too to // degenerate connectivity, but it should be healthy for the mainnet too to
// more reliably update peers or the local TD state. // more reliably update peers or the local TD state.
if block := h.chain.GetBlock(head.Hash(), head.Number.Uint64()); block != nil { if block := h.chain.GetBlock(head.Hash(), head.Number.Uint64()); block != nil {
if h.chain.Config().IsCancun(block.Number(), block.Time()) {
block = block.WithSidecars(h.chain.GetSidecarsByHash(block.Hash()))
}
h.BroadcastBlock(block, false) h.BroadcastBlock(block, false)
} }
} }

View File

@ -131,8 +131,8 @@ func (ec *Client) BlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumb
} }
// BlobSidecars return the Sidecars of a given block number or hash. // BlobSidecars return the Sidecars of a given block number or hash.
func (ec *Client) BlobSidecars(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.BlobSidecar, error) { func (ec *Client) BlobSidecars(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.BlobTxSidecar, error) {
var r []*types.BlobSidecar var r []*types.BlobTxSidecar
err := ec.c.CallContext(ctx, &r, "eth_getBlobSidecars", blockNrOrHash.String()) err := ec.c.CallContext(ctx, &r, "eth_getBlobSidecars", blockNrOrHash.String())
if err == nil && r == nil { if err == nil && r == nil {
return nil, ethereum.NotFound return nil, ethereum.NotFound
@ -141,8 +141,8 @@ func (ec *Client) BlobSidecars(ctx context.Context, blockNrOrHash rpc.BlockNumbe
} }
// BlobSidecarByTxHash return a sidecar of a given blob transaction // BlobSidecarByTxHash return a sidecar of a given blob transaction
func (ec *Client) BlobSidecarByTxHash(ctx context.Context, hash common.Hash) (*types.BlobSidecar, error) { func (ec *Client) BlobSidecarByTxHash(ctx context.Context, hash common.Hash) (*types.BlobTxSidecar, error) {
var r *types.BlobSidecar var r *types.BlobTxSidecar
err := ec.c.CallContext(ctx, &r, "eth_getBlobSidecarByTxHash", hash) err := ec.c.CallContext(ctx, &r, "eth_getBlobSidecarByTxHash", hash)
if err == nil && r == nil { if err == nil && r == nil {
return nil, ethereum.NotFound return nil, ethereum.NotFound
@ -361,7 +361,7 @@ func (ec *Client) TransactionInBlock(ctx context.Context, blockHash common.Hash,
return json.tx, err return json.tx, err
} }
// TransactionsInBlock returns a single transaction at index in the given block. // TransactionInBlock returns a single transaction at index in the given block.
func (ec *Client) TransactionsInBlock(ctx context.Context, number *big.Int) ([]*types.Transaction, error) { func (ec *Client) TransactionsInBlock(ctx context.Context, number *big.Int) ([]*types.Transaction, error) {
var rpcTxs []*rpcTransaction var rpcTxs []*rpcTransaction
err := ec.c.CallContext(ctx, &rpcTxs, "eth_getTransactionsByBlockNumber", toBlockNumArg(number)) err := ec.c.CallContext(ctx, &rpcTxs, "eth_getTransactionsByBlockNumber", toBlockNumArg(number))
@ -376,7 +376,7 @@ func (ec *Client) TransactionsInBlock(ctx context.Context, number *big.Int) ([]*
return txs, err return txs, err
} }
// TransactionRecipientsInBlock returns a single transaction at index in the given block. // TransactionInBlock returns a single transaction at index in the given block.
func (ec *Client) TransactionRecipientsInBlock(ctx context.Context, number *big.Int) ([]*types.Receipt, error) { func (ec *Client) TransactionRecipientsInBlock(ctx context.Context, number *big.Int) ([]*types.Receipt, error) {
var rs []*types.Receipt var rs []*types.Receipt
err := ec.c.CallContext(ctx, &rs, "eth_getTransactionReceiptsByBlockNumber", toBlockNumArg(number)) err := ec.c.CallContext(ctx, &rs, "eth_getTransactionReceiptsByBlockNumber", toBlockNumArg(number))
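
Both versions of these accessors follow the same ethclient convention: decode the raw RPC result into the target type and translate a nil result into ethereum.NotFound. A hedged, self-contained sketch of that shape; the endpoint name matches the diff, the rest is illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
)

// blobSidecars mirrors the client method above: a nil result means not found.
func blobSidecars(ctx context.Context, c *rpc.Client, blockRef string) ([]*types.BlobTxSidecar, error) {
	var r []*types.BlobTxSidecar
	if err := c.CallContext(ctx, &r, "eth_getBlobSidecars", blockRef); err != nil {
		return nil, err
	}
	if r == nil {
		return nil, ethereum.NotFound
	}
	return r, nil
}

func main() {
	client, err := rpc.Dial("http://localhost:8545") // illustrative endpoint
	if err != nil {
		fmt.Println(err)
		return
	}
	defer client.Close()
	_, err = blobSidecars(context.Background(), client, "latest")
	fmt.Println(err)
}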

go.mod
View File

@ -27,6 +27,7 @@ require (
github.com/fatih/color v1.16.0 github.com/fatih/color v1.16.0
github.com/fatih/structs v1.1.0 github.com/fatih/structs v1.1.0
github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e
github.com/fjl/memsize v0.0.2
github.com/fsnotify/fsnotify v1.6.0 github.com/fsnotify/fsnotify v1.6.0
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08
github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46

go.sum
View File

@ -326,6 +326,7 @@ github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R
github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
@ -335,6 +336,8 @@ github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5 h1:6dVcS0LktRSyEE
github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5/go.mod h1:S8yiDeAXy8f88W4Ul+0dBMPx49S05byYbmZD6Uv94K4= github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5/go.mod h1:S8yiDeAXy8f88W4Ul+0dBMPx49S05byYbmZD6Uv94K4=
github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e h1:bBLctRc7kr01YGvaDfgLbTwjFNW5jdp5y5rj8XXBHfY= github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e h1:bBLctRc7kr01YGvaDfgLbTwjFNW5jdp5y5rj8XXBHfY=
github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=

View File

@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/metrics/exp" "github.com/ethereum/go-ethereum/metrics/exp"
"github.com/fjl/memsize/memsizeui"
"github.com/mattn/go-colorable" "github.com/mattn/go-colorable"
"github.com/mattn/go-isatty" "github.com/mattn/go-isatty"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
@ -37,6 +38,8 @@ import (
"gopkg.in/natefinch/lumberjack.v2" "gopkg.in/natefinch/lumberjack.v2"
) )
var Memsize memsizeui.Handler
var ( var (
verbosityFlag = &cli.IntFlag{ verbosityFlag = &cli.IntFlag{
Name: "verbosity", Name: "verbosity",
@ -310,6 +313,7 @@ func StartPProf(address string, withMetrics bool) {
if withMetrics { if withMetrics {
exp.Exp(metrics.DefaultRegistry) exp.Exp(metrics.DefaultRegistry)
} }
http.Handle("/memsize/", http.StripPrefix("/memsize", &Memsize))
log.Info("Starting pprof server", "addr", fmt.Sprintf("http://%s/debug/pprof", address)) log.Info("Starting pprof server", "addr", fmt.Sprintf("http://%s/debug/pprof", address))
go func() { go func() {
if err := http.ListenAndServe(address, nil); err != nil { if err := http.ListenAndServe(address, nil); err != nil {

View File

@ -862,72 +862,48 @@ func (s *BlockChainAPI) Health() bool {
return true return true
} }
func (s *BlockChainAPI) getFinalizedNumber(ctx context.Context, verifiedValidatorNum int64) (int64, error) { // GetFinalizedHeader returns the requested finalized block header.
parliaConfig := s.b.ChainConfig().Parlia // - probabilisticFinalized should be in range [2,21],
if parliaConfig == nil { // then the block header with number `max(fastFinalized, latest-probabilisticFinalized)` is returned
return 0, fmt.Errorf("only parlia engine supported") func (s *BlockChainAPI) GetFinalizedHeader(ctx context.Context, probabilisticFinalized int64) (map[string]interface{}, error) {
} if probabilisticFinalized < 2 || probabilisticFinalized > 21 {
return nil, fmt.Errorf("%d out of range [2,21]", probabilisticFinalized)
curValidators, err := s.b.CurrentValidators()
if err != nil { // impossible
return 0, err
}
valLen := int64(len(curValidators))
if verifiedValidatorNum < 1 || verifiedValidatorNum > valLen {
return 0, fmt.Errorf("%d out of range [1,%d]", verifiedValidatorNum, valLen)
} }
var err error
fastFinalizedHeader, err := s.b.HeaderByNumber(ctx, rpc.FinalizedBlockNumber) fastFinalizedHeader, err := s.b.HeaderByNumber(ctx, rpc.FinalizedBlockNumber)
if err != nil { // impossible
return 0, err
}
latestHeader, err := s.b.HeaderByNumber(ctx, rpc.LatestBlockNumber)
if err != nil { // impossible
return 0, err
}
lastHeader := latestHeader
confirmedValSet := make(map[common.Address]struct{}, valLen)
confirmedValSet[lastHeader.Coinbase] = struct{}{}
for count := 1; int64(len(confirmedValSet)) < verifiedValidatorNum && count <= int(parliaConfig.Epoch) && lastHeader.Number.Int64() > max(fastFinalizedHeader.Number.Int64(), 1); count++ {
lastHeader, err = s.b.HeaderByHash(ctx, lastHeader.ParentHash)
if err != nil { // impossible
return 0, err
}
confirmedValSet[lastHeader.Coinbase] = struct{}{}
}
finalizedBlockNumber := max(fastFinalizedHeader.Number.Int64(), lastHeader.Number.Int64())
log.Debug("getFinalizedNumber", "LatestBlockNumber", latestHeader.Number.Int64(), "fastFinalizedHeight", fastFinalizedHeader.Number.Int64(),
"lastHeader", lastHeader.Number.Int64(), "finalizedBlockNumber", finalizedBlockNumber, "len(confirmedValSet)", len(confirmedValSet))
return finalizedBlockNumber, nil
}
// GetFinalizedHeader returns the finalized block header based on the specified parameters.
// - `verifiedValidatorNum` must be within the range [1, len(currentValidators)].
// - The function calculates `probabilisticFinalizedHeight` as the highest height of the block verified by `verifiedValidatorNum` validators,
// it then returns the block header with a height equal to `max(fastFinalizedHeight, probabilisticFinalizedHeight)`.
// - The height of the returned block header is guaranteed to be monotonically increasing.
func (s *BlockChainAPI) GetFinalizedHeader(ctx context.Context, verifiedValidatorNum int64) (map[string]interface{}, error) {
finalizedBlockNumber, err := s.getFinalizedNumber(ctx, verifiedValidatorNum)
if err != nil { // impossible if err != nil { // impossible
return nil, err return nil, err
} }
latestHeader, err := s.b.HeaderByNumber(ctx, rpc.LatestBlockNumber)
if err != nil { // impossible
return nil, err
}
finalizedBlockNumber := max(fastFinalizedHeader.Number.Int64(), latestHeader.Number.Int64()-probabilisticFinalized)
return s.GetHeaderByNumber(ctx, rpc.BlockNumber(finalizedBlockNumber)) return s.GetHeaderByNumber(ctx, rpc.BlockNumber(finalizedBlockNumber))
} }
// GetFinalizedBlock returns the finalized block based on the specified parameters. // GetFinalizedBlock returns the requested finalized block.
// - `verifiedValidatorNum` must be within the range [1, len(currentValidators)]. // - probabilisticFinalized should be in range [2,21],
// - The function calculates `probabilisticFinalizedHeight` as the highest height of the block verified by `verifiedValidatorNum` validators, // then the block with number `max(fastFinalized, latest-probabilisticFinalized)` is returned
// it then returns the block with a height equal to `max(fastFinalizedHeight, probabilisticFinalizedHeight)`. // - When fullTx is true all transactions in the block are returned, otherwise
// - If `fullTx` is true, the block includes all transactions; otherwise, only transaction hashes are included. // only the transaction hash is returned.
// - The height of the returned block is guaranteed to be monotonically increasing. func (s *BlockChainAPI) GetFinalizedBlock(ctx context.Context, probabilisticFinalized int64, fullTx bool) (map[string]interface{}, error) {
func (s *BlockChainAPI) GetFinalizedBlock(ctx context.Context, verifiedValidatorNum int64, fullTx bool) (map[string]interface{}, error) { if probabilisticFinalized < 2 || probabilisticFinalized > 21 {
finalizedBlockNumber, err := s.getFinalizedNumber(ctx, verifiedValidatorNum) return nil, fmt.Errorf("%d out of range [2,21]", probabilisticFinalized)
}
var err error
fastFinalizedHeader, err := s.b.HeaderByNumber(ctx, rpc.FinalizedBlockNumber)
if err != nil { // impossible if err != nil { // impossible
return nil, err return nil, err
} }
latestHeader, err := s.b.HeaderByNumber(ctx, rpc.LatestBlockNumber)
if err != nil { // impossible
return nil, err
}
finalizedBlockNumber := max(fastFinalizedHeader.Number.Int64(), latestHeader.Number.Int64()-probabilisticFinalized)
return s.GetBlockByNumber(ctx, rpc.BlockNumber(finalizedBlockNumber), fullTx) return s.GetBlockByNumber(ctx, rpc.BlockNumber(finalizedBlockNumber), fullTx)
} }
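
Both the header and the block variant reduce to the same arithmetic: return the fast-finality checkpoint unless looking probabilisticFinalized blocks behind the latest head lands higher. As a pure function with illustrative names:

package main

import "fmt"

// finalizedNumber picks the higher of the fast-finalized height and the
// probabilistic height (latest minus the caller-chosen look-behind).
func finalizedNumber(fastFinalized, latest, lookBehind int64) int64 {
	if n := latest - lookBehind; n > fastFinalized {
		return n
	}
	return fastFinalized
}

func main() {
	// latest=1000 with a look-behind of 21 gives 979; fast finality at 990 wins.
	fmt.Println(finalizedNumber(990, 1000, 21)) // 990
}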

View File

@ -633,7 +633,6 @@ func (b testBackend) SubscribeNewTxsEvent(events chan<- core.NewTxsEvent) event.
} }
func (b testBackend) ChainConfig() *params.ChainConfig { return b.chain.Config() } func (b testBackend) ChainConfig() *params.ChainConfig { return b.chain.Config() }
func (b testBackend) Engine() consensus.Engine { return b.chain.Engine() } func (b testBackend) Engine() consensus.Engine { return b.chain.Engine() }
func (b testBackend) CurrentValidators() ([]common.Address, error) { return []common.Address{}, nil }
func (b testBackend) GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) { func (b testBackend) GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) {
panic("implement me") panic("implement me")
} }

View File

@ -89,8 +89,6 @@ type Backend interface {
ChainConfig() *params.ChainConfig ChainConfig() *params.ChainConfig
Engine() consensus.Engine Engine() consensus.Engine
// CurrentValidators return the list of validator at the latest block
CurrentValidators() ([]common.Address, error)
// This is copied from filters.Backend // This is copied from filters.Backend
// eth/filters needs to be initialized from this backend type, so methods needed by // eth/filters needs to be initialized from this backend type, so methods needed by

View File

@@ -416,8 +416,6 @@ func (b *backendMock) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent)
 func (b *backendMock) Engine() consensus.Engine { return nil }
-func (b *backendMock) CurrentValidators() ([]common.Address, error) { return []common.Address{}, nil }
 func (b *backendMock) MevRunning() bool                       { return false }
 func (b *backendMock) HasBuilder(builder common.Address) bool { return false }
 func (b *backendMock) MevParams() *types.MevParams {


@ -18,7 +18,6 @@ import (
"github.com/ethereum/go-ethereum/common/bidutil" "github.com/ethereum/go-ethereum/common/bidutil"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
@ -83,7 +82,6 @@ type bidSimulator struct {
delayLeftOver time.Duration delayLeftOver time.Duration
minGasPrice *big.Int minGasPrice *big.Int
chain *core.BlockChain chain *core.BlockChain
txpool *txpool.TxPool
chainConfig *params.ChainConfig chainConfig *params.ChainConfig
engine consensus.Engine engine consensus.Engine
bidWorker bidWorker bidWorker bidWorker
@ -120,7 +118,7 @@ func newBidSimulator(
config *MevConfig, config *MevConfig,
delayLeftOver time.Duration, delayLeftOver time.Duration,
minGasPrice *big.Int, minGasPrice *big.Int,
eth Backend, chain *core.BlockChain,
chainConfig *params.ChainConfig, chainConfig *params.ChainConfig,
engine consensus.Engine, engine consensus.Engine,
bidWorker bidWorker, bidWorker bidWorker,
@ -129,8 +127,7 @@ func newBidSimulator(
config: config, config: config,
delayLeftOver: delayLeftOver, delayLeftOver: delayLeftOver,
minGasPrice: minGasPrice, minGasPrice: minGasPrice,
chain: eth.BlockChain(), chain: chain,
txpool: eth.TxPool(),
chainConfig: chainConfig, chainConfig: chainConfig,
engine: engine, engine: engine,
bidWorker: bidWorker, bidWorker: bidWorker,
@ -144,7 +141,7 @@ func newBidSimulator(
simulatingBid: make(map[common.Hash]*BidRuntime), simulatingBid: make(map[common.Hash]*BidRuntime),
} }
b.chainHeadSub = b.chain.SubscribeChainHeadEvent(b.chainHeadCh) b.chainHeadSub = chain.SubscribeChainHeadEvent(b.chainHeadCh)
if config.Enabled { if config.Enabled {
b.bidReceiving.Store(true) b.bidReceiving.Store(true)
@ -412,7 +409,7 @@ func (b *bidSimulator) clearLoop() {
} }
delete(b.bestBid, parentHash) delete(b.bestBid, parentHash)
for k, v := range b.bestBid { for k, v := range b.bestBid {
if v.bid.BlockNumber <= blockNumber-b.chain.TriesInMemory() { if v.bid.BlockNumber <= blockNumber-core.TriesInMemory {
v.env.discard() v.env.discard()
delete(b.bestBid, k) delete(b.bestBid, k)
} }
@ -421,7 +418,7 @@ func (b *bidSimulator) clearLoop() {
b.simBidMu.Lock() b.simBidMu.Lock()
for k, v := range b.simulatingBid { for k, v := range b.simulatingBid {
if v.bid.BlockNumber <= blockNumber-b.chain.TriesInMemory() { if v.bid.BlockNumber <= blockNumber-core.TriesInMemory {
v.env.discard() v.env.discard()
delete(b.simulatingBid, k) delete(b.simulatingBid, k)
} }
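Both loops above prune cached bids by the same rule; the only change is whether the trie-retention depth comes from the chain at runtime or from the package constant. A standalone sketch of the predicate (map contents and numbers are hypothetical; upstream's default retention is 128 tries):

package main

import "fmt"

// pruneStale mirrors the clearLoop predicate: a cached bid whose target block
// is at or below head-triesInMemory can no longer be simulated against an
// in-memory state, so it is dropped.
func pruneStale(bids map[string]uint64, head, triesInMemory uint64) {
	for k, blockNumber := range bids {
		if blockNumber <= head-triesInMemory {
			delete(bids, k) // deleting during range is safe in Go
		}
	}
}

func main() {
	bids := map[string]uint64{"stale": 890, "fresh": 1010}
	pruneStale(bids, 1024, 128) // threshold 896: "stale" is dropped
	fmt.Println(bids)           // map[fresh:1010]
}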
@@ -628,41 +625,18 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
 	// check if bid gas price is lower than min gas price
 	{
 		bidGasUsed := uint64(0)
-		bidGasFee := big.NewInt(0)
-		for i, receipt := range bidRuntime.env.receipts {
-			tx := bidRuntime.env.txs[i]
-			if !b.txpool.Has(tx.Hash()) {
+		bidGasFee := bidRuntime.env.state.GetBalance(consensus.SystemAddress)
+		for _, receipt := range bidRuntime.env.receipts {
 			bidGasUsed += receipt.GasUsed
-				effectiveTip, er := tx.EffectiveGasTip(bidRuntime.env.header.BaseFee)
-				if er != nil {
-					err = errors.New("failed to calculate effective tip")
-					return
-				}
-				if bidRuntime.env.header.BaseFee != nil {
-					effectiveTip.Add(effectiveTip, bidRuntime.env.header.BaseFee)
-				}
-				gasFee := new(big.Int).Mul(effectiveTip, new(big.Int).SetUint64(receipt.GasUsed))
-				bidGasFee.Add(bidGasFee, gasFee)
-				if tx.Type() == types.BlobTxType {
-					blobFee := new(big.Int).Mul(receipt.BlobGasPrice, new(big.Int).SetUint64(receipt.BlobGasUsed))
-					bidGasFee.Add(bidGasFee, blobFee)
-				}
-			}
 		}
-		// if bid txs are all from mempool, do not check gas price
-		if bidGasUsed != 0 {
-			bidGasPrice := new(big.Int).Div(bidGasFee, new(big.Int).SetUint64(bidGasUsed))
+		bidGasPrice := new(big.Int).Div(bidGasFee.ToBig(), new(big.Int).SetUint64(bidGasUsed))
 		if bidGasPrice.Cmp(b.minGasPrice) < 0 {
-			err = fmt.Errorf("bid gas price is lower than min gas price, bid:%v, min:%v", bidGasPrice, b.minGasPrice)
+			err = errors.New("bid gas price is lower than min gas price")
 			return
 		}
 	}
-	}
 	// if enable greedy merge, fill bid env with transactions from mempool
 	if b.config.GreedyMergeTx {
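The removed (`-`) side replaces the SystemAddress-balance shortcut with per-transaction accounting: only transactions not already in the local mempool count toward the bid's fee, and each contributes gasUsed times its effective tip plus base fee, with blob fees added for blob transactions. A minimal sketch of that per-transaction formula (all inputs here are hypothetical, and the helper name is not from the diff):

package main

import (
	"fmt"
	"math/big"
)

// txGasFee illustrates the per-transaction accounting:
// gasUsed * (effectiveTip + baseFee), plus blobGasUsed * blobGasPrice for
// blob transactions.
func txGasFee(effectiveTip, baseFee *big.Int, gasUsed uint64, blobGasPrice *big.Int, blobGasUsed uint64) *big.Int {
	price := new(big.Int).Set(effectiveTip)
	if baseFee != nil {
		price.Add(price, baseFee)
	}
	fee := new(big.Int).Mul(price, new(big.Int).SetUint64(gasUsed))
	if blobGasPrice != nil {
		fee.Add(fee, new(big.Int).Mul(blobGasPrice, new(big.Int).SetUint64(blobGasUsed)))
	}
	return fee
}

func main() {
	// 1 gwei tip on top of a 5 gwei base fee, 21000 gas, no blobs.
	fmt.Println(txGasFee(big.NewInt(1e9), big.NewInt(5e9), 21000, nil, 0)) // 126000000000000
}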
@@ -692,14 +666,6 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
 		return
 	}
-	// check bid size
-	if bidRuntime.env.size+blockReserveSize > params.MaxMessageSize {
-		log.Error("BidSimulator: failed to check bid size", "builder", bidRuntime.bid.Builder,
-			"bidHash", bidRuntime.bid.Hash(), "env.size", bidRuntime.env.size)
-		err = errors.New("invalid bid size")
-		return
-	}
 	bestBid := b.GetBestBid(parentHash)
 	if bestBid == nil {
 		log.Info("[BID RESULT]", "win", "true[first]", "builder", bidRuntime.bid.Builder, "hash", bidRuntime.bid.Hash().TerminalString())
@@ -866,7 +832,6 @@ func (r *BidRuntime) commitTransaction(chain *core.BlockChain, chainConfig *para
 	}
 	r.env.tcount++
-	r.env.size += uint32(tx.Size())
 	return nil
 }


@@ -62,7 +62,7 @@ type Config struct {
 // DefaultConfig contains default settings for miner.
 var DefaultConfig = Config{
-	GasCeil:  0,
+	GasCeil:  30000000,
 	GasPrice: big.NewInt(params.GWei),
 	// The default recommit time is chosen as two seconds since
@@ -102,7 +102,7 @@ func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *even
 		worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, false),
 	}
-	miner.bidSimulator = newBidSimulator(&config.Mev, config.DelayLeftOver, config.GasPrice, eth, chainConfig, engine, miner.worker)
+	miner.bidSimulator = newBidSimulator(&config.Mev, config.DelayLeftOver, config.GasPrice, eth.BlockChain(), chainConfig, engine, miner.worker)
 	miner.worker.setBestBidFetcher(miner.bidSimulator)
 	miner.wg.Add(1)


@@ -70,12 +70,6 @@ const (
 	// the default to wait for the mev miner to finish
 	waitMEVMinerEndTimeLimit = 50 * time.Millisecond
-	// Reserve block size for the following 3 components:
-	// a. System transactions at the end of the block
-	// b. Seal in the block header
-	// c. Overhead from RLP encoding
-	blockReserveSize = 100 * 1024
 )
 var (
@@ -95,7 +89,6 @@ type environment struct {
 	signer   types.Signer
 	state    *state.StateDB // apply state changes here
 	tcount   int            // tx count in cycle
-	size     uint32         // almost accurate block size,
 	gasPool  *core.GasPool  // available gas used to pack transactions
 	coinbase common.Address
@@ -112,7 +105,6 @@ func (env *environment) copy() *environment {
 		signer:   env.signer,
 		state:    env.state.Copy(),
 		tcount:   env.tcount,
-		size:     env.size,
 		coinbase: env.coinbase,
 		header:   types.CopyHeader(env.header),
 		receipts: copyReceipts(env.receipts),
@@ -903,13 +895,6 @@ LOOP:
 			txs.Pop()
 			continue
 		}
-		// If we don't have enough size left for the next transaction, skip it.
-		if env.size+uint32(tx.Size())+blockReserveSize > params.MaxMessageSize {
-			log.Trace("Not enough size left for transaction", "hash", ltx.Hash,
-				"env.size", env.size, "needed", uint32(tx.Size()))
-			txs.Pop()
-			continue
-		}
 		// Error may be ignored here. The error has already been checked
 		// during transaction acceptance is the transaction pool.
 		from, _ := types.Sender(env.signer, tx)
@@ -935,7 +920,6 @@ LOOP:
 			// Everything ok, collect the logs and shift in the next transaction from the same account
 			coalescedLogs = append(coalescedLogs, logs...)
 			env.tcount++
-			env.size += uint32(tx.Size()) // size of BlobTxSidecar included
 			txs.Shift()
 		default:
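For context, the removed bookkeeping enforced a soft cap so that a sealed block, plus system transactions, the header seal, and RLP framing, stays under the p2p message limit. A standalone sketch of the budget check (constants copied from the deleted code; the helper name and the sizes in main are hypothetical):

package main

import "fmt"

const (
	maxMessageSize   = 10 * 1024 * 1024 // p2p message cap assumed by the removed check
	blockReserveSize = 100 * 1024       // reserve for system txs, header seal, RLP overhead
)

// fitsInBlock is the complement of the removed skip condition: a transaction
// is packed only if the running block size plus the tx size stays under the
// message cap after keeping the reserve.
func fitsInBlock(envSize, txSize uint32) bool {
	return envSize+txSize+blockReserveSize <= maxMessageSize
}

func main() {
	fmt.Println(fitsInBlock(10*1024*1024-200*1024, 50*1024)) // true: 50 KiB still fits
	fmt.Println(fitsInBlock(10*1024*1024-120*1024, 50*1024)) // false: would breach the reserve
}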
@@ -1048,8 +1032,6 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
 		}
 		if w.chainConfig.Parlia == nil {
 			header.ParentBeaconRoot = genParams.beaconRoot
-		} else if w.chainConfig.IsBohr(header.Number, header.Time) {
-			header.ParentBeaconRoot = new(common.Hash)
 		}
 	}
 	// Could potentially happen if starting to mine in an odd state.
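A compact sketch of the header rule above (types simplified; the zero-valued root stands in for new(common.Hash), on the assumption that a Parlia chain has no beacon root of its own to reference):

package main

import "fmt"

type hash [32]byte

// prepareBeaconRoot mirrors the removed branch: non-Parlia chains take the
// beacon root handed in by the engine API, while Parlia chains set a zeroed
// root once Bohr is active and leave the field unset before that.
func prepareBeaconRoot(isParlia, isBohr bool, engineRoot *hash) *hash {
	if !isParlia {
		return engineRoot
	}
	if isBohr {
		return new(hash) // zero-valued root, matching new(common.Hash)
	}
	return nil
}

func main() {
	fmt.Println(prepareBeaconRoot(true, true, nil))  // &[0 0 ...]: zeroed root under Bohr
	fmt.Println(prepareBeaconRoot(true, false, nil)) // <nil>: field unset before Bohr
}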
@@ -1071,9 +1053,6 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
 		vmenv := vm.NewEVM(context, vm.TxContext{}, env.state, w.chainConfig, vm.Config{})
 		core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, vmenv, env.state)
 	}
-	env.size = uint32(env.header.Size())
 	return env, nil
 }


@@ -153,8 +153,8 @@ var (
 		FeynmanFixTime: newUint64(1713419340), // 2024-04-18 05:49:00 AM UTC
 		CancunTime:     newUint64(1718863500), // 2024-06-20 06:05:00 AM UTC
 		HaberTime:      newUint64(1718863500), // 2024-06-20 06:05:00 AM UTC
-		HaberFixTime:   newUint64(1727316120), // 2024-09-26 02:02:00 AM UTC
-		BohrTime:       newUint64(1727317200), // 2024-09-26 02:20:00 AM UTC
+		HaberFixTime:   nil, // TBD
+		BohrTime:       nil,
 		Parlia: &ParliaConfig{
 			Period: 3,
@@ -195,7 +195,7 @@ var (
 		CancunTime:   newUint64(1713330442), // 2024-04-17 05:07:22 AM UTC
 		HaberTime:    newUint64(1716962820), // 2024-05-29 06:07:00 AM UTC
 		HaberFixTime: newUint64(1719986788), // 2024-07-03 06:06:28 AM UTC
-		BohrTime:     newUint64(1724116996), // 2024-08-20 01:23:16 AM UTC
+		BohrTime:     nil,
 		Parlia: &ParliaConfig{
 			Period: 3,


@@ -29,8 +29,6 @@ const (
 	GenesisGasLimit      uint64 = 4712388 // Gas limit of the Genesis block.
 	PayBidTxGasLimit     uint64 = 25000   // Gas limit of the PayBidTx in the types.BidArgs.
-	MaxMessageSize       uint32 = 10 * 1024 * 1024 // MaxMessageSize is the maximum cap on the size of a eth protocol message.
 	MaximumExtraDataSize uint64 = 32      // Maximum size extra data may be after Genesis.
 	ForkIDSize           uint64 = 4       // The length of fork id
 	ExpByteGas           uint64 = 10      // Times ceil(log256(exponent)) for the EXP instruction.
@@ -194,11 +192,6 @@ var (
 	MinBlocksForBlobRequests           uint64 = 524288              // it keeps blob data available for ~18.2 days in local, ref: https://github.com/bnb-chain/BEPs/blob/master/BEPs/BEP-336.md#51-parameters.
 	DefaultExtraReserveForBlobRequests uint64 = 1 * (24 * 3600) / 3 // it adds more time for expired blobs for some request cases, like expiry blob when remote peer is syncing, default 1 day.
 	BreatheBlockInterval               uint64 = 86400               // Controls the interval for updateValidatorSetV2
-	// used for testing:
-	// [1,9] except 2 --> used as turn length directly
-	// 2 --> use random values to test switching turn length
-	// 0 and other values --> get turn length from contract
-	FixedTurnLength uint64 = 0
 )
 // Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations
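The deleted testing knob's semantics, as stated by its own comment, can be sketched as follows (the random range is an assumption, and the contract fallback is stubbed; neither is taken from the diff):

package main

import (
	"fmt"
	"math/rand"
)

// turnLength mirrors the deleted FixedTurnLength comment: values 1-9 except 2
// are used as the turn length directly, 2 picks a random value to exercise
// turn-length switching, and anything else defers to the on-chain value.
func turnLength(fixed uint64, fromContract func() uint64) uint64 {
	switch {
	case fixed == 2:
		return uint64(rand.Intn(8)) + 1 // random test value in [1,8] (assumed range)
	case fixed >= 1 && fixed <= 9:
		return fixed
	default:
		return fromContract()
	}
}

func main() {
	fmt.Println(turnLength(4, func() uint64 { return 1 })) // 4: used directly
	fmt.Println(turnLength(0, func() uint64 { return 1 })) // 1: from contract
}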


@@ -23,7 +23,7 @@ import (
 const (
 	VersionMajor = 1  // Major version component of the current release
 	VersionMinor = 4  // Minor version component of the current release
-	VersionPatch = 15 // Patch version component of the current release
+	VersionPatch = 12 // Patch version component of the current release
 	VersionMeta  = "" // Version metadata to append to the version string
 )


@@ -1,19 +1,8 @@
-From 32329440626abd6e9668c2d5bd2e7b719e951e01 Mon Sep 17 00:00:00 2001
-From: NathanBSC <Nathan.l@nodereal.io>
-Date: Wed, 31 Jul 2024 15:01:28 +0800
-Subject: [PATCH] diff go ethereum
-
----
- core/vm/contracts.go      | 10 ----------
- core/vm/jump_table.go     |  2 +-
- params/protocol_params.go |  2 +-
- 3 files changed, 2 insertions(+), 12 deletions(-)
-
 diff --git a/core/vm/contracts.go b/core/vm/contracts.go
-index 38a6cac24..7eb29c3ed 100644
+index 5988bb15f..c92cbf542 100644
 --- a/core/vm/contracts.go
 +++ b/core/vm/contracts.go
-@@ -84,9 +84,6 @@ var PrecompiledContractsIstanbul = map[common.Address]PrecompiledContract{
+@@ -83,9 +83,6 @@ var PrecompiledContractsIstanbul = map[common.Address]PrecompiledContract{
 	common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{},
 	common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{},
 	common.BytesToAddress([]byte{9}): &blake2F{},
@@ -23,7 +12,7 @@ index 38a6cac24..7eb29c3ed 100644
 }
 var PrecompiledContractsNano = map[common.Address]PrecompiledContract{
-@@ -239,13 +236,6 @@ var PrecompiledContractsCancun = map[common.Address]PrecompiledContract{
+@@ -238,13 +235,6 @@ var PrecompiledContractsCancun = map[common.Address]PrecompiledContract{
 	common.BytesToAddress([]byte{8}):    &bn256PairingIstanbul{},
 	common.BytesToAddress([]byte{9}):    &blake2F{},
 	common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{},
@@ -36,7 +25,7 @@ index 38a6cac24..7eb29c3ed 100644
 -	common.BytesToAddress([]byte{105}): &secp256k1SignatureRecover{},
 }
-// PrecompiledContractsHaber contains the default set of pre-compiled Ethereum
+// PrecompiledContractsBLS contains the set of pre-compiled Ethereum
 diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
 index 70c543f14..65716f944 100644
 --- a/core/vm/jump_table.go
@@ -51,7 +40,7 @@ index 70c543f14..65716f944 100644
 	enable3860(&instructionSet) // Limit and meter initcode
 diff --git a/params/protocol_params.go b/params/protocol_params.go
-index 65b2d942c..bb085512f 100644
+index b84fa148f..97bf6c4d2 100644
 --- a/params/protocol_params.go
 +++ b/params/protocol_params.go
 @@ -23,7 +23,7 @@ import (
@@ -63,6 +52,3 @@ index 65b2d942c..bb085512f 100644
 	MinGasLimit     uint64 = 5000               // Minimum the gas limit may ever be.
 	MaxGasLimit     uint64 = 0x7fffffffffffffff // Maximum the gas limit (2^63-1).
 	GenesisGasLimit uint64 = 4712388            // Gas limit of the Genesis block.
---
-2.41.0


@@ -11,5 +11,4 @@ echo "PASS",$PASS,"FAIL",$FAIL
 if [ $FAIL -ne 0 ]
 then
 	cat fail.log
-	exit 1
 fi

Some files were not shown because too many files have changed in this diff.