Compare commits

8 Commits:

- d572c77e4c
- 565085959b
- b7b1b0c001
- 08702d3380
- 91e3a3ea1f
- 73477bd0fc
- 8749c8e8ce
- 79fe2899c7
.github/workflows/pre-release.yml (vendored, 16 changed lines)

```diff
@@ -82,28 +82,28 @@ jobs:
       # ==============================

       - name: Upload Linux Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'ubuntu-latest'
         with:
           name: linux
           path: ./build/bin/geth

       - name: Upload MacOS Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'macos-latest'
         with:
           name: macos
           path: ./build/bin/geth

       - name: Upload Windows Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'windows-latest'
         with:
           name: windows
           path: ./build/bin/geth.exe

       - name: Upload ARM-64 Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'ubuntu-latest'
         with:
           name: arm64
@@ -125,25 +125,25 @@ jobs:
       # ==============================

       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
         with:
           name: linux
           path: ./linux

       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
         with:
           name: macos
           path: ./macos

       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
         with:
           name: windows
           path: ./windows

       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
         with:
           name: arm64
           path: ./arm64
```
.github/workflows/release.yml (vendored, 16 changed lines)

```diff
@@ -81,28 +81,28 @@ jobs:
       # ==============================

       - name: Upload Linux Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'ubuntu-latest'
         with:
           name: linux
           path: ./build/bin/geth

       - name: Upload MacOS Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'macos-latest'
         with:
           name: macos
           path: ./build/bin/geth

       - name: Upload Windows Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'windows-latest'
         with:
           name: windows
           path: ./build/bin/geth.exe

       - name: Upload ARM-64 Build
-        uses: actions/upload-artifact@v4.3.3
+        uses: actions/upload-artifact@v3
         if: matrix.os == 'ubuntu-latest'
         with:
           name: arm64
@@ -124,25 +124,25 @@ jobs:
       # ==============================

       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
         with:
           name: linux
           path: ./linux

       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
         with:
           name: macos
           path: ./macos

       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
         with:
           name: windows
           path: ./windows

       - name: Download Artifacts
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v3
         with:
           name: arm64
           path: ./arm64
```
.nancy-ignore

```diff
@@ -1,3 +1 @@
 CVE-2024-34478 # "CWE-754: Improper Check for Unusual or Exceptional Conditions." This vulnerability is BTC only, BSC does not have the issue.
-CVE-2024-6104 # "CWE-532: Information Exposure Through Log Files" This is caused by the vulnerabilities go-retryablehttp@v0.7.4, it is only used in cmd devp2p, impact is limited. will upgrade to v0.7.7 later
-CVE-2024-8421 # "CWE-400: Uncontrolled Resource Consumption (Resource Exhaustion)" This vulnerability is caused by issues in the golang.org/x/net package. Even the latest version(v0.29.0) has not yet addressed it, but we will continue to monitor updates closely.
```
CHANGELOG.md (151 changed lines)

```diff
@@ -1,155 +1,4 @@
 # Changelog
-## v1.4.15
-### BUGFIX
-* [\#2680](https://github.com/bnb-chain/bsc/pull/2680) txpool: apply miner's gasceil to txpool
-* [\#2688](https://github.com/bnb-chain/bsc/pull/2688) txpool: set default GasCeil from 30M to 0
-* [\#2696](https://github.com/bnb-chain/bsc/pull/2696) miner: limit block size to eth protocol msg size
-* [\#2684](https://github.com/bnb-chain/bsc/pull/2684) eth: Add sidecars when available to broadcasted current block
-
-### FEATURE
-* [\#2672](https://github.com/bnb-chain/bsc/pull/2672) faucet: with mainnet balance check, 0.002BNB at least
-* [\#2678](https://github.com/bnb-chain/bsc/pull/2678) beaconserver: simulated beacon api server for op-stack
-* [\#2687](https://github.com/bnb-chain/bsc/pull/2687) faucet: support customized token
-* [\#2698](https://github.com/bnb-chain/bsc/pull/2698) faucet: add example for custimized token
-* [\#2706](https://github.com/bnb-chain/bsc/pull/2706) faucet: update DIN token faucet support
-
-### IMPROVEMENT
-* [\#2677](https://github.com/bnb-chain/bsc/pull/2677) log: add some p2p log
-* [\#2679](https://github.com/bnb-chain/bsc/pull/2679) build(deps): bump actions/download-artifact in /.github/workflows
-* [\#2662](https://github.com/bnb-chain/bsc/pull/2662) metrics: add some extra feature flags as node stats
-* [\#2675](https://github.com/bnb-chain/bsc/pull/2675) fetcher: Sleep after marking block as done when requeuing
-* [\#2695](https://github.com/bnb-chain/bsc/pull/2695) CI: nancy ignore CVE-2024-8421
-* [\#2689](https://github.com/bnb-chain/bsc/pull/2689) consensus/parlia: wait more time when processing huge blocks
-
-## v1.4.14
-
-### BUGFIX
-* [\#2643](https://github.com/bnb-chain/bsc/pull/2643)core: fix cache for receipts
-* [\#2656](https://github.com/bnb-chain/bsc/pull/2656)ethclient: fix BlobSidecars api
-* [\#2657](https://github.com/bnb-chain/bsc/pull/2657)fix: update prunefreezer’s offset when pruneancient and the dataset has pruned block
-
-### FEATURE
-* [\#2661](https://github.com/bnb-chain/bsc/pull/2661)config: setup Mainnet 2 hardfork date: HaberFix & Bohr
-
-### IMPROVEMENT
-* [\#2578](https://github.com/bnb-chain/bsc/pull/2578)core/systemcontracts: use vm.StateDB in UpgradeBuildInSystemContract
-* [\#2649](https://github.com/bnb-chain/bsc/pull/2649)internal/debug: remove memsize
-* [\#2655](https://github.com/bnb-chain/bsc/pull/2655)internal/ethapi: make GetFinalizedHeader monotonically increasing
-* [\#2658](https://github.com/bnb-chain/bsc/pull/2658)core: improve readability of the fork choice logic
-* [\#2665](https://github.com/bnb-chain/bsc/pull/2665)faucet: bump and resend faucet transaction if it has been pending for a while
-
-## v1.4.13
-
-### BUGFIX
-* [\#2602](https://github.com/bnb-chain/bsc/pull/2602) fix: prune-state when specify --triesInMemory 32
-* [\#2579](https://github.com/bnb-chain/bsc/pull/00025790) fix: only take non-mempool tx to calculate bid price
-
-### FEATURE
-* [\#2634](https://github.com/bnb-chain/bsc/pull/2634) config: setup Testnet Bohr hardfork date
-* [\#2482](https://github.com/bnb-chain/bsc/pull/2482) BEP-341: Validators can produce consecutive blocks
-* [\#2502](https://github.com/bnb-chain/bsc/pull/2502) BEP-402: Complete Missing Fields in Block Header to Generate Signature
-* [\#2558](https://github.com/bnb-chain/bsc/pull/2558) BEP-404: Clear Miner History when Switching Validators Set
-* [\#2605](https://github.com/bnb-chain/bsc/pull/2605) feat: add bohr upgrade contracts bytecode
-* [\#2614](https://github.com/bnb-chain/bsc/pull/2614) fix: update stakehub bytecode after zero address agent issue fixed
-* [\#2608](https://github.com/bnb-chain/bsc/pull/2608) consensus/parlia: modify mining time for last block in one turn
-* [\#2618](https://github.com/bnb-chain/bsc/pull/2618) consensus/parlia: exclude inturn validator when calculate backoffTime
-* [\#2621](https://github.com/bnb-chain/bsc/pull/2621) core: not record zero hash beacon block root with Parlia engine
-
-### IMPROVEMENT
-* [\#2589](https://github.com/bnb-chain/bsc/pull/2589) core/vote: vote before committing state and writing block
-* [\#2596](https://github.com/bnb-chain/bsc/pull/2596) core: improve the network stability when double sign happens
-* [\#2600](https://github.com/bnb-chain/bsc/pull/2600) core: cache block after wroten into db
-* [\#2629](https://github.com/bnb-chain/bsc/pull/2629) utils: add GetTopAddr to analyse large traffic
-* [\#2591](https://github.com/bnb-chain/bsc/pull/2591) consensus/parlia: add GetJustifiedNumber and GetFinalizedNumber
-* [\#2611](https://github.com/bnb-chain/bsc/pull/2611) cmd/utils: add new flag OverridePassedForkTime
-* [\#2603](https://github.com/bnb-chain/bsc/pull/2603) faucet: rate limit initial implementation
-* [\#2622](https://github.com/bnb-chain/bsc/pull/2622) tests: fix evm-test CI
-* [\#2628](https://github.com/bnb-chain/bsc/pull/2628) Makefile: use docker compose v2 instead of v1
-
-## v1.4.12
-
-### BUGFIX
-* [\#2557](https://github.com/bnb-chain/bsc/pull/2557) fix: fix state inspect error after pruned state
-* [\#2562](https://github.com/bnb-chain/bsc/pull/2562) fix: delete unexpected block
-* [\#2566](https://github.com/bnb-chain/bsc/pull/2566) core: avoid to cache block before wroten into db
-* [\#2567](https://github.com/bnb-chain/bsc/pull/2567) fix: fix statedb copy
-* [\#2574](https://github.com/bnb-chain/bsc/pull/2574) core: adapt highestVerifiedHeader to FastFinality
-* [\#2542](https://github.com/bnb-chain/bsc/pull/2542) fix: pruneancient freeze from the previous position when the first time
-* [\#2564](https://github.com/bnb-chain/bsc/pull/2564) fix: the bug of blobsidecars and downloader with multi-database
-* [\#2582](https://github.com/bnb-chain/bsc/pull/2582) fix: remove delete and dangling side chains in prunefreezer
-
-### FEATURE
-* [\#2513](https://github.com/bnb-chain/bsc/pull/2513) cmd/jsutils: add a tool to get performance between a range of blocks
-* [\#2569](https://github.com/bnb-chain/bsc/pull/2569) cmd/jsutils: add a tool to get slash count
-* [\#2583](https://github.com/bnb-chain/bsc/pull/2583) cmd/jsutill: add log about validator name
-
-### IMPROVEMENT
-* [\#2546](https://github.com/bnb-chain/bsc/pull/2546) go.mod: update missing dependency
-* [\#2559](https://github.com/bnb-chain/bsc/pull/2559) nancy: ignore go-retryablehttp@v0.7.4 in .nancy-ignore
-* [\#2556](https://github.com/bnb-chain/bsc/pull/2556) chore: update greenfield cometbft version
-* [\#2561](https://github.com/bnb-chain/bsc/pull/2561) tests: fix unstable test
-* [\#2572](https://github.com/bnb-chain/bsc/pull/2572) core: clearup testflag for Cancun and Haber
-* [\#2573](https://github.com/bnb-chain/bsc/pull/2573) cmd/utils: support use NetworkId to distinguish chapel when do syncing
-* [\#2538](https://github.com/bnb-chain/bsc/pull/2538) feat: enhance bid comparison and reply bidding results && detail logs
-* [\#2568](https://github.com/bnb-chain/bsc/pull/2568) core/vote: not vote if too late for next in turn validator
-* [\#2576](https://github.com/bnb-chain/bsc/pull/2576) miner/worker: broadcast block immediately once sealed
-* [\#2580](https://github.com/bnb-chain/bsc/pull/2580) freezer: Opt freezer env checking
-
-## v1.4.11
-
-### BUGFIX
-* [\#2534](https://github.com/bnb-chain/bsc/pull/2534) fix: nil pointer when clear simulating bid
-* [\#2535](https://github.com/bnb-chain/bsc/pull/2535) upgrade: add HaberFix hardfork
-
-## v1.4.10
-### FEATURE
-NA
-
-### IMPROVEMENT
-* [\#2512](https://github.com/bnb-chain/bsc/pull/2512) feat: add mev helper params and func
-* [\#2508](https://github.com/bnb-chain/bsc/pull/2508) perf: speedup pbss trienode read
-* [\#2509](https://github.com/bnb-chain/bsc/pull/2509) perf: optimize chain commit performance for multi-database
-* [\#2451](https://github.com/bnb-chain/bsc/pull/2451) core/forkchoice: improve stability when inturn block not generate
-
-### BUGFIX
-* [\#2518](https://github.com/bnb-chain/bsc/pull/2518) fix: remove zero gasprice check for BSC
-* [\#2519](https://github.com/bnb-chain/bsc/pull/2519) UT: random failure of TestSnapSyncWithBlobs
-* [\#2515](https://github.com/bnb-chain/bsc/pull/2515) fix getBlobSidecars by ethclient
-* [\#2525](https://github.com/bnb-chain/bsc/pull/2525) fix: ensure empty withdrawals after cancun before broadcast
-
-## v1.4.9
-### FEATURE
-* [\#2463](https://github.com/bnb-chain/bsc/pull/2463) utils: add check_blobtx.js
-* [\#2470](https://github.com/bnb-chain/bsc/pull/2470) jsutils: faucet successful requests within blocks
-* [\#2467](https://github.com/bnb-chain/bsc/pull/2467) internal/ethapi: add optional parameter for blobSidecars
-
-### IMPROVEMENT
-* [\#2462](https://github.com/bnb-chain/bsc/pull/2462) cmd/utils: add a flag to change breathe block interval for testing
-* [\#2497](https://github.com/bnb-chain/bsc/pull/2497) params/config: add Bohr hardfork
-* [\#2479](https://github.com/bnb-chain/bsc/pull/2479) dev: ensure consistency in BPS bundle result
-
-### BUGFIX
-* [\#2461](https://github.com/bnb-chain/bsc/pull/2461) eth/handler: check lists in body before broadcast blocks
-* [\#2455](https://github.com/bnb-chain/bsc/pull/2455) cmd: fix memory leak when big dataset
-* [\#2466](https://github.com/bnb-chain/bsc/pull/2466) sync: fix some sync issues caused by prune-block.
-* [\#2475](https://github.com/bnb-chain/bsc/pull/2475) fix: move mev op to MinerAPI & add command to console
-* [\#2473](https://github.com/bnb-chain/bsc/pull/2473) fix: limit the gas price of the mev bid
-* [\#2484](https://github.com/bnb-chain/bsc/pull/2484) fix: fix inspect database error
-* [\#2481](https://github.com/bnb-chain/bsc/pull/2481) fix: keep 9W blocks in ancient db when prune block
-* [\#2495](https://github.com/bnb-chain/bsc/pull/2495) fix: add an empty freeze db
-* [\#2507](https://github.com/bnb-chain/bsc/pull/2507) fix: waiting for the last simulation before pick best bid
-
-## v1.4.8
-### FEATURE
-* [\#2483](https://github.com/bnb-chain/bsc/pull/2483) core/vm: add secp256r1 into PrecompiledContractsHaber
-* [\#2400](https://github.com/bnb-chain/bsc/pull/2400) RIP-7212: Precompile for secp256r1 Curve Support
-
-### IMPROVEMENT
-NA
-
-### BUGFIX
-NA
-
 ## v1.4.7
 ### FEATURE
 * [\#2439](https://github.com/bnb-chain/bsc/pull/2439) config: setup Mainnet Tycho(Cancun) hardfork date
```
Makefile (13 changed lines)

```diff
@@ -17,11 +17,6 @@ geth:
 	@echo "Done building."
 	@echo "Run \"$(GOBIN)/geth\" to launch geth."

-#? faucet: Build faucet
-faucet:
-	$(GORUN) build/ci.go install ./cmd/faucet
-	@echo "Done building faucet"
-
 #? all: Build all packages and executables
 all:
 	$(GORUN) build/ci.go install
@@ -34,11 +29,11 @@ truffle-test:
 	docker build . -f ./docker/Dockerfile --target bsc-genesis -t bsc-genesis
 	docker build . -f ./docker/Dockerfile --target bsc -t bsc
 	docker build . -f ./docker/Dockerfile.truffle -t truffle-test
-	docker compose -f ./tests/truffle/docker-compose.yml up genesis
-	docker compose -f ./tests/truffle/docker-compose.yml up -d bsc-rpc bsc-validator1
+	docker-compose -f ./tests/truffle/docker-compose.yml up genesis
+	docker-compose -f ./tests/truffle/docker-compose.yml up -d bsc-rpc bsc-validator1
 	sleep 30
-	docker compose -f ./tests/truffle/docker-compose.yml up --exit-code-from truffle-test truffle-test
-	docker compose -f ./tests/truffle/docker-compose.yml down
+	docker-compose -f ./tests/truffle/docker-compose.yml up --exit-code-from truffle-test truffle-test
+	docker-compose -f ./tests/truffle/docker-compose.yml down

 #? lint: Run certain pre-selected linters
 lint: ## Run linters.
```
|
28
README.md
28
README.md
@ -11,13 +11,14 @@ https://pkg.go.dev/badge/github.com/ethereum/go-ethereum
|
|||||||
|
|
||||||
But from that baseline of EVM compatible, BNB Smart Chain introduces a system of 21 validators with Proof of Staked Authority (PoSA) consensus that can support short block time and lower fees. The most bonded validator candidates of staking will become validators and produce blocks. The double-sign detection and other slashing logic guarantee security, stability, and chain finality.
|
But from that baseline of EVM compatible, BNB Smart Chain introduces a system of 21 validators with Proof of Staked Authority (PoSA) consensus that can support short block time and lower fees. The most bonded validator candidates of staking will become validators and produce blocks. The double-sign detection and other slashing logic guarantee security, stability, and chain finality.
|
||||||
|
|
||||||
**The BNB Smart Chain** will be:
|
Cross-chain transfer and other communication are possible due to native support of interoperability. Relayers and on-chain contracts are developed to support that. BNB Beacon Chain DEX remains a liquid venue of the exchange of assets on both chains. This dual-chain architecture will be ideal for users to take advantage of the fast trading on one side and build their decentralized apps on the other side. **The BNB Smart Chain** will be:
|
||||||
|
|
||||||
- **A self-sovereign blockchain**: Provides security and safety with elected validators.
|
- **A self-sovereign blockchain**: Provides security and safety with elected validators.
|
||||||
- **EVM-compatible**: Supports all the existing Ethereum tooling along with faster finality and cheaper transaction fees.
|
- **EVM-compatible**: Supports all the existing Ethereum tooling along with faster finality and cheaper transaction fees.
|
||||||
|
- **Interoperable**: Comes with efficient native dual chain communication; Optimized for scaling high-performance dApps that require fast and smooth user experience.
|
||||||
- **Distributed with on-chain governance**: Proof of Staked Authority brings in decentralization and community participants. As the native token, BNB will serve as both the gas of smart contract execution and tokens for staking.
|
- **Distributed with on-chain governance**: Proof of Staked Authority brings in decentralization and community participants. As the native token, BNB will serve as both the gas of smart contract execution and tokens for staking.
|
||||||
|
|
||||||
More details in [White Paper](https://github.com/bnb-chain/whitepaper/blob/master/WHITEPAPER.md).
|
More details in [White Paper](https://www.bnbchain.org/en#smartChain).
|
||||||
|
|
||||||
## Key features
|
## Key features
|
||||||
|
|
||||||
@ -33,8 +34,18 @@ To combine DPoS and PoA for consensus, BNB Smart Chain implement a novel consens
|
|||||||
|
|
||||||
1. Blocks are produced by a limited set of validators.
|
1. Blocks are produced by a limited set of validators.
|
||||||
2. Validators take turns to produce blocks in a PoA manner, similar to Ethereum's Clique consensus engine.
|
2. Validators take turns to produce blocks in a PoA manner, similar to Ethereum's Clique consensus engine.
|
||||||
3. Validator set are elected in and out based on a staking based governance on BNB Smart Chain.
|
3. Validator set are elected in and out based on a staking based governance on BNB Beacon Chain.
|
||||||
4. Parlia consensus engine will interact with a set of [system contracts](https://docs.bnbchain.org/bnb-smart-chain/staking/overview/#system-contracts) to achieve liveness slash, revenue distributing and validator set renewing func.
|
4. The validator set change is relayed via a cross-chain communication mechanism.
|
||||||
|
5. Parlia consensus engine will interact with a set of [system contracts](https://docs.bnbchain.org/docs/learn/system-contract) to achieve liveness slash, revenue distributing and validator set renewing func.
|
||||||
|
|
||||||
|
|
||||||
|
### Light Client of BNB Beacon Chain
|
||||||
|
|
||||||
|
To achieve the cross-chain communication from BNB Beacon Chain to BNB Smart Chain, need introduce a on-chain light client verification algorithm.
|
||||||
|
It contains two parts:
|
||||||
|
|
||||||
|
1. [Stateless Precompiled contracts](https://github.com/bnb-chain/bsc/blob/master/core/vm/contracts_lightclient.go) to do tendermint header verification and Merkle Proof verification.
|
||||||
|
2. [Stateful solidity contracts](https://github.com/bnb-chain/bsc-genesis-contract/blob/master/contracts/TendermintLightClient.sol) to store validator set and trusted appHash.
|
||||||
|
|
||||||
## Native Token
|
## Native Token
|
||||||
|
|
||||||
@ -42,6 +53,7 @@ BNB will run on BNB Smart Chain in the same way as ETH runs on Ethereum so that
|
|||||||
BNB will be used to:
|
BNB will be used to:
|
||||||
|
|
||||||
1. pay `gas` to deploy or invoke Smart Contract on BSC
|
1. pay `gas` to deploy or invoke Smart Contract on BSC
|
||||||
|
2. perform cross-chain operations, such as transfer token assets across BNB Smart Chain and BNB Beacon Chain.
|
||||||
|
|
||||||
## Building the source
|
## Building the source
|
||||||
|
|
||||||
@ -137,6 +149,8 @@ unzip testnet.zip
|
|||||||
#### 3. Download snapshot
|
#### 3. Download snapshot
|
||||||
Download latest chaindata snapshot from [here](https://github.com/bnb-chain/bsc-snapshots). Follow the guide to structure your files.
|
Download latest chaindata snapshot from [here](https://github.com/bnb-chain/bsc-snapshots). Follow the guide to structure your files.
|
||||||
|
|
||||||
|
Note: If you encounter difficulties downloading the chaindata snapshot and prefer to synchronize from the genesis block on the Chapel testnet, remember to include the additional flag `--chapel` when initially launching Geth.
|
||||||
|
|
||||||
#### 4. Start a full node
|
#### 4. Start a full node
|
||||||
```shell
|
```shell
|
||||||
./geth --config ./config.toml --datadir ./node --cache 8000 --rpc.allow-unprotected-txs --history.transactions 0
|
./geth --config ./config.toml --datadir ./node --cache 8000 --rpc.allow-unprotected-txs --history.transactions 0
|
||||||
@ -169,7 +183,7 @@ This tool is optional and if you leave it out you can always attach to an alread
|
|||||||
|
|
||||||
#### 7. More
|
#### 7. More
|
||||||
|
|
||||||
More details about [running a node](https://docs.bnbchain.org/bnb-smart-chain/developers/node_operators/full_node/) and [becoming a validator](https://docs.bnbchain.org/bnb-smart-chain/validator/create-val/)
|
More details about [running a node](https://docs.bnbchain.org/docs/validator/fullnode) and [becoming a validator](https://docs.bnbchain.org/docs/validator/create-val)
|
||||||
|
|
||||||
*Note: Although some internal protective measures prevent transactions from
|
*Note: Although some internal protective measures prevent transactions from
|
||||||
crossing over between the main network and test network, you should always
|
crossing over between the main network and test network, you should always
|
||||||
@ -235,7 +249,9 @@ running web servers, so malicious web pages could try to subvert locally availab
|
|||||||
APIs!**
|
APIs!**
|
||||||
|
|
||||||
### Operating a private network
|
### Operating a private network
|
||||||
- [BSC-Deploy](https://github.com/bnb-chain/node-deploy/): deploy tool for setting up BNB Smart Chain.
|
- [BSC-Deploy](https://github.com/bnb-chain/node-deploy/): deploy tool for setting up both BNB Beacon Chain, BNB Smart Chain and the cross chain infrastructure between them.
|
||||||
|
- [BSC-Docker](https://github.com/bnb-chain/bsc-docker): deploy tool for setting up local BSC cluster in container.
|
||||||
|
|
||||||
|
|
||||||
## Running a bootnode
|
## Running a bootnode
|
||||||
|
|
||||||
|
Deleted file (87 lines, package fakebeacon, blob-sidecar API backend)

```diff
@@ -1,87 +0,0 @@
-package fakebeacon
-
-import (
-	"context"
-	"sort"
-
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/crypto/kzg4844"
-	"github.com/ethereum/go-ethereum/internal/ethapi"
-	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/rpc"
-)
-
-type BlobSidecar struct {
-	Blob          kzg4844.Blob       `json:"blob"`
-	Index         int                `json:"index"`
-	KZGCommitment kzg4844.Commitment `json:"kzg_commitment"`
-	KZGProof      kzg4844.Proof      `json:"kzg_proof"`
-}
-
-type APIGetBlobSidecarsResponse struct {
-	Data []*BlobSidecar `json:"data"`
-}
-
-type ReducedGenesisData struct {
-	GenesisTime string `json:"genesis_time"`
-}
-
-type APIGenesisResponse struct {
-	Data ReducedGenesisData `json:"data"`
-}
-
-type ReducedConfigData struct {
-	SecondsPerSlot string `json:"SECONDS_PER_SLOT"`
-}
-
-type IndexedBlobHash struct {
-	Index int         // absolute index in the block, a.k.a. position in sidecar blobs array
-	Hash  common.Hash // hash of the blob, used for consistency checks
-}
-
-func configSpec() ReducedConfigData {
-	return ReducedConfigData{SecondsPerSlot: "1"}
-}
-
-func beaconGenesis() APIGenesisResponse {
-	return APIGenesisResponse{Data: ReducedGenesisData{GenesisTime: "0"}}
-}
-
-func beaconBlobSidecars(ctx context.Context, backend ethapi.Backend, slot uint64, indices []int) (APIGetBlobSidecarsResponse, error) {
-	var blockNrOrHash rpc.BlockNumberOrHash
-	header, err := fetchBlockNumberByTime(ctx, int64(slot), backend)
-	if err != nil {
-		log.Error("Error fetching block number", "slot", slot, "indices", indices)
-		return APIGetBlobSidecarsResponse{}, err
-	}
-	sideCars, err := backend.GetBlobSidecars(ctx, header.Hash())
-	if err != nil {
-		log.Error("Error fetching Sidecars", "blockNrOrHash", blockNrOrHash, "err", err)
-		return APIGetBlobSidecarsResponse{}, err
-	}
-	sort.Ints(indices)
-	fullBlob := len(indices) == 0
-
-	res := APIGetBlobSidecarsResponse{}
-	idx := 0
-	curIdx := 0
-	for _, sideCar := range sideCars {
-		for i := 0; i < len(sideCar.Blobs); i++ {
-			//hash := kZGToVersionedHash(sideCar.Commitments[i])
-			if !fullBlob && curIdx >= len(indices) {
-				break
-			}
-			if fullBlob || idx == indices[curIdx] {
-				res.Data = append(res.Data, &BlobSidecar{
-					Index:         idx,
-					Blob:          sideCar.Blobs[i],
-					KZGCommitment: sideCar.Commitments[i],
-					KZGProof:      sideCar.Proofs[i],
-				})
-				curIdx++
-			}
-			idx++
-		}
-	}
-
-	return res, nil
-}
```
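The deleted backend above walks every blob of every sidecar in block order and keeps the ones whose absolute index was requested; an empty `indices` list means all blobs. A minimal, self-contained sketch of just that selection logic, with simplified stand-in types rather than the package's own:

```go
package main

import (
	"fmt"
	"sort"
)

// selectBlobs mirrors the nested loop in beaconBlobSidecars: blobsPerSidecar
// gives the blob count of each sidecar, indices the requested absolute
// positions; the returned slice holds the absolute indices that were kept.
func selectBlobs(blobsPerSidecar []int, indices []int) []int {
	sort.Ints(indices)
	fullBlob := len(indices) == 0
	var picked []int
	idx, curIdx := 0, 0
	for _, n := range blobsPerSidecar {
		for i := 0; i < n; i++ {
			if !fullBlob && curIdx >= len(indices) {
				break
			}
			if fullBlob || idx == indices[curIdx] {
				picked = append(picked, idx)
				curIdx++
			}
			idx++
		}
	}
	return picked
}

func main() {
	// Two sidecars carrying 3 and 2 blobs; request absolute indices 1 and 4.
	fmt.Println(selectBlobs([]int{3, 2}, []int{4, 1})) // [1 4]
	// An empty request returns every blob.
	fmt.Println(selectBlobs([]int{3, 2}, nil)) // [0 1 2 3 4]
}
```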
Deleted file (88 lines, package fakebeacon, HTTP handlers)

```diff
@@ -1,88 +0,0 @@
-package fakebeacon
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-	"strconv"
-	"strings"
-
-	"github.com/prysmaticlabs/prysm/v5/api/server/structs"
-	field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
-	"github.com/prysmaticlabs/prysm/v5/network/httputil"
-)
-
-var (
-	versionMethod        = "/eth/v1/node/version"
-	specMethod           = "/eth/v1/config/spec"
-	genesisMethod        = "/eth/v1/beacon/genesis"
-	sidecarsMethodPrefix = "/eth/v1/beacon/blob_sidecars/{slot}"
-)
-
-func VersionMethod(w http.ResponseWriter, r *http.Request) {
-	resp := &structs.GetVersionResponse{
-		Data: &structs.Version{
-			Version: "",
-		},
-	}
-	httputil.WriteJson(w, resp)
-}
-
-func SpecMethod(w http.ResponseWriter, r *http.Request) {
-	httputil.WriteJson(w, &structs.GetSpecResponse{Data: configSpec()})
-}
-
-func GenesisMethod(w http.ResponseWriter, r *http.Request) {
-	httputil.WriteJson(w, beaconGenesis())
-}
-
-func (s *Service) SidecarsMethod(w http.ResponseWriter, r *http.Request) {
-	indices, err := parseIndices(r.URL)
-	if err != nil {
-		httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-		return
-	}
-	segments := strings.Split(r.URL.Path, "/")
-	slot, err := strconv.ParseUint(segments[len(segments)-1], 10, 64)
-	if err != nil {
-		httputil.HandleError(w, "not a valid slot(timestamp)", http.StatusBadRequest)
-		return
-	}
-
-	resp, err := beaconBlobSidecars(r.Context(), s.backend, slot, indices)
-	if err != nil {
-		httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-		return
-	}
-	httputil.WriteJson(w, resp)
-}
-
-// parseIndices filters out invalid and duplicate blob indices
-func parseIndices(url *url.URL) ([]int, error) {
-	rawIndices := url.Query()["indices"]
-	indices := make([]int, 0, field_params.MaxBlobsPerBlock)
-	invalidIndices := make([]string, 0)
-loop:
-	for _, raw := range rawIndices {
-		ix, err := strconv.Atoi(raw)
-		if err != nil {
-			invalidIndices = append(invalidIndices, raw)
-			continue
-		}
-		if ix >= field_params.MaxBlobsPerBlock {
-			invalidIndices = append(invalidIndices, raw)
-			continue
-		}
-		for i := range indices {
-			if ix == indices[i] {
-				continue loop
-			}
-		}
-		indices = append(indices, ix)
-	}
-
-	if len(invalidIndices) > 0 {
-		return nil, fmt.Errorf("requested blob indices %v are invalid", invalidIndices)
-	}
-	return indices, nil
-}
```
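For illustration, the deleted `parseIndices` drops duplicate blob indices silently but fails the whole request on any non-numeric or out-of-range value. A self-contained sketch of that behavior; `maxBlobsPerBlock` is a hypothetical stand-in for Prysm's `field_params.MaxBlobsPerBlock`:

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// maxBlobsPerBlock stands in for field_params.MaxBlobsPerBlock; the value
// here is assumed for illustration only.
const maxBlobsPerBlock = 6

func parseIndices(u *url.URL) ([]int, error) {
	rawIndices := u.Query()["indices"]
	indices := make([]int, 0, maxBlobsPerBlock)
	var invalid []string
loop:
	for _, raw := range rawIndices {
		ix, err := strconv.Atoi(raw)
		if err != nil || ix >= maxBlobsPerBlock {
			invalid = append(invalid, raw) // collect every bad value before failing
			continue
		}
		for _, seen := range indices {
			if ix == seen {
				continue loop // duplicate: skip silently
			}
		}
		indices = append(indices, ix)
	}
	if len(invalid) > 0 {
		return nil, fmt.Errorf("requested blob indices %v are invalid", invalid)
	}
	return indices, nil
}

func main() {
	u, _ := url.Parse("http://localhost:8686/eth/v1/beacon/blob_sidecars/123?indices=0&indices=2&indices=2")
	fmt.Println(parseIndices(u)) // [0 2] <nil>
}
```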
Deleted file (97 lines, package fakebeacon, service and router)

```diff
@@ -1,97 +0,0 @@
-package fakebeacon
-
-import (
-	"net/http"
-	"strconv"
-
-	"github.com/ethereum/go-ethereum/internal/ethapi"
-	"github.com/gorilla/mux"
-	"github.com/prysmaticlabs/prysm/v5/api/server"
-)
-
-const (
-	DefaultAddr = "localhost"
-	DefaultPort = 8686
-)
-
-type Config struct {
-	Enable bool
-	Addr   string
-	Port   int
-}
-
-func defaultConfig() *Config {
-	return &Config{
-		Enable: false,
-		Addr:   DefaultAddr,
-		Port:   DefaultPort,
-	}
-}
-
-type Service struct {
-	cfg     *Config
-	router  *mux.Router
-	backend ethapi.Backend
-}
-
-func NewService(cfg *Config, backend ethapi.Backend) *Service {
-	cfgs := defaultConfig()
-	if cfg.Addr != "" {
-		cfgs.Addr = cfg.Addr
-	}
-	if cfg.Port > 0 {
-		cfgs.Port = cfg.Port
-	}
-
-	s := &Service{
-		cfg:     cfgs,
-		backend: backend,
-	}
-	router := s.newRouter()
-	s.router = router
-	return s
-}
-
-func (s *Service) Run() {
-	_ = http.ListenAndServe(s.cfg.Addr+":"+strconv.Itoa(s.cfg.Port), s.router)
-}
-
-func (s *Service) newRouter() *mux.Router {
-	r := mux.NewRouter()
-	r.Use(server.NormalizeQueryValuesHandler)
-	for _, e := range s.endpoints() {
-		r.HandleFunc(e.path, e.handler).Methods(e.methods...)
-	}
-	return r
-}
-
-type endpoint struct {
-	path    string
-	handler http.HandlerFunc
-	methods []string
-}
-
-func (s *Service) endpoints() []endpoint {
-	return []endpoint{
-		{
-			path:    versionMethod,
-			handler: VersionMethod,
-			methods: []string{http.MethodGet},
-		},
-		{
-			path:    specMethod,
-			handler: SpecMethod,
-			methods: []string{http.MethodGet},
-		},
-		{
-			path:    genesisMethod,
-			handler: GenesisMethod,
-			methods: []string{http.MethodGet},
-		},
-		{
-			path:    sidecarsMethodPrefix,
-			handler: s.SidecarsMethod,
-			methods: []string{http.MethodGet},
-		},
-	}
-}
```
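Because the service registers standard beacon API paths, it can be exercised with plain HTTP once it is running on its default `localhost:8686`. A usage sketch; the slot value and indices are illustrative, and the local response types only mirror the JSON shape defined above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// blobSidecar mirrors only the fields we print; kzg_commitment is kept raw
// to avoid assuming its exact JSON encoding.
type blobSidecar struct {
	Index         int             `json:"index"`
	KZGCommitment json.RawMessage `json:"kzg_commitment"`
}

type sidecarsResponse struct {
	Data []*blobSidecar `json:"data"`
}

func main() {
	// Assumes a fake beacon service is listening on the default address.
	resp, err := http.Get("http://localhost:8686/eth/v1/beacon/blob_sidecars/1724055046?indices=0&indices=1")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out sidecarsResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	for _, sc := range out.Data {
		fmt.Printf("blob %d: commitment %s\n", sc.Index, sc.KZGCommitment)
	}
}
```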
Deleted file (90 lines, package fakebeacon, tests)

```diff
@@ -1,90 +0,0 @@
-package fakebeacon
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-//
-//func TestFetchBlockNumberByTime(t *testing.T) {
-//	blockNum, err := fetchBlockNumberByTime(context.Background(), 1724052941, client)
-//	assert.Nil(t, err)
-//	assert.Equal(t, uint64(41493946), blockNum)
-//
-//	blockNum, err = fetchBlockNumberByTime(context.Background(), 1734052941, client)
-//	assert.Equal(t, err, errors.New("time too large"))
-//
-//	blockNum, err = fetchBlockNumberByTime(context.Background(), 1600153618, client)
-//	assert.Nil(t, err)
-//	assert.Equal(t, uint64(493946), blockNum)
-//}
-//
-//func TestBeaconBlobSidecars(t *testing.T) {
-//	indexBlobHash := []IndexedBlobHash{
-//		{Hash: common.HexToHash("0x01231952ecbaede62f8d0398b656072c072db36982c9ef106fbbc39ce14f983c"), Index: 0},
-//		{Hash: common.HexToHash("0x012c21a8284d2d707bb5318e874d2e1b97a53d028e96abb702b284a2cbb0f79c"), Index: 1},
-//		{Hash: common.HexToHash("0x011196c8d02536ede0382aa6e9fdba6c460169c0711b5f97fcd701bd8997aee3"), Index: 2},
-//		{Hash: common.HexToHash("0x019c86b46b27401fb978fd175d1eb7dadf4976d6919501b0c5280d13a5bab57b"), Index: 3},
-//		{Hash: common.HexToHash("0x01e00db7ee99176b3fd50aab45b4fae953292334bbf013707aac58c455d98596"), Index: 4},
-//		{Hash: common.HexToHash("0x0117d23b68123d578a98b3e1aa029661e0abda821a98444c21992eb1e5b7208f"), Index: 5},
-//		//{Hash: common.HexToHash("0x01e00db7ee99176b3fd50aab45b4fae953292334bbf013707aac58c455d98596"), Index: 1},
-//	}
-//
-//	resp, err := beaconBlobSidecars(context.Background(), 1724055046, []int{0, 1, 2, 3, 4, 5}) // block: 41494647
-//	assert.Nil(t, err)
-//	assert.NotNil(t, resp)
-//	assert.NotEmpty(t, resp.Data)
-//	for i, sideCar := range resp.Data {
-//		assert.Equal(t, indexBlobHash[i].Index, sideCar.Index)
-//		assert.Equal(t, indexBlobHash[i].Hash, kZGToVersionedHash(sideCar.KZGCommitment))
-//	}
-//
-//	apiscs := make([]*BlobSidecar, 0, len(indexBlobHash))
-//	// filter and order by hashes
-//	for _, h := range indexBlobHash {
-//		for _, apisc := range resp.Data {
-//			if h.Index == int(apisc.Index) {
-//				apiscs = append(apiscs, apisc)
-//				break
-//			}
-//		}
-//	}
-//
-//	assert.Equal(t, len(apiscs), len(resp.Data))
-//	assert.Equal(t, len(apiscs), len(indexBlobHash))
-//}
-
-type TimeToSlotFn func(timestamp uint64) (uint64, error)
-
-// GetTimeToSlotFn returns a function that converts a timestamp to a slot number.
-func GetTimeToSlotFn(ctx context.Context) (TimeToSlotFn, error) {
-	genesis := beaconGenesis()
-	config := configSpec()
-
-	genesisTime, _ := strconv.ParseUint(genesis.Data.GenesisTime, 10, 64)
-	secondsPerSlot, _ := strconv.ParseUint(config.SecondsPerSlot, 10, 64)
-	if secondsPerSlot == 0 {
-		return nil, fmt.Errorf("got bad value for seconds per slot: %v", config.SecondsPerSlot)
-	}
-	timeToSlotFn := func(timestamp uint64) (uint64, error) {
-		if timestamp < genesisTime {
-			return 0, fmt.Errorf("provided timestamp (%v) precedes genesis time (%v)", timestamp, genesisTime)
-		}
-		return (timestamp - genesisTime) / secondsPerSlot, nil
-	}
-	return timeToSlotFn, nil
-}
-
-func TestAPI(t *testing.T) {
-	slotFn, err := GetTimeToSlotFn(context.Background())
-	assert.Nil(t, err)
-
-	expTx := uint64(123151345)
-	gotTx, err := slotFn(expTx)
-	assert.Nil(t, err)
-	assert.Equal(t, expTx, gotTx)
-}
```
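With the package's fixed parameters (`genesis_time` of "0" and `SECONDS_PER_SLOT` of "1"), the slot computation in `GetTimeToSlotFn` degenerates to the timestamp itself, which is what `TestAPI` asserts. A worked sketch of the arithmetic:

```go
package main

import "fmt"

func main() {
	var genesisTime, secondsPerSlot uint64 = 0, 1 // values from beaconGenesis() and configSpec()
	timestamp := uint64(123151345)
	slot := (timestamp - genesisTime) / secondsPerSlot
	fmt.Println(slot) // 123151345: the slot number equals the timestamp
}
```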
Deleted file (65 lines, package fakebeacon, block-by-time lookup)

```diff
@@ -1,65 +0,0 @@
-package fakebeacon
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"math/rand"
-	"time"
-
-	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/internal/ethapi"
-	"github.com/ethereum/go-ethereum/rpc"
-)
-
-func fetchBlockNumberByTime(ctx context.Context, ts int64, backend ethapi.Backend) (*types.Header, error) {
-	// calc the block number of the ts.
-	currentHeader := backend.CurrentHeader()
-	blockTime := int64(currentHeader.Time)
-	if ts > blockTime {
-		return nil, errors.New("time too large")
-	}
-	blockNum := currentHeader.Number.Uint64()
-	estimateEndNumber := int64(blockNum) - (blockTime-ts)/3
-	// find the end number
-	for {
-		header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(estimateEndNumber))
-		if err != nil {
-			time.Sleep(time.Duration(rand.Int()%180) * time.Millisecond)
-			continue
-		}
-		if header == nil {
-			estimateEndNumber -= 1
-			time.Sleep(time.Duration(rand.Int()%180) * time.Millisecond)
-			continue
-		}
-		headerTime := int64(header.Time)
-		if headerTime == ts {
-			return header, nil
-		}
-
-		// let the estimateEndNumber a little bigger than real value
-		if headerTime > ts+8 {
-			estimateEndNumber -= (headerTime - ts) / 3
-		} else if headerTime < ts {
-			estimateEndNumber += (ts-headerTime)/3 + 1
-		} else {
-			// search one by one
-			for headerTime >= ts {
-				header, err = backend.HeaderByNumber(ctx, rpc.BlockNumber(estimateEndNumber-1))
-				if err != nil {
-					time.Sleep(time.Duration(rand.Int()%180) * time.Millisecond)
-					continue
-				}
-				headerTime = int64(header.Time)
-				if headerTime == ts {
-					return header, nil
-				}
-				estimateEndNumber -= 1
-				if headerTime < ts { //found the real endNumber
-					return nil, fmt.Errorf("block not found by time %d", ts)
-				}
-			}
-		}
-	}
-}
```
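The deleted lookup exploits BSC's 3-second block interval: it jumps to an estimated height, then nudges the estimate until a header with the exact timestamp is found. A self-contained sketch of that strategy against a mock chain; it omits the error retries and the final one-by-one scan of the real code:

```go
package main

import "fmt"

type header struct {
	number uint64
	time   int64
}

// headerByNumber stands in for backend.HeaderByNumber on a mock chain whose
// blocks are exactly 3 seconds apart.
func headerByNumber(n int64) *header {
	if n < 0 {
		return nil
	}
	return &header{number: uint64(n), time: 1600000000 + n*3}
}

func findByTime(head *header, ts int64) *header {
	// Initial estimate: walk back from the chain head by elapsed-time/3 blocks.
	estimate := int64(head.number) - (head.time-ts)/3
	for {
		h := headerByNumber(estimate)
		switch {
		case h == nil:
			estimate--
		case h.time == ts:
			return h
		case h.time > ts:
			estimate -= (h.time - ts) / 3 // overshot: step back
		default:
			estimate += (ts-h.time)/3 + 1 // undershot: step forward
		}
	}
}

func main() {
	head := headerByNumber(41000000)
	fmt.Println(findByTime(head, 1600000000+12345*3).number) // 12345
}
```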
Changed test file (func TestExtraParse)

```diff
@@ -43,42 +43,7 @@ func TestExtraParse(t *testing.T) {
 		}
 	}

-	// case 3, |---Extra Vanity---|---Validators Number and Validators Bytes---|---Turn Length---|---Empty---|---Extra Seal---|
-	{
-		extraData := "0xd983010209846765746889676f312e31392e3131856c696e75780000a6bf97c1152465176c461afb316ebc773c61faee85a6515daa8a923564c6ffd37fb2fe9f118ef88092e8762c7addb526ab7eb1e772baef85181f892c731be0c1891a50e6b06262c816295e26495cef6f69dfa69911d9d8e4f3bbadb89b977cf58294f7239d515e15b24cfeb82494056cf691eaf729b165f32c9757c429dba5051155903067e56ebe3698678e912d4c407bbe49438ed859fe965b140dcf1aab71a993c1f7f6929d1fe2a17b4e14614ef9fc5bdc713d6631d675403fbeefac55611bf612700b1b65f4744861b80b0f7d6ab03f349bbafec1551819b8be1efea2fc46ca749aa184248a459464eec1a21e7fc7b71a053d9644e9bb8da4853b8f872cd7c1d6b324bf1922829830646ceadfb658d3de009a61dd481a114a2e761c554b641742c973867899d300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000069c77a677c40c7fbea129d4b171a39b7a8ddabfab2317f59d86abfaf690850223d90e9e7593d91a29331dfc2f84d5adecc75fc39ecab4632c1b4400a3dd1e1298835bcca70f657164e5b75689b64b7fd1fa275f334f28e1896a26afa1295da81418593bd12814463d9f6e45c36a0e47eb4cd3e5b6af29c41e2a3a5636430155a466e216585af3ba772b61c6014342d914470ec7ac2975be345796c2b81db0422a5fd08e40db1fc2368d2245e4b18b1d0b85c921aaaafd2e341760e29fc613edd39f71254614e2055c3287a517ae2f5b9e386cd1b50a4550696d957cb4900f03ab84f83ff2df44193496793b847f64e9d6db1b3953682bb95edd096eb1e69bbd357c200992ca78050d0cbe180cfaa018e8b6c8fd93d6f4cea42bbb345dbc6f0dfdb5bec73a8a257074e82b881cfa06ef3eb4efeca060c2531359abd0eab8af1e3edfa2025fca464ac9c3fd123f6c24a0d78869485a6f79b60359f141df90a0c745125b131caaffd12000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b218c5d6af1f979ac42bc68d98a5a0d796c6ab01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b4dd66d7c2c7e57f628210187192fb89d4b99dd4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000be807dddb074639cd9fa61b47676c064fc50d62cb1f2c71577def3144fabeb75a8a1c8cb5b51d1d1b4a05eec67988b8685008baa17459ec425dbaebc852f496dc92196cdcc8e6d00c17eb431350c6c50d8b8f05176b90b11b3a3d4feb825ae9702711566df5dbf38e82add4dd1b573b95d2466fa6501ccb81e9d26a352b96150ccbf7b697fd0a419d1d6bf74282782b0b3eb1413c901d6ecf02e8e28000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e2d3a739effcd3a99387d015e260eefac72ebea1956c470ddff48cb49300200b5f83497f3a3ccb3aeb83c5edd9818569038e61d197184f4aa6939ea5e9911e3e98ac6d21e9ae3261a475a27bb1028f140bc2a7c843318afd000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ea0a6e3c511bbd10f4519ece37dc24887e11b55db2d4c6283c44a1c7bd503aaba7666e9f0c830e0ff016c1c750a5e48757a713d0836b1cabfd5c281b1de3b77d1c192183ee226379db83cffc681495730c11fdde79ba4c0c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ef0274e31810c9df02f98fafde0f841f4e66a1cd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004e99f701bb14cb7dfb68b90bd3e6d1ca656964630de71beffc7f33f7f08ec99d336ec51ad9fad0ac84ae77ca2e8ad9512acc56e0d7c93f3c2ce7de1b69149a5a400"
-		extra, err := parseExtra(extraData)
-		assert.NoError(t, err)
-		{
-			var have = extra.ValidatorSize
-			var want = uint8(21)
-			if have != want {
-				t.Fatalf("extra.ValidatorSize mismatch, have %d, want %d", have, want)
-			}
-		}
-		{
-			var have = common.Bytes2Hex(extra.Validators[14].Address[:])
-			var want = "cc8e6d00c17eb431350c6c50d8b8f05176b90b11"
-			if have != want {
-				t.Fatalf("extra.Validators[14].Address mismatch, have %s, want %s", have, want)
-			}
-		}
-		{
-			var have = common.Bytes2Hex(extra.Validators[18].BLSPublicKey[:])
-			var want = "b2d4c6283c44a1c7bd503aaba7666e9f0c830e0ff016c1c750a5e48757a713d0836b1cabfd5c281b1de3b77d1c192183"
-			if have != want {
-				t.Fatalf("extra.Validators[18].BLSPublicKey mismatch, have %s, want %s", have, want)
-			}
-		}
-		{
-			var have = extra.TurnLength
-			var want = uint8(4)
-			if *have != want {
-				t.Fatalf("extra.TurnLength mismatch, have %d, want %d", *have, want)
-			}
-		}
-	}
-
-	// case 4, |---Extra Vanity---|---Empty---|---Vote Attestation---|---Extra Seal---|
+	// case 3, |---Extra Vanity---|---Empty---|---Vote Attestation---|---Extra Seal---|
 	{
 		extraData := "0xd883010205846765746888676f312e32302e35856c696e75780000002995c52af8b5830563efb86089cf168dcf4c5d3cb057926628ad1bf0f03ea67eef1458485578a4f8489afa8a853ecc7af45e2d145c21b70641c4b29f0febd2dd2c61fa1ba174be3fd47f1f5fa2ab9b5c318563d8b70ca58d0d51e79ee32b2fb721649e2cb9d36538361fba11f84c8401d14bb7a0fa67ddb3ba654d6006bf788710032247aa4d1be0707273e696b422b3ff72e9798401d14bbaa01225f505f5a0e1aefadcd2913b7aac9009fe4fb3d1bf57399e0b9dce5947f94280fe6d3647276c4127f437af59eb7c7985b2ae1ebe432619860695cb6106b80cc66c735bc1709afd11f233a2c97409d38ebaf7178aa53e895aea2fe0a229f71ec601"
 		extra, err := parseExtra(extraData)
@@ -99,9 +64,9 @@ func TestExtraParse(t *testing.T) {
 		}
 	}

-	// case 5, |---Extra Vanity---|---Validators Number and Validators Bytes---|---Vote Attestation---|---Extra Seal---|
+	// case 4, |---Extra Vanity---|---Validators Number and Validators Bytes---|---Vote Attestation---|---Extra Seal---|
 	{
-		extraData := "0xd883010209846765746888676f312e31392e38856c696e7578000000dc55905c071284214b9b9c85549ab3d2b972df0deef66ac2c98e82934ca974fdcd97f3309de967d3c9c43fa711a8d673af5d75465844bf8969c8d1948d903748ac7b8b1720fa64e50c35552c16704d214347f29fa77f77da6d75d7c752b742ad4855bae330426b823e742da31f816cc83bc16d69a9134be0cfb4a1d17ec34f1b5b32d5c20440b8536b1e88f0f247788386d0ed6c748e03a53160b4b30ed3748cc5000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000980a75ecd1309ea12fa2ed87a8744fbfc9b863d589037a9ace3b590165ea1c0c5ac72bf600b7c88c1e435f41932c1132aae1bfa0bb68e46b96ccb12c3415e4d82af717d8a2959d3f95eae5dc7d70144ce1b73b403b7eb6e0b973c2d38487e58fd6e145491b110080fb14ac915a0411fc78f19e09a399ddee0d20c63a75d8f930f1694544ad2dc01bb71b214cb885500844365e95cd9942c7276e7fd8a2750ec6dded3dcdc2f351782310b0eadc077db59abca0f0cd26776e2e7acb9f3bce40b1fa5221fd1561226c6263cc5ff474cf03cceff28abc65c9cbae594f725c80e12d96c9b86c3400e529bfe184056e257c07940bb664636f689e8d2027c834681f8f878b73445261034e946bb2d901b4b878f8b27bb8608c11016739b3f8a19e54ab8c7abacd936cfeba200f3645a98b65adb0dd3692b69ce0b3ae10e7176b9a4b0d83f04065b1042b4bcb646a34b75c550f92fc34b8b2b1db0fa0d3172db23ba92727c80bcd306320d0ff411bf858525fde13bc8e0370f84c8401e9c2e6a0820dc11d63176a0eb1b828bc5376867b275579112b7013358da40317e7bab6e98401e9c2e7a00edc71ce80105a3220a87bea2792fa340d66c59002f02b0a09349ed1ed28407080048b972fac2b9077a4dcb6fc37093799a652858016c99142b227500c844fa97ec22e3f9d3b1e982f14bcd999a7453e89ce5ef5c55f1c7f8f74ba904186cd67828200"
+		extraData := "0xd883010209846765746888676f312e31392e38856c696e7578000000dc55905c071284214b9b9c85549ab3d2b972df0deef66ac2c98e82934ca974fdcd97f3309de967d3c9c43fa711a8d673af5d75465844bf8969c8d1948d903748ac7b8b1720fa64e50c35552c16704d214347f29fa77f77da6d75d7c752b742ad4855bae330426b823e742da31f816cc83bc16d69a9134be0cfb4a1d17ec34f1b5b32d5c20440b8536b1e88f0f247788386d0ed6c748e03a53160b4b30ed3748cc5000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000980a75ecd1309ea12fa2ed87a8744fbfc9b863d589037a9ace3b590165ea1c0c5ac72bf600b7c88c1e435f41932c1132aae1bfa0bb68e46b96ccb12c3415e4d82af717d8a2959d3f95eae5dc7d70144ce1b73b403b7eb6e0b973c2d38487e58fd6e145491b110080fb14ac915a0411fc78f19e09a399ddee0d20c63a75d8f930f1694544ad2dc01bb71b214cb885500844365e95cd9942c7276e7fd8a2750ec6dded3dcdc2f351782310b0eadc077db59abca0f0cd26776e2e7acb9f3bce40b1fa5221fd1561226c6263cc5ff474cf03cceff28abc65c9cbae594f725c80e12d96c9b86c3400e529bfe184056e257c07940bb664636f689e8d2027c834681f8f878b73445261034e946bb2d901b4b878f8b27bb8608c11016739b3f8a19e54ab8c7abacd936cfeba200f3645a98b65adb0dd3692b69ce0b3ae10e7176b9a4b0d83f04065b1042b4bcb646a34b75c550f92fc34b8b2b1db0fa0d3172db23ba92727c80bcd306320d0ff411bf858525fde13bc8e0370f84c8401e9c2e6a0820dc11d63176a0eb1b828bc5376867b275579112b7013358da40317e7bab6e98401e9c2e7a00edc71ce80105a3220a87bea2792fa340d66c59002f02b0a09349ed1ed284070808b972fac2b9077a4dcb6fc37093799a652858016c99142b227500c844fa97ec22e3f9d3b1e982f14bcd999a7453e89ce5ef5c55f1c7f8f74ba904186cd67828200"
 		extra, err := parseExtra(extraData)
 		assert.NoError(t, err)
 		{
@@ -140,53 +105,4 @@ func TestExtraParse(t *testing.T) {
 			}
 		}
 	}
-
-	// case 6, |---Extra Vanity---|---Validators Number and Validators Bytes---|---Turn Length---|---Vote Attestation---|---Extra Seal---|
-	{
-		extraData := "0xd883010209846765746888676f312e31392e38856c696e7578000000dc55905c071284214b9b9c85549ab3d2b972df0deef66ac2c98e82934ca974fdcd97f3309de967d3c9c43fa711a8d673af5d75465844bf8969c8d1948d903748ac7b8b1720fa64e50c35552c16704d214347f29fa77f77da6d75d7c752b742ad4855bae330426b823e742da31f816cc83bc16d69a9134be0cfb4a1d17ec34f1b5b32d5c20440b8536b1e88f0f247788386d0ed6c748e03a53160b4b30ed3748cc5000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000980a75ecd1309ea12fa2ed87a8744fbfc9b863d589037a9ace3b590165ea1c0c5ac72bf600b7c88c1e435f41932c1132aae1bfa0bb68e46b96ccb12c3415e4d82af717d8a2959d3f95eae5dc7d70144ce1b73b403b7eb6e0b973c2d38487e58fd6e145491b110080fb14ac915a0411fc78f19e09a399ddee0d20c63a75d8f930f1694544ad2dc01bb71b214cb885500844365e95cd9942c7276e7fd8a2750ec6dded3dcdc2f351782310b0eadc077db59abca0f0cd26776e2e7acb9f3bce40b1fa5221fd1561226c6263cc5ff474cf03cceff28abc65c9cbae594f725c80e12d96c9b86c3400e529bfe184056e257c07940bb664636f689e8d2027c834681f8f878b73445261034e946bb2d901b4b87804f8b27bb8608c11016739b3f8a19e54ab8c7abacd936cfeba200f3645a98b65adb0dd3692b69ce0b3ae10e7176b9a4b0d83f04065b1042b4bcb646a34b75c550f92fc34b8b2b1db0fa0d3172db23ba92727c80bcd306320d0ff411bf858525fde13bc8e0370f84c8401e9c2e6a0820dc11d63176a0eb1b828bc5376867b275579112b7013358da40317e7bab6e98401e9c2e7a00edc71ce80105a3220a87bea2792fa340d66c59002f02b0a09349ed1ed28407080048b972fac2b9077a4dcb6fc37093799a652858016c99142b227500c844fa97ec22e3f9d3b1e982f14bcd999a7453e89ce5ef5c55f1c7f8f74ba904186cd67828200"
-		extra, err := parseExtra(extraData)
-		assert.NoError(t, err)
-		{
-			var have = common.Bytes2Hex(extra.Validators[0].Address[:])
-			var want = "1284214b9b9c85549ab3d2b972df0deef66ac2c9"
-			if have != want {
-				t.Fatalf("extra.Validators[0].Address mismatch, have %s, want %s", have, want)
-			}
-		}
-		{
-			var have = common.Bytes2Hex(extra.Validators[0].BLSPublicKey[:])
-			var want = "8e82934ca974fdcd97f3309de967d3c9c43fa711a8d673af5d75465844bf8969c8d1948d903748ac7b8b1720fa64e50c"
-			if have != want {
-				t.Fatalf("extra.Validators[0].BLSPublicKey mismatch, have %s, want %s", have, want)
-			}
-		}
-		{
-			var have = extra.Validators[0].VoteIncluded
-			var want = true
-			if have != want {
-				t.Fatalf("extra.Validators[0].VoteIncluded mismatch, have %t, want %t", have, want)
-			}
-		}
-		{
-			var have = common.Bytes2Hex(extra.Data.TargetHash[:])
-			var want = "0edc71ce80105a3220a87bea2792fa340d66c59002f02b0a09349ed1ed284070"
-			if have != want {
-				t.Fatalf("extra.Data.TargetHash mismatch, have %s, want %s", have, want)
-			}
-		}
-		{
-			var have = extra.Data.TargetNumber
-			var want = uint64(32096999)
-			if have != want {
-				t.Fatalf("extra.Data.TargetNumber mismatch, have %d, want %d", have, want)
-			}
-		}
-		{
-			var have = extra.TurnLength
-			var want = uint8(4)
-			if *have != want {
-				t.Fatalf("extra.TurnLength mismatch, have %d, want %d", *have, want)
-			}
-		}
-	}
 }
```
@@ -24,7 +24,7 @@ const (
 	BLSPublicKeyLength = 48
 
 	// follow order in extra field
-	// |---Extra Vanity---|---Validators Number and Validators Bytes (or Empty)---|---Turn Length (or Empty)---|---Vote Attestation (or Empty)---|---Extra Seal---|
+	// |---Extra Vanity---|---Validators Number and Validators Bytes (or Empty)---|---Vote Attestation (or Empty)---|---Extra Seal---|
 	extraVanityLength    = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
 	validatorNumberSize  = 1  // Fixed number of extra prefix bytes reserved for validator number after Luban
 	validatorBytesLength = common.AddressLength + types.BLSPublicKeyLength
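For reference, the sizes above pin down the smallest well-formed extra-data. A minimal sketch of that arithmetic (the helper name and the 65-byte extra-seal constant are assumptions for illustration, not taken from this hunk):

```go
// minExtraLen sketches the smallest valid extra-data size for n validators
// under the layout in the comment above: vanity, validator count, validator
// entries (20-byte address + 48-byte BLS key), plus an assumed 65-byte seal.
func minExtraLen(n int) int {
	const (
		extraVanityLength    = 32
		validatorNumberSize  = 1
		validatorBytesLength = 20 + 48 // common.AddressLength + BLSPublicKeyLength
		extraSealLength      = 65      // assumption: standard clique-style seal
	)
	return extraVanityLength + validatorNumberSize + n*validatorBytesLength + extraSealLength
}
```

With 21 validators, for example, this gives 32 + 1 + 21*68 + 65 = 1526 bytes before any turn-length byte or vote attestation.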
@@ -35,7 +35,6 @@ type Extra struct {
 	ExtraVanity   string
 	ValidatorSize uint8
 	Validators    validatorsAscending
-	TurnLength    *uint8
 	*types.VoteAttestation
 	ExtraSeal []byte
 }
@@ -114,15 +113,6 @@ func parseExtra(hexData string) (*Extra, error) {
 		sort.Sort(extra.Validators)
 		data = data[validatorBytesTotalLength-validatorNumberSize:]
 		dataLength = len(data)
-
-		// parse TurnLength
-		if dataLength > 0 {
-			if data[0] != '\xf8' {
-				extra.TurnLength = &data[0]
-				data = data[1:]
-				dataLength = len(data)
-			}
-		}
 	}
 
 	// parse Vote Attestation
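The deleted branch keys off `'\xf8'`: in RLP, 0xf8 opens a list whose payload is longer than 55 bytes, which is how an encoded vote attestation begins here, so any other leading byte is read as the one-byte turn length. A hedged restatement of that dispatch (the function name is illustrative):

```go
// splitTurnLength mirrors the removed check: a leading byte other than 0xf8
// (the RLP long-list prefix that begins a vote attestation) is taken to be
// the turn length; otherwise the data is left untouched for the RLP decoder.
func splitTurnLength(data []byte) (turnLength *byte, rest []byte) {
	if len(data) > 0 && data[0] != 0xf8 {
		return &data[0], data[1:]
	}
	return nil, data
}
```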
@@ -158,10 +148,6 @@ func prettyExtra(extra Extra) {
 		}
 	}
 
-	if extra.TurnLength != nil {
-		fmt.Printf("TurnLength : %d\n", *extra.TurnLength)
-	}
-
 	if extra.VoteAttestation != nil {
 		fmt.Printf("Attestation :\n")
 		fmt.Printf("\tVoteAddressSet : %b, %d\n", extra.VoteAddressSet, bitset.From([]uint64{uint64(extra.VoteAddressSet)}).Count())
Binary file not shown.
@@ -1,23 +0,0 @@
-# 1.Background
-This is to support some projects with customized tokens that they want to integrate into the BSC faucet tool.
-
-## 1.1. How to Integrate Your Token
-- Step 1: Fund the faucet address by sending a specific amount of your BEP-20 token to the faucet address (0xaa25aa7a19f9c426e07dee59b12f944f4d9f1dd3) on the BSC testnet.
-- Step 2: Update this README.md file and create a Pull Request on [bsc github](https://github.com/bnb-chain/bsc) with relevant information.
-
-We will review the request, and once it is approved, the faucet tool will start to support the customized token and list it on https://www.bnbchain.org/en/testnet-faucet.
-
-# 2.Token List
-## 2.1.DemoToken
-- symbol: DEMO
-- amount: 10000000000000000000
-- icon: ./demotoken.png
-- addr: https://testnet.bscscan.com/address/0xe15c158d768c306dae87b96430a94f884333e55d
-- fundTx: [0xa499dc9aaf918aff0507538a8aa80a88d0af6ca15054e6acc57b69c651945280](https://testnet.bscscan.com/tx/0x2a3f334b6ca756b64331bdec9e6cf3207ac50a4839fda6379e909de4d9a194ca)
-
-## 2.2.DIN token
-- symbol: DIN
-- amount: 10000000000000000000
-- icon: ./DIN.png
-- addr: https://testnet.bscscan.com/address/0xb8b40FcC5B4519Dba0E07Ac8821884CE90BdE677
-- fundTx: [0x17fc4c1db133830c7c146a0d41ca1df31cb446989ec11b382d58bb6176d6fde3](https://testnet.bscscan.com/tx/0x17fc4c1db133830c7c146a0d41ca1df31cb446989ec11b382d58bb6176d6fde3)
Binary file not shown.
@@ -49,14 +49,12 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/gorilla/websocket"
-	"golang.org/x/time/rate"
 )
 
 var (
 	genesisFlag = flag.String("genesis", "", "Genesis json file to seed the chain with")
 	apiPortFlag = flag.Int("apiport", 8080, "Listener port for the HTTP API connection")
 	wsEndpoint  = flag.String("ws", "http://127.0.0.1:7777/", "Url to ws endpoint")
-	wsEndpointMainnet = flag.String("ws.mainnet", "", "Url to ws endpoint of BSC mainnet")
-
 	netnameFlag = flag.String("faucet.name", "", "Network name to assign to the faucet")
 	payoutFlag  = flag.Int("faucet.amount", 1, "Number of Ethers to pay out per user request")
@@ -78,12 +76,6 @@ var (
 	fixGasPrice        = flag.Int64("faucet.fixedprice", 0, "Will use fixed gas price if specified")
 	twitterTokenFlag   = flag.String("twitter.token", "", "Bearer token to authenticate with the v2 Twitter API")
 	twitterTokenV1Flag = flag.String("twitter.token.v1", "", "Bearer token to authenticate with the v1.1 Twitter API")
-
-	resendInterval    = 15 * time.Second
-	resendBatchSize   = 3
-	resendMaxGasPrice = big.NewInt(50 * params.GWei)
-	wsReadTimeout     = 5 * time.Minute
-	minMainnetBalance = big.NewInt(2 * 1e6 * params.GWei) // 0.002 bnb
 )
 
 var (
@@ -94,17 +86,11 @@ var (
 //go:embed faucet.html
 var websiteTmpl string
 
-func weiToEtherStringFx(wei *big.Int, prec int) string {
-	etherValue := new(big.Float).Quo(new(big.Float).SetInt(wei), big.NewFloat(params.Ether))
-	// Format the big.Float directly to a string with the specified precision
-	return etherValue.Text('f', prec)
-}
-
 func main() {
 	// Parse the flags and set up the logger to print everything requested
 	flag.Parse()
-	log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.FromLegacyLevel(*logFlag), false)))
-	log.Info("faucet started")
+	log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.FromLegacyLevel(*logFlag), true)))
 	// Construct the payout tiers
 	amounts := make([]string, *tiersFlag)
 	for i := 0; i < *tiersFlag; i++ {
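The deleted helper just divides a wei amount by 1e18 with big.Float and formats the result; a self-contained sketch of the same conversion (the 1e18 constant is inlined instead of using params.Ether):

```go
package main

import (
	"fmt"
	"math/big"
)

// weiToEtherString formats a wei amount as a decimal ether string with the
// given precision, mirroring the removed weiToEtherStringFx helper.
func weiToEtherString(wei *big.Int, prec int) string {
	ether := new(big.Float).Quo(new(big.Float).SetInt(wei), big.NewFloat(1e18))
	return ether.Text('f', prec)
}

func main() {
	// 2e15 wei is the 0.002 BNB minimum the removed mainnet check used.
	fmt.Println(weiToEtherString(big.NewInt(2_000_000_000_000_000), 3)) // 0.002
}
```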
@@ -183,7 +169,7 @@ func main() {
 		log.Crit("Failed to unlock faucet signer account", "err", err)
 	}
 	// Assemble and start the faucet light service
-	faucet, err := newFaucet(genesis, *wsEndpoint, *wsEndpointMainnet, ks, website.Bytes(), bep2eInfos)
+	faucet, err := newFaucet(genesis, *wsEndpoint, ks, website.Bytes(), bep2eInfos)
 	if err != nil {
 		log.Crit("Failed to start faucet", "err", err)
 	}
@@ -212,7 +198,6 @@ type bep2eInfo struct {
 type faucet struct {
 	config *params.ChainConfig // Chain configurations for signing
 	client *ethclient.Client   // Client connection to the Ethereum chain
-	clientMainnet *ethclient.Client // Client connection to BSC mainnet for balance check
 	index  []byte              // Index page to serve up on the web
 
 	keystore *keystore.KeyStore // Keystore containing the single signer
@@ -231,8 +216,6 @@ type faucet struct {
 
 	bep2eInfos map[string]bep2eInfo
 	bep2eAbi   abi.ABI
-
-	limiter *IPRateLimiter
 }
 
 // wsConn wraps a websocket connection with a write mutex as the underlying
@@ -242,7 +225,7 @@ type wsConn struct {
 	wlock sync.Mutex
 }
 
-func newFaucet(genesis *core.Genesis, url string, mainnetUrl string, ks *keystore.KeyStore, index []byte, bep2eInfos map[string]bep2eInfo) (*faucet, error) {
+func newFaucet(genesis *core.Genesis, url string, ks *keystore.KeyStore, index []byte, bep2eInfos map[string]bep2eInfo) (*faucet, error) {
 	bep2eAbi, err := abi.JSON(strings.NewReader(bep2eAbiJson))
 	if err != nil {
 		return nil, err
@@ -251,22 +234,10 @@ func newFaucet(genesis *core.Genesis, url string, mainnetUrl string, ks *keystor
 	if err != nil {
 		return nil, err
 	}
-	clientMainnet, err := ethclient.Dial(mainnetUrl)
-	if err != nil {
-		// skip mainnet balance check if it there is no available mainnet endpoint
-		log.Warn("dail mainnet endpoint failed", "mainnetUrl", mainnetUrl, "err", err)
-	}
-
-	// Allow 1 request per minute with burst of 5, and cache up to 1000 IPs
-	limiter, err := NewIPRateLimiter(rate.Limit(1.0), 5, 1000)
-	if err != nil {
-		return nil, err
-	}
-
 	return &faucet{
 		config: genesis.Config,
 		client: client,
-		clientMainnet: clientMainnet,
 		index:    index,
 		keystore: ks,
 		account:  ks.Accounts()[0],
@@ -274,7 +245,6 @@ func newFaucet(genesis *core.Genesis, url string, mainnetUrl string, ks *keystor
 		update:     make(chan struct{}, 1),
 		bep2eInfos: bep2eInfos,
 		bep2eAbi:   bep2eAbi,
-		limiter: limiter,
 	}, nil
 }
 
@@ -302,20 +272,6 @@ func (f *faucet) webHandler(w http.ResponseWriter, r *http.Request) {
 
 // apiHandler handles requests for Ether grants and transaction statuses.
 func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
-	ip := r.RemoteAddr
-	if len(r.Header.Get("X-Forwarded-For")) > 0 {
-		ips := strings.Split(r.Header.Get("X-Forwarded-For"), ",")
-		if len(ips) > 0 {
-			ip = strings.TrimSpace(ips[len(ips)-1])
-		}
-	}
-
-	if !f.limiter.GetLimiter(ip).Allow() {
-		log.Warn("Too many requests from client: ", "client", ip)
-		http.Error(w, "Too many requests", http.StatusTooManyRequests)
-		return
-	}
-
 	upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }}
 	conn, err := upgrader.Upgrade(w, r, nil)
 	if err != nil {
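The removed prologue trusts the last entry of X-Forwarded-For as the client address before consulting the limiter; a hedged standalone sketch of that extraction:

```go
package main

import (
	"net/http"
	"strings"
)

// clientIP picks the client address the way the removed handler did: the last
// comma-separated X-Forwarded-For entry (the one appended by the trusted
// proxy), falling back to the raw RemoteAddr when the header is absent.
func clientIP(r *http.Request) string {
	ip := r.RemoteAddr
	if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
		parts := strings.Split(xff, ",")
		ip = strings.TrimSpace(parts[len(parts)-1])
	}
	return ip
}
```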
@@ -398,11 +354,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
 			Captcha string `json:"captcha"`
 			Symbol  string `json:"symbol"`
 		}
-		// not sure if it helps or not, but set a read deadline could help prevent resource leakage
-		// if user did not give response for too long, then the routine will be stuck.
-		conn.SetReadDeadline(time.Now().Add(wsReadTimeout))
 		if err = conn.ReadJSON(&msg); err != nil {
-			log.Debug("read json message failed", "err", err, "ip", ip)
 			return
 		}
 		if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://twitter.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") {
@@ -420,9 +372,9 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
 			}
 			continue
 		}
-		log.Info("Faucet funds requested", "url", msg.URL, "tier", msg.Tier, "ip", ip)
+		log.Info("Faucet funds requested", "url", msg.URL, "tier", msg.Tier)
 
-		// check #1: captcha verifications to exclude robot
+		// If captcha verifications are enabled, make sure we're not dealing with a robot
 		if *captchaToken != "" {
 			form := url.Values{}
 			form.Add("secret", *captchaSecret)
@@ -499,55 +451,24 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
 			}
 			continue
 		}
+		log.Info("Faucet request valid", "url", msg.URL, "tier", msg.Tier, "user", username, "address", address)
 
-		// check #2: check IP and ID(address) to ensure the user didn't request funds too frequently
+		// Ensure the user didn't request funds too recently
 		f.lock.Lock()
+		var (
+			fund    bool
+			timeout time.Time
+		)
+
 		if ipTimeout := f.timeouts[ips[len(ips)-2]]; time.Now().Before(ipTimeout) {
-			f.lock.Unlock()
 			if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(ipTimeout)))); err != nil { // nolint: gosimple
 				log.Warn("Failed to send funding error to client", "err", err)
-				return
 			}
-			log.Info("too frequent funding(ip)", "TimeLeft", common.PrettyDuration(time.Until(ipTimeout)), "ip", ips[len(ips)-2], "ipsStr", ipsStr)
-			continue
-		}
-		if idTimeout := f.timeouts[id]; time.Now().Before(idTimeout) {
 			f.lock.Unlock()
-			// Send an error if too frequent funding, otherwise a success
-			if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(idTimeout)))); err != nil { // nolint: gosimple
-				log.Warn("Failed to send funding error to client", "err", err)
-				return
-			}
-			log.Info("too frequent funding(id)", "TimeLeft", common.PrettyDuration(time.Until(idTimeout)), "id", id)
 			continue
 		}
-		// check #3: minimum mainnet balance check, internal error will bypass the check to avoid blocking the faucet service
-		if f.clientMainnet != nil {
-			mainnetAddr := address
-			balanceMainnet, err := f.clientMainnet.BalanceAt(context.Background(), mainnetAddr, nil)
-			if err != nil {
-				log.Warn("check balance failed, call BalanceAt", "err", err)
-			} else if balanceMainnet == nil {
-				log.Warn("check balance failed, balanceMainnet is nil")
-			} else {
-				if balanceMainnet.Cmp(minMainnetBalance) < 0 {
-					f.lock.Unlock()
-					log.Warn("insufficient BNB on BSC mainnet", "address", mainnetAddr,
-						"balanceMainnet", balanceMainnet, "minMainnetBalance", minMainnetBalance)
-					// Send an error if failed to meet the minimum balance requirement
-					if err = sendError(wsconn, fmt.Errorf("insufficient BNB on BSC mainnet (require >=%sBNB)",
-						weiToEtherStringFx(minMainnetBalance, 3))); err != nil {
-						log.Warn("Failed to send mainnet minimum balance error to client", "err", err)
-						return
-					}
-					continue
-				}
-			}
-		}
-		log.Info("Faucet request valid", "url", msg.URL, "tier", msg.Tier, "user", username, "address", address, "ip", ip)
 
-		// now, it is ok to send tBNB or other tokens
+		if timeout = f.timeouts[id]; time.Now().After(timeout) {
 			var tx *types.Transaction
 			if msg.Symbol == "BNB" {
 				// User wasn't funded recently, create the funding transaction
@@ -595,12 +516,23 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
 				Time: time.Now(),
 				Tx:   signed,
 			})
-			timeoutInt64 := time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute
-			grace := timeoutInt64 / 288 // 24h timeout => 5m grace
+			timeout := time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute
+			grace := timeout / 288 // 24h timeout => 5m grace
 
-			f.timeouts[id] = time.Now().Add(timeoutInt64 - grace)
-			f.timeouts[ips[len(ips)-2]] = time.Now().Add(timeoutInt64 - grace)
+			f.timeouts[id] = time.Now().Add(timeout - grace)
+			f.timeouts[ips[len(ips)-2]] = time.Now().Add(timeout - grace)
+			fund = true
+		}
 		f.lock.Unlock()
 
+		// Send an error if too frequent funding, otherwise a success
+		if !fund {
+			if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple
+				log.Warn("Failed to send funding error to client", "err", err)
+				return
+			}
+			continue
+		}
 		if err = sendSuccess(wsconn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil {
 			log.Warn("Failed to send funding success to client", "err", err)
 			return
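Both sides compute the same cooldown: the base wait grows threefold per tier, minus a grace of timeout/288 so a 24-hour timeout lets users return five minutes early. The arithmetic in isolation (the function name is illustrative):

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// cooldown returns how long a funded user must wait before the next request:
// baseMinutes * 3^tier, minus a 1/288 grace (a 24h timeout => 5m grace).
func cooldown(baseMinutes, tier int) time.Duration {
	timeout := time.Duration(baseMinutes*int(math.Pow(3, float64(tier)))) * time.Minute
	return timeout - timeout/288
}

func main() {
	fmt.Println(cooldown(1440, 0)) // 24h base wait: prints 23h55m0s
}
```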
@@ -649,52 +581,9 @@ func (f *faucet) refresh(head *types.Header) error {
 	f.lock.Lock()
 	f.head, f.balance = head, balance
 	f.price, f.nonce = price, nonce
-	if len(f.reqs) == 0 {
-		log.Debug("refresh len(f.reqs) == 0", "f.nonce", f.nonce)
-		f.lock.Unlock()
-		return nil
-	}
-	if f.reqs[0].Tx.Nonce() == f.nonce {
-		// if the next Tx failed to be included for a certain time(resendInterval), try to
-		// resend it with higher gasPrice, as it could be discarded in the network.
-		// Also resend extra following txs, as they could be discarded as well.
-		if time.Now().After(f.reqs[0].Time.Add(resendInterval)) {
-			for i, req := range f.reqs {
-				if i >= resendBatchSize {
-					break
-				}
-				prePrice := req.Tx.GasPrice()
-				// bump gas price 20% to replace the previous tx
-				newPrice := new(big.Int).Add(prePrice, new(big.Int).Div(prePrice, big.NewInt(5)))
-				if newPrice.Cmp(resendMaxGasPrice) >= 0 {
-					log.Info("resendMaxGasPrice reached", "newPrice", newPrice, "resendMaxGasPrice", resendMaxGasPrice, "nonce", req.Tx.Nonce())
-					break
-				}
-				newTx := types.NewTransaction(req.Tx.Nonce(), *req.Tx.To(), req.Tx.Value(), req.Tx.Gas(), newPrice, req.Tx.Data())
-				newSigned, err := f.keystore.SignTx(f.account, newTx, f.config.ChainID)
-				if err != nil {
-					log.Error("resend sign tx failed", "err", err)
-				}
-				log.Info("reqs[0] Tx has been stuck for a while, trigger resend",
-					"resendInterval", resendInterval, "resendTxSize", resendBatchSize,
-					"preHash", req.Tx.Hash().Hex(), "newHash", newSigned.Hash().Hex(),
-					"newPrice", newPrice, "nonce", req.Tx.Nonce(), "req.Tx.Gas()", req.Tx.Gas())
-				if err := f.client.SendTransaction(context.Background(), newSigned); err != nil {
-					log.Warn("resend tx failed", "err", err)
-					continue
-				}
-				req.Tx = newSigned
-			}
-		}
-	}
-	// it is abnormal that reqs[0] has larger nonce than next expected nonce.
-	// could be caused by reorg? reset it
-	if f.reqs[0].Tx.Nonce() > f.nonce {
-		log.Warn("reset due to nonce gap", "f.nonce", f.nonce, "f.reqs[0].Tx.Nonce()", f.reqs[0].Tx.Nonce())
+	if len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() > f.nonce {
 		f.reqs = f.reqs[:0]
 	}
-	// remove the reqs if they have smaller nonce, which means it is no longer valid,
-	// either has been accepted or replaced.
 	for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce {
 		f.reqs = f.reqs[1:]
 	}
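The removed resend loop replaces a stuck transaction by bumping its gas price 20% (price plus price/5) and gives up once the bump would reach resendMaxGasPrice. The price arithmetic in isolation (a sketch; names are illustrative):

```go
package main

import (
	"fmt"
	"math/big"
)

// bumpGasPrice applies the removed loop's 20% bump (prev + prev/5) and
// reports false once the new price would reach the hard cap.
func bumpGasPrice(prev, maxPrice *big.Int) (*big.Int, bool) {
	next := new(big.Int).Add(prev, new(big.Int).Div(prev, big.NewInt(5)))
	if next.Cmp(maxPrice) >= 0 {
		return nil, false // cap reached, stop resending
	}
	return next, true
}

func main() {
	cap50 := big.NewInt(50_000_000_000) // 50 gwei, as resendMaxGasPrice was
	p, ok := bumpGasPrice(big.NewInt(10_000_000_000), cap50)
	fmt.Println(p, ok) // 12000000000 true
}
```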
@@ -736,7 +625,6 @@ func (f *faucet) loop() {
 			balance := new(big.Int).Div(f.balance, ether)
 
 			for _, conn := range f.conns {
-				go func(conn *wsConn) {
 				if err := send(conn, map[string]interface{}{
 					"funds":  balance,
 					"funded": f.nonce,
@@ -744,19 +632,17 @@ func (f *faucet) loop() {
 				}, time.Second); err != nil {
 					log.Warn("Failed to send stats to client", "err", err)
 					conn.conn.Close()
-					return // Exit the goroutine if the first send fails
+					continue
 				}
 
 				if err := send(conn, head, time.Second); err != nil {
 					log.Warn("Failed to send header to client", "err", err)
 					conn.conn.Close()
 				}
-				}(conn)
 			}
 			f.lock.RUnlock()
 		}
 	}()
-	// Wait for various events and assign to the appropriate background threads
+	// Wait for various events and assing to the appropriate background threads
 	for {
 		select {
 		case head := <-heads:
@@ -770,12 +656,10 @@ func (f *faucet) loop() {
 			// Pending requests updated, stream to clients
 			f.lock.RLock()
 			for _, conn := range f.conns {
-				go func(conn *wsConn) {
 				if err := send(conn, map[string]interface{}{"requests": f.reqs}, time.Second); err != nil {
 					log.Warn("Failed to send requests to client", "err", err)
 					conn.conn.Close()
 				}
-				}(conn)
 			}
 			f.lock.RUnlock()
 		}
@@ -1,44 +0,0 @@
-package main
-
-import (
-	lru "github.com/hashicorp/golang-lru"
-	"golang.org/x/time/rate"
-)
-
-type IPRateLimiter struct {
-	ips *lru.Cache // LRU cache to store IP addresses and their associated rate limiters
-	r   rate.Limit // the rate limit, e.g., 5 requests per second
-	b   int        // the burst size, e.g., allowing a burst of 10 requests at once. The rate limiter gets into action
-	// only after this number exceeds
-}
-
-func NewIPRateLimiter(r rate.Limit, b int, size int) (*IPRateLimiter, error) {
-	cache, err := lru.New(size)
-	if err != nil {
-		return nil, err
-	}
-
-	i := &IPRateLimiter{
-		ips: cache,
-		r:   r,
-		b:   b,
-	}
-
-	return i, nil
-}
-
-func (i *IPRateLimiter) addIP(ip string) *rate.Limiter {
-	limiter := rate.NewLimiter(i.r, i.b)
-
-	i.ips.Add(ip, limiter)
-
-	return limiter
-}
-
-func (i *IPRateLimiter) GetLimiter(ip string) *rate.Limiter {
-	if limiter, exists := i.ips.Get(ip); exists {
-		return limiter.(*rate.Limiter)
-	}
-
-	return i.addIP(ip)
-}
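For context, the faucet created one of these per process and consulted it per request. Note that rate.Limit(1.0) refills one token per second, even though the removed call-site comment said "per minute". A hedged usage sketch (the HTTP wiring around it is illustrative):

```go
// Sketch: gate an HTTP handler with the deleted limiter. The parameters match
// the faucet's call: burst of 5, up to 1000 cached IPs; rate.Limit(1.0) is
// one request per second (the original "per minute" comment overstated it).
limiter, err := NewIPRateLimiter(rate.Limit(1.0), 5, 1000)
if err != nil {
	panic(err) // sketch only; the faucet would log and abort here
}
if !limiter.GetLimiter(clientIP).Allow() {
	http.Error(w, "Too many requests", http.StatusTooManyRequests)
	return
}
```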
@@ -62,10 +62,8 @@ var (
 		ArgsUsage: "<genesisPath>",
 		Flags: flags.Merge([]cli.Flag{
 			utils.CachePreimagesFlag,
-			utils.OverridePassedForkTime,
-			utils.OverrideBohr,
+			utils.OverrideCancun,
 			utils.OverrideVerkle,
-			utils.MultiDataBaseFlag,
 		}, utils.DatabaseFlags),
 		Description: `
 The init command initializes a new genesis block and definition for the network.
@@ -254,13 +252,9 @@ func initGenesis(ctx *cli.Context) error {
 	defer stack.Close()
 
 	var overrides core.ChainOverrides
-	if ctx.IsSet(utils.OverridePassedForkTime.Name) {
-		v := ctx.Uint64(utils.OverridePassedForkTime.Name)
-		overrides.OverridePassedForkTime = &v
-	}
-	if ctx.IsSet(utils.OverrideBohr.Name) {
-		v := ctx.Uint64(utils.OverrideBohr.Name)
-		overrides.OverrideBohr = &v
+	if ctx.IsSet(utils.OverrideCancun.Name) {
+		v := ctx.Uint64(utils.OverrideCancun.Name)
+		overrides.OverrideCancun = &v
 	}
 	if ctx.IsSet(utils.OverrideVerkle.Name) {
 		v := ctx.Uint64(utils.OverrideVerkle.Name)
@@ -765,7 +759,7 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth
|
arg := ctx.Args().First()
|
||||||
if hashish(arg) {
|
if hashish(arg) {
|
||||||
hash := common.HexToHash(arg)
|
hash := common.HexToHash(arg)
|
||||||
if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
|
if number := rawdb.ReadHeaderNumber(db.BlockStore(), hash); number != nil {
|
||||||
header = rawdb.ReadHeader(db, hash, *number)
|
header = rawdb.ReadHeader(db, hash, *number)
|
||||||
} else {
|
} else {
|
||||||
return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
|
return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
|
||||||
|
@ -33,7 +33,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||||
"github.com/ethereum/go-ethereum/accounts/scwallet"
|
"github.com/ethereum/go-ethereum/accounts/scwallet"
|
||||||
"github.com/ethereum/go-ethereum/accounts/usbwallet"
|
"github.com/ethereum/go-ethereum/accounts/usbwallet"
|
||||||
"github.com/ethereum/go-ethereum/beacon/fakebeacon"
|
|
||||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
@@ -97,7 +96,6 @@ type gethConfig struct {
 	Node     node.Config
 	Ethstats ethstatsConfig
 	Metrics  metrics.Config
-	FakeBeacon fakebeacon.Config
 }
 
 func loadConfig(file string, cfg *gethConfig) error {
@@ -187,13 +185,9 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 		params.RialtoGenesisHash = common.HexToHash(v)
 	}
 
-	if ctx.IsSet(utils.OverridePassedForkTime.Name) {
-		v := ctx.Uint64(utils.OverridePassedForkTime.Name)
-		cfg.Eth.OverridePassedForkTime = &v
-	}
-	if ctx.IsSet(utils.OverrideBohr.Name) {
-		v := ctx.Uint64(utils.OverrideBohr.Name)
-		cfg.Eth.OverrideBohr = &v
+	if ctx.IsSet(utils.OverrideCancun.Name) {
+		v := ctx.Uint64(utils.OverrideCancun.Name)
+		cfg.Eth.OverrideCancun = &v
 	}
 	if ctx.IsSet(utils.OverrideVerkle.Name) {
 		v := ctx.Uint64(utils.OverrideVerkle.Name)
@@ -212,9 +206,6 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 	if ctx.IsSet(utils.OverrideBreatheBlockInterval.Name) {
 		params.BreatheBlockInterval = ctx.Uint64(utils.OverrideBreatheBlockInterval.Name)
 	}
-	if ctx.IsSet(utils.OverrideFixedTurnLength.Name) {
-		params.FixedTurnLength = ctx.Uint64(utils.OverrideFixedTurnLength.Name)
-	}
 
 	backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
 
@@ -244,22 +235,11 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 		utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL)
 	}
 
-	if ctx.IsSet(utils.FakeBeaconAddrFlag.Name) {
-		cfg.FakeBeacon.Addr = ctx.String(utils.FakeBeaconAddrFlag.Name)
-	}
-	if ctx.IsSet(utils.FakeBeaconPortFlag.Name) {
-		cfg.FakeBeacon.Port = ctx.Int(utils.FakeBeaconPortFlag.Name)
-	}
-	if cfg.FakeBeacon.Enable || ctx.IsSet(utils.FakeBeaconEnabledFlag.Name) {
-		go fakebeacon.NewService(&cfg.FakeBeacon, backend).Run()
-	}
-
 	git, _ := version.VCS()
 	utils.SetupMetrics(ctx,
 		utils.EnableBuildInfo(git.Commit, git.Date),
 		utils.EnableMinerInfo(ctx, &cfg.Eth.Miner),
 		utils.EnableNodeInfo(&cfg.Eth.TxPool, stack.Server().NodeInfo()),
-		utils.EnableNodeTrack(ctx, &cfg.Eth, stack),
 	)
 	return stack, backend
 }
@@ -300,6 +280,7 @@ func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) {
 	if ctx.IsSet(utils.MetricsEnabledExpensiveFlag.Name) {
 		cfg.Metrics.EnabledExpensive = ctx.Bool(utils.MetricsEnabledExpensiveFlag.Name)
 	}
+	cfg.Metrics.EnabledExpensive = true
 	if ctx.IsSet(utils.MetricsHTTPFlag.Name) {
 		cfg.Metrics.HTTP = ctx.String(utils.MetricsHTTPFlag.Name)
 	}
@@ -106,12 +106,12 @@ Remove blockchain and state databases`,
 	dbInspectTrieCmd = &cli.Command{
 		Action:    inspectTrie,
 		Name:      "inspect-trie",
-		ArgsUsage: "<blocknum> <jobnum> <topn>",
+		ArgsUsage: "<blocknum> <jobnum>",
 		Flags: []cli.Flag{
 			utils.DataDirFlag,
 			utils.SyncModeFlag,
 		},
-		Usage:       "Inspect the MPT tree of the account and contract. 'blocknum' can be latest/snapshot/number. 'topn' means output the top N storage tries info ranked by the total number of TrieNodes",
+		Usage:       "Inspect the MPT tree of the account and contract.",
 		Description: `This commands iterates the entrie WorldState.`,
 	}
 	dbCheckStateContentCmd = &cli.Command{
@@ -386,7 +386,6 @@ func inspectTrie(ctx *cli.Context) error {
 		blockNumber  uint64
 		trieRootHash common.Hash
 		jobnum       uint64
-		topN         uint64
 	)
 
 	stack, _ := makeConfigNode(ctx)
@@ -397,8 +396,8 @@ func inspectTrie(ctx *cli.Context) error {
 	var headerBlockHash common.Hash
 	if ctx.NArg() >= 1 {
 		if ctx.Args().Get(0) == "latest" {
-			headerHash := rawdb.ReadHeadHeaderHash(db)
-			blockNumber = *(rawdb.ReadHeaderNumber(db, headerHash))
+			headerHash := rawdb.ReadHeadHeaderHash(db.BlockStore())
+			blockNumber = *(rawdb.ReadHeaderNumber(db.BlockStore(), headerHash))
 		} else if ctx.Args().Get(0) == "snapshot" {
 			trieRootHash = rawdb.ReadSnapshotRoot(db)
 			blockNumber = math.MaxUint64
@@ -406,37 +405,24 @@ func inspectTrie(ctx *cli.Context) error {
 			var err error
 			blockNumber, err = strconv.ParseUint(ctx.Args().Get(0), 10, 64)
 			if err != nil {
-				return fmt.Errorf("failed to parse blocknum, Args[0]: %v, err: %v", ctx.Args().Get(0), err)
+				return fmt.Errorf("failed to Parse blocknum, Args[0]: %v, err: %v", ctx.Args().Get(0), err)
 			}
 		}
 
 		if ctx.NArg() == 1 {
 			jobnum = 1000
-			topN = 10
-		} else if ctx.NArg() == 2 {
-			var err error
-			jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
-			if err != nil {
-				return fmt.Errorf("failed to parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
-			}
-			topN = 10
 		} else {
 			var err error
 			jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
 			if err != nil {
-				return fmt.Errorf("failed to parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
-			}
-
-			topN, err = strconv.ParseUint(ctx.Args().Get(2), 10, 64)
-			if err != nil {
-				return fmt.Errorf("failed to parse topn, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
+				return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
 			}
 		}
 
 		if blockNumber != math.MaxUint64 {
 			headerBlockHash = rawdb.ReadCanonicalHash(db, blockNumber)
 			if headerBlockHash == (common.Hash{}) {
-				return errors.New("ReadHeadBlockHash empty hash")
+				return errors.New("ReadHeadBlockHash empry hash")
 			}
 			blockHeader := rawdb.ReadHeader(db, headerBlockHash, blockNumber)
 			trieRootHash = blockHeader.Root
@@ -451,7 +437,6 @@ func inspectTrie(ctx *cli.Context) error {
 		if dbScheme == rawdb.PathScheme {
 			config = &triedb.Config{
 				PathDB: utils.PathDBConfigAddJournalFilePath(stack, pathdb.ReadOnly),
-				Cache:  0,
 			}
 		} else if dbScheme == rawdb.HashScheme {
 			config = triedb.HashDefaults
@@ -463,7 +448,7 @@ func inspectTrie(ctx *cli.Context) error {
 			fmt.Printf("fail to new trie tree, err: %v, rootHash: %v\n", err, trieRootHash.String())
 			return err
 		}
-		theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum, int(topN))
+		theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum)
 		if err != nil {
 			return err
 		}
@@ -508,7 +493,7 @@ func ancientInspect(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack, true, false)
+	db := utils.MakeChainDatabase(ctx, stack, true, true)
 	defer db.Close()
 	return rawdb.AncientInspect(db)
 }
@@ -1212,7 +1197,7 @@ func showMetaData(ctx *cli.Context) error {
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
 	}
-	data := rawdb.ReadChainMetadata(db)
+	data := rawdb.ReadChainMetadataFromMultiDatabase(db)
 	data = append(data, []string{"frozen", fmt.Sprintf("%d items", ancients)})
 	data = append(data, []string{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))})
 	if b := rawdb.ReadHeadBlock(db); b != nil {
@@ -1255,7 +1240,7 @@ func hbss2pbss(ctx *cli.Context) error {
 	defer stack.Close()
 
 	db := utils.MakeChainDatabase(ctx, stack, false, false)
-	db.BlockStore().Sync()
+	db.Sync()
 	stateDiskDb := db.StateStore()
 	defer db.Close()
 
@@ -1273,8 +1258,8 @@ func hbss2pbss(ctx *cli.Context) error {
 	log.Info("hbss2pbss triedb", "scheme", triedb.Scheme())
 	defer triedb.Close()
 
-	headerHash := rawdb.ReadHeadHeaderHash(db)
-	blockNumber := rawdb.ReadHeaderNumber(db, headerHash)
+	headerHash := rawdb.ReadHeadHeaderHash(db.BlockStore())
+	blockNumber := rawdb.ReadHeaderNumber(db.BlockStore(), headerHash)
 	if blockNumber == nil {
 		log.Error("read header number failed.")
 		return fmt.Errorf("read header number failed")
@@ -72,14 +72,12 @@ var (
 		utils.USBFlag,
 		utils.SmartCardDaemonPathFlag,
 		utils.RialtoHash,
-		utils.OverridePassedForkTime,
-		utils.OverrideBohr,
+		utils.OverrideCancun,
 		utils.OverrideVerkle,
 		utils.OverrideFullImmutabilityThreshold,
 		utils.OverrideMinBlocksForBlobRequests,
 		utils.OverrideDefaultExtraReserveForBlobRequests,
 		utils.OverrideBreatheBlockInterval,
-		utils.OverrideFixedTurnLength,
 		utils.EnablePersonal,
 		utils.TxPoolLocalsFlag,
 		utils.TxPoolNoLocalsFlag,
@@ -127,7 +125,6 @@ var (
 		utils.CacheSnapshotFlag,
 		// utils.CacheNoPrefetchFlag,
 		utils.CachePreimagesFlag,
-		utils.MultiDataBaseFlag,
 		utils.PersistDiffFlag,
 		utils.DiffBlockFlag,
 		utils.PruneAncientDataFlag,
@@ -232,12 +229,6 @@ var (
 		utils.MetricsInfluxDBBucketFlag,
 		utils.MetricsInfluxDBOrganizationFlag,
 	}
-
-	fakeBeaconFlags = []cli.Flag{
-		utils.FakeBeaconEnabledFlag,
-		utils.FakeBeaconAddrFlag,
-		utils.FakeBeaconPortFlag,
-	}
 )
 
 var app = flags.NewApp("the go-ethereum command line interface")
@@ -292,7 +283,6 @@ func init() {
 		consoleFlags,
 		debug.Flags,
 		metricsFlags,
-		fakeBeaconFlags,
 	)
 	flags.AutoEnvVars(app.Flags, "GETH")
 
@@ -344,6 +334,9 @@ func prepare(ctx *cli.Context) {
 5. Networking is disabled; there is no listen-address, the maximum number of peers is set
 to 0, and discovery is disabled.
 `)
 
+	case !ctx.IsSet(utils.NetworkIdFlag.Name):
+		log.Info("Starting Geth on BSC mainnet...")
 	}
 	// If we're a full node on mainnet without --cache specified, bump default cache allowance
 	if !ctx.IsSet(utils.CacheFlag.Name) && !ctx.IsSet(utils.NetworkIdFlag.Name) {
@@ -378,6 +371,8 @@ func geth(ctx *cli.Context) error {
 // it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
 // miner.
 func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isConsole bool) {
+	debug.Memsize.Add("node", stack)
+
 	// Start up the node itself
 	utils.StartNode(ctx, stack, isConsole)
 
@@ -450,23 +445,22 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isCon
 	}
 
 	// Start auxiliary services if enabled
-	ethBackend, ok := backend.(*eth.EthAPIBackend)
-	gasCeil := ethBackend.Miner().GasCeil()
-	if gasCeil > params.SystemTxsGas {
-		ethBackend.TxPool().SetMaxGas(gasCeil - params.SystemTxsGas)
-	}
 	if ctx.Bool(utils.MiningEnabledFlag.Name) {
 		// Mining only makes sense if a full Ethereum node is running
 		if ctx.String(utils.SyncModeFlag.Name) == "light" {
 			utils.Fatalf("Light clients do not support mining")
 		}
+		ethBackend, ok := backend.(*eth.EthAPIBackend)
 		if !ok {
 			utils.Fatalf("Ethereum service not running")
 		}
 		// Set the gas price to the limits from the CLI and start mining
 		gasprice := flags.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
 		ethBackend.TxPool().SetGasTip(gasprice)
+		gasCeil := ethBackend.Miner().GasCeil()
+		if gasCeil > params.SystemTxsGas {
+			ethBackend.TxPool().SetMaxGas(gasCeil - params.SystemTxsGas)
+		}
 		if err := ethBackend.StartMining(); err != nil {
 			utils.Fatalf("Failed to start mining: %v", err)
 		}
@@ -75,7 +75,7 @@ func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, ancient
 	if err != nil {
 		return nil, err
 	}
-	frdb, err := rawdb.NewDatabaseWithFreezer(kvdb, ancient, namespace, readonly, disableFreeze, isLastOffset, pruneAncientData, false)
+	frdb, err := rawdb.NewDatabaseWithFreezer(kvdb, ancient, namespace, readonly, disableFreeze, isLastOffset, pruneAncientData)
 	if err != nil {
 		kvdb.Close()
 		return nil, err
@@ -155,12 +155,6 @@ func BlockchainCreator(t *testing.T, chaindbPath, AncientPath string, blockRemai
 	triedb := triedb.NewDatabase(db, nil)
 	defer triedb.Close()
 
-	if err = db.SetupFreezerEnv(&ethdb.FreezerEnv{
-		ChainCfg:         gspec.Config,
-		BlobExtraReserve: params.DefaultExtraReserveForBlobRequests,
-	}); err != nil {
-		t.Fatalf("Failed to create chain: %v", err)
-	}
 	genesis := gspec.MustCommit(db, triedb)
 	// Initialize a fresh chain with only a genesis block
 	blockchain, err := core.NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
@@ -184,10 +178,11 @@ func BlockchainCreator(t *testing.T, chaindbPath, AncientPath string, blockRemai
 
 	// Force run a freeze cycle
 	type freezer interface {
-		Freeze(threshold uint64) error
+		Freeze() error
 		Ancients() (uint64, error)
 	}
-	db.(freezer).Freeze(10)
+	blockchain.SetFinalized(blocks[len(blocks)-1].Header())
+	db.(freezer).Freeze()
 
 	frozen, err := db.Ancients()
 	//make sure there're frozen items
@@ -43,11 +43,9 @@ import (
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
"github.com/ethereum/go-ethereum/triedb"
|
"github.com/ethereum/go-ethereum/triedb"
|
||||||
"github.com/ethereum/go-ethereum/triedb/pathdb"
|
|
||||||
cli "github.com/urfave/cli/v2"
|
cli "github.com/urfave/cli/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -247,16 +245,7 @@ func accessDb(ctx *cli.Context, stack *node.Node) (ethdb.Database, error) {
 		NoBuild:    true,
 		AsyncBuild: false,
 	}
-	dbScheme := rawdb.ReadStateScheme(chaindb)
-	var config *triedb.Config
-	if dbScheme == rawdb.PathScheme {
-		config = &triedb.Config{
-			PathDB: utils.PathDBConfigAddJournalFilePath(stack, pathdb.ReadOnly),
-		}
-	} else if dbScheme == rawdb.HashScheme {
-		config = triedb.HashDefaults
-	}
-	snaptree, err := snapshot.New(snapconfig, chaindb, triedb.NewDatabase(chaindb, config), headBlock.Root(), TriesInMemory, false)
+	snaptree, err := snapshot.New(snapconfig, chaindb, triedb.NewDatabase(chaindb, nil), headBlock.Root(), TriesInMemory, false)
 	if err != nil {
 		log.Error("snaptree error", "err", err)
 		return nil, err // The relevant snapshot(s) might not exist
@@ -344,9 +333,6 @@ func pruneBlock(ctx *cli.Context) error {
 	stack, config = makeConfigNode(ctx)
 	defer stack.Close()
 	blockAmountReserved = ctx.Uint64(utils.BlockAmountReserved.Name)
-	if blockAmountReserved < params.FullImmutabilityThreshold {
-		return fmt.Errorf("block-amount-reserved must be greater than or equal to %d", params.FullImmutabilityThreshold)
-	}
 	chaindb, err = accessDb(ctx, stack)
 	if err != nil {
 		return err
@@ -25,23 +25,3 @@ testnet validators version
 ```bash
 node gettxcount.js --rpc ${url} --startNum ${start} --endNum ${end} --miner ${miner} (optional)
 ```
-
-### 3. Get Performance
-```bash
-node get_perf.js --rpc ${url} --startNum ${start} --endNum ${end}
-```
-output as following
-```bash
-Get the performance between [ 19470 , 19670 )
-txCountPerBlock = 3142.81 txCountTotal = 628562 BlockCount = 200 avgBlockTime = 3.005 inturnBlocksRatio = 0.975 justifiedBlocksRatio = 0.98
-txCountPerSecond = 1045.8602329450914 avgGasUsedPerBlock = 250.02062627 avgGasUsedPerSecond = 83.20153952412646
-```
-
-### 4. Get validators slash count
-```bash
-use the latest block
-node getslashcount.js --Rpc ${ArchiveRpc}
-use a block number
-node getslashcount.js --Rpc ${ArchiveRpc} --Num ${blockNum}
-```
@@ -1,51 +0,0 @@
-import { ethers } from "ethers";
-import program from "commander";
-
-// depends on ethjs v6.11.0+ for 4844, https://github.com/ethers-io/ethers.js/releases/tag/v6.11.0
-// BSC testnet enabled 4844 on block: 39539137
-// Usage:
-//   nvm use 20
-//   node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137
-//   node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137 --endNum 40345994
-program.option("--rpc <Rpc>", "Rpc Server URL");
-program.option("--startNum <Num>", "start block", 0);
-program.option("--endNum <Num>", "end block", 0);
-program.parse(process.argv);
-
-const provider = new ethers.JsonRpcProvider(program.rpc);
-const main = async () => {
-    var startBlock = parseInt(program.startNum)
-    var endBlock = parseInt(program.endNum)
-    if (isNaN(endBlock) || isNaN(startBlock) || startBlock == 0) {
-        console.error("invalid input, --startNum", program.startNum, "--end", program.endNum)
-        return
-    }
-    // if --endNum is not specified, set it to the latest block number.
-    if (endBlock == 0) {
-        endBlock = await provider.getBlockNumber();
-    }
-    if (startBlock > endBlock) {
-        console.error("invalid input, startBlock:",startBlock, " endBlock:", endBlock);
-        return
-    }
-
-    for (let i = startBlock; i <= endBlock; i++) {
-        let blockData = await provider.getBlock(i);
-        console.log("startBlock:",startBlock, "endBlock:", endBlock, "curBlock", i, "blobGasUsed", blockData.blobGasUsed);
-        if (blockData.blobGasUsed == 0) {
-            continue
-        }
-        for (let txIndex = 0; txIndex <= blockData.transactions.length - 1; txIndex++) {
-            let txHash = blockData.transactions[txIndex]
-            let txData = await provider.getTransaction(txHash);
-            if (txData.type == 3) {
-                console.log("BlobTx in block:",i, " txIndex:", txIndex, " txHash:", txHash);
-            }
-        }
-    }
-};
-main().then(() => process.exit(0))
-    .catch((error) => {
-        console.error(error);
-        process.exit(1);
-    });
@@ -1,49 +0,0 @@
import { ethers } from "ethers";
|
|
||||||
import program from "commander";
|
|
||||||
|
|
||||||
// Usage:
|
|
||||||
// node faucet_request.js --rpc localhost:8545 --startNum 39539137
|
|
||||||
// node faucet_request.js --rpc localhost:8545 --startNum 39539137 --endNum 40345994
|
|
||||||
|
|
||||||
// node faucet_request.js --rpc https://data-seed-prebsc-1-s1.bnbchain.org:8545 --startNum 39539137 --endNum 40345994
|
|
||||||
program.option("--rpc <Rpc>", "Rpc Server URL");
|
|
||||||
program.option("--startNum <Num>", "start block", 0);
|
|
||||||
program.option("--endNum <Num>", "end block", 0);
|
|
||||||
program.parse(process.argv);
|
|
||||||
|
|
||||||
const provider = new ethers.JsonRpcProvider(program.rpc);
|
|
||||||
const main = async () => {
|
|
||||||
var startBlock = parseInt(program.startNum)
|
|
||||||
var endBlock = parseInt(program.endNum)
|
|
||||||
if (isNaN(endBlock) || isNaN(startBlock) || startBlock == 0) {
|
|
||||||
console.error("invalid input, --startNum", program.startNum, "--end", program.endNum)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// if --endNum is not specified, set it to the latest block number.
|
|
||||||
if (endBlock == 0) {
|
|
||||||
endBlock = await provider.getBlockNumber();
|
|
||||||
}
|
|
||||||
if (startBlock > endBlock) {
|
|
||||||
console.error("invalid input, startBlock:",startBlock, " endBlock:", endBlock);
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
let startBalance = await provider.getBalance("0xaa25Aa7a19f9c426E07dee59b12f944f4d9f1DD3", startBlock)
|
|
||||||
let endBalance = await provider.getBalance("0xaa25Aa7a19f9c426E07dee59b12f944f4d9f1DD3", endBlock)
|
|
||||||
const faucetAmount = BigInt(0.3 * 10**18); // Convert 0.3 ether to wei as a BigInt
|
|
||||||
const numFaucetRequest = (startBalance - endBalance) / faucetAmount;
|
|
||||||
|
|
||||||
// Convert BigInt to ether
|
|
||||||
const startBalanceEth = Number(startBalance) / 10**18;
|
|
||||||
const endBalanceEth = Number(endBalance) / 10**18;
|
|
||||||
|
|
||||||
console.log(`Start Balance: ${startBalanceEth} ETH`);
|
|
||||||
console.log(`End Balance: ${endBalanceEth} ETH`);
|
|
||||||
|
|
||||||
console.log("successful faucet request: ",numFaucetRequest);
|
|
||||||
};
|
|
||||||
main().then(() => process.exit(0))
|
|
||||||
.catch((error) => {
|
|
||||||
console.error(error);
|
|
||||||
process.exit(1);
|
|
||||||
});
|
|
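The `numFaucetRequest` figure relies on BigInt division, which truncates toward zero. A minimal sketch of the same arithmetic with made-up balances (all values illustrative):

```
// hypothetical balances in wei; the faucet grants 0.3 BNB per request
const start = 100_000000000000000000n; // 100 BNB (illustrative)
const end = 97_000000000000000000n;    // 97 BNB (illustrative)
const grant = 300000000000000000n;     // 0.3 BNB in wei
console.log((start - end) / grant);    // 10n -- BigInt division truncates
```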
@ -1,70 +0,0 @@
import { ethers } from "ethers";
import program from "commander";

program.option("--rpc <rpc>", "Rpc");
program.option("--startNum <startNum>", "start num")
program.option("--endNum <endNum>", "end num")
program.parse(process.argv);

const provider = new ethers.JsonRpcProvider(program.rpc)

const main = async () => {
    let txCountTotal = 0;
    let gasUsedTotal = 0;
    let inturnBlocks = 0;
    let justifiedBlocks = 0;
    let turnLength = await provider.send("parlia_getTurnLength", [
        ethers.toQuantity(program.startNum)]);
    for (let i = program.startNum; i < program.endNum; i++) {
        let txCount = await provider.send("eth_getBlockTransactionCountByNumber", [
            ethers.toQuantity(i)]);
        txCountTotal += ethers.toNumber(txCount)

        let header = await provider.send("eth_getHeaderByNumber", [
            ethers.toQuantity(i)]);
        let gasUsed = eval(eval(header.gasUsed).toString(10))
        gasUsedTotal += gasUsed
        let difficulty = eval(eval(header.difficulty).toString(10))
        if (difficulty == 2) {
            inturnBlocks += 1
        }
        let timestamp = eval(eval(header.timestamp).toString(10))

        let justifiedNumber = await provider.send("parlia_getJustifiedNumber", [
            ethers.toQuantity(i)]);
        if (justifiedNumber + 1 == i) {
            justifiedBlocks += 1
        } else {
            console.log("justified unexpected", "BlockNumber =", i, "justifiedNumber", justifiedNumber)
        }
        console.log("BlockNumber =", i, "mod =", i % turnLength, "miner =", header.miner, "difficulty =", difficulty, "txCount =", ethers.toNumber(txCount), "gasUsed", gasUsed, "timestamp", timestamp)
    }

    let blockCount = program.endNum - program.startNum
    let txCountPerBlock = txCountTotal / blockCount

    let startHeader = await provider.send("eth_getHeaderByNumber", [
        ethers.toQuantity(program.startNum)]);
    let startTime = eval(eval(startHeader.timestamp).toString(10))
    let endHeader = await provider.send("eth_getHeaderByNumber", [
        ethers.toQuantity(program.endNum)]);
    let endTime = eval(eval(endHeader.timestamp).toString(10))
    let timeCost = endTime - startTime
    let avgBlockTime = timeCost / blockCount
    let inturnBlocksRatio = inturnBlocks / blockCount
    let justifiedBlocksRatio = justifiedBlocks / blockCount
    let tps = txCountTotal / timeCost
    let M = 1000000
    let avgGasUsedPerBlock = gasUsedTotal / blockCount / M
    let avgGasUsedPerSecond = gasUsedTotal / timeCost / M

    console.log("Get the performance between [", program.startNum, ",", program.endNum, ")");
    console.log("txCountPerBlock =", txCountPerBlock, "txCountTotal =", txCountTotal, "BlockCount =", blockCount, "avgBlockTime =", avgBlockTime, "inturnBlocksRatio =", inturnBlocksRatio, "justifiedBlocksRatio =", justifiedBlocksRatio);
    console.log("txCountPerSecond =", tps, "avgGasUsedPerBlock =", avgGasUsedPerBlock, "avgGasUsedPerSecond =", avgGasUsedPerSecond);
};

main().then(() => process.exit(0))
    .catch((error) => {
        console.error(error);
        process.exit(1);
    });
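The nested `eval(eval(header.gasUsed).toString(10))` calls above only decode the 0x-prefixed hex quantities the RPC returns; a hypothetical, safer equivalent (values illustrative):

```
// header fields come back as 0x-prefixed hex strings
const gasUsed = Number("0x1c9c380");          // 30000000
const timestamp = parseInt("0x66321a2b", 16); // same idea, explicit radix
```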
@ -1,164 +0,0 @@
import { ethers } from "ethers";
import program from "commander";

// Global Options:
program.option("--rpc <rpc>", "Rpc");
// GetTxCount Options:
program.option("--startNum <startNum>", "start num")
program.option("--endNum <endNum>", "end num")
program.option("--miner <miner>", "miner", "")
// GetVersion Options:
program.option("--num <Num>", "validator num", 21)
// GetTopAddr Options:
program.option("--topNum <Num>", "top num of address to be displayed", 20)

program.parse(process.argv);

const provider = new ethers.JsonRpcProvider(program.rpc)

function printUsage() {
    console.log("Usage:");
    console.log("  node getchainstatus.js --help");
    console.log("  node getchainstatus.js [subcommand] [options]");
    console.log("\nSubcommands:");
    console.log("  GetTxCount: find the block with max tx size of a range");
    console.log("  GetVersion: dump validators' binary version, based on Header.Extra");
    console.log("  GetTopAddr: get hottest $topNum target address within a block range");
    console.log("\nOptions:");
    console.log("  --rpc       specify the url of RPC endpoint");
    console.log("  --startNum  the start block number, for command GetTxCount");
    console.log("  --endNum    the end block number, for command GetTxCount");
    console.log("  --miner     the miner address, for command GetTxCount");
    console.log("  --num       the number of blocks to be checked, for command GetVersion");
    console.log("  --topNum    the top num of addresses to be displayed, for command GetTopAddr");
    console.log("\nExample:");
    // mainnet https://bsc-mainnet.nodereal.io/v1/454e504917db4f82b756bd0cf6317dce
    console.log("  node getchainstatus.js GetTxCount --rpc https://bsc-testnet-dataseed.bnbchain.org --startNum 40000001 --endNum 40000005")
    console.log("  node getchainstatus.js GetVersion --rpc https://bsc-testnet-dataseed.bnbchain.org --num 21")
    console.log("  node getchainstatus.js GetTopAddr --rpc https://bsc-testnet-dataseed.bnbchain.org --startNum 40000001 --endNum 40000010 --topNum 10")
}

// 1.cmd: "GetTxCount", usage:
// node getchainstatus.js GetTxCount --rpc https://bsc-testnet-dataseed.bnbchain.org \
//     --startNum 40000001 --endNum 40000005 \
//     --miner(optional): specified: find the max txCounter from the specified validator,
//                        not specified: find the max txCounter from all validators
async function getTxCount() {
    let txCount = 0;
    let num = 0;
    console.log("Find the max txs count between", program.startNum, "and", program.endNum);
    for (let i = program.startNum; i < program.endNum; i++) {
        if (program.miner !== "") {
            let blockData = await provider.getBlock(Number(i))
            if (program.miner !== blockData.miner) {
                continue
            }
        }
        let x = await provider.send("eth_getBlockTransactionCountByNumber", [
            ethers.toQuantity(i)]);
        let a = ethers.toNumber(x)
        if (a > txCount) {
            num = i;
            txCount = a;
        }
    }
    console.log("BlockNum = ", num, "TxCount =", txCount);
}

// 2.cmd: "GetVersion", usage:
// node getchainstatus.js GetVersion \
//     --rpc https://bsc-testnet-dataseed.bnbchain.org \
//     --num(optional): default 21, the number of blocks that will be checked
async function getBinaryVersion() {
    const blockNum = await provider.getBlockNumber();
    console.log(blockNum);
    for (let i = 0; i < program.num; i++) {
        let blockData = await provider.getBlock(blockNum - i);
        // 1.get Geth client version
        let major = ethers.toNumber(ethers.dataSlice(blockData.extraData, 2, 3))
        let minor = ethers.toNumber(ethers.dataSlice(blockData.extraData, 3, 4))
        let patch = ethers.toNumber(ethers.dataSlice(blockData.extraData, 4, 5))

        // 2.get minimum txGasPrice based on the last non-zero-gasprice transaction
        let lastGasPrice = 0
        for (let txIndex = blockData.transactions.length - 1; txIndex >= 0; txIndex--) {
            let txHash = blockData.transactions[txIndex]
            let txData = await provider.getTransaction(txHash);
            if (txData.gasPrice == 0) {
                continue
            }
            lastGasPrice = txData.gasPrice
            break
        }
        console.log(blockData.miner, "version =", major + "." + minor + "." + patch, " MinGasPrice = " + lastGasPrice)
    }
};

// 3.cmd: "GetTopAddr", usage:
// node getchainstatus.js GetTopAddr \
//     --rpc https://bsc-testnet-dataseed.bnbchain.org \
//     --startNum 40000001 --endNum 40000005 \
//     --topNum(optional): the top num of address to be displayed, default 20
function getTopKElements(map, k) {
    let entries = Array.from(map.entries());
    entries.sort((a, b) => b[1] - a[1]);
    return entries.slice(0, k);
}

async function getTopAddr() {
    let countMap = new Map();
    let totalTxs = 0
    console.log("Find the top target address, between", program.startNum, "and", program.endNum);
    for (let i = program.startNum; i <= program.endNum; i++) {
        let blockData = await provider.getBlock(Number(i), true)
        totalTxs += blockData.transactions.length
        for (let txIndex = blockData.transactions.length - 1; txIndex >= 0; txIndex--) {
            let txData = await blockData.getTransaction(txIndex)
            if (txData.to == null) {
                console.log("Contract creation,txHash:", txData.hash)
                continue
            }
            let toAddr = txData.to;
            if (countMap.has(toAddr)) {
                countMap.set(toAddr, countMap.get(toAddr) + 1);
            } else {
                countMap.set(toAddr, 1);
            }
        }
        console.log("progress:", (program.endNum - i), "blocks left", "totalTxs", totalTxs)
    }
    let tops = getTopKElements(countMap, program.topNum)
    tops.forEach((value, key) => {
        // value: [ '0x40661F989826CC641Ce1601526Bb16a4221412c8', 71 ]
        console.log(key + ":", value[0], " ", value[1], " ", ((value[1] * 100) / totalTxs).toFixed(2) + "%");
    });
};

const main = async () => {
    if (process.argv.length <= 2) {
        console.error('invalid process.argv.length', process.argv.length);
        printUsage()
        return
    }
    const cmd = process.argv[2]
    if (cmd === "--help") {
        printUsage()
        return
    }
    if (cmd === "GetTxCount") {
        await getTxCount()
    } else if (cmd === "GetVersion") {
        await getBinaryVersion()
    } else if (cmd === "GetTopAddr") {
        await getTopAddr()
    } else {
        console.log("unsupported cmd", cmd);
        printUsage()
    }
}

main().then(() => process.exit(0))
    .catch((error) => {
        console.error(error);
        process.exit(1);
    });
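For orientation, GetTopAddr output resembles the following; the address comes from the example comment in the code above, while the counts and percentage are illustrative:

```
Find the top target address, between 40000001 and 40000010
progress: 0 blocks left totalTxs 1000
0: 0x40661F989826CC641Ce1601526Bb16a4221412c8   71   7.10%
```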
@ -1,119 +0,0 @@
import { ethers } from "ethers";
import program from "commander";

program.option("--Rpc <Rpc>", "Rpc");
program.option("--Num <Num>", "num", 0)
program.parse(process.argv);

const provider = new ethers.JsonRpcProvider(program.Rpc);

const slashAbi = [
    "function getSlashIndicator(address validatorAddr) external view returns (uint256, uint256)"
]
const validatorSetAbi = [
    "function getLivingValidators() external view returns (address[], bytes[])"
]
const stakeHubAbi = [
    "function getValidatorDescription(address validatorAddr) external view returns (tuple(string, string, string, string))",
    "function consensusToOperator(address consensusAddr) public view returns (address)"
]
const addrValidatorSet = '0x0000000000000000000000000000000000001000';
const validatorSet = new ethers.Contract(addrValidatorSet, validatorSetAbi, provider);

const addrSlash = '0x0000000000000000000000000000000000001001';
const slashIndicator = new ethers.Contract(addrSlash, slashAbi, provider)

const addrStakeHub = '0x0000000000000000000000000000000000002002';
const stakeHub = new ethers.Contract(addrStakeHub, stakeHubAbi, provider)

const validatorMap = new Map([
    // BSC
    ["0x37e9627A91DD13e453246856D58797Ad6583D762", "LegendII"],
    ["0xB4647b856CB9C3856d559C885Bed8B43e0846a47", "CertiK"],
    ["0x75B851a27D7101438F45fce31816501193239A83", "Figment"],
    ["0x502aECFE253E6AA0e8D2A06E12438FFeD0Fe16a0", "BscScan"],
    ["0xCa503a7eD99eca485da2E875aedf7758472c378C", "InfStones"],
    ["0x5009317FD4F6F8FeEa9dAe41E5F0a4737BB7A7D5", "NodeReal"],
    ["0x1cFDBd2dFf70C6e2e30df5012726F87731F38164", "Tranchess"],
    ["0xF8de5e61322302b2c6e0a525cC842F10332811bf", "Namelix"],
    ["0xCcB42A9b8d6C46468900527Bc741938E78AB4577", "Turing"],
    ["0x9f1b7FAE54BE07F4FEE34Eb1aaCb39A1F7B6FC92", "TWStaking"],
    ["0x7E1FdF03Eb3aC35BF0256694D7fBe6B6d7b3E0c8", "LegendIII"],
    ["0x7b501c7944185130DD4aD73293e8Aa84eFfDcee7", "MathW"],
    ["0x58567F7A51a58708C8B40ec592A38bA64C0697De", "Legend"],
    ["0x460A252B4fEEFA821d3351731220627D7B7d1F3d", "Defibit"],
    ["0x8A239732871AdC8829EA2f47e94087C5FBad47b6", "The48Club"],
    ["0xD3b0d838cCCEAe7ebF1781D11D1bB741DB7Fe1A7", "BNBEve"],
    ["0xF8B99643fAfC79d9404DE68E48C4D49a3936f787", "Avengers"],
    ["0x4e5acf9684652BEa56F2f01b7101a225Ee33d23f", "HashKey"],
    ["0x9bb56C2B4DBE5a06d79911C9899B6f817696ACFc", "Feynman"],
    ["0xbdcc079BBb23C1D9a6F36AA31309676C258aBAC7", "Fuji"],
    ["0x38944092685a336CB6B9ea58836436709a2adC89", "Shannon"],
    ["0xfC1004C0f296Ec3Df4F6762E9EabfcF20EB304a2", "Aoraki"],
    ["0xa0884bb00E5F23fE2427f0E5eC9E51F812848563", "Coda"],
    ["0xe7776De78740f28a96412eE5cbbB8f90896b11A5", "Ankr"],
    ["0xA2D969E82524001Cb6a2357dBF5922B04aD2FCD8", "Pexmons"],
    ["0x5cf810AB8C718ac065b45f892A5BAdAB2B2946B9", "Zen"],
    ["0x4d15D9BCd0c2f33E7510c0de8b42697CA558234a", "LegendVII"],
    ["0x1579ca96EBd49A0B173f86C372436ab1AD393380", "LegendV"],
    ["0xd1F72d433f362922f6565FC77c25e095B29141c8", "LegendVI"],
    ["0xf9814D93b4d904AaA855cBD4266D6Eb0Ec1Aa478", "Legend8"],
    ["0x025a4e09Ea947b8d695f53ddFDD48ddB8F9B06b7", "Ciscox"],
    ["0xE9436F6F30b4B01b57F2780B2898f3820EbD7B98", "LegendIV"],
    ["0xC2d534F079444E6E7Ff9DabB3FD8a26c607932c8", "Axion"],
    ["0x9F7110Ba7EdFda83Fc71BeA6BA3c0591117b440D", "LegendIX"],
    ["0xB997Bf1E3b96919fBA592c1F61CE507E165Ec030", "Seoraksan"],
    ["0x286C1b674d48cFF67b4096b6c1dc22e769581E91", "Sigm8"],
    ["0x73A26778ef9509a6E94b55310eE7233795a9EB25", "Coinlix"],
    ["0x18c44f4FBEde9826C7f257d500A65a3D5A8edebc", "Nozti"],
    ["0xA100FCd08cE722Dc68Ddc3b54237070Cb186f118", "Tiollo"],
    ["0x0F28847cfdbf7508B13Ebb9cEb94B2f1B32E9503", "Raptas"],
    ["0xfD85346c8C991baC16b9c9157e6bdfDACE1cD7d7", "Glorin"],
    ["0x978F05CED39A4EaFa6E8FD045Fe2dd6Da836c7DF", "NovaX"],
    ["0xd849d1dF66bFF1c2739B4399425755C2E0fAbbAb", "Nexa"],
    ["0xA015d9e9206859c13201BB3D6B324d6634276534", "Star"],
    ["0x5ADde0151BfAB27f329e5112c1AeDeed7f0D3692", "Veri"],
    // Chapel
    ["0x08265dA01E1A65d62b903c7B34c08cB389bF3D99", "Ararat"],
    ["0x7f5f2cF1aec83bF0c74DF566a41aa7ed65EA84Ea", "Kita"],
    ["0x53387F3321FD69d1E030BB921230dFb188826AFF", "Fuji"],
    ["0x76D76ee8823dE52A1A431884c2ca930C5e72bff3", "Seoraksan"],
    ["0xd447b49CD040D20BC21e49ffEa6487F5638e4346", "Everest"],
    ["0x1a3d9D7A717D64e6088aC937d5aAcDD3E20ca963", "Elbrus"],
    ["0x40D3256EB0BaBE89f0ea54EDAa398513136612f5", "Bloxroute"],
    ["0xF9a1Db0d6f22Bd78ffAECCbc8F47c83Df9FBdbCf", "Test"]
]);

const main = async () => {
    let blockNum = ethers.getNumber(program.Num)
    if (blockNum === 0) {
        blockNum = await provider.getBlockNumber()
    }
    let block = await provider.getBlock(blockNum)
    console.log("At block", blockNum, "time", block.date)
    const data = await validatorSet.getLivingValidators({blockTag: blockNum})
    let totalSlash = 0
    for (let i = 0; i < data[0].length; i++) {
        let addr = data[0][i];
        var val
        if (!validatorMap.has(addr)) {
            let opAddr = await stakeHub.consensusToOperator(addr, {blockTag: blockNum})
            let value = await stakeHub.getValidatorDescription(opAddr, {blockTag: blockNum})
            val = value[0]
            console.log(addr, val)
        } else {
            val = validatorMap.get(addr)
        }
        let info = await slashIndicator.getSlashIndicator(addr, {blockTag: blockNum})
        let count = ethers.toNumber(info[1])
        totalSlash += count
        console.log("Slash:", count, addr, val)
    }
    console.log("Total slash count", totalSlash)
};
main().then(() => process.exit(0))
    .catch((error) => {
        console.error(error);
        process.exit(1);
    });
41 cmd/jsutils/gettxcount.js Normal file
@ -0,0 +1,41 @@
import { ethers } from "ethers";
import program from "commander";

program.option("--rpc <rpc>", "Rpc");
program.option("--startNum <startNum>", "start num")
program.option("--endNum <endNum>", "end num")
// --miner:
//     specified: find the max txCounter from the specified validator
//     not specified: find the max txCounter from all validators
program.option("--miner <miner>", "miner", "")
program.parse(process.argv);

const provider = new ethers.JsonRpcProvider(program.rpc)

const main = async () => {
    let txCount = 0;
    let num = 0;
    console.log("Find the max txs count between", program.startNum, "and", program.endNum);
    for (let i = program.startNum; i < program.endNum; i++) {
        if (program.miner !== "") {
            let blockData = await provider.getBlock(Number(i))
            if (program.miner !== blockData.miner) {
                continue
            }
        }
        let x = await provider.send("eth_getBlockTransactionCountByNumber", [
            ethers.toQuantity(i)]);
        let a = ethers.toNumber(x)
        if (a > txCount) {
            num = i;
            txCount = a;
        }
    }
    console.log("BlockNum = ", num, "TxCount =", txCount);
};

main().then(() => process.exit(0))
    .catch((error) => {
        console.error(error);
        process.exit(1);
    });
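A hedged invocation example, mirroring the options the script registers (the endpoint URL is illustrative, and the miner address is borrowed from the validator map above):

```bash
node gettxcount.js --rpc https://bsc-testnet-dataseed.bnbchain.org --startNum 40000001 --endNum 40000005 --miner 0x37e9627A91DD13e453246856D58797Ad6583D762
```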
38 cmd/jsutils/getvalidatorversion.js Normal file
@ -0,0 +1,38 @@
import { ethers } from "ethers";
import program from "commander";

program.option("--Rpc <Rpc>", "Rpc");
program.option("--Num <Num>", "validator num", 21)
program.parse(process.argv);

const provider = new ethers.JsonRpcProvider(program.Rpc);

const main = async () => {
    const blockNum = await provider.getBlockNumber();
    console.log(blockNum);
    for (let i = 0; i < program.Num; i++) {
        let blockData = await provider.getBlock(blockNum - i);
        // 1.get Geth client version
        let major = ethers.toNumber(ethers.dataSlice(blockData.extraData, 2, 3))
        let minor = ethers.toNumber(ethers.dataSlice(blockData.extraData, 3, 4))
        let patch = ethers.toNumber(ethers.dataSlice(blockData.extraData, 4, 5))

        // 2.get minimum txGasPrice based on the last non-zero-gasprice transaction
        let lastGasPrice = 0
        for (let txIndex = blockData.transactions.length - 1; txIndex >= 0; txIndex--) {
            let txHash = blockData.transactions[txIndex]
            let txData = await provider.getTransaction(txHash);
            if (txData.gasPrice == 0) {
                continue
            }
            lastGasPrice = txData.gasPrice
            break
        }
        console.log(blockData.miner, "version =", major + "." + minor + "." + patch, " MinGasPrice = " + lastGasPrice)
    }
};
main().then(() => process.exit(0))
    .catch((error) => {
        console.error(error);
        process.exit(1);
    });
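The version decode reads three single bytes at offsets 2..4 of `extraData`; a hypothetical worked example (the prefix bytes are illustrative):

```
extraData = 0xd883010406...          // illustrative vanity prefix
dataSlice(extraData, 2, 3) = 0x01    -> major = 1
dataSlice(extraData, 3, 4) = 0x04    -> minor = 4
dataSlice(extraData, 4, 5) = 0x06    -> patch = 6
// printed as "version = 1.4.6"
```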
@ -35,11 +35,8 @@ import (
 	"strings"
 	"time"
 
-	"github.com/ethereum/go-ethereum/internal/version"
-
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/accounts/keystore"
-	"github.com/ethereum/go-ethereum/beacon/fakebeacon"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/fdlimit"
 	"github.com/ethereum/go-ethereum/core"
@ -308,14 +305,9 @@ var (
 		Usage:    "Manually specify the Rialto Genesis Hash, to trigger builtin network logic",
 		Category: flags.EthCategory,
 	}
-	OverridePassedForkTime = &cli.Uint64Flag{
-		Name:     "override.passedforktime",
-		Usage:    "Manually specify the hard fork timestamp except the last one, overriding the bundled setting",
-		Category: flags.EthCategory,
-	}
-	OverrideBohr = &cli.Uint64Flag{
-		Name:     "override.bohr",
-		Usage:    "Manually specify the Bohr fork timestamp, overriding the bundled setting",
+	OverrideCancun = &cli.Uint64Flag{
+		Name:     "override.cancun",
+		Usage:    "Manually specify the Cancun fork timestamp, overriding the bundled setting",
 		Category: flags.EthCategory,
 	}
 	OverrideVerkle = &cli.Uint64Flag{
@ -347,12 +339,6 @@ var (
 		Value:    params.BreatheBlockInterval,
 		Category: flags.EthCategory,
 	}
-	OverrideFixedTurnLength = &cli.Uint64Flag{
-		Name:     "override.fixedturnlength",
-		Usage:    "It use fixed or random values for turn length instead of reading from the contract, only for testing purpose",
-		Value:    params.FixedTurnLength,
-		Category: flags.EthCategory,
-	}
 	SyncModeFlag = &flags.TextMarshalerFlag{
 		Name:  "syncmode",
 		Usage: `Blockchain sync mode ("snap" or "full")`,
@ -1091,7 +1077,6 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
 		Name:     "block-amount-reserved",
 		Usage:    "Sets the expected remained amount of blocks for offline block prune",
 		Category: flags.BlockHistoryCategory,
-		Value:    params.FullImmutabilityThreshold,
 	}
 
 	CheckSnapshotWithMPT = &cli.BoolFlag{
@ -1149,25 +1134,6 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
 		Value:    params.DefaultExtraReserveForBlobRequests,
 		Category: flags.MiscCategory,
 	}
-
-	// Fake beacon
-	FakeBeaconEnabledFlag = &cli.BoolFlag{
-		Name:     "fake-beacon",
-		Usage:    "Enable the HTTP-RPC server of fake-beacon",
-		Category: flags.APICategory,
-	}
-	FakeBeaconAddrFlag = &cli.StringFlag{
-		Name:     "fake-beacon.addr",
-		Usage:    "HTTP-RPC server listening addr of fake-beacon",
-		Value:    fakebeacon.DefaultAddr,
-		Category: flags.APICategory,
-	}
-	FakeBeaconPortFlag = &cli.IntFlag{
-		Name:     "fake-beacon.port",
-		Usage:    "HTTP-RPC server listening port of fake-beacon",
-		Value:    fakebeacon.DefaultPort,
-		Category: flags.APICategory,
-	}
 )
 
 var (
@ -1186,6 +1152,7 @@ var (
 		DBEngineFlag,
 		StateSchemeFlag,
 		HttpHeaderFlag,
+		MultiDataBaseFlag,
 	}
 )
 
@ -2105,7 +2072,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 		}
 		cfg.Genesis = core.DefaultBSCGenesisBlock()
 		SetDNSDiscoveryDefaults(cfg, params.BSCGenesisHash)
-	case ctx.Bool(ChapelFlag.Name) || cfg.NetworkId == 97:
+	case ctx.Bool(ChapelFlag.Name):
 		if !ctx.IsSet(NetworkIdFlag.Name) {
 			cfg.NetworkId = 97
 		}
@ -2319,67 +2286,6 @@ func EnableNodeInfo(poolConfig *legacypool.Config, nodeInfo *p2p.NodeInfo) Setup
 	}
 }
 
-func EnableNodeTrack(ctx *cli.Context, cfg *ethconfig.Config, stack *node.Node) SetupMetricsOption {
-	nodeInfo := stack.Server().NodeInfo()
-	return func() {
-		// register node info into metrics
-		metrics.NewRegisteredLabel("node-stats", nil).Mark(map[string]interface{}{
-			"NodeType":       parseNodeType(),
-			"ENR":            nodeInfo.ENR,
-			"Mining":         ctx.Bool(MiningEnabledFlag.Name),
-			"Etherbase":      parseEtherbase(cfg),
-			"MiningFeatures": parseMiningFeatures(ctx, cfg),
-			"DBFeatures":     parseDBFeatures(cfg, stack),
-		})
-	}
-}
-
-func parseEtherbase(cfg *ethconfig.Config) string {
-	if cfg.Miner.Etherbase == (common.Address{}) {
-		return ""
-	}
-	return cfg.Miner.Etherbase.String()
-}
-
-func parseNodeType() string {
-	git, _ := version.VCS()
-	version := []string{params.VersionWithMeta}
-	if len(git.Commit) >= 7 {
-		version = append(version, git.Commit[:7])
-	}
-	if git.Date != "" {
-		version = append(version, git.Date)
-	}
-	arch := []string{runtime.GOOS, runtime.GOARCH}
-	infos := []string{"BSC", strings.Join(version, "-"), strings.Join(arch, "-"), runtime.Version()}
-	return strings.Join(infos, "/")
-}
-
-func parseDBFeatures(cfg *ethconfig.Config, stack *node.Node) string {
-	var features []string
-	if cfg.StateScheme == rawdb.PathScheme {
-		features = append(features, "PBSS")
-	}
-	if stack.CheckIfMultiDataBase() {
-		features = append(features, "MultiDB")
-	}
-	return strings.Join(features, "|")
-}
-
-func parseMiningFeatures(ctx *cli.Context, cfg *ethconfig.Config) string {
-	if !ctx.Bool(MiningEnabledFlag.Name) {
-		return ""
-	}
-	var features []string
-	if cfg.Miner.Mev.Enabled {
-		features = append(features, "MEV")
-	}
-	if cfg.Miner.VoteEnable {
-		features = append(features, "FFVoting")
-	}
-	return strings.Join(features, "|")
-}
-
 func SetupMetrics(ctx *cli.Context, options ...SetupMetricsOption) {
 	if metrics.Enabled {
 		log.Info("Enabling metrics collection")
@ -83,7 +83,7 @@ func TestHistoryImportAndExport(t *testing.T) {
 		t.Fatalf("unable to initialize chain: %v", err)
 	}
 	if _, err := chain.InsertChain(blocks); err != nil {
-		t.Fatalf("error inserting chain: %v", err)
+		t.Fatalf("error insterting chain: %v", err)
 	}
 
 	// Make temp directory for era files.
@ -163,7 +163,7 @@ func TestHistoryImportAndExport(t *testing.T) {
 
 	// Now import Era.
 	freezer := t.TempDir()
-	db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false, false)
+	db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false)
 	if err != nil {
 		panic(err)
 	}
@ -59,9 +59,6 @@ type ChainHeaderReader interface {
 	// GetHighestVerifiedHeader retrieves the highest header verified.
 	GetHighestVerifiedHeader() *types.Header
 
-	// GetVerifiedBlockByHash retrieves the highest verified block.
-	GetVerifiedBlockByHash(hash common.Hash) *types.Header
-
 	// ChasingHead return the best chain head of peers.
 	ChasingHead() *types.Header
 }
@ -2306,19 +2306,6 @@ const validatorSetABI = `
 			],
 			"stateMutability": "view"
 		},
-		{
-			"inputs": [],
-			"name": "getTurnLength",
-			"outputs": [
-				{
-					"internalType": "uint256",
-					"name": "",
-					"type": "uint256"
-				}
-			],
-			"stateMutability": "view",
-			"type": "function"
-		},
 		{
 			"type": "function",
 			"name": "getValidators",
@ -31,7 +31,13 @@ type API struct {
 
 // GetSnapshot retrieves the state snapshot at a given block.
 func (api *API) GetSnapshot(number *rpc.BlockNumber) (*Snapshot, error) {
-	header := api.getHeader(number)
+	// Retrieve the requested block number (or current if none requested)
+	var header *types.Header
+	if number == nil || *number == rpc.LatestBlockNumber {
+		header = api.chain.CurrentHeader()
+	} else {
+		header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
+	}
 	// Ensure we have an actually valid block and return its snapshot
 	if header == nil {
 		return nil, errUnknownBlock
@ -50,7 +56,13 @@ func (api *API) GetSnapshotAtHash(hash common.Hash) (*Snapshot, error) {
 
 // GetValidators retrieves the list of validators at the specified block.
 func (api *API) GetValidators(number *rpc.BlockNumber) ([]common.Address, error) {
-	header := api.getHeader(number)
+	// Retrieve the requested block number (or current if none requested)
+	var header *types.Header
+	if number == nil || *number == rpc.LatestBlockNumber {
+		header = api.chain.CurrentHeader()
+	} else {
+		header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
+	}
 	// Ensure we have an actually valid block and return the validators from its snapshot
 	if header == nil {
 		return nil, errUnknownBlock
@ -74,65 +86,3 @@ func (api *API) GetValidatorsAtHash(hash common.Hash) ([]common.Address, error)
 	}
 	return snap.validators(), nil
 }
-
-func (api *API) GetJustifiedNumber(number *rpc.BlockNumber) (uint64, error) {
-	header := api.getHeader(number)
-	// Ensure we have an actually valid block and return the validators from its snapshot
-	if header == nil {
-		return 0, errUnknownBlock
-	}
-	snap, err := api.parlia.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
-	if err != nil || snap.Attestation == nil {
-		return 0, err
-	}
-	return snap.Attestation.TargetNumber, nil
-}
-
-func (api *API) GetTurnLength(number *rpc.BlockNumber) (uint8, error) {
-	header := api.getHeader(number)
-	// Ensure we have an actually valid block and return the validators from its snapshot
-	if header == nil {
-		return 0, errUnknownBlock
-	}
-	snap, err := api.parlia.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
-	if err != nil || snap.TurnLength == 0 {
-		return 0, err
-	}
-	return snap.TurnLength, nil
-}
-
-func (api *API) GetFinalizedNumber(number *rpc.BlockNumber) (uint64, error) {
-	header := api.getHeader(number)
-	// Ensure we have an actually valid block and return the validators from its snapshot
-	if header == nil {
-		return 0, errUnknownBlock
-	}
-	snap, err := api.parlia.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
-	if err != nil || snap.Attestation == nil {
-		return 0, err
-	}
-	return snap.Attestation.SourceNumber, nil
-}
-
-func (api *API) getHeader(number *rpc.BlockNumber) (header *types.Header) {
-	currentHeader := api.chain.CurrentHeader()
-
-	if number == nil || *number == rpc.LatestBlockNumber {
-		header = currentHeader // current if none requested
-	} else if *number == rpc.SafeBlockNumber {
-		justifiedNumber, _, err := api.parlia.GetJustifiedNumberAndHash(api.chain, []*types.Header{currentHeader})
-		if err != nil {
-			return nil
-		}
-		header = api.chain.GetHeaderByNumber(justifiedNumber)
-	} else if *number == rpc.FinalizedBlockNumber {
-		header = api.parlia.GetFinalizedHeader(api.chain, currentHeader)
-	} else if *number == rpc.PendingBlockNumber {
-		return nil // no pending blocks on bsc
-	} else if *number == rpc.EarliestBlockNumber {
-		header = api.chain.GetHeaderByNumber(0)
-	} else {
-		header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
-	}
-	return
-}
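The methods removed here back the `parlia_*` JSON-RPC calls that the JS tools above depend on (`parlia_getJustifiedNumber`, `parlia_getTurnLength`). A hypothetical raw query against a node that still serves them (URL and params format are illustrative):

```bash
curl -s -X POST -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","method":"parlia_getJustifiedNumber","params":["latest"],"id":1}' \
  https://bsc-testnet-dataseed.bnbchain.org
```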
@ -1,91 +0,0 @@
package parlia

import (
	"context"
	"errors"
	"math/big"
	mrand "math/rand"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/systemcontracts"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/internal/ethapi"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rpc"
)

func (p *Parlia) getTurnLength(chain consensus.ChainHeaderReader, header *types.Header) (*uint8, error) {
	parent := chain.GetHeaderByHash(header.ParentHash)
	if parent == nil {
		return nil, errors.New("parent not found")
	}

	var turnLength uint8
	if p.chainConfig.IsBohr(parent.Number, parent.Time) {
		turnLengthFromContract, err := p.getTurnLengthFromContract(parent)
		if err != nil {
			return nil, err
		}
		if turnLengthFromContract == nil {
			return nil, errors.New("unexpected error when getTurnLengthFromContract")
		}
		turnLength = uint8(turnLengthFromContract.Int64())
	} else {
		turnLength = defaultTurnLength
	}
	log.Debug("getTurnLength", "turnLength", turnLength)

	return &turnLength, nil
}

func (p *Parlia) getTurnLengthFromContract(header *types.Header) (turnLength *big.Int, err error) {
	// mock to get turnLength from the contract
	if params.FixedTurnLength >= 1 && params.FixedTurnLength <= 9 {
		if params.FixedTurnLength == 2 {
			return p.getRandTurnLength(header)
		}
		return big.NewInt(int64(params.FixedTurnLength)), nil
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	method := "getTurnLength"
	toAddress := common.HexToAddress(systemcontracts.ValidatorContract)
	gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2))

	data, err := p.validatorSetABI.Pack(method)
	if err != nil {
		log.Error("Unable to pack tx for getTurnLength", "error", err)
		return nil, err
	}
	msgData := (hexutil.Bytes)(data)

	blockNr := rpc.BlockNumberOrHashWithHash(header.Hash(), false)
	result, err := p.ethAPI.Call(ctx, ethapi.TransactionArgs{
		Gas:  &gas,
		To:   &toAddress,
		Data: &msgData,
	}, &blockNr, nil, nil)
	if err != nil {
		return nil, err
	}

	if err := p.validatorSetABI.UnpackIntoInterface(&turnLength, method, result); err != nil {
		return nil, err
	}

	return turnLength, nil
}

// getRandTurnLength returns a random valid value, used to test switching turn length
func (p *Parlia) getRandTurnLength(header *types.Header) (turnLength *big.Int, err error) {
	turnLengths := [8]uint8{1, 3, 4, 5, 6, 7, 8, 9}
	r := mrand.New(mrand.NewSource(int64(header.Time)))
	lengthIndex := int(r.Int31n(int32(len(turnLengths))))
	return big.NewInt(int64(turnLengths[lengthIndex])), nil
}
@ -6,7 +6,6 @@ import (
 	"encoding/hex"
 	"errors"
 	"fmt"
-	"io"
 	"math"
 	"math/big"
 	"math/rand"
@ -54,13 +53,11 @@ const (
 	inMemoryHeaders = 86400 // Number of recent headers to keep in memory for double sign detection,
 
 	checkpointInterval = 1024        // Number of blocks after which to save the snapshot to the database
-	defaultEpochLength = uint64(200) // Default number of blocks of checkpoint to update validatorSet from contract
-	defaultTurnLength  = uint8(1)    // Default consecutive number of blocks a validator receives priority for block production
+	defaultEpochLength = uint64(100) // Default number of blocks of checkpoint to update validatorSet from contract
 
 	extraVanity      = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
 	extraSeal        = 65 // Fixed number of extra-data suffix bytes reserved for signer seal
 	nextForkHashSize = 4  // Fixed number of extra-data suffix bytes reserved for nextForkHash.
-	turnLengthSize   = 1  // Fixed number of extra-data suffix bytes reserved for turnLength
 
 	validatorBytesLengthBeforeLuban = common.AddressLength
 	validatorBytesLength            = common.AddressLength + types.BLSPublicKeyLength
@ -68,6 +65,7 @@ const (
 
 	wiggleTime         = uint64(1) // second, Random delay (per signer) to allow concurrent signers
 	initialBackOffTime = uint64(1) // second
+	processBackOffTime = uint64(1) // second
 
 	systemRewardPercent = 4 // it means 1/2^4 = 1/16 percentage of gas fee incoming will be distributed to system
 
@ -128,10 +126,6 @@ var (
 	// invalid list of validators (i.e. non divisible by 20 bytes).
 	errInvalidSpanValidators = errors.New("invalid validator list on sprint end block")
 
-	// errInvalidTurnLength is returned if a block contains an
-	// invalid length of turn (i.e. no data left after parsing validators).
-	errInvalidTurnLength = errors.New("invalid turnLength")
-
 	// errInvalidMixDigest is returned if a block's mix digest is non-zero.
 	errInvalidMixDigest = errors.New("non-zero mix digest")
 
@ -142,10 +136,6 @@ var (
 	// list of validators different than the one the local node calculated.
 	errMismatchingEpochValidators = errors.New("mismatching validator list on epoch block")
 
-	// errMismatchingEpochTurnLength is returned if a sprint block contains a
-	// turn length different than the one the local node calculated.
-	errMismatchingEpochTurnLength = errors.New("mismatching turn length on epoch block")
-
 	// errInvalidDifficulty is returned if the difficulty of a block is missing.
 	errInvalidDifficulty = errors.New("invalid difficulty")
 
@ -317,10 +307,6 @@ func New(
 	return c
 }
 
-func (p *Parlia) Period() uint64 {
-	return p.config.Period
-}
-
 func (p *Parlia) IsSystemTransaction(tx *types.Transaction, header *types.Header) (bool, error) {
 	// deploy a contract
 	if tx.To() == nil {
@ -379,7 +365,6 @@ func (p *Parlia) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*typ
 // On luban fork, we introduce vote attestation into the header's extra field, so extra format is different from before.
 // Before luban fork:   |---Extra Vanity---|---Validators Bytes (or Empty)---|---Extra Seal---|
 // After luban fork:    |---Extra Vanity---|---Validators Number and Validators Bytes (or Empty)---|---Vote Attestation (or Empty)---|---Extra Seal---|
-// After bohr fork:     |---Extra Vanity---|---Validators Number and Validators Bytes (or Empty)---|---Turn Length (or Empty)---|---Vote Attestation (or Empty)---|---Extra Seal---|
 func getValidatorBytesFromHeader(header *types.Header, chainConfig *params.ChainConfig, parliaConfig *params.ParliaConfig) []byte {
 	if len(header.Extra) <= extraVanity+extraSeal {
 		return nil
@ -396,15 +381,11 @@ func getValidatorBytesFromHeader(header *types.Header, chainConfig *params.Chain
 		return nil
 	}
 	num := int(header.Extra[extraVanity])
-	start := extraVanity + validatorNumberSize
-	end := start + num*validatorBytesLength
-	extraMinLen := end + extraSeal
-	if chainConfig.IsBohr(header.Number, header.Time) {
-		extraMinLen += turnLengthSize
-	}
-	if num == 0 || len(header.Extra) < extraMinLen {
+	if num == 0 || len(header.Extra) <= extraVanity+extraSeal+num*validatorBytesLength {
 		return nil
 	}
+	start := extraVanity + validatorNumberSize
+	end := start + num*validatorBytesLength
 	return header.Extra[start:end]
 }
 
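For orientation, a sketch of the post-Luban extra-data layout this parsing assumes, with sizes taken from the constants earlier in this file (validatorBytesLength = 20-byte address + 48-byte BLS key = 68):

```
|-- Extra Vanity (32) --|-- Num (1) --|-- Validators (Num x 68) --|-- Vote Attestation (RLP, optional) --|-- Extra Seal (65) --|
```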
@ -423,14 +404,11 @@ func getVoteAttestationFromHeader(header *types.Header, chainConfig *params.Chai
 		attestationBytes = header.Extra[extraVanity : len(header.Extra)-extraSeal]
 	} else {
 		num := int(header.Extra[extraVanity])
-		start := extraVanity + validatorNumberSize + num*validatorBytesLength
-		if chainConfig.IsBohr(header.Number, header.Time) {
-			start += turnLengthSize
-		}
-		end := len(header.Extra) - extraSeal
-		if end <= start {
+		if len(header.Extra) <= extraVanity+extraSeal+validatorNumberSize+num*validatorBytesLength {
 			return nil, nil
 		}
+		start := extraVanity + validatorNumberSize + num*validatorBytesLength
+		end := len(header.Extra) - extraSeal
 		attestationBytes = header.Extra[start:end]
 	}
 
@ -622,11 +600,15 @@ func (p *Parlia) verifyHeader(chain consensus.ChainHeaderReader, header *types.H
 			return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas)
 		case header.BlobGasUsed != nil:
 			return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed)
+		case header.ParentBeaconRoot != nil:
+			return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", header.ParentBeaconRoot)
 		case header.WithdrawalsHash != nil:
 			return fmt.Errorf("invalid WithdrawalsHash, have %#x, expected nil", header.WithdrawalsHash)
 		}
 	} else {
 		switch {
+		case header.ParentBeaconRoot != nil:
+			return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", header.ParentBeaconRoot)
 		case !header.EmptyWithdrawalsHash():
 			return errors.New("header has wrong WithdrawalsHash")
 		}
@ -635,17 +617,6 @@ func (p *Parlia) verifyHeader(chain consensus.ChainHeaderReader, header *types.H
 		}
 	}
 
-	bohr := chain.Config().IsBohr(header.Number, header.Time)
-	if !bohr {
-		if header.ParentBeaconRoot != nil {
-			return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", header.ParentBeaconRoot)
-		}
-	} else {
-		if header.ParentBeaconRoot == nil || *header.ParentBeaconRoot != (common.Hash{}) {
-			return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected zero hash", header.ParentBeaconRoot)
-		}
-	}
-
 	// All basic checks passed, verify cascading fields
 	return p.verifyCascadingFields(chain, header, parents)
 }
@ -738,28 +709,13 @@ func (p *Parlia) snapshot(chain consensus.ChainHeaderReader, number uint64, hash
 			}
 		}
 
-		// If we're at the genesis, snapshot the initial state. Alternatively if we have
-		// piled up more headers than allowed to be reorged (chain reinit from a freezer),
-		// consider the checkpoint trusted and snapshot it.
-		// An offset `p.config.Epoch - 1` can ensure getting the right validators.
-		if number == 0 || ((number+1)%p.config.Epoch == 0 && (len(headers) > int(params.FullImmutabilityThreshold))) {
-			var (
-				checkpoint *types.Header
-				blockHash  common.Hash
-			)
-			if number == 0 {
-				checkpoint = chain.GetHeaderByNumber(0)
-				if checkpoint != nil {
-					blockHash = checkpoint.Hash()
-				}
-			} else {
-				checkpoint = chain.GetHeaderByNumber(number + 1 - p.config.Epoch)
-				blockHeader := chain.GetHeaderByNumber(number)
-				if blockHeader != nil {
-					blockHash = blockHeader.Hash()
-				}
-			}
-			if checkpoint != nil && blockHash != (common.Hash{}) {
+		// If we're at the genesis, snapshot the initial state.
+		if number == 0 {
+			checkpoint := chain.GetHeaderByNumber(number)
+			if checkpoint != nil {
+				// get checkpoint data
+				hash := checkpoint.Hash()
+
 				// get validators from headers
 				validators, voteAddrs, err := parseValidators(checkpoint, p.chainConfig, p.config)
 				if err != nil {
@ -767,27 +723,11 @@ func (p *Parlia) snapshot(chain consensus.ChainHeaderReader, number uint64, hash
 				}
 
 				// new snapshot
-				snap = newSnapshot(p.config, p.signatures, number, blockHash, validators, voteAddrs, p.ethAPI)
-
-				// get turnLength from headers and use that for new turnLength
-				turnLength, err := parseTurnLength(checkpoint, p.chainConfig, p.config)
-				if err != nil {
-					return nil, err
-				}
-				if turnLength != nil {
-					snap.TurnLength = *turnLength
-				}
-
-				// snap.Recents is currently empty, which affects the following:
-				// a. The function SignRecently - This is acceptable since an empty snap.Recents results in a more lenient check.
-				// b. The function blockTimeVerifyForRamanujanFork - This is also acceptable as it won't be invoked during `snap.apply`.
-				// c. This may cause a mismatch in the slash systemtx, but the transaction list is not verified during `snap.apply`.
-
-				// snap.Attestation is nil, but Snapshot.updateAttestation will handle it correctly.
+				snap = newSnapshot(p.config, p.signatures, number, hash, validators, voteAddrs, p.ethAPI)
 				if err := snap.store(p.db); err != nil {
 					return nil, err
 				}
-				log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", blockHash)
+				log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", hash)
 				break
 			}
 		}
@@ -944,24 +884,6 @@ func (p *Parlia) prepareValidators(header *types.Header) error {
 	return nil
 }

-func (p *Parlia) prepareTurnLength(chain consensus.ChainHeaderReader, header *types.Header) error {
-	if header.Number.Uint64()%p.config.Epoch != 0 ||
-		!p.chainConfig.IsBohr(header.Number, header.Time) {
-		return nil
-	}
-
-	turnLength, err := p.getTurnLength(chain, header)
-	if err != nil {
-		return err
-	}
-
-	if turnLength != nil {
-		header.Extra = append(header.Extra, *turnLength)
-	}
-
-	return nil
-}
-
 func (p *Parlia) assembleVoteAttestation(chain consensus.ChainHeaderReader, header *types.Header) error {
 	if !p.chainConfig.IsLuban(header.Number) || header.Number.Uint64() < 2 {
 		return nil
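The deleted prepareTurnLength appends a single turn-length byte to header.Extra on epoch blocks once Bohr is active, and verifyTurnLength (deleted further down) re-parses it and compares against the expected value. A hedged sketch of that round trip with simplified stand-in types, not the real header layout:

package main

import (
	"errors"
	"fmt"
)

// header is a stand-in for types.Header; only the Extra byte slice matters here.
type header struct{ Extra []byte }

// prepareTurnLength sketches the producer side: epoch blocks carry one extra byte.
func prepareTurnLength(h *header, atEpoch bool, turnLength uint8) {
	if !atEpoch {
		return
	}
	h.Extra = append(h.Extra, turnLength)
}

// verifyTurnLength sketches the consumer side: re-read the byte and compare.
func verifyTurnLength(h *header, atEpoch bool, expected uint8) error {
	if !atEpoch {
		return nil
	}
	if len(h.Extra) == 0 {
		return errors.New("missing turn length byte")
	}
	if h.Extra[len(h.Extra)-1] != expected {
		return errors.New("mismatching epoch turn length")
	}
	return nil
}

func main() {
	h := &header{}
	prepareTurnLength(h, true, 4)
	fmt.Println(verifyTurnLength(h, true, 4)) // <nil>
}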
@@ -1093,9 +1015,6 @@ func (p *Parlia) Prepare(chain consensus.ChainHeaderReader, header *types.Header
 		return err
 	}

-	if err := p.prepareTurnLength(chain, header); err != nil {
-		return err
-	}
 	// add extra seal space
 	header.Extra = append(header.Extra, make([]byte, extraSeal)...)

@@ -1146,30 +1065,6 @@ func (p *Parlia) verifyValidators(header *types.Header) error {
 	return nil
 }

-func (p *Parlia) verifyTurnLength(chain consensus.ChainHeaderReader, header *types.Header) error {
-	if header.Number.Uint64()%p.config.Epoch != 0 ||
-		!p.chainConfig.IsBohr(header.Number, header.Time) {
-		return nil
-	}
-
-	turnLengthFromHeader, err := parseTurnLength(header, p.chainConfig, p.config)
-	if err != nil {
-		return err
-	}
-	if turnLengthFromHeader != nil {
-		turnLength, err := p.getTurnLength(chain, header)
-		if err != nil {
-			return err
-		}
-		if turnLength != nil && *turnLength == *turnLengthFromHeader {
-			log.Debug("verifyTurnLength", "turnLength", *turnLength)
-			return nil
-		}
-	}
-
-	return errMismatchingEpochTurnLength
-}
-
 func (p *Parlia) distributeFinalityReward(chain consensus.ChainHeaderReader, state *state.StateDB, header *types.Header,
 	cx core.ChainContext, txs *[]*types.Transaction, receipts *[]*types.Receipt, systemTxs *[]*types.Transaction,
 	usedGas *uint64, mining bool) error {
@@ -1264,10 +1159,6 @@ func (p *Parlia) Finalize(chain consensus.ChainHeaderReader, header *types.Heade
 		return err
 	}

-	if err := p.verifyTurnLength(chain, header); err != nil {
-		return err
-	}
-
 	cx := chainContext{Chain: chain, parlia: p}

 	parent := chain.GetHeaderByHash(header.ParentHash)
@@ -1294,7 +1185,7 @@ func (p *Parlia) Finalize(chain consensus.ChainHeaderReader, header *types.Heade
 		}
 	}
 	if header.Difficulty.Cmp(diffInTurn) != 0 {
-		spoiledVal := snap.inturnValidator()
+		spoiledVal := snap.supposeValidator()
 		signedRecently := false
 		if p.chainConfig.IsPlato(header.Number) {
 			signedRecently = snap.SignRecently(spoiledVal)
@@ -1385,7 +1276,7 @@ func (p *Parlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
 	if err != nil {
 		return nil, nil, err
 	}
-	spoiledVal := snap.inturnValidator()
+	spoiledVal := snap.supposeValidator()
 	signedRecently := false
 	if p.chainConfig.IsPlato(header.Number) {
 		signedRecently = snap.SignRecently(spoiledVal)
@@ -1467,7 +1358,7 @@ func (p *Parlia) IsActiveValidatorAt(chain consensus.ChainHeaderReader, header *
 func (p *Parlia) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteEnvelope) error {
 	targetNumber := vote.Data.TargetNumber
 	targetHash := vote.Data.TargetHash
-	header := chain.GetVerifiedBlockByHash(targetHash)
+	header := chain.GetHeaderByHash(targetHash)
 	if header == nil {
 		log.Warn("BlockHeader at current voteBlockNumber is nil", "targetNumber", targetNumber, "targetHash", targetHash)
 		return errors.New("BlockHeader at current voteBlockNumber is nil")
@@ -1538,13 +1429,10 @@ func (p *Parlia) Delay(chain consensus.ChainReader, header *types.Header, leftOv
 		delay = delay - *leftOver
 	}

-	// The blocking time should be no more than half of period when snap.TurnLength == 1
-	timeForMining := time.Duration(p.config.Period) * time.Second / 2
-	if !snap.lastBlockInOneTurn(header.Number.Uint64()) {
-		timeForMining = time.Duration(p.config.Period) * time.Second * 2 / 3
-	}
-	if delay > timeForMining {
-		delay = timeForMining
+	// The blocking time should be no more than half of period
+	half := time.Duration(p.config.Period) * time.Second / 2
+	if delay > half {
+		delay = half
 	}
 	return &delay
 }
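The fork replaces the turn-aware mining budget with a flat cap of half the block period. A sketch of the two caps side by side, assuming a 3-second period (bsc's historical value, an assumption here):

package main

import (
	"fmt"
	"time"
)

const period = 3 * time.Second // assumed block period

// capWithTurns sketches the base branch: half a period on the last block
// of a turn, two thirds of a period on the other blocks in the turn.
func capWithTurns(delay time.Duration, lastBlockInTurn bool) time.Duration {
	budget := period / 2
	if !lastBlockInTurn {
		budget = period * 2 / 3
	}
	if delay > budget {
		return budget
	}
	return delay
}

// capFlat sketches the fork: a flat half-period cap.
func capFlat(delay time.Duration) time.Duration {
	if half := period / 2; delay > half {
		return half
	}
	return delay
}

func main() {
	fmt.Println(capWithTurns(5*time.Second, false)) // 2s
	fmt.Println(capFlat(5 * time.Second))           // 1.5s
}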
@@ -1615,15 +1503,12 @@ func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
 	copy(header.Extra[len(header.Extra)-extraSeal:], sig)

 	if p.shouldWaitForCurrentBlockProcess(chain, header, snap) {
-		highestVerifiedHeader := chain.GetHighestVerifiedHeader()
-		// including time for writing and committing blocks
-		waitProcessEstimate := math.Ceil(float64(highestVerifiedHeader.GasUsed) / float64(100_000_000))
-		log.Info("Waiting for received in turn block to process", "waitProcessEstimate(Seconds)", waitProcessEstimate)
+		log.Info("Waiting for received in turn block to process")
 		select {
 		case <-stop:
 			log.Info("Received block process finished, abort block seal")
 			return
-		case <-time.After(time.Duration(waitProcessEstimate) * time.Second):
+		case <-time.After(time.Duration(processBackOffTime) * time.Second):
 			if chain.CurrentHeader().Number.Uint64() >= header.Number.Uint64() {
 				log.Info("Process backoff time exhausted, and current header has updated to abort this seal")
 				return
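The deleted branch sizes the wait on how much gas the incoming block used, roughly one second per 100 million gas, while the fork falls back to a fixed processBackOffTime. A sketch of the estimate in isolation:

package main

import (
	"fmt"
	"math"
	"time"
)

// waitEstimate sketches the gas-based wait from the deleted branch:
// about one second per 100 million gas of the highest verified header.
func waitEstimate(gasUsed uint64) time.Duration {
	seconds := math.Ceil(float64(gasUsed) / float64(100_000_000))
	return time.Duration(seconds) * time.Second
}

func main() {
	fmt.Println(waitEstimate(30_000_000))  // 1s
	fmt.Println(waitEstimate(250_000_000)) // 3s
}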
@@ -1705,35 +1590,11 @@ func CalcDifficulty(snap *Snapshot, signer common.Address) *big.Int {
 	return new(big.Int).Set(diffNoTurn)
 }

-func encodeSigHeaderWithoutVoteAttestation(w io.Writer, header *types.Header, chainId *big.Int) {
-	err := rlp.Encode(w, []interface{}{
-		chainId,
-		header.ParentHash,
-		header.UncleHash,
-		header.Coinbase,
-		header.Root,
-		header.TxHash,
-		header.ReceiptHash,
-		header.Bloom,
-		header.Difficulty,
-		header.Number,
-		header.GasLimit,
-		header.GasUsed,
-		header.Time,
-		header.Extra[:extraVanity], // this will panic if extra is too short, should check before calling encodeSigHeaderWithoutVoteAttestation
-		header.MixDigest,
-		header.Nonce,
-	})
-	if err != nil {
-		panic("can't encode: " + err.Error())
-	}
-}
-
 // SealHash returns the hash of a block without vote attestation prior to it being sealed.
 // So it's not the real hash of a block, just used as unique id to distinguish task
 func (p *Parlia) SealHash(header *types.Header) (hash common.Hash) {
 	hasher := sha3.NewLegacyKeccak256()
-	encodeSigHeaderWithoutVoteAttestation(hasher, header, p.chainConfig.ChainID)
+	types.EncodeSigHeaderWithoutVoteAttestation(hasher, header, p.chainConfig.ChainID)
 	hasher.Sum(hash[:0])
 	return hash
}
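SealHash keccak-hashes an RLP list of header fields that deliberately excludes the seal and any vote attestation bytes, so the hash stays stable across signing. A minimal sketch of the pattern using go-ethereum's rlp package and the legacy keccak hasher; the field list is abbreviated to three stand-ins, not the full header:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/rlp"
	"golang.org/x/crypto/sha3"
)

// sealHash sketches the scheme above: RLP-encode a fixed field list into a
// keccak256 hasher and sum into a 32-byte id. The real code encodes the full
// header minus seal/attestation; three fields stand in here for brevity.
func sealHash(chainID *big.Int, parentHash [32]byte, number *big.Int) (hash [32]byte) {
	hasher := sha3.NewLegacyKeccak256()
	if err := rlp.Encode(hasher, []interface{}{chainID, parentHash, number}); err != nil {
		panic("can't encode: " + err.Error())
	}
	hasher.Sum(hash[:0])
	return hash
}

func main() {
	var parent [32]byte
	fmt.Printf("%x\n", sealHash(big.NewInt(56), parent, big.NewInt(1)))
}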
@@ -2039,40 +1900,42 @@ func (p *Parlia) GetFinalizedHeader(chain consensus.ChainHeaderReader, header *t
 // =========================== utility function ==========================
 func (p *Parlia) backOffTime(snap *Snapshot, header *types.Header, val common.Address) uint64 {
 	if snap.inturn(val) {
-		log.Debug("backOffTime", "blockNumber", header.Number, "in turn validator", val)
 		return 0
 	} else {
 		delay := initialBackOffTime
 		validators := snap.validators()
 		if p.chainConfig.IsPlanck(header.Number) {
-			counts := snap.countRecents()
-			for addr, seenTimes := range counts {
-				log.Debug("backOffTime", "blockNumber", header.Number, "validator", addr, "seenTimes", seenTimes)
+			// reverse the key/value of snap.Recents to get recentsMap
+			recentsMap := make(map[common.Address]uint64, len(snap.Recents))
+			bound := uint64(0)
+			if n, limit := header.Number.Uint64(), uint64(len(validators)/2+1); n > limit {
+				bound = n - limit
+			}
+			for seen, recent := range snap.Recents {
+				if seen <= bound {
+					continue
+				}
+				recentsMap[recent] = seen
 			}

 			// The backOffTime does not matter when a validator has signed recently.
-			if snap.signRecentlyByCounts(val, counts) {
+			if _, ok := recentsMap[val]; ok {
 				return 0
 			}

-			inTurnAddr := snap.inturnValidator()
-			if snap.signRecentlyByCounts(inTurnAddr, counts) {
+			inTurnAddr := validators[(snap.Number+1)%uint64(len(validators))]
+			if _, ok := recentsMap[inTurnAddr]; ok {
 				log.Debug("in turn validator has recently signed, skip initialBackOffTime",
 					"inTurnAddr", inTurnAddr)
 				delay = 0
 			}

-			// Exclude the recently signed validators and the in turn validator
+			// Exclude the recently signed validators
 			temp := make([]common.Address, 0, len(validators))
 			for _, addr := range validators {
-				if snap.signRecentlyByCounts(addr, counts) {
+				if _, ok := recentsMap[addr]; ok {
 					continue
 				}
-				if p.chainConfig.IsBohr(header.Number, header.Time) {
-					if addr == inTurnAddr {
-						continue
-					}
-				}
 				temp = append(temp, addr)
 			}
 			validators = temp
@@ -2090,11 +1953,7 @@ func (p *Parlia) backOffTime(snap *Snapshot, header *types.Header, val common.Ad
 			return 0
 		}

-		randSeed := snap.Number
-		if p.chainConfig.IsBohr(header.Number, header.Time) {
-			randSeed = header.Number.Uint64() / uint64(snap.TurnLength)
-		}
-		s := rand.NewSource(int64(randSeed))
+		s := rand.NewSource(int64(snap.Number))
 		r := rand.New(s)
 		n := len(validators)
 		backOffSteps := make([]uint64, 0, n)
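Both branches derive back-off steps from a deterministic PRNG so every node computes the same ordering; they differ only in the seed (the plain snapshot number on the fork, the number scaled by turn length on the base branch). A sketch of the shared shuffle:

package main

import (
	"fmt"
	"math/rand"
)

// backOffSteps sketches the deterministic shuffle both branches share: seed
// a PRNG with a value every node agrees on, then shuffle the step indices so
// each out-of-turn validator lands in a distinct delay slot.
func backOffSteps(seed int64, n int) []uint64 {
	r := rand.New(rand.NewSource(seed))
	steps := make([]uint64, 0, n)
	for i := uint64(0); i < uint64(n); i++ {
		steps = append(steps, i)
	}
	r.Shuffle(n, func(i, j int) { steps[i], steps[j] = steps[j], steps[i] })
	return steps
}

func main() {
	// Same seed on every node -> same ordering everywhere.
	fmt.Println(backOffSteps(9_000_000, 5))
	fmt.Println(backOffSteps(9_000_000, 5))
}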
@@ -22,44 +22,22 @@ func TestImpactOfValidatorOutOfService(t *testing.T) {
 	testCases := []struct {
 		totalValidators int
 		downValidators  int
-		turnLength      int
 	}{
-		{3, 1, 1},
-		{5, 2, 1},
-		{10, 1, 2},
-		{10, 4, 2},
-		{21, 1, 3},
-		{21, 3, 3},
-		{21, 5, 4},
-		{21, 10, 5},
+		{3, 1},
+		{5, 2},
+		{10, 1},
+		{10, 4},
+		{21, 1},
+		{21, 3},
+		{21, 5},
+		{21, 10},
 	}
 	for _, tc := range testCases {
-		simulateValidatorOutOfService(tc.totalValidators, tc.downValidators, tc.turnLength)
+		simulateValidatorOutOfService(tc.totalValidators, tc.downValidators)
 	}
 }

-// refer Snapshot.SignRecently
-func signRecently(idx int, recents map[uint64]int, turnLength int) bool {
-	recentSignTimes := 0
-	for _, signIdx := range recents {
-		if signIdx == idx {
-			recentSignTimes += 1
-		}
-	}
-	return recentSignTimes >= turnLength
-}
-
-// refer Snapshot.minerHistoryCheckLen
-func minerHistoryCheckLen(totalValidators int, turnLength int) uint64 {
-	return uint64(totalValidators/2+1)*uint64(turnLength) - 1
-}
-
-// refer Snapshot.inturnValidator
-func inturnValidator(totalValidators int, turnLength int, height int) int {
-	return height / turnLength % totalValidators
-}
-
-func simulateValidatorOutOfService(totalValidators int, downValidators int, turnLength int) {
+func simulateValidatorOutOfService(totalValidators int, downValidators int) {
 	downBlocks := 10000
 	recoverBlocks := 10000
 	recents := make(map[uint64]int)
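The deleted helpers quantify the spam-protection window under turn lengths: with N validators and turn length T, a validator may not sign again until (N/2+1)*T - 1 more blocks have passed, and the in-turn validator advances once per T blocks. A worked sketch of that arithmetic:

package main

import "fmt"

// Worked example of the deleted test helpers' arithmetic.
func minerHistoryCheckLen(totalValidators, turnLength int) uint64 {
	return uint64(totalValidators/2+1)*uint64(turnLength) - 1
}

func inturnValidator(totalValidators, turnLength, height int) int {
	return height / turnLength % totalValidators
}

func main() {
	fmt.Println(minerHistoryCheckLen(21, 1)) // 10: the classic N/2+1 window minus one
	fmt.Println(minerHistoryCheckLen(21, 3)) // 32: the window stretches with the turn length
	fmt.Println(inturnValidator(21, 3, 7))   // 2: heights 6, 7 and 8 all belong to validator 2
}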
@@ -77,7 +55,12 @@ func simulateValidatorOutOfService(totalValidators int, downValidators int, turn
 		delete(validators, down[i])
 	}
 	isRecentSign := func(idx int) bool {
-		return signRecently(idx, recents, turnLength)
+		for _, signIdx := range recents {
+			if signIdx == idx {
+				return true
+			}
+		}
+		return false
 	}
 	isInService := func(idx int) bool {
 		return validators[idx]
@@ -85,10 +68,10 @@ func simulateValidatorOutOfService(totalValidators int, downValidators int, turn

 	downDelay := uint64(0)
 	for h := 1; h <= downBlocks; h++ {
-		if limit := minerHistoryCheckLen(totalValidators, turnLength) + 1; uint64(h) >= limit {
+		if limit := uint64(totalValidators/2 + 1); uint64(h) >= limit {
 			delete(recents, uint64(h)-limit)
 		}
-		proposer := inturnValidator(totalValidators, turnLength, h)
+		proposer := h % totalValidators
 		if !isInService(proposer) || isRecentSign(proposer) {
 			candidates := make(map[int]bool, totalValidators/2)
 			for v := range validators {
@@ -116,10 +99,10 @@ func simulateValidatorOutOfService(totalValidators int, downValidators int, turn
 	recoverDelay := uint64(0)
 	lastseen := downBlocks
 	for h := downBlocks + 1; h <= downBlocks+recoverBlocks; h++ {
-		if limit := minerHistoryCheckLen(totalValidators, turnLength) + 1; uint64(h) >= limit {
+		if limit := uint64(totalValidators/2 + 1); uint64(h) >= limit {
 			delete(recents, uint64(h)-limit)
 		}
-		proposer := inturnValidator(totalValidators, turnLength, h)
+		proposer := h % totalValidators
 		if !isInService(proposer) || isRecentSign(proposer) {
 			lastseen = h
 			candidates := make(map[int]bool, totalValidators/2)
@@ -22,7 +22,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"math"
 	"sort"

 	lru "github.com/hashicorp/golang-lru"
@@ -44,7 +43,6 @@ type Snapshot struct {
 	Number           uint64                            `json:"number"`             // Block number where the snapshot was created
 	Hash             common.Hash                       `json:"hash"`               // Block hash where the snapshot was created
-	TurnLength       uint8                             `json:"turn_length"`        // Length of `turn`, meaning the consecutive number of blocks a validator receives priority for block production
 	Validators       map[common.Address]*ValidatorInfo `json:"validators"`         // Set of authorized validators at this moment
 	Recents          map[uint64]common.Address         `json:"recents"`            // Set of recent validators for spam protections
 	RecentForkHashes map[uint64]string                 `json:"recent_fork_hashes"` // Set of recent forkHash
@@ -74,7 +72,6 @@ func newSnapshot(
 		sigCache:         sigCache,
 		Number:           number,
 		Hash:             hash,
-		TurnLength:       defaultTurnLength,
 		Recents:          make(map[uint64]common.Address),
 		RecentForkHashes: make(map[uint64]string),
 		Validators:       make(map[common.Address]*ValidatorInfo),
@@ -117,10 +114,6 @@ func loadSnapshot(config *params.ParliaConfig, sigCache *lru.ARCCache, db ethdb.
 	if err := json.Unmarshal(blob, snap); err != nil {
 		return nil, err
 	}
-	if snap.TurnLength == 0 { // no TurnLength field in old snapshots
-		snap.TurnLength = defaultTurnLength
-	}
-
 	snap.config = config
 	snap.sigCache = sigCache
 	snap.ethAPI = ethAPI
@@ -145,7 +138,6 @@ func (s *Snapshot) copy() *Snapshot {
 		sigCache:         s.sigCache,
 		Number:           s.Number,
 		Hash:             s.Hash,
-		TurnLength:       s.TurnLength,
 		Validators:       make(map[common.Address]*ValidatorInfo),
 		Recents:          make(map[uint64]common.Address),
 		RecentForkHashes: make(map[uint64]string),
@@ -218,43 +210,15 @@ func (s *Snapshot) updateAttestation(header *types.Header, chainConfig *params.C
 	}
 }

-func (s *Snapshot) versionHistoryCheckLen() uint64 {
-	return uint64(len(s.Validators)) * uint64(s.TurnLength)
-}
-
-func (s *Snapshot) minerHistoryCheckLen() uint64 {
-	return (uint64(len(s.Validators))/2+1)*uint64(s.TurnLength) - 1
-}
-
-func (s *Snapshot) countRecents() map[common.Address]uint8 {
-	leftHistoryBound := uint64(0) // the bound is excluded
-	checkHistoryLength := s.minerHistoryCheckLen()
-	if s.Number > checkHistoryLength {
-		leftHistoryBound = s.Number - checkHistoryLength
-	}
-	counts := make(map[common.Address]uint8, len(s.Validators))
-	for seen, recent := range s.Recents {
-		if seen <= leftHistoryBound || recent == (common.Address{}) /*when seen == `epochKey`*/ {
-			continue
-		}
-		counts[recent] += 1
-	}
-	return counts
-}
-
-func (s *Snapshot) signRecentlyByCounts(validator common.Address, counts map[common.Address]uint8) bool {
-	if seenTimes, ok := counts[validator]; ok && seenTimes >= s.TurnLength {
-		if seenTimes > s.TurnLength {
-			log.Warn("produce more blocks than expected!", "validator", validator, "seenTimes", seenTimes)
-		}
-		return true
-	}
-
-	return false
-}
-
 func (s *Snapshot) SignRecently(validator common.Address) bool {
-	return s.signRecentlyByCounts(validator, s.countRecents())
+	for seen, recent := range s.Recents {
+		if recent == validator {
+			if limit := uint64(len(s.Validators)/2 + 1); s.Number+1 < limit || seen > s.Number+1-limit {
+				return true
+			}
+		}
+	}
+	return false
 }

 func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderReader, parents []*types.Header, chainConfig *params.ChainConfig) (*Snapshot, error) {
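The fork's SignRecently walks snap.Recents directly and only counts entries inside the trailing len(Validators)/2+1 block window; the base branch generalizes this to seen-counts compared against the turn length. A sketch of the fork-side window test in isolation, with ints standing in for addresses:

package main

import "fmt"

// signedRecently sketches the fork-side check: did validator idx sign
// within the trailing limit-block window ending at snapshot height number?
func signedRecently(recents map[uint64]int, idx int, number, limit uint64) bool {
	for seen, who := range recents {
		if who != idx {
			continue
		}
		if number+1 < limit || seen > number+1-limit {
			return true
		}
	}
	return false
}

func main() {
	recents := map[uint64]int{98: 3, 95: 7}
	// 5 validators -> limit 3; at height 99 only blocks 97..99 count.
	fmt.Println(signedRecently(recents, 3, 99, 3)) // true  (signed at 98)
	fmt.Println(signedRecently(recents, 7, 99, 3)) // false (95 is outside the window)
}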
@@ -283,10 +247,10 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea
 	for _, header := range headers {
 		number := header.Number.Uint64()
 		// Delete the oldest validator from the recent list to allow it signing again
-		if limit := snap.minerHistoryCheckLen() + 1; number >= limit {
+		if limit := uint64(len(snap.Validators)/2 + 1); number >= limit {
 			delete(snap.Recents, number-limit)
 		}
-		if limit := snap.versionHistoryCheckLen(); number >= limit {
+		if limit := uint64(len(snap.Validators)); number >= limit {
 			delete(snap.RecentForkHashes, number-limit)
 		}
 		// Resolve the authorization key and check against signers
@@ -297,47 +261,19 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea
 		if _, ok := snap.Validators[validator]; !ok {
 			return nil, errUnauthorizedValidator(validator.String())
 		}
-		if chainConfig.IsBohr(header.Number, header.Time) {
-			if snap.SignRecently(validator) {
-				return nil, errRecentlySigned
-			}
-		} else {
-			for _, recent := range snap.Recents {
-				if recent == validator {
-					return nil, errRecentlySigned
-				}
+		for _, recent := range snap.Recents {
+			if recent == validator {
+				return nil, errRecentlySigned
 			}
 		}
 		snap.Recents[number] = validator
-		snap.RecentForkHashes[number] = hex.EncodeToString(header.Extra[extraVanity-nextForkHashSize : extraVanity])
-		snap.updateAttestation(header, chainConfig, s.config)
 		// change validator set
-		if number > 0 && number%s.config.Epoch == snap.minerHistoryCheckLen() {
-			epochKey := math.MaxUint64 - header.Number.Uint64()/s.config.Epoch // impossible used as a block number
-			if chainConfig.IsBohr(header.Number, header.Time) {
-				// after switching the validator set, snap.Validators may become larger,
-				// then the unexpected second switch will happen, just skip it.
-				if _, ok := snap.Recents[epochKey]; ok {
-					continue
-				}
-			}
-
-			checkpointHeader := FindAncientHeader(header, snap.minerHistoryCheckLen(), chain, parents)
+		if number > 0 && number%s.config.Epoch == uint64(len(snap.Validators)/2) {
+			checkpointHeader := FindAncientHeader(header, uint64(len(snap.Validators)/2), chain, parents)
 			if checkpointHeader == nil {
 				return nil, consensus.ErrUnknownAncestor
 			}

-			oldVersionsLen := snap.versionHistoryCheckLen()
-			// get turnLength from headers and use that for new turnLength
-			turnLength, err := parseTurnLength(checkpointHeader, chainConfig, s.config)
-			if err != nil {
-				return nil, err
-			}
-			if turnLength != nil {
-				snap.TurnLength = *turnLength
-				log.Debug("validator set switch", "turnLength", *turnLength)
-			}
-
 			// get validators from headers and use that for new validator set
 			newValArr, voteAddrs, err := parseValidators(checkpointHeader, chainConfig, s.config)
 			if err != nil {
@@ -353,12 +289,6 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea
 				}
 			}
 			}
-			if chainConfig.IsBohr(header.Number, header.Time) {
-				// BEP-404: Clear Miner History when Switching Validators Set
-				snap.Recents = make(map[uint64]common.Address)
-				snap.Recents[epochKey] = common.Address{}
-				log.Debug("Recents are cleared up", "blockNumber", number)
-			} else {
 			oldLimit := len(snap.Validators)/2 + 1
 			newLimit := len(newVals)/2 + 1
 			if newLimit < oldLimit {
@@ -366,6 +296,12 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea
 					delete(snap.Recents, number-uint64(newLimit)-uint64(i))
 				}
 			}
+			oldLimit = len(snap.Validators)
+			newLimit = len(newVals)
+			if newLimit < oldLimit {
+				for i := 0; i < oldLimit-newLimit; i++ {
+					delete(snap.RecentForkHashes, number-uint64(newLimit)-uint64(i))
+				}
 			}
 			snap.Validators = newVals
 			if chainConfig.IsLuban(header.Number) {
@@ -374,10 +310,11 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea
 				snap.Validators[val].Index = idx + 1 // offset by 1
 			}
 		}
-			for i := snap.versionHistoryCheckLen(); i < oldVersionsLen; i++ {
-				delete(snap.RecentForkHashes, number-i)
-			}
 		}

+		snap.updateAttestation(header, chainConfig, s.config)
+
+		snap.RecentForkHashes[number] = hex.EncodeToString(header.Extra[extraVanity-nextForkHashSize : extraVanity])
 	}
 	snap.Number += uint64(len(headers))
 	snap.Hash = headers[len(headers)-1].Hash()
@@ -394,20 +331,17 @@ func (s *Snapshot) validators() []common.Address {
 	return validators
 }

-// lastBlockInOneTurn returns if the block at height `blockNumber` is the last block in current turn.
-func (s *Snapshot) lastBlockInOneTurn(blockNumber uint64) bool {
-	return (blockNumber+1)%uint64(s.TurnLength) == 0
-}
-
 // inturn returns if a validator at a given block height is in-turn or not.
 func (s *Snapshot) inturn(validator common.Address) bool {
-	return s.inturnValidator() == validator
+	validators := s.validators()
+	offset := (s.Number + 1) % uint64(len(validators))
+	return validators[offset] == validator
 }

-// inturnValidator returns the validator for the following block height.
+// inturnValidator returns the validator at a given block height.
 func (s *Snapshot) inturnValidator() common.Address {
 	validators := s.validators()
-	offset := (s.Number + 1) / uint64(s.TurnLength) % uint64(len(validators))
+	offset := (s.Number + 1) % uint64(len(validators))
 	return validators[offset]
 }

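This is the core rotation change: with turn lengths, the in-turn validator advances once per TurnLength blocks instead of every block. A side-by-side sketch of the two rules:

package main

import "fmt"

// inturnPerBlock sketches the fork: the proposer rotates every block.
func inturnPerBlock(number uint64, n int) int {
	return int((number + 1) % uint64(n))
}

// inturnPerTurn sketches the base branch: each validator holds the slot
// for turnLength consecutive blocks before the rotation advances.
func inturnPerTurn(number uint64, turnLength uint8, n int) int {
	return int((number + 1) / uint64(turnLength) % uint64(n))
}

func main() {
	for number := uint64(0); number < 6; number++ {
		fmt.Println(inturnPerBlock(number, 4), inturnPerTurn(number, 2, 4))
	}
	// per-block: 1 2 3 0 1 2 ; per-turn(2): 0 1 1 2 2 3
}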
@@ -445,6 +379,12 @@ func (s *Snapshot) indexOfVal(validator common.Address) int {
 	return -1
 }

+func (s *Snapshot) supposeValidator() common.Address {
+	validators := s.validators()
+	index := (s.Number + 1) % uint64(len(validators))
+	return validators[index]
+}
+
 func parseValidators(header *types.Header, chainConfig *params.ChainConfig, parliaConfig *params.ParliaConfig) ([]common.Address, []types.BLSPublicKey, error) {
 	validatorsBytes := getValidatorBytesFromHeader(header, chainConfig, parliaConfig)
 	if len(validatorsBytes) == 0 {
@@ -470,24 +410,6 @@ func parseValidators(header *types.Header, chainConfig *params.ChainConfig, parl
 	return cnsAddrs, voteAddrs, nil
 }

-func parseTurnLength(header *types.Header, chainConfig *params.ChainConfig, parliaConfig *params.ParliaConfig) (*uint8, error) {
-	if header.Number.Uint64()%parliaConfig.Epoch != 0 ||
-		!chainConfig.IsBohr(header.Number, header.Time) {
-		return nil, nil
-	}
-
-	if len(header.Extra) <= extraVanity+extraSeal {
-		return nil, errInvalidSpanValidators
-	}
-	num := int(header.Extra[extraVanity])
-	pos := extraVanity + validatorNumberSize + num*validatorBytesLength
-	if len(header.Extra) <= pos {
-		return nil, errInvalidTurnLength
-	}
-	turnLength := header.Extra[pos]
-	return &turnLength, nil
-}
-
 func FindAncientHeader(header *types.Header, ite uint64, chain consensus.ChainHeaderReader, candidateParents []*types.Header) *types.Header {
 	ancient := header
 	for i := uint64(1); i <= ite; i++ {
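The deleted parseTurnLength reads the byte that sits right after the validator table in header.Extra: 32 vanity bytes, a one-byte validator count, then count entries of fixed size, then the turn-length byte. A sketch of the offset arithmetic, assuming bsc's post-Luban 68-byte entries (20-byte address plus 48-byte BLS key; sizes are an assumption, not taken from this diff):

package main

import (
	"errors"
	"fmt"
)

// Assumed layout constants: 32 vanity bytes, 1 count byte, 68 bytes per validator.
const (
	extraVanity          = 32
	validatorNumberSize  = 1
	validatorBytesLength = 68
)

// parseTurnLength sketches the offset walk: skip vanity, read the count,
// skip the validator table, and return the byte that follows it.
func parseTurnLength(extra []byte) (uint8, error) {
	num := int(extra[extraVanity])
	pos := extraVanity + validatorNumberSize + num*validatorBytesLength
	if len(extra) <= pos {
		return 0, errors.New("invalid turn length")
	}
	return extra[pos], nil
}

func main() {
	extra := make([]byte, extraVanity+validatorNumberSize+2*validatorBytesLength+1)
	extra[extraVanity] = 2  // two validator entries
	extra[len(extra)-1] = 4 // the turn-length byte
	tl, err := parseTurnLength(extra)
	fmt.Println(tl, err) // 4 <nil>
}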
@@ -100,8 +100,6 @@ var (
 	blockReorgAddMeter  = metrics.NewRegisteredMeter("chain/reorg/add", nil)
 	blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)

-	blockRecvTimeDiffGauge = metrics.NewRegisteredGauge("chain/block/recvtimediff", nil)
-
 	errStateRootVerificationFailed = errors.New("state root verification failed")
 	errInsertionInterrupted        = errors.New("insertion is interrupted")
 	errChainStopped                = errors.New("blockchain is stopped")
@@ -270,7 +268,6 @@ type BlockChain struct {
 	logsFeed                 event.Feed
 	blockProcFeed            event.Feed
 	finalizedHeaderFeed      event.Feed
-	highestVerifiedBlockFeed event.Feed
 	scope                    event.SubscriptionScope
 	genesisBlock             *types.Block

@@ -279,7 +276,6 @@ type BlockChain struct {
 	chainmu *syncx.ClosableMutex

 	highestVerifiedHeader atomic.Pointer[types.Header]
-	highestVerifiedBlock  atomic.Pointer[types.Header]
 	currentBlock          atomic.Pointer[types.Header] // Current head of the chain
 	currentSnapBlock      atomic.Pointer[types.Header] // Current head of snap-sync
 	currentFinalBlock     atomic.Pointer[types.Header] // Latest (consensus) finalized block
@@ -305,7 +301,6 @@ type BlockChain struct {
 	diffLayerFreezerBlockLimit uint64

 	wg            sync.WaitGroup
-	dbWg          sync.WaitGroup
 	quit          chan struct{} // shutdown signal, closed in Stop.
 	stopping      atomic.Bool   // false if chain is running, true when stopped
 	procInterrupt atomic.Bool   // interrupt signaler for block processing
@@ -404,7 +399,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 	}

 	bc.highestVerifiedHeader.Store(nil)
-	bc.highestVerifiedBlock.Store(nil)
 	bc.currentBlock.Store(nil)
 	bc.currentSnapBlock.Store(nil)
 	bc.chasingHead.Store(nil)
@@ -447,6 +441,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 			diskRoot = bc.triedb.Head()
 		}
 	}
+	diskRoot = common.HexToHash("0x59d2a69ad465dbadf78f99635af9ed8125636cbdedc50bda9668ab2ac677b17a")
 	if diskRoot != (common.Hash{}) {
 		log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "diskRoot", diskRoot)

@@ -467,8 +462,8 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 		}
 	}
 	// Ensure that a previous crash in SetHead doesn't leave extra ancients
-	if frozen, err := bc.db.BlockStore().ItemAmountInAncient(); err == nil && frozen > 0 {
-		frozen, err = bc.db.BlockStore().Ancients()
+	if frozen, err := bc.db.ItemAmountInAncient(); err == nil && frozen > 0 {
+		frozen, err = bc.db.Ancients()
 		if err != nil {
 			return nil, err
 		}
@@ -582,7 +577,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 	}
 	// Start tx indexer if it's enabled.
 	if txLookupLimit != nil {
-		bc.txIndexer = newTxIndexer(*txLookupLimit, bc)
+		// bc.txIndexer = newTxIndexer(*txLookupLimit, bc)
 	}
 	return bc, nil
 }
@@ -662,13 +657,20 @@ func (bc *BlockChain) cacheDiffLayer(diffLayer *types.DiffLayer, diffLayerCh cha
 	}
 }

+func (bc *BlockChain) cacheBlock(hash common.Hash, block *types.Block) {
+	bc.blockCache.Add(hash, block)
+	if bc.chainConfig.IsCancun(block.Number(), block.Time()) {
+		bc.sidecarsCache.Add(hash, block.Sidecars())
+	}
+}
+
 // empty returns an indicator whether the blockchain is empty.
 // Note, it's a special case that we connect a non-empty ancient
 // database with an empty node, so that we can plugin the ancient
 // into node seamlessly.
 func (bc *BlockChain) empty() bool {
 	genesis := bc.genesisBlock.Hash()
-	for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
+	for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db.BlockStore()), rawdb.ReadHeadHeaderHash(bc.db.BlockStore()), rawdb.ReadHeadFastBlockHash(bc.db)} {
 		if hash != genesis {
 			return false
 		}
 	}
@@ -704,7 +706,7 @@ func (bc *BlockChain) getFinalizedNumber(header *types.Header) uint64 {
 // assumes that the chain manager mutex is held.
 func (bc *BlockChain) loadLastState() error {
 	// Restore the last known head block
-	head := rawdb.ReadHeadBlockHash(bc.db)
+	head := rawdb.ReadHeadBlockHash(bc.db.BlockStore())
 	if head == (common.Hash{}) {
 		// Corrupt or empty database, init from scratch
 		log.Warn("Empty database, resetting chain")
@@ -726,7 +728,7 @@ func (bc *BlockChain) loadLastState() error {

 	// Restore the last known head header
 	headHeader := headBlock.Header()
-	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
+	if head := rawdb.ReadHeadHeaderHash(bc.db.BlockStore()); head != (common.Hash{}) {
 		if header := bc.GetHeaderByHash(head); header != nil {
 			headHeader = header
 		}
@@ -1105,7 +1107,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
 	// intent afterwards is full block importing, delete the chain segment
 	// between the stateful-block and the sethead target.
 	var wipe bool
-	frozen, _ := bc.db.BlockStore().Ancients()
+	frozen, _ := bc.db.Ancients()
 	if headNumber+1 < frozen {
 		wipe = pivot == nil || headNumber >= *pivot
 	}
@@ -1114,11 +1116,11 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
 	// Rewind the header chain, deleting all block bodies until then
 	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
 		// Ignore the error here since light client won't hit this path
-		frozen, _ := bc.db.BlockStore().Ancients()
+		frozen, _ := bc.db.Ancients()
 		if num+1 <= frozen {
 			// Truncate all relative data(header, total difficulty, body, receipt
 			// and canonical hash) from ancient store.
-			if _, err := bc.db.BlockStore().TruncateHead(num); err != nil {
+			if _, err := bc.db.TruncateHead(num); err != nil {
 				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
 			}
 			// Remove the hash <-> number mapping from the active store.
@@ -1136,7 +1138,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
 	// If SetHead was only called as a chain reparation method, try to skip
 	// touching the header chain altogether, unless the freezer is broken
 	if repair {
-		if target, force := updateFn(bc.db.BlockStore(), bc.CurrentBlock()); force {
+		if target, force := updateFn(bc.db, bc.CurrentBlock()); force {
 			bc.hc.SetHead(target.Number.Uint64(), updateFn, delFn)
 		}
 	} else {
@@ -1297,33 +1299,19 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
 //
 // Note, this function assumes that the `mu` mutex is held!
 func (bc *BlockChain) writeHeadBlock(block *types.Block) {
-	bc.dbWg.Add(2)
-	defer bc.dbWg.Wait()
-	go func() {
-		defer bc.dbWg.Done()
-		// Add the block to the canonical chain number scheme and mark as the head
-		blockBatch := bc.db.BlockStore().NewBatch()
-		rawdb.WriteCanonicalHash(blockBatch, block.Hash(), block.NumberU64())
-		rawdb.WriteHeadHeaderHash(blockBatch, block.Hash())
-		rawdb.WriteHeadBlockHash(blockBatch, block.Hash())
-		rawdb.WriteHeadFastBlockHash(blockBatch, block.Hash())
-		// Flush the whole batch into the disk, exit the node if failed
-		if err := blockBatch.Write(); err != nil {
-			log.Crit("Failed to update chain indexes and markers in block db", "err", err)
-		}
-	}()
-	go func() {
-		defer bc.dbWg.Done()
-
-		batch := bc.db.NewBatch()
-		rawdb.WriteTxLookupEntriesByBlock(batch, block)
-		// Flush the whole batch into the disk, exit the node if failed
-		if err := batch.Write(); err != nil {
-			log.Crit("Failed to update chain indexes in chain db", "err", err)
-		}
-	}()
+	// Add the block to the canonical chain number scheme and mark as the head
+	rawdb.WriteCanonicalHash(bc.db.BlockStore(), block.Hash(), block.NumberU64())
+	rawdb.WriteHeadHeaderHash(bc.db.BlockStore(), block.Hash())
+	rawdb.WriteHeadBlockHash(bc.db.BlockStore(), block.Hash())
+
+	batch := bc.db.NewBatch()
+	rawdb.WriteHeadFastBlockHash(batch, block.Hash())
+	rawdb.WriteTxLookupEntriesByBlock(batch, block)
+
+	// Flush the whole batch into the disk, exit the node if failed
+	if err := batch.Write(); err != nil {
+		log.Crit("Failed to update chain indexes and markers", "err", err)
+	}

 	// Update all in-memory chain markers in the last step
 	bc.hc.SetCurrentHeader(block.Header())
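The base branch fans head-marker writes out to two batches on separate goroutines synchronized by dbWg; the fork reverts to direct writes plus one batch. One practical property of the batch path is atomicity: markers grouped in a single batch become visible together or not at all. A hedged sketch of that idiom with go-ethereum's in-memory database (the keys here are illustrative):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// rawdb.NewMemoryDatabase stands in for the real chain db.
	db := rawdb.NewMemoryDatabase()
	batch := db.NewBatch()
	if err := batch.Put([]byte("LastBlock"), []byte{0x01}); err != nil {
		panic(err)
	}
	if err := batch.Put([]byte("LastHeader"), []byte{0x01}); err != nil {
		panic(err)
	}
	// Nothing is visible until Write; a crash before this point loses both
	// markers together rather than leaving them inconsistent.
	if err := batch.Write(); err != nil {
		panic(err)
	}
	v, _ := db.Get([]byte("LastBlock"))
	fmt.Println(v)
}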
@@ -1395,7 +1383,7 @@ func (bc *BlockChain) Stop() {

 	if !bc.cacheConfig.TrieDirtyDisabled {
 		triedb := bc.triedb
 		var once sync.Once
-		for _, offset := range []uint64{0, 1, bc.TriesInMemory() - 1} {
+		for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
 			if number := bc.CurrentBlock().Number.Uint64(); number > offset {
 				recent := bc.GetBlockByNumber(number - offset)
 				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
@@ -1544,7 +1532,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		} else if !reorg {
 			return false
 		}
-		rawdb.WriteHeadFastBlockHash(bc.db.BlockStore(), head.Hash())
+		rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
 		bc.currentSnapBlock.Store(head.Header())
 		headFastBlockGauge.Update(int64(head.NumberU64()))
 		return true
@@ -1561,9 +1549,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [

 		// Ensure genesis is in ancients.
 		if first.NumberU64() == 1 {
-			if frozen, _ := bc.db.BlockStore().Ancients(); frozen == 0 {
+			if frozen, _ := bc.db.Ancients(); frozen == 0 {
 				td := bc.genesisBlock.Difficulty()
-				writeSize, err := rawdb.WriteAncientBlocks(bc.db.BlockStore(), []*types.Block{bc.genesisBlock}, []types.Receipts{nil}, td)
+				writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []types.Receipts{nil}, td)
 				if err != nil {
 					log.Error("Error writing genesis to ancients", "err", err)
 					return 0, err
@@ -1581,7 +1569,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [

 		// Write all chain data to ancients.
 		td := bc.GetTd(first.Hash(), first.NumberU64())
-		writeSize, err := rawdb.WriteAncientBlocksWithBlobs(bc.db.BlockStore(), blockChain, receiptChain, td)
+		writeSize, err := rawdb.WriteAncientBlocksWithBlobs(bc.db, blockChain, receiptChain, td)
 		if err != nil {
 			log.Error("Error importing chain data to ancients", "err", err)
 			return 0, err
@@ -1589,7 +1577,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		size += writeSize

 		// Sync the ancient store explicitly to ensure all data has been flushed to disk.
-		if err := bc.db.BlockStore().Sync(); err != nil {
+		if err := bc.db.Sync(); err != nil {
 			return 0, err
 		}
 		// Update the current snap block because all block data is now present in DB.
@@ -1597,7 +1585,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		if !updateHead(blockChain[len(blockChain)-1]) {
 			// We end up here if the header chain has reorg'ed, and the blocks/receipts
 			// don't match the canonical chain.
-			if _, err := bc.db.BlockStore().TruncateHead(previousSnapBlock + 1); err != nil {
+			if _, err := bc.db.TruncateHead(previousSnapBlock + 1); err != nil {
 				log.Error("Can't truncate ancient store after failed insert", "err", err)
 			}
 			return 0, errSideChainReceipts
@@ -1617,7 +1605,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 			rawdb.DeleteBlockWithoutNumber(blockBatch, block.Hash(), block.NumberU64())
 		}
 		// Delete side chain hash-to-number mappings.
-		for _, nh := range rawdb.ReadAllHashesInRange(bc.db.BlockStore(), first.NumberU64(), last.NumberU64()) {
+		for _, nh := range rawdb.ReadAllHashesInRange(bc.db, first.NumberU64(), last.NumberU64()) {
 			if _, canon := canonHashes[nh.Hash]; !canon {
 				rawdb.DeleteHeader(blockBatch, nh.Hash, nh.Number)
 			}
@@ -1787,6 +1775,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 	wg := sync.WaitGroup{}
 	wg.Add(1)
 	go func() {
+		rawdb.WritePreimages(bc.db, state.Preimages())
 		blockBatch := bc.db.BlockStore().NewBatch()
 		rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
 		rawdb.WriteBlock(blockBatch, block)
@@ -1795,20 +1784,10 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 		if bc.chainConfig.IsCancun(block.Number(), block.Time()) {
 			rawdb.WriteBlobSidecars(blockBatch, block.Hash(), block.NumberU64(), block.Sidecars())
 		}
-		if bc.db.StateStore() != nil {
-			rawdb.WritePreimages(bc.db.StateStore(), state.Preimages())
-		} else {
-			rawdb.WritePreimages(blockBatch, state.Preimages())
-		}
+		rawdb.WritePreimages(blockBatch, state.Preimages())
 		if err := blockBatch.Write(); err != nil {
 			log.Crit("Failed to write block into disk", "err", err)
 		}
-		bc.hc.tdCache.Add(block.Hash(), externTd)
-		bc.blockCache.Add(block.Hash(), block)
-		bc.cacheReceipts(block.Hash(), receipts, block)
-		if bc.chainConfig.IsCancun(block.Number(), block.Time()) {
-			bc.sidecarsCache.Add(block.Hash(), block.Sidecars())
-		}
 		wg.Done()
 	}()

@@ -1833,7 +1812,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.

 	// Flush limits are not considered for the first TriesInMemory blocks.
 	current := block.NumberU64()
-	if current <= bc.TriesInMemory() {
+	if current <= TriesInMemory {
 		return nil
 	}
 	// If we exceeded our memory allowance, flush matured singleton nodes to disk
@@ -1931,19 +1910,14 @@ func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types
 // writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead.
 // This function expects the chain mutex to be held.
 func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
+	if err := bc.writeBlockWithState(block, receipts, state); err != nil {
+		return NonStatTy, err
+	}
 	currentBlock := bc.CurrentBlock()
 	reorg, err := bc.forker.ReorgNeededWithFastFinality(currentBlock, block.Header())
 	if err != nil {
 		return NonStatTy, err
 	}
-	if reorg {
-		bc.highestVerifiedBlock.Store(types.CopyHeader(block.Header()))
-		bc.highestVerifiedBlockFeed.Send(HighestVerifiedBlockEvent{Header: block.Header()})
-	}
-
-	if err := bc.writeBlockWithState(block, receipts, state); err != nil {
-		return NonStatTy, err
-	}
 	if reorg {
 		// Reorganise the chain if the parent is not the head block
 		if block.ParentHash() != currentBlock.Hash() {
@@ -2057,9 +2031,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
 		return 0, nil
 	}

-	if len(chain) > 0 {
-		blockRecvTimeDiffGauge.Update(time.Now().Unix() - int64(chain[0].Time()))
-	}
 	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
 	signer := types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time())
 	go SenderCacher.RecoverFromBlocks(signer, chain)
@@ -2294,6 +2265,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
 		vtime := time.Since(vstart)
 		proctime := time.Since(start) // processing + validation
 
+		bc.cacheBlock(block.Hash(), block)
+
 		// Update the metrics touched during block processing and validation
 		accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
 		storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
@@ -2303,12 +2276,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
 		storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
 		accountHashTimer.Update(statedb.AccountHashes)    // Account hashes are complete(in validation)
 		storageHashTimer.Update(statedb.StorageHashes)    // Storage hashes are complete(in validation)
-		triehash := statedb.AccountHashes + statedb.StorageHashes       // The time spent on tries hashing
-		trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates   // The time spent on tries update
-		trieRead := statedb.SnapshotAccountReads + statedb.AccountReads // The time spent on account read
-		trieRead += statedb.SnapshotStorageReads + statedb.StorageReads // The time spent on storage read
-		blockExecutionTimer.Update(ptime - trieRead)                    // The time spent on EVM processing
-		blockValidationTimer.Update(vtime - (triehash + trieUpdate))    // The time spent on block validation
+		blockExecutionTimer.Update(ptime)  // The time spent on EVM processing
+		blockValidationTimer.Update(vtime) // The time spent on block validation
 
 		// Write the block to the chain and get the status.
 		var (
@@ -2325,13 +2294,15 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
 			return it.index, err
 		}
 
+		bc.cacheReceipts(block.Hash(), receipts, block)
+
 		// Update the metrics touched during block commit
 		accountCommitTimer.Update(statedb.AccountCommits)   // Account commits are complete, we can mark them
 		storageCommitTimer.Update(statedb.StorageCommits)   // Storage commits are complete, we can mark them
 		snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
 		triedbCommitTimer.Update(statedb.TrieDBCommits)     // Trie database commits are complete, we can mark them
 
-		blockWriteTimer.Update(time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits)
+		blockWriteTimer.UpdateSince(wstart)
 		blockInsertTimer.UpdateSince(start)
 
 		// Report the import stats before returning the various results
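The two sides account blockWriteTimer differently: the left nets the state-commit sub-phases out of the wall time, the right charges the whole span via UpdateSince. A self-contained sketch of the two accountings; the durations are illustrative values, not measurements:

package main

import (
	"fmt"
	"time"
)

func main() {
	// wall stands for time.Since(wstart); commits for the summed
	// account/storage/snapshot/triedb commit times.
	wall := 120 * time.Millisecond
	commits := 90 * time.Millisecond

	fmt.Println(wall - commits) // left side: write time with commits netted out (30ms)
	fmt.Println(wall)           // right side: UpdateSince-style whole span (120ms)
}
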
@@ -2404,11 +2375,26 @@ func (bc *BlockChain) updateHighestVerifiedHeader(header *types.Header) {
 	if header == nil || header.Number == nil {
 		return
 	}
-	currentBlock := bc.CurrentBlock()
-	reorg, err := bc.forker.ReorgNeededWithFastFinality(currentBlock, header)
-	if err == nil && reorg {
+	currentHeader := bc.highestVerifiedHeader.Load()
+	if currentHeader == nil {
 		bc.highestVerifiedHeader.Store(types.CopyHeader(header))
-		log.Trace("updateHighestVerifiedHeader", "number", header.Number.Uint64(), "hash", header.Hash())
+		return
+	}
+
+	newParentTD := bc.GetTd(header.ParentHash, header.Number.Uint64()-1)
+	if newParentTD == nil {
+		newParentTD = big.NewInt(0)
+	}
+	oldParentTD := bc.GetTd(currentHeader.ParentHash, currentHeader.Number.Uint64()-1)
+	if oldParentTD == nil {
+		oldParentTD = big.NewInt(0)
+	}
+	newTD := big.NewInt(0).Add(newParentTD, header.Difficulty)
+	oldTD := big.NewInt(0).Add(oldParentTD, currentHeader.Difficulty)
+
+	if newTD.Cmp(oldTD) > 0 {
+		bc.highestVerifiedHeader.Store(types.CopyHeader(header))
+		return
 	}
 }
 
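The total-difficulty rule this hunk introduces can be isolated as below. A minimal stdlib sketch with a stand-in header struct (not the repo's types); it only mirrors the newTD/oldTD comparison shown above:

package main

import (
	"fmt"
	"math/big"
)

// header is a pared-down stand-in for types.Header; only the fields the
// comparison touches are kept.
type header struct {
	parentTD   *big.Int // total difficulty of the parent, nil when not stored
	difficulty *big.Int
}

func orZero(td *big.Int) *big.Int {
	if td == nil {
		return big.NewInt(0)
	}
	return td
}

// replaces reports whether candidate should displace current as the highest
// verified header: each side's TD is its parent's TD (zero when missing) plus
// its own difficulty, and only a strictly greater total wins.
func replaces(candidate, current header) bool {
	newTD := new(big.Int).Add(orZero(candidate.parentTD), candidate.difficulty)
	oldTD := new(big.Int).Add(orZero(current.parentTD), current.difficulty)
	return newTD.Cmp(oldTD) > 0
}

func main() {
	cand := header{parentTD: big.NewInt(100), difficulty: big.NewInt(2)}
	cur := header{parentTD: nil, difficulty: big.NewInt(2)}
	fmt.Println(replaces(cand, cur)) // true: 102 > 2
	fmt.Println(replaces(cur, cur))  // false: equal totals keep the incumbent
}
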
@@ -98,15 +98,6 @@ func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
 	return bc.hc.GetHeaderByHash(hash)
 }
 
-// GetVerifiedBlockByHash retrieves the header of a verified block, it may be only in memory.
-func (bc *BlockChain) GetVerifiedBlockByHash(hash common.Hash) *types.Header {
-	highestVerifiedBlock := bc.highestVerifiedBlock.Load()
-	if highestVerifiedBlock != nil && highestVerifiedBlock.Hash() == hash {
-		return highestVerifiedBlock
-	}
-	return bc.hc.GetHeaderByHash(hash)
-}
-
 // GetHeaderByNumber retrieves a block header from the database by number,
 // caching it (associated with its hash) if found.
 func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
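The removed helper's lookup order is easy to restate in isolation. A minimal sketch with string stand-ins for common.Hash and *types.Header (names here are illustrative, not the repo's):

package main

import "fmt"

// getVerifiedBlockByHash mirrors the deleted helper's shape: serve the
// in-memory highest verified block when the hash matches, otherwise fall
// back to the on-disk header index.
func getVerifiedBlockByHash(highestHash, highestHeader, hash string, lookup func(string) string) string {
	if highestHeader != "" && highestHash == hash {
		return highestHeader
	}
	return lookup(hash)
}

func main() {
	onDisk := map[string]string{"0xaa": "header-from-db"}
	lookup := func(h string) string { return onDisk[h] }
	fmt.Println(getVerifiedBlockByHash("0xbb", "header-in-mem", "0xbb", lookup)) // in-memory hit
	fmt.Println(getVerifiedBlockByHash("0xbb", "header-in-mem", "0xaa", lookup)) // disk fallback
}
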
@@ -240,7 +231,7 @@ func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
 	if receipts, ok := bc.receiptsCache.Get(hash); ok {
 		return receipts
 	}
-	number := rawdb.ReadHeaderNumber(bc.db, hash)
+	number := rawdb.ReadHeaderNumber(bc.db.BlockStore(), hash)
 	if number == nil {
 		return nil
 	}
@@ -495,11 +486,6 @@ func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Su
 	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
 }
 
-// SubscribeHighestVerifiedBlockEvent registers a subscription of HighestVerifiedBlockEvent.
-func (bc *BlockChain) SubscribeHighestVerifiedHeaderEvent(ch chan<- HighestVerifiedBlockEvent) event.Subscription {
-	return bc.scope.Track(bc.highestVerifiedBlockFeed.Subscribe(ch))
-}
-
 // SubscribeChainBlockEvent registers a subscription of ChainBlockEvent.
 func (bc *BlockChain) SubscribeChainBlockEvent(ch chan<- ChainHeadEvent) event.Subscription {
 	return bc.scope.Track(bc.chainBlockFeed.Subscribe(ch))
@@ -525,12 +511,3 @@ func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscr
 func (bc *BlockChain) SubscribeFinalizedHeaderEvent(ch chan<- FinalizedHeaderEvent) event.Subscription {
 	return bc.scope.Track(bc.finalizedHeaderFeed.Subscribe(ch))
 }
-
-// AncientTail retrieves the tail the ancients blocks
-func (bc *BlockChain) AncientTail() (uint64, error) {
-	tail, err := bc.db.BlockStore().Tail()
-	if err != nil {
-		return 0, err
-	}
-	return tail, nil
-}

@@ -26,8 +26,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/ethereum/go-ethereum/ethdb"
-
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core/rawdb"
@@ -1797,13 +1795,6 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
 		config.SnapshotWait = true
 	}
 	config.TriesInMemory = 128
-
-	if err = db.SetupFreezerEnv(&ethdb.FreezerEnv{
-		ChainCfg:         gspec.Config,
-		BlobExtraReserve: params.DefaultExtraReserveForBlobRequests,
-	}); err != nil {
-		t.Fatalf("Failed to create chain: %v", err)
-	}
 	chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
 	if err != nil {
 		t.Fatalf("Failed to create chain: %v", err)
@@ -1841,10 +1832,14 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
 	}
 	// Force run a freeze cycle
 	type freezer interface {
-		Freeze(threshold uint64) error
+		Freeze() error
 		Ancients() (uint64, error)
 	}
-	db.(freezer).Freeze(tt.freezeThreshold)
+	if tt.freezeThreshold < uint64(tt.canonicalBlocks) {
+		final := uint64(tt.canonicalBlocks) - tt.freezeThreshold
+		chain.SetFinalized(canonblocks[int(final)-1].Header())
+	}
+	db.(freezer).Freeze()
 
 	// Set the simulated pivot block
 	if tt.pivotBlock != nil {
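With the threshold argument gone from Freeze, the tests above pin finality first. A small sketch of the arithmetic they use to pick the block to finalize; the field names are borrowed from rewindTest, and the comment's claim that Freeze() honours the finalized boundary is an assumption taken from the surrounding hunks:

package main

import "fmt"

// pinnedFinal picks the block to finalize so that a plain Freeze() leaves
// exactly freezeThreshold recent blocks unfrozen (assumed semantics).
func pinnedFinal(canonicalBlocks, freezeThreshold uint64) (uint64, bool) {
	if freezeThreshold >= canonicalBlocks {
		return 0, false // nothing needs finalizing; Freeze() has no work window
	}
	return canonicalBlocks - freezeThreshold, true
}

func main() {
	final, ok := pinnedFinal(24, 16)
	fmt.Println(final, ok) // 8 true: i.e. chain.SetFinalized(canonblocks[7].Header())
}
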
@@ -27,8 +27,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/ethereum/go-ethereum/ethdb"
-
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core/rawdb"
@@ -2000,13 +1998,6 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
 		config.SnapshotWait = true
 	}
 	config.TriesInMemory = 128
-
-	if err = db.SetupFreezerEnv(&ethdb.FreezerEnv{
-		ChainCfg:         gspec.Config,
-		BlobExtraReserve: params.DefaultExtraReserveForBlobRequests,
-	}); err != nil {
-		t.Fatalf("Failed to create chain: %v", err)
-	}
 	chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
 	if err != nil {
 		t.Fatalf("Failed to create chain: %v", err)
@@ -2054,10 +2045,14 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
 
 	// Force run a freeze cycle
 	type freezer interface {
-		Freeze(threshold uint64) error
+		Freeze() error
 		Ancients() (uint64, error)
 	}
-	db.(freezer).Freeze(tt.freezeThreshold)
+	if tt.freezeThreshold < uint64(tt.canonicalBlocks) {
+		final := uint64(tt.canonicalBlocks) - tt.freezeThreshold
+		chain.SetFinalized(canonblocks[int(final)-1].Header())
+	}
+	db.(freezer).Freeze()
 
 	// Set the simulated pivot block
 	if tt.pivotBlock != nil {
@@ -974,7 +974,7 @@ func testFastVsFullChains(t *testing.T, scheme string) {
 		t.Fatalf("failed to insert receipt %d: %v", n, err)
 	}
 	// Freezer style fast import the chain.
-	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
+	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
@@ -1069,7 +1069,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
 
 	// makeDb creates a db instance for testing.
 	makeDb := func() ethdb.Database {
-		db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
+		db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
 		if err != nil {
 			t.Fatalf("failed to create temp freezer db: %v", err)
 		}
@@ -1957,7 +1957,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) {
 	competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })
 
 	// Import the shared chain and the original canonical one
-	db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
+	db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
 	defer db.Close()
 
 	chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
@@ -2026,7 +2026,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) {
 	_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)
 
 	// Import the chain as a ancient-first node and ensure all pointers are updated
-	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
+	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
@@ -2097,7 +2097,7 @@ func testInsertReceiptChainRollback(t *testing.T, scheme string) {
 	}
 
 	// Set up a BlockChain that uses the ancient store.
-	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
+	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
@@ -2167,7 +2167,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) {
 	})
 
 	// Import the canonical chain
-	diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
+	diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
 	defer diskdb.Close()
 
 	chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
@@ -2384,7 +2384,7 @@ func testInsertKnownChainData(t *testing.T, typ string, scheme string) {
 		b.OffsetTime(-9) // A higher difficulty
 	})
 	// Import the shared chain and the original canonical one
-	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
+	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
@@ -2555,7 +2555,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i
 		}
 	})
 	// Import the shared chain and the original canonical one
-	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
+	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
@@ -3858,7 +3858,7 @@ func testSetCanonical(t *testing.T, scheme string) {
 		}
 		gen.AddTx(tx)
 	})
-	diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
+	diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
 	defer diskdb.Close()
 
 	chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
@@ -4483,7 +4483,7 @@ func (c *mockParlia) CalcDifficulty(chain consensus.ChainHeaderReader, time uint
 func TestParliaBlobFeeReward(t *testing.T) {
 	// Have N headers in the freezer
 	frdir := t.TempDir()
-	db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false, false)
+	db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create database with ancient backend")
 	}
@@ -227,8 +227,8 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainH
 			// Reorg to the common ancestor if needed (might not exist in light sync mode, skip reorg then)
 			// TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly?
 
-			if rawdb.ReadCanonicalHash(c.chainDb, prevHeader.Number.Uint64()) != prevHash {
-				if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
+			if rawdb.ReadCanonicalHash(c.chainDb.BlockStore(), prevHeader.Number.Uint64()) != prevHash {
+				if h := rawdb.FindCommonAncestor(c.chainDb.BlockStore(), prevHeader, header); h != nil {
 					c.newHead(h.Number.Uint64(), true)
 				}
 			}
@@ -486,7 +486,7 @@ func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engi
 	if cm.config.Parlia != nil {
 		header.WithdrawalsHash = &types.EmptyWithdrawalsHash
 	}
-	if cm.config.Parlia == nil || cm.config.IsBohr(header.Number, header.Time) {
+	if cm.config.Parlia == nil {
 		header.ParentBeaconRoot = new(common.Hash)
 	}
 }
@@ -621,10 +621,6 @@ func (cm *chainMaker) GetHighestVerifiedHeader() *types.Header {
 	panic("not supported")
 }
 
-func (cm *chainMaker) GetVerifiedBlockByHash(hash common.Hash) *types.Header {
-	return cm.GetHeaderByHash(hash)
-}
-
 func (cm *chainMaker) ChasingHead() *types.Header {
 	panic("not supported")
 }
@@ -365,10 +365,6 @@ func (r *mockDAHeaderReader) GetHighestVerifiedHeader() *types.Header {
 	panic("not supported")
 }
 
-func (r *mockDAHeaderReader) GetVerifiedBlockByHash(hash common.Hash) *types.Header {
-	panic("not supported")
-}
-
 func createMockDATx(config *params.ChainConfig, sidecar *types.BlobTxSidecar) *types.Transaction {
 	if sidecar == nil {
 		tx := &types.DynamicFeeTx{
@@ -50,5 +50,3 @@ type ChainSideEvent struct {
 }
 
 type ChainHeadEvent struct{ Block *types.Block }
-
-type HighestVerifiedBlockEvent struct{ Header *types.Header }

@@ -86,16 +86,9 @@ func (f *ForkChoice) ReorgNeeded(current *types.Header, extern *types.Header) (b
 		localTD  = f.chain.GetTd(current.Hash(), current.Number.Uint64())
 		externTd = f.chain.GetTd(extern.Hash(), extern.Number.Uint64())
 	)
-	if localTD == nil {
+	if localTD == nil || externTd == nil {
 		return false, errors.New("missing td")
 	}
-	if externTd == nil {
-		ptd := f.chain.GetTd(extern.ParentHash, extern.Number.Uint64()-1)
-		if ptd == nil {
-			return false, consensus.ErrUnknownAncestor
-		}
-		externTd = new(big.Int).Add(ptd, extern.Difficulty)
-	}
 	// Accept the new header as the chain head if the transition
 	// is already triggered. We assume all the headers after the
 	// transition come from the trusted consensus layer.
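The deleted branch derived a missing extern TD from the parent instead of erroring out. A stdlib-only sketch of that fallback in isolation (the function name and error text are illustrative):

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// externTD returns the stored TD when available; otherwise it derives it
// from the parent's TD plus the header's own difficulty, mirroring the
// removed code path above.
func externTD(stored, parentTD, difficulty *big.Int) (*big.Int, error) {
	if stored != nil {
		return stored, nil
	}
	if parentTD == nil {
		return nil, errors.New("unknown ancestor")
	}
	return new(big.Int).Add(parentTD, difficulty), nil
}

func main() {
	td, err := externTD(nil, big.NewInt(100), big.NewInt(2))
	fmt.Println(td, err) // 102 <nil>
}
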
@@ -121,19 +114,7 @@ func (f *ForkChoice) ReorgNeeded(current *types.Header, extern *types.Header) (b
 		if f.preserve != nil {
 			currentPreserve, externPreserve = f.preserve(current), f.preserve(extern)
 		}
-		choiceRules := func() bool {
-			if extern.Time == current.Time {
-				doubleSign := (extern.Coinbase == current.Coinbase)
-				if doubleSign {
-					return extern.Hash().Cmp(current.Hash()) < 0
-				} else {
-					return f.rand.Float64() < 0.5
-				}
-			} else {
-				return extern.Time < current.Time
-			}
-		}
-		reorg = !currentPreserve && (externPreserve || choiceRules())
+		reorg = !currentPreserve && (externPreserve || f.rand.Float64() < 0.5)
 	}
 	return reorg, nil
 }

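The deleted choiceRules closure is the interesting part of this hunk: it made the equal-TD tie-break deterministic when two blocks share a timestamp and coinbase (a potential double sign). A self-contained sketch of those rules with pared-down stand-in types (block and preferExtern are illustrative names, not the repo's):

package main

import (
	"bytes"
	"fmt"
	"math/rand"
)

// block carries just the fields the tie-break inspects.
type block struct {
	time     uint64
	coinbase [20]byte
	hash     [32]byte
}

// preferExtern reproduces the removed rules: an older timestamp wins; on
// equal timestamps a same-coinbase pair is settled by lower hash, otherwise
// by a coin flip.
func preferExtern(extern, current block, rnd *rand.Rand) bool {
	if extern.time == current.time {
		if extern.coinbase == current.coinbase {
			return bytes.Compare(extern.hash[:], current.hash[:]) < 0
		}
		return rnd.Float64() < 0.5
	}
	return extern.time < current.time
}

func main() {
	rnd := rand.New(rand.NewSource(1))
	a := block{time: 10, hash: [32]byte{1}}
	b := block{time: 10, hash: [32]byte{2}}
	fmt.Println(preferExtern(a, b, rnd)) // true: same coinbase, lower hash wins
}
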
@@ -216,8 +216,7 @@ func (e *GenesisMismatchError) Error() string {
 // ChainOverrides contains the changes to chain config
 // Typically, these modifications involve hardforks that are not enabled on the BSC mainnet, intended for testing purposes.
 type ChainOverrides struct {
-	OverridePassedForkTime *uint64
-	OverrideBohr           *uint64
+	OverrideCancun *uint64
 	OverrideVerkle *uint64
 }
 
@@ -244,17 +243,8 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g
 	}
 	applyOverrides := func(config *params.ChainConfig) {
 		if config != nil {
-			if overrides != nil && overrides.OverridePassedForkTime != nil {
-				config.ShanghaiTime = overrides.OverridePassedForkTime
-				config.KeplerTime = overrides.OverridePassedForkTime
-				config.FeynmanTime = overrides.OverridePassedForkTime
-				config.FeynmanFixTime = overrides.OverridePassedForkTime
-				config.CancunTime = overrides.OverridePassedForkTime
-				config.HaberTime = overrides.OverridePassedForkTime
-				config.HaberFixTime = overrides.OverridePassedForkTime
-			}
-			if overrides != nil && overrides.OverrideBohr != nil {
-				config.BohrTime = overrides.OverrideBohr
+			if overrides != nil && overrides.OverrideCancun != nil {
+				config.CancunTime = overrides.OverrideCancun
 			}
 			if overrides != nil && overrides.OverrideVerkle != nil {
 				config.VerkleTime = overrides.OverrideVerkle
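The left side's one-flag override rolls every already-passed BSC fork to a single timestamp. A minimal sketch of that apply step with stand-in structs; only the touched fields are kept, and the timestamp value is illustrative:

package main

import "fmt"

// chainConfig is a pared-down stand-in for params.ChainConfig.
type chainConfig struct {
	ShanghaiTime, KeplerTime, FeynmanTime, FeynmanFixTime *uint64
	CancunTime, HaberTime, HaberFixTime, BohrTime         *uint64
}

type chainOverrides struct {
	OverridePassedForkTime *uint64
	OverrideBohr           *uint64
}

// apply mirrors the left-hand side of the hunk above: one flag moves every
// already-passed fork to the same timestamp, and Bohr keeps its own knob.
func apply(cfg *chainConfig, ov *chainOverrides) {
	if cfg == nil || ov == nil {
		return
	}
	if t := ov.OverridePassedForkTime; t != nil {
		cfg.ShanghaiTime, cfg.KeplerTime = t, t
		cfg.FeynmanTime, cfg.FeynmanFixTime = t, t
		cfg.CancunTime, cfg.HaberTime, cfg.HaberFixTime = t, t, t
	}
	if ov.OverrideBohr != nil {
		cfg.BohrTime = ov.OverrideBohr
	}
}

func main() {
	ts := uint64(1718863500)
	cfg := new(chainConfig)
	apply(cfg, &chainOverrides{OverridePassedForkTime: &ts})
	fmt.Println(*cfg.CancunTime == ts) // true: all passed forks share one time
}
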
@@ -454,7 +444,7 @@ func (g *Genesis) ToBlock() *types.Block {
 		// EIP-4788: The parentBeaconBlockRoot of the genesis block is always
 		// the zero hash. This is because the genesis block does not have a parent
 		// by definition.
-		if conf.Parlia == nil || conf.IsBohr(num, g.Timestamp) {
+		if conf.Parlia == nil {
 			head.ParentBeaconRoot = new(common.Hash)
 		}
 
@@ -500,7 +490,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Blo
 	rawdb.WriteReceipts(db.BlockStore(), block.Hash(), block.NumberU64(), nil)
 	rawdb.WriteCanonicalHash(db.BlockStore(), block.Hash(), block.NumberU64())
 	rawdb.WriteHeadBlockHash(db.BlockStore(), block.Hash())
-	rawdb.WriteHeadFastBlockHash(db.BlockStore(), block.Hash())
+	rawdb.WriteHeadFastBlockHash(db, block.Hash())
 	rawdb.WriteHeadHeaderHash(db.BlockStore(), block.Hash())
 	rawdb.WriteChainConfig(db, block.Hash(), config)
 	return block, nil
@@ -97,7 +97,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
 		return nil, ErrNoGenesis
 	}
 	hc.currentHeader.Store(hc.genesisHeader)
-	if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) {
+	if head := rawdb.ReadHeadBlockHash(chainDb.BlockStore()); head != (common.Hash{}) {
 		if chead := hc.GetHeaderByHash(head); chead != nil {
 			hc.currentHeader.Store(chead)
 		}
@@ -144,7 +144,7 @@ func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 {
 	if cached, ok := hc.numberCache.Get(hash); ok {
 		return &cached
 	}
-	number := rawdb.ReadHeaderNumber(hc.chainDb, hash)
+	number := rawdb.ReadHeaderNumber(hc.chainDb.BlockStore(), hash)
 	if number != nil {
 		hc.numberCache.Add(hash, *number)
 	}
@@ -436,10 +436,6 @@ func (hc *HeaderChain) GetHighestVerifiedHeader() *types.Header {
 	return nil
 }
 
-func (hc *HeaderChain) GetVerifiedBlockByHash(hash common.Hash) *types.Header {
-	return hc.GetHeaderByHash(hash)
-}
-
 func (hc *HeaderChain) ChasingHead() *types.Header {
 	return nil
 }
@@ -672,7 +668,7 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat
 	// first then remove the relative data from the database.
 	//
 	// Update head first(head fast block, head full block) before deleting the data.
-	markerBatch := hc.chainDb.BlockStore().NewBatch()
+	markerBatch := hc.chainDb.NewBatch()
 	if updateFn != nil {
 		newHead, force := updateFn(markerBatch, parent)
 		if force && ((headTime > 0 && newHead.Time < headTime) || (headTime == 0 && newHead.Number.Uint64() < headBlock)) {
@@ -681,7 +677,7 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat
 		}
 	}
 	// Update head header then.
-	rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
+	rawdb.WriteHeadHeaderHash(hc.chainDb.BlockStore(), parentHash)
 	if err := markerBatch.Write(); err != nil {
 		log.Crit("Failed to update chain markers", "error", err)
 	}
@@ -695,7 +691,7 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat
 		// we don't end up with dangling daps in the database
 		var nums []uint64
 		if origin {
-			for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb.BlockStore(), n)) > 0; n++ {
+			for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ {
 				nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path
 			}
 			origin = false
@@ -705,7 +701,7 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat
 		// Remove the related data from the database on all sidechains
 		for _, num := range nums {
 			// Gather all the side fork hashes
-			hashes := rawdb.ReadAllHashes(hc.chainDb.BlockStore(), num)
+			hashes := rawdb.ReadAllHashes(hc.chainDb, num)
 			if len(hashes) == 0 {
 				// No hashes in the database whatsoever, probably frozen already
 				hashes = append(hashes, hdr.Hash())
@@ -34,15 +34,6 @@ import (
 	"golang.org/x/exp/slices"
 )
 
-// Support Multi-Database Based on Data Pattern, the Chaindata will be divided into three stores: BlockStore, StateStore, and ChainStore,
-// according to data schema and read/write behavior. When using the following data interfaces, you should take note of the following:
-//
-// 1) Block-Related Data: For CanonicalHash, Header, Body, Td, Receipts, and BlobSidecars, the Write, Delete, and Iterator
-// operations should carefully ensure that the database being used is BlockStore.
-// 2) Meta-Related Data: For HeaderNumber, HeadHeaderHash, HeadBlockHash, HeadFastBlockHash, and FinalizedBlockHash, the
-// Write and Delete operations should carefully ensure that the database being used is BlockStore.
-// 3) Ancient Data: When using a multi-database, Ancient data will use the BlockStore.
-
 // ReadCanonicalHash retrieves the hash assigned to a canonical block number.
 func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
 	var data []byte
@@ -153,8 +144,8 @@ func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int
 }
 
 // ReadHeaderNumber returns the header number assigned to a hash.
-func ReadHeaderNumber(db ethdb.MultiDatabaseReader, hash common.Hash) *uint64 {
-	data, _ := db.BlockStoreReader().Get(headerNumberKey(hash))
+func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
+	data, _ := db.Get(headerNumberKey(hash))
 	if len(data) != 8 {
 		return nil
 	}
@@ -179,8 +170,8 @@ func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
 }
 
 // ReadHeadHeaderHash retrieves the hash of the current canonical head header.
-func ReadHeadHeaderHash(db ethdb.MultiDatabaseReader) common.Hash {
-	data, _ := db.BlockStoreReader().Get(headHeaderKey)
+func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
+	data, _ := db.Get(headHeaderKey)
 	if len(data) == 0 {
 		return common.Hash{}
 	}
@@ -195,8 +186,8 @@ func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
 }
 
 // ReadHeadBlockHash retrieves the hash of the current canonical head block.
-func ReadHeadBlockHash(db ethdb.MultiDatabaseReader) common.Hash {
-	data, _ := db.BlockStoreReader().Get(headBlockKey)
+func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
+	data, _ := db.Get(headBlockKey)
 	if len(data) == 0 {
 		return common.Hash{}
 	}
@@ -211,8 +202,8 @@ func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
 }
 
 // ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
-func ReadHeadFastBlockHash(db ethdb.MultiDatabaseReader) common.Hash {
-	data, _ := db.BlockStoreReader().Get(headFastBlockKey)
+func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
+	data, _ := db.Get(headFastBlockKey)
 	if len(data) == 0 {
 		return common.Hash{}
 	}
@@ -227,8 +218,8 @@ func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
 }
 
 // ReadFinalizedBlockHash retrieves the hash of the finalized block.
-func ReadFinalizedBlockHash(db ethdb.MultiDatabaseReader) common.Hash {
-	data, _ := db.BlockStoreReader().Get(headFinalizedBlockKey)
+func ReadFinalizedBlockHash(db ethdb.KeyValueReader) common.Hash {
+	data, _ := db.Get(headFinalizedBlockKey)
 	if len(data) == 0 {
 		return common.Hash{}
 	}
@@ -306,7 +297,7 @@ func ReadHeaderRange(db ethdb.Reader, number uint64, count uint64) []rlp.RawValu
 		// It's ok to request block 0, 1 item
 		count = number + 1
 	}
-	limit, _ := db.BlockStoreReader().Ancients()
+	limit, _ := db.Ancients()
 	// First read live blocks
 	if i >= limit {
 		// If we need to read live blocks, we need to figure out the hash first
@@ -326,7 +317,7 @@ func ReadHeaderRange(db ethdb.Reader, number uint64, count uint64) []rlp.RawValu
 		return rlpHeaders
 	}
 	// read remaining from ancients, cap at 2M
-	data, err := db.BlockStoreReader().AncientRange(ChainFreezerHeaderTable, i+1-count, count, 2*1024*1024)
+	data, err := db.AncientRange(ChainFreezerHeaderTable, i+1-count, count, 2*1024*1024)
 	if err != nil {
 		log.Error("Failed to read headers from freezer", "err", err)
 		return rlpHeaders
@@ -477,7 +468,7 @@ func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
 		// Block is not in ancients, read from leveldb by hash and number.
 		// Note: ReadCanonicalHash cannot be used here because it also
 		// calls ReadAncients internally.
-		hash, _ := db.BlockStoreReader().Get(headerHashKey(number))
+		hash, _ := db.Get(headerHashKey(number))
 		data, _ = db.BlockStoreReader().Get(blockBodyKey(number, common.BytesToHash(hash)))
 		return nil
 	})
@@ -525,13 +516,6 @@ func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *t
 	WriteBodyRLP(db, hash, number, data)
 }
 
-// DeleteBody removes all block body data associated with a hash.
-func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
-	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
-		log.Crit("Failed to delete block body", "err", err)
-	}
-}
-
 func WriteDiffLayer(db ethdb.KeyValueWriter, hash common.Hash, layer *types.DiffLayer) {
 	data, err := rlp.EncodeToBytes(layer)
 	if err != nil {
@@ -570,6 +554,13 @@ func DeleteDiffLayer(db ethdb.KeyValueWriter, blockHash common.Hash) {
 	}
 }
 
+// DeleteBody removes all block body data associated with a hash.
+func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
+		log.Crit("Failed to delete block body", "err", err)
+	}
+}
+
 // ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
 func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
 	var data []byte
@@ -893,7 +884,7 @@ func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts
 // ReadBlobSidecarsRLP retrieves all the transaction blobs belonging to a block in RLP encoding.
 func ReadBlobSidecarsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
 	var data []byte
-	db.BlockStoreReader().ReadAncients(func(reader ethdb.AncientReaderOp) error {
+	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
 		// Check if the data is in ancients
 		if isCanon(reader, number, hash) {
 			data, _ = reader.Ancient(ChainFreezerBlobSidecarTable, number)
@@ -1102,24 +1093,24 @@ func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
 
 // ReadHeadHeader returns the current canonical head header.
 func ReadHeadHeader(db ethdb.Reader) *types.Header {
-	headHeaderHash := ReadHeadHeaderHash(db)
+	headHeaderHash := ReadHeadHeaderHash(db.BlockStoreReader())
 	if headHeaderHash == (common.Hash{}) {
 		return nil
 	}
-	headHeaderNumber := ReadHeaderNumber(db, headHeaderHash)
+	headHeaderNumber := ReadHeaderNumber(db.BlockStoreReader(), headHeaderHash)
 	if headHeaderNumber == nil {
 		return nil
 	}
-	return ReadHeader(db, headHeaderHash, *headHeaderNumber)
+	return ReadHeader(db.BlockStoreReader(), headHeaderHash, *headHeaderNumber)
 }
 
 // ReadHeadBlock returns the current canonical head block.
 func ReadHeadBlock(db ethdb.Reader) *types.Block {
-	headBlockHash := ReadHeadBlockHash(db)
+	headBlockHash := ReadHeadBlockHash(db.BlockStoreReader())
 	if headBlockHash == (common.Hash{}) {
 		return nil
 	}
-	headBlockNumber := ReadHeaderNumber(db, headBlockHash)
+	headBlockNumber := ReadHeaderNumber(db.BlockStoreReader(), headBlockHash)
 	if headBlockNumber == nil {
 		return nil
 	}

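These accessors all route head markers through a dedicated block store when handed a multi-database. A toy sketch of that routing idea; the interfaces and key names below are simplified stand-ins, not the ethdb API:

package main

import "fmt"

// kvReader is a toy stand-in for a key-value Get.
type kvReader interface {
	Get(key string) (string, bool)
}

type store map[string]string

func (s store) Get(k string) (string, bool) { v, ok := s[k]; return v, ok }

// multiDB sketches the multi-database split: head markers and headers live
// in a dedicated block store, so accessors go through BlockStoreReader()
// rather than the combined handle.
type multiDB struct {
	chain store
	block store
}

func (m multiDB) Get(k string) (string, bool) { return m.chain.Get(k) }
func (m multiDB) BlockStoreReader() kvReader  { return m.block }

func main() {
	db := multiDB{chain: store{}, block: store{"LastHeader": "0xabc"}}
	if hash, ok := db.BlockStoreReader().Get("LastHeader"); ok {
		fmt.Println(hash) // resolved from the block store, as ReadHeadHeader does
	}
}
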
@@ -518,7 +518,7 @@ func checkBlobSidecarsRLP(have, want types.BlobSidecars) error {
 func TestAncientStorage(t *testing.T) {
 	// Freezer style fast import the chain.
 	frdir := t.TempDir()
-	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false, false)
+	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create database with ancient backend")
 	}
@@ -657,7 +657,7 @@ func TestHashesInRange(t *testing.T) {
 func BenchmarkWriteAncientBlocks(b *testing.B) {
 	// Open freezer database.
 	frdir := b.TempDir()
-	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false, false)
+	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false)
 	if err != nil {
 		b.Fatalf("failed to create database with ancient backend")
 	}
@@ -1001,7 +1001,7 @@ func TestHeadersRLPStorage(t *testing.T) {
 	// Have N headers in the freezer
 	frdir := t.TempDir()
 
-	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false, false)
+	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create database with ancient backend")
 	}
@@ -42,7 +42,7 @@ func ReadTxLookupEntry(db ethdb.Reader, hash common.Hash) *uint64 {
 	}
 	// Database v4-v5 tx lookup format just stores the hash
 	if len(data) == common.HashLength {
-		return ReadHeaderNumber(db, common.BytesToHash(data))
+		return ReadHeaderNumber(db.BlockStoreReader(), common.BytesToHash(data))
 	}
 	// Finally try database v3 tx lookup format
 	var entry LegacyTxLookupEntry
@@ -18,10 +18,12 @@ package rawdb
 
 import (
 	"encoding/binary"
+	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
 )
 
 // ReadSnapshotDisabled retrieves if the snapshot maintenance is disabled.
@@ -74,6 +76,10 @@ func DeleteSnapshotRoot(db ethdb.KeyValueWriter) {
 
 // ReadAccountSnapshot retrieves the snapshot entry of an account trie leaf.
 func ReadAccountSnapshot(db ethdb.KeyValueReader, hash common.Hash) []byte {
+	if metrics.EnabledExpensive {
+		start := time.Now()
+		defer func() { rawdbGetAccountSnapNodeTimer.UpdateSince(start) }()
+	}
 	data, _ := db.Get(accountSnapshotKey(hash))
 	return data
 }
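The added instrumentation in these rawdb readers follows one pattern: guard on metrics.EnabledExpensive, then defer a timer update so the clock only runs when expensive metrics are on. A self-contained sketch of that guard-then-defer shape; the flag and timer below are stubs, not the metrics package:

package main

import (
	"fmt"
	"time"
)

// enabledExpensive and timer stand in for metrics.EnabledExpensive and the
// rawdb timers.
var enabledExpensive = true

type timer struct{ total time.Duration }

func (t *timer) UpdateSince(start time.Time) { t.total += time.Since(start) }

var getAccountSnapNodeTimer timer

func readAccountSnapshot(get func() []byte) []byte {
	if enabledExpensive {
		start := time.Now()
		// Registered only when enabled; runs when the function returns.
		defer func() { getAccountSnapNodeTimer.UpdateSince(start) }()
	}
	return get()
}

func main() {
	readAccountSnapshot(func() []byte { return nil })
	fmt.Printf("timed read took %v\n", getAccountSnapNodeTimer.total)
}
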
@@ -94,6 +100,10 @@ func DeleteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash) {
 
 // ReadStorageSnapshot retrieves the snapshot entry of an storage trie leaf.
 func ReadStorageSnapshot(db ethdb.KeyValueReader, accountHash, storageHash common.Hash) []byte {
+	if metrics.EnabledExpensive {
+		start := time.Now()
+		defer func() { rawdbGetStorageSnapNodeTimer.UpdateSince(start) }()
+	}
 	data, _ := db.Get(storageSnapshotKey(accountHash, storageHash))
 	return data
 }
@@ -19,11 +19,13 @@ package rawdb
 import (
 	"fmt"
 	"sync"
+	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
 	"golang.org/x/crypto/sha3"
 )
 
@@ -68,6 +70,10 @@ func (h *hasher) release() {
 // ReadAccountTrieNode retrieves the account trie node and the associated node
 // hash with the specified node path.
 func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.Hash) {
+	if metrics.EnabledExpensive {
+		start := time.Now()
+		defer func() { rawdbGetAccountTrieNodeTimer.UpdateSince(start) }()
+	}
 	data, err := db.Get(accountTrieNodeKey(path))
 	if err != nil {
 		return nil, common.Hash{}
@@ -116,6 +122,10 @@ func DeleteAccountTrieNode(db ethdb.KeyValueWriter, path []byte) {
 // ReadStorageTrieNode retrieves the storage trie node and the associated node
 // hash with the specified node path.
 func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) ([]byte, common.Hash) {
+	if metrics.EnabledExpensive {
+		start := time.Now()
+		defer func() { rawdbGetStorageTrieNodeTimer.UpdateSince(start) }()
+	}
 	data, err := db.Get(storageTrieNodeKey(accountHash, path))
 	if err != nil {
 		return nil, common.Hash{}
@@ -218,7 +228,22 @@ func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash c
 func ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) []byte {
 	switch scheme {
 	case HashScheme:
-		return ReadLegacyTrieNode(db, hash)
+		var (
+			blob  []byte
+			start time.Time
+		)
+		start = time.Now()
+		blob = ReadLegacyTrieNode(db, hash)
+		if owner == (common.Hash{}) {
+			if metrics.EnabledExpensive {
+				rawdbGetAccountTrieNodeTimer.UpdateSince(start)
+			}
+		} else {
+			if metrics.EnabledExpensive {
+				rawdbGetStorageTrieNodeTimer.UpdateSince(start)
+			}
+		}
+		return blob
 	case PathScheme:
 		var (
 			blob []byte

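The HashScheme branch above also attributes read latency by trie owner: a zero owner hash means an account-trie node, anything else a storage-trie node. A stdlib sketch of just that attribution, with stub timers and illustrative names:

package main

import (
	"fmt"
	"time"
)

// Stub timers standing in for rawdbGetAccountTrieNodeTimer and
// rawdbGetStorageTrieNodeTimer.
type timer struct{ total time.Duration }

func (t *timer) UpdateSince(start time.Time) { t.total += time.Since(start) }

var accountTimer, storageTimer timer

// readLegacyTrieNode shows only the attribution logic: time the read, then
// charge the latency to the timer matching the owner hash.
func readLegacyTrieNode(owner [32]byte, read func() []byte) []byte {
	start := time.Now()
	blob := read()
	if owner == ([32]byte{}) {
		accountTimer.UpdateSince(start) // account trie: zero owner
	} else {
		storageTimer.UpdateSince(start) // storage trie: non-zero owner
	}
	return blob
}

func main() {
	readLegacyTrieNode([32]byte{}, func() []byte { return nil })
	readLegacyTrieNode([32]byte{1}, func() []byte { return nil })
	fmt.Println(accountTimer.total, storageTimer.total)
}
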
@@ -18,8 +18,6 @@ package rawdb
 
 import (
 	"fmt"
-	"io"
-	"os"
 	"path/filepath"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -100,18 +98,6 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
 			if err != nil {
 				return nil, err
 			}
-
-			file, err := os.Open(filepath.Join(datadir, StateFreezerName))
-			if err != nil {
-				return nil, err
-			}
-			defer file.Close()
-			// if state freezer folder has been pruned, there is no need for inspection
-			_, err = file.Readdirnames(1)
-			if err == io.EOF {
-				continue
-			}
-
 			f, err := NewStateFreezer(datadir, true, 0)
 			if err != nil {
 				return nil, err

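The removed lines skipped state-freezer inspection when the folder had been pruned, probing emptiness via Readdirnames(1) and io.EOF. That probe in isolation, as a runnable sketch:

package main

import (
	"fmt"
	"io"
	"os"
)

// emptyDir reports whether dir has no entries, using the same
// Readdirnames(1) / io.EOF probe the deleted code used.
func emptyDir(dir string) (bool, error) {
	f, err := os.Open(dir)
	if err != nil {
		return false, err
	}
	defer f.Close()
	if _, err := f.Readdirnames(1); err == io.EOF {
		return true, nil
	} else if err != nil {
		return false, err
	}
	return false, nil
}

func main() {
	dir, _ := os.MkdirTemp("", "freezer")
	defer os.RemoveAll(dir)
	empty, _ := emptyDir(dir)
	fmt.Println(empty) // true: a freshly created temp dir has no entries
}
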
@@ -24,12 +24,12 @@ import (
 	"sync/atomic"
 	"time"
 
+	"github.com/ethereum/go-ethereum/rlp"
+
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/rlp"
 )
 
 const (
@@ -51,32 +51,25 @@ var (
 // The background thread will keep moving ancient chain segments from key-value
 // database to flat files for saving space on live database.
 type chainFreezer struct {
-	threshold atomic.Uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
-
 	*Freezer
 	quit    chan struct{}
 	wg      sync.WaitGroup
 	trigger chan chan struct{} // Manual blocking freeze trigger, test determinism
 
 	freezeEnv atomic.Value
-	waitEnvTimes int
-
-	multiDatabase bool
 }
 
 // newChainFreezer initializes the freezer for ancient chain data.
-func newChainFreezer(datadir string, namespace string, readonly bool, offset uint64, multiDatabase bool) (*chainFreezer, error) {
+func newChainFreezer(datadir string, namespace string, readonly bool, offset uint64) (*chainFreezer, error) {
 	freezer, err := NewChainFreezer(datadir, namespace, readonly, offset)
 	if err != nil {
 		return nil, err
 	}
-	cf := chainFreezer{
+	return &chainFreezer{
 		Freezer: freezer,
 		quit:    make(chan struct{}),
 		trigger: make(chan chan struct{}),
-	}
-	cf.threshold.Store(params.FullImmutabilityThreshold)
-	return &cf, nil
+	}, nil
 }
 
 // Close closes the chain freezer instance and terminates the background thread.
@@ -92,7 +85,7 @@ func (f *chainFreezer) Close() error {
 
 // readHeadNumber returns the number of chain head block. 0 is returned if the
 // block is unknown or not available yet.
-func (f *chainFreezer) readHeadNumber(db ethdb.Reader) uint64 {
+func (f *chainFreezer) readHeadNumber(db ethdb.KeyValueReader) uint64 {
 	hash := ReadHeadBlockHash(db)
 	if hash == (common.Hash{}) {
 		log.Error("Head block is not reachable")
@@ -108,7 +101,7 @@ func (f *chainFreezer) readHeadNumber(db ethdb.Reader) uint64 {
 
 // readFinalizedNumber returns the number of finalized block. 0 is returned
 // if the block is unknown or not available yet.
-func (f *chainFreezer) readFinalizedNumber(db ethdb.Reader) uint64 {
+func (f *chainFreezer) readFinalizedNumber(db ethdb.KeyValueReader) uint64 {
 	hash := ReadFinalizedBlockHash(db)
 	if hash == (common.Hash{}) {
 		return 0
@@ -123,7 +116,7 @@ func (f *chainFreezer) readFinalizedNumber(db ethdb.Reader) uint64 {
 
 // freezeThreshold returns the threshold for chain freezing. It's determined
 // by formula: max(finality, HEAD-params.FullImmutabilityThreshold).
-func (f *chainFreezer) freezeThreshold(db ethdb.Reader) (uint64, error) {
+func (f *chainFreezer) freezeThreshold(db ethdb.KeyValueReader) (uint64, error) {
 	var (
 		head  = f.readHeadNumber(db)
 		final = f.readFinalizedNumber(db)
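The comment above states the freezing rule: threshold = max(finality, HEAD - params.FullImmutabilityThreshold). A standalone sketch of that formula with an underflow guard; in go-ethereum params.FullImmutabilityThreshold is 90,000 blocks (the "9W" mentioned in a removed comment below), and blocks numbered below the returned value are considered safe to freeze.

package main

import "fmt"

// freezeThreshold mirrors the documented formula:
// max(finality, head-immutabilityThreshold), guarding against underflow.
func freezeThreshold(head, final, immutabilityThreshold uint64) uint64 {
	byDepth := uint64(0)
	if head > immutabilityThreshold {
		byDepth = head - immutabilityThreshold
	}
	if final > byDepth {
		return final
	}
	return byDepth
}

func main() {
	// With a finalized block ahead of HEAD-90000, finality wins.
	fmt.Println(freezeThreshold(1_000_000, 999_900, 90_000)) // 999900
	// With no finality information (0), the depth rule applies.
	fmt.Println(freezeThreshold(1_000_000, 0, 90_000)) // 910000
}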
@@ -179,27 +172,26 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
 			}
 		}
 
-		var (
-			frozen    uint64
-			threshold uint64
-			first     uint64 // the first block to freeze
-			last      uint64 // the last block to freeze
-
-			hash   common.Hash
-			number *uint64
-			head   *types.Header
-			err    error
-		)
-
-		// use finalized block as the chain freeze indicator was used for multiDatabase feature, if multiDatabase is false, keep 9W blocks in db
-		if f.multiDatabase {
-			threshold, err = f.freezeThreshold(nfdb)
-			if err != nil {
-				backoff = true
-				log.Debug("Current full block not old enough to freeze", "err", err)
-				continue
-			}
-			frozen = f.frozen.Load()
+		// check freezer env first, it must wait a while when the env is necessary
+		err := f.checkFreezerEnv()
+		if err == missFreezerEnvErr {
+			log.Warn("Freezer need related env, may wait for a while", "err", err)
+			backoff = true
+			continue
+		}
+		if err != nil {
+			log.Error("Freezer check FreezerEnv err", "err", err)
+			backoff = true
+			continue
+		}
+
+		threshold, err := f.freezeThreshold(nfdb)
+		if err != nil {
+			backoff = true
+			log.Debug("Current full block not old enough to freeze", "err", err)
+			continue
+		}
+		frozen := f.frozen.Load()
 
 		// Short circuit if the blocks below threshold are already frozen.
 		if frozen != 0 && frozen-1 >= threshold {
@@ -207,86 +199,15 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
 			log.Debug("Ancient blocks frozen already", "threshold", threshold, "frozen", frozen)
 			continue
 		}
 
-			hash = ReadHeadBlockHash(nfdb)
-			if hash == (common.Hash{}) {
-				log.Debug("Current full block hash unavailable") // new chain, empty database
-				backoff = true
-				continue
-			}
-			number = ReadHeaderNumber(nfdb, hash)
-			if number == nil {
-				log.Error("Current full block number unavailable", "hash", hash)
-				backoff = true
-				continue
-			}
-			head = ReadHeader(nfdb, hash, *number)
-			if head == nil {
-				log.Error("Current full block unavailable", "number", *number, "hash", hash)
-				backoff = true
-				continue
-			}
-
-			first = frozen
-			last = threshold
-			if last-first+1 > freezerBatchLimit {
-				last = freezerBatchLimit + first - 1
-			}
-		} else {
-			// Retrieve the freezing threshold.
-			hash = ReadHeadBlockHash(nfdb)
-			if hash == (common.Hash{}) {
-				log.Debug("Current full block hash unavailable") // new chain, empty database
-				backoff = true
-				continue
-			}
-			number = ReadHeaderNumber(nfdb, hash)
-			threshold = f.threshold.Load()
-			frozen = f.frozen.Load()
-			switch {
-			case number == nil:
-				log.Error("Current full block number unavailable", "hash", hash)
-				backoff = true
-				continue
-
-			case *number < threshold:
-				log.Debug("Current full block not old enough to freeze", "number", *number, "hash", hash, "delay", threshold)
-				backoff = true
-				continue
-
-			case *number-threshold <= frozen:
-				log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", frozen)
-				backoff = true
-				continue
-			}
-			head = ReadHeader(nfdb, hash, *number)
-			if head == nil {
-				log.Error("Current full block unavailable", "number", *number, "hash", hash)
-				backoff = true
-				continue
-			}
-			first, _ = f.Ancients()
-			last = *number - threshold
-			if last-first > freezerBatchLimit {
-				last = first + freezerBatchLimit
-			}
-		}
-
-		// check env first before chain freeze, it must wait when the env is necessary
-		if err := f.checkFreezerEnv(); err != nil {
-			f.waitEnvTimes++
-			if f.waitEnvTimes%30 == 0 {
-				log.Warn("Freezer need related env, may wait for a while, and it's not a issue when non-import block", "err", err)
-				return
-			}
-			backoff = true
-			continue
-		}
-
 		// Seems we have data ready to be frozen, process in usable batches
 		var (
 			start = time.Now()
+			first = frozen    // the first block to freeze
+			last  = threshold // the last block to freeze
 		)
+		if last-first+1 > freezerBatchLimit {
+			last = freezerBatchLimit + first - 1
+		}
+
 		ancients, err := f.freezeRangeWithBlobs(nfdb, first, last)
 		if err != nil {
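After the rewrite above, one freeze cycle covers [first, last] = [frozen, threshold], clamped to at most freezerBatchLimit blocks. A sketch of the clamp with a worked example; the limit value below is an assumption (upstream go-ethereum uses 30,000), since the constant itself is outside this diff.

package main

import "fmt"

const freezerBatchLimit = 30000 // assumed value of the unexported constant

// clampFreezeRange bounds one freeze cycle to at most freezerBatchLimit blocks.
func clampFreezeRange(first, last uint64) (uint64, uint64) {
	if last-first+1 > freezerBatchLimit {
		last = freezerBatchLimit + first - 1
	}
	return first, last
}

func main() {
	// 120,001 candidate blocks get cut down to one 30,000-block batch.
	fmt.Println(clampFreezeRange(0, 120000)) // 0 29999
}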
@@ -374,6 +295,24 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
 		log.Debug("Deep froze chain segment", context...)
 
 		env, _ := f.freezeEnv.Load().(*ethdb.FreezerEnv)
+		hash := ReadHeadBlockHash(nfdb)
+		if hash == (common.Hash{}) {
+			log.Debug("Current full block hash unavailable") // new chain, empty database
+			backoff = true
+			continue
+		}
+		number := ReadHeaderNumber(nfdb, hash)
+		if number == nil {
+			log.Error("Current full block number unavailable", "hash", hash)
+			backoff = true
+			continue
+		}
+		head := ReadHeader(nfdb, hash, *number)
+		if head == nil {
+			log.Error("Current full block unavailable", "number", *number, "hash", hash)
+			backoff = true
+			continue
+		}
 		// try prune blob data after cancun fork
 		if isCancun(env, head.Number, head.Time) {
 			f.tryPruneBlobAncientTable(env, *number)
@@ -545,8 +484,15 @@ func (f *chainFreezer) checkFreezerEnv() error {
 	if exist {
 		return nil
 	}
-	return missFreezerEnvErr
+	blobFrozen, err := f.TableAncients(ChainFreezerBlobSidecarTable)
+	if err != nil {
+		return err
+	}
+	if blobFrozen > 0 {
+		return missFreezerEnvErr
+	}
+	return nil
 }
 
 func isCancun(env *ethdb.FreezerEnv, num *big.Int, time uint64) bool {
 	if env == nil || env.ChainCfg == nil {
@@ -35,16 +35,16 @@ import (
 // injects into the database the block hash->number mappings.
 func InitDatabaseFromFreezer(db ethdb.Database) {
 	// If we can't access the freezer or it's empty, abort
-	frozen, err := db.BlockStore().ItemAmountInAncient()
+	frozen, err := db.ItemAmountInAncient()
 	if err != nil || frozen == 0 {
 		return
 	}
 	var (
-		batch  = db.BlockStore().NewBatch()
+		batch  = db.NewBatch()
 		start  = time.Now()
 		logged = start.Add(-7 * time.Second) // Unindex during import is fast, don't double log
 		hash   common.Hash
-		offset = db.BlockStore().AncientOffSet()
+		offset = db.AncientOffSet()
 	)
 	for i := uint64(0) + offset; i < frozen+offset; i++ {
 		// We read 100K hashes at a time, for a total of 3.2M
@@ -52,7 +52,7 @@ func InitDatabaseFromFreezer(db ethdb.Database) {
 		if i+count > frozen+offset {
 			count = frozen + offset - i
 		}
-		data, err := db.BlockStore().AncientRange(ChainFreezerHashTable, i, count, 32*count)
+		data, err := db.AncientRange(ChainFreezerHashTable, i, count, 32*count)
 		if err != nil {
 			log.Crit("Failed to init database from freezer", "err", err)
 		}
@@ -81,7 +81,7 @@ func InitDatabaseFromFreezer(db ethdb.Database) {
 	batch.Reset()
 
 	WriteHeadHeaderHash(db.BlockStore(), hash)
-	WriteHeadFastBlockHash(db.BlockStore(), hash)
+	WriteHeadFastBlockHash(db, hash)
 	log.Info("Initialized database from freezer", "blocks", frozen, "elapsed", common.PrettyDuration(time.Since(start)))
 }
 
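The chunked reads above pass 32*count as the byte budget to AncientRange: each entry of the hash table is a 32-byte block hash, so count hashes need exactly that many bytes. A hedged sketch of the same scan pattern against the ethdb interface; forEachAncientHash is hypothetical, and the literal "hashes" stands in for the ChainFreezerHashTable constant.

package main

import "github.com/ethereum/go-ethereum/ethdb"

// forEachAncientHash scans `frozen` items starting at `offset`, pulling at
// most 100,000 hashes per AncientRange call with a 32-bytes-per-item budget.
func forEachAncientHash(db ethdb.AncientReaderOp, offset, frozen uint64, fn func([]byte)) error {
	const step = uint64(100_000)
	for i := offset; i < frozen+offset; i += step {
		count := step
		if i+count > frozen+offset {
			count = frozen + offset - i
		}
		hashes, err := db.AncientRange("hashes", i, count, 32*count)
		if err != nil {
			return err
		}
		for _, h := range hashes {
			fn(h)
		}
	}
	return nil
}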
@@ -100,7 +100,7 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool
 		number uint64
 		rlp    rlp.RawValue
 	}
-	if offset := db.BlockStore().AncientOffSet(); offset > from {
+	if offset := db.AncientOffSet(); offset > from {
 		from = offset
 	}
 	if to <= from {
@@ -122,7 +122,7 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool
 		}
 		defer close(rlpCh)
 		for n != end {
-			data := ReadCanonicalBodyRLP(db, n)
+			data := ReadCanonicalBodyRLP(db.BlockStore(), n)
 			// Feed the block to the aggregator, or abort on interrupt
 			select {
 			case rlpCh <- &numberRlp{n, data}:
@@ -187,7 +187,7 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool
 // signal received.
 func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool, report bool) {
 	// short circuit for invalid range
-	if offset := db.BlockStore().AncientOffSet(); offset > from {
+	if offset := db.AncientOffSet(); offset > from {
 		from = offset
 	}
 	if from >= to {
@@ -286,7 +286,7 @@ func indexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, inte
 // signal received.
 func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool, report bool) {
 	// short circuit for invalid range
-	if offset := db.BlockStore().AncientOffSet(); offset > from {
+	if offset := db.AncientOffSet(); offset > from {
 		from = offset
 	}
 	if from >= to {
@@ -61,10 +61,8 @@ func (frdb *freezerdb) BlockStoreReader() ethdb.Reader {
 }
 
 func (frdb *freezerdb) BlockStoreWriter() ethdb.Writer {
-	if frdb.blockStore == nil {
-		return frdb
-	}
-	return frdb.blockStore
+	//TODO implement me
+	panic("implement me")
 }
 
 // AncientDatadir returns the path of root ancient directory.
@@ -118,13 +116,6 @@ func (frdb *freezerdb) StateStore() ethdb.Database {
 	return frdb.stateStore
 }
 
-func (frdb *freezerdb) GetStateStore() ethdb.Database {
-	if frdb.stateStore != nil {
-		return frdb.stateStore
-	}
-	return frdb
-}
-
 func (frdb *freezerdb) SetStateStore(state ethdb.Database) {
 	if frdb.stateStore != nil {
 		frdb.stateStore.Close()
@@ -147,22 +138,13 @@ func (frdb *freezerdb) SetBlockStore(block ethdb.Database) {
 	frdb.blockStore = block
 }
 
-func (frdb *freezerdb) HasSeparateBlockStore() bool {
-	return frdb.blockStore != nil
-}
-
 // Freeze is a helper method used for external testing to trigger and block until
 // a freeze cycle completes, without having to sleep for a minute to trigger the
 // automatic background run.
-func (frdb *freezerdb) Freeze(threshold uint64) error {
+func (frdb *freezerdb) Freeze() error {
 	if frdb.AncientStore.(*chainFreezer).readonly {
 		return errReadOnly
 	}
-	// Set the freezer threshold to a temporary value
-	defer func(old uint64) {
-		frdb.AncientStore.(*chainFreezer).threshold.Store(old)
-	}(frdb.AncientStore.(*chainFreezer).threshold.Load())
-	frdb.AncientStore.(*chainFreezer).threshold.Store(threshold)
 	// Trigger a freeze cycle and block until it's done
 	trigger := make(chan struct{}, 1)
 	frdb.AncientStore.(*chainFreezer).trigger <- trigger
@@ -202,7 +184,7 @@ func (db *nofreezedb) Ancients() (uint64, error) {
 	return 0, errNotSupported
 }
 
-// ItemAmountInAncient returns an error as we don't have a backing chain freezer.
+// Ancients returns an error as we don't have a backing chain freezer.
 func (db *nofreezedb) ItemAmountInAncient() (uint64, error) {
 	return 0, errNotSupported
 }
@@ -263,13 +245,6 @@ func (db *nofreezedb) SetStateStore(state ethdb.Database) {
 	db.stateStore = state
 }
 
-func (db *nofreezedb) GetStateStore() ethdb.Database {
-	if db.stateStore != nil {
-		return db.stateStore
-	}
-	return db
-}
-
 func (db *nofreezedb) StateStoreReader() ethdb.Reader {
 	if db.stateStore != nil {
 		return db.stateStore
@@ -288,10 +263,6 @@ func (db *nofreezedb) SetBlockStore(block ethdb.Database) {
 	db.blockStore = block
 }
 
-func (db *nofreezedb) HasSeparateBlockStore() bool {
-	return db.blockStore != nil
-}
-
 func (db *nofreezedb) BlockStoreReader() ethdb.Reader {
 	if db.blockStore != nil {
 		return db.blockStore
@@ -347,111 +318,6 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
 	return &nofreezedb{KeyValueStore: db}
 }
 
-type emptyfreezedb struct {
-	ethdb.KeyValueStore
-}
-
-// HasAncient returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) HasAncient(kind string, number uint64) (bool, error) {
-	return false, nil
-}
-
-// Ancient returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) Ancient(kind string, number uint64) ([]byte, error) {
-	return nil, nil
-}
-
-// AncientRange returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
-	return nil, nil
-}
-
-// Ancients returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) Ancients() (uint64, error) {
-	return 0, nil
-}
-
-// ItemAmountInAncient returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) ItemAmountInAncient() (uint64, error) {
-	return 0, nil
-}
-
-// Tail returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) Tail() (uint64, error) {
-	return 0, nil
-}
-
-// AncientSize returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) AncientSize(kind string) (uint64, error) {
-	return 0, nil
-}
-
-// ModifyAncients returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
-	return 0, nil
-}
-
-// TruncateHead returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) TruncateHead(items uint64) (uint64, error) {
-	return 0, nil
-}
-
-// TruncateTail returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) TruncateTail(items uint64) (uint64, error) {
-	return 0, nil
-}
-
-// TruncateTableTail returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) TruncateTableTail(kind string, tail uint64) (uint64, error) {
-	return 0, nil
-}
-
-// ResetTable returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) ResetTable(kind string, startAt uint64, onlyEmpty bool) error {
-	return nil
-}
-
-// Sync returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) Sync() error {
-	return nil
-}
-
-func (db *emptyfreezedb) DiffStore() ethdb.KeyValueStore        { return db }
-func (db *emptyfreezedb) SetDiffStore(diff ethdb.KeyValueStore) {}
-func (db *emptyfreezedb) StateStore() ethdb.Database            { return db }
-func (db *emptyfreezedb) GetStateStore() ethdb.Database         { return db }
-func (db *emptyfreezedb) SetStateStore(state ethdb.Database)    {}
-func (db *emptyfreezedb) StateStoreReader() ethdb.Reader        { return db }
-func (db *emptyfreezedb) BlockStore() ethdb.Database            { return db }
-func (db *emptyfreezedb) SetBlockStore(block ethdb.Database)    {}
-func (db *emptyfreezedb) HasSeparateBlockStore() bool           { return false }
-func (db *emptyfreezedb) BlockStoreReader() ethdb.Reader        { return db }
-func (db *emptyfreezedb) BlockStoreWriter() ethdb.Writer        { return db }
-func (db *emptyfreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
-	return nil
-}
-func (db *emptyfreezedb) AncientOffSet() uint64 { return 0 }
-
-// MigrateTable returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
-	return nil
-}
-
-// AncientDatadir returns nil for pruned db that we don't have a backing chain freezer.
-func (db *emptyfreezedb) AncientDatadir() (string, error) {
-	return "", nil
-}
-func (db *emptyfreezedb) SetupFreezerEnv(env *ethdb.FreezerEnv) error {
-	return nil
-}
-
-// NewEmptyFreezeDB is used for CLI such as `geth db inspect` in pruned db that we don't
-// have a backing chain freezer.
-// WARNING: it must be only used in the above case.
-func NewEmptyFreezeDB(db ethdb.KeyValueStore) ethdb.Database {
-	return &emptyfreezedb{KeyValueStore: db}
-}
-
 // NewFreezerDb only create a freezer without statedb.
 func NewFreezerDb(db ethdb.KeyValueStore, frz, namespace string, readonly bool, newOffSet uint64) (*Freezer, error) {
 	// Create the idle freezer instance, this operation should be atomic to avoid mismatch between offset and acientDB.
@@ -492,7 +358,7 @@ func resolveChainFreezerDir(ancient string) string {
 // value data store with a freezer moving immutable chain segments into cold
 // storage. The passed ancient indicates the path of root ancient directory
 // where the chain freezer can be opened.
-func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly, disableFreeze, isLastOffset, pruneAncientData, multiDatabase bool) (ethdb.Database, error) {
+func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly, disableFreeze, isLastOffset, pruneAncientData bool) (ethdb.Database, error) {
 	var offset uint64
 	// The offset of ancientDB should be handled differently in different scenarios.
 	if isLastOffset {
@@ -501,12 +367,6 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
 		offset = ReadOffSetOfCurrentAncientFreezer(db)
 	}
 
-	// This case is used for someone who wants to execute geth db inspect CLI in a pruned db
-	if !disableFreeze && readonly && ReadAncientType(db) == PruneFreezerType {
-		log.Warn("Disk db is pruned, using an empty freezer db for CLI")
-		return NewEmptyFreezeDB(db), nil
-	}
-
 	if pruneAncientData && !disableFreeze && !readonly {
 		frdb, err := newPrunedFreezer(resolveChainFreezerDir(ancient), db, offset)
 		if err != nil {
@@ -534,18 +394,9 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
 	}
 
 	// Create the idle freezer instance
-	frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly, offset, multiDatabase)
-
-	// We are creating the freezerdb here because the validation logic for db and freezer below requires certain interfaces
-	// that need a database type. Therefore, we are pre-creating it for subsequent use.
-	freezerDb := &freezerdb{
-		ancientRoot:    ancient,
-		KeyValueStore:  db,
-		AncientStore:   frdb,
-		AncientFreezer: frdb,
-	}
+	frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly, offset)
 	if err != nil {
-		printChainMetadata(freezerDb)
+		printChainMetadata(db)
 		return nil, err
 	}
 
@@ -581,10 +432,10 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
 		// the freezer and the key-value store.
 		frgenesis, err := frdb.Ancient(ChainFreezerHashTable, 0)
 		if err != nil {
-			printChainMetadata(freezerDb)
+			printChainMetadata(db)
 			return nil, fmt.Errorf("failed to retrieve genesis from ancient %v", err)
 		} else if !bytes.Equal(kvgenesis, frgenesis) {
-			printChainMetadata(freezerDb)
+			printChainMetadata(db)
 			return nil, fmt.Errorf("genesis mismatch: %#x (leveldb) != %#x (ancients)", kvgenesis, frgenesis)
 		}
 		// Key-value store and freezer belong to the same network. Ensure that they
@@ -592,7 +443,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
 		if kvhash, _ := db.Get(headerHashKey(frozen)); len(kvhash) == 0 {
 			// Subsequent header after the freezer limit is missing from the database.
 			// Reject startup if the database has a more recent head.
-			if head := *ReadHeaderNumber(freezerDb, ReadHeadHeaderHash(freezerDb)); head > frozen-1 {
+			if head := *ReadHeaderNumber(db, ReadHeadHeaderHash(db)); head > frozen-1 {
 				// Find the smallest block stored in the key-value store
 				// in range of [frozen, head]
 				var number uint64
@@ -602,7 +453,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
 					}
 				}
 				// We are about to exit on error. Print database metadata before exiting
-				printChainMetadata(freezerDb)
+				printChainMetadata(db)
 				return nil, fmt.Errorf("gap in the chain between ancients [0 - #%d] and leveldb [#%d - #%d] ",
 					frozen-1, number, head)
 			}
@@ -617,11 +468,11 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
 		// store, otherwise we'll end up missing data. We check block #1 to decide
 		// if we froze anything previously or not, but do take care of databases with
 		// only the genesis block.
-		if ReadHeadHeaderHash(freezerDb) != common.BytesToHash(kvgenesis) {
+		if ReadHeadHeaderHash(db) != common.BytesToHash(kvgenesis) {
 			// Key-value store contains more data than the genesis block, make sure we
 			// didn't freeze anything yet.
 			if kvblob, _ := db.Get(headerHashKey(1)); len(kvblob) == 0 {
-				printChainMetadata(freezerDb)
+				printChainMetadata(db)
 				return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path")
 			}
 			// Block #1 is still in the database, we're allowed to init a new freezer
@@ -643,7 +494,12 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
 			frdb.wg.Done()
 		}()
 	}
-	return freezerDb, nil
+	return &freezerdb{
+		ancientRoot:    ancient,
+		KeyValueStore:  db,
+		AncientStore:   frdb,
+		AncientFreezer: frdb,
+	}, nil
 }
 
 // NewMemoryDatabase creates an ephemeral in-memory key-value database without a
@@ -719,8 +575,6 @@ type OpenOptions struct {
 	// Ephemeral means that filesystem sync operations should be avoided: data integrity in the face of
 	// a crash is not important. This option should typically be used in tests.
 	Ephemeral bool
-
-	MultiDataBase bool
 }
 
 // openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble.
@@ -765,13 +619,13 @@ func Open(o OpenOptions) (ethdb.Database, error) {
 	}
 	if ReadAncientType(kvdb) == PruneFreezerType {
 		if !o.PruneAncientData {
-			log.Warn("NOTICE: You're opening a pruned disk db!")
+			log.Warn("Disk db is pruned")
 		}
 	}
 	if len(o.AncientsDirectory) == 0 {
 		return kvdb, nil
 	}
-	frdb, err := NewDatabaseWithFreezer(kvdb, o.AncientsDirectory, o.Namespace, o.ReadOnly, o.DisableFreeze, o.IsLastOffset, o.PruneAncientData, o.MultiDataBase)
+	frdb, err := NewDatabaseWithFreezer(kvdb, o.AncientsDirectory, o.Namespace, o.ReadOnly, o.DisableFreeze, o.IsLastOffset, o.PruneAncientData)
 	if err != nil {
 		kvdb.Close()
 		return nil, err
@@ -894,7 +748,7 @@ func DataTypeByKey(key []byte) DataType {
 			return StateDataType
 		}
 	}
-	for _, meta := range [][]byte{headHeaderKey, headFinalizedBlockKey, headBlockKey, headFastBlockKey} {
+	for _, meta := range [][]byte{headHeaderKey, headFinalizedBlockKey} {
 		if bytes.Equal(key, meta) {
 			return BlockDataType
 		}
@@ -915,7 +769,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 		trieIter = db.StateStore().NewIterator(keyPrefix, nil)
 		defer trieIter.Release()
 	}
-	if db.HasSeparateBlockStore() {
+	if db.BlockStore() != db {
 		blockIter = db.BlockStore().NewIterator(keyPrefix, nil)
 		defer blockIter.Release()
 	}
@@ -1109,7 +963,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 			hashNumPairings.Add(size)
 		default:
 			var accounted bool
-			for _, meta := range [][]byte{headHeaderKey, headFinalizedBlockKey, headBlockKey, headFastBlockKey} {
+			for _, meta := range [][]byte{headHeaderKey, headFinalizedBlockKey} {
 				if bytes.Equal(key, meta) {
 					metadata.Add(size)
 					accounted = true
@@ -1259,7 +1113,7 @@ func DeleteTrieState(db ethdb.Database) error {
 }
 
 // printChainMetadata prints out chain metadata to stderr.
-func printChainMetadata(db ethdb.Reader) {
+func printChainMetadata(db ethdb.KeyValueStore) {
 	fmt.Fprintf(os.Stderr, "Chain metadata\n")
 	for _, v := range ReadChainMetadata(db) {
 		fmt.Fprintf(os.Stderr, "  %s\n", strings.Join(v, ": "))
@@ -1270,7 +1124,7 @@ func printChainMetadata(db ethdb.Reader) {
 // ReadChainMetadata returns a set of key/value pairs that contains information
 // about the database chain status. This can be used for diagnostic purposes
 // when investigating the state of the node.
-func ReadChainMetadata(db ethdb.Reader) [][]string {
+func ReadChainMetadata(db ethdb.KeyValueStore) [][]string {
 	pp := func(val *uint64) string {
 		if val == nil {
 			return "<nil>"
@@ -1292,3 +1146,26 @@ func ReadChainMetadata(db ethdb.KeyValueStore) [][]string {
 	}
 	return data
 }
+
+func ReadChainMetadataFromMultiDatabase(db ethdb.Database) [][]string {
+	pp := func(val *uint64) string {
+		if val == nil {
+			return "<nil>"
+		}
+		return fmt.Sprintf("%d (%#x)", *val, *val)
+	}
+	data := [][]string{
+		{"databaseVersion", pp(ReadDatabaseVersion(db))},
+		{"headBlockHash", fmt.Sprintf("%v", ReadHeadBlockHash(db.BlockStore()))},
+		{"headFastBlockHash", fmt.Sprintf("%v", ReadHeadFastBlockHash(db))},
+		{"headHeaderHash", fmt.Sprintf("%v", ReadHeadHeaderHash(db.BlockStore()))},
+		{"lastPivotNumber", pp(ReadLastPivotNumber(db))},
+		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(ReadSnapshotSyncStatus(db)))},
+		{"snapshotDisabled", fmt.Sprintf("%v", ReadSnapshotDisabled(db))},
+		{"snapshotJournal", fmt.Sprintf("%d bytes", len(ReadSnapshotJournal(db)))},
+		{"snapshotRecoveryNumber", pp(ReadSnapshotRecoveryNumber(db))},
+		{"snapshotRoot", fmt.Sprintf("%v", ReadSnapshotRoot(db))},
+		{"txIndexTail", pp(ReadTxIndexTail(db))},
+	}
+	return data
+}
@@ -239,7 +239,7 @@ func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) {
 // - if maxBytes is not specified, 'count' items will be returned if they are present.
 func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
 	if table := f.tables[kind]; table != nil {
-		return table.RetrieveItems(start-f.offset, count, maxBytes)
+		return table.RetrieveItems(start, count, maxBytes)
 	}
 	return nil, errUnknownTable
 }
@@ -252,7 +252,7 @@ func (f *Freezer) Ancients() (uint64, error) {
 func (f *Freezer) TableAncients(kind string) (uint64, error) {
 	f.writeLock.RLock()
 	defer f.writeLock.RUnlock()
-	return f.tables[kind].items.Load() + f.offset, nil
+	return f.tables[kind].items.Load(), nil
 }
 
 // ItemAmountInAncient returns the actual length of current ancientDB.
@@ -541,6 +541,41 @@ func gcKvStore(db ethdb.KeyValueStore, ancients []common.Hash, first uint64, fro
 	}
 	batch.Reset()
 
+	// Step into the future and delete and dangling side chains
+	if frozen > 0 {
+		tip := frozen
+		nfdb := &nofreezedb{KeyValueStore: db}
+		for len(dangling) > 0 {
+			drop := make(map[common.Hash]struct{})
+			for _, hash := range dangling {
+				log.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash)
+				drop[hash] = struct{}{}
+			}
+			children := ReadAllHashes(db, tip)
+			for i := 0; i < len(children); i++ {
+				// Dig up the child and ensure it's dangling
+				child := ReadHeader(nfdb, children[i], tip)
+				if child == nil {
+					log.Error("Missing dangling header", "number", tip, "hash", children[i])
+					continue
+				}
+				if _, ok := drop[child.ParentHash]; !ok {
+					children = append(children[:i], children[i+1:]...)
+					i--
+					continue
+				}
+				// Delete all block data associated with the child
+				log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash)
+				DeleteBlock(batch, children[i], tip)
+			}
+			dangling = children
+			tip++
+		}
+		if err := batch.Write(); err != nil {
+			log.Crit("Failed to delete dangling side blocks", "err", err)
+		}
+	}
+
 	// Log something friendly for the user
 	context := []interface{}{
 		"blocks", frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", frozen - 1,
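The block added above sweeps forward from the freezer tip, height by height: every child whose parent was just dropped is deleted and becomes part of the next level's dangling set, until no dangling children remain. A self-contained sketch of that invariant with the rawdb lookups abstracted as callbacks (all function parameters here are hypothetical stand-ins for the helpers used above).

package main

import "github.com/ethereum/go-ethereum/common"

// sweepDangling deletes, height by height, every block whose parent was
// dropped at the previous height, starting from the hashes left dangling
// by the freezer at tip-1.
func sweepDangling(
	dangling []common.Hash,
	tip uint64,
	childrenAt func(number uint64) []common.Hash,
	parentOf func(hash common.Hash, number uint64) (common.Hash, bool),
	deleteBlock func(hash common.Hash, number uint64),
) {
	for len(dangling) > 0 {
		drop := make(map[common.Hash]struct{}, len(dangling))
		for _, h := range dangling {
			drop[h] = struct{}{}
		}
		var next []common.Hash
		for _, child := range childrenAt(tip) {
			parent, ok := parentOf(child, tip)
			if !ok {
				continue // header missing, mirrors the log.Error path above
			}
			if _, dropped := drop[parent]; dropped {
				deleteBlock(child, tip)
				next = append(next, child)
			}
		}
		dangling = next
		tip++
	}
}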
@@ -127,11 +127,6 @@ func newFreezerTable(path, name string, disableSnappy, readonly bool) (*freezerT
 	return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, readonly)
 }
 
-// newAdditionTable opens the given path as a addition table.
-func newAdditionTable(path, name string, disableSnappy, readonly bool) (*freezerTable, error) {
-	return openAdditionTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, readonly)
-}
-
 // newTable opens a freezer table, creating the data and index files if they are
 // non-existent. Both files are truncated to the shortest common length to ensure
 // they don't go out of sync.
10	core/rawdb/metrics.go	Normal file
@@ -0,0 +1,10 @@
+package rawdb
+
+import "github.com/ethereum/go-ethereum/metrics"
+
+var (
+	rawdbGetAccountTrieNodeTimer = metrics.NewRegisteredTimer("rawdb/get/account/trienode/time", nil)
+	rawdbGetStorageTrieNodeTimer = metrics.NewRegisteredTimer("rawdb/get/storage/trienode/time", nil)
+	rawdbGetAccountSnapNodeTimer = metrics.NewRegisteredTimer("rawdb/get/account/snapnode/time", nil)
+	rawdbGetStorageSnapNodeTimer = metrics.NewRegisteredTimer("rawdb/get/storage/snapnode/time", nil)
+)
@@ -8,8 +8,6 @@ import (
 	"sync/atomic"
 	"time"
 
-	"golang.org/x/exp/slices"
-
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
@@ -70,43 +68,26 @@ func newPrunedFreezer(datadir string, db ethdb.KeyValueStore, offset uint64) (*p
 func (f *prunedfreezer) repair(datadir string) error {
 	offset := atomic.LoadUint64(&f.frozen)
 	// compatible freezer
-	minItems := uint64(math.MaxUint64)
+	min := uint64(math.MaxUint64)
 	for name, disableSnappy := range chainFreezerNoSnappy {
-		var (
-			table *freezerTable
-			err   error
-		)
-		if slices.Contains(additionTables, name) {
-			table, err = newAdditionTable(datadir, name, disableSnappy, false)
-		} else {
-			table, err = newFreezerTable(datadir, name, disableSnappy, false)
-		}
+		table, err := newFreezerTable(datadir, name, disableSnappy, false)
 		if err != nil {
 			return err
 		}
-		// addition tables only align head
-		if slices.Contains(additionTables, name) {
-			if EmptyTable(table) {
-				continue
-			}
-		}
 		items := table.items.Load()
-		if minItems > items {
-			minItems = items
+		if min > items {
+			min = items
 		}
 		table.Close()
 	}
+	log.Info("Read ancientdb item counts", "items", min)
+	offset += min
 
-	// If the dataset has undergone a prune block, the offset is a non-zero value, otherwise the offset is a zero value.
-	// The minItems is the value relative to offset
-	offset += minItems
-
-	// FrozenOfAncientFreezer is the progress of the last prune-freezer freeze.
-	frozenInDB := ReadFrozenOfAncientFreezer(f.db)
-	maxOffset := max(offset, frozenInDB)
-	log.Info("Read ancient db item counts", "items", minItems, "frozen", maxOffset)
-
-	atomic.StoreUint64(&f.frozen, maxOffset)
+	if frozen := ReadFrozenOfAncientFreezer(f.db); frozen > offset {
+		offset = frozen
+	}
+
+	atomic.StoreUint64(&f.frozen, offset)
 	if err := f.Sync(); err != nil {
 		return nil
 	}
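Both sides of the repair hunk above compute the same value by different routes: the removed code takes max(offset+minItems, persistedFrozen) explicitly, while the new code adds the minimum table length first and then lets the persisted freeze progress win if it is larger. Either way the recovered counter is the larger of the two, as this sketch spells out.

package main

import "fmt"

// recoveredFrozen reproduces the repair arithmetic: the shortest table length
// is added to the stored offset, and the persisted freeze progress wins if it
// is further ahead. Both variants in the hunk above reduce to this max.
func recoveredFrozen(offset, minItems, persisted uint64) uint64 {
	candidate := offset + minItems
	if persisted > candidate {
		return persisted
	}
	return candidate
}

func main() {
	fmt.Println(recoveredFrozen(100, 50, 120)) // 150: table data is ahead
	fmt.Println(recoveredFrozen(100, 50, 200)) // 200: persisted progress wins
}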
@@ -157,12 +138,12 @@ func (f *prunedfreezer) AncientOffSet() uint64 {
 
 // MigrateTable processes the entries in a given table in sequence
 // converting them to a new format if they're of an old format.
-func (f *prunedfreezer) MigrateTable(kind string, convert convertLegacyFn) error {
+func (db *prunedfreezer) MigrateTable(kind string, convert convertLegacyFn) error {
 	return errNotSupported
 }
 
 // AncientDatadir returns an error as we don't have a backing chain freezer.
-func (f *prunedfreezer) AncientDatadir() (string, error) {
+func (db *prunedfreezer) AncientDatadir() (string, error) {
 	return "", errNotSupported
 }
 
@@ -318,9 +299,10 @@ func (f *prunedfreezer) freeze() {
 				log.Error("Append ancient err", "number", f.frozen, "hash", hash, "err", err)
 				break
 			}
-			// may include common.Hash{}, will be delete in gcKvStore
-			ancients = append(ancients, hash)
+			if hash != (common.Hash{}) {
+				ancients = append(ancients, hash)
+			}
 		}
 		// Batch of blocks have been frozen, flush them before wiping from leveldb
 		if err := f.Sync(); err != nil {
 			log.Crit("Failed to flush frozen tables", "err", err)
@@ -43,10 +43,6 @@ func (t *table) SetBlockStore(block ethdb.Database) {
 	panic("not implement")
 }
 
-func (t *table) HasSeparateBlockStore() bool {
-	panic("not implement")
-}
-
 // NewTable returns a database object that prefixes all keys with a given string.
 func NewTable(db ethdb.Database, prefix string) ethdb.Database {
 	return &table{
@@ -251,10 +247,6 @@ func (t *table) SetStateStore(state ethdb.Database) {
 	panic("not implement")
 }
 
-func (t *table) GetStateStore() ethdb.Database {
-	return nil
-}
-
 func (t *table) StateStoreReader() ethdb.Reader {
 	return nil
 }
@@ -382,7 +382,7 @@ func (p *BlockPruner) backUpOldDb(name string, cache, handles int, namespace str
 	log.Info("chainDB opened successfully")
 
 	// Get the number of items in old ancient db.
-	itemsOfAncient, err := chainDb.BlockStore().ItemAmountInAncient()
+	itemsOfAncient, err := chainDb.ItemAmountInAncient()
 	log.Info("the number of items in ancientDB is ", "itemsOfAncient", itemsOfAncient)
 
 	// If we can't access the freezer or it's empty, abort.
@@ -236,7 +236,7 @@ func New(config Config, diskdb ethdb.KeyValueStore, triedb *triedb.Database, roo
 		snap.layers[head.Root()] = head
 		head = head.Parent()
 	}
-	log.Info("Snapshot loaded", "diskRoot", snap.diskRoot(), "root", root)
+	log.Info("Snapshot loaded", "diskRoot", snap.diskRoot(), "root", root, "snapshot_cache_size", common.StorageSize(config.CacheSize)*1024*1024)
 	return snap, nil
 }
 
@@ -933,8 +933,8 @@ func (s *StateDB) copyInternal(doPrefetch bool) *StateDB {
 	// along with their original values.
 	state.accounts = copySet(s.accounts)
 	state.storages = copy2DSet(s.storages)
-	state.accountsOrigin = copySet(s.accountsOrigin)
-	state.storagesOrigin = copy2DSet(s.storagesOrigin)
+	state.accountsOrigin = copySet(state.accountsOrigin)
+	state.storagesOrigin = copy2DSet(state.storagesOrigin)
 
 	// Deep copy the logs occurred in the scope of block
 	for hash, logs := range s.logs {
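The two sides of the hunk above copy the origin sets from different objects: one from the source StateDB s, the other from the freshly constructed state, whose maps hold only what the constructor has populated up to that point. Which behaviour is intended cannot be told from the diff alone. For reference, a generic deep-copy of the kind copySet presumably performs (the real helper's signature may differ):

package main

import "fmt"

// copySet returns a fresh map with the same entries as the input, so later
// writes to the copy cannot leak back into the source.
func copySet[K comparable, V any](set map[K]V) map[K]V {
	copied := make(map[K]V, len(set))
	for k, v := range set {
		copied[k] = v
	}
	return copied
}

func main() {
	src := map[string]int{"a": 1}
	dst := copySet(src)
	dst["a"] = 2
	fmt.Println(src["a"], dst["a"]) // 1 2
}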
@@ -25,7 +25,7 @@ import (
 )
 
 // NewStateSync creates a new state trie download scheduler.
-func NewStateSync(root common.Hash, database ethdb.Database, onLeaf func(keys [][]byte, leaf []byte) error, scheme string) *trie.Sync {
+func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(keys [][]byte, leaf []byte) error, scheme string) *trie.Sync {
 	// Register the storage slot callback if the external callback is specified.
 	var onSlot func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error
 	if onLeaf != nil {
@@ -268,7 +268,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool, s
 		}
 	}
 	batch := dstDb.NewBatch()
-	if err := sched.Commit(batch, nil); err != nil {
+	if err := sched.Commit(batch); err != nil {
 		t.Fatalf("failed to commit data: %v", err)
 	}
 	batch.Write()
@@ -369,7 +369,7 @@ func testIterativeDelayedStateSync(t *testing.T, scheme string) {
 		nodeProcessed = len(nodeResults)
 	}
 	batch := dstDb.NewBatch()
-	if err := sched.Commit(batch, nil); err != nil {
+	if err := sched.Commit(batch); err != nil {
 		t.Fatalf("failed to commit data: %v", err)
 	}
 	batch.Write()
@@ -469,7 +469,7 @@ func testIterativeRandomStateSync(t *testing.T, count int, scheme string) {
 		}
 	}
 	batch := dstDb.NewBatch()
-	if err := sched.Commit(batch, nil); err != nil {
+	if err := sched.Commit(batch); err != nil {
 		t.Fatalf("failed to commit data: %v", err)
 	}
 	batch.Write()
@@ -575,7 +575,7 @@ func testIterativeRandomDelayedStateSync(t *testing.T, scheme string) {
 		}
 	}
 	batch := dstDb.NewBatch()
-	if err := sched.Commit(batch, nil); err != nil {
+	if err := sched.Commit(batch); err != nil {
 		t.Fatalf("failed to commit data: %v", err)
 	}
 	batch.Write()
@@ -688,7 +688,7 @@ func testIncompleteStateSync(t *testing.T, scheme string) {
 		}
 	}
 	batch := dstDb.NewBatch()
-	if err := sched.Commit(batch, nil); err != nil {
+	if err := sched.Commit(batch); err != nil {
 		t.Fatalf("failed to commit data: %v", err)
 	}
 	batch.Write()
@@ -20,6 +20,7 @@ import (
 	"errors"
 	"fmt"
 	"math/big"
+	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/consensus"
@@ -29,9 +30,14 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/params"
 )
 
+var (
+	processTxTimer = metrics.NewRegisteredTimer("process/tx/time", nil)
+)
+
 // StateProcessor is a basic Processor, which takes care of transitioning
 // state from one point to another.
 //
@@ -104,6 +110,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
 	systemTxs := make([]*types.Transaction, 0, 2)
 
 	for i, tx := range block.Transactions() {
+		if metrics.EnabledExpensive {
+			start := time.Now()
+			defer processTxTimer.UpdateSince(start)
+		}
 		if isPoSA {
 			if isSystemTx, err := posa.IsSystemTransaction(tx, block.Header()); err != nil {
 				bloomProcessors.Close()
@ -231,13 +241,6 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
|
|||||||
// ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root
|
// ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root
|
||||||
// contract. This method is exported to be used in tests.
|
// contract. This method is exported to be used in tests.
|
||||||
func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *state.StateDB) {
|
func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *state.StateDB) {
|
||||||
// Return immediately if beaconRoot equals the zero hash when using the Parlia engine.
|
|
||||||
if beaconRoot == (common.Hash{}) {
|
|
||||||
if chainConfig := vmenv.ChainConfig(); chainConfig != nil && chainConfig.Parlia != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If EIP-4788 is enabled, we need to invoke the beaconroot storage contract with
|
// If EIP-4788 is enabled, we need to invoke the beaconroot storage contract with
|
||||||
// the new root
|
// the new root
|
||||||
msg := &Message{
|
msg := &Message{
|
||||||
|
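A caveat on the timer added in Process above: a defer issued inside a loop does not fire at the end of the iteration, it fires when the enclosing function returns, so each deferred UpdateSince sample spans from that iteration's start to the end of Process. Measuring one transaction at a time looks like the sketch below (updateSince is a stand-in for the metrics timer call, not the real API):

package main

import (
	"fmt"
	"time"
)

// updateSince stands in for metrics.Timer.UpdateSince.
func updateSince(name string, start time.Time) {
	fmt.Printf("%s took %v\n", name, time.Since(start))
}

func process(txs []string) {
	for _, tx := range txs {
		// Timing the body directly records one sample per transaction;
		// a defer here would instead postpone every sample to the
		// moment process() returns.
		start := time.Now()
		_ = tx // apply the transaction here
		updateSince("process/tx/time", start)
	}
}

func main() {
	process([]string{"tx1", "tx2"})
}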
File diff suppressed because one or more lines are too long
@@ -1,27 +0,0 @@
-package bohr
-
-import _ "embed"
-
-// contract codes for Mainnet upgrade
-var (
-	//go:embed mainnet/ValidatorContract
-	MainnetValidatorContract string
-	//go:embed mainnet/StakeHubContract
-	MainnetStakeHubContract string
-)
-
-// contract codes for Chapel upgrade
-var (
-	//go:embed chapel/ValidatorContract
-	ChapelValidatorContract string
-	//go:embed chapel/StakeHubContract
-	ChapelStakeHubContract string
-)
-
-// contract codes for Rialto upgrade
-var (
-	//go:embed rialto/ValidatorContract
-	RialtoValidatorContract string
-	//go:embed rialto/StakeHubContract
-	RialtoStakeHubContract string
-)
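The deleted file relies on Go's //go:embed mechanism: the directive must sit immediately above the variable it fills, the blank "embed" import activates it, and the path resolves relative to the package directory at compile time. A self-contained sketch (contract.hex is a placeholder file that would have to exist next to the source):

package main

import (
	_ "embed" // blank import enables the //go:embed directive
	"fmt"
)

// The directive below reads contract.hex at build time into the string.
//
//go:embed contract.hex
var contractCode string

func main() {
	fmt.Println(len(contractCode), "bytes of embedded contract code")
}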
File diff suppressed because one or more lines are too long
@@ -1,19 +0,0 @@
-package haber_fix
-
-import _ "embed"
-
-// contract codes for Chapel upgrade
-var (
-	//go:embed chapel/ValidatorContract
-	ChapelValidatorContract string
-	//go:embed chapel/SlashContract
-	ChapelSlashContract string
-)
-
-// contract codes for Mainnet upgrade
-var (
-	//go:embed mainnet/ValidatorContract
-	MainnetValidatorContract string
-	//go:embed mainnet/SlashContract
-	MainnetSlashContract string
-)
@@ -4,17 +4,17 @@ import (
 	"encoding/hex"
 	"fmt"
 	"math/big"
-	"reflect"
-	"strings"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/systemcontracts/bohr"
+	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/params"
+
 	"github.com/ethereum/go-ethereum/core/systemcontracts/bruno"
 	"github.com/ethereum/go-ethereum/core/systemcontracts/euler"
 	"github.com/ethereum/go-ethereum/core/systemcontracts/feynman"
 	feynmanFix "github.com/ethereum/go-ethereum/core/systemcontracts/feynman_fix"
 	"github.com/ethereum/go-ethereum/core/systemcontracts/gibbs"
-	haberFix "github.com/ethereum/go-ethereum/core/systemcontracts/haber_fix"
 	"github.com/ethereum/go-ethereum/core/systemcontracts/kepler"
 	"github.com/ethereum/go-ethereum/core/systemcontracts/luban"
 	"github.com/ethereum/go-ethereum/core/systemcontracts/mirror"
@@ -23,9 +23,6 @@ import (
 	"github.com/ethereum/go-ethereum/core/systemcontracts/planck"
 	"github.com/ethereum/go-ethereum/core/systemcontracts/plato"
 	"github.com/ethereum/go-ethereum/core/systemcontracts/ramanujan"
-	"github.com/ethereum/go-ethereum/core/vm"
-	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/params"
 )
 
 type UpgradeConfig struct {
@@ -41,7 +38,7 @@ type Upgrade struct {
 	Configs []*UpgradeConfig
 }
 
-type upgradeHook func(blockNumber *big.Int, contractAddr common.Address, statedb vm.StateDB) error
+type upgradeHook func(blockNumber *big.Int, contractAddr common.Address, statedb *state.StateDB) error
 
 const (
 	mainNet = "Mainnet"
@@ -78,10 +75,6 @@ var (
 	feynmanUpgrade = make(map[string]*Upgrade)
 
 	feynmanFixUpgrade = make(map[string]*Upgrade)
-
-	haberFixUpgrade = make(map[string]*Upgrade)
-
-	bohrUpgrade = make(map[string]*Upgrade)
 )
 
 func init() {
@@ -708,93 +701,12 @@ func init() {
 			},
 		},
 	}
 
-	haberFixUpgrade[mainNet] = &Upgrade{
-		UpgradeName: "haberFix",
-		Configs: []*UpgradeConfig{
-			{
-				ContractAddr: common.HexToAddress(ValidatorContract),
-				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/b743ce3f1f1e94c349b175cd6593bc263463b33b",
-				Code:         haberFix.MainnetValidatorContract,
-			},
-			{
-				ContractAddr: common.HexToAddress(SlashContract),
-				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/b743ce3f1f1e94c349b175cd6593bc263463b33b",
-				Code:         haberFix.MainnetSlashContract,
-			},
-		},
-	}
-
-	haberFixUpgrade[chapelNet] = &Upgrade{
-		UpgradeName: "haberFix",
-		Configs: []*UpgradeConfig{
-			{
-				ContractAddr: common.HexToAddress(ValidatorContract),
-				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/b743ce3f1f1e94c349b175cd6593bc263463b33b",
-				Code:         haberFix.ChapelValidatorContract,
-			},
-			{
-				ContractAddr: common.HexToAddress(SlashContract),
-				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/b743ce3f1f1e94c349b175cd6593bc263463b33b",
-				Code:         haberFix.ChapelSlashContract,
-			},
-		},
-	}
-
-	bohrUpgrade[mainNet] = &Upgrade{
-		UpgradeName: "bohr",
-		Configs: []*UpgradeConfig{
-			{
-				ContractAddr: common.HexToAddress(ValidatorContract),
-				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
-				Code:         bohr.MainnetValidatorContract,
-			},
-			{
-				ContractAddr: common.HexToAddress(StakeHubContract),
-				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
-				Code:         bohr.MainnetStakeHubContract,
-			},
-		},
-	}
-
-	bohrUpgrade[chapelNet] = &Upgrade{
-		UpgradeName: "bohr",
-		Configs: []*UpgradeConfig{
-			{
-				ContractAddr: common.HexToAddress(ValidatorContract),
-				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
-				Code:         bohr.ChapelValidatorContract,
-			},
-			{
-				ContractAddr: common.HexToAddress(StakeHubContract),
-				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
-				Code:         bohr.ChapelStakeHubContract,
-			},
-		},
-	}
-
-	bohrUpgrade[rialtoNet] = &Upgrade{
-		UpgradeName: "bohr",
-		Configs: []*UpgradeConfig{
-			{
-				ContractAddr: common.HexToAddress(ValidatorContract),
-				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
-				Code:         bohr.RialtoValidatorContract,
-			},
-			{
-				ContractAddr: common.HexToAddress(StakeHubContract),
-				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
-				Code:         bohr.RialtoStakeHubContract,
-			},
-		},
-	}
 }
 
-func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.Int, lastBlockTime uint64, blockTime uint64, statedb vm.StateDB) {
-	if config == nil || blockNumber == nil || statedb == nil || reflect.ValueOf(statedb).IsNil() {
+func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.Int, lastBlockTime uint64, blockTime uint64, statedb *state.StateDB) {
+	if config == nil || blockNumber == nil || statedb == nil {
 		return
 	}
 
 	var network string
 	switch GenesisHash {
 	/* Add mainnet genesis hash */
@@ -865,20 +777,12 @@ func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.I
 		applySystemContractUpgrade(feynmanFixUpgrade[network], blockNumber, statedb, logger)
 	}
 
-	if config.IsOnHaberFix(blockNumber, lastBlockTime, blockTime) {
-		applySystemContractUpgrade(haberFixUpgrade[network], blockNumber, statedb, logger)
-	}
-
-	if config.IsOnBohr(blockNumber, lastBlockTime, blockTime) {
-		applySystemContractUpgrade(bohrUpgrade[network], blockNumber, statedb, logger)
-	}
-
 	/*
 		apply other upgrades
 	*/
 }
 
-func applySystemContractUpgrade(upgrade *Upgrade, blockNumber *big.Int, statedb vm.StateDB, logger log.Logger) {
+func applySystemContractUpgrade(upgrade *Upgrade, blockNumber *big.Int, statedb *state.StateDB, logger log.Logger) {
 	if upgrade == nil {
 		logger.Info("Empty upgrade config", "height", blockNumber.String())
 		return
@@ -895,7 +799,7 @@ func applySystemContractUpgrade(upgrade *Upgrade, blockNumber *big.Int, statedb
 		}
 	}
 
-	newContractCode, err := hex.DecodeString(strings.TrimSpace(cfg.Code))
+	newContractCode, err := hex.DecodeString(cfg.Code)
 	if err != nil {
 		panic(fmt.Errorf("failed to decode new contract code: %s", err.Error()))
 	}
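The reflect guard dropped from UpgradeBuildInSystemContract exists because of Go's typed-nil pitfall: an interface value holding a nil concrete pointer is itself non-nil, so statedb == nil misses it and reflect.ValueOf(statedb).IsNil() is needed. Once the parameter becomes the concrete *state.StateDB, a plain comparison suffices. A runnable illustration (StateDB here is a stand-in interface, not the real vm.StateDB):

package main

import (
	"fmt"
	"reflect"
)

// StateDB stands in for the vm.StateDB interface.
type StateDB interface{ Exist() bool }

type stateDB struct{}

func (s *stateDB) Exist() bool { return true }

func main() {
	var concrete *stateDB        // typed nil pointer
	var iface StateDB = concrete // interface carrying a type and a nil value

	fmt.Println(iface == nil)                   // false: the interface is not nil
	fmt.Println(reflect.ValueOf(iface).IsNil()) // true: the pointer inside is nil
}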
@@ -2,13 +2,9 @@ package systemcontracts
 
 import (
 	"crypto/sha256"
-	"math/big"
 	"testing"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/state"
-	"github.com/ethereum/go-ethereum/core/vm"
-	"github.com/ethereum/go-ethereum/params"
 	"github.com/stretchr/testify/require"
 )
 
@@ -43,31 +39,3 @@ func TestAllCodesHash(t *testing.T) {
 	allCodeHash := sha256.Sum256(allCodes)
 	require.Equal(t, allCodeHash[:], common.Hex2Bytes("833cc0fc87c46ad8a223e44ccfdc16a51a7e7383525136441bd0c730f06023df"))
 }
-
-func TestUpgradeBuildInSystemContractNilInterface(t *testing.T) {
-	var (
-		config               = params.BSCChainConfig
-		blockNumber          = big.NewInt(37959559)
-		lastBlockTime uint64 = 1713419337
-		blockTime     uint64 = 1713419340
-		statedb       vm.StateDB
-	)
-
-	GenesisHash = params.BSCGenesisHash
-
-	UpgradeBuildInSystemContract(config, blockNumber, lastBlockTime, blockTime, statedb)
-}
-
-func TestUpgradeBuildInSystemContractNilValue(t *testing.T) {
-	var (
-		config               = params.BSCChainConfig
-		blockNumber          = big.NewInt(37959559)
-		lastBlockTime uint64 = 1713419337
-		blockTime     uint64 = 1713419340
-		statedb       vm.StateDB = (*state.StateDB)(nil)
-	)
-
-	GenesisHash = params.BSCGenesisHash
-
-	UpgradeBuildInSystemContract(config, blockNumber, lastBlockTime, blockTime, statedb)
-}
@@ -53,6 +53,7 @@ type txIndexer struct {
 
 // newTxIndexer initializes the transaction indexer.
 func newTxIndexer(limit uint64, chain *BlockChain) *txIndexer {
+	limit = 0
 	indexer := &txIndexer{
 		limit: limit,
 		db:    chain.db,
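The inserted limit = 0 overrides whatever limit the caller passed. Assuming the usual geth convention that a zero limit means the whole chain stays indexed, this forces full transaction indexing regardless of configuration. A sketch of how such a limit typically maps to the oldest indexed block (the tail helper is illustrative, not the actual indexer code):

package main

import "fmt"

// tail returns the first block that remains indexed for a given head,
// under the assumed convention that limit == 0 disables pruning.
func tail(head, limit uint64) uint64 {
	if limit == 0 || head < limit {
		return 0
	}
	return head - limit + 1
}

func main() {
	fmt.Println(tail(1_000_000, 0))     // 0: the entire chain stays indexed
	fmt.Println(tail(1_000_000, 90000)) // 910001: only a recent window survives
}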
@@ -212,7 +212,7 @@ func TestTxIndexer(t *testing.T) {
 	}
 	for _, c := range cases {
 		frdir := t.TempDir()
-		db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false, false)
+		db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false)
 		rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0))
 
 		// Index the initial blocks from ancient store
@@ -6,8 +6,6 @@ import (
 	"sync/atomic"
 	"time"
 
-	mapset "github.com/deckarep/golang-set/v2"
-
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/crypto"
@@ -42,12 +40,6 @@ func (b *BidArgs) ToBid(builder common.Address, signer Signer) (*Bid, error) {
 		return nil, err
 	}
 
-	if len(b.RawBid.UnRevertible) > len(txs) {
-		return nil, fmt.Errorf("expect NonRevertible no more than %d", len(txs))
-	}
-	unRevertibleHashes := mapset.NewThreadUnsafeSetWithSize[common.Hash](len(b.RawBid.UnRevertible))
-	unRevertibleHashes.Append(b.RawBid.UnRevertible...)
-
 	if len(b.PayBidTx) != 0 {
 		var payBidTx = new(Transaction)
 		err = payBidTx.UnmarshalBinary(b.PayBidTx)
@@ -63,7 +55,6 @@ func (b *BidArgs) ToBid(builder common.Address, signer Signer) (*Bid, error) {
 		BlockNumber:  b.RawBid.BlockNumber,
 		ParentHash:   b.RawBid.ParentHash,
 		Txs:          txs,
-		UnRevertible: unRevertibleHashes,
 		GasUsed:      b.RawBid.GasUsed + b.PayBidTxGasUsed,
 		GasFee:       b.RawBid.GasFee,
 		BuilderFee:   b.RawBid.BuilderFee,
@@ -82,7 +73,6 @@ type RawBid struct {
 	BlockNumber  uint64          `json:"blockNumber"`
 	ParentHash   common.Hash     `json:"parentHash"`
 	Txs          []hexutil.Bytes `json:"txs"`
-	UnRevertible []common.Hash   `json:"unRevertible"`
 	GasUsed      uint64          `json:"gasUsed"`
 	GasFee       *big.Int        `json:"gasFee"`
 	BuilderFee   *big.Int        `json:"builderFee"`
@@ -168,7 +158,6 @@ type Bid struct {
 	BlockNumber  uint64
 	ParentHash   common.Hash
 	Txs          Transactions
-	UnRevertible mapset.Set[common.Hash]
 	GasUsed      uint64
 	GasFee       *big.Int
 	BuilderFee   *big.Int
@@ -193,7 +182,5 @@ type MevParams struct {
 	ValidatorCommission   uint64 // 100 means 1%
 	BidSimulationLeftOver time.Duration
 	GasCeil               uint64
-	GasPrice              *big.Int // Minimum avg gas price for bid block
 	BuilderFeeCeil        *big.Int
-	Version               string
 }
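The removed UnRevertible plumbing builds a hash set with github.com/deckarep/golang-set/v2 and rejects bids whose non-revertible list is longer than the transaction list. The same set usage in miniature, with plain strings standing in for common.Hash:

package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set/v2"
)

func main() {
	// Collect the hashes that must not revert, then membership-test
	// each transaction in the bundle.
	unRevertible := mapset.NewThreadUnsafeSetWithSize[string](2)
	unRevertible.Append("0xaa", "0xbb")

	for _, tx := range []string{"0xaa", "0xcc"} {
		fmt.Println(tx, "must succeed:", unRevertible.Contains(tx))
	}
}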
@@ -2,12 +2,10 @@ package types
 
 import (
 	"bytes"
-	"encoding/json"
 	"errors"
 	"math/big"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/rlp"
 )
 
@@ -55,40 +53,3 @@ func (s *BlobSidecar) SanityCheck(blockNumber *big.Int, blockHash common.Hash) e
 	}
 	return nil
 }
-
-func (s *BlobSidecar) MarshalJSON() ([]byte, error) {
-	fields := map[string]interface{}{
-		"blockHash":   s.BlockHash,
-		"blockNumber": hexutil.EncodeUint64(s.BlockNumber.Uint64()),
-		"txHash":      s.TxHash,
-		"txIndex":     hexutil.EncodeUint64(s.TxIndex),
-	}
-	fields["blobSidecar"] = s.BlobTxSidecar
-	return json.Marshal(fields)
-}
-
-func (s *BlobSidecar) UnmarshalJSON(input []byte) error {
-	type blobSidecar struct {
-		BlobSidecar BlobTxSidecar `json:"blobSidecar"`
-		BlockNumber *hexutil.Big  `json:"blockNumber"`
-		BlockHash   common.Hash   `json:"blockHash"`
-		TxIndex     *hexutil.Big  `json:"txIndex"`
-		TxHash      common.Hash   `json:"txHash"`
-	}
-	var blob blobSidecar
-	if err := json.Unmarshal(input, &blob); err != nil {
-		return err
-	}
-	s.BlobTxSidecar = blob.BlobSidecar
-	if blob.BlockNumber == nil {
-		return errors.New("missing required field 'blockNumber' for BlobSidecar")
-	}
-	s.BlockNumber = blob.BlockNumber.ToInt()
-	s.BlockHash = blob.BlockHash
-	if blob.TxIndex == nil {
-		return errors.New("missing required field 'txIndex' for BlobSidecar")
-	}
-	s.TxIndex = blob.TxIndex.ToInt().Uint64()
-	s.TxHash = blob.TxHash
-	return nil
-}
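The deleted MarshalJSON/UnmarshalJSON pair follows a common geth pattern: numeric fields travel as 0x-prefixed quantities via hexutil, decoding goes through a shadow struct, and required fields are checked explicitly. The same pattern compressed to a single field:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// sidecar mirrors the shape of the removed methods on one field.
type sidecar struct {
	BlockNumber *big.Int
}

func (s *sidecar) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"blockNumber": hexutil.EncodeUint64(s.BlockNumber.Uint64()),
	})
}

func (s *sidecar) UnmarshalJSON(input []byte) error {
	var dec struct {
		BlockNumber *hexutil.Big `json:"blockNumber"`
	}
	if err := json.Unmarshal(input, &dec); err != nil {
		return err
	}
	if dec.BlockNumber == nil {
		return errors.New("missing required field 'blockNumber'")
	}
	s.BlockNumber = dec.BlockNumber.ToInt()
	return nil
}

func main() {
	b, _ := json.Marshal(&sidecar{BlockNumber: big.NewInt(42)})
	fmt.Println(string(b)) // {"blockNumber":"0x2a"}

	var s sidecar
	_ = json.Unmarshal(b, &s)
	fmt.Println(s.BlockNumber) // 42
}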
@@ -673,7 +673,10 @@ type DiffAccountsInBlock struct {
 	Transactions []DiffAccountsInTx
 }
 
-var extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal
+var (
+	extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
+	extraSeal   = 65 // Fixed number of extra-data suffix bytes reserved for signer seal
+)
 
 // SealHash returns the hash of a block prior to it being sealed.
 func SealHash(header *Header, chainId *big.Int) (hash common.Hash) {
@@ -684,33 +687,7 @@ func SealHash(header *Header, chainId *big.Int) (hash common.Hash) {
 }
 
 func EncodeSigHeader(w io.Writer, header *Header, chainId *big.Int) {
-	var err error
-	if header.ParentBeaconRoot != nil && *header.ParentBeaconRoot == (common.Hash{}) {
-		err = rlp.Encode(w, []interface{}{
-			chainId,
-			header.ParentHash,
-			header.UncleHash,
-			header.Coinbase,
-			header.Root,
-			header.TxHash,
-			header.ReceiptHash,
-			header.Bloom,
-			header.Difficulty,
-			header.Number,
-			header.GasLimit,
-			header.GasUsed,
-			header.Time,
-			header.Extra[:len(header.Extra)-extraSeal], // this will panic if extra is too short, should check before calling encodeSigHeader
-			header.MixDigest,
-			header.Nonce,
-			header.BaseFee,
-			header.WithdrawalsHash,
-			header.BlobGasUsed,
-			header.ExcessBlobGas,
-			header.ParentBeaconRoot,
-		})
-	} else {
-		err = rlp.Encode(w, []interface{}{
+	err := rlp.Encode(w, []interface{}{
 		chainId,
 		header.ParentHash,
 		header.UncleHash,
@@ -728,7 +705,30 @@ func EncodeSigHeader(w io.Writer, header *Header, chainId *big.Int) {
 		header.MixDigest,
 		header.Nonce,
 	})
-	}
+	if err != nil {
+		panic("can't encode: " + err.Error())
+	}
+}
+
+func EncodeSigHeaderWithoutVoteAttestation(w io.Writer, header *Header, chainId *big.Int) {
+	err := rlp.Encode(w, []interface{}{
+		chainId,
+		header.ParentHash,
+		header.UncleHash,
+		header.Coinbase,
+		header.Root,
+		header.TxHash,
+		header.ReceiptHash,
+		header.Bloom,
+		header.Difficulty,
+		header.Number,
+		header.GasLimit,
+		header.GasUsed,
+		header.Time,
+		header.Extra[:extraVanity], // this will panic if extra is too short, should check before calling encodeSigHeaderWithoutVoteAttestation
+		header.MixDigest,
+		header.Nonce,
+	})
 	if err != nil {
 		panic("can't encode: " + err.Error())
 	}
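Both encoders slice header.Extra without checking its length, a risk the in-line comments themselves flag. A guarded version of the same computation, reusing the extraVanity and extraSeal constants from the diff (the sealedPayload helper is illustrative):

package main

import "fmt"

const (
	extraVanity = 32 // extra-data prefix bytes reserved for signer vanity
	extraSeal   = 65 // extra-data suffix bytes reserved for the signer seal
)

// sealedPayload returns the extra-data with the seal stripped, failing
// instead of panicking when the header is malformed.
func sealedPayload(extra []byte) ([]byte, error) {
	if len(extra) < extraVanity+extraSeal {
		return nil, fmt.Errorf("extra-data too short: %d bytes", len(extra))
	}
	return extra[:len(extra)-extraSeal], nil
}

func main() {
	extra := make([]byte, extraVanity+extraSeal)
	payload, err := sealedPayload(extra)
	fmt.Println(len(payload), err) // 32 <nil>
}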
@@ -36,7 +36,6 @@ import (
 	"github.com/ethereum/go-ethereum/crypto/bn256"
 	"github.com/ethereum/go-ethereum/crypto/kzg4844"
 	"github.com/ethereum/go-ethereum/crypto/secp256k1"
-	"github.com/ethereum/go-ethereum/crypto/secp256r1"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -248,36 +247,6 @@ var PrecompiledContractsCancun = map[common.Address]PrecompiledContract{
 	common.BytesToAddress([]byte{105}): &secp256k1SignatureRecover{},
 }
-
-// PrecompiledContractsHaber contains the default set of pre-compiled Ethereum
-// contracts used in the Haber release.
-var PrecompiledContractsHaber = map[common.Address]PrecompiledContract{
-	common.BytesToAddress([]byte{1}):    &ecrecover{},
-	common.BytesToAddress([]byte{2}):    &sha256hash{},
-	common.BytesToAddress([]byte{3}):    &ripemd160hash{},
-	common.BytesToAddress([]byte{4}):    &dataCopy{},
-	common.BytesToAddress([]byte{5}):    &bigModExp{eip2565: true},
-	common.BytesToAddress([]byte{6}):    &bn256AddIstanbul{},
-	common.BytesToAddress([]byte{7}):    &bn256ScalarMulIstanbul{},
-	common.BytesToAddress([]byte{8}):    &bn256PairingIstanbul{},
-	common.BytesToAddress([]byte{9}):    &blake2F{},
-	common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{},
-
-	common.BytesToAddress([]byte{100}): &tmHeaderValidate{},
-	common.BytesToAddress([]byte{101}): &iavlMerkleProofValidatePlato{},
-	common.BytesToAddress([]byte{102}): &blsSignatureVerify{},
-	common.BytesToAddress([]byte{103}): &cometBFTLightBlockValidateHertz{},
-	common.BytesToAddress([]byte{104}): &verifyDoubleSignEvidence{},
-	common.BytesToAddress([]byte{105}): &secp256k1SignatureRecover{},
-
-	common.BytesToAddress([]byte{0x01, 0x00}): &p256Verify{},
-}
-
-// PrecompiledContractsP256Verify contains the precompiled Ethereum
-// contract specified in EIP-7212. This is exported for testing purposes.
-var PrecompiledContractsP256Verify = map[common.Address]PrecompiledContract{
-	common.BytesToAddress([]byte{0x01, 0x00}): &p256Verify{},
-}
 
 // PrecompiledContractsBLS contains the set of pre-compiled Ethereum
 // contracts specified in EIP-2537. These are exported for testing purposes.
 var PrecompiledContractsBLS = map[common.Address]PrecompiledContract{
@@ -293,7 +262,6 @@ var PrecompiledContractsBLS = map[common.Address]PrecompiledContract{
 }
 
 var (
-	PrecompiledAddressesHaber   []common.Address
 	PrecompiledAddressesCancun  []common.Address
 	PrecompiledAddressesFeynman []common.Address
 	PrecompiledAddressesHertz   []common.Address
@@ -345,16 +313,11 @@ func init() {
 	for k := range PrecompiledContractsCancun {
 		PrecompiledAddressesCancun = append(PrecompiledAddressesCancun, k)
 	}
-	for k := range PrecompiledContractsHaber {
-		PrecompiledAddressesHaber = append(PrecompiledAddressesHaber, k)
-	}
 }
 
 // ActivePrecompiles returns the precompiles enabled with the current configuration.
 func ActivePrecompiles(rules params.Rules) []common.Address {
 	switch {
-	case rules.IsHaber:
-		return PrecompiledAddressesHaber
 	case rules.IsCancun:
 		return PrecompiledAddressesCancun
 	case rules.IsFeynman:
@@ -1426,40 +1389,6 @@ func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash {
 	return h
 }
-
-// P256VERIFY (secp256r1 signature verification)
-// implemented as a native contract
-type p256Verify struct{}
-
-// RequiredGas returns the gas required to execute the precompiled contract
-func (c *p256Verify) RequiredGas(input []byte) uint64 {
-	return params.P256VerifyGas
-}
-
-// Run executes the precompiled contract with given 160 bytes of param, returning the output and the used gas
-func (c *p256Verify) Run(input []byte) ([]byte, error) {
-	// Required input length is 160 bytes
-	const p256VerifyInputLength = 160
-	// Check the input length
-	if len(input) != p256VerifyInputLength {
-		// Input length is invalid
-		return nil, nil
-	}
-
-	// Extract the hash, r, s, x, y from the input
-	hash := input[0:32]
-	r, s := new(big.Int).SetBytes(input[32:64]), new(big.Int).SetBytes(input[64:96])
-	x, y := new(big.Int).SetBytes(input[96:128]), new(big.Int).SetBytes(input[128:160])
-
-	// Verify the secp256r1 signature
-	if secp256r1.Verify(hash, r, s, x, y) {
-		// Signature is valid
-		return common.LeftPadBytes(common.Big1.Bytes(), 32), nil
-	} else {
-		// Signature is invalid
-		return nil, nil
-	}
-}
 
 // verifyDoubleSignEvidence implements bsc header verification precompile.
 type verifyDoubleSignEvidence struct{}
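For reference, the removed p256Verify precompile implements EIP-7212: the input must be exactly 160 bytes, laid out as hash(32) | r(32) | s(32) | x(32) | y(32); a valid signature returns a 32-byte big-endian 1, and any failure returns empty output rather than an error. The core check can be sketched with the standard library alone (illustrative; the deleted code uses the dedicated secp256r1 package):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"fmt"
	"math/big"
)

// verifyP256 mirrors the removed precompile's semantics using crypto/ecdsa.
func verifyP256(input []byte) []byte {
	if len(input) != 160 {
		return nil // malformed input: no output, no error
	}
	r := new(big.Int).SetBytes(input[32:64])
	s := new(big.Int).SetBytes(input[64:96])
	pub := ecdsa.PublicKey{
		Curve: elliptic.P256(),
		X:     new(big.Int).SetBytes(input[96:128]),
		Y:     new(big.Int).SetBytes(input[128:160]),
	}
	if ecdsa.Verify(&pub, input[0:32], r, s) {
		out := make([]byte, 32)
		out[31] = 1 // 32-byte big-endian one signals success
		return out
	}
	return nil
}

func main() {
	fmt.Println(verifyP256(make([]byte, 159))) // []: wrong length is rejected
}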
@@ -57,8 +57,6 @@ var allPrecompiles = map[common.Address]PrecompiledContract{
 	common.BytesToAddress([]byte{8}):    &bn256PairingIstanbul{},
 	common.BytesToAddress([]byte{9}):    &blake2F{},
 	common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{},
-
-	common.BytesToAddress([]byte{0x01, 0x00}): &p256Verify{},
 	common.BytesToAddress([]byte{0x0f, 0x0a}): &bls12381G1Add{},
 	common.BytesToAddress([]byte{0x0f, 0x0b}): &bls12381G1Mul{},
 	common.BytesToAddress([]byte{0x0f, 0x0c}): &bls12381G1MultiExp{},
@@ -409,18 +407,6 @@ func BenchmarkPrecompiledBLS12381G2MultiExpWorstCase(b *testing.B) {
 	benchmarkPrecompiled("0f", testcase, b)
 }
-
-// Benchmarks the sample inputs from the P256VERIFY precompile.
-func BenchmarkPrecompiledP256Verify(bench *testing.B) {
-	t := precompiledTest{
-		Input:    "4cee90eb86eaa050036147a12d49004b6b9c72bd725d39d4785011fe190f0b4da73bd4903f0ce3b639bbbf6e8e80d16931ff4bcf5993d58468e8fb19086e8cac36dbcd03009df8c59286b162af3bd7fcc0450c9aa81be5d10d312af6c66b1d604aebd3099c618202fcfe16ae7770b0c49ab5eadf74b754204a3bb6060e44eff37618b065f9832de4ca6ca971a7a1adc826d0f7c00181a5fb2ddf79ae00b4e10e",
-		Expected: "0000000000000000000000000000000000000000000000000000000000000001",
-		Name:     "p256Verify",
-	}
-	benchmarkPrecompiled("100", t, bench)
-}
-
-func TestPrecompiledP256Verify(t *testing.T) { testJson("p256Verify", "100", t) }
 
 func TestDoubleSignSlash(t *testing.T) {
 	tc := precompiledTest{
Input: "f906278202cab9030ff9030ca01062d3d5015b9242bc193a9b0769f3d3780ecb55f97f40a752ae26d0b68cd0d8a0fae1a05fcb14bfd9b8a9f2b65007a9b6c2000de0627a73be644dd993d32342c494976ea74026e726554db657fa54763abd0c3a0aa9a0f385cc58ed297ff0d66eb5580b02853d3478ba418b1819ac659ee05df49b9794a0bf88464af369ed6b8cf02db00f0b9556ffa8d49cd491b00952a7f83431446638a00a6d0870e586a76278fbfdcedf76ef6679af18fc1f9137cfad495f434974ea81b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001820cdf830f4240830f4240846555fa64b90111d983010301846765746888676f312e32302e378664617277696e00007abd731ef8ae07b86091cb8836d58f5444b883422a18825d899035d3e6ea39ad1a50069bf0b86da8b5573dde1cb4a0a34f19ce94e0ef78ff7518c80265b8a3ca56e3c60167523590d4e8dcc324900559465fc0fa403774096614e135de280949b58a45cc96f2ba9e17f848820d41a08429d0d8b33ee72a84f750fefea846cbca54e487129c7961c680bb72309ca888820d42a08c9db14d938b19f9e2261bbeca2679945462be2b58103dfff73665d0d150fb8a804ae755e0fe64b59753f4db6308a1f679747bce186aa2c62b95fa6eeff3fbd08f3b0667e45428a54ade15bad19f49641c499b431b36f65803ea71b379e6b61de501a0232c9ba2d41b40d36ed794c306747bcbc49bf61a0f37409c18bfe2b5bef26a2d880000000000000000b9030ff9030ca01062d3d5015b9242bc193a9b0769f3d3780ecb55f97f40a752ae26d0b68cd0d8a0b2789a5357827ed838335283e15c4dcc42b9bebcbf2919a18613246787e2f96094976ea74026e726554db657fa54763abd0c3a0aa9a071ce4c09ee275206013f0063761bc19c93c13990582f918cc57333634c94ce89a00e095703e5c9b149f253fe89697230029e32484a410b4b1f2c61442d73c3095aa0d317ae19ede7c8a2d3ac9ef98735b049bcb7278d12f48c42b924538b60a25e12b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001820cdf830f4240830f4240846555fa64b90111d983010301846765746888676f312e32302e378664617277696e00007abd731ef8ae07b86091cb8836d58f5444b883422a18825d899035d3e6ea39ad1a50069bf0b86da8b5573dde1cb4a0a34f19ce94e0ef78ff7518c80265b8a3ca56e3c60167523590d4e8dcc324900559465fc0fa403774096614e135de280949b58a45cc96f2ba9e17f848820d41a08429d0d8b33ee72a84f750fefea846cbca54e487129c7961c680bb72309ca888820d42a08c9db14d938b19f9e2261bbeca2679945462be2b58103dfff73665d0d150fb8a80c0b17bfe88534296ff064cb7156548f6deba2d6310d5044ed6485f087dc6ef232e051c28e1909c2b50a3b4f29345d66681c319bef653e52e5d746480d5a3983b00a0b56228685be711834d0f154292d07826dea42a0fad3e4f56c31470b7fbfbea26880000000000000000",
@@ -48,8 +48,6 @@ type (
 func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) {
 	var precompiles map[common.Address]PrecompiledContract
 	switch {
-	case evm.chainRules.IsHaber:
-		precompiles = PrecompiledContractsHaber
 	case evm.chainRules.IsCancun:
 		precompiles = PrecompiledContractsCancun
 	case evm.chainRules.IsFeynman:
5469 core/vm/testdata/precompiles/p256Verify.json (vendored): file diff suppressed because it is too large
@@ -3,13 +3,10 @@ package vote
 import (
 	"bytes"
 	"fmt"
-	"math/big"
 	"sync"
-	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/consensus"
-	"github.com/ethereum/go-ethereum/consensus/parlia"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/eth/downloader"
@@ -20,13 +17,7 @@ import (
 
 const blocksNumberSinceMining = 5 // the number of blocks need to wait before voting, counting from the validator begin to mine
 
-var diffInTurn = big.NewInt(2) // Block difficulty for in-turn signatures
 var votesManagerCounter = metrics.NewRegisteredCounter("votesManager/local", nil)
-var notJustified = metrics.NewRegisteredCounter("votesManager/notJustified", nil)
-var inTurnJustified = metrics.NewRegisteredCounter("votesManager/inTurnJustified", nil)
-var notInTurnJustified = metrics.NewRegisteredCounter("votesManager/notInTurnJustified", nil)
-var continuousJustified = metrics.NewRegisteredCounter("votesManager/continuousJustified", nil)
-var notContinuousJustified = metrics.NewRegisteredCounter("votesManager/notContinuousJustified", nil)
 
 // Backend wraps all methods required for voting.
 type Backend interface {
@@ -40,8 +31,8 @@ type VoteManager struct {
 
 	chain *core.BlockChain
 
-	highestVerifiedBlockCh  chan core.HighestVerifiedBlockEvent
-	highestVerifiedBlockSub event.Subscription
+	chainHeadCh  chan core.ChainHeadEvent
+	chainHeadSub event.Subscription
 
 	// used for backup validators to sync votes from corresponding mining validator
 	syncVoteCh chan core.NewVoteEvent
@@ -58,7 +49,7 @@ func NewVoteManager(eth Backend, chain *core.BlockChain, pool *VotePool, journal
 	voteManager := &VoteManager{
 		eth:   eth,
 		chain: chain,
-		highestVerifiedBlockCh: make(chan core.HighestVerifiedBlockEvent, highestVerifiedBlockChanSize),
+		chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
 		syncVoteCh: make(chan core.NewVoteEvent, voteBufferForPut),
 		pool:       pool,
 		engine:     engine,
@@ -81,7 +72,7 @@ func NewVoteManager(eth Backend, chain *core.BlockChain, pool *VotePool, journal
 	voteManager.journal = voteJournal
 
 	// Subscribe to chain head event.
-	voteManager.highestVerifiedBlockSub = voteManager.chain.SubscribeHighestVerifiedHeaderEvent(voteManager.highestVerifiedBlockCh)
+	voteManager.chainHeadSub = voteManager.chain.SubscribeChainHeadEvent(voteManager.chainHeadCh)
 	voteManager.syncVoteSub = voteManager.pool.SubscribeNewVoteEvent(voteManager.syncVoteCh)
 
 	go voteManager.loop()
@@ -91,7 +82,7 @@ func NewVoteManager(eth Backend, chain *core.BlockChain, pool *VotePool, journal
 
 func (voteManager *VoteManager) loop() {
 	log.Debug("vote manager routine loop started")
-	defer voteManager.highestVerifiedBlockSub.Unsubscribe()
+	defer voteManager.chainHeadSub.Unsubscribe()
 	defer voteManager.syncVoteSub.Unsubscribe()
 
 	events := voteManager.eth.EventMux().Subscribe(downloader.StartEvent{}, downloader.DoneEvent{}, downloader.FailedEvent{})
@@ -126,7 +117,7 @@ func (voteManager *VoteManager) loop() {
 				log.Debug("downloader is in DoneEvent mode, set the startVote flag to true")
 				startVote = true
 			}
-		case cHead := <-voteManager.highestVerifiedBlockCh:
+		case cHead := <-voteManager.chainHeadCh:
 			if !startVote {
 				log.Debug("startVote flag is false, continue")
 				continue
@@ -142,27 +133,18 @@ func (voteManager *VoteManager) loop() {
 				continue
 			}
 
-			if cHead.Header == nil {
-				log.Debug("cHead.Header is nil, continue")
+			if cHead.Block == nil {
+				log.Debug("cHead.Block is nil, continue")
 				continue
 			}
 
-			curHead := cHead.Header
-			if p, ok := voteManager.engine.(*parlia.Parlia); ok {
-				nextBlockMinedTime := time.Unix(int64((curHead.Time + p.Period())), 0)
-				timeForBroadcast := 50 * time.Millisecond // enough to broadcast a vote
-				if time.Now().Add(timeForBroadcast).After(nextBlockMinedTime) {
-					log.Warn("too late to vote", "Head.Time(Second)", curHead.Time, "Now(Millisecond)", time.Now().UnixMilli())
-					continue
-				}
-			}
-
+			curHead := cHead.Block.Header()
 			// Check if cur validator is within the validatorSet at curHead
 			if !voteManager.engine.IsActiveValidatorAt(voteManager.chain, curHead,
 				func(bLSPublicKey *types.BLSPublicKey) bool {
 					return bytes.Equal(voteManager.signer.PubKey[:], bLSPublicKey[:])
 				}) {
-				log.Debug("local validator with voteKey is not within the validatorSet at curHead")
+				log.Debug("cur validator is not within the validatorSet at curHead")
 				continue
 			}
 
@@ -209,36 +191,6 @@ func (voteManager *VoteManager) loop() {
 				voteManager.pool.PutVote(voteMessage)
 				votesManagerCounter.Inc(1)
 			}
-
-			// check the latest justified block, which indicating the stability of the network
-			curJustifiedNumber, _, err := voteManager.engine.GetJustifiedNumberAndHash(voteManager.chain, []*types.Header{curHead})
-			if err == nil && curJustifiedNumber != 0 {
-				if curJustifiedNumber+1 != curHead.Number.Uint64() {
-					log.Debug("not justified", "blockNumber", curHead.Number.Uint64()-1)
-					notJustified.Inc(1)
-				} else {
-					parent := voteManager.chain.GetHeaderByHash(curHead.ParentHash)
-					if parent != nil {
-						if parent.Difficulty.Cmp(diffInTurn) == 0 {
-							inTurnJustified.Inc(1)
-						} else {
-							log.Debug("not in turn block justified", "blockNumber", parent.Number.Int64(), "blockHash", parent.Hash())
-							notInTurnJustified.Inc(1)
-						}
-
-						lastJustifiedNumber, _, err := voteManager.engine.GetJustifiedNumberAndHash(voteManager.chain, []*types.Header{parent})
-						if err == nil {
-							if lastJustifiedNumber == 0 || lastJustifiedNumber+1 == curJustifiedNumber {
-								continuousJustified.Inc(1)
-							} else {
-								log.Debug("not continuous block justified", "lastJustified", lastJustifiedNumber, "curJustified", curJustifiedNumber)
-								notContinuousJustified.Inc(1)
-							}
-						}
-					}
-				}
-			}
-
 		case event := <-voteManager.syncVoteCh:
 			voteMessage := event.Vote
 			if voteManager.eth.IsMining() || !bytes.Equal(voteManager.signer.PubKey[:], voteMessage.VoteAddress[:]) {
@@ -254,7 +206,7 @@ func (voteManager *VoteManager) loop() {
 		case <-voteManager.syncVoteSub.Err():
 			log.Debug("voteManager subscribed votes failed")
 			return
-		case <-voteManager.highestVerifiedBlockSub.Err():
+		case <-voteManager.chainHeadSub.Err():
 			log.Debug("voteManager subscribed chainHead failed")
 			return
 		}
|
|||||||
lowerLimitOfVoteBlockNumber = 256
|
lowerLimitOfVoteBlockNumber = 256
|
||||||
upperLimitOfVoteBlockNumber = 11 // refer to fetcher.maxUncleDist
|
upperLimitOfVoteBlockNumber = 11 // refer to fetcher.maxUncleDist
|
||||||
|
|
||||||
highestVerifiedBlockChanSize = 10 // highestVerifiedBlockChanSize is the size of channel listening to HighestVerifiedBlockEvent.
|
chainHeadChanSize = 10 // chainHeadChanSize is the size of channel listening to ChainHeadEvent.
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -57,8 +57,8 @@ type VotePool struct {
|
|||||||
curVotesPq *votesPriorityQueue
|
curVotesPq *votesPriorityQueue
|
||||||
futureVotesPq *votesPriorityQueue
|
futureVotesPq *votesPriorityQueue
|
||||||
|
|
||||||
highestVerifiedBlockCh chan core.HighestVerifiedBlockEvent
|
chainHeadCh chan core.ChainHeadEvent
|
||||||
highestVerifiedBlockSub event.Subscription
|
chainHeadSub event.Subscription
|
||||||
|
|
||||||
votesCh chan *types.VoteEnvelope
|
votesCh chan *types.VoteEnvelope
|
||||||
|
|
||||||
@ -75,13 +75,13 @@ func NewVotePool(chain *core.BlockChain, engine consensus.PoSA) *VotePool {
|
|||||||
futureVotes: make(map[common.Hash]*VoteBox),
|
futureVotes: make(map[common.Hash]*VoteBox),
|
||||||
curVotesPq: &votesPriorityQueue{},
|
curVotesPq: &votesPriorityQueue{},
|
||||||
futureVotesPq: &votesPriorityQueue{},
|
futureVotesPq: &votesPriorityQueue{},
|
||||||
highestVerifiedBlockCh: make(chan core.HighestVerifiedBlockEvent, highestVerifiedBlockChanSize),
|
chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
|
||||||
votesCh: make(chan *types.VoteEnvelope, voteBufferForPut),
|
votesCh: make(chan *types.VoteEnvelope, voteBufferForPut),
|
||||||
engine: engine,
|
engine: engine,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Subscribe events from blockchain and start the main event loop.
|
// Subscribe events from blockchain and start the main event loop.
|
||||||
votePool.highestVerifiedBlockSub = votePool.chain.SubscribeHighestVerifiedHeaderEvent(votePool.highestVerifiedBlockCh)
|
votePool.chainHeadSub = votePool.chain.SubscribeChainHeadEvent(votePool.chainHeadCh)
|
||||||
|
|
||||||
go votePool.loop()
|
go votePool.loop()
|
||||||
return votePool
|
return votePool
|
||||||
@ -89,18 +89,18 @@ func NewVotePool(chain *core.BlockChain, engine consensus.PoSA) *VotePool {
|
|||||||
|
|
||||||
// loop is the vote pool's main even loop, waiting for and reacting to outside blockchain events and votes channel event.
|
// loop is the vote pool's main even loop, waiting for and reacting to outside blockchain events and votes channel event.
|
||||||
func (pool *VotePool) loop() {
|
func (pool *VotePool) loop() {
|
||||||
defer pool.highestVerifiedBlockSub.Unsubscribe()
|
defer pool.chainHeadSub.Unsubscribe()
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
// Handle ChainHeadEvent.
|
// Handle ChainHeadEvent.
|
||||||
case ev := <-pool.highestVerifiedBlockCh:
|
case ev := <-pool.chainHeadCh:
|
||||||
if ev.Header != nil {
|
if ev.Block != nil {
|
||||||
latestBlockNumber := ev.Header.Number.Uint64()
|
latestBlockNumber := ev.Block.NumberU64()
|
||||||
pool.prune(latestBlockNumber)
|
pool.prune(latestBlockNumber)
|
||||||
pool.transferVotesFromFutureToCur(ev.Header)
|
pool.transferVotesFromFutureToCur(ev.Block.Header())
|
||||||
}
|
}
|
||||||
case <-pool.highestVerifiedBlockSub.Err():
|
case <-pool.chainHeadSub.Err():
|
||||||
return
|
return
|
||||||
|
|
||||||
// Handle votes channel and put the vote into vote pool.
|
// Handle votes channel and put the vote into vote pool.
|
||||||
@ -135,7 +135,7 @@ func (pool *VotePool) putIntoVotePool(vote *types.VoteEnvelope) bool {
|
|||||||
var votesPq *votesPriorityQueue
|
var votesPq *votesPriorityQueue
|
||||||
isFutureVote := false
|
isFutureVote := false
|
||||||
|
|
||||||
voteBlock := pool.chain.GetVerifiedBlockByHash(targetHash)
|
voteBlock := pool.chain.GetHeaderByHash(targetHash)
|
||||||
if voteBlock == nil {
|
if voteBlock == nil {
|
||||||
votes = pool.futureVotes
|
votes = pool.futureVotes
|
||||||
votesPq = pool.futureVotesPq
|
votesPq = pool.futureVotesPq
|
||||||
@ -226,7 +226,7 @@ func (pool *VotePool) transferVotesFromFutureToCur(latestBlockHeader *types.Head
|
|||||||
futurePqBuffer := make([]*types.VoteData, 0)
|
futurePqBuffer := make([]*types.VoteData, 0)
|
||||||
for futurePq.Len() > 0 && futurePq.Peek().TargetNumber <= latestBlockNumber {
|
for futurePq.Len() > 0 && futurePq.Peek().TargetNumber <= latestBlockNumber {
|
||||||
blockHash := futurePq.Peek().TargetHash
|
blockHash := futurePq.Peek().TargetHash
|
||||||
header := pool.chain.GetVerifiedBlockByHash(blockHash)
|
header := pool.chain.GetHeaderByHash(blockHash)
|
||||||
if header == nil {
|
if header == nil {
|
||||||
// Put into pq buffer used for later put again into futurePq
|
// Put into pq buffer used for later put again into futurePq
|
||||||
futurePqBuffer = append(futurePqBuffer, heap.Pop(futurePq).(*types.VoteData))
|
futurePqBuffer = append(futurePqBuffer, heap.Pop(futurePq).(*types.VoteData))
|
||||||
|
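transferVotesFromFutureToCur drains the future-vote priority queue up to the latest block number, buffering entries it cannot resolve yet and pushing them back afterwards. The same pop-buffer-push shape on a toy container/heap (intPQ is illustrative, not the real votesPriorityQueue):

package main

import (
	"container/heap"
	"fmt"
)

// intPQ is a minimal min-heap of ints.
type intPQ []int

func (p intPQ) Len() int            { return len(p) }
func (p intPQ) Less(i, j int) bool  { return p[i] < p[j] }
func (p intPQ) Swap(i, j int)       { p[i], p[j] = p[j], p[i] }
func (p *intPQ) Push(x interface{}) { *p = append(*p, x.(int)) }
func (p *intPQ) Pop() interface{} {
	old := *p
	x := old[len(old)-1]
	*p = old[:len(old)-1]
	return x
}

// drainReady pops entries at or below latest, buffers the unresolvable
// ones, and pushes the buffer back, exactly the loop's shape.
func drainReady(pq *intPQ, latest int, resolvable func(int) bool) []int {
	var moved, buffer []int
	for pq.Len() > 0 && (*pq)[0] <= latest {
		v := heap.Pop(pq).(int)
		if resolvable(v) {
			moved = append(moved, v)
		} else {
			buffer = append(buffer, v)
		}
	}
	for _, v := range buffer {
		heap.Push(pq, v)
	}
	return moved
}

func main() {
	pq := &intPQ{1, 3, 5}
	heap.Init(pq)
	fmt.Println(drainReady(pq, 3, func(v int) bool { return v != 3 })) // [1]
}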
@@ -1,7 +1,7 @@
 package bn256
 
 // For details of the algorithms used, see "Multiplication and Squaring on
-// Pairing-Friendly Fields", Devegili et al.
+// Pairing-Friendly Fields, Devegili et al.
 // http://eprint.iacr.org/2006/471.pdf.
 
 import (
@@ -1,7 +1,7 @@
 package bn256
 
 // For details of the algorithms used, see "Multiplication and Squaring on
-// Pairing-Friendly Fields", Devegili et al.
+// Pairing-Friendly Fields, Devegili et al.
 // http://eprint.iacr.org/2006/471.pdf.
 
 // gfP2 implements a field of size p² as a quadratic extension of the base field
Some files were not shown because too many files have changed in this diff.