Compare commits

..

3 Commits

Author | SHA1 | Message | Date
Felix Lange | a9523b6428 | Merge branch 'master' into release/1.14 | 2024-08-12 14:16:16 +02:00
Felix Lange | aa55f5ea20 | Merge branch 'master' into release/1.14 | 2024-07-11 14:34:03 +02:00
Guillaume Ballet | aadddf3a6e | params: release Geth v1.14.6 | 2024-07-02 15:19:57 +02:00
723 changed files with 14184 additions and 44616 deletions

.github/CODEOWNERS vendored (40 changed lines)

@ -1,36 +1,24 @@
# Lines starting with '#' are comments.
# Each line is a file pattern followed by one or more owners.
accounts/usbwallet/ @gballet
accounts/scwallet/ @gballet
accounts/abi/ @gballet @MariusVanDerWijden
beacon/engine/ @MariusVanDerWijden @lightclient @fjl
beacon/light/ @zsfelfoldi
beacon/merkle/ @zsfelfoldi
beacon/types/ @zsfelfoldi @fjl
beacon/params/ @zsfelfoldi @fjl
cmd/clef/ @holiman
cmd/evm/ @holiman @MariusVanDerWijden @lightclient
core/state/ @rjl493456442 @holiman
crypto/ @gballet @jwasinger @holiman @fjl
core/ @holiman @rjl493456442
eth/ @holiman @rjl493456442
eth/catalyst/ @MariusVanDerWijden @lightclient @fjl @jwasinger
accounts/usbwallet @karalabe
accounts/scwallet @gballet
accounts/abi @gballet @MariusVanDerWijden
beacon/engine @lightclient
cmd/clef @holiman
cmd/evm @holiman @MariusVanDerWijden @lightclient
consensus @karalabe
core/ @karalabe @holiman @rjl493456442
eth/ @karalabe @holiman @rjl493456442
eth/catalyst/ @gballet @lightclient
eth/tracers/ @s1na
ethclient/ @fjl
ethdb/ @rjl493456442
event/ @fjl
trie/ @rjl493456442
triedb/ @rjl493456442
core/tracing/ @s1na
graphql/ @s1na
internal/ethapi/ @fjl @s1na @lightclient
internal/era/ @lightclient
metrics/ @holiman
miner/ @MariusVanDerWijden @holiman @fjl @rjl493456442
internal/ethapi @lightclient
internal/era @lightclient
les/ @zsfelfoldi @rjl493456442
light/ @zsfelfoldi @rjl493456442
node/ @fjl
p2p/ @fjl @zsfelfoldi
rlp/ @fjl
params/ @fjl @holiman @karalabe @gballet @rjl493456442 @zsfelfoldi
rpc/ @fjl @holiman
signer/ @holiman

@ -15,7 +15,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.23.0
go-version: 1.21.4
cache: false
- name: Run tests
run: go test -short ./...

.gitignore vendored (25 changed lines)

@ -4,11 +4,16 @@
# or operating system, you probably want to add a global ignore instead:
# git config --global core.excludesfile ~/.gitignore_global
/tmp
*/**/*un~
*/**/*.test
*un~
.DS_Store
*/**/.DS_Store
.ethtest
*/**/*tx_database*
*/**/*dapps*
build/_vendor/pkg
#*
.#*
@ -23,23 +28,25 @@
/build/bin/
/geth*.zip
# used by the build/ci.go archive + upload tool
/geth*.tar.gz
/geth*.tar.gz.sig
/geth*.tar.gz.asc
/geth*.zip.sig
/geth*.zip.asc
# travis
profile.tmp
profile.cov
# IdeaIDE
.idea
*.iml
# VS Code
.vscode
# dashboard
/dashboard/assets/flow-typed
/dashboard/assets/node_modules
/dashboard/assets/stats.json
/dashboard/assets/bundle.js
/dashboard/assets/bundle.js.map
/dashboard/assets/package-lock.json
**/yarn-error.log
logs/
tests/spec-tests/

@ -3,6 +3,9 @@
run:
timeout: 20m
tests: true
# default is true. Enables skipping of directories:
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
skip-dirs-use-default: true
linters:
disable-all: true
@ -18,14 +21,10 @@ linters:
- staticcheck
- bidichk
- durationcheck
- copyloopvar
- exportloopref
- whitespace
- revive # only certain checks enabled
- durationcheck
- gocheckcompilerdirectives
- reassign
- mirror
- tenv
### linters we tried and will not be using:
###
# - structcheck # lots of false positives
@ -51,9 +50,6 @@ linters-settings:
exclude: [""]
issues:
# default is true. Enables skipping of directories:
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
exclude-dirs-use-default: true
exclude-files:
- core/genesis_alloc.go
exclude-rules:
@ -64,10 +60,6 @@ issues:
- path: crypto/bn256/
linters:
- revive
- path: cmd/utils/flags.go
text: "SA1019: cfg.TxLookupLimit is deprecated: use 'TransactionHistory' instead."
- path: cmd/utils/flags.go
text: "SA1019: ethconfig.Defaults.TxLookupLimit is deprecated: use 'TransactionHistory' instead."
- path: internal/build/pgp.go
text: 'SA1019: "golang.org/x/crypto/openpgp" is deprecated: this package is unmaintained except for security fixes.'
- path: core/vm/contracts.go

@ -9,13 +9,14 @@ jobs:
- azure-osx
include:
# This builder create and push the Docker images for all architectures
# These builders create the Docker sub-images for multi-arch push and each
# will attempt to push the multi-arch image if they are the last builder
- stage: build
if: type = push
os: linux
arch: amd64
dist: focal
go: 1.23.x
dist: noble
go: 1.22.x
env:
- docker
services:
@ -25,15 +26,32 @@ jobs:
before_install:
- export DOCKER_CLI_EXPERIMENTAL=enabled
script:
- go run build/ci.go dockerx -platform "linux/amd64,linux/arm64,linux/riscv64" -upload ethereum/client-go
- go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go
- stage: build
if: type = push
os: linux
arch: arm64
dist: noble
go: 1.22.x
env:
- docker
services:
- docker
git:
submodules: false # avoid cloning ethereum/tests
before_install:
- export DOCKER_CLI_EXPERIMENTAL=enabled
script:
- go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go
# This builder does the Linux Azure uploads
- stage: build
if: type = push
os: linux
dist: focal
dist: noble
sudo: required
go: 1.23.x
go: 1.22.x
env:
- azure-linux
git:
@ -45,7 +63,6 @@ jobs:
# build 386
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends install gcc-multilib
- git status --porcelain
- go run build/ci.go install -dlgo -arch 386
- go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
@ -67,13 +84,12 @@ jobs:
if: type = push
os: osx
osx_image: xcode14.2
go: 1.23.1 # See https://github.com/ethereum/go-ethereum/pull/30478
go: 1.22.x
env:
- azure-osx
git:
submodules: false # avoid cloning ethereum/tests
script:
- ln -sf /Users/travis/gopath/bin/go1.23.1 /usr/local/bin/go # Work around travis go-setup bug
- go run build/ci.go install -dlgo
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
- go run build/ci.go install -dlgo -arch arm64
@ -84,16 +100,16 @@ jobs:
if: type = push
os: linux
arch: amd64
dist: focal
go: 1.23.x
dist: noble
go: 1.22.x
script:
- travis_wait 45 go run build/ci.go test $TEST_PACKAGES
- stage: build
if: type = push
os: linux
dist: focal
go: 1.22.x
dist: noble
go: 1.21.x
script:
- travis_wait 45 go run build/ci.go test $TEST_PACKAGES
@ -101,8 +117,8 @@ jobs:
- stage: build
if: type = cron || (type = push && tag ~= /^v[0-9]/)
os: linux
dist: focal
go: 1.23.x
dist: noble
go: 1.22.x
env:
- ubuntu-ppa
git:
@ -117,8 +133,8 @@ jobs:
- stage: build
if: type = cron
os: linux
dist: focal
go: 1.23.x
dist: noble
go: 1.22.x
env:
- azure-purge
git:
@ -130,8 +146,8 @@ jobs:
- stage: build
if: type = cron
os: linux
dist: focal
go: 1.23.x
dist: noble
go: 1.22.x
env:
- racetests
script:

@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM=""
# Build Geth in a stock Go builder container
FROM golang:1.23-alpine AS builder
FROM golang:1.22-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git

@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM=""
# Build Geth in a stock Go builder container
FROM golang:1.23-alpine AS builder
FROM golang:1.22-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git
@ -14,13 +14,6 @@ COPY go.sum /go-ethereum/
RUN cd /go-ethereum && go mod download
ADD . /go-ethereum
# This is not strictly necessary, but it matches the "Dockerfile" steps, thus
# makes it so that under certain circumstances, the docker layer can be cached,
# and the builder can jump to the next (build all) command, with the go cache fully loaded.
#
RUN cd /go-ethereum && go run build/ci.go install -static ./cmd/geth
RUN cd /go-ethereum && go run build/ci.go install -static
# Pull all binaries into a second stage deploy alpine container

README.md (135 changed lines)

@ -16,7 +16,7 @@ archives are published at https://geth.ethereum.org/downloads/.
For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/getting-started/installing-geth).
Building `geth` requires both a Go (version 1.22 or later) and a C compiler. You can install
Building `geth` requires both a Go (version 1.21 or later) and a C compiler. You can install
them using your favourite package manager. Once the dependencies are installed, run
```shell
@ -40,6 +40,7 @@ directory.
| `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. |
| `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy-to-use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/developers/dapp-developer/native-bindings) page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
@ -54,14 +55,14 @@ on how you can run your own `geth` instance.
Minimum:
* CPU with 4+ cores
* 8GB RAM
* CPU with 2+ cores
* 4GB RAM
* 1TB free storage space to sync the Mainnet
* 8 MBit/sec download Internet service
Recommended:
* Fast CPU with 8+ cores
* Fast CPU with 4+ cores
* 16GB+ RAM
* High-performance SSD with at least 1TB of free space
* 25+ MBit/sec download Internet service
@ -88,7 +89,7 @@ This command will:
This tool is optional and if you leave it out you can always attach it to an already running
`geth` instance with `geth attach`.
### A Full node on the Holesky test network
### A Full node on the Görli test network
Transitioning towards developers, if you'd like to play around with creating Ethereum
contracts, you almost certainly would like to do that without any real money involved until
@ -97,23 +98,23 @@ network, you want to join the **test** network with your node, which is fully eq
the main network, but with play-Ether only.
```shell
$ geth --holesky console
$ geth --goerli console
```
The `console` subcommand has the same meaning as above and is equally
useful on the testnet too.
Specifying the `--holesky` flag, however, will reconfigure your `geth` instance a bit:
Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit:
* Instead of connecting to the main Ethereum network, the client will connect to the Holesky
* Instead of connecting to the main Ethereum network, the client will connect to the Görli
test network, which uses different P2P bootnodes, different network IDs and genesis
states.
* Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth`
will nest itself one level deeper into a `holesky` subfolder (`~/.ethereum/holesky` on
will nest itself one level deeper into a `goerli` subfolder (`~/.ethereum/goerli` on
Linux). Note, on OSX and Linux this also means that attaching to a running testnet node
requires the use of a custom endpoint since `geth attach` will try to attach to a
production node endpoint by default, e.g.,
`geth attach <datadir>/holesky/geth.ipc`. Windows users are not affected by
`geth attach <datadir>/goerli/geth.ipc`. Windows users are not affected by
this.
*Note: Although some internal protective measures prevent transactions from
@ -138,6 +139,8 @@ export your existing configuration:
$ geth --your-favourite-flags dumpconfig
```
*Note: This works only with `geth` v1.6.0 and above.*
#### Docker quick start
One of the quickest ways to get Ethereum up and running on your machine is by using
@ -185,6 +188,7 @@ HTTP based JSON-RPC API options:
* `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`)
* `--ws.origins` Origins from which to accept WebSocket requests
* `--ipcdisable` Disable the IPC-RPC server
* `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,txpool,web3`)
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
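As an illustration, a minimal Go sketch (assuming a local node started with `--http` on the default port; this program is not part of the repository) that talks to the node via `ethclient`:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Assumes geth is running locally with --http enabled on the default port.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatalf("failed to connect to geth: %v", err)
	}
	defer client.Close()

	// Query the current chain head number over JSON-RPC (eth_blockNumber).
	head, err := client.BlockNumber(context.Background())
	if err != nil {
		log.Fatalf("failed to fetch block number: %v", err)
	}
	fmt.Println("current block:", head)
}
```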
You'll need to use your own programming environments' capabilities (libraries, tools, etc) to
@ -203,14 +207,113 @@ APIs!**
Maintaining your own private network is more involved as a lot of configurations taken for
granted in the official networks need to be manually set up.
Unfortunately since [the Merge](https://ethereum.org/en/roadmap/merge/) it is no longer possible
to easily set up a network of geth nodes without also setting up a corresponding beacon chain.
#### Defining the private genesis state
There are three different solutions depending on your use case:
First, you'll need to create the genesis state of your networks, which all nodes need to be
aware of and agree upon. This consists of a small JSON file (e.g. call it `genesis.json`):
* If you are looking for a simple way to test smart contracts from go in your CI, you can use the [Simulated Backend](https://geth.ethereum.org/docs/developers/dapp-developer/native-bindings#blockchain-simulator) (a minimal Go sketch follows this list).
* If you want a convenient single node environment for testing, you can use our [Dev Mode](https://geth.ethereum.org/docs/developers/dapp-developer/dev-mode).
* If you are looking for a multiple node test network, you can set one up quite easily with [Kurtosis](https://geth.ethereum.org/docs/fundamentals/kurtosis).
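A minimal Go sketch of the first option, assuming the `ethclient/simulated` package layout of the v1.14 line (the key and balance below are made up):

```go
package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient/simulated"
)

func main() {
	// Generate a throwaway key and pre-fund it in the simulated genesis.
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	addr := crypto.PubkeyToAddress(key.PublicKey)

	backend := simulated.NewBackend(types.GenesisAlloc{
		addr: {Balance: big.NewInt(1e18)}, // 1 ether
	})
	defer backend.Close()

	// The backend exposes an ethclient-compatible client for tests.
	balance, err := backend.Client().BalanceAt(context.Background(), addr, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("funded balance:", balance)
}
```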
```json
{
"config": {
"chainId": <arbitrary positive integer>,
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"berlinBlock": 0,
"londonBlock": 0
},
"alloc": {},
"coinbase": "0x0000000000000000000000000000000000000000",
"difficulty": "0x20000",
"extraData": "",
"gasLimit": "0x2fefd8",
"nonce": "0x0000000000000042",
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"timestamp": "0x00"
}
```
The above fields should be fine for most purposes, although we'd recommend changing
the `nonce` to some random value so you prevent unknown remote nodes from being able
to connect to you. If you'd like to pre-fund some accounts for easier testing, create
the accounts and populate the `alloc` field with their addresses.
```json
"alloc": {
"0x0000000000000000000000000000000000000001": {
"balance": "111111111"
},
"0x0000000000000000000000000000000000000002": {
"balance": "222222222"
}
}
```
With the genesis state defined in the above JSON file, you'll need to initialize **every**
`geth` node with it prior to starting it up to ensure all blockchain parameters are correctly
set:
```shell
$ geth init path/to/genesis.json
```
#### Creating the rendezvous point
With all nodes that you want to run initialized to the desired genesis state, you'll need to
start a bootstrap node that others can use to find each other in your network and/or over
the internet. The clean way is to configure and run a dedicated bootnode:
```shell
$ bootnode --genkey=boot.key
$ bootnode --nodekey=boot.key
```
With the bootnode online, it will display an [`enode` URL](https://ethereum.org/en/developers/docs/networking-layer/network-addresses/#enode)
that other nodes can use to connect to it and exchange peer information. Make sure to
replace the displayed IP address information (most probably `[::]`) with your externally
accessible IP to get the actual `enode` URL.
*Note: You could also use a full-fledged `geth` node as a bootnode, but it's the less
recommended way.*
#### Starting up your member nodes
With the bootnode operational and externally reachable (you can try
`telnet <ip> <port>` to ensure it's indeed reachable), start every subsequent `geth`
node pointed to the bootnode for peer discovery via the `--bootnodes` flag. It will
probably also be desirable to keep the data directory of your private network separated, so
do also specify a custom `--datadir` flag.
```shell
$ geth --datadir=path/to/custom/data/folder --bootnodes=<bootnode-enode-url-from-above>
```
*Note: Since your network will be completely cut off from the main and test networks, you'll
also need to configure a miner to process transactions and create new blocks for you.*
#### Running a private miner
In a private network setting a single CPU miner instance is more than enough for
practical purposes as it can produce a stable stream of blocks at the correct intervals
without needing heavy resources (consider running on a single thread, no need for multiple
ones either). To start a `geth` instance for mining, run it with all your usual flags, extended
by:
```shell
$ geth <usual-flags> --mine --miner.threads=1 --miner.etherbase=0x0000000000000000000000000000000000000000
```
Which will start mining blocks and transactions on a single CPU thread, crediting all
proceedings to the account specified by `--miner.etherbase`. You can further tune the mining
by changing the default gas limit blocks converge to (`--miner.targetgaslimit`) and the price
transactions are accepted at (`--miner.gasprice`).
## Contribution

@ -29,69 +29,147 @@ Fingerprint: `AE96 ED96 9E47 9B00 84F3 E17F E88D 3334 FA5F 6A0A`
```
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: SKS 1.1.6
Comment: Hostname: pgp.mit.edu
mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaY
neAk3Bp182GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9
L8c8yiqry1ZTCmYMqCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUi
m+y7buJDtoNf7YILlhDQXN8qlHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0b
fUo9pexOn7LS4SojoJmsm/5dp6AoKlac48cZU5zwR9AYcq/nvkrfmf2WkObg/xRd
EvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/yPFE335k+ujjZCPOu7OwjzDk7
M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXChoyI8vbfp4dGvCvYqv
QAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+FnQOUgg2H
h8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c
2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZ
EZCjMXxB8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQAB
tDRFdGhlcmV1bSBGb3VuZGF0aW9uIEJ1ZyBCb3VudHkgPGJvdW50eUBldGhlcmV1
bS5vcmc+iQJVBBMBCAA/AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgBYhBK6W
7ZaeR5sAhPPhf+iNMzT6X2oKBQJl2LD9BQkRdTklAAoJEOiNMzT6X2oKYYYQALkV
wJjWYoVoMuw9D1ybQo4Sqyp6D/XYHXSpqZDO9RlADQisYBfuO7EW75evgZ+54Ajc
8gZ2BUkFcSR9z2t0TEkUyjmPDZsaElTTP2Boa2GG5pyziEM6t1cMMY1sP1aotx9H
DYwCeMmDv0wTMi6v0C6+/in2hBxbGALRbQKWKd/5ss4OEPe37hG9zAJcBYZg2tes
O7ceg7LHZpNC1zvMUrBY6os74FJ437f8bankqvVE83/dvTcCDhMsei9LiWS2uo26
qiyqeR9lZEj8W5F6UgkQH+UOhamJ9UB3N/h//ipKrwtiv0+jQm9oNG7aIAi3UJgD
CvSod87H0l7/U8RWzyam/r8eh4KFM75hIVtqEy5jFV2z7x2SibXQi7WRfAysjFLp
/li8ff6kLDR9IMATuMSF7Ol0O9JMRfSPjRZRtVOwYVIBla3BhfMrjvMMcZMAy/qS
DWx2iFYDMGsswv7hp3lsFOaa1ju95ClZZk3q/z7u5gH7LFAxR0jPaW48ay3CFylW
sDpQpO1DWb9uXBdhOU+MN18uSjqzocga3Wz2C8jhWRvxyFf3SNIybm3zk6W6IIoy
6KmwSRZ30oxizy6zMYw1qJE89zjjumzlZAm0R/Q4Ui+WJhlSyrYbqzqdxYuLgdEL
lgKfbv9/t8tNXGGSuCe5L7quOv9k7l2+QmLlg+SJtDlFdGhlcmV1bSBGb3VuZGF0
aW9uIFNlY3VyaXR5IFRlYW0gPHNlY3VyaXR5QGV0aGVyZXVtLm9yZz6JAlUEEwEI
AD8CGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAFiEErpbtlp5HmwCE8+F/6I0z
NPpfagoFAmXYsP4FCRF1OSUACgkQ6I0zNPpfagoUGA/+LVzXUJrsfi8+ADMF1hru
wFDcY1r+vM4Ovbk1NhCc/DnV5VG40j5FiQpE81BNiH59sYeZkQm9jFbwevK7Zpuq
RZaG2WGiwU/11xrt5/Qjq7T+vEtd94546kFcBnP8uexZqP4dTi4LHa2on8aRbwzN
7RjCpCQhy1TUuk47dyOR1y3ZHrpTwkHpuhwgffaWtxgSyCMYz7fsd5Ukh3eE+Ani
90CIUieve2U3o+WPxBD9PRaIPg6LmBhfGxGvC/6tqY9W3Z9xEOVDxC4wdYppQzsg
Pg7bNnVmlFWHsEk8FuMfY8nTqY3/ojhJxikWKz2V3Y2AbsLEXCvrEg6b4FvmsS97
8ifEBbFXU8hvMSpMLtO7vLamWyOHq41IXWH6HLNLhDfDzTfpAJ8iYDKGj72YsMzF
0fIjPa6mniMB2RmREAM0Jas3M/6DUw1EzwK1iQofIBoCRPIkR5mxmzjcRB6tVdQa
on20/9YTKKBUQAdK0OWW8j1euuULDgNdkN2LBXdQLy/JcQiggU8kOCKL/Lmj5HWP
FNT9rYfnjmCuux3UfJGfhPryujEA0CdIfq1Qf4ldOVzpWYjsMn+yQxAQTorAzF3z
iYddP2cw/Nvookay8xywKJnDsaRaWqdQ8Ceox3qSB4LCjQRNR5c3HfvGm3EBdEyI
zEEpjZ6GHa05DCajqKjtjlm5Ag0EWCXe2AEQAJuCrodM3mAQGLSWQP8xp8ieY2L7
n1TmBEZiqTjpaV9GOEe51eMOmAPSWiUZviFiie2QxopGUKDZG+CO+Tdm97Q8paMr
DuCvxgFr18wVjwGEBcjfY53Ij2sWHERkV9YB/ApWZPX0F14BBEW9x937zDx/VdVz
7N11QswkUFOv7EoOUhFbBOR0s9B5ZuOjR4eX+Di24uIutPFVuePbpt/7b7UNsz/D
lVq/M+uS+Ieq8p79A/+BrLhANWJa8WAtv3SJ18Ach2w+B+WnRUNLmtUcUvoPvetJ
F0hGjcjxzyZig2NJHhcO6+A6QApb0tHem+i4UceOnoWvQZ6xFuttvYQbrqI+xH30
xDsWogv1Uwc+baa1H5e9ucqQfatjISpoxtJ2Tb2MZqmQBaV7iwiFIqTvj0Di0aQe
XTwpnY32joat9R6E/9XZ4ktqmHYOKgUvUfFGvKsrLzRBAoswlF6TGKCryCt5bzEH
jO5/0yS6i75Ec2ajw95seMWy0uKCIPr/M/Z77i1SatPT8lMY5KGgYyXxG3RVHF08
iYq6f7gs5dt87ECs5KRjqLfn6CyCSRLLWBMkTQFjdL1q5Pr5iuCVj9NY9D0gnFZU
4qVP7dYinnAm7ZsEpDjbRUuoNjOShbK16X9szUAJS2KkyIhV5Sza4WJGOnMDVbLR
Aco9N1K4aUk9Gt9xABEBAAGJAjwEGAEIACYCGwwWIQSulu2WnkebAITz4X/ojTM0
+l9qCgUCZdiwoAUJEXU4yAAKCRDojTM0+l9qCj2PD/9pbIPRMZtvKIIE+OhOAl/s
qfZJXByAM40ELpUhDHqwbOplIEyvXtWfQ5c+kWlG/LPJ2CgLkHyFQDn6tuat82rH
/5VoZyxp16CBAwEgYdycOr9hMGSVKNIJDfV9Bu6VtZnn6fa/swBzGE7eVpXsIoNr
jeqsogBtzLecG1oHMXRMq7oUqu9c6VNoCx2uxRUOeWW8YuP7h9j6mxIuKKbcpmQ5
RSLNEhJZJsMMFLf8RAQPXmshG1ZixY2ZliNe/TTm6eEfFCw0KcQxoX9LmurLWE9w
dIKgn1/nQ04GFnmtcq3hVxY/m9BvzY1jmZXNd4TdpfrPXhi0W/GDn53ERFPJmw5L
F8ogxzD/ekxzyd9nCCgtzkamtBKDJk35x/MoVWMLjD5k6P+yW7YY4xMQliSJHKss
leLnaPpgDBi4KPtLxPswgFqObcy4TNse07rFO4AyHf11FBwMTEfuODCOMnQTpi3z
Zx6KxvS3BEY36abjvwrqsmt8dJ/+/QXT0e82fo2kJ65sXIszez3e0VUZ8KrMp+wd
X0GWYWAfqXws6HrQFYfIpEE0Vz9gXDxEOTFZ2FoVIvIHyRfyDrAIz3wZLmnLGk1h
l3CDjHF0Wigv0CacIQ1V1aYp3NhIVwAvShQ+qS5nFgik6UZnjjWibobOm3yQDzll
6F7hEeTW+gnXEI2gPjfb5w==
=b5eA
mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaYneAk3Bp1
82GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9L8c8yiqry1ZTCmYM
qCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUim+y7buJDtoNf7YILlhDQXN8q
lHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0bfUo9pexOn7LS4SojoJmsm/5dp6AoKlac
48cZU5zwR9AYcq/nvkrfmf2WkObg/xRdEvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/y
PFE335k+ujjZCPOu7OwjzDk7M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXCho
yI8vbfp4dGvCvYqvQAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+F
nQOUgg2Hh8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c
2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZEZCjMXxB
8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQABtDRFdGhlcmV1bSBG
b3VuZGF0aW9uIEJ1ZyBCb3VudHkgPGJvdW50eUBldGhlcmV1bS5vcmc+iQIcBBEBCAAGBQJa
FCY6AAoJEHoMA3Q0/nfveH8P+gJBPo9BXZL8isUfbUWjwLi81Yi70hZqIJUnz64SWTqBzg5b
mCZ69Ji5637THsxQetS2ARabz0DybQ779FhD/IWnqV9T3KuBM/9RzJtuhLzKCyMrAINPMo28
rKWdunHHarpuR4m3tL2zWJkle5QVYb+vkZXJJE98PJw+N4IYeKKeCs2ubeqZu636GA0sMzzB
Jn3m/dRRA2va+/zzbr6F6b51ynzbMxWKTsJnstjC8gs8EeI+Zcd6otSyelLtCUkk3h5sTvpV
Wv67BNSU0BYsMkxyFi9PUyy07Wixgeas89K5jG1oOtDva/FkpRHrTE/WA5OXDRcLrHJM+SwD
CwqcLQqJd09NxwUW1iKeBmPptTiOGu1Gv2o7aEyoaWrHRBO7JuYrQrj6q2B3H1Je0zjAd2qt
09ni2bLwLn4LA+VDpprNTO+eZDprv09s2oFSU6NwziHybovu0y7X4pADGkK2evOM7c86PohX
QRQ1M1T16xLj6wP8/Ykwl6v/LUk7iDPXP3GPILnh4YOkwBR3DsCOPn8098xy7FxEELmupRzt
Cj9oC7YAoweeShgUjBPzb+nGY1m6OcFfbUPBgFyMMfwF6joHbiVIO+39+Ut2g2ysZa7KF+yp
XqVDqyEkYXsOLb25OC7brt8IJEPgBPwcHK5GNag6RfLxnQV+iVZ9KNH1yQgSiQI+BBMBAgAo
AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAUCWglh+gUJBaNgWAAKCRDojTM0+l9qCgQ2
D/4udJpV4zGIZW1yNaVvtd3vfKsTLi7GIRJLUBqVb2Yx/uhnN8jTl/tAhCVosCQ1pzvi9kMl
s8qO1vu2kw5EWFFkwK96roI8pTql3VIjwhRVQrCkR7oAk/eUd1U/nt2q6J4UTYeVgqbq4dsI
ZZTRyPJMD667YpuAIcaah+w9j/E5xksYQdMeprnDrQkkBCb4FIMqfDzBPKvEa8DcQr949K85
kxhr6LDq9i5l4Egxt2JdH8DaR4GLca6+oHy0MyPs/bZOsfmZUObfM2oZgPpqYM96JanhzO1j
dpnItyBii2pc+kNx5nMOf4eikE/MBv+WUJ0TttWzApGGmFUzDhtuEvRH9NBjtJ/pMrYspIGu
O/QNY5KKOKQTvVIlwGcm8dTsSkqtBDSUwZyWbfKfKOI1/RhM9dC3gj5/BOY57DYYV4rdTK01
ZtYjuhdfs2bhuP1uF/cgnSSZlv8azvf7Egh7tHPnYxvLjfq1bJAhCIX0hNg0a81/ndPAEFky
fSko+JPKvdSvsUcSi2QQ4U2HX//jNBjXRfG4F0utgbJnhXzEckz6gqt7wSDZH2oddVuO8Ssc
T7sK+CdXthSKnRyuI+sGUpG+6glpKWIfYkWFKNZWuQ+YUatY3QEDHXTIioycSmV8p4d/g/0S
V6TegidLxY8bXMkbqz+3n6FArRffv5MH7qt3cYkCPgQTAQIAKAUCWCXhOwIbAwUJAeEzgAYL
CQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQ6I0zNPpfagrN/w/+Igp3vtYdNunikw3yHnYf
Jkm0MmaMDUM9mtsaXVN6xb9n25N3Xa3GWCpmdsbYZ8334tI/oQ4/NHq/bEI5WFH5F1aFkMkm
5AJVLuUkipCtmCZ5NkbRPJA9l0uNUUE6uuFXBhf4ddu7jb0jMetRF/kifJHVCCo5fISUNhLp
7bwcWq9qgDQNZNYMOo4s9WX5Tl+5x4gTZdd2/cAYt49h/wnkw+huM+Jm0GojpLqIQ1jZiffm
otf5rF4L+JhIIdW0W4IIh1v9BhHVllXw+z9oj0PALstT5h8/DuKoIiirFJ4DejU85GR1KKAS
DeO19G/lSpWj1rSgFv2N2gAOxq0X+BbQTua2jdcY6JpHR4H1JJ2wzfHsHPgDQcgY1rGlmjVF
aqU73WV4/hzXc/HshK/k4Zd8uD4zypv6rFsZ3UemK0aL2zXLVpV8SPWQ61nS03x675SmDlYr
A80ENfdqvsn00JQuBVIv4Tv0Ub7NfDraDGJCst8rObjBT/0vnBWTBCebb2EsnS2iStIFkWdz
/WXs4L4Yzre1iJwqRjiuqahZR5jHsjAUf2a0O29HVHE7zlFtCFmLPClml2lGQfQOpm5klGZF
rmvus+qZ9rt35UgWHPZezykkwtWrFOwspwuCWaPDto6tgbRJZ4ftitpdYYM3dKW9IGJXBwrt
BQrMsu+lp0vDF+yJAlUEEwEIAD8CGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAFiEErpbt
lp5HmwCE8+F/6I0zNPpfagoFAmEAEJwFCQycmLgACgkQ6I0zNPpfagpWoBAAhOcbMAUw6Zt0
GYzT3sR5/c0iatezPzXEXJf9ebzR8M5uPElXcxcnMx1dvXZmGPXPJKCPa99WCu1NZYy8F+Wj
GTOY9tfIkvSxhys1p/giPAmvid6uQmD+bz7ivktnyzCkDWfMA+l8lsCSEqVlaq6y5T+a6SWB
6TzC2S0MPb/RrC/7DpwyrNYWumvyVJh09adm1Mw/UGgst/sZ8eMaRYEd3X0yyT1CBpX4zp2E
qQj9IEOTizvzv1x2jkHe5ZUeU3+nTBNlhSA+WFHUi0pfBdo2qog3Mv2EC1P2qMKoSdD5tPbA
zql1yKoHHnXOMsqdftGwbiv2sYXWvrYvmaCd3Ys/viOyt3HOy9uV2ZEtBd9Yqo9x/NZj8QMA
nY5k8jjrIXbUC89MqrJsQ6xxWQIg5ikMT7DvY0Ln89ev4oJyVvwIQAwCm4jUzFNm9bZLYDOP
5lGJCV7tF5NYVU7NxNM8vescKc40mVNK/pygS5mxhK9QYOUjZsIv8gddrl1TkqrFMuxFnTyN
WvzE29wFu/n4N1DkF+ZBqS70SlRvB+Hjz5LrDgEzF1Wf1eA/wq1dZbvMjjDVIc2VGlYp8Cp2
8ob23c1seTtYXTNYgSR5go4EpH+xi+bIWv01bQQ9xGwBbT5sm4WUeWOcmX4QewzLZ3T/wK9+
N4Ye/hmU9O34FwWJOY58EIe0OUV0aGVyZXVtIEZvdW5kYXRpb24gU2VjdXJpdHkgVGVhbSA8
c2VjdXJpdHlAZXRoZXJldW0ub3JnPokCHAQRAQgABgUCWhQmOgAKCRB6DAN0NP5372LSEACT
wZk1TASWZj5QF7rmkIM1GEyBxLE+PundNcMgM9Ktj1315ED8SmiukNI4knVS1MY99OIgXhQl
D1foF2GKdTomrwwC4012zTNyUYCY60LnPZ6Z511HG+rZgZtZrbkz0IiUpwAlhGQND77lBqem
J3K+CFX2XpDA/ojui/kqrY4cwMT5P8xPJkwgpRgw/jgdcZyJTsXdHblV9IGU4H1Vd1SgcfAf
Db3YxDUlBtzlp0NkZqxen8irLIXUQvsfuIfRUbUSkWoK/n3U/gOCajAe8ZNF07iX4OWjH4Sw
NDA841WhFWcGE+d8+pfMVfPASU3UPKH72uw86b2VgR46Av6voyMFd1pj+yCA+YAhJuOpV4yL
QaGg2Z0kVOjuNWK/kBzp1F58DWGh4YBatbhE/UyQOqAAtR7lNf0M3QF9AdrHTxX8oZeqVW3V
Fmi2mk0NwCIUv8SSrZr1dTchp04OtyXe5gZBXSfzncCSRQIUDC8OgNWaOzAaUmK299v4bvye
uSCxOysxC7Q1hZtjzFPKdljS81mRlYeUL4fHlJU9R57bg8mriSXLmn7eKrSEDm/EG5T8nRx7
TgX2MqJs8sWFxD2+bboVEu75yuFmZ//nmCBApAit9Hr2/sCshGIEpa9MQ6xJCYUxyqeJH+Cc
Aja0UfXhnK2uvPClpJLIl4RE3gm4OXeE1IkCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC
AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagr4MQ//cfp3GSbSG8dkqgctW67Fy7cQ
diiTmx3cwxY+tlI3yrNmdjtrIQMzGdqtY6LNz7aN87F8mXNf+DyVHX9+wd1Y8U+E+hVCTzKC
sefUfxTz6unD9TTcGqaoelgIPMn4IiKz1RZE6eKpfDWe6q78W1Y6x1bE0qGNSjqT/QSxpezF
E/OAm/t8RRxVxDtqz8LfH2zLea5zaC+ADj8EqgY9vX9TQa4DyVV8MgOyECCCadJQCD5O5hIA
B2gVDWwrAUw+KBwskXZ7Iq4reJTKLEmt5z9zgtJ/fABwaCFt66ojwg0/RjbO9cNA3ZwHLGwU
C6hkb6bRzIoZoMfYxVS84opiqf/Teq+t/XkBYCxbSXTJDA5MKjcVuw3N6YKWbkGP/EfQThe7
BfAKFwwIw5YmsWjHK8IQj6R6hBxzTz9rz8y1Lu8EAAFfA7OJKaboI2qbOlauH98OuOUmVtr1
TczHO+pTcgWVN0ytq2/pX5KBf4vbmULNbg3HFRq+gHx8CW+jyXGkcqjbgU/5FwtDxeqRTdGJ
SyBGNBEU6pBNolyynyaKaaJjJ/biY27pvjymL5rlz95BH3Dn16Z4RRmqwlT6eq/wFYginujg
CCE1icqOSE+Vjl7V8tV8AcgANkXKdbBE+Q8wlKsGI/kS1w4XFAYcaNHFT8qNeS8TSFXFhvU8
HylYxO79t56JAj4EEwECACgFAlgl3tgCGwMFCQHhM4AGCwkIBwMCBhUIAgkKCwQWAgMBAh4B
AheAAAoJEOiNMzT6X2oKmUMP/0hnaL6bVyepAq2LIdvIUbHfagt/Oo/KVfZs4bkM+xJOitJR
0kwZV9PTihXFdzhL/YNWc2+LtEBtKItqkJZKmWC0E6OPXGVuU6hfFPebuzVccYJfm0Q3Ej19
VJI9Uomf59Bpak8HYyEED7WVQjoYn7XVPsonwus/9+LDX+c5vutbrUdbjga3KjHbewD93X4O
wVVoXyHEmU2Plyg8qvzFbNDylCWO7N2McO6SN6+7DitGZGr2+jO+P2R4RT1cnl2V3IRVcWZ0
OTspPSnRGVr2fFiHN/+v8G/wHPLQcJZFvYPfUGNdcYbTmhWdiY0bEYXFiNrgzCCsyad7eKUR
WN9QmxqmyqLDjUEDJCAh19ES6Vg3tqGwXk+uNUCoF30ga0TxQt6UXZJDEQFAGeASQ/RqE/q1
EAuLv8IGM8o7IqKO2pWfLuqsY6dTbKBwDzz9YOJt7EOGuPPQbHxaYStTushZmJnm7hi8lhVG
jT7qsEJdE95Il+I/mHWnXsCevaXjZugBiyV9yvOq4Hwwe2s1zKfrnQ4u0cadvGAh2eIqum7M
Y3o6nD47aJ3YmEPX/WnhI56bACa2GmWvUwjI4c0/er3esSPYnuHnM9L8Am4qQwMVSmyU80tC
MI7A9e13Mvv+RRkYFLJ7PVPdNpbW5jqX1doklFpKf6/XM+B+ngYneU+zgCUBiQJVBBMBCAA/
AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgBYhBK6W7ZaeR5sAhPPhf+iNMzT6X2oKBQJh
ABCQBQkMnJi4AAoJEOiNMzT6X2oKAv0P+gJ3twBp5efNWyVLcIg4h4cOo9uD0NPvz8/fm2gX
FoOJL3MeigtPuSVfE9kuTaTuRbArzuFtdvH6G/kcRQvOlO4zyiIRHCk1gDHoIvvtn6RbRhVm
/Xo4uGIsFHst7n4A7BjicwEK5Op6Ih5Hoq19xz83YSBgBVk2fYEJIRyJiKFbyPjH0eSYe8v+
Ra5/F85ugLx1P6mMVkW+WPzULns89riW7BGTnZmXFHZp8nO2pkUlcI7F3KRG7l4kmlC50ox6
DiG/6AJCVulbAClky9C68TmJ/R1RazQxU/9IqVywsydq66tbJQbm5Z7GEti0C5jjbSRJL2oT
1xC7Rilr85PMREkPL3vegJdgj5PKlffZ/MocD/0EohiQ7wFpejFD4iTljeh0exRUwCRb6655
9ib34JSQgU8Hl4JJu+mEgd9v0ZHD0/1mMD6fnAR84zca+O3cdASbnQmzTOKcGzLIrkE8TEnU
+2UZ8Ol7SAAqmBgzY1gKOilUho6dkyCAwNL+QDpvrITDPLEFPsjyB/M2KudZSVEn+Rletju1
qkMW31qFMNlsbwzMZw+0USeGcs31Cs0B2/WQsro99CExlhS9auUFkmoVjJmYVTIYOM0zuPa4
OyGspqPhRu5hEsmMDPDWD7Aad5k4GTqogQNnuKyRliZjXXrDZqFD5nfsJSL8Ky/sJGEMuQIN
BFgl3tgBEACbgq6HTN5gEBi0lkD/MafInmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4h
YontkMaKRlCg2Rvgjvk3Zve0PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT1
9BdeAQRFvcfd+8w8f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj
26bf+2+1DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6
D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66iPsR99MQ7
FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A4tGkHl08KZ2N9o6G
rfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8greW8xB4zuf9Mkuou+RHNmo8Pe
bHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0VRxdPImKun+4LOXbfOxArOSkY6i35+gs
gkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/
bM1ACUtipMiIVeUs2uFiRjpzA1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJa
CWIIBQkFo2BYAAoJEOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg
3IHMGxDMb/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8
KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0IQ1UKKXvz
ZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0K9lneidcqtBDvlgg
JTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0TNOOE8fXlvu8iuIAMBSDL9ep6
sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd5MTi0MDRNTij431kn8T/D0LCgmoUmYYM
BgbwFhXr67axPZlKjrqR0z3F/Elv0ZPPcVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1q
Scl9HiMxjt/H6aPastH63/7wcN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4
/Lih6Z1TlwcFVap+cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1p
M6AOQPpZ85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4iQIl
BBgBAgAPBQJYJd7YAhsMBQkB4TOAAAoJEOiNMzT6X2oKTjgP/1ojCVyGyvHMLUgnX0zwrR5Q
1M5RKFz6kHwKjODVLR3Isp8I935oTQt3DY7yFDI4t0GqbYRQMtxcNEb7maianhK2trCXfhPs
6/L04igjDf5iTcmzamXN6xnh5xkz06hZJJCMuu4MvKxC9MQHCVKAwjswl/9H9JqIBXAY3E2l
LpX5P+5jDZuPxS86p3+k4Rrdp9KTGXjiuEleM3zGlz5BLWydqovOck7C2aKh27ETFpDYY0z3
yQ5AsPJyk1rAr0wrH6+ywmwWlzuQewavnrLnJ2M8iMFXpIhyHeEIU/f7o8f+dQk72rZ9CGzd
cqig2za/BS3zawZWgbv2vB2elNsIllYLdir45jxBOxx2yvJvEuu4glz78y4oJTCTAYAbMlle
5gVdPkVcGyvvVS9tinnSaiIzuvWrYHKWll1uYPm2Q1CDs06P5I7bUGAXpgQLUh/XQguy/0sX
GWqW3FS5JzP+XgcR/7UASvwBdHylubKbeqEpB7G1s+m+8C67qOrc7EQv3Jmy1YDOkhEyNig1
rmjplLuir3tC1X+D7dHpn7NJe7nMwFx2b2MpMkLA9jPPAGPp/ekcu5sxCe+E0J/4UF++K+CR
XIxgtzU2UJfp8p9x+ygbx5qHinR0tVRdIzv3ZnGsXrfxnWfSOaB582cU3VRN9INzHHax8ETa
QVDnGO5uQa+FiQI8BBgBCAAmAhsMFiEErpbtlp5HmwCE8+F/6I0zNPpfagoFAmEAELYFCQyc
mN4ACgkQ6I0zNPpfagoqAQ/+MnDjBx8JWMd/XjeFoYKx/Oo0ntkInV+ME61JTBls4PdVk+TB
8PWZdPQHw9SnTvRmykFeznXIRzuxkowjrZYXdPXBxY2b1WyD5V3Ati1TM9vqpaR4osyPs2xy
I4dzDssh9YvUsIRL99O04/65lGiYeBNuACq+yK/7nD/ErzBkDYJHhMCdadbVWUACxvVIDvro
yQeVLKMsHqMCd8BTGD7VDs79NXskPnN77pAFnkzS4Z2b8SNzrlgTc5pUiuZHIXPIpEYmsYzh
ucTU6uI3dN1PbSFHK5tG2pHb4ZrPxY3L20Dgc2Tfu5/SDApZzwvvKTqjdO891MEJ++H+ssOz
i4O1UeWKs9owWttan9+PI47ozBSKOTxmMqLSQ0f56Np9FJsV0ilGxRKfjhzJ4KniOMUBA7mP
+m+TmXfVtthJred4sHlJMTJNpt+sCcT6wLMmyc3keIEAu33gsJj3LTpkEA2q+V+ZiP6Q8HRB
402ITklABSArrPSE/fQU9L8hZ5qmy0Z96z0iyILgVMLuRCCfQOMWhwl8yQWIIaf1yPI07xur
epy6lH7HmxjjOR7eo0DaSxQGQpThAtFGwkWkFh8yki8j3E42kkrxvEyyYZDXn2YcI3bpqhJx
PtwCMZUJ3kc/skOrs6bOI19iBNaEoNX5Dllm7UHjOgWNDQkcCuOCxucKano=
=arte
-----END PGP PUBLIC KEY BLOCK-----
```

@ -84,7 +84,7 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
func (abi ABI) getArguments(name string, data []byte) (Arguments, error) {
// since there can't be naming collisions with contracts and events,
// we need to decide whether we're calling a method, event or an error
// we need to decide whether we're calling a method or an event
var args Arguments
if method, ok := abi.Methods[name]; ok {
if len(data)%32 != 0 {
@ -95,11 +95,8 @@ func (abi ABI) getArguments(name string, data []byte) (Arguments, error) {
if event, ok := abi.Events[name]; ok {
args = event.Inputs
}
if err, ok := abi.Errors[name]; ok {
args = err.Inputs
}
if args == nil {
return nil, fmt.Errorf("abi: could not locate named method, event or error: %s", name)
return nil, fmt.Errorf("abi: could not locate named method or event: %s", name)
}
return args, nil
}
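For context, a standalone sketch of how this lookup is exercised through the public `Unpack` API (the single-method ABI and the return data below are made up for illustration):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical ABI with one view method returning a uint256.
	const def = `[{"type":"function","name":"value","stateMutability":"view","inputs":[],"outputs":[{"name":"","type":"uint256"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// 32-byte big-endian return data encoding the value 7, as a contract would return it.
	ret := common.LeftPadBytes([]byte{7}, 32)

	// Unpack resolves "value" via getArguments (method outputs in this case).
	out, err := parsed.Unpack("value", ret)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0]) // 7
}
```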

@ -29,7 +29,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/testrand"
)
const jsondata = `
@ -318,38 +317,6 @@ func TestCustomErrors(t *testing.T) {
check("MyError", "MyError(uint256)")
}
func TestCustomErrorUnpackIntoInterface(t *testing.T) {
t.Parallel()
errorName := "MyError"
json := fmt.Sprintf(`[{"inputs":[{"internalType":"address","name":"sender","type":"address"},{"internalType":"uint256","name":"balance","type":"uint256"}],"name":"%s","type":"error"}]`, errorName)
abi, err := JSON(strings.NewReader(json))
if err != nil {
t.Fatal(err)
}
type MyError struct {
Sender common.Address
Balance *big.Int
}
sender := testrand.Address()
balance := new(big.Int).SetBytes(testrand.Bytes(8))
encoded, err := abi.Errors[errorName].Inputs.Pack(sender, balance)
if err != nil {
t.Fatal(err)
}
result := MyError{}
err = abi.UnpackIntoInterface(&result, errorName, encoded)
if err != nil {
t.Fatal(err)
}
if result.Sender != sender {
t.Errorf("expected %x got %x", sender, result.Sender)
}
if result.Balance.Cmp(balance) != 0 {
t.Errorf("expected %v got %v", balance, result.Balance)
}
}
func TestMultiPack(t *testing.T) {
t.Parallel()
abi, err := JSON(strings.NewReader(jsondata))
@ -1232,6 +1199,7 @@ func TestUnpackRevert(t *testing.T) {
{"4e487b7100000000000000000000000000000000000000000000000000000000000000ff", "unknown panic code: 0xff", nil},
}
for index, c := range cases {
index, c := index, c
t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {
t.Parallel()
got, err := UnpackRevert(common.Hex2Bytes(c.input))
@ -1250,10 +1218,3 @@ func TestUnpackRevert(t *testing.T) {
})
}
}
func TestInternalContractType(t *testing.T) {
jsonData := `[{"inputs":[{"components":[{"internalType":"uint256","name":"dailyLimit","type":"uint256"},{"internalType":"uint256","name":"txLimit","type":"uint256"},{"internalType":"uint256","name":"accountDailyLimit","type":"uint256"},{"internalType":"uint256","name":"minAmount","type":"uint256"},{"internalType":"bool","name":"onlyWhitelisted","type":"bool"}],"internalType":"struct IMessagePassingBridge.BridgeLimits","name":"bridgeLimits","type":"tuple"},{"components":[{"internalType":"uint256","name":"lastTransferReset","type":"uint256"},{"internalType":"uint256","name":"bridged24Hours","type":"uint256"}],"internalType":"struct IMessagePassingBridge.AccountLimit","name":"accountDailyLimit","type":"tuple"},{"components":[{"internalType":"uint256","name":"lastTransferReset","type":"uint256"},{"internalType":"uint256","name":"bridged24Hours","type":"uint256"}],"internalType":"struct IMessagePassingBridge.BridgeDailyLimit","name":"bridgeDailyLimit","type":"tuple"},{"internalType":"contract INameService","name":"nameService","type":"INameService"},{"internalType":"bool","name":"isClosed","type":"bool"},{"internalType":"address","name":"from","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"canBridge","outputs":[{"internalType":"bool","name":"isWithinLimit","type":"bool"},{"internalType":"string","name":"error","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"uint8","name":"decimals","type":"uint8"}],"name":"normalizeFrom18ToTokenDecimals","outputs":[{"internalType":"uint256","name":"normalized","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"uint8","name":"decimals","type":"uint8"}],"name":"normalizeFromTokenTo18Decimals","outputs":[{"internalType":"uint256","name":"normalized","type":"uint256"}],"stateMutability":"pure","type":"function"}]`
if _, err := JSON(strings.NewReader(jsonData)); err != nil {
t.Fatal(err)
}
}
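The `index, c := index, c` and `tc := tc` re-declarations seen throughout these test diffs are the pre-Go 1.22 loop-variable capture idiom: each iteration rebinds the variable so parallel subtests and goroutines do not all observe the final value. A minimal standalone illustration, not tied to any particular test above:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, v := range []int{1, 2, 3} {
		v := v // Before Go 1.22: rebind so each goroutine captures its own copy.
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(v) // prints 1, 2, 3 in some order, not 3, 3, 3
		}()
	}
	wg.Wait()
}
```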

@ -252,7 +252,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
}
// Parse library references.
for pattern, name := range libs {
matched, err := regexp.MatchString("__\\$"+pattern+"\\$__", contracts[types[i]].InputBin)
matched, err := regexp.Match("__\\$"+pattern+"\\$__", []byte(contracts[types[i]].InputBin))
if err != nil {
log.Error("Could not search for pattern", "pattern", pattern, "contract", contracts[types[i]], "err", err)
}
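For reference, the pattern matched here is the Solidity unlinked-library placeholder of the form `__$<hash>$__`; a small standalone sketch of the same match (the placeholder hash and bytecode are made up):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical 34-character library placeholder hash as emitted by solc.
	pattern := "a1b2c3d4e5f60718293a4b5c6d7e8f9012"
	// Hypothetical unlinked runtime bytecode containing the placeholder.
	bin := "6080604052__$" + pattern + "$__600055"

	matched, err := regexp.MatchString(`__\$`+pattern+`\$__`, bin)
	if err != nil {
		panic(err)
	}
	fmt.Println(matched) // true
}
```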

@ -30,18 +30,12 @@ import (
// WaitMined waits for tx to be mined on the blockchain.
// It stops waiting when the context is canceled.
func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*types.Receipt, error) {
return WaitMinedHash(ctx, b, tx.Hash())
}
// WaitMinedHash waits for a transaction with the provided hash to be mined on the blockchain.
// It stops waiting when the context is canceled.
func WaitMinedHash(ctx context.Context, b DeployBackend, hash common.Hash) (*types.Receipt, error) {
queryTicker := time.NewTicker(time.Second)
defer queryTicker.Stop()
logger := log.New("hash", hash)
logger := log.New("hash", tx.Hash())
for {
receipt, err := b.TransactionReceipt(ctx, hash)
receipt, err := b.TransactionReceipt(ctx, tx.Hash())
if err == nil {
return receipt, nil
}
@ -67,13 +61,7 @@ func WaitDeployed(ctx context.Context, b DeployBackend, tx *types.Transaction) (
if tx.To() != nil {
return common.Address{}, errors.New("tx is not contract creation")
}
return WaitDeployedHash(ctx, b, tx.Hash())
}
// WaitDeployedHash waits for a contract deployment transaction with the provided hash and returns the on-chain
// contract address when it is mined. It stops waiting when ctx is canceled.
func WaitDeployedHash(ctx context.Context, b DeployBackend, hash common.Hash) (common.Address, error) {
receipt, err := WaitMinedHash(ctx, b, hash)
receipt, err := WaitMined(ctx, b, tx)
if err != nil {
return common.Address{}, err
}
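A short sketch of how `WaitMined` is typically used after submitting a transaction; the wrapper function below is illustrative and not part of the package:

```go
package example

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/core/types"
)

// waitForReceipt blocks until tx is mined (or ctx is canceled) and checks its status.
func waitForReceipt(ctx context.Context, backend bind.DeployBackend, tx *types.Transaction) (*types.Receipt, error) {
	receipt, err := bind.WaitMined(ctx, backend, tx)
	if err != nil {
		return nil, err
	}
	if receipt.Status != types.ReceiptStatusSuccessful {
		return receipt, fmt.Errorf("transaction %s reverted", tx.Hash())
	}
	return receipt, nil
}
```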

@ -82,9 +82,7 @@ func TestWaitDeployed(t *testing.T) {
}()
// Send and mine the transaction.
if err := backend.Client().SendTransaction(ctx, tx); err != nil {
t.Errorf("test %q: failed to send transaction: %v", name, err)
}
backend.Client().SendTransaction(ctx, tx)
backend.Commit()
select {
@ -118,9 +116,7 @@ func TestWaitDeployedCornerCases(t *testing.T) {
tx, _ = types.SignTx(tx, types.LatestSigner(params.AllDevChainProtocolChanges), testKey)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if err := backend.Client().SendTransaction(ctx, tx); err != nil {
t.Errorf("failed to send transaction: %q", err)
}
backend.Client().SendTransaction(ctx, tx)
backend.Commit()
notContractCreation := errors.New("tx is not contract creation")
if _, err := bind.WaitDeployed(ctx, backend.Client(), tx); err.Error() != notContractCreation.Error() {
@ -138,8 +134,6 @@ func TestWaitDeployedCornerCases(t *testing.T) {
}
}()
if err := backend.Client().SendTransaction(ctx, tx); err != nil {
t.Errorf("failed to send transaction: %q", err)
}
backend.Client().SendTransaction(ctx, tx)
cancel()
}

@ -331,6 +331,7 @@ func TestEventTupleUnpack(t *testing.T) {
for _, tc := range testCases {
assert := assert.New(t)
tc := tc
t.Run(tc.name, func(t *testing.T) {
err := unpackTestEventData(tc.dest, tc.data, tc.jsonLog, assert)
if tc.error == "" {

@ -34,6 +34,7 @@ import (
func TestPack(t *testing.T) {
t.Parallel()
for i, test := range packUnpackTests {
i, test := i, test
t.Run(strconv.Itoa(i), func(t *testing.T) {
t.Parallel()
encb, err := hex.DecodeString(test.packed)

@ -172,6 +172,7 @@ var reflectTests = []reflectTest{
func TestReflectNameToStruct(t *testing.T) {
t.Parallel()
for _, test := range reflectTests {
test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))

@ -42,7 +42,7 @@ func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) {
case common.Address:
copy(topic[common.HashLength-common.AddressLength:], rule[:])
case *big.Int:
copy(topic[:], math.U256Bytes(new(big.Int).Set(rule)))
copy(topic[:], math.U256Bytes(rule))
case bool:
if rule {
topic[common.HashLength-1] = 1
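The defensive `new(big.Int).Set(rule)` copy matters because `math.U256Bytes` truncates its argument in place for negative values; the `does not mutate big.Int` subtest in the test diff that follows covers exactly this. A standalone sketch, assuming `MakeTopics` is the exported helper in `accounts/abi`:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Filter on an indexed integer equal to -1; its topic is the
	// two's-complement representation 0xff...ff.
	in := big.NewInt(-1)
	topics, err := abi.MakeTopics([]interface{}{in})
	if err != nil {
		panic(err)
	}
	fmt.Println(topics[0][0].Hex())
	// With the defensive copy in place, the caller's value is left untouched.
	fmt.Println(in) // -1
}
```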

@ -137,6 +137,7 @@ func TestMakeTopics(t *testing.T) {
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := MakeTopics(tt.args.query...)
@ -149,23 +150,6 @@ func TestMakeTopics(t *testing.T) {
}
})
}
t.Run("does not mutate big.Int", func(t *testing.T) {
t.Parallel()
want := [][]common.Hash{{common.HexToHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")}}
in := big.NewInt(-1)
got, err := MakeTopics([]interface{}{in})
if err != nil {
t.Fatalf("makeTopics() error = %v", err)
}
if !reflect.DeepEqual(got, want) {
t.Fatalf("makeTopics() = %v, want %v", got, want)
}
if orig := big.NewInt(-1); in.Cmp(orig) != 0 {
t.Fatalf("makeTopics() mutated an input parameter from %v to %v", orig, in)
}
})
}
type args struct {
@ -389,6 +373,7 @@ func TestParseTopics(t *testing.T) {
tests := setupTopicsTests()
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
createObj := tt.args.createObj()
@ -408,6 +393,7 @@ func TestParseTopicsIntoMap(t *testing.T) {
tests := setupTopicsTests()
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
outMap := make(map[string]interface{})

@ -219,12 +219,7 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
typ.T = FunctionTy
typ.Size = 24
default:
if strings.HasPrefix(internalType, "contract ") {
typ.Size = 20
typ.T = AddressTy
} else {
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
}
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
}
return

@ -389,6 +389,7 @@ func TestMethodMultiReturn(t *testing.T) {
"Can not unpack into a slice with wrong types",
}}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
require := require.New(t)
err := abi.UnpackIntoInterface(tc.dest, "multi", data)
@ -946,7 +947,7 @@ func TestOOMMaliciousInput(t *testing.T) {
}
encb, err := hex.DecodeString(test.enc)
if err != nil {
t.Fatalf("invalid hex: %s", test.enc)
t.Fatalf("invalid hex: %s" + test.enc)
}
_, err = abi.Methods["method"].Outputs.UnpackValues(encb)
if err == nil {

@ -214,9 +214,7 @@ const (
// of starting any background processes such as automatic key derivation.
WalletOpened
// WalletDropped is fired when a wallet is removed or disconnected, either via USB
// or due to a filesystem event in the keystore. This event indicates that the wallet
// is no longer available for operations.
// WalletDropped
WalletDropped
)

@ -215,7 +215,7 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
switch tx.Type() {
case types.LegacyTxType, types.AccessListTxType:
args.GasPrice = (*hexutil.Big)(tx.GasPrice())
case types.DynamicFeeTxType, types.BlobTxType, types.SetCodeTxType:
case types.DynamicFeeTxType, types.BlobTxType:
args.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap())
args.MaxPriorityFeePerGas = (*hexutil.Big)(tx.GasTipCap())
default:
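For context, the fee fields selected in this switch correspond directly to the transaction's accessors; a small sketch with made-up fee values:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	to := common.HexToAddress("0x0000000000000000000000000000000000000001")
	// A dynamic-fee (EIP-1559) transaction carries a tip cap and a fee cap
	// rather than a single gas price.
	tx := types.NewTx(&types.DynamicFeeTx{
		ChainID:   big.NewInt(1),
		Nonce:     0,
		GasTipCap: big.NewInt(2_000_000_000),  // becomes MaxPriorityFeePerGas
		GasFeeCap: big.NewInt(30_000_000_000), // becomes MaxFeePerGas
		Gas:       21000,
		To:        &to,
		Value:     big.NewInt(0),
	})
	fmt.Println(tx.Type() == types.DynamicFeeTxType) // true
	fmt.Println(tx.GasTipCap(), tx.GasFeeCap())
}
```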

@ -44,7 +44,8 @@ func byURL(a, b accounts.Account) int {
return a.URL.Cmp(b.URL)
}
// AmbiguousAddrError is returned when an address matches multiple files.
// AmbiguousAddrError is returned when attempting to unlock
// an address for which more than one file exists.
type AmbiguousAddrError struct {
Addr common.Address
Matches []accounts.Account

accounts/keystore/testdata/dupes/1 vendored Normal file (1 changed line)

@ -0,0 +1 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

accounts/keystore/testdata/dupes/2 vendored Normal file (1 changed line)

@ -0,0 +1 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

accounts/keystore/testdata/dupes/foo vendored Normal file (1 changed line)

@ -0,0 +1 @@
{"address":"7ef5a6135f1fd6a02593eedc869c6d41d934aef8","crypto":{"cipher":"aes-128-ctr","ciphertext":"1d0839166e7a15b9c1333fc865d69858b22df26815ccf601b28219b6192974e1","cipherparams":{"iv":"8df6caa7ff1b00c4e871f002cb7921ed"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"e5e6ef3f4ea695f496b643ebd3f75c0aa58ef4070e90c80c5d3fb0241bf1595c"},"mac":"6d16dfde774845e4585357f24bce530528bc69f4f84e1e22880d34fa45c273e5"},"id":"950077c7-71e3-4c44-a4a1-143919141ed4","version":3}

@ -29,9 +29,12 @@ import (
// the manager will buffer in its channel.
const managerSubBufferSize = 50
// Config is a legacy struct which is not used
// Config contains the settings of the global account manager.
//
// TODO(rjl493456442, karalabe, holiman): Get rid of this when account management
// is removed in favor of Clef.
type Config struct {
InsecureUnlockAllowed bool // Unused legacy-parameter
InsecureUnlockAllowed bool // Whether account unlocking in insecure environment is allowed
}
// newBackendEvent lets the manager know it should
@ -44,6 +47,7 @@ type newBackendEvent struct {
// Manager is an overarching account manager that can communicate with various
// backends for signing transactions.
type Manager struct {
config *Config // Global account manager configurations
backends map[reflect.Type][]Backend // Index of backends currently registered
updaters []event.Subscription // Wallet update subscriptions for all backends
updates chan WalletEvent // Subscription sink for backend wallet changes
@ -74,6 +78,7 @@ func NewManager(config *Config, backends ...Backend) *Manager {
}
// Assemble the account manager and return
am := &Manager{
config: config,
backends: make(map[reflect.Type][]Backend),
updaters: subs,
updates: updates,
@ -101,6 +106,11 @@ func (am *Manager) Close() error {
return <-errc
}
// Config returns the configuration of account manager.
func (am *Manager) Config() *Config {
return am.config
}
// AddBackend starts the tracking of an additional backend for wallet updates.
// cmd/geth assumes once this func returns the backends have been already integrated.
func (am *Manager) AddBackend(backend Backend) {
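A minimal sketch of how the `Config` above reaches a `Manager`; the keystore directory is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/keystore"
)

func main() {
	// Hypothetical keystore location; the scrypt parameters are the standard ones.
	ks := keystore.NewKeyStore("/tmp/example-keystore", keystore.StandardScryptN, keystore.StandardScryptP)

	// Wire the keystore backend into a manager configured as above.
	am := accounts.NewManager(&accounts.Config{InsecureUnlockAllowed: false}, ks)
	defer am.Close()

	fmt.Println("wallets:", len(am.Wallets()))
}
```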

@ -338,22 +338,8 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
return common.Address{}, nil, err
}
} else {
if tx.Type() == types.DynamicFeeTxType {
if txrlp, err = rlp.EncodeToBytes([]interface{}{chainID, tx.Nonce(), tx.GasTipCap(), tx.GasFeeCap(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), tx.AccessList()}); err != nil {
return common.Address{}, nil, err
}
// append type to transaction
txrlp = append([]byte{tx.Type()}, txrlp...)
} else if tx.Type() == types.AccessListTxType {
if txrlp, err = rlp.EncodeToBytes([]interface{}{chainID, tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), tx.AccessList()}); err != nil {
return common.Address{}, nil, err
}
// append type to transaction
txrlp = append([]byte{tx.Type()}, txrlp...)
} else if tx.Type() == types.LegacyTxType {
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), chainID, big.NewInt(0), big.NewInt(0)}); err != nil {
return common.Address{}, nil, err
}
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), chainID, big.NewInt(0), big.NewInt(0)}); err != nil {
return common.Address{}, nil, err
}
}
payload := append(path, txrlp...)
@ -367,9 +353,7 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
// Chunk size selection to mitigate an underlying RLP deserialization issue on the ledger app.
// https://github.com/LedgerHQ/app-ethereum/issues/409
chunk := 255
if tx.Type() == types.LegacyTxType {
for ; len(payload)%chunk <= ledgerEip155Size; chunk-- {
}
for ; len(payload)%chunk <= ledgerEip155Size; chunk-- {
}
for len(payload) > 0 {
@ -397,11 +381,8 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
if chainID == nil {
signer = new(types.HomesteadSigner)
} else {
signer = types.LatestSignerForChainID(chainID)
// For non-legacy transactions, V is 0 or 1, no need to subtract here.
if tx.Type() == types.LegacyTxType {
signature[64] -= byte(chainID.Uint64()*2 + 35)
}
signer = types.NewEIP155Signer(chainID)
signature[64] -= byte(chainID.Uint64()*2 + 35)
}
signed, err := tx.WithSignature(signer, signature)
if err != nil {

@ -24,9 +24,7 @@ for:
- image: Ubuntu
build_script:
- go run build/ci.go lint
- go run build/ci.go check_tidy
- go run build/ci.go check_generate
- go run build/ci.go check_baddeps
- go run build/ci.go generate -verify
- go run build/ci.go install -dlgo
test_script:
- go run build/ci.go test -dlgo -short

@ -70,10 +70,7 @@ func TestBlockSync(t *testing.T) {
t.Helper()
var expNumber, headNumber uint64
if expHead != nil {
p, err := expHead.ExecutionPayload()
if err != nil {
t.Fatalf("expHead.ExecutionPayload() failed: %v", err)
}
p, _ := expHead.ExecutionPayload()
expNumber = p.NumberU64()
}
select {

@ -17,22 +17,25 @@
package blsync
import (
"strings"
"github.com/ethereum/go-ethereum/beacon/light"
"github.com/ethereum/go-ethereum/beacon/light/api"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/light/sync"
"github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/rpc"
"github.com/urfave/cli/v2"
)
type Client struct {
urls []string
customHeader map[string]string
config *params.ClientConfig
chainConfig *lightClientConfig
scheduler *request.Scheduler
blockSync *beaconBlockSync
engineRPC *rpc.Client
@ -41,18 +44,34 @@ type Client struct {
engineClient *engineClient
}
func NewClient(config params.ClientConfig) *Client {
func NewClient(ctx *cli.Context) *Client {
if !ctx.IsSet(utils.BeaconApiFlag.Name) {
utils.Fatalf("Beacon node light client API URL not specified")
}
var (
chainConfig = makeChainConfig(ctx)
customHeader = make(map[string]string)
)
for _, s := range ctx.StringSlice(utils.BeaconApiHeaderFlag.Name) {
kv := strings.Split(s, ":")
if len(kv) != 2 {
utils.Fatalf("Invalid custom API header entry: %s", s)
}
customHeader[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
}
// create data structures
var (
db = memorydb.New()
committeeChain = light.NewCommitteeChain(db, &config.ChainConfig, config.Threshold, !config.NoFilter)
headTracker = light.NewHeadTracker(committeeChain, config.Threshold)
threshold = ctx.Int(utils.BeaconThresholdFlag.Name)
committeeChain = light.NewCommitteeChain(db, chainConfig.ChainConfig, threshold, !ctx.Bool(utils.BeaconNoFilterFlag.Name))
headTracker = light.NewHeadTracker(committeeChain, threshold)
)
headSync := sync.NewHeadSync(headTracker, committeeChain)
// set up scheduler and sync modules
scheduler := request.NewScheduler()
checkpointInit := sync.NewCheckpointInit(committeeChain, config.Checkpoint)
checkpointInit := sync.NewCheckpointInit(committeeChain, chainConfig.Checkpoint)
forwardSync := sync.NewForwardUpdateSync(committeeChain)
beaconBlockSync := newBeaconBlockSync(headTracker)
scheduler.RegisterTarget(headTracker)
@ -64,9 +83,9 @@ func NewClient(config params.ClientConfig) *Client {
return &Client{
scheduler: scheduler,
urls: config.Apis,
customHeader: config.CustomHeader,
config: &config,
urls: ctx.StringSlice(utils.BeaconApiFlag.Name),
customHeader: customHeader,
chainConfig: &chainConfig,
blockSync: beaconBlockSync,
}
}
@ -78,7 +97,7 @@ func (c *Client) SetEngineRPC(engine *rpc.Client) {
func (c *Client) Start() error {
headCh := make(chan types.ChainHeadEvent, 16)
c.chainHeadSub = c.blockSync.SubscribeChainHead(headCh)
c.engineClient = startEngineClient(c.config, c.engineRPC, headCh)
c.engineClient = startEngineClient(c.chainConfig, c.engineRPC, headCh)
c.scheduler.Start()
for _, url := range c.urls {

beacon/blsync/config.go Normal file (129 changed lines)

@ -0,0 +1,129 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package blsync
import (
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/urfave/cli/v2"
)
// lightClientConfig contains beacon light client configuration
type lightClientConfig struct {
*types.ChainConfig
Checkpoint common.Hash
}
var (
MainnetConfig = lightClientConfig{
ChainConfig: (&types.ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"),
GenesisTime: 1606824023,
}).
AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
AddFork("ALTAIR", 74240, []byte{1, 0, 0, 0}).
AddFork("BELLATRIX", 144896, []byte{2, 0, 0, 0}).
AddFork("CAPELLA", 194048, []byte{3, 0, 0, 0}).
AddFork("DENEB", 269568, []byte{4, 0, 0, 0}),
Checkpoint: common.HexToHash("0x388be41594ec7d6a6894f18c73f3469f07e2c19a803de4755d335817ed8e2e5a"),
}
SepoliaConfig = lightClientConfig{
ChainConfig: (&types.ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"),
GenesisTime: 1655733600,
}).
AddFork("GENESIS", 0, []byte{144, 0, 0, 105}).
AddFork("ALTAIR", 50, []byte{144, 0, 0, 112}).
AddFork("BELLATRIX", 100, []byte{144, 0, 0, 113}).
AddFork("CAPELLA", 56832, []byte{144, 0, 0, 114}).
AddFork("DENEB", 132608, []byte{144, 0, 0, 115}),
Checkpoint: common.HexToHash("0x1005a6d9175e96bfbce4d35b80f468e9bff0b674e1e861d16e09e10005a58e81"),
}
GoerliConfig = lightClientConfig{
ChainConfig: (&types.ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0x043db0d9a83813551ee2f33450d23797757d430911a9320530ad8a0eabc43efb"),
GenesisTime: 1614588812,
}).
AddFork("GENESIS", 0, []byte{0, 0, 16, 32}).
AddFork("ALTAIR", 36660, []byte{1, 0, 16, 32}).
AddFork("BELLATRIX", 112260, []byte{2, 0, 16, 32}).
AddFork("CAPELLA", 162304, []byte{3, 0, 16, 32}).
AddFork("DENEB", 231680, []byte{4, 0, 16, 32}),
Checkpoint: common.HexToHash("0x53a0f4f0a378e2c4ae0a9ee97407eb69d0d737d8d8cd0a5fb1093f42f7b81c49"),
}
)
func makeChainConfig(ctx *cli.Context) lightClientConfig {
var config lightClientConfig
customConfig := ctx.IsSet(utils.BeaconConfigFlag.Name)
utils.CheckExclusive(ctx, utils.MainnetFlag, utils.GoerliFlag, utils.SepoliaFlag, utils.BeaconConfigFlag)
switch {
case ctx.Bool(utils.MainnetFlag.Name):
config = MainnetConfig
case ctx.Bool(utils.SepoliaFlag.Name):
config = SepoliaConfig
case ctx.Bool(utils.GoerliFlag.Name):
config = GoerliConfig
default:
if !customConfig {
config = MainnetConfig
}
}
// Genesis root and time should always be specified together with custom chain config
if customConfig {
if !ctx.IsSet(utils.BeaconGenesisRootFlag.Name) {
utils.Fatalf("Custom beacon chain config is specified but genesis root is missing")
}
if !ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) {
utils.Fatalf("Custom beacon chain config is specified but genesis time is missing")
}
if !ctx.IsSet(utils.BeaconCheckpointFlag.Name) {
utils.Fatalf("Custom beacon chain config is specified but checkpoint is missing")
}
config.ChainConfig = &types.ChainConfig{
GenesisTime: ctx.Uint64(utils.BeaconGenesisTimeFlag.Name),
}
if c, err := hexutil.Decode(ctx.String(utils.BeaconGenesisRootFlag.Name)); err == nil && len(c) <= 32 {
copy(config.GenesisValidatorsRoot[:len(c)], c)
} else {
utils.Fatalf("Invalid hex string", "beacon.genesis.gvroot", ctx.String(utils.BeaconGenesisRootFlag.Name), "error", err)
}
if err := config.ChainConfig.LoadForks(ctx.String(utils.BeaconConfigFlag.Name)); err != nil {
utils.Fatalf("Could not load beacon chain config file", "file name", ctx.String(utils.BeaconConfigFlag.Name), "error", err)
}
} else {
if ctx.IsSet(utils.BeaconGenesisRootFlag.Name) {
utils.Fatalf("Genesis root is specified but custom beacon chain config is missing")
}
if ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) {
utils.Fatalf("Genesis time is specified but custom beacon chain config is missing")
}
}
// Checkpoint is required with custom chain config and is optional with pre-defined config
if ctx.IsSet(utils.BeaconCheckpointFlag.Name) {
if c, err := hexutil.Decode(ctx.String(utils.BeaconCheckpointFlag.Name)); err == nil && len(c) <= 32 {
copy(config.Checkpoint[:len(c)], c)
} else {
utils.Fatalf("Invalid hex string", "beacon.checkpoint", ctx.String(utils.BeaconCheckpointFlag.Name), "error", err)
}
}
return config
}
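Sketch (not part of the diff): the presets above assemble their fork schedules by chaining AddFork onto a ChainConfig literal, and the custom-config branch of makeChainConfig reaches the same state via LoadForks. A minimal, hedged example of the chaining pattern using the beacon/types definitions shown in this hunk; the genesis time, fork epochs and versions below are invented for illustration.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/beacon/types"
)

func main() {
	// Zero genesis validators root and made-up epochs, purely to show the shape.
	cfg := (&types.ChainConfig{GenesisTime: 1700000000}).
		AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
		AddFork("ALTAIR", 10, []byte{1, 0, 0, 0})
	fmt.Println("forks configured:", len(cfg.Forks)) // 2
}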

@ -23,7 +23,6 @@ import (
"time"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
ctypes "github.com/ethereum/go-ethereum/core/types"
@ -32,14 +31,14 @@ import (
)
type engineClient struct {
config *params.ClientConfig
config *lightClientConfig
rpc *rpc.Client
rootCtx context.Context
cancelRoot context.CancelFunc
wg sync.WaitGroup
}
func startEngineClient(config *params.ClientConfig, rpc *rpc.Client, headCh <-chan types.ChainHeadEvent) *engineClient {
func startEngineClient(config *lightClientConfig, rpc *rpc.Client, headCh <-chan types.ChainHeadEvent) *engineClient {
ctx, cancel := context.WithCancel(context.Background())
ec := &engineClient{
config: config,
@ -93,7 +92,7 @@ func (ec *engineClient) updateLoop(headCh <-chan types.ChainHeadEvent) {
}
func (ec *engineClient) callNewPayload(fork string, event types.ChainHeadEvent) (string, error) {
execData := engine.BlockToExecutableData(event.Block, nil, nil, nil).ExecutionPayload
execData := engine.BlockToExecutableData(event.Block, nil, nil).ExecutionPayload
var (
method string

@ -17,24 +17,23 @@ var _ = (*executableDataMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (e ExecutableData) MarshalJSON() ([]byte, error) {
type ExecutableData struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
}
var enc ExecutableData
enc.ParentHash = e.ParentHash
@ -59,31 +58,29 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
enc.Withdrawals = e.Withdrawals
enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed)
enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas)
enc.ExecutionWitness = e.ExecutionWitness
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (e *ExecutableData) UnmarshalJSON(input []byte) error {
type ExecutableData struct {
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random *common.Hash `json:"prevRandao" gencodec:"required"`
Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random *common.Hash `json:"prevRandao" gencodec:"required"`
Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
}
var dec ExecutableData
if err := json.Unmarshal(input, &dec); err != nil {
@ -157,8 +154,5 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
if dec.ExcessBlobGas != nil {
e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
}
if dec.ExecutionWitness != nil {
e.ExecutionWitness = dec.ExecutionWitness
}
return nil
}

@ -18,22 +18,13 @@ func (e ExecutionPayloadEnvelope) MarshalJSON() ([]byte, error) {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Requests []hexutil.Bytes `json:"executionRequests"`
Override bool `json:"shouldOverrideBuilder"`
Witness *hexutil.Bytes `json:"witness,omitempty"`
}
var enc ExecutionPayloadEnvelope
enc.ExecutionPayload = e.ExecutionPayload
enc.BlockValue = (*hexutil.Big)(e.BlockValue)
enc.BlobsBundle = e.BlobsBundle
if e.Requests != nil {
enc.Requests = make([]hexutil.Bytes, len(e.Requests))
for k, v := range e.Requests {
enc.Requests[k] = v
}
}
enc.Override = e.Override
enc.Witness = e.Witness
return json.Marshal(&enc)
}
@ -43,9 +34,7 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Requests []hexutil.Bytes `json:"executionRequests"`
Override *bool `json:"shouldOverrideBuilder"`
Witness *hexutil.Bytes `json:"witness,omitempty"`
}
var dec ExecutionPayloadEnvelope
if err := json.Unmarshal(input, &dec); err != nil {
@ -62,17 +51,8 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
if dec.BlobsBundle != nil {
e.BlobsBundle = dec.BlobsBundle
}
if dec.Requests != nil {
e.Requests = make([][]byte, len(dec.Requests))
for k, v := range dec.Requests {
e.Requests[k] = v
}
}
if dec.Override != nil {
e.Override = *dec.Override
}
if dec.Witness != nil {
e.Witness = dec.Witness
}
return nil
}

@ -59,24 +59,23 @@ type payloadAttributesMarshaling struct {
// ExecutableData is the data necessary to execute an EL payload.
type ExecutableData struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom []byte `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number uint64 `json:"blockNumber" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Timestamp uint64 `json:"timestamp" gencodec:"required"`
ExtraData []byte `json:"extraData" gencodec:"required"`
BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions [][]byte `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *uint64 `json:"blobGasUsed"`
ExcessBlobGas *uint64 `json:"excessBlobGas"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom []byte `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number uint64 `json:"blockNumber" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Timestamp uint64 `json:"timestamp" gencodec:"required"`
ExtraData []byte `json:"extraData" gencodec:"required"`
BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions [][]byte `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *uint64 `json:"blobGasUsed"`
ExcessBlobGas *uint64 `json:"excessBlobGas"`
}
// JSON type overrides for executableData.
@ -93,23 +92,13 @@ type executableDataMarshaling struct {
ExcessBlobGas *hexutil.Uint64
}
// StatelessPayloadStatusV1 is the result of a stateless payload execution.
type StatelessPayloadStatusV1 struct {
Status string `json:"status"`
StateRoot common.Hash `json:"stateRoot"`
ReceiptsRoot common.Hash `json:"receiptsRoot"`
ValidationError *string `json:"validationError"`
}
//go:generate go run github.com/fjl/gencodec -type ExecutionPayloadEnvelope -field-override executionPayloadEnvelopeMarshaling -out gen_epe.go
type ExecutionPayloadEnvelope struct {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *big.Int `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Requests [][]byte `json:"executionRequests"`
Override bool `json:"shouldOverrideBuilder"`
Witness *hexutil.Bytes `json:"witness,omitempty"`
}
type BlobsBundleV1 struct {
@ -118,22 +107,15 @@ type BlobsBundleV1 struct {
Blobs []hexutil.Bytes `json:"blobs"`
}
type BlobAndProofV1 struct {
Blob hexutil.Bytes `json:"blob"`
Proof hexutil.Bytes `json:"proof"`
}
// JSON type overrides for ExecutionPayloadEnvelope.
type executionPayloadEnvelopeMarshaling struct {
BlockValue *hexutil.Big
Requests []hexutil.Bytes
}
type PayloadStatusV1 struct {
Status string `json:"status"`
Witness *hexutil.Bytes `json:"witness"`
LatestValidHash *common.Hash `json:"latestValidHash"`
ValidationError *string `json:"validationError"`
Status string `json:"status"`
LatestValidHash *common.Hash `json:"latestValidHash"`
ValidationError *string `json:"validationError"`
}
type TransitionConfigurationV1 struct {
@ -213,21 +195,7 @@ func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
// and that the blockhash of the constructed block matches the parameters. Nil
// Withdrawals value will propagate through the returned block. Empty
// Withdrawals value must be passed via non-nil, length 0 value in data.
func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte) (*types.Block, error) {
block, err := ExecutableDataToBlockNoHash(data, versionedHashes, beaconRoot, requests)
if err != nil {
return nil, err
}
if block.Hash() != data.BlockHash {
return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", data.BlockHash, block.Hash())
}
return block, nil
}
// ExecutableDataToBlockNoHash is analogous to ExecutableDataToBlock, but is used
// for stateless execution, so it skips checking if the executable data hashes to
// the requested hash (stateless has to *compute* the root hash, it's not given).
func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte) (*types.Block, error) {
func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (*types.Block, error) {
txs, err := decodeTransactions(data.Transactions)
if err != nil {
return nil, err
@ -262,13 +230,6 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
h := types.DeriveSha(types.Withdrawals(data.Withdrawals), trie.NewStackTrie(nil))
withdrawalsRoot = &h
}
var requestsHash *common.Hash
if requests != nil {
h := types.CalcRequestsHash(requests)
requestsHash = &h
}
header := &types.Header{
ParentHash: data.ParentHash,
UncleHash: types.EmptyUncleHash,
@ -289,39 +250,36 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
ExcessBlobGas: data.ExcessBlobGas,
BlobGasUsed: data.BlobGasUsed,
ParentBeaconRoot: beaconRoot,
RequestsHash: requestsHash,
}
return types.NewBlockWithHeader(header).
WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}).
WithWitness(data.ExecutionWitness),
nil
block := types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals})
if block.Hash() != data.BlockHash {
return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", data.BlockHash, block.Hash())
}
return block, nil
}
// BlockToExecutableData constructs the ExecutableData structure by filling the
// fields from the given block. It assumes the given block is post-merge block.
func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar, requests [][]byte) *ExecutionPayloadEnvelope {
func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar) *ExecutionPayloadEnvelope {
data := &ExecutableData{
BlockHash: block.Hash(),
ParentHash: block.ParentHash(),
FeeRecipient: block.Coinbase(),
StateRoot: block.Root(),
Number: block.NumberU64(),
GasLimit: block.GasLimit(),
GasUsed: block.GasUsed(),
BaseFeePerGas: block.BaseFee(),
Timestamp: block.Time(),
ReceiptsRoot: block.ReceiptHash(),
LogsBloom: block.Bloom().Bytes(),
Transactions: encodeTransactions(block.Transactions()),
Random: block.MixDigest(),
ExtraData: block.Extra(),
Withdrawals: block.Withdrawals(),
BlobGasUsed: block.BlobGasUsed(),
ExcessBlobGas: block.ExcessBlobGas(),
ExecutionWitness: block.ExecutionWitness(),
BlockHash: block.Hash(),
ParentHash: block.ParentHash(),
FeeRecipient: block.Coinbase(),
StateRoot: block.Root(),
Number: block.NumberU64(),
GasLimit: block.GasLimit(),
GasUsed: block.GasUsed(),
BaseFeePerGas: block.BaseFee(),
Timestamp: block.Time(),
ReceiptsRoot: block.ReceiptHash(),
LogsBloom: block.Bloom().Bytes(),
Transactions: encodeTransactions(block.Transactions()),
Random: block.MixDigest(),
ExtraData: block.Extra(),
Withdrawals: block.Withdrawals(),
BlobGasUsed: block.BlobGasUsed(),
ExcessBlobGas: block.ExcessBlobGas(),
}
// Add blobs.
bundle := BlobsBundleV1{
Commitments: make([]hexutil.Bytes, 0),
Blobs: make([]hexutil.Bytes, 0),
@ -334,18 +292,11 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
bundle.Proofs = append(bundle.Proofs, hexutil.Bytes(sidecar.Proofs[j][:]))
}
}
return &ExecutionPayloadEnvelope{
ExecutionPayload: data,
BlockValue: fees,
BlobsBundle: &bundle,
Requests: requests,
Override: false,
}
return &ExecutionPayloadEnvelope{ExecutionPayload: data, BlockValue: fees, BlobsBundle: &bundle, Override: false}
}
// ExecutionPayloadBody is used in the response to GetPayloadBodiesByHash and GetPayloadBodiesByRange
type ExecutionPayloadBody struct {
// ExecutionPayloadBodyV1 is used in the response to GetPayloadBodiesByHashV1 and GetPayloadBodiesByRangeV1
type ExecutionPayloadBodyV1 struct {
TransactionData []hexutil.Bytes `json:"transactions"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
}
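Sketch (not part of the diff): with the three-argument signatures shown in this hunk, the two helpers above act as inverses — BlockToExecutableData flattens a block into an ExecutableData envelope, and ExecutableDataToBlock re-derives the header and rejects a blockhash mismatch. A hedged pairing, assuming a post-merge block without blob transactions so a nil versioned-hash list passes the blob check; the package name is invented.

package payloadutil

import (
	"math/big"

	"github.com/ethereum/go-ethereum/beacon/engine"
	"github.com/ethereum/go-ethereum/core/types"
)

// roundTrip flattens a block into an executable payload and rebuilds it;
// the second call errors if the rebuilt header hashes differently.
func roundTrip(block *types.Block, fees *big.Int) (*types.Block, error) {
	env := engine.BlockToExecutableData(block, fees, nil)
	return engine.ExecutableDataToBlock(*env.ExecutionPayload, nil, block.BeaconRoot())
}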

@ -23,8 +23,6 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"sync"
"time"
@ -122,12 +120,8 @@ func NewBeaconLightApi(url string, customHeaders map[string]string) *BeaconLight
}
}
func (api *BeaconLightApi) httpGet(path string, params url.Values) ([]byte, error) {
uri, err := api.buildURL(path, params)
if err != nil {
return nil, err
}
req, err := http.NewRequest("GET", uri, nil)
func (api *BeaconLightApi) httpGet(path string) ([]byte, error) {
req, err := http.NewRequest("GET", api.url+path, nil)
if err != nil {
return nil, err
}
@ -151,16 +145,17 @@ func (api *BeaconLightApi) httpGet(path string, params url.Values) ([]byte, erro
}
}
func (api *BeaconLightApi) httpGetf(format string, params ...any) ([]byte, error) {
return api.httpGet(fmt.Sprintf(format, params...))
}
// GetBestUpdatesAndCommittees fetches and validates LightClientUpdate for given
// period and full serialized committee for the next period (committee root hash
// equals update.NextSyncCommitteeRoot).
// Note that the results are validated but the update signature should be verified
// by the caller as its validity depends on the update chain.
func (api *BeaconLightApi) GetBestUpdatesAndCommittees(firstPeriod, count uint64) ([]*types.LightClientUpdate, []*types.SerializedSyncCommittee, error) {
resp, err := api.httpGet("/eth/v1/beacon/light_client/updates", map[string][]string{
"start_period": {strconv.FormatUint(firstPeriod, 10)},
"count": {strconv.FormatUint(count, 10)},
})
resp, err := api.httpGetf("/eth/v1/beacon/light_client/updates?start_period=%d&count=%d", firstPeriod, count)
if err != nil {
return nil, nil, err
}
@ -197,7 +192,7 @@ func (api *BeaconLightApi) GetBestUpdatesAndCommittees(firstPeriod, count uint64
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
func (api *BeaconLightApi) GetOptimisticUpdate() (types.OptimisticUpdate, error) {
resp, err := api.httpGet("/eth/v1/beacon/light_client/optimistic_update", nil)
resp, err := api.httpGet("/eth/v1/beacon/light_client/optimistic_update")
if err != nil {
return types.OptimisticUpdate{}, err
}
@ -250,7 +245,7 @@ func decodeOptimisticUpdate(enc []byte) (types.OptimisticUpdate, error) {
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
func (api *BeaconLightApi) GetFinalityUpdate() (types.FinalityUpdate, error) {
resp, err := api.httpGet("/eth/v1/beacon/light_client/finality_update", nil)
resp, err := api.httpGet("/eth/v1/beacon/light_client/finality_update")
if err != nil {
return types.FinalityUpdate{}, err
}
@ -316,7 +311,7 @@ func (api *BeaconLightApi) GetHeader(blockRoot common.Hash) (types.Header, bool,
} else {
blockId = blockRoot.Hex()
}
resp, err := api.httpGet(fmt.Sprintf("/eth/v1/beacon/headers/%s", blockId), nil)
resp, err := api.httpGetf("/eth/v1/beacon/headers/%s", blockId)
if err != nil {
return types.Header{}, false, false, err
}
@ -347,7 +342,7 @@ func (api *BeaconLightApi) GetHeader(blockRoot common.Hash) (types.Header, bool,
// GetCheckpointData fetches and validates bootstrap data belonging to the given checkpoint.
func (api *BeaconLightApi) GetCheckpointData(checkpointHash common.Hash) (*types.BootstrapData, error) {
resp, err := api.httpGet(fmt.Sprintf("/eth/v1/beacon/light_client/bootstrap/0x%x", checkpointHash[:]), nil)
resp, err := api.httpGetf("/eth/v1/beacon/light_client/bootstrap/0x%x", checkpointHash[:])
if err != nil {
return nil, err
}
@ -389,7 +384,7 @@ func (api *BeaconLightApi) GetCheckpointData(checkpointHash common.Hash) (*types
}
func (api *BeaconLightApi) GetBeaconBlock(blockRoot common.Hash) (*types.BeaconBlock, error) {
resp, err := api.httpGet(fmt.Sprintf("/eth/v2/beacon/blocks/0x%x", blockRoot), nil)
resp, err := api.httpGetf("/eth/v2/beacon/blocks/0x%x", blockRoot)
if err != nil {
return nil, err
}
@ -550,13 +545,9 @@ func (api *BeaconLightApi) StartHeadListener(listener HeadEventListener) func()
// established. It can only return nil when the context is canceled.
func (api *BeaconLightApi) startEventStream(ctx context.Context, listener *HeadEventListener) *eventsource.Stream {
for retry := true; retry; retry = ctxSleep(ctx, 5*time.Second) {
path := "/eth/v1/events?topics=head&topics=light_client_finality_update&topics=light_client_optimistic_update"
log.Trace("Sending event subscription request")
uri, err := api.buildURL("/eth/v1/events", map[string][]string{"topics": {"head", "light_client_finality_update", "light_client_optimistic_update"}})
if err != nil {
listener.OnError(fmt.Errorf("error creating event subscription URL: %v", err))
continue
}
req, err := http.NewRequestWithContext(ctx, "GET", uri, nil)
req, err := http.NewRequestWithContext(ctx, "GET", api.url+path, nil)
if err != nil {
listener.OnError(fmt.Errorf("error creating event subscription request: %v", err))
continue
@ -585,15 +576,3 @@ func ctxSleep(ctx context.Context, timeout time.Duration) (ok bool) {
return false
}
}
func (api *BeaconLightApi) buildURL(path string, params url.Values) (string, error) {
uri, err := url.Parse(api.url)
if err != nil {
return "", err
}
uri = uri.JoinPath(path)
if params != nil {
uri.RawQuery = params.Encode()
}
return uri.String(), nil
}
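Sketch (not part of the diff): one side of this hunk formats request URLs by hand with fmt.Sprintf, the other goes through a buildURL helper that lets net/url join the path and encode the query parameters. A standard-library-only illustration of the latter pattern; the endpoint and parameter values are placeholders.

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	base, err := url.Parse("http://127.0.0.1:5052") // assumed beacon API endpoint
	if err != nil {
		panic(err)
	}
	u := base.JoinPath("/eth/v1/beacon/light_client/updates")
	u.RawQuery = url.Values{
		"start_period": {strconv.FormatUint(100, 10)},
		"count":        {strconv.FormatUint(4, 10)},
	}.Encode()
	fmt.Println(u)
	// prints http://127.0.0.1:5052/eth/v1/beacon/light_client/updates?count=4&start_period=100
}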

@ -76,32 +76,34 @@ type CommitteeChain struct {
unixNano func() int64 // system clock (simulated clock in tests)
sigVerifier committeeSigVerifier // BLS sig verifier (dummy verifier in tests)
config *params.ChainConfig
config *types.ChainConfig
signerThreshold int
minimumUpdateScore types.UpdateScore
enforceTime bool // enforceTime specifies whether the age of a signed header should be checked
}
// NewCommitteeChain creates a new CommitteeChain.
func NewCommitteeChain(db ethdb.KeyValueStore, config *params.ChainConfig, signerThreshold int, enforceTime bool) *CommitteeChain {
func NewCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool) *CommitteeChain {
return newCommitteeChain(db, config, signerThreshold, enforceTime, blsVerifier{}, &mclock.System{}, func() int64 { return time.Now().UnixNano() })
}
// NewTestCommitteeChain creates a new CommitteeChain for testing.
func NewTestCommitteeChain(db ethdb.KeyValueStore, config *params.ChainConfig, signerThreshold int, enforceTime bool, clock *mclock.Simulated) *CommitteeChain {
func NewTestCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool, clock *mclock.Simulated) *CommitteeChain {
return newCommitteeChain(db, config, signerThreshold, enforceTime, dummyVerifier{}, clock, func() int64 { return int64(clock.Now()) })
}
// newCommitteeChain creates a new CommitteeChain with the option of replacing the
// clock source and signature verification for testing purposes.
func newCommitteeChain(db ethdb.KeyValueStore, config *params.ChainConfig, signerThreshold int, enforceTime bool, sigVerifier committeeSigVerifier, clock mclock.Clock, unixNano func() int64) *CommitteeChain {
func newCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool, sigVerifier committeeSigVerifier, clock mclock.Clock, unixNano func() int64) *CommitteeChain {
s := &CommitteeChain{
committeeCache: lru.NewCache[uint64, syncCommittee](10),
db: db,
sigVerifier: sigVerifier,
clock: clock,
unixNano: unixNano,
config: config,
enforceTime: enforceTime,
committeeCache: lru.NewCache[uint64, syncCommittee](10),
db: db,
sigVerifier: sigVerifier,
clock: clock,
unixNano: unixNano,
config: config,
signerThreshold: signerThreshold,
enforceTime: enforceTime,
minimumUpdateScore: types.UpdateScore{
SignerCount: uint32(signerThreshold),
SubPeriodIndex: params.SyncPeriodLength / 16,
@ -505,7 +507,7 @@ func (s *CommitteeChain) verifySignedHeader(head types.SignedHeader) (bool, time
if committee == nil {
return false, age, nil
}
if signingRoot, err := s.config.Forks.SigningRoot(head.Header.Epoch(), head.Header.Hash()); err == nil {
if signingRoot, err := s.config.Forks.SigningRoot(head.Header); err == nil {
return s.sigVerifier.verifySignature(committee, signingRoot, &head.Signature), age, nil
}
return false, age, nil
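Sketch (not part of the diff): with the constructor signature shown above, a committee chain takes a key-value store, the chain configuration and an explicit signer threshold. The import paths below follow the file locations in this comparison, and the 342-signer threshold (roughly 2/3 of a 512-member sync committee) is an assumed value, not taken from the source.

package main

import (
	"github.com/ethereum/go-ethereum/beacon/light"
	"github.com/ethereum/go-ethereum/beacon/types"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func main() {
	var config types.ChainConfig // e.g. one of the presets from beacon/blsync/config.go
	chain := light.NewCommitteeChain(memorydb.New(), &config, 342, true)
	_ = chain
}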

@ -31,15 +31,15 @@ var (
testGenesis = newTestGenesis()
testGenesis2 = newTestGenesis()
tfBase = newTestForks(testGenesis, params.Forks{
&params.Fork{Epoch: 0, Version: []byte{0}},
tfBase = newTestForks(testGenesis, types.Forks{
&types.Fork{Epoch: 0, Version: []byte{0}},
})
tfAlternative = newTestForks(testGenesis, params.Forks{
&params.Fork{Epoch: 0, Version: []byte{0}},
&params.Fork{Epoch: 0x700, Version: []byte{1}},
tfAlternative = newTestForks(testGenesis, types.Forks{
&types.Fork{Epoch: 0, Version: []byte{0}},
&types.Fork{Epoch: 0x700, Version: []byte{1}},
})
tfAnotherGenesis = newTestForks(testGenesis2, params.Forks{
&params.Fork{Epoch: 0, Version: []byte{0}},
tfAnotherGenesis = newTestForks(testGenesis2, types.Forks{
&types.Fork{Epoch: 0, Version: []byte{0}},
})
tcBase = newTestCommitteeChain(nil, tfBase, true, 0, 10, 400, false)
@ -226,13 +226,13 @@ type committeeChainTest struct {
t *testing.T
db *memorydb.Database
clock *mclock.Simulated
config params.ChainConfig
config types.ChainConfig
signerThreshold int
enforceTime bool
chain *CommitteeChain
}
func newCommitteeChainTest(t *testing.T, config params.ChainConfig, signerThreshold int, enforceTime bool) *committeeChainTest {
func newCommitteeChainTest(t *testing.T, config types.ChainConfig, signerThreshold int, enforceTime bool) *committeeChainTest {
c := &committeeChainTest{
t: t,
db: memorydb.New(),
@ -298,20 +298,20 @@ func (c *committeeChainTest) verifyRange(tc *testCommitteeChain, begin, end uint
c.verifySignedHeader(tc, float64(end)+1.5, false)
}
func newTestGenesis() params.ChainConfig {
var config params.ChainConfig
func newTestGenesis() types.ChainConfig {
var config types.ChainConfig
rand.Read(config.GenesisValidatorsRoot[:])
return config
}
func newTestForks(config params.ChainConfig, forks params.Forks) params.ChainConfig {
func newTestForks(config types.ChainConfig, forks types.Forks) types.ChainConfig {
for _, fork := range forks {
config.AddFork(fork.Name, fork.Epoch, fork.Version)
}
return config
}
func newTestCommitteeChain(parent *testCommitteeChain, config params.ChainConfig, newCommittees bool, begin, end int, signerCount int, finalizedHeader bool) *testCommitteeChain {
func newTestCommitteeChain(parent *testCommitteeChain, config types.ChainConfig, newCommittees bool, begin, end int, signerCount int, finalizedHeader bool) *testCommitteeChain {
tc := &testCommitteeChain{
config: config,
}
@ -337,7 +337,7 @@ type testPeriod struct {
type testCommitteeChain struct {
periods []testPeriod
config params.ChainConfig
config types.ChainConfig
}
func (tc *testCommitteeChain) fillCommittees(begin, end int) {

@ -69,13 +69,12 @@ func (h *HeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
// slot or same slot and more signers) then ValidatedOptimistic is updated.
// The boolean return flag signals if ValidatedOptimistic has been changed.
func (h *HeadTracker) ValidateOptimistic(update types.OptimisticUpdate) (bool, error) {
if err := update.Validate(); err != nil {
return false, err
}
h.lock.Lock()
defer h.lock.Unlock()
if err := update.Validate(); err != nil {
return false, err
}
replace, err := h.validate(update.SignedHeader(), h.optimisticUpdate.SignedHeader())
if replace {
h.optimisticUpdate, h.hasOptimisticUpdate = update, true
@ -89,13 +88,12 @@ func (h *HeadTracker) ValidateOptimistic(update types.OptimisticUpdate) (bool, e
// slot or same slot and more signers) then ValidatedFinality is updated.
// The boolean return flag signals if ValidatedFinality has been changed.
func (h *HeadTracker) ValidateFinality(update types.FinalityUpdate) (bool, error) {
if err := update.Validate(); err != nil {
return false, err
}
h.lock.Lock()
defer h.lock.Unlock()
if err := update.Validate(); err != nil {
return false, err
}
replace, err := h.validate(update.SignedHeader(), h.finalityUpdate.SignedHeader())
if replace {
h.finalityUpdate, h.hasFinalityUpdate = update, true

@ -201,34 +201,6 @@ func TestUpdateSyncDifferentHeads(t *testing.T) {
chain.ExpNextSyncPeriod(t, 17)
}
func TestRangeLock(t *testing.T) {
r := make(rangeLock)
// Lock from 0 to 99.
r.lock(0, 100, 1)
for i := 0; i < 100; i++ {
if v, ok := r[uint64(i)]; v <= 0 || !ok {
t.Fatalf("integer space: %d not locked", i)
}
}
// Unlock from 0 to 99.
r.lock(0, 100, -1)
for i := 0; i < 100; i++ {
if v, ok := r[uint64(i)]; v > 0 || ok {
t.Fatalf("integer space: %d is locked", i)
}
}
// Lock from 0 to 99 then unlock from 10 to 59.
r.lock(0, 100, 1)
r.lock(10, 50, -1)
first, count := r.firstUnlocked(0, 100)
if first != 10 || count != 50 {
t.Fatalf("unexpected first: %d or count: %d", first, count)
}
}
func testRespUpdate(request requestWithID) request.Response {
var resp RespUpdates
if request.request == nil {

@ -33,7 +33,7 @@ func GenerateTestCommittee() *types.SerializedSyncCommittee {
return s
}
func GenerateTestUpdate(config *params.ChainConfig, period uint64, committee, nextCommittee *types.SerializedSyncCommittee, signerCount int, finalizedHeader bool) *types.LightClientUpdate {
func GenerateTestUpdate(config *types.ChainConfig, period uint64, committee, nextCommittee *types.SerializedSyncCommittee, signerCount int, finalizedHeader bool) *types.LightClientUpdate {
update := new(types.LightClientUpdate)
update.NextSyncCommitteeRoot = nextCommittee.Root()
var attestedHeader types.Header
@ -48,9 +48,9 @@ func GenerateTestUpdate(config *params.ChainConfig, period uint64, committee, ne
return update
}
func GenerateTestSignedHeader(header types.Header, config *params.ChainConfig, committee *types.SerializedSyncCommittee, signatureSlot uint64, signerCount int) types.SignedHeader {
func GenerateTestSignedHeader(header types.Header, config *types.ChainConfig, committee *types.SerializedSyncCommittee, signatureSlot uint64, signerCount int) types.SignedHeader {
bitmask := makeBitmask(signerCount)
signingRoot, _ := config.Forks.SigningRoot(header.Epoch(), header.Hash())
signingRoot, _ := config.Forks.SigningRoot(header)
c, _ := dummyVerifier{}.deserializeSyncCommittee(committee)
return types.SignedHeader{
Header: header,

@ -1,56 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package params
import (
"github.com/ethereum/go-ethereum/common"
)
var (
MainnetLightConfig = (&ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"),
GenesisTime: 1606824023,
Checkpoint: common.HexToHash("0x6509b691f4de4f7b083f2784938fd52f0e131675432b3fd85ea549af9aebd3d0"),
}).
AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
AddFork("ALTAIR", 74240, []byte{1, 0, 0, 0}).
AddFork("BELLATRIX", 144896, []byte{2, 0, 0, 0}).
AddFork("CAPELLA", 194048, []byte{3, 0, 0, 0}).
AddFork("DENEB", 269568, []byte{4, 0, 0, 0})
SepoliaLightConfig = (&ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"),
GenesisTime: 1655733600,
Checkpoint: common.HexToHash("0x456e85f5608afab3465a0580bff8572255f6d97af0c5f939e3f7536b5edb2d3f"),
}).
AddFork("GENESIS", 0, []byte{144, 0, 0, 105}).
AddFork("ALTAIR", 50, []byte{144, 0, 0, 112}).
AddFork("BELLATRIX", 100, []byte{144, 0, 0, 113}).
AddFork("CAPELLA", 56832, []byte{144, 0, 0, 114}).
AddFork("DENEB", 132608, []byte{144, 0, 0, 115})
HoleskyLightConfig = (&ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1"),
GenesisTime: 1695902400,
Checkpoint: common.HexToHash("0x6456a1317f54d4b4f2cb5bc9d153b5af0988fe767ef0609f0236cf29030bcff7"),
}).
AddFork("GENESIS", 0, []byte{1, 1, 112, 0}).
AddFork("ALTAIR", 0, []byte{2, 1, 112, 0}).
AddFork("BELLATRIX", 0, []byte{3, 1, 112, 0}).
AddFork("CAPELLA", 256, []byte{4, 1, 112, 0}).
AddFork("DENEB", 29696, []byte{5, 1, 112, 0})
)

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package params
package types
import (
"crypto/sha256"
@ -39,13 +39,81 @@ const syncCommitteeDomain = 7
var knownForks = []string{"GENESIS", "ALTAIR", "BELLATRIX", "CAPELLA", "DENEB"}
// ClientConfig contains beacon light client configuration.
type ClientConfig struct {
ChainConfig
Apis []string
CustomHeader map[string]string
Threshold int
NoFilter bool
// Fork describes a single beacon chain fork and also stores the calculated
// signature domain used after this fork.
type Fork struct {
// Name of the fork in the chain config (config.yaml) file
Name string
// Epoch when given fork version is activated
Epoch uint64
// Fork version, see https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#custom-types
Version []byte
// index in list of known forks or MaxInt if unknown
knownIndex int
// calculated by computeDomain, based on fork version and genesis validators root
domain merkle.Value
}
// computeDomain returns the signature domain based on the given fork version
// and genesis validator set root.
func (f *Fork) computeDomain(genesisValidatorsRoot common.Hash) {
var (
hasher = sha256.New()
forkVersion32 merkle.Value
forkDataRoot merkle.Value
)
copy(forkVersion32[:], f.Version)
hasher.Write(forkVersion32[:])
hasher.Write(genesisValidatorsRoot[:])
hasher.Sum(forkDataRoot[:0])
f.domain[0] = syncCommitteeDomain
copy(f.domain[4:], forkDataRoot[:28])
}
// Forks is the list of all beacon chain forks in the chain configuration.
type Forks []*Fork
// domain returns the signature domain for the given epoch (assumes that domains
// have already been calculated).
func (f Forks) domain(epoch uint64) (merkle.Value, error) {
for i := len(f) - 1; i >= 0; i-- {
if epoch >= f[i].Epoch {
return f[i].domain, nil
}
}
return merkle.Value{}, fmt.Errorf("unknown fork for epoch %d", epoch)
}
// SigningRoot calculates the signing root of the given header.
func (f Forks) SigningRoot(header Header) (common.Hash, error) {
domain, err := f.domain(header.Epoch())
if err != nil {
return common.Hash{}, err
}
var (
signingRoot common.Hash
headerHash = header.Hash()
hasher = sha256.New()
)
hasher.Write(headerHash[:])
hasher.Write(domain[:])
hasher.Sum(signingRoot[:0])
return signingRoot, nil
}
func (f Forks) Len() int { return len(f) }
func (f Forks) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
func (f Forks) Less(i, j int) bool {
if f[i].Epoch != f[j].Epoch {
return f[i].Epoch < f[j].Epoch
}
return f[i].knownIndex < f[j].knownIndex
}
// ChainConfig contains the beacon chain configuration.
@ -53,7 +121,6 @@ type ChainConfig struct {
GenesisTime uint64 // Unix timestamp of slot 0
GenesisValidatorsRoot common.Hash // Root hash of the genesis validator set, used for signature domain calculation
Forks Forks
Checkpoint common.Hash
}
// ForkAtEpoch returns the latest active fork at the given epoch.
@ -135,79 +202,3 @@ func (c *ChainConfig) LoadForks(path string) error {
}
return nil
}
// Fork describes a single beacon chain fork and also stores the calculated
// signature domain used after this fork.
type Fork struct {
// Name of the fork in the chain config (config.yaml) file
Name string
// Epoch when given fork version is activated
Epoch uint64
// Fork version, see https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#custom-types
Version []byte
// index in list of known forks or MaxInt if unknown
knownIndex int
// calculated by computeDomain, based on fork version and genesis validators root
domain merkle.Value
}
// computeDomain returns the signature domain based on the given fork version
// and genesis validator set root.
func (f *Fork) computeDomain(genesisValidatorsRoot common.Hash) {
var (
hasher = sha256.New()
forkVersion32 merkle.Value
forkDataRoot merkle.Value
)
copy(forkVersion32[:], f.Version)
hasher.Write(forkVersion32[:])
hasher.Write(genesisValidatorsRoot[:])
hasher.Sum(forkDataRoot[:0])
f.domain[0] = syncCommitteeDomain
copy(f.domain[4:], forkDataRoot[:28])
}
// Forks is the list of all beacon chain forks in the chain configuration.
type Forks []*Fork
// domain returns the signature domain for the given epoch (assumes that domains
// have already been calculated).
func (f Forks) domain(epoch uint64) (merkle.Value, error) {
for i := len(f) - 1; i >= 0; i-- {
if epoch >= f[i].Epoch {
return f[i].domain, nil
}
}
return merkle.Value{}, fmt.Errorf("unknown fork for epoch %d", epoch)
}
// SigningRoot calculates the signing root of the given header.
func (f Forks) SigningRoot(epoch uint64, root common.Hash) (common.Hash, error) {
domain, err := f.domain(epoch)
if err != nil {
return common.Hash{}, err
}
var (
signingRoot common.Hash
hasher = sha256.New()
)
hasher.Write(root[:])
hasher.Write(domain[:])
hasher.Sum(signingRoot[:0])
return signingRoot, nil
}
func (f Forks) Len() int { return len(f) }
func (f Forks) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
func (f Forks) Less(i, j int) bool {
if f[i].Epoch != f[j].Epoch {
return f[i].Epoch < f[j].Epoch
}
return f[i].knownIndex < f[j].knownIndex
}
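Sketch (not part of the diff): computeDomain and SigningRoot above implement the sync-committee signature domain — the domain is the 4-byte domain type (0x07000000) followed by the first 28 bytes of sha256(fork_version_32 ++ genesis_validators_root), and the signing root is sha256(header_root ++ domain). The same arithmetic with the standard library only, using zero placeholder roots.

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	var (
		version     = [4]byte{4, 0, 0, 0} // e.g. the DENEB mainnet fork version above
		gvRoot      [32]byte              // genesis validators root (zero for illustration)
		headerRoot  [32]byte              // beacon header hash (zero for illustration)
		version32   [32]byte
		forkData    [32]byte
		domain      [32]byte
		signingRoot [32]byte
	)
	// fork_data_root = sha256(fork_version_32 ++ genesis_validators_root)
	copy(version32[:], version[:])
	h := sha256.New()
	h.Write(version32[:])
	h.Write(gvRoot[:])
	h.Sum(forkData[:0])

	// domain = DOMAIN_SYNC_COMMITTEE (0x07000000) ++ fork_data_root[:28]
	domain[0] = 7
	copy(domain[4:], forkData[:28])

	// signing_root = sha256(header_root ++ domain)
	h = sha256.New()
	h.Write(headerRoot[:])
	h.Write(domain[:])
	h.Sum(signingRoot[:0])
	fmt.Printf("signing root: %x\n", signingRoot)
}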

@ -5,88 +5,87 @@
# https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/
ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz
# version:golang 1.23.4
# version:golang 1.22.6
# https://go.dev/dl/
ad345ac421e90814293a9699cca19dd5238251c3f687980bbcae28495b263531 go1.23.4.src.tar.gz
459a09504f7ebf2cbcee6ac282c8f34f97651217b1feae64557dcdd392b9bb62 go1.23.4.aix-ppc64.tar.gz
0f4e569b2d38cb75cb2efcaf56beb42778ab5e46d89318fef39060fe36d7b9b7 go1.23.4.darwin-amd64.pkg
6700067389a53a1607d30aa8d6e01d198230397029faa0b109e89bc871ab5a0e go1.23.4.darwin-amd64.tar.gz
19c054eaf40c5fac65b027f7443c01382e493d3c8c42cf8b2504832ebddce037 go1.23.4.darwin-arm64.pkg
87d2bb0ad4fe24d2a0685a55df321e0efe4296419a9b3de03369dbe60b8acd3a go1.23.4.darwin-arm64.tar.gz
5e73dc89b44626677ec9d9aa4257d6d2ef1245502bc36a99385284910d0ade0a go1.23.4.dragonfly-amd64.tar.gz
8df26b1e71234756c1f0e82cfffba3f427c5a91a251844ada2c7694a6986c546 go1.23.4.freebsd-386.tar.gz
7de078d94d2af50ee9506ef7df85e4d12d4018b23e0b2cbcbc61d686f549b41a go1.23.4.freebsd-amd64.tar.gz
3f23e0a01cfe24e4160124cd7ab02bdd188264652074abdbce401c93f41e58a4 go1.23.4.freebsd-arm.tar.gz
986a20e7c94431f03b44b3c415abc698c7b4edc2ae8431f7ecae1c2429d4cfa2 go1.23.4.freebsd-arm64.tar.gz
25e39f005f977778ce963fc43089510fe7514f3cfc0358eab584de4ce9f181fb go1.23.4.freebsd-riscv64.tar.gz
7e1d52f93da68c3bab39e3d83f89944d7d151208e54fdc30b0eda2a3812661d7 go1.23.4.illumos-amd64.tar.gz
4a4a0e7587ef8c8a326439b957027f2791795e2d29d4ae3885b4091a48f843bc go1.23.4.linux-386.tar.gz
6924efde5de86fe277676e929dc9917d466efa02fb934197bc2eba35d5680971 go1.23.4.linux-amd64.tar.gz
16e5017863a7f6071363782b1b8042eb12c6ca4f4cd71528b2123f0a1275b13e go1.23.4.linux-arm64.tar.gz
1f1dda0dc7ce0b2295f57258ec5ef0803fd31b9ed0aa20e2e9222334e5755de1 go1.23.4.linux-armv6l.tar.gz
4f469179a335a1a7bb9f991ad0c567f3d3eeb9b412ecd192206ab5c3e1a52b5a go1.23.4.linux-loong64.tar.gz
86b68185bcc43ea07190e95137c3442f062acc7ae10c3f1cf900fbe23e07df24 go1.23.4.linux-mips.tar.gz
3a19245eec76533b3d01c90f3a40a38d63684028f0fd54d442dc9a9d03197891 go1.23.4.linux-mips64.tar.gz
b53a06fc8455f6a875329e8d2e24d39db298122c9cce6e948117022191f6c613 go1.23.4.linux-mips64le.tar.gz
66120a8105b8ba6559f4e6a13b1e39b433fb8032df9d1744e4486876fa1723ce go1.23.4.linux-mipsle.tar.gz
33be2bfb27f2821a65e9f6aba744c85ea7c5e233e16bac27bb3ec253bcd4e970 go1.23.4.linux-ppc64.tar.gz
65a303ef51e48ff77e004a6a5b4db6ce59495cd59c6af51b54bf4f786c01a1b9 go1.23.4.linux-ppc64le.tar.gz
7c40e9e0d722cef14ede765159ba297f4c6e3093bb106f10fbccf8564780049a go1.23.4.linux-riscv64.tar.gz
74aab82bf4eca7c26c830a5b0e2a31d193a4d5ba47045526b92473cc7188d7d7 go1.23.4.linux-s390x.tar.gz
dba009d8bf9928cb5a1e31fcbe0eb41335cce4fe63755d95cef6b5987df4ed5a go1.23.4.netbsd-386.tar.gz
54b081cc36355aa5ecb6db9544cf7e77366a7b08ce96cb98a45d043e393660c7 go1.23.4.netbsd-amd64.tar.gz
f05fec348c7c9f07e1ad4e436db4122e98de99ebcfbf6ac6176869785f334a02 go1.23.4.netbsd-arm.tar.gz
317878da2bface5a57a8eaf5c1fe2b40b1c82d8172a10453ad3eea36f6946bdb go1.23.4.netbsd-arm64.tar.gz
0d84350dfd72c505c6ad474e51676b04e95ffb748c614bd5bf8510026873059b go1.23.4.openbsd-386.tar.gz
cc62f5a14ea3d573d8edbce1833f70a8f99ca048a9db0fcc9e738fd48e950505 go1.23.4.openbsd-amd64.tar.gz
326aba6cf5bb9348fa3e41217abd2c84eac92608684e2fe8c5474fdab23a0db9 go1.23.4.openbsd-arm.tar.gz
51ea2a2588bf3da8e1476f3e2bd4d6724d74126e99f9c6ea9af4ebe389e64de6 go1.23.4.openbsd-arm64.tar.gz
44c5c82ab23e40225b2ba1e7d19150a5973ea58e93b4931e426e6e6f0d108872 go1.23.4.openbsd-ppc64.tar.gz
5fa31fc13d1e3c123a5e96ba38683fa2c947baed23ac9c7c341afcfe007c8993 go1.23.4.openbsd-riscv64.tar.gz
e5952fc93eeaa0094ef09a0e72a9f06f0621ce841a39f9637fb5b9062e77d67a go1.23.4.plan9-386.tar.gz
fb2a9ee3ae5a77e734862e257a9395b43e707ac45e060dfa84c5a40688e73170 go1.23.4.plan9-amd64.tar.gz
e1b95563b19fdebd6ea0d20c07641e69580976fa754e586c831ad7a3ae987140 go1.23.4.plan9-arm.tar.gz
088c282509fc9e1a8f29fc0dd16fe486854d05b8ceba08d077d17d11d6979a41 go1.23.4.solaris-amd64.tar.gz
e5865c1bfc3fee5d003819b2e2c800f598fe9994931bac63f573e8d05a10d91f go1.23.4.windows-386.msi
e544e0e356147ba998e267002bd0f2c4bf3370d495467a55baf2c63595a2026d go1.23.4.windows-386.zip
5f8cc5991eb8f4f96b6c611d839453cd11c9a2c3f23672a4188342c97ee159fa go1.23.4.windows-amd64.msi
16c59ac9196b63afb872ce9b47f945b9821a3e1542ec125f16f6085a1c0f3c39 go1.23.4.windows-amd64.zip
fc77c0531406d092c5356167e45c05a22d16bea84e3fa555e0f03af085c11763 go1.23.4.windows-arm.msi
1012cfd8ca7241c2beecb5c345dd61f01897c6f6baca80ea1bfed357035c868a go1.23.4.windows-arm.zip
8347c1aa4e1e67954d12830f88dbe44bd7ac0ec134bb472783dbfb5a3a8865d0 go1.23.4.windows-arm64.msi
db69cae5006753c785345c3215ad941f8b6224e2f81fec471c42d6857bee0e6f go1.23.4.windows-arm64.zip
9e48d99d519882579917d8189c17e98c373ce25abaebb98772e2927088992a51 go1.22.6.src.tar.gz
eeb0cc42120cbae6d3695dae2e5420fa0e93a5db957db139b55efdb879dd9856 go1.22.6.aix-ppc64.tar.gz
b47ac340f0b072943fed1f558a26eb260cc23bd21b8af175582e9103141d465b go1.22.6.darwin-amd64.pkg
9c3c0124b01b5365f73a1489649f78f971ecf84844ad9ca58fde133096ddb61b go1.22.6.darwin-amd64.tar.gz
14d0355ec1c0eeb213a16efa8635fac1f16067ef78a8173abf9a8c7b805e551e go1.22.6.darwin-arm64.pkg
ebac39fd44fc22feed1bb519af431c84c55776e39b30f4fd62930da9c0cfd1e3 go1.22.6.darwin-arm64.tar.gz
3695b10c722a4920c8a736284f8820c142e1e752f3a87f797a45c64366f7a173 go1.22.6.dragonfly-amd64.tar.gz
a9b9570c80294a664d50b566d6bd1aa42465997d2d76a57936b32f55f5c69c63 go1.22.6.freebsd-386.tar.gz
424a5618406800365fe3ad96a795fb55ce394bea3ff48eaf56d292bf7a916d1e go1.22.6.freebsd-amd64.tar.gz
e0dce3a6dbe8e7e054d329dd4cb403935c63c0f7e22e693077aa60e12018b883 go1.22.6.freebsd-arm.tar.gz
34930b01f58889c71f7a78c51c6c3bd2ce289ac7862c76dab691303cfa935fd1 go1.22.6.freebsd-arm64.tar.gz
4c9d630e55d4d600a5b4297e59620c3bdfe63a441981682b3638e2fdda228a44 go1.22.6.freebsd-riscv64.tar.gz
9ed63feaf2ef56c56f1cf0d9d3fab4006efd22a38e2f1f5252e95c6ac09332f3 go1.22.6.illumos-amd64.tar.gz
9e680027b058beab10ce5938607660964b6d2c564bf50bdb01aa090dc5beda98 go1.22.6.linux-386.tar.gz
999805bed7d9039ec3da1a53bfbcafc13e367da52aa823cb60b68ba22d44c616 go1.22.6.linux-amd64.tar.gz
c15fa895341b8eaf7f219fada25c36a610eb042985dc1a912410c1c90098eaf2 go1.22.6.linux-arm64.tar.gz
b566484fe89a54c525dd1a4cbfec903c1f6e8f0b7b3dbaf94c79bc9145391083 go1.22.6.linux-armv6l.tar.gz
1ee6e1896aea856142d2af7045cea118995b39404aa61afd12677d023d47ee69 go1.22.6.linux-loong64.tar.gz
fdd0e1a3e178f9bc79adf6ff1e3de4554ce581b4c468fd6e113c43fbbbe1eec6 go1.22.6.linux-mips.tar.gz
d3e5a621fc5a07759e503a971af0b28ded6a7d6f5604ab511f51f930a18dd3e4 go1.22.6.linux-mips64.tar.gz
01547606c5b5c1b0e5587b3afd65172860d2f4755e523785832905759ecce2d7 go1.22.6.linux-mips64le.tar.gz
2cd771416ae03c11240cfdb551d66ab9a941077664f3727b966f94386c23b0fa go1.22.6.linux-mipsle.tar.gz
6ef61d517777925e6bdb0321ea42d5f60acc20c1314dd902b9d0bfa3a5fd4fca go1.22.6.linux-ppc64.tar.gz
9d99fce3f6f72a76630fe91ec0884dfe3db828def4713368424900fa98bb2bd6 go1.22.6.linux-ppc64le.tar.gz
30be9c9b9cc4f044d4da9a33ee601ab7b3aff4246107d323a79e08888710754e go1.22.6.linux-riscv64.tar.gz
82f3bae3ddb4ede45b848db48c5486fadb58551e74507bda45484257e7194a95 go1.22.6.linux-s390x.tar.gz
85b2eb9d40a930bd3e75d0096a6eb5847aac86c5085e6d13a5845e9ef03f8d4b go1.22.6.netbsd-386.tar.gz
6e9acbdc34fb2a942d547c47c9c1989bb6e32b4a37d57fb312499e2bb33b46b7 go1.22.6.netbsd-amd64.tar.gz
e6eff3cf0038f2a9b0c9e01e228577a783bddcd8051222a3d949e24ee392e769 go1.22.6.netbsd-arm.tar.gz
43a7e2ba22da700b844f7561e3dd5434540ed6c9781be2e9c42e8a8cbf558f8e go1.22.6.netbsd-arm64.tar.gz
a90b758ccb45d8a17af8e140fafa1e97607de5a7ecd53a4c55f69258bfb043d0 go1.22.6.openbsd-386.tar.gz
cc13436c4a644e55bedcea65981eb80ca8317b39b129f5563ab3b6da1391bd47 go1.22.6.openbsd-amd64.tar.gz
aee34f61ba2b0a8f2618f5c7065e20da7714ce7651680509eda30728fe01ee88 go1.22.6.openbsd-arm.tar.gz
c67d57daf8baada93c69c8fb02401270cd33159730b1f2d70d9e724ba1a918cf go1.22.6.openbsd-arm64.tar.gz
03e1f96002e94a6b381bcf66a0a62b9d5f63148682a780d727840ad540185c7c go1.22.6.openbsd-ppc64.tar.gz
0ac2b5bbe2c8a293d284512630e629bf0578aaa7b7b1f39ac4ee182c7924aaad go1.22.6.plan9-386.tar.gz
f9afdab8a72a8d874f023f5605482cc94160843ac768dbd840e6f772d16578c7 go1.22.6.plan9-amd64.tar.gz
4b9f01a47e6a29d57cbb3097b6770583336cef9c8f0d51d3d1451e42a851002e go1.22.6.plan9-arm.tar.gz
46c2552ac7b8d6314a52e14e0a0761aaeebdd6aba5f531de386f4cf2b66ec723 go1.22.6.solaris-amd64.tar.gz
a57821dab76af1ef7a6b62db1628f0caa74343e0c7cb829df9ce8ea0713a3e8e go1.22.6.windows-386.msi
eb734bacc9aabca1273b61dd392bb84a9bb33783f5e2fff2cd6ab9885bbefbe6 go1.22.6.windows-386.zip
1238a3e6892eb8a0eb3fe0640e18ab82ca21cc1a933f16897b2ad081f057b5da go1.22.6.windows-amd64.msi
6023083a6e4d3199b44c37e9ba7b25d9674da20fd846a35ee5f9589d81c21a6a go1.22.6.windows-amd64.zip
6791218c568a3d000cb36317506541d7fd67e7cfe613baaf361ca36cad5e2cd5 go1.22.6.windows-arm.msi
ee41ca83bb07c4fd46a1d6b2d083519bb8ca156fcd9db37ee711234d43126e2f go1.22.6.windows-arm.zip
91c6b3376612095315a0aeae4b03e3da34fabe9dfd4532d023e2a70f913cf22a go1.22.6.windows-arm64.msi
7cf55f357ba8116cd3bff992980e20a704ba451b3dab341cf1787b133d900512 go1.22.6.windows-arm64.zip
# version:golangci 1.63.4
# version:golangci 1.59.0
# https://github.com/golangci/golangci-lint/releases/
# https://github.com/golangci/golangci-lint/releases/download/v1.63.4/
878d017cc360e4fb19510d39852c8189852e3c48e7ce0337577df73507c97d68 golangci-lint-1.63.4-darwin-amd64.tar.gz
a2b630c2ac8466393f0ccbbede4462387b6c190697a70bc2298c6d2123f21bbf golangci-lint-1.63.4-darwin-arm64.tar.gz
8938b74aa92888e561a1c5a4c175110b92f84e7d24733703e6d9ebc39e9cd5f8 golangci-lint-1.63.4-freebsd-386.tar.gz
054903339d620df2e760b978920100986e3b03bcb058f669d520a71dac9c34ed golangci-lint-1.63.4-freebsd-amd64.tar.gz
a19d499f961a02608348e8b626537a88edfaab6e1b6534f1eff742b5d6d750e4 golangci-lint-1.63.4-freebsd-armv6.tar.gz
00d616f0fb275b780ce4d26604bdd7fdbfe6bc9c63acd5a0b31498e1f7511108 golangci-lint-1.63.4-freebsd-armv7.tar.gz
d453688e0eabded3c1a97ff5a2777bb0df5a18851efdaaaf6b472e3e5713c33e golangci-lint-1.63.4-illumos-amd64.tar.gz
6b1bec847fc9f347d53712d05606a49d55d0e3b5c1bacadfed2393f3503de0e9 golangci-lint-1.63.4-linux-386.tar.gz
01abb14a4df47b5ca585eff3c34b105023cba92ec34ff17212dbb83855581690 golangci-lint-1.63.4-linux-amd64.tar.gz
51f0c79d19a92353e0465fb30a4901a0644a975d34e6f399ad2eebc0160bbb24 golangci-lint-1.63.4-linux-arm64.tar.gz
8d0a43f41e8424fbae10f7aa2dc29999f98112817c6dba63d7dc76832940a673 golangci-lint-1.63.4-linux-armv6.tar.gz
1045a047b31e9302c9160c7b0f199f4ac1bd02a1b221a2d9521bd3507f0cf671 golangci-lint-1.63.4-linux-armv7.tar.gz
933fe10ab50ce3bb0806e15a4ae69fe20f0549abf91dea0161236000ca706e67 golangci-lint-1.63.4-linux-loong64.tar.gz
45798630cbad5642862766051199fa862ef3c33d569cab12f01cac4f68e2ddd5 golangci-lint-1.63.4-linux-mips64.tar.gz
86ae25335ddb24975d2c915c1af0c7fad70dce99d0b4614fa4bee392de714aa2 golangci-lint-1.63.4-linux-mips64le.tar.gz
33dabd11aaba4b602938da98bcf49aabab55019557e0115cdc3dbcc3009768fa golangci-lint-1.63.4-linux-ppc64le.tar.gz
4e7a81230a663bcdf30bba5689ce96040abc76994dbc2003dce32c8dca8c06f3 golangci-lint-1.63.4-linux-riscv64.tar.gz
21370b49c7c47f4d9b8f982c952f940b01e65710174c3b4dad7b6452d58f92ec golangci-lint-1.63.4-linux-s390x.tar.gz
255866a6464c7e11bb7edd8e6e6ad54f11e1f01b82ba9ca229698ac788cd9724 golangci-lint-1.63.4-netbsd-386.tar.gz
2798c040ac658bda97224f204795199c81ac97bb207b21c02b664aaed380d5d2 golangci-lint-1.63.4-netbsd-amd64.tar.gz
b910eecffd0064103837e7e1abe870deb8ade22331e6dffe319f430d49399c8e golangci-lint-1.63.4-netbsd-arm64.tar.gz
df2693ef37147b457c3e2089614537dd2ae2e18e53641e756a5b404f4c72d3fa golangci-lint-1.63.4-netbsd-armv6.tar.gz
a28a533366974bd7834c4516cd6075bff3419a508d1ed7aa63ae8182768b352e golangci-lint-1.63.4-netbsd-armv7.tar.gz
368932775fb5c620b324dabf018155f3365f5e33c5af5b26e9321db373f96eea golangci-lint-1.63.4-windows-386.zip
184d13c2b8f5441576bec2a0d8ba7b2d45445595cf796b879a73bcc98c39f8c1 golangci-lint-1.63.4-windows-amd64.zip
4fabf175d5b05ef0858ded49527948eebac50e9093814979fd84555a75fb80a6 golangci-lint-1.63.4-windows-arm64.zip
e92be3f3ff30d4a849fb4b9a4c8d56837dee45269cb405a3ecad52fa034c781b golangci-lint-1.63.4-windows-armv6.zip
c71d348653b8f7fbb109bb10c1a481722bc6b0b2b6e731b897f99ac869f7653e golangci-lint-1.63.4-windows-armv7.zip
# https://github.com/golangci/golangci-lint/releases/download/v1.59.0/
418acf7e255ddc0783e97129c9b03d9311b77826a5311d425a01c708a86417e7 golangci-lint-1.59.0-darwin-amd64.tar.gz
5f6a1d95a6dd69f6e328eb56dd311a38e04cfab79a1305fbf4957f4e203f47b6 golangci-lint-1.59.0-darwin-arm64.tar.gz
8899bf589185d49f747f3e5db9f0bde8a47245a100c64a3dd4d65e8e92cfc4f2 golangci-lint-1.59.0-freebsd-386.tar.gz
658212f138d9df2ac89427e22115af34bf387c0871d70f2a25101718946a014f golangci-lint-1.59.0-freebsd-amd64.tar.gz
4c6395ea40f314d3b6fa17d8997baab93464d5d1deeaab513155e625473bd03a golangci-lint-1.59.0-freebsd-armv6.tar.gz
ff37da4fbaacdb6bbae70fdbdbb1ba932a859956f788c82822fa06bef5b7c6b3 golangci-lint-1.59.0-freebsd-armv7.tar.gz
439739469ed2bda182b1ec276d40c40e02f195537f78e3672996741ad223d6b6 golangci-lint-1.59.0-illumos-amd64.tar.gz
940801d46790e40d0a097d8fee34e2606f0ef148cd039654029b0b8750a15ed6 golangci-lint-1.59.0-linux-386.tar.gz
3b14a439f33c4fff83dbe0349950d984042b9a1feb6c62f82787b598fc3ab5f4 golangci-lint-1.59.0-linux-amd64.tar.gz
c57e6c0b0fa03089a2611dceddd5bc5d206716cccdff8b149da8baac598719a1 golangci-lint-1.59.0-linux-arm64.tar.gz
93149e2d3b25ac754df9a23172403d8aa6d021a7e0d9c090a12f51897f68c9a0 golangci-lint-1.59.0-linux-armv6.tar.gz
d10ac38239d9efee3ee87b55c96cdf3fa09e1a525babe3ffdaaf65ccc48cf3dc golangci-lint-1.59.0-linux-armv7.tar.gz
047338114b4f0d5f08f0fb9a397b03cc171916ed0960be7dfb355c2320cd5e9c golangci-lint-1.59.0-linux-loong64.tar.gz
5632df0f7f8fc03a80a266130faef0b5902d280cf60621f1b2bdc1aef6d97ee9 golangci-lint-1.59.0-linux-mips64.tar.gz
71dd638c82fa4439171e7126d2c7a32b5d103bfdef282cea40c83632cb3d1f4b golangci-lint-1.59.0-linux-mips64le.tar.gz
6cf9ea0d34e91669948483f9ae7f07da319a879344373a1981099fbd890cde00 golangci-lint-1.59.0-linux-ppc64le.tar.gz
af0205fa6fbab197cee613c359947711231739095d21b5c837086233b36ad971 golangci-lint-1.59.0-linux-riscv64.tar.gz
a9d2fb93f3c688ebccef94f5dc96c0b07c4d20bf6556cddebd8442159b0c80f6 golangci-lint-1.59.0-linux-s390x.tar.gz
68ab4c57a847b8ace9679887f2f8b2b6760e57ee29dcde8c3f40dd8bb2654fa2 golangci-lint-1.59.0-netbsd-386.tar.gz
d277b8b435c19406d00de4d509eadf5a024a5782878332e9a1b7c02bb76e87a7 golangci-lint-1.59.0-netbsd-amd64.tar.gz
83211656be8dcfa1545af4f92894409f412d1f37566798cb9460a526593ad62c golangci-lint-1.59.0-netbsd-arm64.tar.gz
6c6866d28bf79fa9817a0f7d2b050890ed109cae80bdb4dfa39536a7226da237 golangci-lint-1.59.0-netbsd-armv6.tar.gz
11587566363bd03ca586b7df9776ccaed569fcd1f3489930ac02f9375b307503 golangci-lint-1.59.0-netbsd-armv7.tar.gz
466181a8967bafa495e41494f93a0bec829c2cf715de874583b0460b3b8ae2b8 golangci-lint-1.59.0-windows-386.zip
3317d8a87a99a49a0a1321d295c010790e6dbf43ee96b318f4b8bb23eae7a565 golangci-lint-1.59.0-windows-amd64.zip
b3af955c7fceac8220a36fc799e1b3f19d3b247d32f422caac5f9845df8f7316 golangci-lint-1.59.0-windows-arm64.zip
6f083c7d0c764e5a0e5bde46ee3e91ae357d80c194190fe1d9754392e9064c7e golangci-lint-1.59.0-windows-armv6.zip
3709b4dd425deadab27748778d08e03c0f804d7748f7dd5b6bb488d98aa031c7 golangci-lint-1.59.0-windows-armv7.zip
# This is the builder on PPA that will build Go itself (inception-y), don't modify!
#

@ -24,14 +24,9 @@ Usage: go run build/ci.go <command> <command flags/arguments>
Available commands are:
lint -- runs certain pre-selected linters
check_tidy -- verifies that everything is 'go mod tidy'-ed
check_generate -- verifies that everything is 'go generate'-ed
check_baddeps -- verifies that certain dependencies are avoided
install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
test [ -coverage ] [ packages... ] -- runs the tests
install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
test [ -coverage ] [ packages... ] -- runs the tests
lint -- runs certain pre-selected linters
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives build artifacts
importkeys -- imports signing keys from env
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
@ -44,23 +39,26 @@ package main
import (
"bytes"
"crypto/sha256"
"encoding/base64"
"flag"
"fmt"
"io"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"slices"
"strconv"
"strings"
"time"
"github.com/cespare/cp"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto/signify"
"github.com/ethereum/go-ethereum/internal/build"
"github.com/ethereum/go-ethereum/internal/version"
"github.com/ethereum/go-ethereum/params"
)
var (
@ -74,6 +72,7 @@ var (
allToolsArchiveFiles = []string{
"COPYING",
executablePath("abigen"),
executablePath("bootnode"),
executablePath("evm"),
executablePath("geth"),
executablePath("rlpdump"),
@ -86,6 +85,10 @@ var (
BinaryName: "abigen",
Description: "Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages.",
},
{
BinaryName: "bootnode",
Description: "Ethereum bootnode.",
},
{
BinaryName: "evm",
Description: "Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode.",
@ -107,7 +110,7 @@ var (
// A debian package is created for all executables listed here.
debEthereum = debPackage{
Name: "ethereum",
Version: version.Semantic,
Version: params.Version,
Executables: debExecutables,
}
@ -118,12 +121,13 @@ var (
// Distros for which packages are created
debDistros = []string{
"xenial", // 16.04, EOL: 04/2026
"bionic", // 18.04, EOL: 04/2028
"focal", // 20.04, EOL: 04/2030
"jammy", // 22.04, EOL: 04/2032
"noble", // 24.04, EOL: 04/2034
"oracular", // 24.10, EOL: 07/2025
"xenial", // 16.04, EOL: 04/2026
"bionic", // 18.04, EOL: 04/2028
"focal", // 20.04, EOL: 04/2030
"jammy", // 22.04, EOL: 04/2032
"noble", // 24.04, EOL: 04/2034
"mantic", // 23.10, EOL: 07/2024
}
// This is where the tests should be unpacked.
@ -142,7 +146,7 @@ func executablePath(name string) string {
func main() {
log.SetFlags(log.Lshortfile)
if !build.FileExist(filepath.Join("build", "ci.go")) {
if !common.FileExist(filepath.Join("build", "ci.go")) {
log.Fatal("this script must be run from the root of the repository")
}
if len(os.Args) < 2 {
@ -155,16 +159,10 @@ func main() {
doTest(os.Args[2:])
case "lint":
doLint(os.Args[2:])
case "check_tidy":
doCheckTidy()
case "check_generate":
doCheckGenerate()
case "check_baddeps":
doCheckBadDeps()
case "archive":
doArchive(os.Args[2:])
case "dockerx":
doDockerBuildx(os.Args[2:])
case "docker":
doDocker(os.Args[2:])
case "debsrc":
doDebianSource(os.Args[2:])
case "nsis":
@ -173,6 +171,8 @@ func main() {
doPurge(os.Args[2:])
case "sanitycheck":
doSanityCheck()
case "generate":
doGenerate()
default:
log.Fatal("unknown command ", os.Args[1])
}
@ -207,6 +207,12 @@ func doInstall(cmdline []string) {
// Configure the build.
gobuild := tc.Go("build", buildFlags(env, *staticlink, buildTags)...)
// arm64 CI builders are memory-constrained and can't handle concurrent builds,
// so concurrency is disabled here. This check isn't ideal; it should probably
// check for something in env instead.
if env.CI && runtime.GOARCH == "arm64" {
gobuild.Args = append(gobuild.Args, "-p", "1")
}
// We use -trimpath to avoid leaking local paths into the built executables.
gobuild.Args = append(gobuild.Args, "-trimpath")
@ -222,7 +228,8 @@ func doInstall(cmdline []string) {
// Do the build!
for _, pkg := range packages {
args := slices.Clone(gobuild.Args)
args := make([]string, len(gobuild.Args))
copy(args, gobuild.Args)
args = append(args, "-o", executablePath(path.Base(pkg)))
args = append(args, pkg)
build.MustRun(&exec.Cmd{Path: gobuild.Path, Args: args, Env: gobuild.Env})
@ -232,10 +239,6 @@ func doInstall(cmdline []string) {
// buildFlags returns the go tool flags for building.
func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (flags []string) {
var ld []string
// See https://github.com/golang/go/issues/33772#issuecomment-528176001
// We need to set --buildid to the linker here, and also pass --build-id to the
// cgo-linker further down.
ld = append(ld, "--buildid=none")
if env.Commit != "" {
ld = append(ld, "-X", "github.com/ethereum/go-ethereum/internal/version.gitCommit="+env.Commit)
ld = append(ld, "-X", "github.com/ethereum/go-ethereum/internal/version.gitDate="+env.Date)
@ -248,11 +251,7 @@ func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (
if runtime.GOOS == "linux" {
// Enforce the stacksize to 8M, which is the case on most platforms apart from
// alpine Linux.
// See https://sourceware.org/binutils/docs-2.23.1/ld/Options.html#Options
// regarding the options --build-id=none and --strip-all. It is needed for
// reproducible builds; removing references to temporary files in C-land, and
// making build-id reproducibly absent.
extld := []string{"-Wl,-z,stack-size=0x800000,--build-id=none,--strip-all"}
extld := []string{"-Wl,-z,stack-size=0x800000"}
if staticLinking {
extld = append(extld, "-static")
// Under static linking, use of certain glibc features must be
@ -299,7 +298,7 @@ func doTest(cmdline []string) {
gotest := tc.Go("test")
// CI needs a bit more time for the statetests (default 10m).
gotest.Args = append(gotest.Args, "-timeout=30m")
gotest.Args = append(gotest.Args, "-timeout=20m")
// Enable CKZG backend in CI.
gotest.Args = append(gotest.Args, "-tags=ckzg")
@ -350,93 +349,84 @@ func downloadSpecTestFixtures(csdb *build.ChecksumDB, cachedir string) string {
return filepath.Join(cachedir, base)
}
// doCheckTidy asserts that the Go module files are tidied already.
func doCheckTidy() {
targets := []string{"go.mod", "go.sum"}
hashes, err := build.HashFiles(targets)
// hashSourceFiles iterates all files under the top-level project directory
// computing the hash of each file (excluding files within the tests
// subrepo)
func hashSourceFiles() (map[string]common.Hash, error) {
res := make(map[string]common.Hash)
err := filepath.WalkDir(".", func(path string, d os.DirEntry, err error) error {
if strings.HasPrefix(path, filepath.FromSlash("tests/testdata")) {
return filepath.SkipDir
}
if !d.Type().IsRegular() {
return nil
}
// open the file and hash it
f, err := os.OpenFile(path, os.O_RDONLY, 0666)
if err != nil {
return err
}
hasher := sha256.New()
if _, err := io.Copy(hasher, f); err != nil {
return err
}
res[path] = common.Hash(hasher.Sum(nil))
return nil
})
if err != nil {
log.Fatalf("failed to hash go.mod/go.sum: %v", err)
return nil, err
}
build.MustRun(new(build.GoToolchain).Go("mod", "tidy"))
tidied, err := build.HashFiles(targets)
if err != nil {
log.Fatalf("failed to rehash go.mod/go.sum: %v", err)
}
if updates := build.DiffHashes(hashes, tidied); len(updates) > 0 {
log.Fatalf("files changed on running 'go mod tidy': %v", updates)
}
fmt.Println("No untidy module files detected.")
return res, nil
}
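
A note on the new check_tidy flow: it boils down to hashing go.mod and go.sum, running 'go mod tidy', re-hashing, and failing if anything changed. A minimal standalone sketch of the same idea, using only the standard library instead of the internal build.HashFiles/DiffHashes helpers (file list and error handling simplified for illustration):

package main

import (
    "crypto/sha256"
    "fmt"
    "log"
    "os"
    "os/exec"
)

// hashFile returns the SHA-256 digest of a file's contents.
func hashFile(path string) ([32]byte, error) {
    data, err := os.ReadFile(path)
    if err != nil {
        return [32]byte{}, err
    }
    return sha256.Sum256(data), nil
}

func main() {
    targets := []string{"go.mod", "go.sum"}

    // Hash the module files before tidying.
    before := make(map[string][32]byte)
    for _, t := range targets {
        h, err := hashFile(t)
        if err != nil {
            log.Fatalf("failed to hash %s: %v", t, err)
        }
        before[t] = h
    }
    // Run 'go mod tidy', then re-hash and compare.
    tidy := exec.Command("go", "mod", "tidy")
    tidy.Stdout, tidy.Stderr = os.Stdout, os.Stderr
    if err := tidy.Run(); err != nil {
        log.Fatalf("go mod tidy failed: %v", err)
    }
    for _, t := range targets {
        h, err := hashFile(t)
        if err != nil {
            log.Fatalf("failed to rehash %s: %v", t, err)
        }
        if h != before[t] {
            log.Fatalf("%s changed on running 'go mod tidy'", t)
        }
    }
    fmt.Println("No untidy module files detected.")
}
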
// doCheckGenerate ensures that re-generating generated files does not cause
// any mutations in the source file tree.
func doCheckGenerate() {
// doGenerate ensures that re-generating generated files does not cause
// any mutations in the source file tree: i.e. all generated files were
// updated and committed. Any stale generated files are updated.
func doGenerate() {
var (
tc = new(build.GoToolchain)
cachedir = flag.String("cachedir", "./build/cache", "directory for caching binaries.")
verify = flag.Bool("verify", false, "check whether any files are changed by go generate")
)
// Compute the origin hashes of all the files
var hashes map[string][32]byte
var err error
hashes, err = build.HashFolder(".", []string{"tests/testdata", "build/cache"})
if err != nil {
log.Fatal("Error computing hashes", "err", err)
protocPath := downloadProtoc(*cachedir)
protocGenGoPath := downloadProtocGenGo(*cachedir)
var preHashes map[string]common.Hash
if *verify {
var err error
preHashes, err = hashSourceFiles()
if err != nil {
log.Fatal("failed to compute map of source hashes", "err", err)
}
}
// Run any go generate steps we might be missing
var (
protocPath = downloadProtoc(*cachedir)
protocGenGoPath = downloadProtocGenGo(*cachedir)
)
c := new(build.GoToolchain).Go("generate", "./...")
c := tc.Go("generate", "./...")
pathList := []string{filepath.Join(protocPath, "bin"), protocGenGoPath, os.Getenv("PATH")}
c.Env = append(c.Env, "PATH="+strings.Join(pathList, string(os.PathListSeparator)))
build.MustRun(c)
// Check if generate file hashes have changed
generated, err := build.HashFolder(".", []string{"tests/testdata", "build/cache"})
if err != nil {
log.Fatalf("Error re-computing hashes: %v", err)
if !*verify {
return
}
updates := build.DiffHashes(hashes, generated)
for _, file := range updates {
log.Printf("File changed: %s", file)
// Check if files were changed.
postHashes, err := hashSourceFiles()
if err != nil {
log.Fatal("error computing source tree file hashes", "err", err)
}
updates := []string{}
for path, postHash := range postHashes {
preHash, ok := preHashes[path]
if !ok || preHash != postHash {
updates = append(updates, path)
}
}
for _, updatedFile := range updates {
fmt.Fprintf(os.Stderr, "changed file %s\n", updatedFile)
}
if len(updates) != 0 {
log.Fatal("One or more generated files were updated by running 'go generate ./...'")
}
fmt.Println("No stale files detected.")
}
// doCheckBadDeps verifies whether certain unintended dependencies between some
// packages leak into the codebase due to a refactor. This is not an exhaustive
// list, but rather something we build up over time at sensitive places.
func doCheckBadDeps() {
baddeps := [][2]string{
// Rawdb tends to be a dumping ground for db utils, sometimes leaking the db itself
{"github.com/ethereum/go-ethereum/core/rawdb", "github.com/ethereum/go-ethereum/ethdb/leveldb"},
{"github.com/ethereum/go-ethereum/core/rawdb", "github.com/ethereum/go-ethereum/ethdb/pebbledb"},
}
tc := new(build.GoToolchain)
var failed bool
for _, rule := range baddeps {
out, err := tc.Go("list", "-deps", rule[0]).CombinedOutput()
if err != nil {
log.Fatalf("Failed to list '%s' dependencies: %v", rule[0], err)
}
for _, line := range strings.Split(string(out), "\n") {
if strings.TrimSpace(line) == rule[1] {
log.Printf("Found bad dependency '%s' -> '%s'", rule[0], rule[1])
failed = true
}
}
}
if failed {
log.Fatalf("Bad dependencies detected.")
}
fmt.Println("No bad dependencies detected.")
}
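
The check above is essentially 'go list -deps <pkg>' followed by a scan for forbidden import paths. A standalone sketch of the same idea using os/exec directly rather than the internal GoToolchain wrapper; the package pair is the leveldb rule from the table above:

package main

import (
    "fmt"
    "log"
    "os/exec"
    "strings"
)

// dependsOn reports whether pkg transitively depends on the banned package,
// judging by the output of 'go list -deps'.
func dependsOn(pkg, banned string) (bool, error) {
    out, err := exec.Command("go", "list", "-deps", pkg).CombinedOutput()
    if err != nil {
        return false, fmt.Errorf("go list failed: %v\n%s", err, out)
    }
    for _, line := range strings.Split(string(out), "\n") {
        if strings.TrimSpace(line) == banned {
            return true, nil
        }
    }
    return false, nil
}

func main() {
    pkg := "github.com/ethereum/go-ethereum/core/rawdb"
    banned := "github.com/ethereum/go-ethereum/ethdb/leveldb"
    bad, err := dependsOn(pkg, banned)
    if err != nil {
        log.Fatal(err)
    }
    if bad {
        log.Fatalf("bad dependency: %s -> %s", pkg, banned)
    }
    fmt.Println("No bad dependencies detected.")
}
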
// doLint runs golangci-lint on requested packages.
@ -596,7 +586,7 @@ func doArchive(cmdline []string) {
var (
env = build.Env()
basegeth = archiveBasename(*arch, version.Archive(env.Commit))
basegeth = archiveBasename(*arch, params.ArchiveVersion(env.Commit))
geth = "geth-" + basegeth + ext
alltools = "geth-alltools-" + basegeth + ext
)
@ -681,9 +671,10 @@ func maybeSkipArchive(env build.Environment) {
}
// Builds the docker images and optionally uploads them to Docker Hub.
func doDockerBuildx(cmdline []string) {
func doDocker(cmdline []string) {
var (
platform = flag.String("platform", "", `Push a multi-arch docker image for the specified architectures (usually "linux/amd64,linux/arm64")`)
image = flag.Bool("image", false, `Whether to build and push an arch specific docker image`)
manifest = flag.String("manifest", "", `Push a multi-arch docker image for the specified architectures (usually "amd64,arm64")`)
upload = flag.String("upload", "", `Where to upload the docker image (usually "ethereum/client-go")`)
)
flag.CommandLine.Parse(cmdline)
@ -716,28 +707,131 @@ func doDockerBuildx(cmdline []string) {
case env.Branch == "master":
tags = []string{"latest"}
case strings.HasPrefix(env.Tag, "v1."):
tags = []string{"stable", fmt.Sprintf("release-%v", version.Family), "v" + version.Semantic}
tags = []string{"stable", fmt.Sprintf("release-1.%d", params.VersionMinor), "v" + params.Version}
}
// Need to create a multi-arch builder
build.MustRunCommand("docker", "buildx", "create", "--use", "--name", "multi-arch-builder", "--platform", *platform)
// If architecture specific image builds are requested, build and push them
if *image {
build.MustRunCommand("docker", "build", "--build-arg", "COMMIT="+env.Commit, "--build-arg", "VERSION="+params.VersionWithMeta, "--build-arg", "BUILDNUM="+env.Buildnum, "--tag", fmt.Sprintf("%s:TAG", *upload), ".")
build.MustRunCommand("docker", "build", "--build-arg", "COMMIT="+env.Commit, "--build-arg", "VERSION="+params.VersionWithMeta, "--build-arg", "BUILDNUM="+env.Buildnum, "--tag", fmt.Sprintf("%s:alltools-TAG", *upload), "-f", "Dockerfile.alltools", ".")
for _, spec := range []struct {
file string
base string
}{
{file: "Dockerfile", base: fmt.Sprintf("%s:", *upload)},
{file: "Dockerfile.alltools", base: fmt.Sprintf("%s:alltools-", *upload)},
} {
for _, tag := range tags { // latest, stable etc
gethImage := fmt.Sprintf("%s%s", spec.base, tag)
build.MustRunCommand("docker", "buildx", "build",
"--build-arg", "COMMIT="+env.Commit,
"--build-arg", "VERSION="+version.WithMeta,
"--build-arg", "BUILDNUM="+env.Buildnum,
"--tag", gethImage,
"--platform", *platform,
"--push",
"--file", spec.file, ".")
// Tag and upload the images to Docker Hub
for _, tag := range tags {
gethImage := fmt.Sprintf("%s:%s-%s", *upload, tag, runtime.GOARCH)
toolImage := fmt.Sprintf("%s:alltools-%s-%s", *upload, tag, runtime.GOARCH)
// If the image already exists (non version tag), check the build
// number to prevent overwriting a newer commit if concurrent builds
// are running. This is still a tiny bit racy if two publishes are
// done at the same time, but that's extremely unlikely even on the
// master branch.
for _, img := range []string{gethImage, toolImage} {
if exec.Command("docker", "pull", img).Run() != nil {
continue // Generally the only failure is a missing image, which is good
}
buildnum, err := exec.Command("docker", "inspect", "--format", "{{index .Config.Labels \"buildnum\"}}", img).CombinedOutput()
if err != nil {
log.Fatalf("Failed to inspect container: %v\nOutput: %s", err, string(buildnum))
}
buildnum = bytes.TrimSpace(buildnum)
if len(buildnum) > 0 && len(env.Buildnum) > 0 {
oldnum, err := strconv.Atoi(string(buildnum))
if err != nil {
log.Fatalf("Failed to parse old image build number: %v", err)
}
newnum, err := strconv.Atoi(env.Buildnum)
if err != nil {
log.Fatalf("Failed to parse current build number: %v", err)
}
if oldnum > newnum {
log.Fatalf("Current build number %d not newer than existing %d", newnum, oldnum)
} else {
log.Printf("Updating %s from build %d to %d", img, oldnum, newnum)
}
}
}
build.MustRunCommand("docker", "image", "tag", fmt.Sprintf("%s:TAG", *upload), gethImage)
build.MustRunCommand("docker", "image", "tag", fmt.Sprintf("%s:alltools-TAG", *upload), toolImage)
build.MustRunCommand("docker", "push", gethImage)
build.MustRunCommand("docker", "push", toolImage)
}
}
// If multi-arch image manifest push is requested, assemble it
if len(*manifest) != 0 {
// Since different architectures are pushed by different builders, wait
// until all required images are updated.
var mismatch bool
for i := 0; i < 2; i++ { // 2 attempts, second is race check
mismatch = false // hope there's no mismatch now
for _, tag := range tags {
for _, arch := range strings.Split(*manifest, ",") {
gethImage := fmt.Sprintf("%s:%s-%s", *upload, tag, arch)
toolImage := fmt.Sprintf("%s:alltools-%s-%s", *upload, tag, arch)
for _, img := range []string{gethImage, toolImage} {
if out, err := exec.Command("docker", "pull", img).CombinedOutput(); err != nil {
log.Printf("Required image %s unavailable: %v\nOutput: %s", img, err, out)
mismatch = true
break
}
buildnum, err := exec.Command("docker", "inspect", "--format", "{{index .Config.Labels \"buildnum\"}}", img).CombinedOutput()
if err != nil {
log.Fatalf("Failed to inspect container: %v\nOutput: %s", err, string(buildnum))
}
buildnum = bytes.TrimSpace(buildnum)
if string(buildnum) != env.Buildnum {
log.Printf("Build number mismatch on %s: want %s, have %s", img, env.Buildnum, buildnum)
mismatch = true
break
}
}
if mismatch {
break
}
}
if mismatch {
break
}
}
if mismatch {
// Build numbers mismatch; retry in a short while to
// avoid concurrent failures in both publisher images. If
// the retry fails too, the concurrent builder is still
// crunching; let it do the publish.
if i == 0 {
time.Sleep(30 * time.Second)
}
continue
}
break
}
if mismatch {
log.Println("Relinquishing publish to other builder")
return
}
// Assemble and push the Geth manifest image
for _, tag := range tags {
gethImage := fmt.Sprintf("%s:%s", *upload, tag)
var gethSubImages []string
for _, arch := range strings.Split(*manifest, ",") {
gethSubImages = append(gethSubImages, gethImage+"-"+arch)
}
build.MustRunCommand("docker", append([]string{"manifest", "create", gethImage}, gethSubImages...)...)
build.MustRunCommand("docker", "manifest", "push", gethImage)
}
// Assemble and push the alltools manifest image
for _, tag := range tags {
toolImage := fmt.Sprintf("%s:alltools-%s", *upload, tag)
var toolSubImages []string
for _, arch := range strings.Split(*manifest, ",") {
toolSubImages = append(toolSubImages, toolImage+"-"+arch)
}
build.MustRunCommand("docker", append([]string{"manifest", "create", toolImage}, toolSubImages...)...)
build.MustRunCommand("docker", "manifest", "push", toolImage)
}
}
}
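
For orientation, the buildx path above reduces to one 'docker buildx build --platform ... --push' invocation per tag and Dockerfile, which replaces the per-arch image and manifest juggling on the other side of this hunk. A hedged sketch of a single such invocation; the image name, tag, platform list and commit value are placeholders, and the VERSION/BUILDNUM build args from the real script are omitted:

package main

import (
    "log"
    "os"
    "os/exec"
)

func main() {
    var (
        upload   = "ethereum/client-go"      // destination repo (placeholder)
        tag      = "stable"                  // one of the computed tags
        platform = "linux/amd64,linux/arm64" // value of the -platform flag
        commit   = "deadbeef"                // env.Commit in the real script
    )
    args := []string{
        "buildx", "build",
        "--build-arg", "COMMIT=" + commit,
        "--tag", upload + ":" + tag,
        "--platform", platform,
        "--push",
        "--file", "Dockerfile", ".",
    }
    cmd := exec.Command("docker", args...)
    cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
    if err := cmd.Run(); err != nil {
        log.Fatalf("docker buildx build failed: %v", err)
    }
}
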
@ -879,7 +973,7 @@ func ppaUpload(workdir, ppa, sshUser string, files []string) {
var idfile string
if sshkey := getenvBase64("PPA_SSH_KEY"); len(sshkey) > 0 {
idfile = filepath.Join(workdir, "sshkey")
if !build.FileExist(idfile) {
if !common.FileExist(idfile) {
os.WriteFile(idfile, sshkey, 0600)
}
}
@ -1104,19 +1198,19 @@ func doWindowsInstaller(cmdline []string) {
// Build the installer. This assumes that all the needed files have been previously
// built (don't mix building and packaging to keep cross compilation complexity to a
// minimum).
ver := strings.Split(version.Semantic, ".")
version := strings.Split(params.Version, ".")
if env.Commit != "" {
ver[2] += "-" + env.Commit[:8]
version[2] += "-" + env.Commit[:8]
}
installer, err := filepath.Abs("geth-" + archiveBasename(*arch, version.Archive(env.Commit)) + ".exe")
installer, err := filepath.Abs("geth-" + archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + ".exe")
if err != nil {
log.Fatalf("Failed to convert installer file path: %v", err)
}
build.MustRunCommand("makensis.exe",
"/DOUTPUTFILE="+installer,
"/DMAJORVERSION="+ver[0],
"/DMINORVERSION="+ver[1],
"/DBUILDVERSION="+ver[2],
"/DMAJORVERSION="+version[0],
"/DMINORVERSION="+version[1],
"/DBUILDVERSION="+version[2],
"/DARCH="+*arch,
filepath.Join(*workdir, "geth.nsi"),
)

@ -20,7 +20,6 @@ import (
"context"
"fmt"
"os"
"slices"
"github.com/ethereum/go-ethereum/beacon/blsync"
"github.com/ethereum/go-ethereum/cmd/utils"
@ -34,7 +33,7 @@ import (
func main() {
app := flags.NewApp("beacon light syncer tool")
app.Flags = slices.Concat([]cli.Flag{
app.Flags = flags.Merge([]cli.Flag{
utils.BeaconApiFlag,
utils.BeaconApiHeaderFlag,
utils.BeaconThresholdFlag,
@ -46,7 +45,7 @@ func main() {
//TODO datadir for optional permanent database
utils.MainnetFlag,
utils.SepoliaFlag,
utils.HoleskyFlag,
utils.GoerliFlag,
utils.BlsyncApiFlag,
utils.BlsyncJWTSecretFlag,
},
@ -70,7 +69,7 @@ func main() {
func sync(ctx *cli.Context) error {
// set up blsync
client := blsync.NewClient(utils.MakeBeaconLightConfig(ctx))
client := blsync.NewClient(ctx)
client.SetEngineRPC(makeRPCClient(ctx))
client.Start()

209
cmd/bootnode/main.go Normal file

@ -0,0 +1,209 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// bootnode runs a bootstrap node for the Ethereum Discovery Protocol.
package main
import (
"crypto/ecdsa"
"flag"
"fmt"
"net"
"os"
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil"
)
func main() {
var (
listenAddr = flag.String("addr", ":30301", "listen address")
genKey = flag.String("genkey", "", "generate a node key")
writeAddr = flag.Bool("writeaddress", false, "write out the node's public key and quit")
nodeKeyFile = flag.String("nodekey", "", "private key filename")
nodeKeyHex = flag.String("nodekeyhex", "", "private key as hex (for testing)")
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|pmp:<IP>|extip:<IP>)")
netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
verbosity = flag.Int("verbosity", 3, "log verbosity (0-5)")
vmodule = flag.String("vmodule", "", "log verbosity pattern")
nodeKey *ecdsa.PrivateKey
err error
)
flag.Parse()
glogger := log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false))
slogVerbosity := log.FromLegacyLevel(*verbosity)
glogger.Verbosity(slogVerbosity)
glogger.Vmodule(*vmodule)
log.SetDefault(log.NewLogger(glogger))
natm, err := nat.Parse(*natdesc)
if err != nil {
utils.Fatalf("-nat: %v", err)
}
switch {
case *genKey != "":
nodeKey, err = crypto.GenerateKey()
if err != nil {
utils.Fatalf("could not generate key: %v", err)
}
if err = crypto.SaveECDSA(*genKey, nodeKey); err != nil {
utils.Fatalf("%v", err)
}
if !*writeAddr {
return
}
case *nodeKeyFile == "" && *nodeKeyHex == "":
utils.Fatalf("Use -nodekey or -nodekeyhex to specify a private key")
case *nodeKeyFile != "" && *nodeKeyHex != "":
utils.Fatalf("Options -nodekey and -nodekeyhex are mutually exclusive")
case *nodeKeyFile != "":
if nodeKey, err = crypto.LoadECDSA(*nodeKeyFile); err != nil {
utils.Fatalf("-nodekey: %v", err)
}
case *nodeKeyHex != "":
if nodeKey, err = crypto.HexToECDSA(*nodeKeyHex); err != nil {
utils.Fatalf("-nodekeyhex: %v", err)
}
}
if *writeAddr {
fmt.Printf("%x\n", crypto.FromECDSAPub(&nodeKey.PublicKey)[1:])
os.Exit(0)
}
var restrictList *netutil.Netlist
if *netrestrict != "" {
restrictList, err = netutil.ParseNetlist(*netrestrict)
if err != nil {
utils.Fatalf("-netrestrict: %v", err)
}
}
addr, err := net.ResolveUDPAddr("udp", *listenAddr)
if err != nil {
utils.Fatalf("-ResolveUDPAddr: %v", err)
}
conn, err := net.ListenUDP("udp", addr)
if err != nil {
utils.Fatalf("-ListenUDP: %v", err)
}
defer conn.Close()
db, _ := enode.OpenDB("")
ln := enode.NewLocalNode(db, nodeKey)
listenerAddr := conn.LocalAddr().(*net.UDPAddr)
if natm != nil && !listenerAddr.IP.IsLoopback() {
natAddr := doPortMapping(natm, ln, listenerAddr)
if natAddr != nil {
listenerAddr = natAddr
}
}
printNotice(&nodeKey.PublicKey, *listenerAddr)
cfg := discover.Config{
PrivateKey: nodeKey,
NetRestrict: restrictList,
}
if *runv5 {
if _, err := discover.ListenV5(conn, ln, cfg); err != nil {
utils.Fatalf("%v", err)
}
} else {
if _, err := discover.ListenUDP(conn, ln, cfg); err != nil {
utils.Fatalf("%v", err)
}
}
select {}
}
func printNotice(nodeKey *ecdsa.PublicKey, addr net.UDPAddr) {
if addr.IP.IsUnspecified() {
addr.IP = net.IP{127, 0, 0, 1}
}
n := enode.NewV4(nodeKey, addr.IP, 0, addr.Port)
fmt.Println(n.URLv4())
fmt.Println("Note: you're using cmd/bootnode, a developer tool.")
fmt.Println("We recommend using a regular node as bootstrap node for production deployments.")
}
func doPortMapping(natm nat.Interface, ln *enode.LocalNode, addr *net.UDPAddr) *net.UDPAddr {
const (
protocol = "udp"
name = "ethereum discovery"
)
newLogger := func(external int, internal int) log.Logger {
return log.New("proto", protocol, "extport", external, "intport", internal, "interface", natm)
}
var (
intport = addr.Port
extaddr = &net.UDPAddr{IP: addr.IP, Port: addr.Port}
mapTimeout = nat.DefaultMapTimeout
log = newLogger(addr.Port, intport)
)
addMapping := func() {
// Get the external address.
var err error
extaddr.IP, err = natm.ExternalIP()
if err != nil {
log.Debug("Couldn't get external IP", "err", err)
return
}
// Create the mapping.
p, err := natm.AddMapping(protocol, extaddr.Port, intport, name, mapTimeout)
if err != nil {
log.Debug("Couldn't add port mapping", "err", err)
return
}
if p != uint16(extaddr.Port) {
extaddr.Port = int(p)
log = newLogger(extaddr.Port, intport)
log.Info("NAT mapped alternative port")
} else {
log.Info("NAT mapped port")
}
// Update IP/port information of the local node.
ln.SetStaticIP(extaddr.IP)
ln.SetFallbackUDP(extaddr.Port)
}
// Perform mapping once, synchronously.
log.Info("Attempting port mapping")
addMapping()
// Refresh the mapping periodically.
go func() {
refresh := time.NewTimer(mapTimeout)
defer refresh.Stop()
for range refresh.C {
addMapping()
refresh.Reset(mapTimeout)
}
}()
return extaddr
}
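
As a quick illustration of what -genkey and -writeaddress produce, the sketch below generates a node key and prints the raw public key plus the corresponding enode URL, relying only on the crypto and enode packages already imported by this file; the loopback IP and port are arbitrary:

package main

import (
    "fmt"
    "log"
    "net"

    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
    // Generate a fresh node key, as 'bootnode -genkey' would.
    key, err := crypto.GenerateKey()
    if err != nil {
        log.Fatalf("could not generate key: %v", err)
    }
    // What -writeaddress prints: the uncompressed public key without
    // the leading 0x04 format byte.
    fmt.Printf("%x\n", crypto.FromECDSAPub(&key.PublicKey)[1:])

    // The enode URL that printNotice would show for a local listener.
    n := enode.NewV4(&key.PublicKey, net.IP{127, 0, 0, 1}, 0, 30301)
    fmt.Println(n.URLv4())
}
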

@ -29,7 +29,7 @@ GLOBAL OPTIONS:
--loglevel value log level to emit to the screen (default: 4)
--keystore value Directory for the keystore (default: "$HOME/.ethereum/keystore")
--configdir value Directory for Clef configuration (default: "$HOME/.clef")
--chainid value Chain id to use for signing (1=mainnet, 17000=Holesky) (default: 1)
--chainid value Chain id to use for signing (1=mainnet, 5=Goerli) (default: 1)
--lightkdf Reduce key-derivation RAM & CPU usage at some expense of KDF strength
--nousb Disables monitoring for and managing USB hardware wallets
--pcscdpath value Path to the smartcard daemon (pcscd) socket file (default: "/run/pcscd/pcscd.comm")

@ -100,7 +100,7 @@ var (
chainIdFlag = &cli.Int64Flag{
Name: "chainid",
Value: params.MainnetChainConfig.ChainID.Int64(),
Usage: "Chain id to use for signing (1=mainnet, 17000=Holesky)",
Usage: "Chain id to use for signing (1=mainnet, 5=Goerli)",
}
rpcPortFlag = &cli.IntFlag{
Name: "http.port",

@ -7,7 +7,7 @@ It enables usecases like the following:
* I want to auto-approve transactions to contract `CasinoDapp`, with up to `0.05 ether` in value, to a maximum of `1 ether` per 24h period
* I want to auto-approve transactions to contract `EthAlarmClock` with `data`=`0xdeadbeef`, if `value=0`, `gas < 44k` and `gasPrice < 40Gwei`
The two main features that are required for this to work well are:
The two main features that are required for this to work well are;
1. Rule Implementation: how to create, manage, and interpret rules in a flexible but secure manner
2. Credential management and credentials; how to provide auto-unlock without exposing keys unnecessarily.
@ -29,10 +29,10 @@ function asBig(str) {
// Approve transactions to a certain contract if the value is below a certain limit
function ApproveTx(req) {
var limit = new BigNumber("0xb1a2bc2ec50000")
var limit = big.Newint("0xb1a2bc2ec50000")
var value = asBig(req.transaction.value);
if (req.transaction.to.toLowerCase() == "0xae967917c465db8578ca9024c205720b1a3651a9" && value.lt(limit)) {
if (req.transaction.to.toLowerCase() == "0xae967917c465db8578ca9024c205720b1a3651a9") && value.lt(limit)) {
return "Approve"
}
// If we return "Reject", it will be rejected.

@ -44,7 +44,7 @@ set to standard output. The following filters are supported:
- `-limit <N>` limits the output set to N entries, taking the top N nodes by score
- `-ip <CIDR>` filters nodes by IP subnet
- `-min-age <duration>` filters nodes by 'first seen' time
- `-eth-network <mainnet/sepolia/holesky>` filters nodes by "eth" ENR entry
- `-eth-network <mainnet/goerli/sepolia/holesky>` filters nodes by "eth" ENR entry
- `-les-server` filters nodes by LES server support
- `-snap` filters nodes by snap protocol support

@ -21,7 +21,6 @@ import (
"fmt"
"net"
"net/http"
"slices"
"strconv"
"strings"
"time"
@ -29,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
@ -83,7 +83,7 @@ var (
Name: "listen",
Usage: "Runs a discovery node",
Action: discv4Listen,
Flags: slices.Concat(discoveryNodeFlags, []cli.Flag{
Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{
httpAddrFlag,
}),
}
@ -91,7 +91,7 @@ var (
Name: "crawl",
Usage: "Updates a nodes.json file with random nodes found in the DHT",
Action: discv4Crawl,
Flags: slices.Concat(discoveryNodeFlags, []cli.Flag{crawlTimeoutFlag, crawlParallelismFlag}),
Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{crawlTimeoutFlag, crawlParallelismFlag}),
}
discv4TestCommand = &cli.Command{
Name: "test",

@ -19,11 +19,11 @@ package main
import (
"errors"
"fmt"
"slices"
"time"
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v5test"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/urfave/cli/v2"
)
@ -56,7 +56,7 @@ var (
Name: "crawl",
Usage: "Updates a nodes.json file with random nodes found in the DHT",
Action: discv5Crawl,
Flags: slices.Concat(discoveryNodeFlags, []cli.Flag{
Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{
crawlTimeoutFlag,
}),
}

@ -88,8 +88,7 @@ func (c *cloudflareClient) checkZone(name string) error {
if !strings.HasSuffix(name, "."+zone.Name) {
return fmt.Errorf("CloudFlare zone name %q does not match name %q to be deployed", zone.Name, name)
}
// Necessary permissions for Cloudflare management - Zone:Read, DNS:Read, Zone:Edit, DNS:Edit
needPerms := map[string]bool{"#zone:edit": false, "#zone:read": false, "#dns_records:read": false, "#dns_records:edit": false}
needPerms := map[string]bool{"#zone:edit": false, "#zone:read": false}
for _, perm := range zone.Permissions {
if _, ok := needPerms[perm]; ok {
needPerms[perm] = true

@ -17,7 +17,6 @@
package main
import (
"cmp"
"context"
"errors"
"fmt"
@ -293,7 +292,13 @@ func sortChanges(changes []types.Change) {
if a.Action == b.Action {
return strings.Compare(*a.ResourceRecordSet.Name, *b.ResourceRecordSet.Name)
}
return cmp.Compare(score[string(a.Action)], score[string(b.Action)])
if score[string(a.Action)] < score[string(b.Action)] {
return -1
}
if score[string(a.Action)] > score[string(b.Action)] {
return 1
}
return 0
})
}
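
The sort above orders changes primarily by a per-action score and secondarily by record name. A self-contained sketch of that two-key comparator using slices.SortFunc and cmp.Compare, with a toy change type instead of the AWS SDK one and illustrative score values:

package main

import (
    "cmp"
    "fmt"
    "slices"
    "strings"
)

type change struct {
    Action string
    Name   string
}

func main() {
    // Illustrative scores: lower sorts first.
    score := map[string]int{"CREATE": 1, "UPSERT": 2, "DELETE": 3}
    changes := []change{
        {"DELETE", "b.example.org"},
        {"CREATE", "z.example.org"},
        {"CREATE", "a.example.org"},
    }
    slices.SortFunc(changes, func(a, b change) int {
        if a.Action == b.Action {
            return strings.Compare(a.Name, b.Name)
        }
        return cmp.Compare(score[a.Action], score[b.Action])
    })
    fmt.Println(changes) // [{CREATE a.example.org} {CREATE z.example.org} {DELETE b.example.org}]
}
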

@ -28,6 +28,7 @@ import (
"os"
"path/filepath"
"slices"
"sort"
"strings"
"github.com/ethereum/go-ethereum/common"
@ -40,7 +41,6 @@ import (
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/exp/maps"
)
// Chain is a lightweight blockchain-like store which can read a hivechain
@ -100,6 +100,7 @@ func (c *Chain) AccountsInHashOrder() []state.DumpAccount {
list := make([]state.DumpAccount, len(c.state))
i := 0
for addr, acc := range c.state {
addr := addr
list[i] = acc
list[i].Address = &addr
if len(acc.AddressHash) != 32 {
@ -166,8 +167,11 @@ func (c *Chain) RootAt(height int) common.Hash {
// GetSender returns the address associated with account at the index in the
// pre-funded accounts list.
func (c *Chain) GetSender(idx int) (common.Address, uint64) {
accounts := maps.Keys(c.senders)
slices.SortFunc(accounts, common.Address.Cmp)
var accounts Addresses
for addr := range c.senders {
accounts = append(accounts, addr)
}
sort.Sort(accounts)
addr := accounts[idx]
return addr, c.senders[addr].Nonce
}
@ -257,6 +261,22 @@ func loadGenesis(genesisFile string) (core.Genesis, error) {
return gen, nil
}
type Addresses []common.Address
func (a Addresses) Len() int {
return len(a)
}
func (a Addresses) Less(i, j int) bool {
return bytes.Compare(a[i][:], a[j][:]) < 0
}
func (a Addresses) Swap(i, j int) {
tmp := a[i]
a[i] = a[j]
a[j] = tmp
}
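
The replacement on the other side of this hunk drops the hand-rolled sort.Interface type in favour of maps.Keys plus slices.SortFunc with common.Address.Cmp. A minimal sketch of that pattern; the map contents are made up:

package main

import (
    "fmt"
    "slices"

    "github.com/ethereum/go-ethereum/common"
    "golang.org/x/exp/maps"
)

func main() {
    senders := map[common.Address]uint64{
        common.HexToAddress("0x02"): 7,
        common.HexToAddress("0x01"): 3,
    }
    // Collect the keys and sort them byte-wise via common.Address.Cmp.
    accounts := maps.Keys(senders)
    slices.SortFunc(accounts, common.Address.Cmp)
    for _, addr := range accounts {
        fmt.Println(addr.Hex(), senders[addr])
    }
}
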
func blocksFromFile(chainfile string, gblock *types.Block) ([]*types.Block, error) {
// Load chain.rlp.
fh, err := os.Open(chainfile)

@ -13,7 +13,6 @@
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package ethtest
import (

@ -286,6 +286,7 @@ a key before startingHash (wrong order). The server should return the first avai
}
for i, tc := range tests {
tc := tc
if i > 0 {
t.Log("\n")
}
@ -428,6 +429,7 @@ of the test account. The server should return slots [2,3] (i.e. the 'next availa
}
for i, tc := range tests {
tc := tc
if i > 0 {
t.Log("\n")
}
@ -524,6 +526,7 @@ func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
}
for i, tc := range tests {
tc := tc
if i > 0 {
t.Log("\n")
}
@ -720,6 +723,7 @@ The server should reject the request.`,
}
for i, tc := range tests {
tc := tc
if i > 0 {
t.Log("\n")
}

@ -849,16 +849,7 @@ func (s *Suite) TestBlobViolations(t *utesting.T) {
if code, _, err := conn.Read(); err != nil {
t.Fatalf("expected disconnect on blob violation, got err: %v", err)
} else if code != discMsg {
if code == protoOffset(ethProto)+eth.NewPooledTransactionHashesMsg {
// sometimes we'll get a blob transaction hashes announcement before the disconnect
// because blob transactions are scheduled to be fetched right away.
if code, _, err = conn.Read(); err != nil {
t.Fatalf("expected disconnect on blob violation, got err on second read: %v", err)
}
}
if code != discMsg {
t.Fatalf("expected disconnect on blob violation, got msg code: %d", code)
}
t.Fatalf("expected disconnect on blob violation, got msg code: %d", code)
}
conn.Close()
}

@ -18,6 +18,7 @@
"shanghaiTime": 780,
"cancunTime": 840,
"terminalTotalDifficulty": 9454784,
"terminalTotalDifficultyPassed": true,
"ethash": {}
},
"nonce": "0x0",

@ -194,7 +194,7 @@ func PingExtraData(t *utesting.T) {
}
}
// PingExtraDataWrongFrom sends a PING packet with additional data and wrong 'from' field
// This test sends a PING packet with additional data and wrong 'from' field
// and expects a PONG response.
func PingExtraDataWrongFrom(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2)
@ -215,7 +215,7 @@ func PingExtraDataWrongFrom(t *utesting.T) {
}
}
// PingPastExpiration sends a PING packet with an expiration in the past.
// This test sends a PING packet with an expiration in the past.
// The remote node should not respond.
func PingPastExpiration(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2)
@ -234,7 +234,7 @@ func PingPastExpiration(t *utesting.T) {
}
}
// WrongPacketType sends an invalid packet. The remote node should not respond.
// This test sends an invalid packet. The remote node should not respond.
func WrongPacketType(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2)
defer te.close()
@ -252,7 +252,7 @@ func WrongPacketType(t *utesting.T) {
}
}
// BondThenPingWithWrongFrom verifies that the default behaviour of ignoring 'from' fields is unaffected by
// This test verifies that the default behaviour of ignoring 'from' fields is unaffected by
// the bonding process. After bonding, it pings the target with a different from endpoint.
func BondThenPingWithWrongFrom(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2)
@ -289,7 +289,7 @@ waitForPong:
}
}
// FindnodeWithoutEndpointProof sends FINDNODE. The remote node should not reply
// This test just sends FINDNODE. The remote node should not reply
// because the endpoint proof has not completed.
func FindnodeWithoutEndpointProof(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2)
@ -332,7 +332,7 @@ func BasicFindnode(t *utesting.T) {
}
}
// UnsolicitedNeighbors sends an unsolicited NEIGHBORS packet after the endpoint proof, then sends
// This test sends an unsolicited NEIGHBORS packet after the endpoint proof, then sends
// FINDNODE to read the remote table. The remote node should not return the node contained
// in the unsolicited NEIGHBORS packet.
func UnsolicitedNeighbors(t *utesting.T) {
@ -373,7 +373,7 @@ func UnsolicitedNeighbors(t *utesting.T) {
}
}
// FindnodePastExpiration sends FINDNODE with an expiration timestamp in the past.
// This test sends FINDNODE with an expiration timestamp in the past.
// The remote node should not respond.
func FindnodePastExpiration(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2)
@ -426,7 +426,7 @@ func bond(t *utesting.T, te *testenv) {
}
}
// FindnodeAmplificationInvalidPongHash attempts to perform a traffic amplification attack against a
// This test attempts to perform a traffic amplification attack against a
// 'victim' endpoint using FINDNODE. In this attack scenario, the attacker
// attempts to complete the endpoint proof non-interactively by sending a PONG
// with mismatching reply token from the 'victim' endpoint. The attack works if
@ -478,7 +478,7 @@ func FindnodeAmplificationInvalidPongHash(t *utesting.T) {
}
}
// FindnodeAmplificationWrongIP attempts to perform a traffic amplification attack using FINDNODE.
// This test attempts to perform a traffic amplification attack using FINDNODE.
// The attack works if the remote node does not verify the IP address of FINDNODE
// against the endpoint verification proof done by PING/PONG.
func FindnodeAmplificationWrongIP(t *utesting.T) {

@ -18,7 +18,6 @@ package main
import (
"bytes"
"cmp"
"encoding/json"
"fmt"
"os"
@ -105,7 +104,13 @@ func (ns nodeSet) topN(n int) nodeSet {
byscore = append(byscore, v)
}
slices.SortFunc(byscore, func(a, b nodeJSON) int {
return cmp.Compare(b.Score, a.Score)
if a.Score > b.Score {
return -1
}
if a.Score < b.Score {
return 1
}
return 0
})
result := make(nodeSet, n)
for _, v := range byscore[:n] {

@ -230,6 +230,8 @@ func ethFilter(args []string) (nodeFilter, error) {
switch args[0] {
case "mainnet":
filter = forkid.NewStaticFilter(params.MainnetChainConfig, core.DefaultGenesisBlock().ToBlock())
case "goerli":
filter = forkid.NewStaticFilter(params.GoerliChainConfig, core.DefaultGoerliGenesisBlock().ToBlock())
case "sepolia":
filter = forkid.NewStaticFilter(params.SepoliaChainConfig, core.DefaultSepoliaGenesisBlock().ToBlock())
case "holesky":

@ -22,84 +22,79 @@ import (
"fmt"
"os"
"regexp"
"slices"
"sort"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/tests"
"github.com/urfave/cli/v2"
"golang.org/x/exp/maps"
)
var RunFlag = &cli.StringFlag{
Name: "run",
Value: ".*",
Usage: "Run only those tests matching the regular expression.",
}
var blockTestCommand = &cli.Command{
Action: blockTestCmd,
Name: "blocktest",
Usage: "Executes the given blockchain tests",
ArgsUsage: "<path>",
Flags: slices.Concat([]cli.Flag{
DumpFlag,
HumanReadableFlag,
RunFlag,
WitnessCrossCheckFlag,
}, traceFlags),
ArgsUsage: "<file>",
Flags: []cli.Flag{RunFlag},
}
func blockTestCmd(ctx *cli.Context) error {
path := ctx.Args().First()
if len(path) == 0 {
return errors.New("path argument required")
if len(ctx.Args().First()) == 0 {
return errors.New("path-to-test argument required")
}
var (
collected = collectFiles(path)
results []testResult
)
for _, fname := range collected {
r, err := runBlockTest(ctx, fname)
if err != nil {
return err
}
results = append(results, r...)
}
report(ctx, results)
return nil
}
func runBlockTest(ctx *cli.Context, fname string) ([]testResult, error) {
src, err := os.ReadFile(fname)
if err != nil {
return nil, err
var tracer *tracing.Hooks
// Configure the EVM logger
if ctx.Bool(MachineFlag.Name) {
tracer = logger.NewJSONLogger(&logger.Config{
EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
DisableStack: ctx.Bool(DisableStackFlag.Name),
DisableStorage: ctx.Bool(DisableStorageFlag.Name),
EnableReturnData: !ctx.Bool(DisableReturnDataFlag.Name),
}, os.Stderr)
}
var tests map[string]*tests.BlockTest
// Load the test content from the input file
src, err := os.ReadFile(ctx.Args().First())
if err != nil {
return err
}
var tests map[string]tests.BlockTest
if err = json.Unmarshal(src, &tests); err != nil {
return nil, err
return err
}
re, err := regexp.Compile(ctx.String(RunFlag.Name))
if err != nil {
return nil, fmt.Errorf("invalid regex -%s: %v", RunFlag.Name, err)
return fmt.Errorf("invalid regex -%s: %v", RunFlag.Name, err)
}
tracer := tracerFromFlags(ctx)
// Pull out keys to sort and ensure tests are run in order.
keys := maps.Keys(tests)
slices.Sort(keys)
// Run all the tests.
var results []testResult
// Run them in order
var keys []string
for key := range tests {
keys = append(keys, key)
}
sort.Strings(keys)
for _, name := range keys {
if !re.MatchString(name) {
continue
}
result := &testResult{Name: name, Pass: true}
if err := tests[name].Run(false, rawdb.HashScheme, ctx.Bool(WitnessCrossCheckFlag.Name), tracer, func(res error, chain *core.BlockChain) {
test := tests[name]
if err := test.Run(false, rawdb.HashScheme, false, tracer, func(res error, chain *core.BlockChain) {
if ctx.Bool(DumpFlag.Name) {
if s, _ := chain.State(); s != nil {
result.State = dump(s)
if state, _ := chain.State(); state != nil {
fmt.Println(string(state.Dump(nil)))
}
}
}); err != nil {
result.Pass, result.Error = false, err.Error()
return fmt.Errorf("test %v: %w", name, err)
}
results = append(results, *result)
}
return results, nil
return nil
}
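
The updated runner follows a common pattern: unmarshal a map of named tests, sort the keys for a deterministic order, and skip names that don't match the -run regexp. A stripped-down sketch of just that filtering loop; the test names and the pattern are fabricated:

package main

import (
    "fmt"
    "regexp"
    "slices"

    "golang.org/x/exp/maps"
)

func main() {
    tests := map[string]string{
        "blockhash_basic":  "...",
        "withdrawals_push": "...",
        "blockhash_bounds": "...",
    }
    re := regexp.MustCompile(`^blockhash`) // what -run would supply

    // Sort the keys so tests always run in the same order.
    keys := maps.Keys(tests)
    slices.Sort(keys)
    for _, name := range keys {
        if !re.MatchString(name) {
            continue
        }
        fmt.Println("running", name)
    }
}
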

55
cmd/evm/compiler.go Normal file

@ -0,0 +1,55 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"errors"
"fmt"
"os"
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
"github.com/urfave/cli/v2"
)
var compileCommand = &cli.Command{
Action: compileCmd,
Name: "compile",
Usage: "Compiles easm source to evm binary",
ArgsUsage: "<file>",
}
func compileCmd(ctx *cli.Context) error {
debug := ctx.Bool(DebugFlag.Name)
if len(ctx.Args().First()) == 0 {
return errors.New("filename required")
}
fn := ctx.Args().First()
src, err := os.ReadFile(fn)
if err != nil {
return err
}
bin, err := compiler.Compile(fn, src, debug)
if err != nil {
return err
}
fmt.Println(bin)
return nil
}

55
cmd/evm/disasm.go Normal file

@ -0,0 +1,55 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"errors"
"fmt"
"os"
"strings"
"github.com/ethereum/go-ethereum/core/asm"
"github.com/urfave/cli/v2"
)
var disasmCommand = &cli.Command{
Action: disasmCmd,
Name: "disasm",
Usage: "Disassembles evm binary",
ArgsUsage: "<file>",
}
func disasmCmd(ctx *cli.Context) error {
var in string
switch {
case len(ctx.Args().First()) > 0:
fn := ctx.Args().First()
input, err := os.ReadFile(fn)
if err != nil {
return err
}
in = string(input)
case ctx.IsSet(InputFlag.Name):
in = ctx.String(InputFlag.Name)
default:
return errors.New("missing filename or --input value")
}
code := strings.TrimSpace(in)
fmt.Printf("%v\n", code)
return asm.PrintDisassembled(code)
}
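
For a concrete example of what disasmCmd ends up doing, the snippet below feeds a tiny hand-assembled program through asm.PrintDisassembled (the same call used above); the bytecode is PUSH1 0x01, PUSH1 0x02, ADD:

package main

import (
    "log"

    "github.com/ethereum/go-ethereum/core/asm"
)

func main() {
    // 60 01 60 02 01  =>  PUSH1 0x01, PUSH1 0x02, ADD
    code := "6001600201"
    if err := asm.PrintDisassembled(code); err != nil {
        log.Fatalf("disassembly failed: %v", err)
    }
}
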

@ -1,49 +0,0 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import "regexp"
// testMetadata provides more granular access to the test information encoded
// within its filename by the execution spec tests (EEST).
type testMetadata struct {
fork string
module string // which python module generated the test, e.g. eip7702
file string // exact file the test came from, e.g. test_gas.py
function string // func that created the test, e.g. test_valid_mcopy_operations
parameters string // the name of the parameters which were used to fill the test, e.g. zero_inputs
}
// parseTestMetadata reads a test name and parses out more specific information
// about the test.
func parseTestMetadata(s string) *testMetadata {
var (
pattern = `tests\/([^\/]+)\/([^\/]+)\/([^:]+)::([^[]+)\[fork_([^-\]]+)-[^-]+-(.+)\]`
re = regexp.MustCompile(pattern)
)
match := re.FindStringSubmatch(s)
if len(match) == 0 {
return nil
}
return &testMetadata{
fork: match[5],
module: match[2],
file: match[3],
function: match[4],
parameters: match[6],
}
}
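
To make the capture groups concrete, the sketch below runs the same pattern over an EEST-style test name; the path and identifiers are invented for illustration, not taken from a real fixture:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    re := regexp.MustCompile(`tests\/([^\/]+)\/([^\/]+)\/([^:]+)::([^[]+)\[fork_([^-\]]+)-[^-]+-(.+)\]`)

    name := "tests/prague/eip7702_set_code_tx/test_gas.py::test_gas_cost[fork_Prague-blockchain_test-multiple_authorizations]"
    m := re.FindStringSubmatch(name)
    if m == nil {
        fmt.Println("no match")
        return
    }
    fmt.Println("fork:      ", m[5]) // Prague
    fmt.Println("module:    ", m[2]) // eip7702_set_code_tx
    fmt.Println("file:      ", m[3]) // test_gas.py
    fmt.Println("function:  ", m[4]) // test_gas_cost
    fmt.Println("parameters:", m[6]) // multiple_authorizations
}
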

@ -1,228 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bufio"
"encoding/hex"
"encoding/json"
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli/v2"
)
var jt vm.JumpTable
const initcode = "INITCODE"
func init() {
jt = vm.NewEOFInstructionSetForTesting()
}
var (
hexFlag = &cli.StringFlag{
Name: "hex",
Usage: "Single container data parse and validation",
}
refTestFlag = &cli.StringFlag{
Name: "test",
Usage: "Path to EOF validation reference test.",
}
eofParseCommand = &cli.Command{
Name: "eofparse",
Aliases: []string{"eof"},
Usage: "Parses hex eof container and returns validation errors (if any)",
Action: eofParseAction,
Flags: []cli.Flag{
hexFlag,
refTestFlag,
},
}
eofDumpCommand = &cli.Command{
Name: "eofdump",
Usage: "Parses hex eof container and prints out human-readable representation of the container.",
Action: eofDumpAction,
Flags: []cli.Flag{
hexFlag,
},
}
)
func eofParseAction(ctx *cli.Context) error {
// If `--test` is set, parse and validate the reference test at the provided path.
if ctx.IsSet(refTestFlag.Name) {
var (
file = ctx.String(refTestFlag.Name)
executedTests int
passedTests int
)
err := filepath.Walk(file, func(path string, info fs.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
log.Debug("Executing test", "name", info.Name())
passed, tot, err := executeTest(path)
passedTests += passed
executedTests += tot
return err
})
if err != nil {
return err
}
log.Info("Executed tests", "passed", passedTests, "total executed", executedTests)
return nil
}
// If `--hex` is set, parse and validate the hex string argument.
if ctx.IsSet(hexFlag.Name) {
if _, err := parseAndValidate(ctx.String(hexFlag.Name), false); err != nil {
return fmt.Errorf("err: %w", err)
}
fmt.Println("OK")
return nil
}
// If neither are passed in, read input from stdin.
scanner := bufio.NewScanner(os.Stdin)
scanner.Buffer(make([]byte, 1024*1024), 10*1024*1024)
for scanner.Scan() {
l := strings.TrimSpace(scanner.Text())
if strings.HasPrefix(l, "#") || l == "" {
continue
}
if _, err := parseAndValidate(l, false); err != nil {
fmt.Printf("err: %v\n", err)
} else {
fmt.Println("OK")
}
}
if err := scanner.Err(); err != nil {
fmt.Println(err.Error())
}
return nil
}
type refTests struct {
Vectors map[string]eOFTest `json:"vectors"`
}
type eOFTest struct {
Code string `json:"code"`
Results map[string]etResult `json:"results"`
ContainerKind string `json:"containerKind"`
}
type etResult struct {
Result bool `json:"result"`
Exception string `json:"exception,omitempty"`
}
func executeTest(path string) (int, int, error) {
src, err := os.ReadFile(path)
if err != nil {
return 0, 0, err
}
var testsByName map[string]refTests
if err := json.Unmarshal(src, &testsByName); err != nil {
return 0, 0, err
}
passed, total := 0, 0
for testsName, tests := range testsByName {
for name, tt := range tests.Vectors {
for fork, r := range tt.Results {
total++
_, err := parseAndValidate(tt.Code, tt.ContainerKind == initcode)
if r.Result && err != nil {
log.Error("Test failure, expected validation success", "name", testsName, "idx", name, "fork", fork, "err", err)
continue
}
if !r.Result && err == nil {
log.Error("Test failure, expected validation error", "name", testsName, "idx", name, "fork", fork, "have err", r.Exception, "err", err)
continue
}
passed++
}
}
}
return passed, total, nil
}
func parseAndValidate(s string, isInitCode bool) (*vm.Container, error) {
if len(s) >= 2 && strings.HasPrefix(s, "0x") {
s = s[2:]
}
b, err := hex.DecodeString(s)
if err != nil {
return nil, fmt.Errorf("unable to decode data: %w", err)
}
return parse(b, isInitCode)
}
func parse(b []byte, isInitCode bool) (*vm.Container, error) {
var c vm.Container
if err := c.UnmarshalBinary(b, isInitCode); err != nil {
return nil, err
}
if err := c.ValidateCode(&jt, isInitCode); err != nil {
return nil, err
}
return &c, nil
}
func eofDumpAction(ctx *cli.Context) error {
// If `--hex` is set, parse and validate the hex string argument.
if ctx.IsSet(hexFlag.Name) {
return eofDump(ctx.String(hexFlag.Name))
}
// Otherwise read from stdin
scanner := bufio.NewScanner(os.Stdin)
scanner.Buffer(make([]byte, 1024*1024), 10*1024*1024)
for scanner.Scan() {
l := strings.TrimSpace(scanner.Text())
if strings.HasPrefix(l, "#") || l == "" {
continue
}
if err := eofDump(l); err != nil {
return err
}
fmt.Println("")
}
return scanner.Err()
}
func eofDump(hexdata string) error {
if len(hexdata) >= 2 && strings.HasPrefix(hexdata, "0x") {
hexdata = hexdata[2:]
}
b, err := hex.DecodeString(hexdata)
if err != nil {
return fmt.Errorf("unable to decode data: %w", err)
}
var c vm.Container
if err := c.UnmarshalBinary(b, false); err != nil {
return err
}
fmt.Println(c.String())
return nil
}

@ -1,166 +0,0 @@
package main
import (
"bufio"
"bytes"
"encoding/hex"
"fmt"
"os"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
)
func FuzzEofParsing(f *testing.F) {
// Seed with corpus from execution-spec-tests
for i := 0; ; i++ {
fname := fmt.Sprintf("testdata/eof/eof_corpus_%d.txt", i)
corpus, err := os.Open(fname)
if err != nil {
break
}
f.Logf("Reading seed data from %v", fname)
scanner := bufio.NewScanner(corpus)
scanner.Buffer(make([]byte, 1024), 10*1024*1024)
for scanner.Scan() {
s := scanner.Text()
if len(s) >= 2 && strings.HasPrefix(s, "0x") {
s = s[2:]
}
b, err := hex.DecodeString(s)
if err != nil {
panic(err) // rotten corpus
}
f.Add(b)
}
corpus.Close()
if err := scanner.Err(); err != nil {
panic(err) // rotten corpus
}
}
// And do the fuzzing
f.Fuzz(func(t *testing.T, data []byte) {
var (
jt = vm.NewEOFInstructionSetForTesting()
c vm.Container
)
cpy := common.CopyBytes(data)
if err := c.UnmarshalBinary(data, true); err == nil {
c.ValidateCode(&jt, true)
if have := c.MarshalBinary(); !bytes.Equal(have, data) {
t.Fatal("Unmarshal-> Marshal failure!")
}
}
if err := c.UnmarshalBinary(data, false); err == nil {
c.ValidateCode(&jt, false)
if have := c.MarshalBinary(); !bytes.Equal(have, data) {
t.Fatal("Unmarshal-> Marshal failure!")
}
}
if !bytes.Equal(cpy, data) {
panic("data modified during unmarshalling")
}
})
}
func TestEofParseInitcode(t *testing.T) {
testEofParse(t, true, "testdata/eof/results.initcode.txt")
}
func TestEofParseRegular(t *testing.T) {
testEofParse(t, false, "testdata/eof/results.regular.txt")
}
func testEofParse(t *testing.T, isInitCode bool, wantFile string) {
var wantFn func() string
var wantLoc = 0
{ // Configure the want-reader
wants, err := os.Open(wantFile)
if err != nil {
t.Fatal(err)
}
scanner := bufio.NewScanner(wants)
scanner.Buffer(make([]byte, 1024), 10*1024*1024)
wantFn = func() string {
if scanner.Scan() {
wantLoc++
return scanner.Text()
}
return "end of file reached"
}
}
for i := 0; ; i++ {
fname := fmt.Sprintf("testdata/eof/eof_corpus_%d.txt", i)
corpus, err := os.Open(fname)
if err != nil {
break
}
t.Logf("# Reading seed data from %v", fname)
scanner := bufio.NewScanner(corpus)
scanner.Buffer(make([]byte, 1024), 10*1024*1024)
line := 1
for scanner.Scan() {
s := scanner.Text()
if len(s) >= 2 && strings.HasPrefix(s, "0x") {
s = s[2:]
}
b, err := hex.DecodeString(s)
if err != nil {
panic(err) // rotten corpus
}
have := "OK"
if _, err := parse(b, isInitCode); err != nil {
have = fmt.Sprintf("ERR: %v", err)
}
if false { // Change this to generate the want-output
fmt.Printf("%v\n", have)
} else {
want := wantFn()
if have != want {
if len(want) > 100 {
want = want[:100]
}
if len(b) > 100 {
b = b[:100]
}
t.Errorf("%v:%d\n%v\ninput %x\nisInit: %v\nhave: %q\nwant: %q\n",
fname, line, fmt.Sprintf("%v:%d", wantFile, wantLoc), b, isInitCode, have, want)
}
}
line++
}
corpus.Close()
}
}
func BenchmarkEofParse(b *testing.B) {
corpus, err := os.Open("testdata/eof/eof_benches.txt")
if err != nil {
b.Fatal(err)
}
defer corpus.Close()
scanner := bufio.NewScanner(corpus)
scanner.Buffer(make([]byte, 1024), 10*1024*1024)
line := 1
for scanner.Scan() {
s := scanner.Text()
if len(s) >= 2 && strings.HasPrefix(s, "0x") {
s = s[2:]
}
data, err := hex.DecodeString(s)
if err != nil {
b.Fatal(err) // rotten corpus
}
b.Run(fmt.Sprintf("test-%d", line), func(b *testing.B) {
b.ReportAllocs()
b.SetBytes(int64(len(data)))
for i := 0; i < b.N; i++ {
_, _ = parse(data, false)
}
})
line++
}
}

@ -0,0 +1,39 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package compiler
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core/asm"
)
func Compile(fn string, src []byte, debug bool) (string, error) {
compiler := asm.NewCompiler(debug)
compiler.Feed(asm.Lex(src, debug))
bin, compileErrors := compiler.Compile()
if len(compileErrors) > 0 {
// report errors
for _, err := range compileErrors {
fmt.Printf("%s:%v\n", fn, err)
}
return "", errors.New("compiling failed")
}
return bin, nil
}

@ -23,7 +23,6 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/consensus/misc"
@ -51,8 +50,6 @@ type Prestate struct {
Pre types.GenesisAlloc `json:"pre"`
}
//go:generate go run github.com/fjl/gencodec -type ExecutionResult -field-override executionResultMarshaling -out gen_execresult.go
// ExecutionResult contains the execution status after running a state test, any
// error that might have occurred and a dump of the final state if requested.
type ExecutionResult struct {
@ -69,12 +66,6 @@ type ExecutionResult struct {
WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"`
RequestsHash *common.Hash `json:"requestsHash,omitempty"`
Requests [][]byte `json:"requests"`
}
type executionResultMarshaling struct {
Requests []hexutil.Bytes `json:"requests"`
}
type ommer struct {
@ -132,7 +123,7 @@ type rejectedTx struct {
// Apply applies a set of transactions to a pre-state
func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
txIt txIterator, miningReward int64,
getTracerFn func(txIndex int, txHash common.Hash, chainConfig *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error)) (*state.StateDB, *ExecutionResult, []byte, error) {
getTracerFn func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error)) (*state.StateDB, *ExecutionResult, []byte, error) {
// Capture errors for BLOCKHASH operation, if we haven't been supplied the
// required blockhashes
var hashError error
@ -201,17 +192,11 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 {
misc.ApplyDAOHardFork(statedb)
}
evm := vm.NewEVM(vmContext, statedb, chainConfig, vmConfig)
if beaconRoot := pre.Env.ParentBeaconBlockRoot; beaconRoot != nil {
core.ProcessBeaconBlockRoot(*beaconRoot, evm)
}
if pre.Env.BlockHashes != nil && chainConfig.IsPrague(new(big.Int).SetUint64(pre.Env.Number), pre.Env.Timestamp) {
var (
prevNumber = pre.Env.Number - 1
prevHash = pre.Env.BlockHashes[math.HexOrDecimal64(prevNumber)]
)
core.ProcessParentBlockHash(prevHash, evm)
evm := vm.NewEVM(vmContext, vm.TxContext{}, statedb, chainConfig, vmConfig)
core.ProcessBeaconBlockRoot(*beaconRoot, evm, statedb)
}
for i := 0; txIt.Next(); i++ {
tx, err := txIt.Tx()
if err != nil {
@ -241,21 +226,22 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
continue
}
}
tracer, traceOutput, err := getTracerFn(txIndex, tx.Hash(), chainConfig)
tracer, traceOutput, err := getTracerFn(txIndex, tx.Hash())
if err != nil {
return nil, nil, nil, err
}
// TODO (rjl493456442) it's a bit weird to reset the tracer in the
// middle of block execution, please improve it somehow.
if tracer != nil {
evm.SetTracer(tracer.Hooks)
vmConfig.Tracer = tracer.Hooks
}
statedb.SetTxContext(tx.Hash(), txIndex)
var (
snapshot = statedb.Snapshot()
prevGas = gaspool.Gas()
txContext = core.NewEVMTxContext(msg)
snapshot = statedb.Snapshot()
prevGas = gaspool.Gas()
)
evm := vm.NewEVM(vmContext, txContext, statedb, chainConfig, vmConfig)
if tracer != nil && tracer.OnTxStart != nil {
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
}
@ -359,25 +345,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
amount := new(big.Int).Mul(new(big.Int).SetUint64(w.Amount), big.NewInt(params.GWei))
statedb.AddBalance(w.Address, uint256.MustFromBig(amount), tracing.BalanceIncreaseWithdrawal)
}
// Gather the execution-layer triggered requests.
var requests [][]byte
if chainConfig.IsPrague(vmContext.BlockNumber, vmContext.Time) {
requests = [][]byte{}
// EIP-6110
var allLogs []*types.Log
for _, receipt := range receipts {
allLogs = append(allLogs, receipt.Logs...)
}
if err := core.ParseDepositLogs(&requests, allLogs, chainConfig); err != nil {
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not parse requests logs: %v", err))
}
// EIP-7002
core.ProcessWithdrawalQueue(&requests, evm)
// EIP-7251
core.ProcessConsolidationQueue(&requests, evm)
}
// Commit block
root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber))
if err != nil {
@ -403,20 +370,9 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(&excessBlobGas)
execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed)
}
if requests != nil {
// Set requestsHash on block.
h := types.CalcRequestsHash(requests)
execRs.RequestsHash = &h
for i := range requests {
// remove prefix
requests[i] = requests[i][1:]
}
execRs.Requests = requests
}
// Re-create statedb instance with new root upon the updated database
// for accessing latest states.
statedb, err = state.New(root, statedb.Database())
statedb, err = state.New(root, statedb.Database(), nil)
if err != nil {
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err))
}
@ -425,9 +381,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
}
func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB {
tdb := triedb.NewDatabase(db, &triedb.Config{Preimages: true})
sdb := state.NewDatabase(tdb, nil)
statedb, _ := state.New(types.EmptyRootHash, sdb)
sdb := state.NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
statedb, _ := state.New(types.EmptyRootHash, sdb, nil)
for addr, a := range accounts {
statedb.SetCode(addr, a.Code)
statedb.SetNonce(addr, a.Nonce)
@ -438,7 +393,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
}
// Commit and re-open to start with a clean state.
root, _ := statedb.Commit(0, false)
statedb, _ = state.New(root, sdb)
statedb, _ = state.New(root, sdb, nil)
return statedb
}

@ -1,134 +0,0 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
package t8ntool
import (
"encoding/json"
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
)
var _ = (*executionResultMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (e ExecutionResult) MarshalJSON() ([]byte, error) {
type ExecutionResult struct {
StateRoot common.Hash `json:"stateRoot"`
TxRoot common.Hash `json:"txRoot"`
ReceiptRoot common.Hash `json:"receiptsRoot"`
LogsHash common.Hash `json:"logsHash"`
Bloom types.Bloom `json:"logsBloom" gencodec:"required"`
Receipts types.Receipts `json:"receipts"`
Rejected []*rejectedTx `json:"rejected,omitempty"`
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"`
RequestsHash *common.Hash `json:"requestsHash,omitempty"`
Requests []hexutil.Bytes `json:"requests"`
}
var enc ExecutionResult
enc.StateRoot = e.StateRoot
enc.TxRoot = e.TxRoot
enc.ReceiptRoot = e.ReceiptRoot
enc.LogsHash = e.LogsHash
enc.Bloom = e.Bloom
enc.Receipts = e.Receipts
enc.Rejected = e.Rejected
enc.Difficulty = e.Difficulty
enc.GasUsed = e.GasUsed
enc.BaseFee = e.BaseFee
enc.WithdrawalsRoot = e.WithdrawalsRoot
enc.CurrentExcessBlobGas = e.CurrentExcessBlobGas
enc.CurrentBlobGasUsed = e.CurrentBlobGasUsed
enc.RequestsHash = e.RequestsHash
if e.Requests != nil {
enc.Requests = make([]hexutil.Bytes, len(e.Requests))
for k, v := range e.Requests {
enc.Requests[k] = v
}
}
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (e *ExecutionResult) UnmarshalJSON(input []byte) error {
type ExecutionResult struct {
StateRoot *common.Hash `json:"stateRoot"`
TxRoot *common.Hash `json:"txRoot"`
ReceiptRoot *common.Hash `json:"receiptsRoot"`
LogsHash *common.Hash `json:"logsHash"`
Bloom *types.Bloom `json:"logsBloom" gencodec:"required"`
Receipts *types.Receipts `json:"receipts"`
Rejected []*rejectedTx `json:"rejected,omitempty"`
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"`
RequestsHash *common.Hash `json:"requestsHash,omitempty"`
Requests []hexutil.Bytes `json:"requests"`
}
var dec ExecutionResult
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
if dec.StateRoot != nil {
e.StateRoot = *dec.StateRoot
}
if dec.TxRoot != nil {
e.TxRoot = *dec.TxRoot
}
if dec.ReceiptRoot != nil {
e.ReceiptRoot = *dec.ReceiptRoot
}
if dec.LogsHash != nil {
e.LogsHash = *dec.LogsHash
}
if dec.Bloom == nil {
return errors.New("missing required field 'logsBloom' for ExecutionResult")
}
e.Bloom = *dec.Bloom
if dec.Receipts != nil {
e.Receipts = *dec.Receipts
}
if dec.Rejected != nil {
e.Rejected = dec.Rejected
}
if dec.Difficulty == nil {
return errors.New("missing required field 'currentDifficulty' for ExecutionResult")
}
e.Difficulty = dec.Difficulty
if dec.GasUsed != nil {
e.GasUsed = *dec.GasUsed
}
if dec.BaseFee != nil {
e.BaseFee = dec.BaseFee
}
if dec.WithdrawalsRoot != nil {
e.WithdrawalsRoot = dec.WithdrawalsRoot
}
if dec.CurrentExcessBlobGas != nil {
e.CurrentExcessBlobGas = dec.CurrentExcessBlobGas
}
if dec.CurrentBlobGasUsed != nil {
e.CurrentBlobGasUsed = dec.CurrentBlobGasUsed
}
if dec.RequestsHash != nil {
e.RequestsHash = dec.RequestsHash
}
if dec.Requests != nil {
e.Requests = make([][]byte, len(dec.Requests))
for k, v := range dec.Requests {
e.Requests[k] = v
}
}
return nil
}

@ -133,7 +133,7 @@ func Transaction(ctx *cli.Context) error {
r.Address = sender
}
// Check intrinsic gas
if gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.SetCodeAuthorizations(), tx.To() == nil,
if gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil,
chainConfig.IsHomestead(new(big.Int)), chainConfig.IsIstanbul(new(big.Int)), chainConfig.IsShanghai(new(big.Int), 0)); err != nil {
r.Error = err
results = append(results, r)

@ -82,9 +82,7 @@ type input struct {
}
func Transition(ctx *cli.Context) error {
var getTracer = func(txIndex int, txHash common.Hash, chainConfig *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error) {
return nil, nil, nil
}
var getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) { return nil, nil, nil }
baseDir, err := createBasedir(ctx)
if err != nil {
@ -97,8 +95,9 @@ func Transition(ctx *cli.Context) error {
DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
EnableMemory: ctx.Bool(TraceEnableMemoryFlag.Name),
EnableReturnData: ctx.Bool(TraceEnableReturnDataFlag.Name),
Debug: true,
}
getTracer = func(txIndex int, txHash common.Hash, _ *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error) {
getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) {
traceFile, err := os.Create(filepath.Join(baseDir, fmt.Sprintf("trace-%d-%v.jsonl", txIndex, txHash.String())))
if err != nil {
return nil, nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
@ -122,12 +121,12 @@ func Transition(ctx *cli.Context) error {
if ctx.IsSet(TraceTracerConfigFlag.Name) {
config = []byte(ctx.String(TraceTracerConfigFlag.Name))
}
getTracer = func(txIndex int, txHash common.Hash, chainConfig *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error) {
getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) {
traceFile, err := os.Create(filepath.Join(baseDir, fmt.Sprintf("trace-%d-%v.json", txIndex, txHash.String())))
if err != nil {
return nil, nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
}
tracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name), nil, config, chainConfig)
tracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name), nil, config)
if err != nil {
return nil, nil, NewError(ErrorConfig, fmt.Errorf("failed instantiating tracer: %w", err))
}

@ -19,14 +19,10 @@ package main
import (
"fmt"
"io/fs"
"math/big"
"os"
"path/filepath"
"github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/urfave/cli/v2"
@ -36,182 +32,209 @@ import (
_ "github.com/ethereum/go-ethereum/eth/tracers/native"
)
// Some other nice-to-haves:
// * accumulate traces into an object to bundle with test
// * write tx identifier for trace beforehand (blocktest only)
// * combine blocktest and statetest runner logic using unified test interface
const traceCategory = "TRACING"
var (
// Test running flags.
RunFlag = &cli.StringFlag{
Name: "run",
Value: ".*",
Usage: "Run only those tests matching the regular expression.",
DebugFlag = &cli.BoolFlag{
Name: "debug",
Usage: "output full trace logs",
Category: flags.VMCategory,
}
StatDumpFlag = &cli.BoolFlag{
Name: "statdump",
Usage: "displays stack and heap memory information",
Category: flags.VMCategory,
}
CodeFlag = &cli.StringFlag{
Name: "code",
Usage: "EVM code",
Category: flags.VMCategory,
}
CodeFileFlag = &cli.StringFlag{
Name: "codefile",
Usage: "File containing EVM code. If '-' is specified, code is read from stdin ",
Category: flags.VMCategory,
}
GasFlag = &cli.Uint64Flag{
Name: "gas",
Usage: "gas limit for the evm",
Value: 10000000000,
Category: flags.VMCategory,
}
PriceFlag = &flags.BigFlag{
Name: "price",
Usage: "price set for the evm",
Value: new(big.Int),
Category: flags.VMCategory,
}
ValueFlag = &flags.BigFlag{
Name: "value",
Usage: "value set for the evm",
Value: new(big.Int),
Category: flags.VMCategory,
}
DumpFlag = &cli.BoolFlag{
Name: "dump",
Usage: "dumps the state after the run",
Category: flags.VMCategory,
}
InputFlag = &cli.StringFlag{
Name: "input",
Usage: "input for the EVM",
Category: flags.VMCategory,
}
InputFileFlag = &cli.StringFlag{
Name: "inputfile",
Usage: "file containing input for the EVM",
Category: flags.VMCategory,
}
BenchFlag = &cli.BoolFlag{
Name: "bench",
Usage: "benchmark the execution",
Category: flags.VMCategory,
}
WitnessCrossCheckFlag = &cli.BoolFlag{
Name: "cross-check",
Aliases: []string{"xc"},
Usage: "Cross-check stateful execution against stateless, verifying the witness generation.",
CreateFlag = &cli.BoolFlag{
Name: "create",
Usage: "indicates the action should be create rather than call",
Category: flags.VMCategory,
}
// Debugging flags.
DumpFlag = &cli.BoolFlag{
Name: "dump",
Usage: "dumps the state after the run",
}
HumanReadableFlag = &cli.BoolFlag{
Name: "human",
Usage: "\"Human-readable\" output",
}
StatDumpFlag = &cli.BoolFlag{
Name: "statdump",
Usage: "displays stack and heap memory information",
}
// Tracing flags.
TraceFlag = &cli.BoolFlag{
Name: "trace",
Usage: "Enable tracing and output trace log.",
Category: traceCategory,
}
TraceFormatFlag = &cli.StringFlag{
Name: "trace.format",
Usage: "Trace output format to use (struct|json)",
Value: "struct",
Category: traceCategory,
}
TraceDisableMemoryFlag = &cli.BoolFlag{
Name: "trace.nomemory",
Aliases: []string{"nomemory"},
Value: true,
Usage: "disable memory output",
Category: traceCategory,
}
TraceDisableStackFlag = &cli.BoolFlag{
Name: "trace.nostack",
Aliases: []string{"nostack"},
Usage: "disable stack output",
Category: traceCategory,
}
TraceDisableStorageFlag = &cli.BoolFlag{
Name: "trace.nostorage",
Aliases: []string{"nostorage"},
Usage: "disable storage output",
Category: traceCategory,
}
TraceDisableReturnDataFlag = &cli.BoolFlag{
Name: "trace.noreturndata",
Aliases: []string{"noreturndata"},
Value: true,
Usage: "enable return data output",
Category: traceCategory,
}
// Deprecated flags.
DebugFlag = &cli.BoolFlag{
Name: "debug",
Usage: "output full trace logs (deprecated)",
Hidden: true,
Category: traceCategory,
GenesisFlag = &cli.StringFlag{
Name: "prestate",
Usage: "JSON file with prestate (genesis) config",
Category: flags.VMCategory,
}
MachineFlag = &cli.BoolFlag{
Name: "json",
Usage: "output trace logs in machine readable format, json (deprecated)",
Hidden: true,
Category: traceCategory,
Usage: "output trace logs in machine readable format (json)",
Category: flags.VMCategory,
}
SenderFlag = &cli.StringFlag{
Name: "sender",
Usage: "The transaction origin",
Category: flags.VMCategory,
}
ReceiverFlag = &cli.StringFlag{
Name: "receiver",
Usage: "The transaction receiver (execution context)",
Category: flags.VMCategory,
}
DisableMemoryFlag = &cli.BoolFlag{
Name: "nomemory",
Value: true,
Usage: "disable memory output",
Category: flags.VMCategory,
}
DisableStackFlag = &cli.BoolFlag{
Name: "nostack",
Usage: "disable stack output",
Category: flags.VMCategory,
}
DisableStorageFlag = &cli.BoolFlag{
Name: "nostorage",
Usage: "disable storage output",
Category: flags.VMCategory,
}
DisableReturnDataFlag = &cli.BoolFlag{
Name: "noreturndata",
Value: true,
Usage: "enable return data output",
Category: flags.VMCategory,
}
)
// Command definitions.
var (
stateTransitionCommand = &cli.Command{
Name: "transition",
Aliases: []string{"t8n"},
Usage: "Executes a full state transition",
Action: t8ntool.Transition,
Flags: []cli.Flag{
t8ntool.TraceFlag,
t8ntool.TraceTracerFlag,
t8ntool.TraceTracerConfigFlag,
t8ntool.TraceEnableMemoryFlag,
t8ntool.TraceDisableStackFlag,
t8ntool.TraceEnableReturnDataFlag,
t8ntool.TraceEnableCallFramesFlag,
t8ntool.OutputBasedir,
t8ntool.OutputAllocFlag,
t8ntool.OutputResultFlag,
t8ntool.OutputBodyFlag,
t8ntool.InputAllocFlag,
t8ntool.InputEnvFlag,
t8ntool.InputTxsFlag,
t8ntool.ForknameFlag,
t8ntool.ChainIDFlag,
t8ntool.RewardFlag,
},
}
transactionCommand = &cli.Command{
Name: "transaction",
Aliases: []string{"t9n"},
Usage: "Performs transaction validation",
Action: t8ntool.Transaction,
Flags: []cli.Flag{
t8ntool.InputTxsFlag,
t8ntool.ChainIDFlag,
t8ntool.ForknameFlag,
},
}
var stateTransitionCommand = &cli.Command{
Name: "transition",
Aliases: []string{"t8n"},
Usage: "Executes a full state transition",
Action: t8ntool.Transition,
Flags: []cli.Flag{
t8ntool.TraceFlag,
t8ntool.TraceTracerFlag,
t8ntool.TraceTracerConfigFlag,
t8ntool.TraceEnableMemoryFlag,
t8ntool.TraceDisableStackFlag,
t8ntool.TraceEnableReturnDataFlag,
t8ntool.TraceEnableCallFramesFlag,
t8ntool.OutputBasedir,
t8ntool.OutputAllocFlag,
t8ntool.OutputResultFlag,
t8ntool.OutputBodyFlag,
t8ntool.InputAllocFlag,
t8ntool.InputEnvFlag,
t8ntool.InputTxsFlag,
t8ntool.ForknameFlag,
t8ntool.ChainIDFlag,
t8ntool.RewardFlag,
},
}
blockBuilderCommand = &cli.Command{
Name: "block-builder",
Aliases: []string{"b11r"},
Usage: "Builds a block",
Action: t8ntool.BuildBlock,
Flags: []cli.Flag{
t8ntool.OutputBasedir,
t8ntool.OutputBlockFlag,
t8ntool.InputHeaderFlag,
t8ntool.InputOmmersFlag,
t8ntool.InputWithdrawalsFlag,
t8ntool.InputTxsRlpFlag,
t8ntool.SealCliqueFlag,
},
}
)
var transactionCommand = &cli.Command{
Name: "transaction",
Aliases: []string{"t9n"},
Usage: "Performs transaction validation",
Action: t8ntool.Transaction,
Flags: []cli.Flag{
t8ntool.InputTxsFlag,
t8ntool.ChainIDFlag,
t8ntool.ForknameFlag,
},
}
var blockBuilderCommand = &cli.Command{
Name: "block-builder",
Aliases: []string{"b11r"},
Usage: "Builds a block",
Action: t8ntool.BuildBlock,
Flags: []cli.Flag{
t8ntool.OutputBasedir,
t8ntool.OutputBlockFlag,
t8ntool.InputHeaderFlag,
t8ntool.InputOmmersFlag,
t8ntool.InputWithdrawalsFlag,
t8ntool.InputTxsRlpFlag,
t8ntool.SealCliqueFlag,
},
}
// vmFlags contains flags related to running the EVM.
var vmFlags = []cli.Flag{
CodeFlag,
CodeFileFlag,
CreateFlag,
GasFlag,
PriceFlag,
ValueFlag,
InputFlag,
InputFileFlag,
GenesisFlag,
SenderFlag,
ReceiverFlag,
}
// traceFlags contains flags that configure tracing output.
var traceFlags = []cli.Flag{
TraceFlag,
TraceFormatFlag,
TraceDisableStackFlag,
TraceDisableMemoryFlag,
TraceDisableStorageFlag,
TraceDisableReturnDataFlag,
// deprecated
BenchFlag,
DebugFlag,
DumpFlag,
MachineFlag,
StatDumpFlag,
DisableMemoryFlag,
DisableStackFlag,
DisableStorageFlag,
DisableReturnDataFlag,
}
var app = flags.NewApp("the evm command line interface")
func init() {
app.Flags = debug.Flags
app.Flags = flags.Merge(vmFlags, traceFlags, debug.Flags)
app.Commands = []*cli.Command{
compileCommand,
disasmCommand,
runCommand,
blockTestCommand,
stateTestCommand,
stateTransitionCommand,
transactionCommand,
blockBuilderCommand,
eofParseCommand,
eofDumpCommand,
}
app.Before = func(ctx *cli.Context) error {
flags.MigrateGlobalFlags(ctx)
@ -225,71 +248,11 @@ func init() {
func main() {
if err := app.Run(os.Args); err != nil {
code := 1
if ec, ok := err.(*t8ntool.NumberedError); ok {
code = ec.ExitCode()
}
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
os.Exit(code)
}
}
// tracerFromFlags parses the cli flags and returns the specified tracer.
func tracerFromFlags(ctx *cli.Context) *tracing.Hooks {
config := &logger.Config{
EnableMemory: !ctx.Bool(TraceDisableMemoryFlag.Name),
DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
DisableStorage: ctx.Bool(TraceDisableStorageFlag.Name),
EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name),
}
switch {
case ctx.Bool(TraceFlag.Name):
switch format := ctx.String(TraceFormatFlag.Name); format {
case "struct":
return logger.NewStreamingStructLogger(config, os.Stderr).Hooks()
case "json":
return logger.NewJSONLogger(config, os.Stderr)
case "md", "markdown":
return logger.NewMarkdownLogger(config, os.Stderr).Hooks()
default:
fmt.Fprintf(os.Stderr, "unknown trace format: %q\n", format)
os.Exit(1)
return nil
}
// Deprecated ways of configuring tracing.
case ctx.Bool(MachineFlag.Name):
return logger.NewJSONLogger(config, os.Stderr)
case ctx.Bool(DebugFlag.Name):
return logger.NewStreamingStructLogger(config, os.Stderr).Hooks()
default:
return nil
}
}
// collectFiles walks the given path. If the path is a directory, it will
// return a list of all files with a .json extension found beneath it.
// Otherwise (if path points to a file), it will return the path.
func collectFiles(path string) []string {
var out []string
if info, err := os.Stat(path); err == nil && !info.IsDir() {
// User explicitly pointed out a file, ignore extension.
return []string{path}
}
err := filepath.Walk(path, func(path string, info fs.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() && filepath.Ext(info.Name()) == ".json" {
out = append(out, path)
}
return nil
})
if err != nil {
fmt.Fprintln(os.Stderr, err)
}
return out
}
// dump returns a state dump for the most current trie.
func dump(s *state.StateDB) *state.Dump {
root := s.IntermediateRoot(false)
cpy, _ := state.New(root, s.Database())
dump := cpy.RawDump(nil)
return &dump
}
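
As a hedged sketch (not part of the diff), the snippet below isolates the format dispatch that tracerFromFlags performs above, taking a plain string instead of a cli context and reusing only the logger constructors that appear in the diff. The resulting hooks plug into the EVM via vm.Config, as the state-test runner does.

package main

import (
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/core/tracing"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers/logger"
)

func hooksForFormat(format string) (*tracing.Hooks, error) {
	cfg := &logger.Config{EnableReturnData: true}
	switch format {
	case "struct":
		return logger.NewStreamingStructLogger(cfg, os.Stderr).Hooks(), nil
	case "json":
		return logger.NewJSONLogger(cfg, os.Stderr), nil
	case "md", "markdown":
		return logger.NewMarkdownLogger(cfg, os.Stderr).Hooks(), nil
	default:
		return nil, fmt.Errorf("unknown trace format: %q", format)
	}
}

func main() {
	hooks, err := hooksForFormat("json")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Hand the hooks to the interpreter configuration.
	_ = vm.Config{Tracer: hooks}
}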

@ -1,87 +0,0 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/urfave/cli/v2"
)
const (
PASS = "\033[32mPASS\033[0m"
FAIL = "\033[31mFAIL\033[0m"
)
// testResult contains the execution status after running a state test, any
// error that might have occurred and a dump of the final state if requested.
type testResult struct {
Name string `json:"name"`
Pass bool `json:"pass"`
Root *common.Hash `json:"stateRoot,omitempty"`
Fork string `json:"fork"`
Error string `json:"error,omitempty"`
State *state.Dump `json:"state,omitempty"`
Stats *execStats `json:"benchStats,omitempty"`
}
func (r testResult) String() string {
var status string
if r.Pass {
status = fmt.Sprintf("[%s]", PASS)
} else {
status = fmt.Sprintf("[%s]", FAIL)
}
info := r.Name
m := parseTestMetadata(r.Name)
if m != nil {
info = fmt.Sprintf("%s %s, param=%s", m.module, m.function, m.parameters)
}
var extra string
if !r.Pass {
extra = fmt.Sprintf(", err=%v, fork=%s", r.Error, r.Fork)
}
out := fmt.Sprintf("%s %s%s", status, info, extra)
if r.State != nil {
state, _ := json.MarshalIndent(r.State, "", " ")
out += "\n" + string(state)
}
return out
}
// report prints the after-test summary.
func report(ctx *cli.Context, results []testResult) {
if ctx.Bool(HumanReadableFlag.Name) {
pass := 0
for _, r := range results {
if r.Pass {
pass++
}
}
for _, r := range results {
fmt.Println(r)
}
fmt.Println("--")
fmt.Printf("%d tests passed, %d tests failed.\n", pass, len(results)-pass)
return
}
out, _ := json.MarshalIndent(results, "", " ")
fmt.Println(string(out))
}

@ -18,27 +18,25 @@ package main
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"math/big"
"os"
goruntime "runtime"
"slices"
"strings"
"testing"
"time"
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/core/vm/runtime"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/triedb"
@ -52,83 +50,14 @@ var runCommand = &cli.Command{
Usage: "Run arbitrary evm binary",
ArgsUsage: "<code>",
Description: `The run command runs arbitrary EVM code.`,
Flags: slices.Concat([]cli.Flag{
BenchFlag,
CodeFileFlag,
CreateFlag,
GasFlag,
GenesisFlag,
InputFlag,
InputFileFlag,
PriceFlag,
ReceiverFlag,
SenderFlag,
ValueFlag,
StatDumpFlag,
DumpFlag,
}, traceFlags),
Flags: flags.Merge(vmFlags, traceFlags),
}
var (
CodeFileFlag = &cli.StringFlag{
Name: "codefile",
Usage: "File containing EVM code. If '-' is specified, code is read from stdin ",
Category: flags.VMCategory,
}
CreateFlag = &cli.BoolFlag{
Name: "create",
Usage: "Indicates the action should be create rather than call",
Category: flags.VMCategory,
}
GasFlag = &cli.Uint64Flag{
Name: "gas",
Usage: "Gas limit for the evm",
Value: 10000000000,
Category: flags.VMCategory,
}
GenesisFlag = &cli.StringFlag{
Name: "prestate",
Usage: "JSON file with prestate (genesis) config",
Category: flags.VMCategory,
}
InputFlag = &cli.StringFlag{
Name: "input",
Usage: "Input for the EVM",
Category: flags.VMCategory,
}
InputFileFlag = &cli.StringFlag{
Name: "inputfile",
Usage: "File containing input for the EVM",
Category: flags.VMCategory,
}
PriceFlag = &flags.BigFlag{
Name: "price",
Usage: "Price set for the evm",
Value: new(big.Int),
Category: flags.VMCategory,
}
ReceiverFlag = &cli.StringFlag{
Name: "receiver",
Usage: "The transaction receiver (execution context)",
Category: flags.VMCategory,
}
SenderFlag = &cli.StringFlag{
Name: "sender",
Usage: "The transaction origin",
Category: flags.VMCategory,
}
ValueFlag = &flags.BigFlag{
Name: "value",
Usage: "Value set for the evm",
Value: new(big.Int),
Category: flags.VMCategory,
}
)
// readGenesis will read the given JSON format genesis file and return
// the initialized Genesis structure
func readGenesis(genesisPath string) *core.Genesis {
// Make sure we have a valid genesis JSON
//genesisPath := ctx.Args().First()
if len(genesisPath) == 0 {
utils.Fatalf("Must supply path to genesis JSON file")
}
@ -146,60 +75,51 @@ func readGenesis(genesisPath string) *core.Genesis {
}
type execStats struct {
Time time.Duration `json:"time"` // The execution Time.
Allocs int64 `json:"allocs"` // The number of heap allocations during execution.
BytesAllocated int64 `json:"bytesAllocated"` // The cumulative number of bytes allocated during execution.
GasUsed uint64 `json:"gasUsed"` // the amount of gas used during execution
time time.Duration // The execution time.
allocs int64 // The number of heap allocations during execution.
bytesAllocated int64 // The cumulative number of bytes allocated during execution.
}
func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) ([]byte, execStats, error) {
func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []byte, gasLeft uint64, stats execStats, err error) {
if bench {
testing.Init()
// Do one warm-up run
output, gasUsed, err := execFunc()
result := testing.Benchmark(func(b *testing.B) {
for i := 0; i < b.N; i++ {
haveOutput, haveGasUsed, haveErr := execFunc()
if !bytes.Equal(haveOutput, output) {
panic(fmt.Sprintf("output differs\nhave %x\nwant %x\n", haveOutput, output))
}
if haveGasUsed != gasUsed {
panic(fmt.Sprintf("gas differs, have %v want %v", haveGasUsed, gasUsed))
}
if haveErr != err {
panic(fmt.Sprintf("err differs, have %v want %v", haveErr, err))
}
output, gasLeft, err = execFunc()
}
})
// Get the average execution time from the benchmarking result.
// There are other useful stats here that could be reported.
stats := execStats{
Time: time.Duration(result.NsPerOp()),
Allocs: result.AllocsPerOp(),
BytesAllocated: result.AllocedBytesPerOp(),
GasUsed: gasUsed,
}
return output, stats, err
stats.time = time.Duration(result.NsPerOp())
stats.allocs = result.AllocsPerOp()
stats.bytesAllocated = result.AllocedBytesPerOp()
} else {
var memStatsBefore, memStatsAfter goruntime.MemStats
goruntime.ReadMemStats(&memStatsBefore)
startTime := time.Now()
output, gasLeft, err = execFunc()
stats.time = time.Since(startTime)
goruntime.ReadMemStats(&memStatsAfter)
stats.allocs = int64(memStatsAfter.Mallocs - memStatsBefore.Mallocs)
stats.bytesAllocated = int64(memStatsAfter.TotalAlloc - memStatsBefore.TotalAlloc)
}
var memStatsBefore, memStatsAfter goruntime.MemStats
goruntime.ReadMemStats(&memStatsBefore)
t0 := time.Now()
output, gasUsed, err := execFunc()
duration := time.Since(t0)
goruntime.ReadMemStats(&memStatsAfter)
stats := execStats{
Time: duration,
Allocs: int64(memStatsAfter.Mallocs - memStatsBefore.Mallocs),
BytesAllocated: int64(memStatsAfter.TotalAlloc - memStatsBefore.TotalAlloc),
GasUsed: gasUsed,
}
return output, stats, err
return output, gasLeft, stats, err
}
func runCmd(ctx *cli.Context) error {
logconfig := &logger.Config{
EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
DisableStack: ctx.Bool(DisableStackFlag.Name),
DisableStorage: ctx.Bool(DisableStorageFlag.Name),
EnableReturnData: !ctx.Bool(DisableReturnDataFlag.Name),
Debug: ctx.Bool(DebugFlag.Name),
}
var (
tracer *tracing.Hooks
prestate *state.StateDB
debugLogger *logger.StructLogger
statedb *state.StateDB
chainConfig *params.ChainConfig
sender = common.BytesToAddress([]byte("sender"))
receiver = common.BytesToAddress([]byte("receiver"))
@ -207,7 +127,15 @@ func runCmd(ctx *cli.Context) error {
blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests
blobBaseFee = new(big.Int) // TODO (MariusVanDerWijden) implement blob fee in state tests
)
tracer = tracerFromFlags(ctx)
if ctx.Bool(MachineFlag.Name) {
tracer = logger.NewJSONLogger(logconfig, os.Stdout)
} else if ctx.Bool(DebugFlag.Name) {
debugLogger = logger.NewStructLogger(logconfig)
tracer = debugLogger.Hooks()
} else {
debugLogger = logger.NewStructLogger(logconfig)
}
initialGas := ctx.Uint64(GasFlag.Name)
genesisConfig := new(core.Genesis)
genesisConfig.GasLimit = initialGas
@ -227,8 +155,8 @@ func runCmd(ctx *cli.Context) error {
})
defer triedb.Close()
genesis := genesisConfig.MustCommit(db, triedb)
sdb := state.NewDatabase(triedb, nil)
prestate, _ = state.New(genesis.Root(), sdb)
sdb := state.NewDatabaseWithNodeDB(db, triedb)
statedb, _ = state.New(genesis.Root(), sdb, nil)
chainConfig = genesisConfig.Config
if ctx.String(SenderFlag.Name) != "" {
@ -241,38 +169,51 @@ func runCmd(ctx *cli.Context) error {
var code []byte
codeFileFlag := ctx.String(CodeFileFlag.Name)
hexcode := ctx.Args().First()
codeFlag := ctx.String(CodeFlag.Name)
// The '--codefile' flag overrides code in state
if codeFileFlag == "-" {
// If - is specified, it means that code comes from stdin
// Try reading from stdin
input, err := io.ReadAll(os.Stdin)
if err != nil {
fmt.Printf("Could not load code from stdin: %v\n", err)
// The '--code' or '--codefile' flag overrides code in state
if codeFileFlag != "" || codeFlag != "" {
var hexcode []byte
if codeFileFlag != "" {
var err error
// If - is specified, it means that code comes from stdin
if codeFileFlag == "-" {
// Try reading from stdin
if hexcode, err = io.ReadAll(os.Stdin); err != nil {
fmt.Printf("Could not load code from stdin: %v\n", err)
os.Exit(1)
}
} else {
// Codefile with hex assembly
if hexcode, err = os.ReadFile(codeFileFlag); err != nil {
fmt.Printf("Could not load code from file: %v\n", err)
os.Exit(1)
}
}
} else {
hexcode = []byte(codeFlag)
}
hexcode = bytes.TrimSpace(hexcode)
if len(hexcode)%2 != 0 {
fmt.Printf("Invalid input length for hex data (%d)\n", len(hexcode))
os.Exit(1)
}
hexcode = string(input)
} else if codeFileFlag != "" {
// Codefile with hex assembly
input, err := os.ReadFile(codeFileFlag)
code = common.FromHex(string(hexcode))
} else if fn := ctx.Args().First(); len(fn) > 0 {
// EASM-file to compile
src, err := os.ReadFile(fn)
if err != nil {
fmt.Printf("Could not load code from file: %v\n", err)
os.Exit(1)
return err
}
hexcode = string(input)
bin, err := compiler.Compile(fn, src, false)
if err != nil {
return err
}
code = common.Hex2Bytes(bin)
}
hexcode = strings.TrimSpace(hexcode)
if len(hexcode)%2 != 0 {
fmt.Printf("Invalid input length for hex data (%d)\n", len(hexcode))
os.Exit(1)
}
code = common.FromHex(hexcode)
runtimeConfig := runtime.Config{
Origin: sender,
State: prestate,
State: statedb,
GasLimit: initialGas,
GasPrice: flags.GlobalBig(ctx, PriceFlag.Name),
Value: flags.GlobalBig(ctx, ValueFlag.Name),
@ -315,33 +256,28 @@ func runCmd(ctx *cli.Context) error {
if ctx.Bool(CreateFlag.Name) {
input = append(code, input...)
execFunc = func() ([]byte, uint64, error) {
// don't mutate the state!
runtimeConfig.State = prestate.Copy()
output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
return output, gasLeft, err
}
} else {
if len(code) > 0 {
prestate.SetCode(receiver, code)
statedb.SetCode(receiver, code)
}
execFunc = func() ([]byte, uint64, error) {
// don't mutate the state!
runtimeConfig.State = prestate.Copy()
output, gasLeft, err := runtime.Call(receiver, input, &runtimeConfig)
return output, initialGas - gasLeft, err
return runtime.Call(receiver, input, &runtimeConfig)
}
}
bench := ctx.Bool(BenchFlag.Name)
output, stats, err := timedExec(bench, execFunc)
output, leftOverGas, stats, err := timedExec(bench, execFunc)
if ctx.Bool(DumpFlag.Name) {
root, err := runtimeConfig.State.Commit(genesisConfig.Number, true)
root, err := statedb.Commit(genesisConfig.Number, true)
if err != nil {
fmt.Printf("Failed to commit changes %v\n", err)
return err
}
dumpdb, err := state.New(root, sdb)
dumpdb, err := state.New(root, sdb, nil)
if err != nil {
fmt.Printf("Failed to open statedb %v\n", err)
return err
@ -350,10 +286,12 @@ func runCmd(ctx *cli.Context) error {
}
if ctx.Bool(DebugFlag.Name) {
if logs := runtimeConfig.State.Logs(); len(logs) > 0 {
fmt.Fprintln(os.Stderr, "### LOGS")
writeLogs(os.Stderr, logs)
if debugLogger != nil {
fmt.Fprintln(os.Stderr, "#### TRACE ####")
logger.WriteTrace(os.Stderr, debugLogger.StructLogs())
}
fmt.Fprintln(os.Stderr, "#### LOGS ####")
logger.WriteLogs(os.Stderr, statedb.Logs())
}
if bench || ctx.Bool(StatDumpFlag.Name) {
@ -361,7 +299,7 @@ func runCmd(ctx *cli.Context) error {
execution time: %v
allocations: %d
allocated bytes: %d
`, stats.GasUsed, stats.Time, stats.Allocs, stats.BytesAllocated)
`, initialGas-leftOverGas, stats.time, stats.allocs, stats.bytesAllocated)
}
if tracer == nil {
fmt.Printf("%#x\n", output)
@ -372,16 +310,3 @@ allocated bytes: %d
return nil
}
// writeLogs writes vm logs in a readable format to the given writer
func writeLogs(writer io.Writer, logs []*types.Log) {
for _, log := range logs {
fmt.Fprintf(writer, "LOG%d: %x bn=%d txi=%x\n", len(log.Topics), log.Address, log.BlockNumber, log.TxIndex)
for i, topic := range log.Topics {
fmt.Fprintf(writer, "%08d %x\n", i, topic)
}
fmt.Fprint(writer, hex.Dump(log.Data))
fmt.Fprintln(writer)
}
}
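
A self-contained sketch, not part of the diff, of the measurement pattern timedExec relies on when --bench is set: testing.Benchmark wraps an arbitrary closure and the per-operation stats are read from the returned BenchmarkResult. The workload here is a placeholder standing in for execFunc.

package main

import (
	"fmt"
	"testing"
	"time"
)

func main() {
	testing.Init()
	work := func() uint64 { // stand-in for execFunc
		var sum uint64
		for i := uint64(0); i < 1_000_000; i++ {
			sum += i
		}
		return sum
	}
	res := testing.Benchmark(func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			_ = work()
		}
	})
	fmt.Printf("time/op %v, allocs/op %d, bytes/op %d\n",
		time.Duration(res.NsPerOp()), res.AllocsPerOp(), res.AllocedBytesPerOp())
}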

@ -21,138 +21,106 @@ import (
"encoding/json"
"fmt"
"os"
"regexp"
"slices"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/tests"
"github.com/urfave/cli/v2"
)
var (
forkFlag = &cli.StringFlag{
Name: "statetest.fork",
Usage: "Only run tests for the specified fork.",
Category: flags.VMCategory,
}
idxFlag = &cli.IntFlag{
Name: "statetest.index",
Usage: "The index of the subtest to run.",
Category: flags.VMCategory,
Value: -1, // default to select all subtest indices
}
)
var stateTestCommand = &cli.Command{
Action: stateTestCmd,
Name: "statetest",
Usage: "Executes the given state tests. Filenames can be fed via standard input (batch mode) or as an argument (one-off execution).",
ArgsUsage: "<file>",
Flags: slices.Concat([]cli.Flag{
DumpFlag,
HumanReadableFlag,
RunFlag,
}, traceFlags),
}
// StatetestResult contains the execution status after running a state test, any
// error that might have occurred and a dump of the final state if requested.
type StatetestResult struct {
Name string `json:"name"`
Pass bool `json:"pass"`
Root *common.Hash `json:"stateRoot,omitempty"`
Fork string `json:"fork"`
Error string `json:"error,omitempty"`
State *state.Dump `json:"state,omitempty"`
}
func stateTestCmd(ctx *cli.Context) error {
path := ctx.Args().First()
// If path is provided, run the tests at that path.
if len(path) != 0 {
var (
collected = collectFiles(path)
results []testResult
)
for _, fname := range collected {
r, err := runStateTest(ctx, fname)
if err != nil {
return err
}
results = append(results, r...)
}
report(ctx, results)
return nil
// Configure the EVM logger
config := &logger.Config{
EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
DisableStack: ctx.Bool(DisableStackFlag.Name),
DisableStorage: ctx.Bool(DisableStorageFlag.Name),
EnableReturnData: !ctx.Bool(DisableReturnDataFlag.Name),
}
// Otherwise, read filenames from stdin and execute back-to-back.
var cfg vm.Config
switch {
case ctx.Bool(MachineFlag.Name):
cfg.Tracer = logger.NewJSONLogger(config, os.Stderr)
case ctx.Bool(DebugFlag.Name):
cfg.Tracer = logger.NewStructLogger(config).Hooks()
}
// Load the test content from the input file
if len(ctx.Args().First()) != 0 {
return runStateTest(ctx.Args().First(), cfg, ctx.Bool(DumpFlag.Name))
}
// Read filenames from stdin and execute back-to-back
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
fname := scanner.Text()
if len(fname) == 0 {
return nil
}
results, err := runStateTest(ctx, fname)
if err != nil {
if err := runStateTest(fname, cfg, ctx.Bool(DumpFlag.Name)); err != nil {
return err
}
report(ctx, results)
}
return nil
}
// runStateTest loads the state-test given by fname, and executes the test.
func runStateTest(ctx *cli.Context, fname string) ([]testResult, error) {
func runStateTest(fname string, cfg vm.Config, dump bool) error {
src, err := os.ReadFile(fname)
if err != nil {
return nil, err
return err
}
var testsByName map[string]tests.StateTest
if err := json.Unmarshal(src, &testsByName); err != nil {
return nil, fmt.Errorf("unable to read test file %s: %w", fname, err)
}
cfg := vm.Config{Tracer: tracerFromFlags(ctx)}
re, err := regexp.Compile(ctx.String(RunFlag.Name))
if err != nil {
return nil, fmt.Errorf("invalid regex -%s: %v", RunFlag.Name, err)
return err
}
// Iterate over all the tests, run them and aggregate the results
results := make([]testResult, 0, len(testsByName))
results := make([]StatetestResult, 0, len(testsByName))
for key, test := range testsByName {
if !re.MatchString(key) {
continue
}
for i, st := range test.Subtests() {
if idx := ctx.Int(idxFlag.Name); idx != -1 && idx != i {
// If specific index requested, skip all tests that do not match.
continue
}
if fork := ctx.String(forkFlag.Name); fork != "" && st.Fork != fork {
// If specific fork requested, skip all tests that do not match.
continue
}
for _, st := range test.Subtests() {
// Run the test and aggregate the result
result := &testResult{Name: key, Fork: st.Fork, Pass: true}
test.Run(st, cfg, false, rawdb.HashScheme, func(err error, state *tests.StateTestState) {
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
test.Run(st, cfg, false, rawdb.HashScheme, func(err error, tstate *tests.StateTestState) {
var root common.Hash
if state.StateDB != nil {
root = state.StateDB.IntermediateRoot(false)
if tstate.StateDB != nil {
root = tstate.StateDB.IntermediateRoot(false)
result.Root = &root
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root)
// Dump any state to aid debugging.
if ctx.Bool(DumpFlag.Name) {
result.State = dump(state.StateDB)
if dump { // Dump any state to aid debugging
cpy, _ := state.New(root, tstate.StateDB.Database(), nil)
dump := cpy.RawDump(nil)
result.State = &dump
}
}
// Collect bench stats if requested.
if ctx.Bool(BenchFlag.Name) {
_, stats, _ := timedExec(true, func() ([]byte, uint64, error) {
_, _, gasUsed, _ := test.RunNoVerify(st, cfg, false, rawdb.HashScheme)
return nil, gasUsed, nil
})
result.Stats = &stats
}
if err != nil {
// Test failed, mark as so.
// Test failed, mark as so
result.Pass, result.Error = false, err.Error()
return
}
})
results = append(results, *result)
}
}
return results, nil
out, _ := json.MarshalIndent(results, "", " ")
fmt.Println(string(out))
return nil
}
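
A minimal sketch, not taken from the diff, of the state-test loading pattern used by runStateTest above: decode the file into a map of tests.StateTest, filter the test names by regex (the same default the --run flag uses), and enumerate the fork of each subtest. The file path is hypothetical.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"regexp"

	"github.com/ethereum/go-ethereum/tests"
)

func main() {
	src, err := os.ReadFile("./testdata/statetest.json") // hypothetical test file
	if err != nil {
		panic(err)
	}
	var testsByName map[string]tests.StateTest
	if err := json.Unmarshal(src, &testsByName); err != nil {
		panic(err)
	}
	re := regexp.MustCompile(".*") // same default as the --run flag
	for name, test := range testsByName {
		if !re.MatchString(name) {
			continue
		}
		for i, st := range test.Subtests() {
			fmt.Printf("%s subtest %d: fork %s\n", name, i, st.Fork)
		}
	}
}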

@ -287,14 +287,6 @@ func TestT8n(t *testing.T) {
output: t8nOutput{alloc: true, result: true},
expOut: "exp.json",
},
{ // Prague test, EIP-7702 transaction
base: "./testdata/33",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Prague", "",
},
output: t8nOutput{alloc: true, result: true},
expOut: "exp.json",
},
} {
args := []string{"t8n"}
args = append(args, tc.output.get()...)
@ -349,6 +341,98 @@ func lineIterator(path string) func() (string, error) {
}
}
// TestT8nTracing is a test that checks the tracing-output from t8n.
func TestT8nTracing(t *testing.T) {
t.Parallel()
tt := new(testT8n)
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
for i, tc := range []struct {
base string
input t8nInput
expExitCode int
extraArgs []string
expectedTraces []string
}{
{
base: "./testdata/31",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Cancun", "",
},
extraArgs: []string{"--trace"},
expectedTraces: []string{"trace-0-0x88f5fbd1524731a81e49f637aa847543268a5aaf2a6b32a69d2c6d978c45dcfb.jsonl"},
},
{
base: "./testdata/31",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Cancun", "",
},
extraArgs: []string{"--trace.tracer", `
{
result: function(){
return "hello world"
},
fault: function(){}
}`},
expectedTraces: []string{"trace-0-0x88f5fbd1524731a81e49f637aa847543268a5aaf2a6b32a69d2c6d978c45dcfb.json"},
},
{
base: "./testdata/32",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Paris", "",
},
extraArgs: []string{"--trace", "--trace.callframes"},
expectedTraces: []string{"trace-0-0x47806361c0fa084be3caa18afe8c48156747c01dbdfc1ee11b5aecdbe4fcf23e.jsonl"},
},
} {
args := []string{"t8n"}
args = append(args, tc.input.get(tc.base)...)
// Place the output somewhere we can find it
outdir := t.TempDir()
args = append(args, "--output.basedir", outdir)
args = append(args, tc.extraArgs...)
var qArgs []string // quoted args for debugging purposes
for _, arg := range args {
if len(arg) == 0 {
qArgs = append(qArgs, `""`)
} else {
qArgs = append(qArgs, arg)
}
}
tt.Logf("args: %v\n", strings.Join(qArgs, " "))
tt.Run("evm-test", args...)
t.Log(string(tt.Output()))
// Compare the expected traces
for _, traceFile := range tc.expectedTraces {
haveFn := lineIterator(filepath.Join(outdir, traceFile))
wantFn := lineIterator(filepath.Join(tc.base, traceFile))
for line := 0; ; line++ {
want, wErr := wantFn()
have, hErr := haveFn()
if want != have {
t.Fatalf("test %d, trace %v, line %d\nwant: %v\nhave: %v\n",
i, traceFile, line, want, have)
}
if wErr != nil && hErr != nil {
break
}
if wErr != nil {
t.Fatal(wErr)
}
if hErr != nil {
t.Fatal(hErr)
}
t.Logf("%v\n", want)
}
}
if have, want := tt.ExitStatus(), tc.expExitCode; have != want {
t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want)
}
}
}
type t9nInput struct {
inTxs string
stFork string
@ -440,7 +524,7 @@ func TestT9n(t *testing.T) {
ok, err := cmpJson(have, want)
switch {
case err != nil:
t.Log(string(have))
t.Logf(string(have))
t.Fatalf("test %d, json parsing failed: %v", i, err)
case !ok:
t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
@ -575,7 +659,7 @@ func TestB11r(t *testing.T) {
ok, err := cmpJson(have, want)
switch {
case err != nil:
t.Log(string(have))
t.Logf(string(have))
t.Fatalf("test %d, json parsing failed: %v", i, err)
case !ok:
t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
@ -588,88 +672,6 @@ func TestB11r(t *testing.T) {
}
}
func TestEvmRun(t *testing.T) {
t.Parallel()
tt := cmdtest.NewTestCmd(t, nil)
for i, tc := range []struct {
input []string
wantStdout string
wantStderr string
}{
{ // json tracing
input: []string{"run", "--trace", "--trace.format=json", "6040"},
wantStdout: "./testdata/evmrun/1.out.1.txt",
wantStderr: "./testdata/evmrun/1.out.2.txt",
},
{ // Same as above, using the deprecated --json
input: []string{"run", "--json", "6040"},
wantStdout: "./testdata/evmrun/1.out.1.txt",
wantStderr: "./testdata/evmrun/1.out.2.txt",
},
{ // default tracing (struct)
input: []string{"run", "--trace", "0x6040"},
wantStdout: "./testdata/evmrun/2.out.1.txt",
wantStderr: "./testdata/evmrun/2.out.2.txt",
},
{ // default tracing (struct), plus alloc-dump
input: []string{"run", "--trace", "--dump", "0x6040"},
wantStdout: "./testdata/evmrun/3.out.1.txt",
//wantStderr: "./testdata/evmrun/3.out.2.txt",
},
{ // json-tracing, plus alloc-dump
input: []string{"run", "--trace", "--trace.format=json", "--dump", "0x6040"},
wantStdout: "./testdata/evmrun/4.out.1.txt",
//wantStderr: "./testdata/evmrun/4.out.2.txt",
},
{ // md-tracing
input: []string{"run", "--trace", "--trace.format=md", "0x6040"},
wantStdout: "./testdata/evmrun/5.out.1.txt",
wantStderr: "./testdata/evmrun/5.out.2.txt",
},
{ // statetest subcommand
input: []string{"statetest", "./testdata/statetest.json"},
wantStdout: "./testdata/evmrun/6.out.1.txt",
wantStderr: "./testdata/evmrun/6.out.2.txt",
},
{ // statetest subcommand with output
input: []string{"statetest", "--trace", "--trace.format=md", "./testdata/statetest.json"},
wantStdout: "./testdata/evmrun/7.out.1.txt",
wantStderr: "./testdata/evmrun/7.out.2.txt",
},
{ // statetest subcommand with output
input: []string{"statetest", "--trace", "--trace.format=json", "./testdata/statetest.json"},
wantStdout: "./testdata/evmrun/8.out.1.txt",
wantStderr: "./testdata/evmrun/8.out.2.txt",
},
} {
tt.Logf("args: go run ./cmd/evm %v\n", strings.Join(tc.input, " "))
tt.Run("evm-test", tc.input...)
haveStdOut := tt.Output()
tt.WaitExit()
haveStdErr := tt.StderrText()
if have, wantFile := haveStdOut, tc.wantStdout; wantFile != "" {
want, err := os.ReadFile(wantFile)
if err != nil {
t.Fatalf("test %d: could not read expected output: %v", i, err)
}
if string(haveStdOut) != string(want) {
t.Fatalf("test %d, output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
}
}
if have, wantFile := haveStdErr, tc.wantStderr; wantFile != "" {
want, err := os.ReadFile(wantFile)
if err != nil {
t.Fatalf("test %d: could not read expected output: %v", i, err)
}
if have != string(want) {
t.Fatalf("test %d, output wrong\nhave %q\nwant %q\n", i, have, string(want))
}
}
}
}
// cmpJson compares the JSON in two byte slices.
func cmpJson(a, b []byte) (bool, error) {
var j, j2 interface{}
@ -681,93 +683,3 @@ func cmpJson(a, b []byte) (bool, error) {
}
return reflect.DeepEqual(j2, j), nil
}
// TestEVMTracing is a test that checks the tracing-output from evm.
func TestEVMTracing(t *testing.T) {
t.Parallel()
tt := cmdtest.NewTestCmd(t, nil)
for i, tc := range []struct {
base string
input []string
expectedTraces []string
}{
{
base: "./testdata/31",
input: []string{"t8n",
"--input.alloc=./testdata/31/alloc.json", "--input.txs=./testdata/31/txs.json",
"--input.env=./testdata/31/env.json", "--state.fork=Cancun",
"--trace",
},
expectedTraces: []string{"trace-0-0x88f5fbd1524731a81e49f637aa847543268a5aaf2a6b32a69d2c6d978c45dcfb.jsonl"},
},
{
base: "./testdata/31",
input: []string{"t8n",
"--input.alloc=./testdata/31/alloc.json", "--input.txs=./testdata/31/txs.json",
"--input.env=./testdata/31/env.json", "--state.fork=Cancun",
"--trace.tracer", `
{
result: function(){
return "hello world"
},
fault: function(){}
}`,
},
expectedTraces: []string{"trace-0-0x88f5fbd1524731a81e49f637aa847543268a5aaf2a6b32a69d2c6d978c45dcfb.json"},
},
{
base: "./testdata/32",
input: []string{"t8n",
"--input.alloc=./testdata/32/alloc.json", "--input.txs=./testdata/32/txs.json",
"--input.env=./testdata/32/env.json", "--state.fork=Paris",
"--trace", "--trace.callframes",
},
expectedTraces: []string{"trace-0-0x47806361c0fa084be3caa18afe8c48156747c01dbdfc1ee11b5aecdbe4fcf23e.jsonl"},
},
// TODO, make it possible to run tracers on statetests, e.g:
//{
// base: "./testdata/31",
// input: []string{"statetest", "--trace", "--trace.tracer", `{
// result: function(){
// return "hello world"
// },
// fault: function(){}
//}`, "./testdata/statetest.json"},
// expectedTraces: []string{"trace-0-0x88f5fbd1524731a81e49f637aa847543268a5aaf2a6b32a69d2c6d978c45dcfb.json"},
// },
} {
// Place the output somewhere we can find it
outdir := t.TempDir()
args := append(tc.input, "--output.basedir", outdir)
tt.Run("evm-test", args...)
tt.Logf("args: go run ./cmd/evm %v\n", args)
tt.WaitExit()
//t.Log(string(tt.Output()))
// Compare the expected traces
for _, traceFile := range tc.expectedTraces {
haveFn := lineIterator(filepath.Join(outdir, traceFile))
wantFn := lineIterator(filepath.Join(tc.base, traceFile))
for line := 0; ; line++ {
want, wErr := wantFn()
have, hErr := haveFn()
if want != have {
t.Fatalf("test %d, trace %v, line %d\nwant: %v\nhave: %v\n",
i, traceFile, line, want, have)
}
if wErr != nil && hErr != nil {
break
}
if wErr != nil {
t.Fatal(wErr)
}
if hErr != nil {
t.Fatal(hErr)
}
//t.Logf("%v\n", want)
}
}
}
}
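
A plausible sketch, not taken from the diff, of the lineIterator helper used by the trace-comparison loops above; the real implementation in the repository may differ. It returns a closure that yields one line per call and a non-nil error once the file is exhausted, which is the contract the loops above rely on.

package main

import (
	"bufio"
	"errors"
	"fmt"
	"os"
)

func lineIterator(path string) func() (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return func() (string, error) { return "", err }
	}
	scanner := bufio.NewScanner(f)
	return func() (string, error) {
		if scanner.Scan() {
			return scanner.Text(), nil
		}
		f.Close()
		if err := scanner.Err(); err != nil {
			return "", err
		}
		return "", errors.New("end of file reached")
	}
}

func main() {
	next := lineIterator("trace-0-0xabc.jsonl") // hypothetical trace file
	for {
		line, err := next()
		if err != nil {
			break
		}
		fmt.Println(line)
	}
}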

@ -40,7 +40,6 @@
}
],
"currentDifficulty": "0x20000",
"gasUsed": "0x5208",
"requests": null
"gasUsed": "0x5208"
}
}

@ -37,7 +37,6 @@
],
"currentDifficulty": "0x20000",
"gasUsed": "0x109a0",
"currentBaseFee": "0x36b",
"requests": null
"currentBaseFee": "0x36b"
}
}

@ -8,7 +8,6 @@
"currentDifficulty": "0x2000020000000",
"receipts": [],
"gasUsed": "0x0",
"currentBaseFee": "0x500",
"requests": null
"currentBaseFee": "0x500"
}
}

@ -8,7 +8,6 @@
"receipts": [],
"currentDifficulty": "0x1ff8020000000",
"gasUsed": "0x0",
"currentBaseFee": "0x500",
"requests": null
"currentBaseFee": "0x500"
}
}

@ -8,7 +8,6 @@
"receipts": [],
"currentDifficulty": "0x1ff9000000000",
"gasUsed": "0x0",
"currentBaseFee": "0x500",
"requests": null
"currentBaseFee": "0x500"
}
}

@ -8,7 +8,6 @@
"currentDifficulty": "0x2000000200000",
"receipts": [],
"gasUsed": "0x0",
"currentBaseFee": "0x500",
"requests": null
"currentBaseFee": "0x500"
}
}

@ -1,14 +1,13 @@
{
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"currentDifficulty": "0x2000000004000",
"gasUsed": "0x0",
"currentBaseFee": "0x500",
"requests": null
}
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"currentDifficulty": "0x2000000004000",
"gasUsed": "0x0",
"currentBaseFee": "0x500"
}
}

@ -8,7 +8,6 @@
"currentDifficulty": "0x2000080000000",
"receipts": [],
"gasUsed": "0x0",
"currentBaseFee": "0x500",
"requests": null
"currentBaseFee": "0x500"
}
}

@ -21,7 +21,6 @@
}
],
"currentDifficulty": "0x20000",
"gasUsed": "0x520b",
"requests": null
"gasUsed": "0x520b"
}
}

@ -51,7 +51,6 @@
],
"currentDifficulty": null,
"gasUsed": "0x10306",
"currentBaseFee": "0x500",
"requests": null
"currentBaseFee": "0x500"
}
}

@ -34,7 +34,6 @@
],
"currentDifficulty": null,
"gasUsed": "0x5208",
"currentBaseFee": "0x460",
"requests": null
"currentBaseFee": "0x460"
}
}

@ -15,7 +15,6 @@
"currentDifficulty": null,
"gasUsed": "0x0",
"currentBaseFee": "0x500",
"withdrawalsRoot": "0x4921c0162c359755b2ae714a0978a1dad2eb8edce7ff9b38b9b6fc4cbc547eb5",
"requests": null
"withdrawalsRoot": "0x4921c0162c359755b2ae714a0978a1dad2eb8edce7ff9b38b9b6fc4cbc547eb5"
}
}

@ -42,7 +42,6 @@
"currentBaseFee": "0x9",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"currentExcessBlobGas": "0x0",
"blobGasUsed": "0x20000",
"requests": null
"blobGasUsed": "0x20000"
}
}

@ -40,7 +40,6 @@
"currentBaseFee": "0x9",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"currentExcessBlobGas": "0x0",
"blobGasUsed": "0x0",
"requests": null
"blobGasUsed": "0x0"
}
}

@ -34,7 +34,6 @@
}
],
"currentDifficulty": "0x20000",
"gasUsed": "0x521f",
"requests": null
"gasUsed": "0x521f"
}
}

@ -59,7 +59,6 @@
"currentBaseFee": "0x7",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"currentExcessBlobGas": "0x0",
"blobGasUsed": "0x0",
"requests": null
"blobGasUsed": "0x0"
}
}

@ -1 +0,0 @@
This test sets some EIP-7702 delegations and calls them.

@ -1,30 +0,0 @@
{
"0x8a0a19589531694250d570040a0c4b74576919b8": {
"nonce": "0x00",
"balance": "0x0de0b6b3a7640000",
"code": "0x600060006000600060007310000000000000000000000000000000000000015af1600155600060006000600060007310000000000000000000000000000000000000025af16002553d600060003e600051600355",
"storage": {
"0x01": "0x0100",
"0x02": "0x0100",
"0x03": "0x0100"
}
},
"0x000000000000000000000000000000000000aaaa": {
"nonce": "0x00",
"balance": "0x4563918244f40000",
"code": "0x58808080600173703c4b2bd70c169f5717101caee543299fc946c75af100",
"storage": {}
},
"0x000000000000000000000000000000000000bbbb": {
"nonce": "0x00",
"balance": "0x29a2241af62c0000",
"code": "0x6042805500",
"storage": {}
},
"0x71562b71999873DB5b286dF957af199Ec94617F7": {
"nonce": "0x00",
"balance": "0x6124fee993bc0000",
"code": "0x",
"storage": {}
}
}

@ -1,14 +0,0 @@
{
"currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"currentGasLimit": "71794957647893862",
"currentNumber": "1",
"currentTimestamp": "1000",
"currentRandom": "0",
"currentDifficulty": "0",
"blockHashes": {},
"ommers": [],
"currentBaseFee": "7",
"parentUncleHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"withdrawals": [],
"parentBeaconBlockRoot": "0x0000000000000000000000000000000000000000000000000000000000000000"
}

Some files were not shown because too many files have changed in this diff.