Compare commits
16 Commits
bc-fusion-...v1.3.11
| SHA1 |
|---|
| 9fb0241eab |
| c8cc91963f |
| bd13416162 |
| b7b64da564 |
| 73f27a590f |
| 5e74ea650d |
| 5378df3702 |
| 40cae45436 |
| 361e8413e6 |
| 36a283ef98 |
| 78d1cade19 |
| 82beb2c5f3 |
| c6aeee2001 |
| f28b98a994 |
| 5ee77bbe8b |
| fe928d4778 |
CHANGELOG.md (22 changed lines)
```diff
@@ -1,4 +1,26 @@
 # Changelog
+## v1.3.11
+BUGFIX
+* [\#2288](https://github.com/bnb-chain/bsc/pull/2288) fix: add FeynmanFix upgrade for a testnet issue
+
+## v1.3.10
+FEATURE
+* [\#2047](https://github.com/bnb-chain/bsc/pull/2047) feat: add new fork block and precompile contract for BEP294 and BEP299
+
+## v1.3.9
+FEATURE
+* [\#2186](https://github.com/bnb-chain/bsc/pull/2186) log: support maxBackups in config.toml
+
+BUGFIX
+* [\#2160](https://github.com/bnb-chain/bsc/pull/2160) cmd: fix dump cli cannot work in path mode
+* [\#2183](https://github.com/bnb-chain/bsc/pull/2183) p2p: resolved deadlock on p2p server shutdown
+
+IMPROVEMENT
+* [\#2177](https://github.com/bnb-chain/bsc/pull/2177) build(deps): bump github.com/quic-go/quic-go from 0.39.3 to 0.39.4
+* [\#2185](https://github.com/bnb-chain/bsc/pull/2185) consensus/parlia: set nonce before evm run
+* [\#2190](https://github.com/bnb-chain/bsc/pull/2190) fix(legacypool): deprecate already known error
+* [\#2195](https://github.com/bnb-chain/bsc/pull/2195) eth/fetcher: downgrade state tx log
+
 ## v1.3.8
 FEATURE
 * [\#2074](https://github.com/bnb-chain/bsc/pull/2074) faucet: new faucet client
```
```diff
@@ -199,7 +199,7 @@ Delete the selected BLS account from the BLS wallet.`,
     Name:      "generate-proof",
     Usage:     "Generate ownership proof for the selected BLS account from the BLS wallet",
     Action:    blsAccountGenerateProof,
-    ArgsUsage: "<BLS pubkey>",
+    ArgsUsage: "<operator address> <BLS pubkey>",
     Category:  "BLS ACCOUNT COMMANDS",
     Flags: []cli.Flag{
        utils.DataDirFlag,
```
```diff
@@ -202,6 +202,10 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
        v := ctx.Uint64(utils.OverrideFeynman.Name)
        cfg.Eth.OverrideFeynman = &v
    }
+   if ctx.IsSet(utils.OverrideFeynmanFix.Name) {
+       v := ctx.Uint64(utils.OverrideFeynmanFix.Name)
+       cfg.Eth.OverrideFeynmanFix = &v
+   }
    backend, _ := utils.RegisterEthService(stack, &cfg.Eth)

    // Configure log filter RPC API.
```
```diff
@@ -75,6 +75,7 @@ var (
    utils.OverrideCancun,
    utils.OverrideVerkle,
    utils.OverrideFeynman,
+   utils.OverrideFeynmanFix,
    utils.EnablePersonal,
    utils.TxPoolLocalsFlag,
    utils.TxPoolNoLocalsFlag,
```
```diff
@@ -18,7 +18,6 @@
 package utils

 import (
-   "bufio"
    "context"
    "crypto/ecdsa"
    "encoding/hex"
```
```diff
@@ -320,6 +319,11 @@ var (
        Usage:    "Manually specify the Feynman fork timestamp, overriding the bundled setting",
        Category: flags.EthCategory,
    }
+   OverrideFeynmanFix = &cli.Uint64Flag{
+       Name:     "override.feynmanfix",
+       Usage:    "Manually specify the FeynmanFix fork timestamp, overriding the bundled setting",
+       Category: flags.EthCategory,
+   }
    SyncModeFlag = &flags.TextMarshalerFlag{
        Name:  "syncmode",
        Usage: `Blockchain sync mode ("snap" or "full")`,
```
```diff
@@ -1889,7 +1893,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
    if ctx.IsSet(StateHistoryFlag.Name) {
        cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name)
    }
-   scheme, err := CompareStateSchemeCLIWithConfig(ctx)
+   scheme, err := ParseCLIAndConfigStateScheme(ctx.String(StateSchemeFlag.Name), cfg.StateScheme)
    if err != nil {
        Fatalf("%v", err)
    }
```
```diff
@@ -2358,11 +2362,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
    if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
        Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
    }
-   provided, err := CompareStateSchemeCLIWithConfig(ctx)
-   if err != nil {
-       Fatalf("%v", err)
-   }
-   scheme, err := rawdb.ParseStateScheme(provided, chainDb)
+   scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), chainDb)
    if err != nil {
        Fatalf("%v", err)
    }
```
```diff
@@ -2430,11 +2430,7 @@ func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, read
    config := &trie.Config{
        Preimages: preimage,
    }
-   provided, err := CompareStateSchemeCLIWithConfig(ctx)
-   if err != nil {
-       Fatalf("%v", err)
-   }
-   scheme, err := rawdb.ParseStateScheme(provided, disk)
+   scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), disk)
    if err != nil {
        Fatalf("%v", err)
    }
```
```diff
@@ -2453,27 +2449,15 @@ func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, read
    return trie.NewDatabase(disk, config)
 }

-// CompareStateSchemeCLIWithConfig compare state scheme in CLI with config whether are equal.
-func CompareStateSchemeCLIWithConfig(ctx *cli.Context) (string, error) {
-   var (
-       cfgScheme string
-       err       error
-   )
-   if file := ctx.String("config"); file != "" {
-       // we don't validate cfgScheme because it's already checked in cmd/geth/loadBaseConfig
-       if cfgScheme, err = scanConfigForStateScheme(file); err != nil {
-           log.Error("Failed to parse config file", "error", err)
-           return "", err
-       }
-   }
-   if !ctx.IsSet(StateSchemeFlag.Name) {
+// ParseCLIAndConfigStateScheme parses state scheme in CLI and config.
+func ParseCLIAndConfigStateScheme(cliScheme, cfgScheme string) (string, error) {
+   if cliScheme == "" {
        if cfgScheme != "" {
            log.Info("Use config state scheme", "config", cfgScheme)
        }
        return cfgScheme, nil
    }

-   cliScheme := ctx.String(StateSchemeFlag.Name)
    if !rawdb.ValidateStateScheme(cliScheme) {
        return "", fmt.Errorf("invalid state scheme in CLI: %s", cliScheme)
    }
```
```diff
@@ -2483,35 +2467,3 @@ func CompareStateSchemeCLIWithConfig(ctx *cli.Context) (string, error) {
    }
    return "", fmt.Errorf("incompatible state scheme, CLI: %s, config: %s", cliScheme, cfgScheme)
 }
-
-func scanConfigForStateScheme(file string) (string, error) {
-   f, err := os.Open(file)
-   if err != nil {
-       return "", err
-   }
-   defer f.Close()
-
-   scanner := bufio.NewScanner(f)
-   targetStr := "StateScheme"
-   for scanner.Scan() {
-       line := scanner.Text()
-       if strings.Contains(line, targetStr) {
-           return indexStateScheme(line), nil
-       }
-   }
-
-   if err = scanner.Err(); err != nil {
-       return "", err
-   }
-   return "", nil
-}
-
-func indexStateScheme(str string) string {
-   i1 := strings.Index(str, "\"")
-   i2 := strings.LastIndex(str, "\"")
-
-   if i1 != -1 && i2 != -1 && i1 < i2 {
-       return str[i1+1 : i2]
-   }
-   return ""
-}
```
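The refactor above turns state-scheme resolution into a pure function of the CLI value and the config value, which makes the precedence rules easy to test in isolation. A minimal self-contained sketch of those rules, with a local validScheme helper standing in for rawdb.ValidateStateScheme:

```go
package main

import (
	"errors"
	"fmt"
)

// validScheme stands in for rawdb.ValidateStateScheme ("hash" or "path").
func validScheme(s string) bool { return s == "hash" || s == "path" }

// resolveScheme mirrors the precedence of ParseCLIAndConfigStateScheme:
// an empty CLI value falls back to the config value; a set CLI value must
// be valid and must not contradict a non-empty config value.
func resolveScheme(cliScheme, cfgScheme string) (string, error) {
	if cliScheme == "" {
		return cfgScheme, nil // config wins when the flag is unset
	}
	if !validScheme(cliScheme) {
		return "", fmt.Errorf("invalid state scheme in CLI: %s", cliScheme)
	}
	if cfgScheme == "" || cliScheme == cfgScheme {
		return cliScheme, nil
	}
	return "", errors.New("incompatible state scheme between CLI and config")
}

func main() {
	for _, c := range [][2]string{{"", "path"}, {"hash", ""}, {"hash", "path"}} {
		s, err := resolveScheme(c[0], c[1])
		fmt.Printf("cli=%q cfg=%q -> %q, %v\n", c[0], c[1], s, err)
	}
}
```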
```diff
@@ -18,13 +18,8 @@
 package utils

 import (
-   "os"
    "reflect"
    "testing"
-
-   "github.com/stretchr/testify/assert"
-
-   "github.com/ethereum/go-ethereum/core/rawdb"
 )

 func Test_SplitTagsFlag(t *testing.T) {
```
```diff
@@ -67,126 +62,3 @@ func Test_SplitTagsFlag(t *testing.T) {
        })
    }
 }
-
-func Test_parseConfig(t *testing.T) {
-   tests := []struct {
-       name         string
-       fn           func() string
-       wantedResult string
-       wantedIsErr  bool
-       wantedErrStr string
-   }{
-       {
-           name: "path",
-           fn: func() string {
-               tomlString := `[Eth]
-NetworkId = 56
-StateScheme = "path"`
-               return createTempTomlFile(t, tomlString)
-           },
-           wantedResult: rawdb.PathScheme,
-           wantedIsErr:  false,
-           wantedErrStr: "",
-       },
-       {
-           name: "hash",
-           fn: func() string {
-               tomlString := `[Eth]
-NetworkId = 56
-StateScheme = "hash"`
-               return createTempTomlFile(t, tomlString)
-           },
-           wantedResult: rawdb.HashScheme,
-           wantedIsErr:  false,
-           wantedErrStr: "",
-       },
-       {
-           name: "empty state scheme",
-           fn: func() string {
-               tomlString := `[Eth]
-NetworkId = 56
-StateScheme = ""`
-               return createTempTomlFile(t, tomlString)
-           },
-           wantedResult: "",
-           wantedIsErr:  false,
-           wantedErrStr: "",
-       },
-       {
-           name: "unset state scheme",
-           fn: func() string {
-               tomlString := `[Eth]
-NetworkId = 56`
-               return createTempTomlFile(t, tomlString)
-           },
-           wantedResult: "",
-           wantedIsErr:  false,
-           wantedErrStr: "",
-       },
-       {
-           name:         "failed to open file",
-           fn:           func() string { return "" },
-           wantedResult: "",
-           wantedIsErr:  true,
-           wantedErrStr: "open : no such file or directory",
-       },
-   }
-   for _, tt := range tests {
-       t.Run(tt.name, func(t *testing.T) {
-           result, err := scanConfigForStateScheme(tt.fn())
-           if tt.wantedIsErr {
-               assert.Contains(t, err.Error(), tt.wantedErrStr)
-           } else {
-               assert.Nil(t, err)
-           }
-           assert.Equal(t, tt.wantedResult, result)
-       })
-   }
-}
-
-// createTempTomlFile is a helper function to create a temp file with the provided TOML content
-func createTempTomlFile(t *testing.T, content string) string {
-   t.Helper()
-
-   dir := t.TempDir()
-   file, err := os.CreateTemp(dir, "config.toml")
-   if err != nil {
-       t.Fatalf("Unable to create temporary file: %v", err)
-   }
-   defer file.Close()
-
-   _, err = file.WriteString(content)
-   if err != nil {
-       t.Fatalf("Unable to write to temporary file: %v", err)
-   }
-   return file.Name()
-}
-
-func Test_parseString(t *testing.T) {
-   tests := []struct {
-       name       string
-       arg        string
-       wantResult string
-   }{
-       {
-           name:       "hash string",
-           arg:        "\"hash\"",
-           wantResult: rawdb.HashScheme,
-       },
-       {
-           name:       "path string",
-           arg:        "\"path\"",
-           wantResult: rawdb.PathScheme,
-       },
-       {
-           name:       "empty string",
-           arg:        "",
-           wantResult: "",
-       },
-       {
-           name:       "empty string",
-           arg:        "\"\"",
-           wantResult: "",
-       },
-   }
-   for _, tt := range tests {
-       t.Run(tt.name, func(t *testing.T) {
-           if got := indexStateScheme(tt.arg); got != tt.wantResult {
-               t.Errorf("parseString() = %v, want %v", got, tt.wantResult)
-           }
-       })
-   }
-}
```
File diff suppressed because it is too large
```diff
@@ -18,10 +18,7 @@ import (
    "github.com/ethereum/go-ethereum/rpc"
 )

-// TODO: SecondsPerDay represents the seconds in a day, it should be 86400
-// We set it to 60 for testing purpose and we will change it back to 86400 when launching
-// const SecondsPerDay uint64 = 86400
-const SecondsPerDay uint64 = 60
+const SecondsPerDay uint64 = 86400

 // the params should be two blocks' time(timestamp)
 func sameDayInUTC(first, second uint64) bool {
```
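With SecondsPerDay restored to 86400, a same-UTC-day test on two unix timestamps reduces to integer division by the day length. The body of sameDayInUTC is outside this hunk, so the sketch below is an assumption about the obvious implementation, shown only to illustrate the constant's role:

```go
package main

import "fmt"

const SecondsPerDay uint64 = 86400

// sameDayInUTC reports whether two unix timestamps fall in the same UTC day.
// Sketch only: the real function body is not shown in the diff above.
func sameDayInUTC(first, second uint64) bool {
	return first/SecondsPerDay == second/SecondsPerDay
}

func main() {
	// 2024-03-11 06:59:59 UTC and 07:00:01 UTC are the same day...
	fmt.Println(sameDayInUTC(1710140399, 1710140401)) // true
	// ...but 23:59:59 and 00:00:01 the next day are not.
	fmt.Println(sameDayInUTC(1710201599, 1710201601)) // false
}
```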
```diff
@@ -276,11 +276,12 @@ func (e *GenesisMismatchError) Error() string {
 // ChainOverrides contains the changes to chain config
 // Typically, these modifications involve hardforks that are not enabled on the BSC mainnet, intended for testing purposes.
 type ChainOverrides struct {
-   OverrideShanghai *uint64
-   OverrideKepler   *uint64
-   OverrideCancun   *uint64
-   OverrideVerkle   *uint64
-   OverrideFeynman  *uint64
+   OverrideShanghai   *uint64
+   OverrideKepler     *uint64
+   OverrideCancun     *uint64
+   OverrideVerkle     *uint64
+   OverrideFeynman    *uint64
+   OverrideFeynmanFix *uint64
 }

 // SetupGenesisBlock writes or updates the genesis block in db.
@@ -321,6 +322,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen
        if overrides != nil && overrides.OverrideFeynman != nil {
            config.FeynmanTime = overrides.OverrideFeynman
        }
+       if overrides != nil && overrides.OverrideFeynmanFix != nil {
+           config.FeynmanFixTime = overrides.OverrideFeynmanFix
+       }
    }
 }
 // Just commit the new block if there is no stored genesis block.
```
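Together with the cmd/geth and eth hunks, this completes the plumbing for the new --override.feynmanfix flag: the flag sets a pointer in the eth config, the backend copies it into ChainOverrides, and SetupGenesisBlockWithOverride writes it into the stored chain config. A condensed sketch of that flow using stand-in struct types (only the field names are taken from the diff; everything else here is hypothetical):

```go
package main

import "fmt"

// Stand-ins for ethconfig.Config, ChainOverrides and params.ChainConfig.
type ethConfig struct{ OverrideFeynmanFix *uint64 }
type chainOverrides struct{ OverrideFeynmanFix *uint64 }
type chainConfig struct{ FeynmanFixTime *uint64 }

func main() {
	// cmd/geth: --override.feynmanfix 1711342800 sets the pointer in the eth config.
	v := uint64(1711342800)
	cfg := ethConfig{OverrideFeynmanFix: &v}

	// eth/backend: the config value is copied into the genesis overrides.
	var ov chainOverrides
	if cfg.OverrideFeynmanFix != nil {
		ov.OverrideFeynmanFix = cfg.OverrideFeynmanFix
	}

	// core/genesis: a non-nil override replaces the bundled fork timestamp.
	var cc chainConfig
	if ov.OverrideFeynmanFix != nil {
		cc.FeynmanFixTime = ov.OverrideFeynmanFix
	}
	fmt.Println("FeynmanFixTime:", *cc.FeynmanFixTime)
}
```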
```diff
@@ -335,7 +335,7 @@ func ParseStateScheme(provided string, disk ethdb.Database) (string, error) {
    if stored == "" {
        // use default scheme for empty database, flip it when
        // path mode is chosen as default
-       log.Info("State schema set to default", "scheme", "hash")
+       log.Info("State scheme set to default", "scheme", "hash")
        return HashScheme, nil
    }
    log.Info("State scheme set to already existing disk db", "scheme", stored)
```
```diff
@@ -129,6 +129,8 @@ func InspectFreezerTable(ancient string, freezerName string, tableName string, s
    switch freezerName {
    case chainFreezerName:
        path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
+   case stateFreezerName:
+       path, tables = filepath.Join(ancient, freezerName), stateFreezerNoSnappy
    default:
        return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
    }
```
```diff
@@ -188,19 +188,27 @@ func (batch *freezerTableBatch) maybeCommit() error {

 // commit writes the batched items to the backing freezerTable.
 func (batch *freezerTableBatch) commit() error {
-   // Write data.
+   // Write data. The head file is fsync'd after write to ensure the
+   // data is truly transferred to disk.
    _, err := batch.t.head.Write(batch.dataBuffer)
    if err != nil {
        return err
    }
+   if err := batch.t.head.Sync(); err != nil {
+       return err
+   }
    dataSize := int64(len(batch.dataBuffer))
    batch.dataBuffer = batch.dataBuffer[:0]

-   // Write indices.
+   // Write indices. The index file is fsync'd after write to ensure the
+   // data indexes are truly transferred to disk.
    _, err = batch.t.index.Write(batch.indexBuffer)
    if err != nil {
        return err
    }
+   if err := batch.t.index.Sync(); err != nil {
+       return err
+   }
    indexSize := int64(len(batch.indexBuffer))
    batch.indexBuffer = batch.indexBuffer[:0]
```
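The rationale for the added Sync calls: a successful File.Write only hands the bytes to the OS page cache, so a crash before writeback can lose a batch the freezer already considers committed; fsync forces the data to stable storage first. A minimal sketch of the write-then-sync pattern on an ordinary file:

```go
package main

import (
	"log"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "freezer-*.dat")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if _, err := f.Write([]byte("batched items")); err != nil {
		log.Fatal(err) // data may still be only in the page cache here
	}
	if err := f.Sync(); err != nil {
		log.Fatal(err) // after a successful fsync it is on disk
	}
	log.Println("durably written:", f.Name())
}
```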
```diff
@@ -223,7 +223,9 @@ func (t *freezerTable) repair() error {
        if t.readonly {
            return fmt.Errorf("index file(path: %s, name: %s) size is not a multiple of %d", t.path, t.name, indexEntrySize)
        }
-       truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path
+       if err := truncateFreezerFile(t.index, stat.Size()-overflow); err != nil {
+           return err
+       } // New file can't trigger this path
    }
    // Retrieve the file sizes and prepare for truncation
    if stat, err = t.index.Stat(); err != nil {
```
```diff
@@ -268,8 +270,8 @@ func (t *freezerTable) repair() error {
    // Print an error log if the index is corrupted due to an incorrect
    // last index item. While it is theoretically possible to have a zero offset
    // by storing all zero-size items, it is highly unlikely to occur in practice.
-   if lastIndex.offset == 0 && offsetsSize%indexEntrySize > 1 {
-       log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "items", offsetsSize%indexEntrySize-1)
+   if lastIndex.offset == 0 && offsetsSize/indexEntrySize > 1 {
+       log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "indexes", offsetsSize/indexEntrySize)
    }
    if t.readonly {
        t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
```
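The check fix is a `%` to `/` change: since the index file was already truncated to a multiple of the entry size, offsetsSize % indexEntrySize is always 0 and the old condition could never fire, while offsetsSize / indexEntrySize is the entry count the log message actually wants. A quick illustration, assuming the freezer's 6-byte index entries (2-byte file number plus 4-byte offset):

```go
package main

import "fmt"

const indexEntrySize = 6 // assumption: uint16 filenum + uint32 offset

func main() {
	offsetsSize := int64(18) // index file trimmed to a multiple of the entry size
	fmt.Println("remainder:", offsetsSize%indexEntrySize) // 0 -> old check never true
	fmt.Println("entries:  ", offsetsSize/indexEntrySize) // 3 -> what the log wants
}
```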
```diff
@@ -424,6 +426,9 @@ func (t *freezerTable) truncateHead(items uint64) error {
    if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
        return err
    }
+   if err := t.index.Sync(); err != nil {
+       return err
+   }
    // Calculate the new expected size of the data file and truncate it
    var expected indexEntry
    if length == 0 {
```
```diff
@@ -446,6 +451,7 @@ func (t *freezerTable) truncateHead(items uint64) error {
        // Release any files _after the current head -- both the previous head
        // and any files which may have been opened for reading
        t.releaseFilesAfter(expected.filenum, true)
+
        // Set back the historic head
        t.head = newHead
        t.headId = expected.filenum
```
```diff
@@ -453,6 +459,9 @@ func (t *freezerTable) truncateHead(items uint64) error {
    if err := truncateFreezerFile(t.head, int64(expected.offset)); err != nil {
        return err
    }
+   if err := t.head.Sync(); err != nil {
+       return err
+   }
    // All data files truncated, set internal counters and return
    t.headBytes = int64(expected.offset)
    t.items.Store(items)
```
```diff
@@ -597,10 +606,12 @@ func (t *freezerTable) Close() error {
    // error on Windows.
    doClose(t.index, true, true)
    doClose(t.meta, true, true)
+
    // The preopened non-head data-files are all opened in readonly.
    // The head is opened in rw-mode, so we sync it here - but since it's also
    // part of t.files, it will be closed in the loop below.
    doClose(t.head, true, false) // sync but do not close
+
    for _, f := range t.files {
        doClose(f, false, true) // close but do not sync
    }
```
```diff
@@ -73,11 +73,7 @@ func copyFrom(srcPath, destPath string, offset uint64, before func(f *os.File) e
        return err
    }
    f = nil
-
-   if err := os.Rename(fname, destPath); err != nil {
-       return err
-   }
-   return nil
+   return os.Rename(fname, destPath)
 }

 // openFreezerFileForAppend opens a freezer table file and seeks to the end
```
File diff suppressed because one or more lines are too long
````diff
@@ -39,9 +39,7 @@ web3.eth.sendTransaction({
 web3.eth.sendTransaction({
   from: "consensus address of your validator",
   to: "0x0000000000000000000000000000000000001000",
   gas: "1000000",
   data: "0x04c4fec6"
 })
 ```
-
-
````
```diff
@@ -193,6 +193,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
        chainConfig.FeynmanTime = config.OverrideFeynman
        overrides.OverrideFeynman = config.OverrideFeynman
    }
+   if config.OverrideFeynmanFix != nil {
+       chainConfig.FeynmanFixTime = config.OverrideFeynmanFix
+       overrides.OverrideFeynmanFix = config.OverrideFeynmanFix
+   }

    eth := &Ethereum{
        config: config,
```
```diff
@@ -200,6 +200,9 @@ type Config struct {

    // OverrideFeynman (TODO: remove after the fork)
    OverrideFeynman *uint64 `toml:",omitempty"`
+
+   // OverrideFeynmanFix (TODO: remove after the fork)
+   OverrideFeynmanFix *uint64 `toml:",omitempty"`
 }

 // CreateConsensusEngine creates a consensus engine for the given chain config.
```
```diff
@@ -338,7 +338,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
        // If 'other reject' is >25% of the deliveries in any batch, sleep a bit.
        if otherreject > 128/4 {
            time.Sleep(200 * time.Millisecond)
-           log.Warn("Peer delivering stale transactions", "peer", peer, "rejected", otherreject)
+           log.Debug("Peer delivering stale transactions", "peer", peer, "rejected", otherreject)
        }
    }
    select {
```
```diff
@@ -227,8 +227,8 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks uint64, unresolvedL
        if p < 0 || p > 100 {
            return common.Big0, nil, nil, nil, fmt.Errorf("%w: %f", errInvalidPercentile, p)
        }
-       if i > 0 && p < rewardPercentiles[i-1] {
-           return common.Big0, nil, nil, nil, fmt.Errorf("%w: #%d:%f > #%d:%f", errInvalidPercentile, i-1, rewardPercentiles[i-1], i, p)
+       if i > 0 && p <= rewardPercentiles[i-1] {
+           return common.Big0, nil, nil, nil, fmt.Errorf("%w: #%d:%f >= #%d:%f", errInvalidPercentile, i-1, rewardPercentiles[i-1], i, p)
        }
    }
    var (
```
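The FeeHistory fix tightens `<` to `<=`: reward percentiles must now be strictly increasing within [0, 100], so duplicates are rejected rather than silently accepted. A standalone sketch of the tightened validation loop:

```go
package main

import (
	"errors"
	"fmt"
)

var errInvalidPercentile = errors.New("invalid reward percentile")

// validatePercentiles mirrors the fixed FeeHistory check: each value must be
// in [0, 100] and strictly greater than its predecessor.
func validatePercentiles(ps []float64) error {
	for i, p := range ps {
		if p < 0 || p > 100 {
			return fmt.Errorf("%w: %f", errInvalidPercentile, p)
		}
		if i > 0 && p <= ps[i-1] {
			return fmt.Errorf("%w: #%d:%f >= #%d:%f", errInvalidPercentile, i-1, ps[i-1], i, p)
		}
	}
	return nil
}

func main() {
	fmt.Println(validatePercentiles([]float64{10, 50, 90})) // <nil>
	fmt.Println(validatePercentiles([]float64{10, 10, 90})) // rejected since the fix
	fmt.Println(validatePercentiles([]float64{10, 5}))      // rejected before and after
}
```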
```diff
@@ -10,6 +10,8 @@ import (
    "time"
 )

+const backupTimeFormat = "2006-01-02_15"
+
 type TimeTicker struct {
    stop chan struct{}
    C    <-chan time.Time
```
```diff
@@ -69,19 +71,24 @@ type AsyncFileWriter struct {
    buf        chan []byte
    stop       chan struct{}
    timeTicker *TimeTicker
+
+   rotateHours uint
+   maxBackups  int
 }

-func NewAsyncFileWriter(filePath string, maxBytesSize int64, rotateHours uint) *AsyncFileWriter {
+func NewAsyncFileWriter(filePath string, maxBytesSize int64, maxBackups int, rotateHours uint) *AsyncFileWriter {
    absFilePath, err := filepath.Abs(filePath)
    if err != nil {
        panic(fmt.Sprintf("get file path of logger error. filePath=%s, err=%s", filePath, err))
    }

    return &AsyncFileWriter{
-       filePath:   absFilePath,
-       buf:        make(chan []byte, maxBytesSize),
-       stop:       make(chan struct{}),
-       timeTicker: NewTimeTicker(rotateHours),
+       filePath:    absFilePath,
+       buf:         make(chan []byte, maxBytesSize),
+       stop:        make(chan struct{}),
+       rotateHours: rotateHours,
+       maxBackups:  maxBackups,
+       timeTicker:  NewTimeTicker(rotateHours),
    }
 }
```
```diff
@@ -178,6 +185,9 @@ func (w *AsyncFileWriter) rotateFile() {
        if err := w.initLogFile(); err != nil {
            fmt.Fprintf(os.Stderr, "init log file error. err=%s", err)
        }
+       if err := w.removeExpiredFile(); err != nil {
+           fmt.Fprintf(os.Stderr, "remove expired file error. err=%s", err)
+       }
    default:
    }
 }
```
```diff
@@ -222,5 +232,29 @@ func (w *AsyncFileWriter) flushAndClose() error {
 }

 func (w *AsyncFileWriter) timeFilePath(filePath string) string {
-   return filePath + "." + time.Now().Format("2006-01-02_15")
+   return filePath + "." + time.Now().Format(backupTimeFormat)
 }
+
+func (w *AsyncFileWriter) getExpiredFile(filePath string, maxBackups int, rotateHours uint) string {
+   if rotateHours > 0 {
+       maxBackups = int(rotateHours) * maxBackups
+   }
+   return filePath + "." + time.Now().Add(-time.Hour*time.Duration(maxBackups)).Format(backupTimeFormat)
+}
+
+func (w *AsyncFileWriter) removeExpiredFile() error {
+   if w.maxBackups == 0 {
+       return nil
+   }
+
+   oldFilepath := w.getExpiredFile(w.filePath, w.maxBackups, w.rotateHours)
+   _, err := os.Stat(oldFilepath)
+   if os.IsNotExist(err) {
+       return nil
+   }
+   errRemove := os.Remove(oldFilepath)
+   if err != nil {
+       return errRemove
+   }
+   return err
+}
```
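getExpiredFile works by name arithmetic rather than directory listing: it steps back maxBackups rotation periods (scaled by rotateHours) from the current time and formats that instant with backupTimeFormat, so each rotation only has to stat and remove a single candidate file. A small sketch of the same computation:

```go
package main

import (
	"fmt"
	"time"
)

const backupTimeFormat = "2006-01-02_15"

// expiredBackupName reproduces the getExpiredFile arithmetic: with hourly
// rotation, the file written maxBackups*rotateHours hours ago is the one
// that has just fallen out of the retention window.
func expiredBackupName(filePath string, maxBackups int, rotateHours uint) string {
	if rotateHours > 0 {
		maxBackups = int(rotateHours) * maxBackups
	}
	cutoff := time.Now().Add(-time.Hour * time.Duration(maxBackups))
	return filePath + "." + cutoff.Format(backupTimeFormat)
}

func main() {
	// Keep 3 hourly backups of bsc.log: this is the name deleted on the next rotation.
	fmt.Println(expiredBackupName("/data/logs/bsc.log", 3, 1))
}
```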
```diff
@@ -6,10 +6,12 @@ import (
    "strings"
    "testing"
    "time"
+
+   "github.com/stretchr/testify/assert"
 )

 func TestWriterHourly(t *testing.T) {
-   w := NewAsyncFileWriter("./hello.log", 100, 1)
+   w := NewAsyncFileWriter("./hello.log", 100, 1, 1)
    w.Start()
    w.Write([]byte("hello\n"))
    w.Write([]byte("world\n"))
```
```diff
@@ -67,3 +69,22 @@ func TestGetNextRotationHour(t *testing.T) {
        t.Run("TestGetNextRotationHour_"+strconv.Itoa(i), test(tc.now, tc.delta, tc.expectedHour))
    }
 }
+
+func TestClearBackups(t *testing.T) {
+   dir := "./test"
+   os.Mkdir(dir, 0700)
+   w := NewAsyncFileWriter("./test/bsc.log", 100, 1, 1)
+   defer os.RemoveAll(dir)
+   fakeCurrentTime := time.Now()
+   name := ""
+   data := []byte("data")
+   for i := 0; i < 5; i++ {
+       name = w.filePath + "." + fakeCurrentTime.Format(backupTimeFormat)
+       _ = os.WriteFile(name, data, 0700)
+       fakeCurrentTime = fakeCurrentTime.Add(-time.Hour * 1)
+   }
+   oldFile := w.getExpiredFile(w.filePath, w.maxBackups, w.rotateHours)
+   w.removeExpiredFile()
+   _, err := os.Stat(oldFile)
+   assert.True(t, os.IsNotExist(err))
+}
```
```diff
@@ -75,14 +75,14 @@ func FileHandler(path string, fmtr Format) (Handler, error) {
 // RotatingFileHandler returns a handler which writes log records to file chunks
 // at the given path. When a file's size reaches the limit, the handler creates
 // a new file named after the timestamp of the first log record it will contain.
-func RotatingFileHandler(filePath string, limit uint, formatter Format, rotateHours uint) (Handler, error) {
+func RotatingFileHandler(filePath string, limit uint, maxBackups uint, formatter Format, rotateHours uint) (Handler, error) {
    if _, err := os.Stat(path.Dir(filePath)); os.IsNotExist(err) {
        err := os.MkdirAll(path.Dir(filePath), 0755)
        if err != nil {
            return nil, fmt.Errorf("could not create directory %s, %v", path.Dir(filePath), err)
        }
    }
-   fileWriter := NewAsyncFileWriter(filePath, int64(limit), rotateHours)
+   fileWriter := NewAsyncFileWriter(filePath, int64(limit), int(maxBackups), rotateHours)
    fileWriter.Start()
    return StreamHandler(fileWriter, formatter), nil
 }
```
```diff
@@ -290,8 +290,8 @@ func (c Ctx) toArray() []interface{} {
    return arr
 }

-func NewFileLvlHandler(logPath string, maxBytesSize uint, level string, rotateHours uint) Handler {
-   rfh, err := RotatingFileHandler(logPath, maxBytesSize, LogfmtFormat(), rotateHours)
+func NewFileLvlHandler(logPath string, maxBytesSize uint, maxBackups uint, level string, rotateHours uint) Handler {
+   rfh, err := RotatingFileHandler(logPath, maxBytesSize, maxBackups, LogfmtFormat(), rotateHours)
    if err != nil {
        panic(err)
    }
```
```diff
@@ -513,6 +513,7 @@ type LogConfig struct {
    MaxBytesSize *uint   `toml:",omitempty"`
    Level        *string `toml:",omitempty"`
    RotateHours  *uint   `toml:",omitempty"`
+   MaxBackups   *uint   `toml:",omitempty"`

    // TermTimeFormat is the time format used for console logging.
    TermTimeFormat *string `toml:",omitempty"`
```
```diff
@@ -118,7 +118,12 @@ func New(conf *Config) (*Node, error) {
            rotateHours = *conf.LogConfig.RotateHours
        }

-       log.Root().SetHandler(log.NewFileLvlHandler(logFilePath, *conf.LogConfig.MaxBytesSize, *conf.LogConfig.Level, rotateHours))
+       maxBackups := uint(0)
+       if conf.LogConfig.MaxBackups != nil {
+           maxBackups = *conf.LogConfig.MaxBackups
+       }
+
+       log.Root().SetHandler(log.NewFileLvlHandler(logFilePath, *conf.LogConfig.MaxBytesSize, maxBackups, *conf.LogConfig.Level, rotateHours))
        }
    }
    if conf.Logger == nil {
```
params/config.go (121 changed lines)
```diff
@@ -66,27 +66,6 @@ var (
        Ethash: new(EthashConfig),
    }

-   // just for prysm compile pass
-   // RopstenChainConfig contains the chain parameters to run a node on the Ropsten test network.
-   RopstenChainConfig = &ChainConfig{
-       ChainID:                 big.NewInt(3),
-       HomesteadBlock:          big.NewInt(0),
-       DAOForkBlock:            nil,
-       DAOForkSupport:          true,
-       EIP150Block:             big.NewInt(0),
-       EIP155Block:             big.NewInt(10),
-       EIP158Block:             big.NewInt(10),
-       ByzantiumBlock:          big.NewInt(1_700_000),
-       ConstantinopleBlock:     big.NewInt(4_230_000),
-       PetersburgBlock:         big.NewInt(4_939_394),
-       IstanbulBlock:           big.NewInt(6_485_846),
-       MuirGlacierBlock:        big.NewInt(7_117_117),
-       BerlinBlock:             big.NewInt(9_812_189),
-       LondonBlock:             big.NewInt(10_499_401),
-       TerminalTotalDifficulty: new(big.Int).SetUint64(50_000_000_000_000_000),
-       Ethash:                  new(EthashConfig),
-   }
-
    // just for prysm compile pass
    // SepoliaChainConfig contains the chain parameters to run a node on the Sepolia test network.
    SepoliaChainConfig = &ChainConfig{
```
```diff
@@ -205,11 +184,10 @@ var (
        HertzBlock:    big.NewInt(31103030),
        HertzfixBlock: big.NewInt(35682300),
        // UnixTime: 1702972800 is December 19, 2023 8:00:00 AM UTC
-       ShanghaiTime: newUint64(1702972800),
-       KeplerTime:   newUint64(1702972800),
-
-       // TODO
-       FeynmanTime: _rialto_upgrade_height_,
+       ShanghaiTime:   newUint64(1702972800),
+       KeplerTime:     newUint64(1702972800),
+       FeynmanTime:    newUint64(1710136800),
+       FeynmanFixTime: newUint64(1711342800),

        Parlia: &ParliaConfig{
            Period: 3,
```
```diff
@@ -217,6 +195,7 @@ var (
        },
    }

+   // used to test hard fork upgrade, following https://github.com/bnb-chain/bsc-genesis-contract/blob/master/genesis.json
    RialtoChainConfig = &ChainConfig{
        ChainID:        big.NewInt(714),
        HomesteadBlock: big.NewInt(0),
```
```diff
@@ -243,15 +222,14 @@ var (
        LondonBlock:   big.NewInt(8),
        HertzBlock:    big.NewInt(8),
        HertzfixBlock: big.NewInt(8),
-
-       // TODO
-       ShanghaiTime: _rialto_upgrade_height_,
-       KeplerTime:   _rialto_upgrade_height_,
-       FeynmanTime:  _rialto_upgrade_height_,
+       ShanghaiTime:   newUint64(0),
+       KeplerTime:     newUint64(0),
+       FeynmanTime:    newUint64(0),
+       FeynmanFixTime: newUint64(0),

        Parlia: &ParliaConfig{
-           Period: _rialto_parlia_period_,
-           Epoch:  _rialto_parlia_epoch_,
+           Period: 3,
+           Epoch:  200,
        },
    }
```
```diff
@@ -458,10 +436,10 @@ var NetworkNames = map[string]string{
 type ChainConfig struct {
    ChainID *big.Int `json:"chainId"` // chainId identifies the current chain and is used for replay protection

-   HomesteadBlock *big.Int `json:"homesteadBlock,omitempty" toml:",omitempty"` // Homestead switch block (nil = no fork, 0 = already homestead)
+   HomesteadBlock *big.Int `json:"homesteadBlock,omitempty"` // Homestead switch block (nil = no fork, 0 = already homestead)

-   DAOForkBlock   *big.Int `json:"daoForkBlock,omitempty" toml:",omitempty"`   // TheDAO hard-fork switch block (nil = no fork)
-   DAOForkSupport bool     `json:"daoForkSupport,omitempty" toml:",omitempty"` // Whether the nodes supports or opposes the DAO hard-fork
+   DAOForkBlock   *big.Int `json:"daoForkBlock,omitempty"`   // TheDAO hard-fork switch block (nil = no fork)
+   DAOForkSupport bool     `json:"daoForkSupport,omitempty"` // Whether the nodes supports or opposes the DAO hard-fork

    // EIP150 implements the Gas price changes (https://github.com/ethereum/EIPs/issues/150)
    EIP150Block *big.Int `json:"eip150Block,omitempty"` // EIP150 HF block (nil = no fork)
```
```diff
@@ -483,12 +461,13 @@ type ChainConfig struct {

    // Fork scheduling was switched from blocks to timestamps here

-   ShanghaiTime *uint64 `json:"shanghaiTime,omitempty" toml:",omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai)
-   KeplerTime   *uint64 `json:"keplerTime,omitempty" toml:",omitempty"`   // Kepler switch time (nil = no fork, 0 = already activated)
-   FeynmanTime  *uint64 `json:"feynmanTime,omitempty" toml:",omitempty"`  // Feynman switch time (nil = no fork, 0 = already activated)
-   CancunTime   *uint64 `json:"cancunTime,omitempty" toml:",omitempty"`   // Cancun switch time (nil = no fork, 0 = already on cancun)
-   PragueTime   *uint64 `json:"pragueTime,omitempty" toml:",omitempty"`   // Prague switch time (nil = no fork, 0 = already on prague)
-   VerkleTime   *uint64 `json:"verkleTime,omitempty" toml:",omitempty"`   // Verkle switch time (nil = no fork, 0 = already on verkle)
+   ShanghaiTime   *uint64 `json:"shanghaiTime,omitempty"`   // Shanghai switch time (nil = no fork, 0 = already on shanghai)
+   KeplerTime     *uint64 `json:"keplerTime,omitempty"`     // Kepler switch time (nil = no fork, 0 = already activated)
+   FeynmanTime    *uint64 `json:"feynmanTime,omitempty"`    // Feynman switch time (nil = no fork, 0 = already activated)
+   FeynmanFixTime *uint64 `json:"feynmanFixTime,omitempty"` // FeynmanFix switch time (nil = no fork, 0 = already activated)
+   CancunTime     *uint64 `json:"cancunTime,omitempty"`     // Cancun switch time (nil = no fork, 0 = already on cancun)
+   PragueTime     *uint64 `json:"pragueTime,omitempty"`     // Prague switch time (nil = no fork, 0 = already on prague)
+   VerkleTime     *uint64 `json:"verkleTime,omitempty"`     // Verkle switch time (nil = no fork, 0 = already on verkle)

    // TerminalTotalDifficulty is the amount of total difficulty reached by
    // the network that triggers the consensus upgrade.
```
```diff
@@ -499,23 +478,23 @@ type ChainConfig struct {
    // even without having seen the TTD locally (safer long term).
    TerminalTotalDifficultyPassed bool `json:"terminalTotalDifficultyPassed,omitempty"`

-   RamanujanBlock  *big.Int `json:"ramanujanBlock,omitempty" toml:",omitempty"`  // ramanujanBlock switch block (nil = no fork, 0 = already activated)
-   NielsBlock      *big.Int `json:"nielsBlock,omitempty" toml:",omitempty"`      // nielsBlock switch block (nil = no fork, 0 = already activated)
-   MirrorSyncBlock *big.Int `json:"mirrorSyncBlock,omitempty" toml:",omitempty"` // mirrorSyncBlock switch block (nil = no fork, 0 = already activated)
-   BrunoBlock      *big.Int `json:"brunoBlock,omitempty" toml:",omitempty"`      // brunoBlock switch block (nil = no fork, 0 = already activated)
-   EulerBlock      *big.Int `json:"eulerBlock,omitempty" toml:",omitempty"`      // eulerBlock switch block (nil = no fork, 0 = already activated)
-   GibbsBlock      *big.Int `json:"gibbsBlock,omitempty" toml:",omitempty"`      // gibbsBlock switch block (nil = no fork, 0 = already activated)
-   NanoBlock       *big.Int `json:"nanoBlock,omitempty" toml:",omitempty"`       // nanoBlock switch block (nil = no fork, 0 = already activated)
-   MoranBlock      *big.Int `json:"moranBlock,omitempty" toml:",omitempty"`      // moranBlock switch block (nil = no fork, 0 = already activated)
-   PlanckBlock     *big.Int `json:"planckBlock,omitempty" toml:",omitempty"`     // planckBlock switch block (nil = no fork, 0 = already activated)
-   LubanBlock      *big.Int `json:"lubanBlock,omitempty" toml:",omitempty"`      // lubanBlock switch block (nil = no fork, 0 = already activated)
-   PlatoBlock      *big.Int `json:"platoBlock,omitempty" toml:",omitempty"`      // platoBlock switch block (nil = no fork, 0 = already activated)
-   HertzBlock      *big.Int `json:"hertzBlock,omitempty" toml:",omitempty"`      // hertzBlock switch block (nil = no fork, 0 = already activated)
-   HertzfixBlock   *big.Int `json:"hertzfixBlock,omitempty" toml:",omitempty"`   // hertzfixBlock switch block (nil = no fork, 0 = already activated)
+   RamanujanBlock  *big.Int `json:"ramanujanBlock,omitempty"`  // ramanujanBlock switch block (nil = no fork, 0 = already activated)
+   NielsBlock      *big.Int `json:"nielsBlock,omitempty"`      // nielsBlock switch block (nil = no fork, 0 = already activated)
+   MirrorSyncBlock *big.Int `json:"mirrorSyncBlock,omitempty"` // mirrorSyncBlock switch block (nil = no fork, 0 = already activated)
+   BrunoBlock      *big.Int `json:"brunoBlock,omitempty"`      // brunoBlock switch block (nil = no fork, 0 = already activated)
+   EulerBlock      *big.Int `json:"eulerBlock,omitempty"`      // eulerBlock switch block (nil = no fork, 0 = already activated)
+   GibbsBlock      *big.Int `json:"gibbsBlock,omitempty"`      // gibbsBlock switch block (nil = no fork, 0 = already activated)
+   NanoBlock       *big.Int `json:"nanoBlock,omitempty"`       // nanoBlock switch block (nil = no fork, 0 = already activated)
+   MoranBlock      *big.Int `json:"moranBlock,omitempty"`      // moranBlock switch block (nil = no fork, 0 = already activated)
+   PlanckBlock     *big.Int `json:"planckBlock,omitempty"`     // planckBlock switch block (nil = no fork, 0 = already activated)
+   LubanBlock      *big.Int `json:"lubanBlock,omitempty"`      // lubanBlock switch block (nil = no fork, 0 = already activated)
+   PlatoBlock      *big.Int `json:"platoBlock,omitempty"`      // platoBlock switch block (nil = no fork, 0 = already activated)
+   HertzBlock      *big.Int `json:"hertzBlock,omitempty"`      // hertzBlock switch block (nil = no fork, 0 = already activated)
+   HertzfixBlock   *big.Int `json:"hertzfixBlock,omitempty"`   // hertzfixBlock switch block (nil = no fork, 0 = already activated)
    // Various consensus engines
-   Ethash *EthashConfig `json:"ethash,omitempty" toml:",omitempty"`
-   Clique *CliqueConfig `json:"clique,omitempty" toml:",omitempty"`
-   Parlia *ParliaConfig `json:"parlia,omitempty" toml:",omitempty"`
+   Ethash    *EthashConfig `json:"ethash,omitempty"`
+   Clique    *CliqueConfig `json:"clique,omitempty"`
+   Parlia    *ParliaConfig `json:"parlia,omitempty"`
    IsDevMode bool          `json:"isDev,omitempty"`
 }
```
```diff
@@ -583,7 +562,12 @@ func (c *ChainConfig) String() string {
        FeynmanTime = big.NewInt(0).SetUint64(*c.FeynmanTime)
    }

-   return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, Engine: %v}",
+   var FeynmanFixTime *big.Int
+   if c.FeynmanFixTime != nil {
+       FeynmanFixTime = big.NewInt(0).SetUint64(*c.FeynmanFixTime)
+   }
+
+   return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, FeynmanFixTime: %v, Engine: %v}",
        c.ChainID,
        c.HomesteadBlock,
        c.DAOForkBlock,
```
```diff
@@ -618,6 +602,7 @@ func (c *ChainConfig) String() string {
        ShanghaiTime,
        KeplerTime,
        FeynmanTime,
+       FeynmanFixTime,
        engine,
    )
 }
```
```diff
@@ -866,6 +851,20 @@ func (c *ChainConfig) IsOnFeynman(currentBlockNumber *big.Int, lastBlockTime uin
    return !c.IsFeynman(lastBlockNumber, lastBlockTime) && c.IsFeynman(currentBlockNumber, currentBlockTime)
 }

+// IsFeynmanFix returns whether time is either equal to the FeynmanFix fork time or greater.
+func (c *ChainConfig) IsFeynmanFix(num *big.Int, time uint64) bool {
+   return c.IsLondon(num) && isTimestampForked(c.FeynmanFixTime, time)
+}
+
+// IsOnFeynmanFix returns whether currentBlockTime is either equal to the FeynmanFix fork time or greater firstly.
+func (c *ChainConfig) IsOnFeynmanFix(currentBlockNumber *big.Int, lastBlockTime uint64, currentBlockTime uint64) bool {
+   lastBlockNumber := new(big.Int)
+   if currentBlockNumber.Cmp(big.NewInt(1)) >= 0 {
+       lastBlockNumber.Sub(currentBlockNumber, big.NewInt(1))
+   }
+   return !c.IsFeynmanFix(lastBlockNumber, lastBlockTime) && c.IsFeynmanFix(currentBlockNumber, currentBlockTime)
+}
+
 // IsCancun returns whether num is either equal to the Cancun fork time or greater.
 func (c *ChainConfig) IsCancun(num *big.Int, time uint64) bool {
    return c.IsLondon(num) && isTimestampForked(c.CancunTime, time)
```
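IsOnFeynmanFix follows the repository's one-shot boundary pattern: it is true only for the first block whose timestamp reaches FeynmanFixTime, because the predecessor's timestamp must still be below the fork time. A self-contained sketch of that pattern, with a local isTimestampForked mirroring the params helper:

```go
package main

import "fmt"

// isTimestampForked matches the params helper: a nil fork time means the fork
// never activates; otherwise activation is at-or-after the configured time.
func isTimestampForked(forkTime *uint64, now uint64) bool {
	return forkTime != nil && now >= *forkTime
}

func main() {
	fork := uint64(1711342800) // FeynmanFix time on the testnet per this diff
	blocks := []uint64{fork - 3, fork, fork + 3}
	for i := 1; i < len(blocks); i++ {
		last, cur := blocks[i-1], blocks[i]
		onFork := !isTimestampForked(&fork, last) && isTimestampForked(&fork, cur)
		fmt.Printf("last=%d cur=%d -> boundary: %v\n", last, cur, onFork) // true, then false
	}
}
```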
```diff
@@ -932,6 +931,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
        {name: "hertzfixBlock", block: c.HertzfixBlock},
        {name: "keplerTime", timestamp: c.KeplerTime},
        {name: "feynmanTime", timestamp: c.FeynmanTime},
+       {name: "feynmanFixTime", timestamp: c.FeynmanFixTime},
        {name: "cancunTime", timestamp: c.CancunTime, optional: true},
        {name: "pragueTime", timestamp: c.PragueTime, optional: true},
        {name: "verkleTime", timestamp: c.VerkleTime, optional: true},
```
```diff
@@ -1074,6 +1074,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, headNumber *big.Int,
    if isForkTimestampIncompatible(c.FeynmanTime, newcfg.FeynmanTime, headTimestamp) {
        return newTimestampCompatError("Feynman fork timestamp", c.FeynmanTime, newcfg.FeynmanTime)
    }
+   if isForkTimestampIncompatible(c.FeynmanFixTime, newcfg.FeynmanFixTime, headTimestamp) {
+       return newTimestampCompatError("FeynmanFix fork timestamp", c.FeynmanFixTime, newcfg.FeynmanFixTime)
+   }
    if isForkTimestampIncompatible(c.CancunTime, newcfg.CancunTime, headTimestamp) {
        return newTimestampCompatError("Cancun fork timestamp", c.CancunTime, newcfg.CancunTime)
    }
```
```diff
@@ -23,7 +23,7 @@ import (
 const (
    VersionMajor = 1  // Major version component of the current release
    VersionMinor = 3  // Minor version component of the current release
-   VersionPatch = 8  // Patch version component of the current release
+   VersionPatch = 10 // Patch version component of the current release
    VersionMeta  = "" // Version metadata to append to the version string
 )
```
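For context, params derives the user-visible version string from these constants (geth composes it with fmt.Sprintf in the same file), so this hunk makes the release report itself as 1.3.10. A sketch of that composition:

```go
package main

import "fmt"

const (
	VersionMajor = 1
	VersionMinor = 3
	VersionPatch = 10
	VersionMeta  = ""
)

func main() {
	v := fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch)
	if VersionMeta != "" {
		v += "-" + VersionMeta
	}
	fmt.Println(v) // 1.3.10
}
```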