Compare commits
19 Commits
bc-fusion
...
v1.3.13_ba
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
bae7e6b52e | ||
|
|
3b55be5eb2 | ||
|
|
280cad3098 | ||
|
|
9fb0241eab | ||
|
|
c8cc91963f | ||
|
|
bd13416162 | ||
|
|
b7b64da564 | ||
|
|
73f27a590f | ||
|
|
5e74ea650d | ||
|
|
5378df3702 | ||
|
|
40cae45436 | ||
|
|
361e8413e6 | ||
|
|
36a283ef98 | ||
|
|
78d1cade19 | ||
|
|
82beb2c5f3 | ||
|
|
c6aeee2001 | ||
|
|
f28b98a994 | ||
|
|
5ee77bbe8b | ||
|
|
fe928d4778 |
30
CHANGELOG.md
30
CHANGELOG.md
@@ -1,4 +1,34 @@
|
|||||||
# Changelog
|
# Changelog
|
||||||
|
## v1.3.13
|
||||||
|
* [\#2358](https://github.com/bnb-chain/bsc/pull/2358) doc: add Feynman upgrade for mainnet
|
||||||
|
* [\#2335](https://github.com/bnb-chain/bsc/pull/2335) upgrade: update system contracts bytes code and hardfork time of Feynman upgrade
|
||||||
|
|
||||||
|
## v1.3.12
|
||||||
|
BUGFIX
|
||||||
|
* [\#2305](https://github.com/bnb-chain/bsc/pull/2305) fix: fix the wrong version number
|
||||||
|
|
||||||
|
## v1.3.11
|
||||||
|
BUGFIX
|
||||||
|
* [\#2288](https://github.com/bnb-chain/bsc/pull/2288) fix: add FeynmanFix upgrade for a testnet issue
|
||||||
|
|
||||||
|
## v1.3.10
|
||||||
|
FEATURE
|
||||||
|
* [\#2047](https://github.com/bnb-chain/bsc/pull/2047) feat: add new fork block and precompile contract for BEP294 and BEP299
|
||||||
|
|
||||||
|
## v1.3.9
|
||||||
|
FEATURE
|
||||||
|
* [\#2186](https://github.com/bnb-chain/bsc/pull/2186) log: support maxBackups in config.toml
|
||||||
|
|
||||||
|
BUGFIX
|
||||||
|
* [\#2160](https://github.com/bnb-chain/bsc/pull/2160) cmd: fix dump cli cannot work in path mode
|
||||||
|
* [\#2183](https://github.com/bnb-chain/bsc/pull/2183) p2p: resolved deadlock on p2p server shutdown
|
||||||
|
|
||||||
|
IMPROVEMENT
|
||||||
|
* [\#2177](https://github.com/bnb-chain/bsc/pull/2177) build(deps): bump github.com/quic-go/quic-go from 0.39.3 to 0.39.4
|
||||||
|
* [\#2185](https://github.com/bnb-chain/bsc/pull/2185) consensus/parlia: set nonce before evm run
|
||||||
|
* [\#2190](https://github.com/bnb-chain/bsc/pull/2190) fix(legacypool): deprecate already known error
|
||||||
|
* [\#2195](https://github.com/bnb-chain/bsc/pull/2195) eth/fetcher: downgrade state tx log
|
||||||
|
|
||||||
## v1.3.8
|
## v1.3.8
|
||||||
FEATURE
|
FEATURE
|
||||||
* [\#2074](https://github.com/bnb-chain/bsc/pull/2074) faucet: new faucet client
|
* [\#2074](https://github.com/bnb-chain/bsc/pull/2074) faucet: new faucet client
|
||||||
|
|||||||
@@ -199,7 +199,7 @@ Delete the selected BLS account from the BLS wallet.`,
|
|||||||
Name: "generate-proof",
|
Name: "generate-proof",
|
||||||
Usage: "Generate ownership proof for the selected BLS account from the BLS wallet",
|
Usage: "Generate ownership proof for the selected BLS account from the BLS wallet",
|
||||||
Action: blsAccountGenerateProof,
|
Action: blsAccountGenerateProof,
|
||||||
ArgsUsage: "<BLS pubkey>",
|
ArgsUsage: "<operator address> <BLS pubkey>",
|
||||||
Category: "BLS ACCOUNT COMMANDS",
|
Category: "BLS ACCOUNT COMMANDS",
|
||||||
Flags: []cli.Flag{
|
Flags: []cli.Flag{
|
||||||
utils.DataDirFlag,
|
utils.DataDirFlag,
|
||||||
|
|||||||
@@ -202,6 +202,10 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
|
|||||||
v := ctx.Uint64(utils.OverrideFeynman.Name)
|
v := ctx.Uint64(utils.OverrideFeynman.Name)
|
||||||
cfg.Eth.OverrideFeynman = &v
|
cfg.Eth.OverrideFeynman = &v
|
||||||
}
|
}
|
||||||
|
if ctx.IsSet(utils.OverrideFeynmanFix.Name) {
|
||||||
|
v := ctx.Uint64(utils.OverrideFeynmanFix.Name)
|
||||||
|
cfg.Eth.OverrideFeynmanFix = &v
|
||||||
|
}
|
||||||
backend, _ := utils.RegisterEthService(stack, &cfg.Eth)
|
backend, _ := utils.RegisterEthService(stack, &cfg.Eth)
|
||||||
|
|
||||||
// Configure log filter RPC API.
|
// Configure log filter RPC API.
|
||||||
|
|||||||
@@ -75,6 +75,7 @@ var (
|
|||||||
utils.OverrideCancun,
|
utils.OverrideCancun,
|
||||||
utils.OverrideVerkle,
|
utils.OverrideVerkle,
|
||||||
utils.OverrideFeynman,
|
utils.OverrideFeynman,
|
||||||
|
utils.OverrideFeynmanFix,
|
||||||
utils.EnablePersonal,
|
utils.EnablePersonal,
|
||||||
utils.TxPoolLocalsFlag,
|
utils.TxPoolLocalsFlag,
|
||||||
utils.TxPoolNoLocalsFlag,
|
utils.TxPoolNoLocalsFlag,
|
||||||
|
|||||||
@@ -18,7 +18,6 @@
|
|||||||
package utils
|
package utils
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
|
||||||
"context"
|
"context"
|
||||||
"crypto/ecdsa"
|
"crypto/ecdsa"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
@@ -320,6 +319,11 @@ var (
|
|||||||
Usage: "Manually specify the Feynman fork timestamp, overriding the bundled setting",
|
Usage: "Manually specify the Feynman fork timestamp, overriding the bundled setting",
|
||||||
Category: flags.EthCategory,
|
Category: flags.EthCategory,
|
||||||
}
|
}
|
||||||
|
OverrideFeynmanFix = &cli.Uint64Flag{
|
||||||
|
Name: "override.feynmanfix",
|
||||||
|
Usage: "Manually specify the FeynmanFix fork timestamp, overriding the bundled setting",
|
||||||
|
Category: flags.EthCategory,
|
||||||
|
}
|
||||||
SyncModeFlag = &flags.TextMarshalerFlag{
|
SyncModeFlag = &flags.TextMarshalerFlag{
|
||||||
Name: "syncmode",
|
Name: "syncmode",
|
||||||
Usage: `Blockchain sync mode ("snap" or "full")`,
|
Usage: `Blockchain sync mode ("snap" or "full")`,
|
||||||
@@ -1889,7 +1893,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
|||||||
if ctx.IsSet(StateHistoryFlag.Name) {
|
if ctx.IsSet(StateHistoryFlag.Name) {
|
||||||
cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name)
|
cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name)
|
||||||
}
|
}
|
||||||
scheme, err := CompareStateSchemeCLIWithConfig(ctx)
|
scheme, err := ParseCLIAndConfigStateScheme(ctx.String(StateSchemeFlag.Name), cfg.StateScheme)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Fatalf("%v", err)
|
Fatalf("%v", err)
|
||||||
}
|
}
|
||||||
@@ -2358,11 +2362,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
|
|||||||
if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
|
if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
|
||||||
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
|
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
|
||||||
}
|
}
|
||||||
provided, err := CompareStateSchemeCLIWithConfig(ctx)
|
scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), chainDb)
|
||||||
if err != nil {
|
|
||||||
Fatalf("%v", err)
|
|
||||||
}
|
|
||||||
scheme, err := rawdb.ParseStateScheme(provided, chainDb)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Fatalf("%v", err)
|
Fatalf("%v", err)
|
||||||
}
|
}
|
||||||
@@ -2430,11 +2430,7 @@ func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, read
|
|||||||
config := &trie.Config{
|
config := &trie.Config{
|
||||||
Preimages: preimage,
|
Preimages: preimage,
|
||||||
}
|
}
|
||||||
provided, err := CompareStateSchemeCLIWithConfig(ctx)
|
scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), disk)
|
||||||
if err != nil {
|
|
||||||
Fatalf("%v", err)
|
|
||||||
}
|
|
||||||
scheme, err := rawdb.ParseStateScheme(provided, disk)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Fatalf("%v", err)
|
Fatalf("%v", err)
|
||||||
}
|
}
|
||||||
@@ -2453,27 +2449,15 @@ func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, read
|
|||||||
return trie.NewDatabase(disk, config)
|
return trie.NewDatabase(disk, config)
|
||||||
}
|
}
|
||||||
|
|
||||||
// CompareStateSchemeCLIWithConfig compare state scheme in CLI with config whether are equal.
|
// ParseCLIAndConfigStateScheme parses state scheme in CLI and config.
|
||||||
func CompareStateSchemeCLIWithConfig(ctx *cli.Context) (string, error) {
|
func ParseCLIAndConfigStateScheme(cliScheme, cfgScheme string) (string, error) {
|
||||||
var (
|
if cliScheme == "" {
|
||||||
cfgScheme string
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
if file := ctx.String("config"); file != "" {
|
|
||||||
// we don't validate cfgScheme because it's already checked in cmd/geth/loadBaseConfig
|
|
||||||
if cfgScheme, err = scanConfigForStateScheme(file); err != nil {
|
|
||||||
log.Error("Failed to parse config file", "error", err)
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !ctx.IsSet(StateSchemeFlag.Name) {
|
|
||||||
if cfgScheme != "" {
|
if cfgScheme != "" {
|
||||||
log.Info("Use config state scheme", "config", cfgScheme)
|
log.Info("Use config state scheme", "config", cfgScheme)
|
||||||
}
|
}
|
||||||
return cfgScheme, nil
|
return cfgScheme, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
cliScheme := ctx.String(StateSchemeFlag.Name)
|
|
||||||
if !rawdb.ValidateStateScheme(cliScheme) {
|
if !rawdb.ValidateStateScheme(cliScheme) {
|
||||||
return "", fmt.Errorf("invalid state scheme in CLI: %s", cliScheme)
|
return "", fmt.Errorf("invalid state scheme in CLI: %s", cliScheme)
|
||||||
}
|
}
|
||||||
@@ -2483,35 +2467,3 @@ func CompareStateSchemeCLIWithConfig(ctx *cli.Context) (string, error) {
|
|||||||
}
|
}
|
||||||
return "", fmt.Errorf("incompatible state scheme, CLI: %s, config: %s", cliScheme, cfgScheme)
|
return "", fmt.Errorf("incompatible state scheme, CLI: %s, config: %s", cliScheme, cfgScheme)
|
||||||
}
|
}
|
||||||
|
|
||||||
func scanConfigForStateScheme(file string) (string, error) {
|
|
||||||
f, err := os.Open(file)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
scanner := bufio.NewScanner(f)
|
|
||||||
targetStr := "StateScheme"
|
|
||||||
for scanner.Scan() {
|
|
||||||
line := scanner.Text()
|
|
||||||
if strings.Contains(line, targetStr) {
|
|
||||||
return indexStateScheme(line), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = scanner.Err(); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func indexStateScheme(str string) string {
|
|
||||||
i1 := strings.Index(str, "\"")
|
|
||||||
i2 := strings.LastIndex(str, "\"")
|
|
||||||
|
|
||||||
if i1 != -1 && i2 != -1 && i1 < i2 {
|
|
||||||
return str[i1+1 : i2]
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -18,13 +18,8 @@
|
|||||||
package utils
|
package utils
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func Test_SplitTagsFlag(t *testing.T) {
|
func Test_SplitTagsFlag(t *testing.T) {
|
||||||
@@ -67,126 +62,3 @@ func Test_SplitTagsFlag(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_parseConfig(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
fn func() string
|
|
||||||
wantedResult string
|
|
||||||
wantedIsErr bool
|
|
||||||
wantedErrStr string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "path",
|
|
||||||
fn: func() string {
|
|
||||||
tomlString := `[Eth]NetworkId = 56StateScheme = "path"`
|
|
||||||
return createTempTomlFile(t, tomlString)
|
|
||||||
},
|
|
||||||
wantedResult: rawdb.PathScheme,
|
|
||||||
wantedIsErr: false,
|
|
||||||
wantedErrStr: "",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "hash",
|
|
||||||
fn: func() string {
|
|
||||||
tomlString := `[Eth]NetworkId = 56StateScheme = "hash"`
|
|
||||||
return createTempTomlFile(t, tomlString)
|
|
||||||
},
|
|
||||||
wantedResult: rawdb.HashScheme,
|
|
||||||
wantedIsErr: false,
|
|
||||||
wantedErrStr: "",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "empty state scheme",
|
|
||||||
fn: func() string {
|
|
||||||
tomlString := `[Eth]NetworkId = 56StateScheme = ""`
|
|
||||||
return createTempTomlFile(t, tomlString)
|
|
||||||
},
|
|
||||||
wantedResult: "",
|
|
||||||
wantedIsErr: false,
|
|
||||||
wantedErrStr: "",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "unset state scheme",
|
|
||||||
fn: func() string {
|
|
||||||
tomlString := `[Eth]NetworkId = 56`
|
|
||||||
return createTempTomlFile(t, tomlString)
|
|
||||||
},
|
|
||||||
wantedResult: "",
|
|
||||||
wantedIsErr: false,
|
|
||||||
wantedErrStr: "",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "failed to open file",
|
|
||||||
fn: func() string { return "" },
|
|
||||||
wantedResult: "",
|
|
||||||
wantedIsErr: true,
|
|
||||||
wantedErrStr: "open : no such file or directory",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
result, err := scanConfigForStateScheme(tt.fn())
|
|
||||||
if tt.wantedIsErr {
|
|
||||||
assert.Contains(t, err.Error(), tt.wantedErrStr)
|
|
||||||
} else {
|
|
||||||
assert.Nil(t, err)
|
|
||||||
}
|
|
||||||
assert.Equal(t, tt.wantedResult, result)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// createTempTomlFile is a helper function to create a temp file with the provided TOML content
|
|
||||||
func createTempTomlFile(t *testing.T, content string) string {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
|
||||||
file, err := os.CreateTemp(dir, "config.toml")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unable to create temporary file: %v", err)
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
_, err = file.WriteString(content)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unable to write to temporary file: %v", err)
|
|
||||||
}
|
|
||||||
return file.Name()
|
|
||||||
}
|
|
||||||
|
|
||||||
func Test_parseString(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
arg string
|
|
||||||
wantResult string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "hash string",
|
|
||||||
arg: "\"hash\"",
|
|
||||||
wantResult: rawdb.HashScheme,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "path string",
|
|
||||||
arg: "\"path\"",
|
|
||||||
wantResult: rawdb.PathScheme,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "empty string",
|
|
||||||
arg: "",
|
|
||||||
wantResult: "",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "empty string",
|
|
||||||
arg: "\"\"",
|
|
||||||
wantResult: "",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
if got := indexStateScheme(tt.arg); got != tt.wantResult {
|
|
||||||
t.Errorf("parseString() = %v, want %v", got, tt.wantResult)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -281,6 +281,7 @@ type ChainOverrides struct {
|
|||||||
OverrideCancun *uint64
|
OverrideCancun *uint64
|
||||||
OverrideVerkle *uint64
|
OverrideVerkle *uint64
|
||||||
OverrideFeynman *uint64
|
OverrideFeynman *uint64
|
||||||
|
OverrideFeynmanFix *uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetupGenesisBlock writes or updates the genesis block in db.
|
// SetupGenesisBlock writes or updates the genesis block in db.
|
||||||
@@ -321,6 +322,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen
|
|||||||
if overrides != nil && overrides.OverrideFeynman != nil {
|
if overrides != nil && overrides.OverrideFeynman != nil {
|
||||||
config.FeynmanTime = overrides.OverrideFeynman
|
config.FeynmanTime = overrides.OverrideFeynman
|
||||||
}
|
}
|
||||||
|
if overrides != nil && overrides.OverrideFeynmanFix != nil {
|
||||||
|
config.FeynmanFixTime = overrides.OverrideFeynmanFix
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Just commit the new block if there is no stored genesis block.
|
// Just commit the new block if there is no stored genesis block.
|
||||||
|
|||||||
@@ -335,7 +335,7 @@ func ParseStateScheme(provided string, disk ethdb.Database) (string, error) {
|
|||||||
if stored == "" {
|
if stored == "" {
|
||||||
// use default scheme for empty database, flip it when
|
// use default scheme for empty database, flip it when
|
||||||
// path mode is chosen as default
|
// path mode is chosen as default
|
||||||
log.Info("State schema set to default", "scheme", "hash")
|
log.Info("State scheme set to default", "scheme", "hash")
|
||||||
return HashScheme, nil
|
return HashScheme, nil
|
||||||
}
|
}
|
||||||
log.Info("State scheme set to already existing disk db", "scheme", stored)
|
log.Info("State scheme set to already existing disk db", "scheme", stored)
|
||||||
|
|||||||
@@ -129,6 +129,8 @@ func InspectFreezerTable(ancient string, freezerName string, tableName string, s
|
|||||||
switch freezerName {
|
switch freezerName {
|
||||||
case chainFreezerName:
|
case chainFreezerName:
|
||||||
path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
|
path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
|
||||||
|
case stateFreezerName:
|
||||||
|
path, tables = filepath.Join(ancient, freezerName), stateFreezerNoSnappy
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
|
return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -188,19 +188,27 @@ func (batch *freezerTableBatch) maybeCommit() error {
|
|||||||
|
|
||||||
// commit writes the batched items to the backing freezerTable.
|
// commit writes the batched items to the backing freezerTable.
|
||||||
func (batch *freezerTableBatch) commit() error {
|
func (batch *freezerTableBatch) commit() error {
|
||||||
// Write data.
|
// Write data. The head file is fsync'd after write to ensure the
|
||||||
|
// data is truly transferred to disk.
|
||||||
_, err := batch.t.head.Write(batch.dataBuffer)
|
_, err := batch.t.head.Write(batch.dataBuffer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if err := batch.t.head.Sync(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
dataSize := int64(len(batch.dataBuffer))
|
dataSize := int64(len(batch.dataBuffer))
|
||||||
batch.dataBuffer = batch.dataBuffer[:0]
|
batch.dataBuffer = batch.dataBuffer[:0]
|
||||||
|
|
||||||
// Write indices.
|
// Write indices. The index file is fsync'd after write to ensure the
|
||||||
|
// data indexes are truly transferred to disk.
|
||||||
_, err = batch.t.index.Write(batch.indexBuffer)
|
_, err = batch.t.index.Write(batch.indexBuffer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if err := batch.t.index.Sync(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
indexSize := int64(len(batch.indexBuffer))
|
indexSize := int64(len(batch.indexBuffer))
|
||||||
batch.indexBuffer = batch.indexBuffer[:0]
|
batch.indexBuffer = batch.indexBuffer[:0]
|
||||||
|
|
||||||
|
|||||||
@@ -223,7 +223,9 @@ func (t *freezerTable) repair() error {
|
|||||||
if t.readonly {
|
if t.readonly {
|
||||||
return fmt.Errorf("index file(path: %s, name: %s) size is not a multiple of %d", t.path, t.name, indexEntrySize)
|
return fmt.Errorf("index file(path: %s, name: %s) size is not a multiple of %d", t.path, t.name, indexEntrySize)
|
||||||
}
|
}
|
||||||
truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path
|
if err := truncateFreezerFile(t.index, stat.Size()-overflow); err != nil {
|
||||||
|
return err
|
||||||
|
} // New file can't trigger this path
|
||||||
}
|
}
|
||||||
// Retrieve the file sizes and prepare for truncation
|
// Retrieve the file sizes and prepare for truncation
|
||||||
if stat, err = t.index.Stat(); err != nil {
|
if stat, err = t.index.Stat(); err != nil {
|
||||||
@@ -268,8 +270,8 @@ func (t *freezerTable) repair() error {
|
|||||||
// Print an error log if the index is corrupted due to an incorrect
|
// Print an error log if the index is corrupted due to an incorrect
|
||||||
// last index item. While it is theoretically possible to have a zero offset
|
// last index item. While it is theoretically possible to have a zero offset
|
||||||
// by storing all zero-size items, it is highly unlikely to occur in practice.
|
// by storing all zero-size items, it is highly unlikely to occur in practice.
|
||||||
if lastIndex.offset == 0 && offsetsSize%indexEntrySize > 1 {
|
if lastIndex.offset == 0 && offsetsSize/indexEntrySize > 1 {
|
||||||
log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "items", offsetsSize%indexEntrySize-1)
|
log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "indexes", offsetsSize/indexEntrySize)
|
||||||
}
|
}
|
||||||
if t.readonly {
|
if t.readonly {
|
||||||
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
|
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
|
||||||
@@ -424,6 +426,9 @@ func (t *freezerTable) truncateHead(items uint64) error {
|
|||||||
if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
|
if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if err := t.index.Sync(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
// Calculate the new expected size of the data file and truncate it
|
// Calculate the new expected size of the data file and truncate it
|
||||||
var expected indexEntry
|
var expected indexEntry
|
||||||
if length == 0 {
|
if length == 0 {
|
||||||
@@ -446,6 +451,7 @@ func (t *freezerTable) truncateHead(items uint64) error {
|
|||||||
// Release any files _after the current head -- both the previous head
|
// Release any files _after the current head -- both the previous head
|
||||||
// and any files which may have been opened for reading
|
// and any files which may have been opened for reading
|
||||||
t.releaseFilesAfter(expected.filenum, true)
|
t.releaseFilesAfter(expected.filenum, true)
|
||||||
|
|
||||||
// Set back the historic head
|
// Set back the historic head
|
||||||
t.head = newHead
|
t.head = newHead
|
||||||
t.headId = expected.filenum
|
t.headId = expected.filenum
|
||||||
@@ -453,6 +459,9 @@ func (t *freezerTable) truncateHead(items uint64) error {
|
|||||||
if err := truncateFreezerFile(t.head, int64(expected.offset)); err != nil {
|
if err := truncateFreezerFile(t.head, int64(expected.offset)); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if err := t.head.Sync(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
// All data files truncated, set internal counters and return
|
// All data files truncated, set internal counters and return
|
||||||
t.headBytes = int64(expected.offset)
|
t.headBytes = int64(expected.offset)
|
||||||
t.items.Store(items)
|
t.items.Store(items)
|
||||||
@@ -597,10 +606,12 @@ func (t *freezerTable) Close() error {
|
|||||||
// error on Windows.
|
// error on Windows.
|
||||||
doClose(t.index, true, true)
|
doClose(t.index, true, true)
|
||||||
doClose(t.meta, true, true)
|
doClose(t.meta, true, true)
|
||||||
|
|
||||||
// The preopened non-head data-files are all opened in readonly.
|
// The preopened non-head data-files are all opened in readonly.
|
||||||
// The head is opened in rw-mode, so we sync it here - but since it's also
|
// The head is opened in rw-mode, so we sync it here - but since it's also
|
||||||
// part of t.files, it will be closed in the loop below.
|
// part of t.files, it will be closed in the loop below.
|
||||||
doClose(t.head, true, false) // sync but do not close
|
doClose(t.head, true, false) // sync but do not close
|
||||||
|
|
||||||
for _, f := range t.files {
|
for _, f := range t.files {
|
||||||
doClose(f, false, true) // close but do not sync
|
doClose(f, false, true) // close but do not sync
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -73,11 +73,7 @@ func copyFrom(srcPath, destPath string, offset uint64, before func(f *os.File) e
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
f = nil
|
f = nil
|
||||||
|
return os.Rename(fname, destPath)
|
||||||
if err := os.Rename(fname, destPath); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// openFreezerFileForAppend opens a freezer table file and seeks to the end
|
// openFreezerFileForAppend opens a freezer table file and seeks to the end
|
||||||
|
|||||||
File diff suppressed because one or more lines are too long
@@ -39,9 +39,7 @@ web3.eth.sendTransaction({
|
|||||||
web3.eth.sendTransaction({
|
web3.eth.sendTransaction({
|
||||||
from: "consensus address of your validator",
|
from: "consensus address of your validator",
|
||||||
to: "0x0000000000000000000000000000000000001000",
|
to: "0x0000000000000000000000000000000000001000",
|
||||||
|
gas: "1000000",
|
||||||
data: "0x04c4fec6"
|
data: "0x04c4fec6"
|
||||||
})
|
})
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -193,6 +193,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
|
|||||||
chainConfig.FeynmanTime = config.OverrideFeynman
|
chainConfig.FeynmanTime = config.OverrideFeynman
|
||||||
overrides.OverrideFeynman = config.OverrideFeynman
|
overrides.OverrideFeynman = config.OverrideFeynman
|
||||||
}
|
}
|
||||||
|
if config.OverrideFeynmanFix != nil {
|
||||||
|
chainConfig.FeynmanFixTime = config.OverrideFeynmanFix
|
||||||
|
overrides.OverrideFeynmanFix = config.OverrideFeynmanFix
|
||||||
|
}
|
||||||
|
|
||||||
eth := &Ethereum{
|
eth := &Ethereum{
|
||||||
config: config,
|
config: config,
|
||||||
|
|||||||
@@ -200,6 +200,9 @@ type Config struct {
|
|||||||
|
|
||||||
// OverrideFeynman (TODO: remove after the fork)
|
// OverrideFeynman (TODO: remove after the fork)
|
||||||
OverrideFeynman *uint64 `toml:",omitempty"`
|
OverrideFeynman *uint64 `toml:",omitempty"`
|
||||||
|
|
||||||
|
// OverrideFeynmanFix (TODO: remove after the fork)
|
||||||
|
OverrideFeynmanFix *uint64 `toml:",omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateConsensusEngine creates a consensus engine for the given chain config.
|
// CreateConsensusEngine creates a consensus engine for the given chain config.
|
||||||
|
|||||||
@@ -338,7 +338,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
|
|||||||
// If 'other reject' is >25% of the deliveries in any batch, sleep a bit.
|
// If 'other reject' is >25% of the deliveries in any batch, sleep a bit.
|
||||||
if otherreject > 128/4 {
|
if otherreject > 128/4 {
|
||||||
time.Sleep(200 * time.Millisecond)
|
time.Sleep(200 * time.Millisecond)
|
||||||
log.Warn("Peer delivering stale transactions", "peer", peer, "rejected", otherreject)
|
log.Debug("Peer delivering stale transactions", "peer", peer, "rejected", otherreject)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
select {
|
select {
|
||||||
|
|||||||
@@ -227,8 +227,8 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks uint64, unresolvedL
|
|||||||
if p < 0 || p > 100 {
|
if p < 0 || p > 100 {
|
||||||
return common.Big0, nil, nil, nil, fmt.Errorf("%w: %f", errInvalidPercentile, p)
|
return common.Big0, nil, nil, nil, fmt.Errorf("%w: %f", errInvalidPercentile, p)
|
||||||
}
|
}
|
||||||
if i > 0 && p < rewardPercentiles[i-1] {
|
if i > 0 && p <= rewardPercentiles[i-1] {
|
||||||
return common.Big0, nil, nil, nil, fmt.Errorf("%w: #%d:%f > #%d:%f", errInvalidPercentile, i-1, rewardPercentiles[i-1], i, p)
|
return common.Big0, nil, nil, nil, fmt.Errorf("%w: #%d:%f >= #%d:%f", errInvalidPercentile, i-1, rewardPercentiles[i-1], i, p)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
var (
|
var (
|
||||||
|
|||||||
@@ -10,6 +10,8 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const backupTimeFormat = "2006-01-02_15"
|
||||||
|
|
||||||
type TimeTicker struct {
|
type TimeTicker struct {
|
||||||
stop chan struct{}
|
stop chan struct{}
|
||||||
C <-chan time.Time
|
C <-chan time.Time
|
||||||
@@ -69,9 +71,12 @@ type AsyncFileWriter struct {
|
|||||||
buf chan []byte
|
buf chan []byte
|
||||||
stop chan struct{}
|
stop chan struct{}
|
||||||
timeTicker *TimeTicker
|
timeTicker *TimeTicker
|
||||||
|
|
||||||
|
rotateHours uint
|
||||||
|
maxBackups int
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewAsyncFileWriter(filePath string, maxBytesSize int64, rotateHours uint) *AsyncFileWriter {
|
func NewAsyncFileWriter(filePath string, maxBytesSize int64, maxBackups int, rotateHours uint) *AsyncFileWriter {
|
||||||
absFilePath, err := filepath.Abs(filePath)
|
absFilePath, err := filepath.Abs(filePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Sprintf("get file path of logger error. filePath=%s, err=%s", filePath, err))
|
panic(fmt.Sprintf("get file path of logger error. filePath=%s, err=%s", filePath, err))
|
||||||
@@ -81,6 +86,8 @@ func NewAsyncFileWriter(filePath string, maxBytesSize int64, rotateHours uint) *
|
|||||||
filePath: absFilePath,
|
filePath: absFilePath,
|
||||||
buf: make(chan []byte, maxBytesSize),
|
buf: make(chan []byte, maxBytesSize),
|
||||||
stop: make(chan struct{}),
|
stop: make(chan struct{}),
|
||||||
|
rotateHours: rotateHours,
|
||||||
|
maxBackups: maxBackups,
|
||||||
timeTicker: NewTimeTicker(rotateHours),
|
timeTicker: NewTimeTicker(rotateHours),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -178,6 +185,9 @@ func (w *AsyncFileWriter) rotateFile() {
|
|||||||
if err := w.initLogFile(); err != nil {
|
if err := w.initLogFile(); err != nil {
|
||||||
fmt.Fprintf(os.Stderr, "init log file error. err=%s", err)
|
fmt.Fprintf(os.Stderr, "init log file error. err=%s", err)
|
||||||
}
|
}
|
||||||
|
if err := w.removeExpiredFile(); err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "remove expired file error. err=%s", err)
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -222,5 +232,29 @@ func (w *AsyncFileWriter) flushAndClose() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (w *AsyncFileWriter) timeFilePath(filePath string) string {
|
func (w *AsyncFileWriter) timeFilePath(filePath string) string {
|
||||||
return filePath + "." + time.Now().Format("2006-01-02_15")
|
return filePath + "." + time.Now().Format(backupTimeFormat)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *AsyncFileWriter) getExpiredFile(filePath string, maxBackups int, rotateHours uint) string {
|
||||||
|
if rotateHours > 0 {
|
||||||
|
maxBackups = int(rotateHours) * maxBackups
|
||||||
|
}
|
||||||
|
return filePath + "." + time.Now().Add(-time.Hour*time.Duration(maxBackups)).Format(backupTimeFormat)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *AsyncFileWriter) removeExpiredFile() error {
|
||||||
|
if w.maxBackups == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
oldFilepath := w.getExpiredFile(w.filePath, w.maxBackups, w.rotateHours)
|
||||||
|
_, err := os.Stat(oldFilepath)
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
errRemove := os.Remove(oldFilepath)
|
||||||
|
if err != nil {
|
||||||
|
return errRemove
|
||||||
|
}
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,10 +6,12 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestWriterHourly(t *testing.T) {
|
func TestWriterHourly(t *testing.T) {
|
||||||
w := NewAsyncFileWriter("./hello.log", 100, 1)
|
w := NewAsyncFileWriter("./hello.log", 100, 1, 1)
|
||||||
w.Start()
|
w.Start()
|
||||||
w.Write([]byte("hello\n"))
|
w.Write([]byte("hello\n"))
|
||||||
w.Write([]byte("world\n"))
|
w.Write([]byte("world\n"))
|
||||||
@@ -67,3 +69,22 @@ func TestGetNextRotationHour(t *testing.T) {
|
|||||||
t.Run("TestGetNextRotationHour_"+strconv.Itoa(i), test(tc.now, tc.delta, tc.expectedHour))
|
t.Run("TestGetNextRotationHour_"+strconv.Itoa(i), test(tc.now, tc.delta, tc.expectedHour))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestClearBackups(t *testing.T) {
|
||||||
|
dir := "./test"
|
||||||
|
os.Mkdir(dir, 0700)
|
||||||
|
w := NewAsyncFileWriter("./test/bsc.log", 100, 1, 1)
|
||||||
|
defer os.RemoveAll(dir)
|
||||||
|
fakeCurrentTime := time.Now()
|
||||||
|
name := ""
|
||||||
|
data := []byte("data")
|
||||||
|
for i := 0; i < 5; i++ {
|
||||||
|
name = w.filePath + "." + fakeCurrentTime.Format(backupTimeFormat)
|
||||||
|
_ = os.WriteFile(name, data, 0700)
|
||||||
|
fakeCurrentTime = fakeCurrentTime.Add(-time.Hour * 1)
|
||||||
|
}
|
||||||
|
oldFile := w.getExpiredFile(w.filePath, w.maxBackups, w.rotateHours)
|
||||||
|
w.removeExpiredFile()
|
||||||
|
_, err := os.Stat(oldFile)
|
||||||
|
assert.True(t, os.IsNotExist(err))
|
||||||
|
}
|
||||||
|
|||||||
@@ -75,14 +75,14 @@ func FileHandler(path string, fmtr Format) (Handler, error) {
|
|||||||
// RotatingFileHandler returns a handler which writes log records to file chunks
|
// RotatingFileHandler returns a handler which writes log records to file chunks
|
||||||
// at the given path. When a file's size reaches the limit, the handler creates
|
// at the given path. When a file's size reaches the limit, the handler creates
|
||||||
// a new file named after the timestamp of the first log record it will contain.
|
// a new file named after the timestamp of the first log record it will contain.
|
||||||
func RotatingFileHandler(filePath string, limit uint, formatter Format, rotateHours uint) (Handler, error) {
|
func RotatingFileHandler(filePath string, limit uint, maxBackups uint, formatter Format, rotateHours uint) (Handler, error) {
|
||||||
if _, err := os.Stat(path.Dir(filePath)); os.IsNotExist(err) {
|
if _, err := os.Stat(path.Dir(filePath)); os.IsNotExist(err) {
|
||||||
err := os.MkdirAll(path.Dir(filePath), 0755)
|
err := os.MkdirAll(path.Dir(filePath), 0755)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("could not create directory %s, %v", path.Dir(filePath), err)
|
return nil, fmt.Errorf("could not create directory %s, %v", path.Dir(filePath), err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fileWriter := NewAsyncFileWriter(filePath, int64(limit), rotateHours)
|
fileWriter := NewAsyncFileWriter(filePath, int64(limit), int(maxBackups), rotateHours)
|
||||||
fileWriter.Start()
|
fileWriter.Start()
|
||||||
return StreamHandler(fileWriter, formatter), nil
|
return StreamHandler(fileWriter, formatter), nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -290,8 +290,8 @@ func (c Ctx) toArray() []interface{} {
|
|||||||
return arr
|
return arr
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFileLvlHandler(logPath string, maxBytesSize uint, level string, rotateHours uint) Handler {
|
func NewFileLvlHandler(logPath string, maxBytesSize uint, maxBackups uint, level string, rotateHours uint) Handler {
|
||||||
rfh, err := RotatingFileHandler(logPath, maxBytesSize, LogfmtFormat(), rotateHours)
|
rfh, err := RotatingFileHandler(logPath, maxBytesSize, maxBackups, LogfmtFormat(), rotateHours)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -513,6 +513,7 @@ type LogConfig struct {
|
|||||||
MaxBytesSize *uint `toml:",omitempty"`
|
MaxBytesSize *uint `toml:",omitempty"`
|
||||||
Level *string `toml:",omitempty"`
|
Level *string `toml:",omitempty"`
|
||||||
RotateHours *uint `toml:",omitempty"`
|
RotateHours *uint `toml:",omitempty"`
|
||||||
|
MaxBackups *uint `toml:",omitempty"`
|
||||||
|
|
||||||
// TermTimeFormat is the time format used for console logging.
|
// TermTimeFormat is the time format used for console logging.
|
||||||
TermTimeFormat *string `toml:",omitempty"`
|
TermTimeFormat *string `toml:",omitempty"`
|
||||||
|
|||||||
@@ -118,7 +118,12 @@ func New(conf *Config) (*Node, error) {
|
|||||||
rotateHours = *conf.LogConfig.RotateHours
|
rotateHours = *conf.LogConfig.RotateHours
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Root().SetHandler(log.NewFileLvlHandler(logFilePath, *conf.LogConfig.MaxBytesSize, *conf.LogConfig.Level, rotateHours))
|
maxBackups := uint(0)
|
||||||
|
if conf.LogConfig.MaxBackups != nil {
|
||||||
|
maxBackups = *conf.LogConfig.MaxBackups
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Root().SetHandler(log.NewFileLvlHandler(logFilePath, *conf.LogConfig.MaxBytesSize, maxBackups, *conf.LogConfig.Level, rotateHours))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if conf.Logger == nil {
|
if conf.Logger == nil {
|
||||||
|
|||||||
@@ -147,9 +147,8 @@ var (
|
|||||||
// UnixTime: 1705996800 is January 23, 2024 8:00:00 AM UTC
|
// UnixTime: 1705996800 is January 23, 2024 8:00:00 AM UTC
|
||||||
ShanghaiTime: newUint64(1705996800),
|
ShanghaiTime: newUint64(1705996800),
|
||||||
KeplerTime: newUint64(1705996800),
|
KeplerTime: newUint64(1705996800),
|
||||||
|
FeynmanTime: newUint64(1713419340),
|
||||||
// TODO
|
FeynmanFixTime: newUint64(1713419340),
|
||||||
FeynmanTime: nil,
|
|
||||||
|
|
||||||
Parlia: &ParliaConfig{
|
Parlia: &ParliaConfig{
|
||||||
Period: 3,
|
Period: 3,
|
||||||
@@ -187,6 +186,7 @@ var (
|
|||||||
ShanghaiTime: newUint64(1702972800),
|
ShanghaiTime: newUint64(1702972800),
|
||||||
KeplerTime: newUint64(1702972800),
|
KeplerTime: newUint64(1702972800),
|
||||||
FeynmanTime: newUint64(1710136800),
|
FeynmanTime: newUint64(1710136800),
|
||||||
|
FeynmanFixTime: newUint64(1711342800),
|
||||||
|
|
||||||
Parlia: &ParliaConfig{
|
Parlia: &ParliaConfig{
|
||||||
Period: 3,
|
Period: 3,
|
||||||
@@ -224,6 +224,7 @@ var (
|
|||||||
ShanghaiTime: newUint64(0),
|
ShanghaiTime: newUint64(0),
|
||||||
KeplerTime: newUint64(0),
|
KeplerTime: newUint64(0),
|
||||||
FeynmanTime: newUint64(0),
|
FeynmanTime: newUint64(0),
|
||||||
|
FeynmanFixTime: newUint64(0),
|
||||||
|
|
||||||
Parlia: &ParliaConfig{
|
Parlia: &ParliaConfig{
|
||||||
Period: 3,
|
Period: 3,
|
||||||
@@ -462,6 +463,7 @@ type ChainConfig struct {
|
|||||||
ShanghaiTime *uint64 `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai)
|
ShanghaiTime *uint64 `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai)
|
||||||
KeplerTime *uint64 `json:"keplerTime,omitempty"` // Kepler switch time (nil = no fork, 0 = already activated)
|
KeplerTime *uint64 `json:"keplerTime,omitempty"` // Kepler switch time (nil = no fork, 0 = already activated)
|
||||||
FeynmanTime *uint64 `json:"feynmanTime,omitempty"` // Feynman switch time (nil = no fork, 0 = already activated)
|
FeynmanTime *uint64 `json:"feynmanTime,omitempty"` // Feynman switch time (nil = no fork, 0 = already activated)
|
||||||
|
FeynmanFixTime *uint64 `json:"feynmanFixTime,omitempty"` // FeynmanFix switch time (nil = no fork, 0 = already activated)
|
||||||
CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun)
|
CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun)
|
||||||
PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague)
|
PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague)
|
||||||
VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle)
|
VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle)
|
||||||
@@ -559,7 +561,12 @@ func (c *ChainConfig) String() string {
|
|||||||
FeynmanTime = big.NewInt(0).SetUint64(*c.FeynmanTime)
|
FeynmanTime = big.NewInt(0).SetUint64(*c.FeynmanTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, Engine: %v}",
|
var FeynmanFixTime *big.Int
|
||||||
|
if c.FeynmanFixTime != nil {
|
||||||
|
FeynmanFixTime = big.NewInt(0).SetUint64(*c.FeynmanFixTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, FeynmanFixTime: %v, Engine: %v}",
|
||||||
c.ChainID,
|
c.ChainID,
|
||||||
c.HomesteadBlock,
|
c.HomesteadBlock,
|
||||||
c.DAOForkBlock,
|
c.DAOForkBlock,
|
||||||
@@ -594,6 +601,7 @@ func (c *ChainConfig) String() string {
|
|||||||
ShanghaiTime,
|
ShanghaiTime,
|
||||||
KeplerTime,
|
KeplerTime,
|
||||||
FeynmanTime,
|
FeynmanTime,
|
||||||
|
FeynmanFixTime,
|
||||||
engine,
|
engine,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@@ -842,6 +850,20 @@ func (c *ChainConfig) IsOnFeynman(currentBlockNumber *big.Int, lastBlockTime uin
|
|||||||
return !c.IsFeynman(lastBlockNumber, lastBlockTime) && c.IsFeynman(currentBlockNumber, currentBlockTime)
|
return !c.IsFeynman(lastBlockNumber, lastBlockTime) && c.IsFeynman(currentBlockNumber, currentBlockTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsFeynmanFix returns whether time is either equal to the FeynmanFix fork time or greater.
|
||||||
|
func (c *ChainConfig) IsFeynmanFix(num *big.Int, time uint64) bool {
|
||||||
|
return c.IsLondon(num) && isTimestampForked(c.FeynmanFixTime, time)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsOnFeynmanFix returns whether currentBlockTime is either equal to the FeynmanFix fork time or greater firstly.
|
||||||
|
func (c *ChainConfig) IsOnFeynmanFix(currentBlockNumber *big.Int, lastBlockTime uint64, currentBlockTime uint64) bool {
|
||||||
|
lastBlockNumber := new(big.Int)
|
||||||
|
if currentBlockNumber.Cmp(big.NewInt(1)) >= 0 {
|
||||||
|
lastBlockNumber.Sub(currentBlockNumber, big.NewInt(1))
|
||||||
|
}
|
||||||
|
return !c.IsFeynmanFix(lastBlockNumber, lastBlockTime) && c.IsFeynmanFix(currentBlockNumber, currentBlockTime)
|
||||||
|
}
|
||||||
|
|
||||||
// IsCancun returns whether num is either equal to the Cancun fork time or greater.
|
// IsCancun returns whether num is either equal to the Cancun fork time or greater.
|
||||||
func (c *ChainConfig) IsCancun(num *big.Int, time uint64) bool {
|
func (c *ChainConfig) IsCancun(num *big.Int, time uint64) bool {
|
||||||
return c.IsLondon(num) && isTimestampForked(c.CancunTime, time)
|
return c.IsLondon(num) && isTimestampForked(c.CancunTime, time)
|
||||||
@@ -908,6 +930,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
|
|||||||
{name: "hertzfixBlock", block: c.HertzfixBlock},
|
{name: "hertzfixBlock", block: c.HertzfixBlock},
|
||||||
{name: "keplerTime", timestamp: c.KeplerTime},
|
{name: "keplerTime", timestamp: c.KeplerTime},
|
||||||
{name: "feynmanTime", timestamp: c.FeynmanTime},
|
{name: "feynmanTime", timestamp: c.FeynmanTime},
|
||||||
|
{name: "feynmanFixTime", timestamp: c.FeynmanFixTime},
|
||||||
{name: "cancunTime", timestamp: c.CancunTime, optional: true},
|
{name: "cancunTime", timestamp: c.CancunTime, optional: true},
|
||||||
{name: "pragueTime", timestamp: c.PragueTime, optional: true},
|
{name: "pragueTime", timestamp: c.PragueTime, optional: true},
|
||||||
{name: "verkleTime", timestamp: c.VerkleTime, optional: true},
|
{name: "verkleTime", timestamp: c.VerkleTime, optional: true},
|
||||||
@@ -1050,6 +1073,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, headNumber *big.Int,
|
|||||||
if isForkTimestampIncompatible(c.FeynmanTime, newcfg.FeynmanTime, headTimestamp) {
|
if isForkTimestampIncompatible(c.FeynmanTime, newcfg.FeynmanTime, headTimestamp) {
|
||||||
return newTimestampCompatError("Feynman fork timestamp", c.FeynmanTime, newcfg.FeynmanTime)
|
return newTimestampCompatError("Feynman fork timestamp", c.FeynmanTime, newcfg.FeynmanTime)
|
||||||
}
|
}
|
||||||
|
if isForkTimestampIncompatible(c.FeynmanFixTime, newcfg.FeynmanFixTime, headTimestamp) {
|
||||||
|
return newTimestampCompatError("FeynmanFix fork timestamp", c.FeynmanFixTime, newcfg.FeynmanFixTime)
|
||||||
|
}
|
||||||
if isForkTimestampIncompatible(c.CancunTime, newcfg.CancunTime, headTimestamp) {
|
if isForkTimestampIncompatible(c.CancunTime, newcfg.CancunTime, headTimestamp) {
|
||||||
return newTimestampCompatError("Cancun fork timestamp", c.CancunTime, newcfg.CancunTime)
|
return newTimestampCompatError("Cancun fork timestamp", c.CancunTime, newcfg.CancunTime)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ import (
|
|||||||
const (
|
const (
|
||||||
VersionMajor = 1 // Major version component of the current release
|
VersionMajor = 1 // Major version component of the current release
|
||||||
VersionMinor = 3 // Minor version component of the current release
|
VersionMinor = 3 // Minor version component of the current release
|
||||||
VersionPatch = 8 // Patch version component of the current release
|
VersionPatch = 13 // Patch version component of the current release
|
||||||
VersionMeta = "" // Version metadata to append to the version string
|
VersionMeta = "" // Version metadata to append to the version string
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user