Compare commits

...

11 Commits

Author SHA1 Message Date
zzzckck
5e74ea650d Merge pull request #2218 from bnb-chain/develop
draft release v1.3.9
2024-02-20 17:52:32 +08:00
VM
5378df3702 cmd: optimize parse state scheme in cli and config (#2220) 2024-02-20 17:22:27 +08:00
Matus Kysel
40cae45436 Merge pull request #2213 from bnb-chain/freezer-fix
Freezer fix
2024-02-20 09:30:30 +01:00
zzzckck
361e8413e6 release: prepare for release v1.3.9 (#2217) 2024-02-19 14:43:15 +08:00
rjl493456442
36a283ef98 core/rawdb: fsync the index file after each freezer write (#28483)
* core/rawdb: fsync the index and data file after each freezer write

* core/rawdb: fsync the data file in freezer after write
2024-02-14 08:22:43 +01:00
Ng Wei Han
78d1cade19 eth/fetcher: downgrade state tx log (#2195) 2024-02-02 10:58:17 +08:00
Eric
82beb2c5f3 log: support maxBackups in config.toml (#2186) 2024-01-30 19:16:01 +08:00
zzzckck
c6aeee2001 Merge pull request #2169 from bnb-chain/develop
draft release v1.3.8
2024-01-23 17:22:42 +08:00
zzzckck
f28b98a994 Merge pull request #2088 from bnb-chain/develop
draft release v1.3.7
2023-12-19 14:09:44 +08:00
zzzckck
5ee77bbe8b Merge pull request #2066 from bnb-chain/develop
draft release v1.3.6
2023-12-14 14:26:55 +08:00
zzzckck
fe928d4778 Merge pull request #2006 from bnb-chain/develop
release: draft release v1.3.5
2023-12-08 14:17:24 +08:00
16 changed files with 123 additions and 212 deletions

View File

@@ -1,4 +1,18 @@
# Changelog
## v1.3.9
FEATURE
* [\#2186](https://github.com/bnb-chain/bsc/pull/2186) log: support maxBackups in config.toml
BUGFIX
* [\#2160](https://github.com/bnb-chain/bsc/pull/2160) cmd: fix dump cli cannot work in path mode
* [\#2183](https://github.com/bnb-chain/bsc/pull/2183) p2p: resolved deadlock on p2p server shutdown
IMPROVEMENT
* [\#2177](https://github.com/bnb-chain/bsc/pull/2177) build(deps): bump github.com/quic-go/quic-go from 0.39.3 to 0.39.4
* [\#2185](https://github.com/bnb-chain/bsc/pull/2185) consensus/parlia: set nonce before evm run
* [\#2190](https://github.com/bnb-chain/bsc/pull/2190) fix(legacypool): deprecate already known error
* [\#2195](https://github.com/bnb-chain/bsc/pull/2195) eth/fetcher: downgrade state tx log
## v1.3.8
FEATURE
* [\#2074](https://github.com/bnb-chain/bsc/pull/2074) faucet: new faucet client

View File

@@ -18,7 +18,6 @@
package utils
import (
"bufio"
"context"
"crypto/ecdsa"
"encoding/hex"
@@ -1884,7 +1883,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(StateHistoryFlag.Name) {
cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name)
}
scheme, err := CompareStateSchemeCLIWithConfig(ctx)
scheme, err := ParseCLIAndConfigStateScheme(ctx.String(StateSchemeFlag.Name), cfg.StateScheme)
if err != nil {
Fatalf("%v", err)
}
@@ -2353,11 +2352,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
}
provided, err := CompareStateSchemeCLIWithConfig(ctx)
if err != nil {
Fatalf("%v", err)
}
scheme, err := rawdb.ParseStateScheme(provided, chainDb)
scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), chainDb)
if err != nil {
Fatalf("%v", err)
}
@@ -2425,11 +2420,7 @@ func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, read
config := &trie.Config{
Preimages: preimage,
}
provided, err := CompareStateSchemeCLIWithConfig(ctx)
if err != nil {
Fatalf("%v", err)
}
scheme, err := rawdb.ParseStateScheme(provided, disk)
scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), disk)
if err != nil {
Fatalf("%v", err)
}
@@ -2448,27 +2439,15 @@ func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, read
return trie.NewDatabase(disk, config)
}
// CompareStateSchemeCLIWithConfig compares the state scheme given on the CLI with the one in the config file and checks that they are compatible.
func CompareStateSchemeCLIWithConfig(ctx *cli.Context) (string, error) {
var (
cfgScheme string
err error
)
if file := ctx.String("config"); file != "" {
// we don't validate cfgScheme because it's already checked in cmd/geth/loadBaseConfig
if cfgScheme, err = scanConfigForStateScheme(file); err != nil {
log.Error("Failed to parse config file", "error", err)
return "", err
}
}
if !ctx.IsSet(StateSchemeFlag.Name) {
// ParseCLIAndConfigStateScheme parses state scheme in CLI and config.
func ParseCLIAndConfigStateScheme(cliScheme, cfgScheme string) (string, error) {
if cliScheme == "" {
if cfgScheme != "" {
log.Info("Use config state scheme", "config", cfgScheme)
}
return cfgScheme, nil
}
cliScheme := ctx.String(StateSchemeFlag.Name)
if !rawdb.ValidateStateScheme(cliScheme) {
return "", fmt.Errorf("invalid state scheme in CLI: %s", cliScheme)
}
@@ -2478,35 +2457,3 @@ func CompareStateSchemeCLIWithConfig(ctx *cli.Context) (string, error) {
}
return "", fmt.Errorf("incompatible state scheme, CLI: %s, config: %s", cliScheme, cfgScheme)
}
func scanConfigForStateScheme(file string) (string, error) {
f, err := os.Open(file)
if err != nil {
return "", err
}
defer f.Close()
scanner := bufio.NewScanner(f)
targetStr := "StateScheme"
for scanner.Scan() {
line := scanner.Text()
if strings.Contains(line, targetStr) {
return indexStateScheme(line), nil
}
}
if err = scanner.Err(); err != nil {
return "", err
}
return "", nil
}
func indexStateScheme(str string) string {
i1 := strings.Index(str, "\"")
i2 := strings.LastIndex(str, "\"")
if i1 != -1 && i2 != -1 && i1 < i2 {
return str[i1+1 : i2]
}
return ""
}
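
Taken together, the refactor turns config-file scanning into plain argument passing: the caller now hands over both the CLI value and the already-decoded config value. A minimal sketch of the resolution rules, using go-ethereum's two valid schemes and reconstructing the branch the hunk elides between the validation and the final error (that middle branch is an assumption):

```go
package main

import "fmt"

// resolveStateScheme mirrors the logic visible in the diff; the middle branch
// (accepting the CLI scheme when the config is empty or agrees) is an
// assumption, since the hunk elides those lines.
func resolveStateScheme(cliScheme, cfgScheme string) (string, error) {
	if cliScheme == "" {
		// No --state.scheme flag: fall back to the config value, which may
		// itself be empty and is resolved later against the database.
		return cfgScheme, nil
	}
	if cliScheme != "hash" && cliScheme != "path" {
		return "", fmt.Errorf("invalid state scheme in CLI: %s", cliScheme)
	}
	if cfgScheme == "" || cliScheme == cfgScheme { // assumed branch
		return cliScheme, nil
	}
	return "", fmt.Errorf("incompatible state scheme, CLI: %s, config: %s", cliScheme, cfgScheme)
}

func main() {
	fmt.Println(resolveStateScheme("path", ""))     // path <nil>
	fmt.Println(resolveStateScheme("hash", "path")) // incompatible state scheme error
}
```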

View File

@@ -18,13 +18,8 @@
package utils
import (
"os"
"reflect"
"testing"
"github.com/stretchr/testify/assert"
"github.com/ethereum/go-ethereum/core/rawdb"
)
func Test_SplitTagsFlag(t *testing.T) {
@@ -67,126 +62,3 @@ func Test_SplitTagsFlag(t *testing.T) {
})
}
}
func Test_parseConfig(t *testing.T) {
tests := []struct {
name string
fn func() string
wantedResult string
wantedIsErr bool
wantedErrStr string
}{
{
name: "path",
fn: func() string {
tomlString := `[Eth]
NetworkId = 56
StateScheme = "path"`
return createTempTomlFile(t, tomlString)
},
wantedResult: rawdb.PathScheme,
wantedIsErr: false,
wantedErrStr: "",
},
{
name: "hash",
fn: func() string {
tomlString := `[Eth]
NetworkId = 56
StateScheme = "hash"`
return createTempTomlFile(t, tomlString)
},
wantedResult: rawdb.HashScheme,
wantedIsErr: false,
wantedErrStr: "",
},
{
name: "empty state scheme",
fn: func() string {
tomlString := `[Eth]
NetworkId = 56
StateScheme = ""`
return createTempTomlFile(t, tomlString)
},
wantedResult: "",
wantedIsErr: false,
wantedErrStr: "",
},
{
name: "unset state scheme",
fn: func() string {
tomlString := `[Eth]
NetworkId = 56`
return createTempTomlFile(t, tomlString)
},
wantedResult: "",
wantedIsErr: false,
wantedErrStr: "",
},
{
name: "failed to open file",
fn: func() string { return "" },
wantedResult: "",
wantedIsErr: true,
wantedErrStr: "open : no such file or directory",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result, err := scanConfigForStateScheme(tt.fn())
if tt.wantedIsErr {
assert.Contains(t, err.Error(), tt.wantedErrStr)
} else {
assert.Nil(t, err)
}
assert.Equal(t, tt.wantedResult, result)
})
}
}
// createTempTomlFile is a helper function to create a temp file with the provided TOML content
func createTempTomlFile(t *testing.T, content string) string {
t.Helper()
dir := t.TempDir()
file, err := os.CreateTemp(dir, "config.toml")
if err != nil {
t.Fatalf("Unable to create temporary file: %v", err)
}
defer file.Close()
_, err = file.WriteString(content)
if err != nil {
t.Fatalf("Unable to write to temporary file: %v", err)
}
return file.Name()
}
func Test_parseString(t *testing.T) {
tests := []struct {
name string
arg string
wantResult string
}{
{
name: "hash string",
arg: "\"hash\"",
wantResult: rawdb.HashScheme,
},
{
name: "path string",
arg: "\"path\"",
wantResult: rawdb.PathScheme,
},
{
name: "empty string",
arg: "",
wantResult: "",
},
{
name: "empty string",
arg: "\"\"",
wantResult: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := indexStateScheme(tt.arg); got != tt.wantResult {
t.Errorf("parseString() = %v, want %v", got, tt.wantResult)
}
})
}
}

View File

@@ -335,7 +335,7 @@ func ParseStateScheme(provided string, disk ethdb.Database) (string, error) {
if stored == "" {
// use default scheme for empty database, flip it when
// path mode is chosen as default
log.Info("State schema set to default", "scheme", "hash")
log.Info("State scheme set to default", "scheme", "hash")
return HashScheme, nil
}
log.Info("State scheme set to already existing disk db", "scheme", stored)

View File

@@ -129,6 +129,8 @@ func InspectFreezerTable(ancient string, freezerName string, tableName string, s
switch freezerName {
case chainFreezerName:
path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
case stateFreezerName:
path, tables = filepath.Join(ancient, freezerName), stateFreezerNoSnappy
default:
return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
}

View File

@@ -188,19 +188,27 @@ func (batch *freezerTableBatch) maybeCommit() error {
// commit writes the batched items to the backing freezerTable.
func (batch *freezerTableBatch) commit() error {
// Write data.
// Write data. The head file is fsync'd after write to ensure the
// data is truly transferred to disk.
_, err := batch.t.head.Write(batch.dataBuffer)
if err != nil {
return err
}
if err := batch.t.head.Sync(); err != nil {
return err
}
dataSize := int64(len(batch.dataBuffer))
batch.dataBuffer = batch.dataBuffer[:0]
// Write indices.
// Write indices. The index file is fsync'd after write to ensure the
// data indexes are truly transferred to disk.
_, err = batch.t.index.Write(batch.indexBuffer)
if err != nil {
return err
}
if err := batch.t.index.Sync(); err != nil {
return err
}
indexSize := int64(len(batch.indexBuffer))
batch.indexBuffer = batch.indexBuffer[:0]
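
The durability pattern added here is plain write-then-fsync: os.File.Sync forces buffered pages to stable storage, closing the window where a crash right after a successful commit() could drop freezer items the caller believes are persisted. A standalone illustration of the ordering:

```go
package main

import "os"

// writeDurably appends buf and then fsyncs, the same ordering commit() now
// applies to both the data and index files.
func writeDurably(f *os.File, buf []byte) error {
	if _, err := f.Write(buf); err != nil {
		return err
	}
	// Sync wraps fsync(2); without it the bytes may live only in the OS
	// page cache and vanish on power failure.
	return f.Sync()
}

func main() {
	f, err := os.CreateTemp("", "freezer-*")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := writeDurably(f, []byte("payload")); err != nil {
		panic(err)
	}
}
```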

View File

@@ -223,7 +223,9 @@ func (t *freezerTable) repair() error {
if t.readonly {
return fmt.Errorf("index file(path: %s, name: %s) size is not a multiple of %d", t.path, t.name, indexEntrySize)
}
truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path
if err := truncateFreezerFile(t.index, stat.Size()-overflow); err != nil {
return err
} // New file can't trigger this path
}
// Retrieve the file sizes and prepare for truncation
if stat, err = t.index.Stat(); err != nil {
@@ -268,8 +270,8 @@ func (t *freezerTable) repair() error {
// Print an error log if the index is corrupted due to an incorrect
// last index item. While it is theoretically possible to have a zero offset
// by storing all zero-size items, it is highly unlikely to occur in practice.
if lastIndex.offset == 0 && offsetsSize%indexEntrySize > 1 {
log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "items", offsetsSize%indexEntrySize-1)
if lastIndex.offset == 0 && offsetsSize/indexEntrySize > 1 {
log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "indexes", offsetsSize/indexEntrySize)
}
if t.readonly {
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
@@ -424,6 +426,9 @@ func (t *freezerTable) truncateHead(items uint64) error {
if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
return err
}
if err := t.index.Sync(); err != nil {
return err
}
// Calculate the new expected size of the data file and truncate it
var expected indexEntry
if length == 0 {
@@ -446,6 +451,7 @@ func (t *freezerTable) truncateHead(items uint64) error {
// Release any files _after the current head -- both the previous head
// and any files which may have been opened for reading
t.releaseFilesAfter(expected.filenum, true)
// Set back the historic head
t.head = newHead
t.headId = expected.filenum
@@ -453,6 +459,9 @@ func (t *freezerTable) truncateHead(items uint64) error {
if err := truncateFreezerFile(t.head, int64(expected.offset)); err != nil {
return err
}
if err := t.head.Sync(); err != nil {
return err
}
// All data files truncated, set internal counters and return
t.headBytes = int64(expected.offset)
t.items.Store(items)
@@ -597,10 +606,12 @@ func (t *freezerTable) Close() error {
// error on Windows.
doClose(t.index, true, true)
doClose(t.meta, true, true)
// The preopened non-head data-files are all opened in readonly.
// The head is opened in rw-mode, so we sync it here - but since it's also
// part of t.files, it will be closed in the loop below.
doClose(t.head, true, false) // sync but do not close
for _, f := range t.files {
doClose(f, false, true) // close but do not sync
}

View File

@@ -73,11 +73,7 @@ func copyFrom(srcPath, destPath string, offset uint64, before func(f *os.File) e
return err
}
f = nil
if err := os.Rename(fname, destPath); err != nil {
return err
}
return nil
return os.Rename(fname, destPath)
}
// openFreezerFileForAppend opens a freezer table file and seeks to the end

View File

@@ -338,7 +338,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
// If 'other reject' is >25% of the deliveries in any batch, sleep a bit.
if otherreject > 128/4 {
time.Sleep(200 * time.Millisecond)
log.Warn("Peer delivering stale transactions", "peer", peer, "rejected", otherreject)
log.Debug("Peer delivering stale transactions", "peer", peer, "rejected", otherreject)
}
}
select {

View File

@@ -10,6 +10,8 @@ import (
"time"
)
const backupTimeFormat = "2006-01-02_15"
type TimeTicker struct {
stop chan struct{}
C <-chan time.Time
@@ -69,19 +71,24 @@ type AsyncFileWriter struct {
buf chan []byte
stop chan struct{}
timeTicker *TimeTicker
rotateHours uint
maxBackups int
}
func NewAsyncFileWriter(filePath string, maxBytesSize int64, rotateHours uint) *AsyncFileWriter {
func NewAsyncFileWriter(filePath string, maxBytesSize int64, maxBackups int, rotateHours uint) *AsyncFileWriter {
absFilePath, err := filepath.Abs(filePath)
if err != nil {
panic(fmt.Sprintf("get file path of logger error. filePath=%s, err=%s", filePath, err))
}
return &AsyncFileWriter{
filePath: absFilePath,
buf: make(chan []byte, maxBytesSize),
stop: make(chan struct{}),
timeTicker: NewTimeTicker(rotateHours),
filePath: absFilePath,
buf: make(chan []byte, maxBytesSize),
stop: make(chan struct{}),
rotateHours: rotateHours,
maxBackups: maxBackups,
timeTicker: NewTimeTicker(rotateHours),
}
}
@@ -178,6 +185,9 @@ func (w *AsyncFileWriter) rotateFile() {
if err := w.initLogFile(); err != nil {
fmt.Fprintf(os.Stderr, "init log file error. err=%s", err)
}
if err := w.removeExpiredFile(); err != nil {
fmt.Fprintf(os.Stderr, "remove expired file error. err=%s", err)
}
default:
}
}
@@ -222,5 +232,29 @@ func (w *AsyncFileWriter) flushAndClose() error {
}
func (w *AsyncFileWriter) timeFilePath(filePath string) string {
return filePath + "." + time.Now().Format("2006-01-02_15")
return filePath + "." + time.Now().Format(backupTimeFormat)
}
func (w *AsyncFileWriter) getExpiredFile(filePath string, maxBackups int, rotateHours uint) string {
if rotateHours > 0 {
maxBackups = int(rotateHours) * maxBackups
}
return filePath + "." + time.Now().Add(-time.Hour*time.Duration(maxBackups)).Format(backupTimeFormat)
}
func (w *AsyncFileWriter) removeExpiredFile() error {
if w.maxBackups == 0 {
return nil
}
oldFilepath := w.getExpiredFile(w.filePath, w.maxBackups, w.rotateHours)
_, err := os.Stat(oldFilepath)
if os.IsNotExist(err) {
return nil
}
errRemove := os.Remove(oldFilepath)
if err != nil {
return errRemove
}
return err
}
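
A worked example of the expiry arithmetic above: backups are written every rotateHours, so with rotateHours = 2 and maxBackups = 5 the file stamped 2 × 5 = 10 hours in the past is the one eligible for deletion, leaving at most five rotated files on disk. A sketch of the same computation (the bsc.log base name is illustrative):

```go
package main

import (
	"fmt"
	"time"
)

const backupTimeFormat = "2006-01-02_15"

func main() {
	rotateHours, maxBackups := 2, 5
	// Same computation as getExpiredFile: backups are spaced rotateHours
	// apart, so the first expired one is maxBackups rotations in the past.
	age := time.Duration(rotateHours*maxBackups) * time.Hour
	expired := "bsc.log." + time.Now().Add(-age).Format(backupTimeFormat)
	fmt.Println("would remove:", expired) // e.g. bsc.log.2024-02-20_07
}
```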

View File

@@ -6,10 +6,12 @@ import (
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestWriterHourly(t *testing.T) {
w := NewAsyncFileWriter("./hello.log", 100, 1)
w := NewAsyncFileWriter("./hello.log", 100, 1, 1)
w.Start()
w.Write([]byte("hello\n"))
w.Write([]byte("world\n"))
@@ -67,3 +69,22 @@ func TestGetNextRotationHour(t *testing.T) {
t.Run("TestGetNextRotationHour_"+strconv.Itoa(i), test(tc.now, tc.delta, tc.expectedHour))
}
}
func TestClearBackups(t *testing.T) {
dir := "./test"
os.Mkdir(dir, 0700)
w := NewAsyncFileWriter("./test/bsc.log", 100, 1, 1)
defer os.RemoveAll(dir)
fakeCurrentTime := time.Now()
name := ""
data := []byte("data")
for i := 0; i < 5; i++ {
name = w.filePath + "." + fakeCurrentTime.Format(backupTimeFormat)
_ = os.WriteFile(name, data, 0700)
fakeCurrentTime = fakeCurrentTime.Add(-time.Hour * 1)
}
oldFile := w.getExpiredFile(w.filePath, w.maxBackups, w.rotateHours)
w.removeExpiredFile()
_, err := os.Stat(oldFile)
assert.True(t, os.IsNotExist(err))
}

View File

@@ -75,14 +75,14 @@ func FileHandler(path string, fmtr Format) (Handler, error) {
// RotatingFileHandler returns a handler which writes log records to file chunks
// at the given path. When a file's size reaches the limit, the handler creates
// a new file named after the timestamp of the first log record it will contain.
func RotatingFileHandler(filePath string, limit uint, formatter Format, rotateHours uint) (Handler, error) {
func RotatingFileHandler(filePath string, limit uint, maxBackups uint, formatter Format, rotateHours uint) (Handler, error) {
if _, err := os.Stat(path.Dir(filePath)); os.IsNotExist(err) {
err := os.MkdirAll(path.Dir(filePath), 0755)
if err != nil {
return nil, fmt.Errorf("could not create directory %s, %v", path.Dir(filePath), err)
}
}
fileWriter := NewAsyncFileWriter(filePath, int64(limit), rotateHours)
fileWriter := NewAsyncFileWriter(filePath, int64(limit), int(maxBackups), rotateHours)
fileWriter.Start()
return StreamHandler(fileWriter, formatter), nil
}

View File

@@ -290,8 +290,8 @@ func (c Ctx) toArray() []interface{} {
return arr
}
func NewFileLvlHandler(logPath string, maxBytesSize uint, level string, rotateHours uint) Handler {
rfh, err := RotatingFileHandler(logPath, maxBytesSize, LogfmtFormat(), rotateHours)
func NewFileLvlHandler(logPath string, maxBytesSize uint, maxBackups uint, level string, rotateHours uint) Handler {
rfh, err := RotatingFileHandler(logPath, maxBytesSize, maxBackups, LogfmtFormat(), rotateHours)
if err != nil {
panic(err)
}

View File

@@ -513,6 +513,7 @@ type LogConfig struct {
MaxBytesSize *uint `toml:",omitempty"`
Level *string `toml:",omitempty"`
RotateHours *uint `toml:",omitempty"`
MaxBackups *uint `toml:",omitempty"`
// TermTimeFormat is the time format used for console logging.
TermTimeFormat *string `toml:",omitempty"`
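
With the struct extended, enabling pruning from config.toml might look like the snippet below; the [Node.LogConfig] section name and the FilePath key are assumptions inferred from bsc's existing log options, since this diff only shows the struct fields:

```toml
[Node.LogConfig]
FilePath = "bsc.log"     # assumed key, not part of this diff
MaxBytesSize = 104857600 # rotate once the active file reaches ~100 MB
Level = "info"
RotateHours = 1          # hourly time-based rotation
MaxBackups = 5           # new: backups older than 5 rotations are deleted
```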

View File

@@ -118,7 +118,12 @@ func New(conf *Config) (*Node, error) {
rotateHours = *conf.LogConfig.RotateHours
}
log.Root().SetHandler(log.NewFileLvlHandler(logFilePath, *conf.LogConfig.MaxBytesSize, *conf.LogConfig.Level, rotateHours))
maxBackups := uint(0)
if conf.LogConfig.MaxBackups != nil {
maxBackups = *conf.LogConfig.MaxBackups
}
log.Root().SetHandler(log.NewFileLvlHandler(logFilePath, *conf.LogConfig.MaxBytesSize, maxBackups, *conf.LogConfig.Level, rotateHours))
}
}
if conf.Logger == nil {

View File

@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 3 // Minor version component of the current release
VersionPatch = 8 // Patch version component of the current release
VersionPatch = 9 // Patch version component of the current release
VersionMeta = "" // Version metadata to append to the version string
)