4844: bugfix and improve (#2337)
* core: add debug log for CheckDataAvailableInBatch
* narrow the semantics of func resetItems
* freezer: refactor ResetTable & ResetItems
* fix: fix some lint issues
* only newSnapshot for genesis block
* freezer: opt reset blob table logic
* fix: opt da check logic
* freezer: opt reset blob table logic
* fix: fix failed UTs
* core/types: fix EmptyBody
* freezer: refactor write ancient blocks logic
* code: update code owner file

Co-authored-by: GalaIO <GalaIO@users.noreply.github.com>
Co-authored-by: zzzckck <152148891+zzzckck@users.noreply.github.com>
parent e7c5ce2e94
commit 7c89c65a97
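Taken together, the freezer-related bullets above narrow the ancient-store reset API (ResetTable now takes a single start point plus an onlyEmpty flag) and add an explicit per-table tail truncation (TruncateTableTail). A minimal sketch of the intended call pattern under those assumptions follows; the interface below is a local stand-in for the real ethdb.AncientWriter methods shown in the diff, and the table name is illustrative, not the real constant:

    package sketch

    // Stand-in for the two reshaped ethdb.AncientWriter methods in this commit.
    type blobAncientWriter interface {
        TruncateTableTail(kind string, tail uint64) (uint64, error)
        ResetTable(kind string, startAt uint64, onlyEmpty bool) error
    }

    // pruneAndResetBlobTable shows the intended usage: drop one table's tail
    // independently, and reset it to a new start point only while it is empty.
    func pruneAndResetBlobTable(db blobAncientWriter, newTail, startAt uint64) error {
        if _, err := db.TruncateTableTail("BlobSidecarTable", newTail); err != nil {
            return err
        }
        return db.ResetTable("BlobSidecarTable", startAt, true)
    }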
.github/CODEOWNERS
@@ -1,4 +1,3 @@
 # Lines starting with '#' are comments.
 # Each line is a file pattern followed by one or more owners.
-* @zzzckck
-* @zjubfd
+* @zzzckck @zjubfd
@@ -701,10 +701,8 @@ func (p *Parlia) snapshot(chain consensus.ChainHeaderReader, number uint64, hash
            }
        }

-       // If we're at the genesis, snapshot the initial state. Alternatively if we have
-       // piled up more headers than allowed to be reorged (chain reinit from a freezer),
-       // consider the checkpoint trusted and snapshot it.
-       if number == 0 || (number%p.config.Epoch == 0 && (len(headers) > int(params.FullImmutabilityThreshold)/10)) {
+       // If we're at the genesis, snapshot the initial state.
+       if number == 0 {
            checkpoint := chain.GetHeaderByNumber(number)
            if checkpoint != nil {
                // get checkpoint data
@@ -718,12 +716,10 @@ func (p *Parlia) snapshot(chain consensus.ChainHeaderReader, number uint64, hash

                // new snapshot
                snap = newSnapshot(p.config, p.signatures, number, hash, validators, voteAddrs, p.ethAPI)
-               if snap.Number%checkpointInterval == 0 { // snapshot will only be loaded when snap.Number%checkpointInterval == 0
                if err := snap.store(p.db); err != nil {
                    return nil, err
                }
                log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", hash)
-               }
                break
            }
        }
@@ -973,6 +973,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
            // The header, total difficulty and canonical hash will be
            // removed in the hc.SetHead function.
            rawdb.DeleteBody(db, hash, num)
+           rawdb.DeleteBlobSidecars(db, hash, num)
            rawdb.DeleteReceipts(db, hash, num)
        }
        // Todo(rjl493456442) txlookup, bloombits, etc
@@ -1340,6 +1341,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
    lastBlk := blockChain[len(blockChain)-1]
    if bc.chainConfig.Parlia != nil && bc.chainConfig.IsCancun(lastBlk.Number(), lastBlk.Time()) {
        if _, err := CheckDataAvailableInBatch(bc, blockChain); err != nil {
+           log.Debug("CheckDataAvailableInBatch", "err", err)
            return 0, err
        }
    }
@@ -1404,8 +1406,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [

    // Write all chain data to ancients.
    td := bc.GetTd(first.Hash(), first.NumberU64())
-   writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain, td)
+   writeSize, err := rawdb.WriteAncientBlocksWithBlobs(bc.db, blockChain, receiptChain, td)
    if err != nil {
        log.Error("Error importing chain data to ancients", "err", err)
        return 0, err
@@ -256,7 +256,7 @@ func (bc *BlockChain) GetSidecarsByHash(hash common.Hash) types.BlobSidecars {
    if number == nil {
        return nil
    }
-   sidecars := rawdb.ReadRawBlobSidecars(bc.db, hash, *number)
+   sidecars := rawdb.ReadBlobSidecars(bc.db, hash, *number)
    if sidecars == nil {
        return nil
    }
@@ -11,7 +11,6 @@ import (
    "github.com/ethereum/go-ethereum/consensus"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/crypto/kzg4844"
-   "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/params"
 )
@@ -49,13 +48,10 @@ func validateBlobSidecar(hashes []common.Hash, sidecar *types.BlobSidecar) error
 func IsDataAvailable(chain consensus.ChainHeaderReader, block *types.Block) (err error) {
    // refer logic in ValidateBody
    if !chain.Config().IsCancun(block.Number(), block.Time()) {
-       if block.Sidecars() == nil {
-           return nil
-       } else {
+       if block.Sidecars() != nil {
            return errors.New("sidecars present in block body before cancun")
        }
-   } else if block.Sidecars() == nil {
-       return errors.New("missing sidecars in block body after cancun")
+       return nil
    }

    // only required to check within MinBlocksForBlobRequests block's DA
@@ -64,15 +60,16 @@ func IsDataAvailable(chain consensus.ChainHeaderReader, block *types.Block) (err
    if highest == nil || highest.Number.Cmp(current.Number) < 0 {
        highest = current
    }
-   defer func() {
-       log.Info("IsDataAvailable", "block", block.Number(), "hash", block.Hash(), "highest", highest.Number, "sidecars", len(block.Sidecars()), "err", err)
-   }()
    if block.NumberU64()+params.MinBlocksForBlobRequests < highest.Number.Uint64() {
        // if we needn't check DA of this block, just clean it
        block.CleanSidecars()
        return nil
    }

+   // if sidecar is nil, just clean it. And it will be used for saving in ancient.
+   if block.Sidecars() == nil {
+       block.CleanSidecars()
+   }
    sidecars := block.Sidecars()
    for _, s := range sidecars {
        if err := s.SanityCheck(block.Number(), block.Hash()); err != nil {
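The two hunks above converge on a simpler DA-check flow: reject sidecars before Cancun, skip (and clean) blocks outside the MinBlocksForBlobRequests window, treat nil sidecars as an empty set, then sanity-check each sidecar (and, per the imports above, verify blobs against the header hashes). A condensed, hypothetical stand-in with simplified types, not the real implementation:

    package sketch

    import "errors"

    // checkDataAvailability sketches the post-change ordering of IsDataAvailable.
    func checkDataAvailability(isCancun, withinDARange bool, sidecars []string) error {
        if !isCancun {
            if sidecars != nil {
                return errors.New("sidecars present in block body before cancun")
            }
            return nil // nothing to verify before the fork
        }
        if !withinDARange {
            return nil // outside the DA window: sidecars would be cleaned and skipped
        }
        for range sidecars {
            // each sidecar would be sanity-checked and its blobs verified here
        }
        return nil
    }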
@@ -121,7 +121,7 @@ func TestIsDataAvailable(t *testing.T) {
            }, nil),
            chasingHead: params.MinBlocksForBlobRequests + 1,
            withSidecar: false,
-           err:         true,
+           err:         false,
        },
    }
@@ -798,8 +798,9 @@ func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
    WriteHeader(db, block.Header())
 }

-// WriteAncientBlocks writes entire block data into ancient store and returns the total written size.
-func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) {
+// WriteAncientBlocksWithBlobs writes entire block data with blobs into ancient store and returns the total written size.
+func WriteAncientBlocksWithBlobs(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) {
+   // find cancun index, it's used for new added blob ancient table
    cancunIndex := -1
    for i, block := range blocks {
        if block.Sidecars() != nil {
@@ -808,12 +809,39 @@ func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts
        }
    }
    log.Info("WriteAncientBlocks", "startAt", blocks[0].Number(), "cancunIndex", cancunIndex, "len", len(blocks))
-   if cancunIndex >= 0 {
-       if err := ResetEmptyBlobAncientTable(db, blocks[cancunIndex].NumberU64()); err != nil {
-           return 0, err
+
+   var (
+       tdSum   = new(big.Int).Set(td)
+       preSize int64
+       err     error
+   )
+   if cancunIndex > 0 {
+       preSize, err = WriteAncientBlocks(db, blocks[:cancunIndex], receipts[:cancunIndex], td)
+       if err != nil {
+           return preSize, err
+       }
+       for i, block := range blocks[:cancunIndex] {
+           if i > 0 {
+               tdSum.Add(tdSum, block.Difficulty())
+           }
+       }
+       tdSum.Add(tdSum, blocks[cancunIndex].Difficulty())
+   }
+
+   // It will reset blob ancient table at cancunIndex
+   if cancunIndex >= 0 {
+       if err = ResetEmptyBlobAncientTable(db, blocks[cancunIndex].NumberU64()); err != nil {
+           return 0, err
        }
+       blocks = blocks[cancunIndex:]
+       receipts = receipts[cancunIndex:]
    }
+   postSize, err := WriteAncientBlocks(db, blocks, receipts, tdSum)
+   return preSize + postSize, err
+}
+
+// WriteAncientBlocks writes entire block data into ancient store and returns the total written size.
+func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) {
    var (
        tdSum      = new(big.Int).Set(td)
        stReceipts []*types.ReceiptForStorage
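The subtle piece of the split above is the total-difficulty handoff: the td passed to the post-Cancun segment has to be advanced past the pre-Cancun blocks first, starting from the td that already includes blocks[0]. A toy illustration of that accumulation with plain integers instead of *big.Int (illustrative only, not the real code):

    package sketch

    // tdForPostCancun mimics how the wrapper advances td across the pre-cancun
    // segment before handing the remainder to WriteAncientBlocks.
    // td is the total difficulty at blocks[0]; difficulties[i] belongs to blocks[i].
    func tdForPostCancun(td int, difficulties []int, cancunIndex int) int {
        tdSum := td
        for i := 1; i <= cancunIndex; i++ { // skip index 0: td already includes it
            tdSum += difficulties[i]
        }
        return tdSum
    }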
@@ -853,8 +881,8 @@ func ReadBlobSidecarsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.R
    return data
 }

-// ReadRawBlobSidecars retrieves all the transaction blobs belonging to a block.
-func ReadRawBlobSidecars(db ethdb.Reader, hash common.Hash, number uint64) types.BlobSidecars {
+// ReadBlobSidecars retrieves all the transaction blobs belonging to a block.
+func ReadBlobSidecars(db ethdb.Reader, hash common.Hash, number uint64) types.BlobSidecars {
    data := ReadBlobSidecarsRLP(db, hash, number)
    if len(data) == 0 {
        return nil
@@ -455,13 +455,13 @@ func TestBlockBlobSidecarsStorage(t *testing.T) {
    sidecars := types.BlobSidecars{types.NewBlobSidecarFromTx(tx1)}

    // Check that no sidecars entries are in a pristine database
-   if bs := ReadRawBlobSidecars(db, blkHash, 0); len(bs) != 0 {
+   if bs := ReadBlobSidecars(db, blkHash, 0); len(bs) != 0 {
        t.Fatalf("non existent sidecars returned: %v", bs)
    }
    WriteBody(db, blkHash, 0, body)
    WriteBlobSidecars(db, blkHash, 0, sidecars)

-   if bs := ReadRawBlobSidecars(db, blkHash, 0); len(bs) == 0 {
+   if bs := ReadBlobSidecars(db, blkHash, 0); len(bs) == 0 {
        t.Fatalf("no sidecars returned")
    } else {
        if err := checkBlobSidecarsRLP(bs, sidecars); err != nil {
@@ -470,7 +470,7 @@ func TestBlockBlobSidecarsStorage(t *testing.T) {
    }

    DeleteBlobSidecars(db, blkHash, 0)
-   if bs := ReadRawBlobSidecars(db, blkHash, 0); len(bs) != 0 {
+   if bs := ReadBlobSidecars(db, blkHash, 0); len(bs) != 0 {
        t.Fatalf("deleted sidecars returned: %v", bs)
    }
 }
@@ -252,7 +252,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
        env, _ := f.freezeEnv.Load().(*ethdb.FreezerEnv)
        // try prune blob data after cancun fork
        if isCancun(env, head.Number, head.Time) {
-           f.tryPruneBlobAncient(env, *number)
+           f.tryPruneBlobAncientTable(env, *number)
        }

        // Avoid database thrashing with tiny writes
@@ -262,7 +262,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
    }
 }

-func (f *chainFreezer) tryPruneBlobAncient(env *ethdb.FreezerEnv, num uint64) {
+func (f *chainFreezer) tryPruneBlobAncientTable(env *ethdb.FreezerEnv, num uint64) {
    extraReserve := getBlobExtraReserveFromEnv(env)
    // It means that there is no need for pruning
    if extraReserve == 0 {
@@ -273,13 +273,8 @@ func (f *chainFreezer) tryPruneBlobAncient(env *ethdb.FreezerEnv, num uint64) {
        return
    }
    expectTail := num - reserveThreshold
-   h, err := f.TableAncients(ChainFreezerBlobSidecarTable)
-   if err != nil {
-       log.Error("Cannot get blob ancient head when prune", "block", num)
-       return
-   }
    start := time.Now()
-   if err = f.ResetTable(ChainFreezerBlobSidecarTable, expectTail, h, false); err != nil {
+   if _, err := f.TruncateTableTail(ChainFreezerBlobSidecarTable, expectTail); err != nil {
        log.Error("Cannot prune blob ancient", "block", num, "expectTail", expectTail, "err", err)
        return
    }
@@ -312,9 +307,8 @@ func (f *chainFreezer) freezeRangeWithBlobs(nfdb *nofreezedb, number, limit uint
-
    var (
        cancunNumber uint64
-       found        bool
+       preHashes    []common.Hash
    )

    for i := number; i <= limit; i++ {
        hash := ReadCanonicalHash(nfdb, i)
        if hash == (common.Hash{}) {
@@ -326,16 +320,12 @@ func (f *chainFreezer) freezeRangeWithBlobs(nfdb *nofreezedb, number, limit uint
        }
        if isCancun(env, h.Number, h.Time) {
            cancunNumber = i
-           found = true
            break
        }
    }
-   if !found {
-       return f.freezeRange(nfdb, number, limit)
-   }

    // freeze pre cancun
-   preHashes, err := f.freezeRange(nfdb, number, cancunNumber-1)
+   preHashes, err = f.freezeRange(nfdb, number, cancunNumber-1)
    if err != nil {
        return preHashes, err
    }
@@ -432,5 +422,5 @@ func isCancun(env *ethdb.FreezerEnv, num *big.Int, time uint64) bool {
 }

 func ResetEmptyBlobAncientTable(db ethdb.AncientWriter, next uint64) error {
-   return db.ResetTable(ChainFreezerBlobSidecarTable, next, next, true)
+   return db.ResetTable(ChainFreezerBlobSidecarTable, next, true)
 }
@@ -177,10 +177,6 @@ func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, e
    return 0, errNotSupported
 }

-func (db *nofreezedb) ResetTable(kind string, tail uint64, head uint64, onlyEmpty bool) error {
-   return errNotSupported
-}
-
 // TruncateHead returns an error as we don't have a backing chain freezer.
 func (db *nofreezedb) TruncateHead(items uint64) (uint64, error) {
    return 0, errNotSupported
@@ -191,6 +187,16 @@ func (db *nofreezedb) TruncateTail(items uint64) (uint64, error) {
    return 0, errNotSupported
 }

+// TruncateTableTail will truncate certain table to new tail
+func (db *nofreezedb) TruncateTableTail(kind string, tail uint64) (uint64, error) {
+   return 0, errNotSupported
+}
+
+// ResetTable will reset certain table with new start point
+func (db *nofreezedb) ResetTable(kind string, startAt uint64, onlyEmpty bool) error {
+   return errNotSupported
+}
+
 // Sync returns an error as we don't have a backing chain freezer.
 func (db *nofreezedb) Sync() error {
    return errNotSupported
@@ -348,7 +348,7 @@ func (f *Freezer) TruncateHead(items uint64) (uint64, error) {
            if kind != ChainFreezerBlobSidecarTable {
                return 0, err
            }
-           nt, err := table.resetItems(items-f.offset, items-f.offset)
+           nt, err := table.resetItems(items - f.offset)
            if err != nil {
                return 0, err
            }
@@ -489,7 +489,7 @@ func (f *Freezer) repair() error {
            if kind != ChainFreezerBlobSidecarTable {
                return err
            }
-           nt, err := table.resetItems(head, head)
+           nt, err := table.resetItems(head)
            if err != nil {
                return err
            }
@@ -698,34 +698,63 @@ func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error {
    return nil
 }

-// only used for ChainFreezerBlobSidecarTable now
-func (f *Freezer) ResetTable(kind string, tail, head uint64, onlyEmpty bool) error {
+// TruncateTableTail will truncate certain table to new tail
+func (f *Freezer) TruncateTableTail(kind string, tail uint64) (uint64, error) {
    if f.readonly {
-       return errReadOnly
-   }
-   if err := f.Sync(); err != nil {
-       return err
+       return 0, errReadOnly
    }

    f.writeLock.Lock()
    defer f.writeLock.Unlock()
-   if tail < f.offset || head < f.offset {
-       return errors.New("the input tail&head is less than offset")
+
+   if !slices.Contains(additionTables, kind) {
+       return 0, errors.New("only new added table could be truncated independently")
    }
-   if _, exist := f.tables[kind]; !exist {
+   if tail < f.offset {
+       return 0, errors.New("the input tail&head is less than offset")
+   }
+   t, exist := f.tables[kind]
+   if !exist {
+       return 0, errors.New("you reset a non-exist table")
+   }
+
+   old := t.itemHidden.Load() + f.offset
+   if err := t.truncateTail(tail - f.offset); err != nil {
+       return 0, err
+   }
+   return old, nil
+}
+
+// ResetTable will reset certain table with new start point
+// only used for ChainFreezerBlobSidecarTable now
+func (f *Freezer) ResetTable(kind string, startAt uint64, onlyEmpty bool) error {
+   if f.readonly {
+       return errReadOnly
+   }
+
+   f.writeLock.Lock()
+   defer f.writeLock.Unlock()
+
+   t, exist := f.tables[kind]
+   if !exist {
        return errors.New("you reset a non-exist table")
    }

    // if you reset a non empty table just skip
-   if onlyEmpty && !EmptyTable(f.tables[kind]) {
+   if onlyEmpty && !EmptyTable(t) {
        return nil
    }

-   nt, err := f.tables[kind].resetItems(tail-f.offset, head-f.offset)
+   if err := f.Sync(); err != nil {
+       return err
+   }
+   nt, err := t.resetItems(startAt - f.offset)
    if err != nil {
        return err
    }
    f.tables[kind] = nt

    // repair all tables with same tail & head
    if err := f.repair(); err != nil {
        for _, table := range f.tables {
            table.Close()
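Read as a pair, the new methods split what the old ResetTable conflated: TruncateTableTail keeps existing items and only moves the tail marker of one table, while ResetTable wipes the table and re-creates it at startAt (with onlyEmpty guarding against data loss). A toy model of the two operations on a slice-backed "table" — purely illustrative, with none of the freezer's file handling:

    package sketch

    // toyTable models just enough state to contrast the two operations.
    type toyTable struct {
        start uint64   // number of the first visible item (the tail)
        items []string // items[i] corresponds to entry start+i
    }

    // truncateTail hides everything below newTail but keeps the rest.
    func (t *toyTable) truncateTail(newTail uint64) {
        if newTail <= t.start {
            return
        }
        drop := newTail - t.start
        if drop > uint64(len(t.items)) {
            drop = uint64(len(t.items))
        }
        t.items = t.items[drop:]
        t.start = newTail
    }

    // reset empties the table and restarts it at startAt, like resetItems(startAt).
    func (t *toyTable) reset(startAt uint64, onlyEmpty bool) {
        if onlyEmpty && len(t.items) > 0 {
            return // mirrors the onlyEmpty guard in Freezer.ResetTable
        }
        t.items = nil
        t.start = startAt
    }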
@@ -187,13 +187,6 @@ func (f *ResettableFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error)
    return f.freezer.ModifyAncients(fn)
 }

-func (f *ResettableFreezer) ResetTable(kind string, tail uint64, head uint64, onlyEmpty bool) error {
-   f.lock.RLock()
-   defer f.lock.RUnlock()
-
-   return f.freezer.ResetTable(kind, tail, head, onlyEmpty)
-}
-
 // TruncateHead discards any recent data above the provided threshold number.
 // It returns the previous head number.
 func (f *ResettableFreezer) TruncateHead(items uint64) (uint64, error) {
@@ -212,6 +205,22 @@ func (f *ResettableFreezer) TruncateTail(tail uint64) (uint64, error) {
    return f.freezer.TruncateTail(tail)
 }

+// TruncateTableTail will truncate certain table to new tail
+func (f *ResettableFreezer) TruncateTableTail(kind string, tail uint64) (uint64, error) {
+   f.lock.RLock()
+   defer f.lock.RUnlock()
+
+   return f.freezer.TruncateTableTail(kind, tail)
+}
+
+// ResetTable will reset certain table with new start point
+func (f *ResettableFreezer) ResetTable(kind string, startAt uint64, onlyEmpty bool) error {
+   f.lock.RLock()
+   defer f.lock.RUnlock()
+
+   return f.freezer.ResetTable(kind, startAt, onlyEmpty)
+}
+
 // Sync flushes all data tables to disk.
 func (f *ResettableFreezer) Sync() error {
    f.lock.RLock()
@@ -1029,38 +1029,32 @@ func (t *freezerTable) ResetItemsOffset(virtualTail uint64) error {
    return nil
 }

-// resetItems reset freezer table head & tail
+// resetItems reset freezer table to 0 items with new startAt
 // only used for ChainFreezerBlobSidecarTable now
-func (t *freezerTable) resetItems(tail, head uint64) (*freezerTable, error) {
+func (t *freezerTable) resetItems(startAt uint64) (*freezerTable, error) {
    t.lock.Lock()
    defer t.lock.Unlock()
    if t.readonly {
        return nil, errors.New("resetItems in readonly mode")
    }
-   itemHidden := t.itemHidden.Load()
-   items := t.items.Load()
-   if tail != head && (itemHidden > tail || items < head) {
-       return nil, errors.New("cannot reset to non-exist range")
-   }
-
-   var err error
-   if tail != head {
-       if err = t.truncateHead(head); err != nil {
-           return nil, err
-       }
-       if err = t.truncateTail(tail); err != nil {
-           return nil, err
-       }
-       return t, nil
-   }

-   // if tail == head, it means table reset to 0 item
-   t.releaseFilesAfter(t.tailId-1, true)
+   // remove all data files
+   t.head.Close()
+   os.Remove(t.head.Name())
+   t.releaseFilesAfter(0, true)
+   t.releaseFile(0)
+
+   // overwrite metadata file
+   if err := writeMetadata(t.meta, newMetadata(startAt)); err != nil {
+       return nil, err
+   }
+   if err := t.meta.Sync(); err != nil {
+       return nil, err
+   }
+   t.meta.Close()

    // recreate the index file
    t.index.Close()
    os.Remove(t.index.Name())
-   t.meta.Close()
-   os.Remove(t.meta.Name())

    var idxName string
    if t.noCompression {
        idxName = fmt.Sprintf("%s.ridx", t.name) // raw index file
@@ -1072,11 +1066,16 @@ func (t *freezerTable) resetItems(tail, head uint64) (*freezerTable, error) {
        return nil, err
    }
    tailIndex := indexEntry{
-       offset: uint32(tail),
+       filenum: 0,
+       offset:  uint32(startAt),
    }
    if _, err = index.Write(tailIndex.append(nil)); err != nil {
        return nil, err
    }
+   if err := index.Sync(); err != nil {
+       return nil, err
+   }
+   index.Close()

    return newFreezerTable(t.path, t.name, t.noCompression, t.readonly)
 }
@@ -1384,59 +1384,40 @@ func TestResetItems(t *testing.T) {
    // Write 7 x 20 bytes, splitting out into four files
    batch := f.newBatch(0)
-   require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
-   require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
-   require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
-   require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
-   require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
-   require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
-   require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
+   require.NoError(t, batch.AppendRaw(0, getChunk(20, 0x00)))
+   require.NoError(t, batch.AppendRaw(1, getChunk(20, 0x11)))
+   require.NoError(t, batch.AppendRaw(2, getChunk(20, 0x22)))
+   require.NoError(t, batch.AppendRaw(3, getChunk(20, 0x33)))
+   require.NoError(t, batch.AppendRaw(4, getChunk(20, 0x44)))
+   require.NoError(t, batch.AppendRaw(5, getChunk(20, 0x55)))
+   require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x66)))
    require.NoError(t, batch.commit())

    // nothing to do, all the items should still be there.
-   f, err = f.resetItems(0, 7)
+   f, err = f.resetItems(0)
    require.NoError(t, err)
    fmt.Println(f.dumpIndexString(0, 1000))
-   checkRetrieve(t, f, map[uint64][]byte{
-       0: getChunk(20, 0xFF),
-       1: getChunk(20, 0xEE),
-       2: getChunk(20, 0xdd),
-       3: getChunk(20, 0xcc),
-       4: getChunk(20, 0xbb),
-       5: getChunk(20, 0xaa),
-       6: getChunk(20, 0x11),
-   })
-
-   f, err = f.resetItems(1, 5)
-   require.NoError(t, err)
-   _, err = f.resetItems(0, 5)
-   require.Error(t, err)
-   _, err = f.resetItems(1, 6)
-   require.Error(t, err)
+   f, err = f.resetItems(8)
+   require.NoError(t, err)
+   f, err = f.resetItems(7)
+   require.NoError(t, err)

    fmt.Println(f.dumpIndexString(0, 1000))
    checkRetrieveError(t, f, map[uint64]error{
        0: errOutOfBounds,
    })
-   checkRetrieve(t, f, map[uint64][]byte{
-       1: getChunk(20, 0xEE),
-       2: getChunk(20, 0xdd),
-       3: getChunk(20, 0xcc),
-       4: getChunk(20, 0xbb),
-   })
-
-   f, err = f.resetItems(4, 4)
-   require.NoError(t, err)
-   checkRetrieveError(t, f, map[uint64]error{
-       4: errOutOfBounds,
-       6: errOutOfBounds,
-   })

    // append
    batch = f.newBatch(0)
    require.Error(t, batch.AppendRaw(0, getChunk(20, 0xa0)))
-   require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xa4)))
-   require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xa5)))
+   require.NoError(t, batch.AppendRaw(7, getChunk(20, 0x77)))
+   require.NoError(t, batch.AppendRaw(8, getChunk(20, 0x88)))
+   require.NoError(t, batch.AppendRaw(9, getChunk(20, 0x99)))
    require.NoError(t, batch.commit())
    fmt.Println(f.dumpIndexString(0, 1000))
    checkRetrieve(t, f, map[uint64][]byte{
        7: getChunk(20, 0x77),
        9: getChunk(20, 0x99),
    })

    // Reopen the table, the deletion information should be persisted as well
    f.Close()
@@ -1447,21 +1428,18 @@ func TestResetItems(t *testing.T) {
    fmt.Println(f.dumpIndexString(0, 1000))
    checkRetrieveError(t, f, map[uint64]error{
        0: errOutOfBounds,
-       6: errOutOfBounds,
+       10: errOutOfBounds,
    })
    checkRetrieve(t, f, map[uint64][]byte{
-       4: getChunk(20, 0xa4),
-       5: getChunk(20, 0xa5),
        7: getChunk(20, 0x77),
        9: getChunk(20, 0x99),
    })

    // truncate all, the entire freezer should be deleted
-   f.truncateTail(6)
+   f.truncateTail(10)
    checkRetrieveError(t, f, map[uint64]error{
        0: errOutOfBounds,
        1: errOutOfBounds,
        2: errOutOfBounds,
        3: errOutOfBounds,
        4: errOutOfBounds,
        5: errOutOfBounds,
+       6: errOutOfBounds,
+       9: errOutOfBounds,
    })
 }
@@ -371,36 +371,70 @@ func TestFreezer_AdditionTables(t *testing.T) {
    f, err = NewFreezer(dir, "", false, 0, 2049, map[string]bool{"o1": true, "o2": true, "a1": true})
    require.NoError(t, err)
    frozen, _ := f.Ancients()
-   f.ResetTable("a1", frozen, frozen, true)
+   require.NoError(t, f.ResetTable("a1", frozen, true))
    _, err = f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
-       if err := op.AppendRaw("o1", 2, item); err != nil {
+       if err := appendSameItem(op, []string{"o1", "o2", "a1"}, 2, item); err != nil {
            return err
        }
-       if err := op.AppendRaw("o2", 2, item); err != nil {
+       if err := appendSameItem(op, []string{"o1", "o2", "a1"}, 3, item); err != nil {
            return err
        }
-       if err := op.AppendRaw("a1", 2, item); err != nil {
+       if err := appendSameItem(op, []string{"o1", "o2", "a1"}, 4, item); err != nil {
            return err
        }
        return nil
    })
    require.NoError(t, err)

+   // check additional table boundary
    _, err = f.Ancient("a1", 1)
    require.Error(t, err)
    actual, err := f.Ancient("a1", 2)
    require.NoError(t, err)
    require.Equal(t, item, actual)
+
+   // truncate additional table, and check boundary
+   _, err = f.TruncateTableTail("o1", 3)
+   require.Error(t, err)
+   _, err = f.TruncateTableTail("a1", 3)
+   require.NoError(t, err)
+   _, err = f.Ancient("a1", 2)
+   require.Error(t, err)
+   actual, err = f.Ancient("a1", 3)
+   require.NoError(t, err)
+   require.Equal(t, item, actual)
+
+   // check additional table head
+   ancients, err := f.TableAncients("a1")
+   require.NoError(t, err)
+   require.Equal(t, uint64(5), ancients)
    require.NoError(t, f.Close())

    // reopen and read
    f, err = NewFreezer(dir, "", true, 0, 2049, map[string]bool{"o1": true, "o2": true, "a1": true})
    require.NoError(t, err)
-   actual, err = f.Ancient("a1", 2)
+
+   // recheck additional table boundary
+   _, err = f.Ancient("a1", 2)
+   require.Error(t, err)
+   actual, err = f.Ancient("a1", 3)
    require.NoError(t, err)
    require.Equal(t, item, actual)
+   ancients, err = f.TableAncients("a1")
+   require.NoError(t, err)
+   require.Equal(t, uint64(5), ancients)
    require.NoError(t, f.Close())
 }

+func appendSameItem(op ethdb.AncientWriteOp, tables []string, i uint64, item []byte) error {
+   for _, t := range tables {
+       if err := op.AppendRaw(t, i, item); err != nil {
+           return err
+       }
+   }
+   return nil
+}
+
 func newFreezerForTesting(t *testing.T, tables map[string]bool) (*Freezer, string) {
    t.Helper()
@@ -328,6 +328,12 @@ func (f *prunedfreezer) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64,
    return 0, errNotSupported
 }

-func (f *prunedfreezer) ResetTable(kind string, tail uint64, head uint64, onlyEmpty bool) error {
+// TruncateTableTail will truncate certain table to new tail
+func (f *prunedfreezer) TruncateTableTail(kind string, tail uint64) (uint64, error) {
+   return 0, errNotSupported
+}
+
+// ResetTable will reset certain table with new start point
+func (f *prunedfreezer) ResetTable(kind string, startAt uint64, onlyEmpty bool) error {
    return errNotSupported
 }
@@ -101,8 +101,14 @@ func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, erro
    return t.db.ModifyAncients(fn)
 }

-func (t *table) ResetTable(kind string, tail uint64, head uint64, onlyEmpty bool) error {
-   return t.db.ResetTable(kind, tail, head, onlyEmpty)
+// TruncateTableTail will truncate certain table to new tail
+func (t *table) TruncateTableTail(kind string, tail uint64) (uint64, error) {
+   return t.db.TruncateTableTail(kind, tail)
+}
+
+// ResetTable will reset certain table with new start point
+func (t *table) ResetTable(kind string, startAt uint64, onlyEmpty bool) error {
+   return t.db.ResetTable(kind, startAt, onlyEmpty)
 }

 func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
@@ -454,10 +454,10 @@ func (p *BlockPruner) backUpOldDb(name string, cache, handles int, namespace str
            return consensus.ErrUnknownAncestor
        }
        // if there has blobs, it needs to back up too.
-       blobs := rawdb.ReadRawBlobSidecars(chainDb, blockHash, blockNumber)
+       blobs := rawdb.ReadBlobSidecars(chainDb, blockHash, blockNumber)
        block = block.WithSidecars(blobs)
        // Write into new ancient_back db.
-       if _, err := rawdb.WriteAncientBlocks(frdbBack, []*types.Block{block}, []types.Receipts{receipts}, td); err != nil {
+       if _, err := rawdb.WriteAncientBlocksWithBlobs(frdbBack, []*types.Block{block}, []types.Receipts{receipts}, td); err != nil {
            log.Error("failed to write new ancient", "error", err)
            return err
        }
@@ -183,7 +183,7 @@ func (h *Header) SanityCheck() error {
 // that is: no transactions, no uncles and no withdrawals.
 func (h *Header) EmptyBody() bool {
    if h.WithdrawalsHash != nil {
-       return h.TxHash == EmptyTxsHash && *h.WithdrawalsHash == EmptyWithdrawalsHash
+       return h.TxHash == EmptyTxsHash && (*h.WithdrawalsHash == EmptyWithdrawalsHash || *h.WithdrawalsHash == common.Hash{})
    }
    return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash
 }
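The EmptyBody fix above widens the predicate so that a withdrawals hash that is present but all-zero (rather than the canonical empty-withdrawals hash) still counts as an empty body, which is presumably how such headers appear on a chain without withdrawals. A simplified, self-contained sketch of the corrected check, with placeholder constants instead of the real keccak values and the uncle clause omitted:

    package sketch

    type hash [32]byte

    var (
        emptyTxsHash         = hash{0x01} // placeholder, not the real constant
        emptyWithdrawalsHash = hash{0x02} // placeholder, not the real constant
    )

    // emptyBody mirrors the fixed comparison: a present-but-zero withdrawals
    // hash is treated the same as the canonical empty-withdrawals hash.
    func emptyBody(txHash hash, withdrawalsHash *hash) bool {
        if withdrawalsHash != nil {
            return txHash == emptyTxsHash &&
                (*withdrawalsHash == emptyWithdrawalsHash || *withdrawalsHash == hash{})
        }
        return txHash == emptyTxsHash
    }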
@@ -827,7 +827,7 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListH
            if want := *header.BlobGasUsed / params.BlobTxBlobGasPerBlob; uint64(blobs) != want { // div because the header is surely good vs the body might be bloated
                return errInvalidBody
            }
-           if blobs > params.MaxBlobGasPerBlock {
+           if blobs > params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob {
                return errInvalidBody
            }
        } else {
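The one-line fix above is a units correction: blobs is a count of blobs, so the upper bound must also be expressed in blobs rather than in blob gas. With the Cancun constant BlobTxBlobGasPerBlob = 131072 and, assuming this fork keeps go-ethereum's MaxBlobGasPerBlock = 786432, the bound works out to 6 blobs per block. A tiny sketch with those assumed values (the real code uses params.*):

    package sketch

    const (
        blobTxBlobGasPerBlob = 1 << 17       // 131072
        maxBlobGasPerBlock   = 6 * (1 << 17) // 786432, assumed value
    )

    // tooManyBlobs applies the corrected comparison, i.e. blobs > 6.
    func tooManyBlobs(blobs int) bool {
        return blobs > maxBlobGasPerBlock/blobTxBlobGasPerBlob
    }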
@@ -141,8 +141,11 @@ type AncientWriter interface {
    // in the newest format.
    MigrateTable(string, func([]byte) ([]byte, error)) error

-   // ResetTable will reset certain table to new boundary
-   ResetTable(kind string, tail uint64, head uint64, onlyEmpty bool) error
+   // TruncateTableTail will truncate certain table to new tail
+   TruncateTableTail(kind string, tail uint64) (uint64, error)
+
+   // ResetTable will reset certain table with new start point
+   ResetTable(kind string, startAt uint64, onlyEmpty bool) error
 }

 type FreezerEnv struct {
@@ -126,10 +126,6 @@ func (db *Database) ModifyAncients(f func(ethdb.AncientWriteOp) error) (int64, e
    panic("not supported")
 }

-func (db *Database) ResetTable(kind string, tail uint64, head uint64, onlyEmpty bool) error {
-   panic("not supported")
-}
-
 func (db *Database) AncientReset(tail, head uint64) error {
    panic("not supported")
 }
@@ -142,6 +138,16 @@ func (db *Database) TruncateTail(n uint64) (uint64, error) {
    panic("not supported")
 }

+// TruncateTableTail will truncate certain table to new tail
+func (db *Database) TruncateTableTail(kind string, tail uint64) (uint64, error) {
+   panic("not supported")
+}
+
+// ResetTable will reset certain table with new start point
+func (db *Database) ResetTable(kind string, startAt uint64, onlyEmpty bool) error {
+   panic("not supported")
+}
+
 func (db *Database) Sync() error {
    return nil
 }
@@ -574,7 +574,7 @@ func (b testBackend) GetBlobSidecars(ctx context.Context, hash common.Hash) (typ
    if header == nil || err != nil {
        return nil, err
    }
-   blobSidecars := rawdb.ReadRawBlobSidecars(b.db, hash, header.Number.Uint64())
+   blobSidecars := rawdb.ReadBlobSidecars(b.db, hash, header.Number.Uint64())
    return blobSidecars, nil
 }
 func (b testBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int {