// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sync"
	"sync/atomic"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/golang/snappy"
)

var (
	// errClosed is returned if an operation attempts to read from or write to the
	// freezer table after it has already been closed.
	errClosed = errors.New("closed")

	// errOutOfBounds is returned if the item requested is not contained within the
	// freezer table.
	errOutOfBounds = errors.New("out of bounds")

	// errNotSupported is returned if the database doesn't support the required operation.
	errNotSupported = errors.New("this operation is not supported")
)

// indexEntry contains the number/id of the file that the data resides in, as well
// as the offset within the file to the end of the data.
// In serialized form, the filenum is stored as uint16.
type indexEntry struct {
	filenum uint32 // stored as uint16 ( 2 bytes)
	offset  uint32 // stored as uint32 ( 4 bytes)
}

const indexEntrySize = 6
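
// The index file is thus a flat sequence of indexEntry records, where entry i
// holds the file number and end offset of item i-1's data (and entry zero is
// read back as the tail marker during repair). A minimal sketch, assuming
// three items of 10, 20 and 5 bytes all stored in data file 0:
//
//	entry 0: {filenum: 0, offset: 0}  // initial/tail entry
//	entry 1: {filenum: 0, offset: 10} // end of item 0
//	entry 2: {filenum: 0, offset: 30} // end of item 1
//	entry 3: {filenum: 0, offset: 35} // end of item 2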

// unmarshalBinary deserializes binary b into the rawIndex entry.
func (i *indexEntry) unmarshalBinary(b []byte) error {
	i.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
	i.offset = binary.BigEndian.Uint32(b[2:6])
	return nil
}

// marshallBinary serializes the rawIndex entry into binary.
func (i *indexEntry) marshallBinary() []byte {
	b := make([]byte, indexEntrySize)
	binary.BigEndian.PutUint16(b[:2], uint16(i.filenum))
	binary.BigEndian.PutUint32(b[2:6], i.offset)
	return b
}
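
// A minimal round-trip sketch of the two methods above (illustrative only):
//
//	in := indexEntry{filenum: 3, offset: 1024}
//	var out indexEntry
//	out.unmarshalBinary(in.marshallBinary())
//	// out == in, since filenum values are assumed to fit in a uint16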

// freezerTable represents a single chained data table within the freezer (e.g. blocks).
// It consists of a data file (snappy encoded arbitrary data blobs) and an indexEntry
// file (uncompressed 6 byte indices into the data file).
type freezerTable struct {
	// WARNING: The `items` field is accessed atomically. On 32 bit platforms, only
	// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
	// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
	items uint64 // Number of items stored in the table (including items removed from tail)

	noCompression bool   // if true, disables snappy compression. Note: does not work retroactively
	maxFileSize   uint32 // Max file size for data-files
	name          string
	path          string

	head   *os.File            // File descriptor for the data head of the table
	files  map[uint32]*os.File // open files
	headId uint32              // number of the currently active head file
	tailId uint32              // number of the earliest file
	index  *os.File            // File descriptor for the indexEntry file of the table

	// In the case that old items are deleted (from the tail), we use itemOffset
	// to count how many historic items have gone missing.
	itemOffset uint32 // Offset (number of discarded items)

	headBytes   uint32          // Number of bytes written to the head file
	readMeter   metrics.Meter   // Meter for measuring the effective amount of data read
	writeMeter  metrics.Meter   // Meter for measuring the effective amount of data written
	sizeCounter metrics.Counter // Counter for tracking the combined size of all freezer tables

	logger log.Logger   // Logger with database path and table name embedded
	lock   sync.RWMutex // Mutex protecting the data file descriptors
}

// newTable opens a freezer table with default settings - 2G files
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeCounter metrics.Counter, disableSnappy bool) (*freezerTable, error) {
	return newCustomTable(path, name, readMeter, writeMeter, sizeCounter, 2*1000*1000*1000, disableSnappy)
}
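
// A minimal usage sketch under assumed inputs (the directory and metric values
// below are placeholders, not prescribed by this package):
//
//	tab, err := newTable(os.TempDir(), "demo",
//		metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter(), true)
//	if err != nil {
//		panic(err) // illustrative only
//	}
//	defer tab.Close()
//
//	tab.Append(0, []byte("first item")) // items must arrive in order
//	blob, _ := tab.Retrieve(0)          // blob == []byte("first item")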

// openFreezerFileForAppend opens a freezer table file and seeks to the end
func openFreezerFileForAppend(filename string) (*os.File, error) {
	// Open the file without the O_APPEND flag
	// because it has differing behaviour during Truncate operations
	// on different OS's
	file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return nil, err
	}
	// Seek to end for append
	if _, err = file.Seek(0, io.SeekEnd); err != nil {
		return nil, err
	}
	return file, nil
}

// openFreezerFileForReadOnly opens a freezer table file for read only access
func openFreezerFileForReadOnly(filename string) (*os.File, error) {
	return os.OpenFile(filename, os.O_RDONLY, 0644)
}

// openFreezerFileTruncated opens a freezer table making sure it is truncated
func openFreezerFileTruncated(filename string) (*os.File, error) {
	return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
}

// truncateFreezerFile resizes a freezer table file and seeks to the end
func truncateFreezerFile(file *os.File, size int64) error {
	if err := file.Truncate(size); err != nil {
		return err
	}
	// Seek to end for append
	if _, err := file.Seek(0, io.SeekEnd); err != nil {
		return err
	}
	return nil
}

// newCustomTable opens a freezer table, creating the data and index files if they are
// non-existent. Both files are truncated to the shortest common length to ensure
// they don't go out of sync.
func newCustomTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeCounter metrics.Counter, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
	// Ensure the containing directory exists and open the indexEntry file
	if err := os.MkdirAll(path, 0755); err != nil {
		return nil, err
	}
	var idxName string
	if noCompression {
		// Raw idx
		idxName = fmt.Sprintf("%s.ridx", name)
	} else {
		// Compressed idx
		idxName = fmt.Sprintf("%s.cidx", name)
	}
	offsets, err := openFreezerFileForAppend(filepath.Join(path, idxName))
	if err != nil {
		return nil, err
	}
	// Create the table and repair any past inconsistency
	tab := &freezerTable{
		index:         offsets,
		files:         make(map[uint32]*os.File),
		readMeter:     readMeter,
		writeMeter:    writeMeter,
		sizeCounter:   sizeCounter,
		name:          name,
		path:          path,
		logger:        log.New("database", path, "table", name),
		noCompression: noCompression,
		maxFileSize:   maxFilesize,
	}
	if err := tab.repair(); err != nil {
		tab.Close()
		return nil, err
	}
	// Initialize the starting size counter
	size, err := tab.sizeNolock()
	if err != nil {
		tab.Close()
		return nil, err
	}
	tab.sizeCounter.Inc(int64(size))

	return tab, nil
}

// repair cross checks the head and the index file and truncates them to
// be in sync with each other after a potential crash / data loss.
func (t *freezerTable) repair() error {
	// Create a temporary offset buffer to init files with and read indexEntry into
	buffer := make([]byte, indexEntrySize)

	// If we've just created the files, initialize the index with the 0 indexEntry
	stat, err := t.index.Stat()
	if err != nil {
		return err
	}
	if stat.Size() == 0 {
		if _, err := t.index.Write(buffer); err != nil {
			return err
		}
	}
	// Ensure the index is a multiple of indexEntrySize bytes
	if overflow := stat.Size() % indexEntrySize; overflow != 0 {
		truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path
	}
	// Retrieve the file sizes and prepare for truncation
	if stat, err = t.index.Stat(); err != nil {
		return err
	}
	offsetsSize := stat.Size()

	// Open the head file
	var (
		firstIndex  indexEntry
		lastIndex   indexEntry
		contentSize int64
		contentExp  int64
	)
	// Read index zero, determine what file is the earliest
	// and what item offset to use
	t.index.ReadAt(buffer, 0)
	firstIndex.unmarshalBinary(buffer)

	t.tailId = firstIndex.offset
	t.itemOffset = firstIndex.filenum

	t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
	lastIndex.unmarshalBinary(buffer)
	t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
	if err != nil {
		return err
	}
	if stat, err = t.head.Stat(); err != nil {
		return err
	}
	contentSize = stat.Size()

	// Keep truncating both files until they come in sync
	contentExp = int64(lastIndex.offset)

	for contentExp != contentSize {
		// Truncate the head file to the last offset pointer
		if contentExp < contentSize {
			t.logger.Warn("Truncating dangling head", "indexed", common.StorageSize(contentExp), "stored", common.StorageSize(contentSize))
			if err := truncateFreezerFile(t.head, contentExp); err != nil {
				return err
			}
			contentSize = contentExp
		}
		// Truncate the index to point within the head file
		if contentExp > contentSize {
			t.logger.Warn("Truncating dangling indexes", "indexed", common.StorageSize(contentExp), "stored", common.StorageSize(contentSize))
			if err := truncateFreezerFile(t.index, offsetsSize-indexEntrySize); err != nil {
				return err
			}
			offsetsSize -= indexEntrySize
			t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
			var newLastIndex indexEntry
			newLastIndex.unmarshalBinary(buffer)
			// We might have slipped back into an earlier head-file here
			if newLastIndex.filenum != lastIndex.filenum {
				// Release earlier opened file
				t.releaseFile(lastIndex.filenum)
				if t.head, err = t.openFile(newLastIndex.filenum, openFreezerFileForAppend); err != nil {
					return err
				}
				if stat, err = t.head.Stat(); err != nil {
					// TODO, anything more we can do here?
					// A data file has gone missing...
					return err
				}
				contentSize = stat.Size()
			}
			lastIndex = newLastIndex
			contentExp = int64(lastIndex.offset)
		}
	}
	// Ensure all reparation changes have been written to disk
	if err := t.index.Sync(); err != nil {
		return err
	}
	if err := t.head.Sync(); err != nil {
		return err
	}
	// Update the item and byte counters and return
	t.items = uint64(t.itemOffset) + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
	t.headBytes = uint32(contentSize)
	t.headId = lastIndex.filenum

	// Close opened files and preopen all files
	if err := t.preopen(); err != nil {
		return err
	}
	t.logger.Debug("Chain freezer table opened", "items", t.items, "size", common.StorageSize(t.headBytes))
	return nil
}

// preopen opens all files that the freezer will need. This method should be called
// from an init-context, since it assumes that it doesn't have to bother with locking.
// The rationale for doing preopen is to not have to do it from within Retrieve, thus
// not needing to ever obtain a write-lock within Retrieve.
func (t *freezerTable) preopen() (err error) {
	// The repair might have already opened (some) files
	t.releaseFilesAfter(0, false)
	// Open all except head in RDONLY
	for i := t.tailId; i < t.headId; i++ {
		if _, err = t.openFile(i, openFreezerFileForReadOnly); err != nil {
			return err
		}
	}
	// Open head in read/write
	t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
	return err
}

// truncate discards any recent data above the provided threshold number.
func (t *freezerTable) truncate(items uint64) error {
	t.lock.Lock()
	defer t.lock.Unlock()

	// If our item count is correct, don't do anything
	if atomic.LoadUint64(&t.items) <= items {
		return nil
	}
	// We need to truncate, save the old size for metrics tracking
	oldSize, err := t.sizeNolock()
	if err != nil {
		return err
	}
	// Something's out of sync, truncate the table's offset index
	t.logger.Warn("Truncating freezer table", "items", t.items, "limit", items)
	if err := truncateFreezerFile(t.index, int64(items+1)*indexEntrySize); err != nil {
		return err
	}
	// Calculate the new expected size of the data file and truncate it
	buffer := make([]byte, indexEntrySize)
	if _, err := t.index.ReadAt(buffer, int64(items*indexEntrySize)); err != nil {
		return err
	}
	var expected indexEntry
	expected.unmarshalBinary(buffer)

	// We might need to truncate back to older files
	if expected.filenum != t.headId {
		// If already open for reading, force-reopen for writing
		t.releaseFile(expected.filenum)
		newHead, err := t.openFile(expected.filenum, openFreezerFileForAppend)
		if err != nil {
			return err
		}
		// Release any files after the current head -- both the previous head
		// and any files which may have been opened for reading
		t.releaseFilesAfter(expected.filenum, true)
		// Set back the historic head
		t.head = newHead
		atomic.StoreUint32(&t.headId, expected.filenum)
	}
	if err := truncateFreezerFile(t.head, int64(expected.offset)); err != nil {
		return err
	}
	// All data files truncated, set internal counters and return
	atomic.StoreUint64(&t.items, items)
	atomic.StoreUint32(&t.headBytes, expected.offset)

	// Retrieve the new size and update the total size counter
	newSize, err := t.sizeNolock()
	if err != nil {
		return err
	}
	t.sizeCounter.Dec(int64(oldSize - newSize))

	return nil
}

// Close closes all opened files.
func (t *freezerTable) Close() error {
	t.lock.Lock()
	defer t.lock.Unlock()

	var errs []error
	if err := t.index.Close(); err != nil {
		errs = append(errs, err)
	}
	t.index = nil

	for _, f := range t.files {
		if err := f.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	t.head = nil

	if errs != nil {
		return fmt.Errorf("%v", errs)
	}
	return nil
}

// openFile assumes that the write-lock is held by the caller
func (t *freezerTable) openFile(num uint32, opener func(string) (*os.File, error)) (f *os.File, err error) {
	var exist bool
	if f, exist = t.files[num]; !exist {
		var name string
		if t.noCompression {
			name = fmt.Sprintf("%s.%04d.rdat", t.name, num)
		} else {
			name = fmt.Sprintf("%s.%04d.cdat", t.name, num)
		}
		f, err = opener(filepath.Join(t.path, name))
		if err != nil {
			return nil, err
		}
		t.files[num] = f
	}
	return f, err
}
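
// As an example of the naming scheme above, file number 1 of an uncompressed
// table named "headers" lives at <path>/headers.0001.rdat, while its
// snappy-compressed counterpart would be headers.0001.cdat.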

// releaseFile closes a file, and removes it from the open file cache.
// Assumes that the caller holds the write lock
func (t *freezerTable) releaseFile(num uint32) {
	if f, exist := t.files[num]; exist {
		delete(t.files, num)
		f.Close()
	}
}

// releaseFilesAfter closes all open files with a higher number, and optionally also deletes the files
func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
	for fnum, f := range t.files {
		if fnum > num {
			delete(t.files, fnum)
			f.Close()
			if remove {
				os.Remove(f.Name())
			}
		}
	}
}

// Append injects a binary blob at the end of the freezer table. The item number
// is a precautionary parameter to ensure data correctness, but the table will
// reject already existing data.
//
// Note, this method will *not* flush any data to disk so be sure to explicitly
// fsync before irreversibly deleting data from the database.
func (t *freezerTable) Append(item uint64, blob []byte) error {
	// Read lock prevents competition with truncate
	t.lock.RLock()
	// Ensure the table is still accessible
	if t.index == nil || t.head == nil {
		t.lock.RUnlock()
		return errClosed
	}
	// Ensure only the next item can be written, nothing else
	if atomic.LoadUint64(&t.items) != item {
		t.lock.RUnlock()
		return fmt.Errorf("appending unexpected item: want %d, have %d", t.items, item)
	}
	// Encode the blob and write it into the data file
	if !t.noCompression {
		blob = snappy.Encode(nil, blob)
	}
	bLen := uint32(len(blob))
	if t.headBytes+bLen < bLen ||
		t.headBytes+bLen > t.maxFileSize {
		// We need a new file, writing would overflow
		t.lock.RUnlock()
		t.lock.Lock()
		nextID := atomic.LoadUint32(&t.headId) + 1
		// We open the next file in truncated mode -- if this file already
		// exists, we need to start over from scratch on it
		newHead, err := t.openFile(nextID, openFreezerFileTruncated)
		if err != nil {
			t.lock.Unlock()
			return err
		}
		// Close old file, and reopen in RDONLY mode
		t.releaseFile(t.headId)
		t.openFile(t.headId, openFreezerFileForReadOnly)

		// Swap out the current head
		t.head = newHead
		atomic.StoreUint32(&t.headBytes, 0)
		atomic.StoreUint32(&t.headId, nextID)
		t.lock.Unlock()
		t.lock.RLock()
	}

	defer t.lock.RUnlock()
	if _, err := t.head.Write(blob); err != nil {
		return err
	}
	newOffset := atomic.AddUint32(&t.headBytes, bLen)
	idx := indexEntry{
		filenum: atomic.LoadUint32(&t.headId),
		offset:  newOffset,
	}
	// Write indexEntry
	t.index.Write(idx.marshallBinary())

	t.writeMeter.Mark(int64(bLen + indexEntrySize))
	t.sizeCounter.Inc(int64(bLen + indexEntrySize))

	atomic.AddUint64(&t.items, 1)
	return nil
}

// getBounds returns the indexes for the item: the start offset, the end offset,
// and the number of the file the data resides in.
func (t *freezerTable) getBounds(item uint64) (uint32, uint32, uint32, error) {
	var startIdx, endIdx indexEntry
	buffer := make([]byte, indexEntrySize)
	if _, err := t.index.ReadAt(buffer, int64(item*indexEntrySize)); err != nil {
		return 0, 0, 0, err
	}
	startIdx.unmarshalBinary(buffer)
	if _, err := t.index.ReadAt(buffer, int64((item+1)*indexEntrySize)); err != nil {
		return 0, 0, 0, err
	}
	endIdx.unmarshalBinary(buffer)
	if startIdx.filenum != endIdx.filenum {
		// If a piece of data 'crosses' a data-file,
		// it's actually in one piece on the second data-file.
		// We return a zero-indexEntry for the second file as start
		return 0, endIdx.offset, endIdx.filenum, nil
	}
	return startIdx.offset, endIdx.offset, endIdx.filenum, nil
}
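
// A hypothetical worked example of the file-crossing case above: if item 7's
// start entry reads {filenum: 1, offset: 1990} but its end entry reads
// {filenum: 2, offset: 42}, the blob was written whole into file 2 (Append
// rotates files before writing), so the bounds are (0, 42, 2) and the caller
// reads bytes [0, 42) of file 2.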

// Retrieve looks up the data offset of an item with the given number and retrieves
// the raw binary blob from the data file.
func (t *freezerTable) Retrieve(item uint64) ([]byte, error) {
	// Ensure the table and the item are accessible
	if t.index == nil || t.head == nil {
		return nil, errClosed
	}
	if atomic.LoadUint64(&t.items) <= item {
		return nil, errOutOfBounds
	}
	// Ensure the item was not deleted from the tail either
	offset := atomic.LoadUint32(&t.itemOffset)
	if uint64(offset) > item {
		return nil, errOutOfBounds
	}
	t.lock.RLock()
	startOffset, endOffset, filenum, err := t.getBounds(item - uint64(offset))
	if err != nil {
		t.lock.RUnlock()
		return nil, err
	}
	dataFile, exist := t.files[filenum]
	if !exist {
		t.lock.RUnlock()
		return nil, fmt.Errorf("missing data file %d", filenum)
	}
	// Retrieve the data itself, decompress and return
	blob := make([]byte, endOffset-startOffset)
	if _, err := dataFile.ReadAt(blob, int64(startOffset)); err != nil {
		t.lock.RUnlock()
		return nil, err
	}
	t.lock.RUnlock()
	t.readMeter.Mark(int64(len(blob) + 2*indexEntrySize))

	if t.noCompression {
		return blob, nil
	}
	return snappy.Decode(nil, blob)
}

// has returns an indicator whether the item with the specified number exists
// in the freezer table.
func (t *freezerTable) has(number uint64) bool {
	return atomic.LoadUint64(&t.items) > number
}

// size returns the total data size in the freezer table.
func (t *freezerTable) size() (uint64, error) {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.sizeNolock()
}

// sizeNolock returns the total data size in the freezer table without obtaining
// the mutex first.
func (t *freezerTable) sizeNolock() (uint64, error) {
	stat, err := t.index.Stat()
	if err != nil {
		return 0, err
	}
	total := uint64(t.maxFileSize)*uint64(t.headId-t.tailId) + uint64(t.headBytes) + uint64(stat.Size())
	return total, nil
}
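
// A worked example of the estimate above, assuming the default 2G file size:
// with tailId=0, headId=2, headBytes=1000 and a 606 byte index (101 entries),
// sizeNolock reports 2*2000000000 + 1000 + 606 bytes. Sealed files are counted
// at maxFileSize even if their actual size is smaller, so this is an upper
// bound rather than an exact figure.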

// Sync pushes any pending data from memory out to disk. This is an expensive
// operation, so use it with care.
func (t *freezerTable) Sync() error {
	if err := t.index.Sync(); err != nil {
		return err
	}
	return t.head.Sync()
}

// printIndex is a debug print utility function for testing
func (t *freezerTable) printIndex() {
	buf := make([]byte, indexEntrySize)

	fmt.Printf("|-----------------|\n")
	fmt.Printf("| fileno | offset |\n")
	fmt.Printf("|--------+--------|\n")

	for i := uint64(0); ; i++ {
		if _, err := t.index.ReadAt(buf, int64(i*indexEntrySize)); err != nil {
			break
		}
		var entry indexEntry
		entry.unmarshalBinary(buf)
		fmt.Printf("|  %03d   |  %03d   | \n", entry.filenum, entry.offset)
		if i > 100 {
			fmt.Printf(" ... \n")
			break
		}
	}
	fmt.Printf("|-----------------|\n")
}