package ethdb

import (
	"time"

	"github.com/ethereum/go-ethereum/compression/rle"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/rcrowley/go-metrics"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

var OpenFileLimit = 64

type LDBDatabase struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	GetTimer   metrics.Timer // Timer for measuring the database get request counts and latencies
	PutTimer   metrics.Timer // Timer for measuring the database put request counts and latencies
	DelTimer   metrics.Timer // Timer for measuring the database delete request counts and latencies
	MissMeter  metrics.Meter // Meter for measuring the missed database get requests
	ReadMeter  metrics.Meter // Meter for measuring the database get request data usage
	WriteMeter metrics.Meter // Meter for measuring the database put request data usage
}

// NewLDBDatabase returns a LevelDB wrapped object. LDBDatabase does not persist data by
// itself but requires a background poller which syncs periodically. `Flush` should be called
// when data needs to be stored and written to disk.
func NewLDBDatabase(file string) (*LDBDatabase, error) {
	// Open the db
	db, err := leveldb.OpenFile(file, &opt.Options{OpenFilesCacheCapacity: OpenFileLimit})
	// Check for corruption and attempt to recover
	if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	// (Re)check for errors and abort if opening of the db failed
	if err != nil {
		return nil, err
	}
	database := &LDBDatabase{
		fn: file,
		db: db,
	}

	return database, nil
}

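// exampleBasicUsage is an illustrative sketch, not part of the package API: it
// shows one way the wrapper above might be opened, wired up with the optional
// metrics hooks and used for a simple put/get round trip. The database path and
// the metric registry names are assumptions made up for this example.
func exampleBasicUsage() error {
	// Open (or create) the database under a hypothetical path.
	db, err := NewLDBDatabase("/tmp/ethdb-example")
	if err != nil {
		return err
	}
	defer db.Close()

	// The metric fields are optional; when left nil they are skipped entirely.
	db.PutTimer = metrics.GetOrRegisterTimer("ethdb/example/put", metrics.DefaultRegistry)
	db.GetTimer = metrics.GetOrRegisterTimer("ethdb/example/get", metrics.DefaultRegistry)
	db.MissMeter = metrics.GetOrRegisterMeter("ethdb/example/miss", metrics.DefaultRegistry)

	// Values are rle-compressed on Put and decompressed again on Get.
	if err := db.Put([]byte("answer"), []byte("forty-two")); err != nil {
		return err
	}
	value, err := db.Get([]byte("answer"))
	if err != nil {
		return err
	}
	_ = value
	return nil
}
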
// Put puts the given key / value pair into the database
func (self *LDBDatabase) Put(key []byte, value []byte) error {
	// Measure the database put latency, if requested
	if self.PutTimer != nil {
		start := time.Now()
		defer self.PutTimer.UpdateSince(start)
	}
	// Generate the data to write to disk, update the meter and write
	dat := rle.Compress(value)

	if self.WriteMeter != nil {
		self.WriteMeter.Mark(int64(len(dat)))
	}
	return self.db.Put(key, dat, nil)
}

// Get returns the value for the given key if it's present.
func (self *LDBDatabase) Get(key []byte) ([]byte, error) {
	// Measure the database get latency, if requested
	if self.GetTimer != nil {
		start := time.Now()
		defer self.GetTimer.UpdateSince(start)
	}
	// Retrieve the key and increment the miss counter if not found
	dat, err := self.db.Get(key, nil)
	if err != nil {
		if self.MissMeter != nil {
			self.MissMeter.Mark(1)
		}
		return nil, err
	}
	// Otherwise update the actually retrieved amount of data
	if self.ReadMeter != nil {
		self.ReadMeter.Mark(int64(len(dat)))
	}
	return rle.Decompress(dat)
}

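// exampleGetMissing is an illustrative sketch, not part of the package API: it
// shows how a caller might tell a plain miss apart from an unexpected read
// failure. It assumes the error from the underlying store is passed through
// unchanged, which is what Get above does.
func exampleGetMissing(db *LDBDatabase, key []byte) ([]byte, bool, error) {
	value, err := db.Get(key)
	switch {
	case err == nil:
		return value, true, nil
	case err == leveldb.ErrNotFound:
		// The key is simply absent; MissMeter (when set) has already counted it.
		return nil, false, nil
	default:
		return nil, false, err
	}
}
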
// Delete deletes the key from the database
func (self *LDBDatabase) Delete(key []byte) error {
	// Measure the database delete latency, if requested
	if self.DelTimer != nil {
		start := time.Now()
		defer self.DelTimer.UpdateSince(start)
	}
	// Execute the actual operation
	return self.db.Delete(key, nil)
}

// NewIterator returns an iterator over the database's entire key / value space.
func (self *LDBDatabase) NewIterator() iterator.Iterator {
	return self.db.NewIterator(nil, nil)
}

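// exampleIterate is an illustrative sketch, not part of the package API: it
// walks the whole keyspace via NewIterator. Note that Put stores values
// rle-compressed, so the raw iterator values are the on-disk representation,
// unlike the decompressed values returned by Get.
func exampleIterate(db *LDBDatabase) error {
	it := db.NewIterator()
	defer it.Release()

	for it.Next() {
		key, value := it.Key(), it.Value() // only valid until the next call to Next
		_, _ = key, value
	}
	return it.Error()
}
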
// Flush flushes out the queue to leveldb. It is currently a no-op.
func (self *LDBDatabase) Flush() error {
	return nil
}

// Close flushes and closes the underlying database.
func (self *LDBDatabase) Close() {
	if err := self.Flush(); err != nil {
		glog.V(logger.Error).Infof("error: flush '%s': %v\n", self.fn, err)
	}

	self.db.Close()
	glog.V(logger.Error).Infoln("flushed and closed db:", self.fn)
}