// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package leveldb

import (
	"sync"
	"time"

	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

var (
	errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting")
)
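
// cStat holds cumulative compaction statistics for a single level: total
// time spent compacting plus the amount of data read and written.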
type cStat struct {
	duration time.Duration
	read     int64
	write    int64
}

func (p *cStat) add(n *cStatStaging) {
	p.duration += n.duration
	p.read += n.read
	p.write += n.write
}

func (p *cStat) get() (duration time.Duration, read, write int64) {
	return p.duration, p.read, p.write
}

type cStatStaging struct {
	start    time.Time
	duration time.Duration
	on       bool
	read     int64
	write    int64
}

func (p *cStatStaging) startTimer() {
	if !p.on {
		p.start = time.Now()
		p.on = true
	}
}

func (p *cStatStaging) stopTimer() {
	if p.on {
		p.duration += time.Since(p.start)
		p.on = false
	}
}

type cStats struct {
	lk    sync.Mutex
	stats []cStat
}

func (p *cStats) addStat(level int, n *cStatStaging) {
	p.lk.Lock()
	if level >= len(p.stats) {
		newStats := make([]cStat, level+1)
		copy(newStats, p.stats)
		p.stats = newStats
	}
	p.stats[level].add(n)
	p.lk.Unlock()
}

func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) {
	p.lk.Lock()
	defer p.lk.Unlock()
	if level < len(p.stats) {
		return p.stats[level].get()
	}
	return
}
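
// compactionError serializes the compaction error state: it receives errors
// on compErrSetC and republishes them on compErrC (and compPerErrC for
// persistent errors such as ErrReadOnly or corruption). While a persistent
// error is set it also acquires the write lock so writes cannot pass through.
// It loops until closeC is closed.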
func (db *DB) compactionError() {
	var err error
noerr:
	// No error.
	for {
		select {
		case err = <-db.compErrSetC:
			switch {
			case err == nil:
			case err == ErrReadOnly, errors.IsCorrupted(err):
				goto hasperr
			default:
				goto haserr
			}
		case <-db.closeC:
			return
		}
	}
haserr:
	// Transient error.
	for {
		select {
		case db.compErrC <- err:
		case err = <-db.compErrSetC:
			switch {
			case err == nil:
				goto noerr
			case err == ErrReadOnly, errors.IsCorrupted(err):
				goto hasperr
			default:
			}
		case <-db.closeC:
			return
		}
	}
hasperr:
	// Persistent error.
	for {
		select {
		case db.compErrC <- err:
		case db.compPerErrC <- err:
		case db.writeLockC <- struct{}{}:
			// Hold write lock, so that write won't pass-through.
			db.compWriteLocking = true
		case <-db.closeC:
			if db.compWriteLocking {
				// We should release the lock or Close will hang.
				<-db.writeLockC
			}
			return
		}
	}
}

type compactionTransactCounter int

func (cnt *compactionTransactCounter) incr() {
	*cnt++
}

type compactionTransactInterface interface {
	run(cnt *compactionTransactCounter) error
	revert() error
}
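
// compactionTransact runs t.run until it succeeds, retrying with a backoff
// capped at backoffMax on transient errors. It aborts by panicking with
// errCompactionTransactExiting when the DB is closed, a persistent error is
// set, or corruption is detected; in that case t.revert is invoked (from the
// deferred recover) to undo any partial work.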
func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
	defer func() {
		if x := recover(); x != nil {
			if x == errCompactionTransactExiting {
				if err := t.revert(); err != nil {
					db.logf("%s revert error %q", name, err)
				}
			}
			panic(x)
		}
	}()

	const (
		backoffMin = 1 * time.Second
		backoffMax = 8 * time.Second
		backoffMul = 2 * time.Second
	)
	var (
		backoff  = backoffMin
		backoffT = time.NewTimer(backoff)
		lastCnt  = compactionTransactCounter(0)

		disableBackoff = db.s.o.GetDisableCompactionBackoff()
	)
	for n := 0; ; n++ {
		// Check whether the DB is closed.
		if db.isClosed() {
			db.logf("%s exiting", name)
			db.compactionExitTransact()
		} else if n > 0 {
			db.logf("%s retrying N·%d", name, n)
		}

		// Execute.
		cnt := compactionTransactCounter(0)
		err := t.run(&cnt)
		if err != nil {
			db.logf("%s error I·%d %q", name, cnt, err)
		}

		// Set compaction error status.
		select {
		case db.compErrSetC <- err:
		case perr := <-db.compPerErrC:
			if err != nil {
				db.logf("%s exiting (persistent error %q)", name, perr)
				db.compactionExitTransact()
			}
		case <-db.closeC:
			db.logf("%s exiting", name)
			db.compactionExitTransact()
		}
		if err == nil {
			return
		}
		if errors.IsCorrupted(err) {
			db.logf("%s exiting (corruption detected)", name)
			db.compactionExitTransact()
		}

		if !disableBackoff {
			// Reset backoff duration if counter is advancing.
			if cnt > lastCnt {
				backoff = backoffMin
				lastCnt = cnt
			}

			// Backoff.
			backoffT.Reset(backoff)
			if backoff < backoffMax {
				backoff *= backoffMul
				if backoff > backoffMax {
					backoff = backoffMax
				}
			}
			select {
			case <-backoffT.C:
			case <-db.closeC:
				db.logf("%s exiting", name)
				db.compactionExitTransact()
			}
		}
	}
}

type compactionTransactFunc struct {
	runFunc    func(cnt *compactionTransactCounter) error
	revertFunc func() error
}

func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error {
	return t.runFunc(cnt)
}

func (t *compactionTransactFunc) revert() error {
	if t.revertFunc != nil {
		return t.revertFunc()
	}
	return nil
}

func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) {
	db.compactionTransact(name, &compactionTransactFunc{run, revert})
}

func (db *DB) compactionExitTransact() {
	panic(errCompactionTransactExiting)
}

func (db *DB) compactionCommit(name string, rec *sessionRecord) {
	db.compCommitLk.Lock()
	defer db.compCommitLk.Unlock() // Defer is necessary.
	db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error {
		return db.s.commit(rec)
	}, nil)
}
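
// memCompaction flushes the frozen memdb into table files: it pauses table
// compaction, generates the tables via flushMemdb, commits the session
// record, drops the frozen memdb, resumes table compaction and finally
// triggers a table compaction.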
func (db *DB) memCompaction() {
	mdb := db.getFrozenMem()
	if mdb == nil {
		return
	}
	defer mdb.decref()

	db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))

	// Don't compact empty memdb.
	if mdb.Len() == 0 {
		db.logf("memdb@flush skipping")
		// Drop frozen memdb.
		db.dropFrozenMem()
		return
	}

	// Pause table compaction.
	resumeC := make(chan struct{})
	select {
	case db.tcompPauseC <- (chan<- struct{})(resumeC):
	case <-db.compPerErrC:
		close(resumeC)
		resumeC = nil
	case <-db.closeC:
		db.compactionExitTransact()
	}

	var (
		rec        = &sessionRecord{}
		stats      = &cStatStaging{}
		flushLevel int
	)

	// Generate tables.
	db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
		stats.startTimer()
		flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel)
		stats.stopTimer()
		return
	}, func() error {
		for _, r := range rec.addedTables {
			db.logf("memdb@flush revert @%d", r.num)
			if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil {
				return err
			}
		}
		return nil
	})

	rec.setJournalNum(db.journalFd.Num)
	rec.setSeqNum(db.frozenSeq)

	// Commit.
	stats.startTimer()
	db.compactionCommit("memdb", rec)
	stats.stopTimer()

	db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)

	for _, r := range rec.addedTables {
		stats.write += r.size
	}
	db.compStats.addStat(flushLevel, stats)

	// Drop frozen memdb.
	db.dropFrozenMem()

	// Resume table compaction.
	if resumeC != nil {
		select {
		case <-resumeC:
			close(resumeC)
		case <-db.closeC:
			db.compactionExitTransact()
		}
	}

	// Trigger table compaction.
	db.compTrigger(db.tcompCmdC)
}
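
// tableCompactionBuilder carries the state of a table compaction so that a
// retried run can resume where the previous attempt left off: the snap*
// fields record the position and counters saved at the last flushed table.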
type tableCompactionBuilder struct {
	db           *DB
	s            *session
	c            *compaction
	rec          *sessionRecord
	stat0, stat1 *cStatStaging

	snapHasLastUkey bool
	snapLastUkey    []byte
	snapLastSeq     uint64
	snapIter        int
	snapKerrCnt     int
	snapDropCnt     int

	kerrCnt int
	dropCnt int

	minSeq    uint64
	strict    bool
	tableSize int

	tw *tWriter
}

func (b *tableCompactionBuilder) appendKV(key, value []byte) error {
	// Create new table if not already.
	if b.tw == nil {
		// Check for pause event.
		if b.db != nil {
			select {
			case ch := <-b.db.tcompPauseC:
				b.db.pauseCompaction(ch)
			case <-b.db.closeC:
				b.db.compactionExitTransact()
			default:
			}
		}

		// Create new table.
		var err error
		b.tw, err = b.s.tops.create()
		if err != nil {
			return err
		}
	}

	// Write key/value into table.
	return b.tw.append(key, value)
}

func (b *tableCompactionBuilder) needFlush() bool {
	return b.tw.tw.BytesLen() >= b.tableSize
}

func (b *tableCompactionBuilder) flush() error {
	t, err := b.tw.finish()
	if err != nil {
		return err
	}
	b.rec.addTableFile(b.c.sourceLevel+1, t)
	b.stat1.write += t.size
	b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
	b.tw = nil
	return nil
}

func (b *tableCompactionBuilder) cleanup() {
	if b.tw != nil {
		b.tw.drop()
		b.tw = nil
	}
}
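
// run iterates over the merged input tables and rebuilds the output tables,
// dropping entries that are shadowed by a newer entry for the same user key
// or that are obsolete deletion markers. The state is snapshotted after every
// flushed table so a retried run can skip already-processed entries.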
func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
	snapResumed := b.snapIter > 0
	hasLastUkey := b.snapHasLastUkey // The key might have zero length, so this is necessary.
	lastUkey := append([]byte{}, b.snapLastUkey...)
	lastSeq := b.snapLastSeq
	b.kerrCnt = b.snapKerrCnt
	b.dropCnt = b.snapDropCnt
	// Restore compaction state.
	b.c.restore()

	defer b.cleanup()

	b.stat1.startTimer()
	defer b.stat1.stopTimer()

	iter := b.c.newIterator()
	defer iter.Release()
	for i := 0; iter.Next(); i++ {
		// Incr transact counter.
		cnt.incr()

		// Skip until last state.
		if i < b.snapIter {
			continue
		}

		resumed := false
		if snapResumed {
			resumed = true
			snapResumed = false
		}

		ikey := iter.Key()
		ukey, seq, kt, kerr := parseInternalKey(ikey)

		if kerr == nil {
			shouldStop := !resumed && b.c.shouldStopBefore(ikey)

			if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 {
				// First occurrence of this user key.

				// Only rotate tables if ukey doesn't hop across.
				if b.tw != nil && (shouldStop || b.needFlush()) {
					if err := b.flush(); err != nil {
						return err
					}

					// Create a snapshot of the state.
					b.c.save()
					b.snapHasLastUkey = hasLastUkey
					b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...)
					b.snapLastSeq = lastSeq
					b.snapIter = i
					b.snapKerrCnt = b.kerrCnt
					b.snapDropCnt = b.dropCnt
				}

				hasLastUkey = true
				lastUkey = append(lastUkey[:0], ukey...)
				lastSeq = keyMaxSeq
			}

			switch {
			case lastSeq <= b.minSeq:
				// Dropped because a newer entry for the same user key exists.
				fallthrough // (A)
			case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
				// For this user key:
				// (1) there is no data in higher levels
				// (2) data in lower levels will have larger seq numbers
				// (3) data in layers that are being compacted here and have
				//     smaller seq numbers will be dropped in the next
				//     few iterations of this loop (by rule (A) above).
				// Therefore this deletion marker is obsolete and can be dropped.
				lastSeq = seq
				b.dropCnt++
				continue
			default:
				lastSeq = seq
			}
		} else {
			if b.strict {
				return kerr
			}

			// Don't drop corrupted keys.
			hasLastUkey = false
			lastUkey = lastUkey[:0]
			lastSeq = keyMaxSeq
			b.kerrCnt++
		}

		if err := b.appendKV(ikey, iter.Value()); err != nil {
			return err
		}
	}

	if err := iter.Error(); err != nil {
		return err
	}

	// Finish last table.
	if b.tw != nil && !b.tw.empty() {
		return b.flush()
	}
	return nil
}

func (b *tableCompactionBuilder) revert() error {
	for _, at := range b.rec.addedTables {
		b.s.logf("table@build revert @%d", at.num)
		if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil {
			return err
		}
	}
	return nil
}
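
// tableCompaction executes a single table compaction. When noTrivial is false
// and the compaction is trivial, the single input table is moved to the next
// level without rewriting; otherwise the inputs are rebuilt through
// tableCompactionBuilder and the result is committed to the session.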
func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
	defer c.release()

	rec := &sessionRecord{}
	rec.addCompPtr(c.sourceLevel, c.imax)

	if !noTrivial && c.trivial() {
		t := c.levels[0][0]
		db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1)
		rec.delTable(c.sourceLevel, t.fd.Num)
		rec.addTableFile(c.sourceLevel+1, t)
		db.compactionCommit("table-move", rec)
		return
	}

	var stats [2]cStatStaging
	for i, tables := range c.levels {
		for _, t := range tables {
			stats[i].read += t.size
			// Insert deleted tables into record
			rec.delTable(c.sourceLevel+i, t.fd.Num)
		}
	}
	sourceSize := int(stats[0].read + stats[1].read)
	minSeq := db.minSeq()
	db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq)

	b := &tableCompactionBuilder{
		db:        db,
		s:         db.s,
		c:         c,
		rec:       rec,
		stat1:     &stats[1],
		minSeq:    minSeq,
		strict:    db.s.o.GetStrict(opt.StrictCompaction),
		tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1),
	}
	db.compactionTransact("table@build", b)

	// Commit.
	stats[1].startTimer()
	db.compactionCommit("table", rec)
	stats[1].stopTimer()

	resultSize := int(stats[1].write)
	db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)

	// Save compaction stats
	for i := range stats {
		db.compStats.addStat(c.sourceLevel+1, &stats[i])
	}
}
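
// tableRangeCompaction compacts tables whose key range overlaps [umin, umax].
// A non-negative level compacts just that level; a negative level repeatedly
// scans for overlapping levels and compacts them until nothing is left to
// compact.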
func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error {
	db.logf("table@compaction range L%d %q:%q", level, umin, umax)
	if level >= 0 {
		if c := db.s.getCompactionRange(level, umin, umax, true); c != nil {
			db.tableCompaction(c, true)
		}
	} else {
		// Retry until nothing to compact.
		for {
			compacted := false

			// Scan for maximum level with overlapped tables.
			v := db.s.version()
			m := 1
			for i := m; i < len(v.levels); i++ {
				tables := v.levels[i]
				if tables.overlaps(db.s.icmp, umin, umax, false) {
					m = i
				}
			}
			v.release()

			for level := 0; level < m; level++ {
				if c := db.s.getCompactionRange(level, umin, umax, false); c != nil {
					db.tableCompaction(c, true)
					compacted = true
				}
			}

			if !compacted {
				break
			}
		}
	}

	return nil
}

func (db *DB) tableAutoCompaction() {
	if c := db.s.pickCompaction(); c != nil {
		db.tableCompaction(c, false)
	}
}

func (db *DB) tableNeedCompaction() bool {
	v := db.s.version()
	defer v.release()
	return v.needCompaction()
}

func (db *DB) pauseCompaction(ch chan<- struct{}) {
	select {
	case ch <- struct{}{}:
	case <-db.closeC:
		db.compactionExitTransact()
	}
}
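
// cCmd is a command sent to a compaction goroutine: cAuto requests an
// automatic compaction and cRange requests compaction of a key range. Both
// acknowledge completion on their optional ackC channel.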
type cCmd interface {
	ack(err error)
}

type cAuto struct {
	ackC chan<- error
}

func (r cAuto) ack(err error) {
	if r.ackC != nil {
		defer func() {
			recover()
		}()
		r.ackC <- err
	}
}

type cRange struct {
	level    int
	min, max []byte
	ackC     chan<- error
}

func (r cRange) ack(err error) {
	if r.ackC != nil {
		defer func() {
			recover()
		}()
		r.ackC <- err
	}
}

// compTrigger triggers an auto compaction but does not wait for it.
func (db *DB) compTrigger(compC chan<- cCmd) {
	select {
	case compC <- cAuto{}:
	default:
	}
}

// compTriggerWait triggers an auto compaction and waits for all compaction to be done.
func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
	ch := make(chan error)
	defer close(ch)
	// Send cmd.
	select {
	case compC <- cAuto{ch}:
	case err = <-db.compErrC:
		return
	case <-db.closeC:
		return ErrClosed
	}
	// Wait cmd.
	select {
	case err = <-ch:
	case err = <-db.compErrC:
	case <-db.closeC:
		return ErrClosed
	}
	return err
}

// compTriggerRange sends a range compaction request and waits for it to be done.
func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
	ch := make(chan error)
	defer close(ch)
	// Send cmd.
	select {
	case compC <- cRange{level, min, max, ch}:
	case err := <-db.compErrC:
		return err
	case <-db.closeC:
		return ErrClosed
	}
	// Wait cmd.
	select {
	case err = <-ch:
	case err = <-db.compErrC:
	case <-db.closeC:
		return ErrClosed
	}
	return err
}
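
// mCompaction is the memdb compaction goroutine: it serves commands from
// mcompCmdC, running a memdb flush for each cAuto command, until the DB is
// closed.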
func (db *DB) mCompaction() {
	var x cCmd

	defer func() {
		if x := recover(); x != nil {
			if x != errCompactionTransactExiting {
				panic(x)
			}
		}
		if x != nil {
			x.ack(ErrClosed)
		}
		db.closeW.Done()
	}()

	for {
		select {
		case x = <-db.mcompCmdC:
			switch x.(type) {
			case cAuto:
				db.memCompaction()
				x.ack(nil)
				x = nil
			default:
				panic("leveldb: unknown command")
			}
		case <-db.closeC:
			return
		}
	}
}
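
// tCompaction is the table compaction goroutine: it runs automatic table
// compactions while they are needed, serves cAuto and cRange commands from
// tcompCmdC, honours pause requests from tcompPauseC, and acknowledges queued
// cAuto commands once no further compaction is needed.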
func (db *DB) tCompaction() {
	var x cCmd
	var ackQ []cCmd

	defer func() {
		if x := recover(); x != nil {
			if x != errCompactionTransactExiting {
				panic(x)
			}
		}
		for i := range ackQ {
			ackQ[i].ack(ErrClosed)
			ackQ[i] = nil
		}
		if x != nil {
			x.ack(ErrClosed)
		}
		db.closeW.Done()
	}()

	for {
		if db.tableNeedCompaction() {
			select {
			case x = <-db.tcompCmdC:
			case ch := <-db.tcompPauseC:
				db.pauseCompaction(ch)
				continue
			case <-db.closeC:
				return
			default:
			}
		} else {
			for i := range ackQ {
				ackQ[i].ack(nil)
				ackQ[i] = nil
			}
			ackQ = ackQ[:0]
			select {
			case x = <-db.tcompCmdC:
			case ch := <-db.tcompPauseC:
				db.pauseCompaction(ch)
				continue
			case <-db.closeC:
				return
			}
		}
		if x != nil {
			switch cmd := x.(type) {
			case cAuto:
				ackQ = append(ackQ, x)
			case cRange:
				x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
			default:
				panic("leveldb: unknown command")
			}
			x = nil
		}
		db.tableAutoCompaction()
	}
}