2015-02-16 15:28:33 +02:00
|
|
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
|
|
|
// All rights reserved.
|
|
|
|
//
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file.
|
|
|
|
|
|
|
|
package leveldb
|
|
|
|
|
|
|
|
import (
|
2015-04-28 12:18:01 +03:00
|
|
|
"fmt"
|
2015-02-16 15:28:33 +02:00
|
|
|
"sort"
|
|
|
|
"sync/atomic"
|
|
|
|
|
|
|
|
"github.com/syndtr/goleveldb/leveldb/cache"
|
|
|
|
"github.com/syndtr/goleveldb/leveldb/iterator"
|
|
|
|
"github.com/syndtr/goleveldb/leveldb/opt"
|
|
|
|
"github.com/syndtr/goleveldb/leveldb/storage"
|
|
|
|
"github.com/syndtr/goleveldb/leveldb/table"
|
|
|
|
"github.com/syndtr/goleveldb/leveldb/util"
|
|
|
|
)
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// tFile holds basic information about a table.
type tFile struct {
	fd       storage.FileDesc // descriptor (type + number) of the backing table file
	seekLeft int32            // seeks remaining before seek-triggered compaction; accessed atomically
	size     int64            // table file size in bytes
	imin, imax internalKey    // smallest and largest internal key stored in the table
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Returns true if given key is after largest key of this table.
|
|
|
|
func (t *tFile) after(icmp *iComparer, ukey []byte) bool {
|
|
|
|
return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0
|
2015-02-16 15:28:33 +02:00
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Returns true if given key is before smallest key of this table.
|
|
|
|
func (t *tFile) before(icmp *iComparer, ukey []byte) bool {
|
|
|
|
return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0
|
2015-02-16 15:28:33 +02:00
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Returns true if given key range overlaps with this table key range.
|
|
|
|
func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool {
|
|
|
|
return !t.after(icmp, umin) && !t.before(icmp, umax)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Consumes one seek and returns current seeks left.
func (t *tFile) consumeSeek() int32 {
	// Atomic decrement so concurrent readers can consume seeks without a lock.
	return atomic.AddInt32(&t.seekLeft, -1)
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Creates new tFile.
|
2016-03-10 11:39:20 +02:00
|
|
|
func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile {
|
2015-02-16 15:28:33 +02:00
|
|
|
f := &tFile{
|
2016-02-11 16:16:52 +02:00
|
|
|
fd: fd,
|
2015-02-16 15:28:33 +02:00
|
|
|
size: size,
|
2015-04-28 12:18:01 +03:00
|
|
|
imin: imin,
|
|
|
|
imax: imax,
|
2015-02-16 15:28:33 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// We arrange to automatically compact this file after
|
|
|
|
// a certain number of seeks. Let's assume:
|
|
|
|
// (1) One seek costs 10ms
|
|
|
|
// (2) Writing or reading 1MB costs 10ms (100MB/s)
|
|
|
|
// (3) A compaction of 1MB does 25MB of IO:
|
|
|
|
// 1MB read from this level
|
|
|
|
// 10-12MB read from next level (boundaries may be misaligned)
|
|
|
|
// 10-12MB written to next level
|
|
|
|
// This implies that 25 seeks cost the same as the compaction
|
|
|
|
// of 1MB of data. I.e., one seek costs approximately the
|
|
|
|
// same as the compaction of 40KB of data. We are a little
|
|
|
|
// conservative and allow approximately one seek for every 16KB
|
|
|
|
// of data before triggering a compaction.
|
|
|
|
f.seekLeft = int32(size / 16384)
|
|
|
|
if f.seekLeft < 100 {
|
|
|
|
f.seekLeft = 100
|
|
|
|
}
|
|
|
|
|
|
|
|
return f
|
|
|
|
}
|
|
|
|
|
2016-02-11 16:16:52 +02:00
|
|
|
func tableFileFromRecord(r atRecord) *tFile {
|
|
|
|
return newTableFile(storage.FileDesc{storage.TypeTable, r.num}, r.size, r.imin, r.imax)
|
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// tFiles hold multiple tFile. It implements part of sort.Interface;
// the Less method is supplied by the tFilesSortByKey/tFilesSortByNum wrappers.
type tFiles []*tFile
|
|
|
|
|
|
|
|
// Len returns the number of tables; part of sort.Interface.
func (tf tFiles) Len() int { return len(tf) }

// Swap exchanges tables i and j; part of sort.Interface.
func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] }
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
func (tf tFiles) nums() string {
|
|
|
|
x := "[ "
|
|
|
|
for i, f := range tf {
|
|
|
|
if i != 0 {
|
|
|
|
x += ", "
|
|
|
|
}
|
2016-02-11 16:16:52 +02:00
|
|
|
x += fmt.Sprint(f.fd.Num)
|
2015-04-28 12:18:01 +03:00
|
|
|
}
|
|
|
|
x += " ]"
|
|
|
|
return x
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns true if i smallest key is less than j.
|
|
|
|
// This used for sort by key in ascending order.
|
2015-02-16 15:28:33 +02:00
|
|
|
func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool {
|
|
|
|
a, b := tf[i], tf[j]
|
2015-04-28 12:18:01 +03:00
|
|
|
n := icmp.Compare(a.imin, b.imin)
|
2015-02-16 15:28:33 +02:00
|
|
|
if n == 0 {
|
2016-02-11 16:16:52 +02:00
|
|
|
return a.fd.Num < b.fd.Num
|
2015-02-16 15:28:33 +02:00
|
|
|
}
|
|
|
|
return n < 0
|
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Returns true if i file number is greater than j.
|
|
|
|
// This used for sort by file number in descending order.
|
2015-02-16 15:28:33 +02:00
|
|
|
func (tf tFiles) lessByNum(i, j int) bool {
|
2016-02-11 16:16:52 +02:00
|
|
|
return tf[i].fd.Num > tf[j].fd.Num
|
2015-02-16 15:28:33 +02:00
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Sorts tables by key in ascending order (ties broken by file number,
// see lessByKey).
func (tf tFiles) sortByKey(icmp *iComparer) {
	sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp})
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Sorts tables by file number in descending order (newest file first,
// see lessByNum).
func (tf tFiles) sortByNum() {
	sort.Sort(&tFilesSortByNum{tFiles: tf})
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Returns sum of all tables size.
|
2016-02-11 16:16:52 +02:00
|
|
|
func (tf tFiles) size() (sum int64) {
|
2015-02-16 15:28:33 +02:00
|
|
|
for _, t := range tf {
|
|
|
|
sum += t.size
|
|
|
|
}
|
|
|
|
return sum
|
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Searches smallest index of tables whose its smallest
|
|
|
|
// key is after or equal with given key.
|
2016-03-10 11:39:20 +02:00
|
|
|
func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int {
|
2015-02-16 15:28:33 +02:00
|
|
|
return sort.Search(len(tf), func(i int) bool {
|
2015-04-28 12:18:01 +03:00
|
|
|
return icmp.Compare(tf[i].imin, ikey) >= 0
|
2015-02-16 15:28:33 +02:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Searches smallest index of tables whose its largest
|
|
|
|
// key is after or equal with given key.
|
2016-03-10 11:39:20 +02:00
|
|
|
func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int {
|
2015-02-16 15:28:33 +02:00
|
|
|
return sort.Search(len(tf), func(i int) bool {
|
2015-04-28 12:18:01 +03:00
|
|
|
return icmp.Compare(tf[i].imax, ikey) >= 0
|
2015-02-16 15:28:33 +02:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Returns true if given key range overlaps with one or more
// tables key range. If unsorted is true then binary search will not be used.
// Nil/empty bounds are treated as unbounded range edges.
func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool {
	if unsorted {
		// Check against all files; tables may overlap each other here,
		// so a linear scan is required.
		for _, t := range tf {
			if t.overlaps(icmp, umin, umax) {
				return true
			}
		}
		return false
	}

	// Sorted, non-overlapping tables: binary-search the first table
	// whose largest key is >= umin, then test that one table only.
	i := 0
	if len(umin) > 0 {
		// Find the earliest possible internal key for min.
		i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek))
	}
	if i >= len(tf) {
		// Beginning of range is after all files, so no overlap.
		return false
	}
	return !tf[i].before(icmp, umax)
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Returns tables whose its key range overlaps with given key range.
// Range will be expanded if ukey found hop across tables.
// If overlapped is true then the search will be restarted if umax
// expanded.
// The dst content will be overwritten.
func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles {
	dst = dst[:0]
	for i := 0; i < len(tf); {
		t := tf[i]
		if t.overlaps(icmp, umin, umax) {
			if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 {
				// Table extends below umin: widen the range and restart
				// the scan, since earlier tables may now overlap too.
				umin = t.imin.ukey()
				dst = dst[:0]
				i = 0
				continue
			} else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 {
				// Table extends above umax: widen the range upward.
				umax = t.imax.ukey()
				// Restart search if it is overlapped.
				if overlapped {
					dst = dst[:0]
					i = 0
					continue
				}
			}

			dst = append(dst, t)
		}
		i++
	}

	return dst
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Returns tables key range.
|
2016-03-10 11:39:20 +02:00
|
|
|
func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) {
|
2015-02-16 15:28:33 +02:00
|
|
|
for i, t := range tf {
|
|
|
|
if i == 0 {
|
2015-04-28 12:18:01 +03:00
|
|
|
imin, imax = t.imin, t.imax
|
2015-02-16 15:28:33 +02:00
|
|
|
continue
|
|
|
|
}
|
2015-04-28 12:18:01 +03:00
|
|
|
if icmp.Compare(t.imin, imin) < 0 {
|
|
|
|
imin = t.imin
|
2015-02-16 15:28:33 +02:00
|
|
|
}
|
2015-04-28 12:18:01 +03:00
|
|
|
if icmp.Compare(t.imax, imax) > 0 {
|
|
|
|
imax = t.imax
|
2015-02-16 15:28:33 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Creates iterator index from tables.
|
2015-02-16 15:28:33 +02:00
|
|
|
func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer {
|
|
|
|
if slice != nil {
|
|
|
|
var start, limit int
|
|
|
|
if slice.Start != nil {
|
2016-03-10 11:39:20 +02:00
|
|
|
start = tf.searchMax(icmp, internalKey(slice.Start))
|
2015-02-16 15:28:33 +02:00
|
|
|
}
|
|
|
|
if slice.Limit != nil {
|
2016-03-10 11:39:20 +02:00
|
|
|
limit = tf.searchMin(icmp, internalKey(slice.Limit))
|
2015-02-16 15:28:33 +02:00
|
|
|
} else {
|
|
|
|
limit = tf.Len()
|
|
|
|
}
|
|
|
|
tf = tf[start:limit]
|
|
|
|
}
|
|
|
|
return iterator.NewArrayIndexer(&tFilesArrayIndexer{
|
|
|
|
tFiles: tf,
|
|
|
|
tops: tops,
|
|
|
|
icmp: icmp,
|
|
|
|
slice: slice,
|
|
|
|
ro: ro,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Tables iterator index, adapting a sorted tFiles to
// iterator.NewArrayIndexer.
type tFilesArrayIndexer struct {
	tFiles                  // sorted tables being indexed
	tops   *tOps            // table operations used to open per-table iterators
	icmp   *iComparer       // comparer for key searches
	slice  *util.Range      // optional key range limit for edge tables
	ro     *opt.ReadOptions // read options forwarded to table iterators
}
|
|
|
|
|
|
|
|
// Search returns the index of the first table whose largest key is
// greater than or equal to the given internal key.
func (a *tFilesArrayIndexer) Search(key []byte) int {
	return a.searchMax(a.icmp, internalKey(key))
}
|
|
|
|
|
|
|
|
func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
|
|
|
|
if i == 0 || i == a.Len()-1 {
|
|
|
|
return a.tops.newIterator(a.tFiles[i], a.slice, a.ro)
|
|
|
|
}
|
|
|
|
return a.tops.newIterator(a.tFiles[i], nil, a.ro)
|
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Helper type for sortByKey; supplies the Less method that completes
// sort.Interface on the embedded tFiles.
type tFilesSortByKey struct {
	tFiles
	icmp *iComparer
}
|
|
|
|
|
|
|
|
// Less orders tables by smallest key ascending; part of sort.Interface.
func (x *tFilesSortByKey) Less(i, j int) bool {
	return x.lessByKey(x.icmp, i, j)
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Helper type for sortByNum; supplies the Less method that completes
// sort.Interface on the embedded tFiles.
type tFilesSortByNum struct {
	tFiles
}
|
|
|
|
|
|
|
|
// Less orders tables by file number descending; part of sort.Interface.
func (x *tFilesSortByNum) Less(i, j int) bool {
	return x.lessByNum(i, j)
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Table operations: creating, opening, reading and removing tables.
type tOps struct {
	s      *session
	noSync bool             // skip fsync when finishing a table (from Options.GetNoSync)
	cache  *cache.Cache     // cache of open table.Reader instances, keyed by file number
	bcache *cache.Cache     // shared block cache; nil when block caching is disabled
	bpool  *util.BufferPool // buffer pool for table reads; nil when disabled
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Creates an empty table and returns table writer.
|
2015-02-16 15:28:33 +02:00
|
|
|
func (t *tOps) create() (*tWriter, error) {
|
2016-02-11 16:16:52 +02:00
|
|
|
fd := storage.FileDesc{storage.TypeTable, t.s.allocFileNum()}
|
|
|
|
fw, err := t.s.stor.Create(fd)
|
2015-02-16 15:28:33 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return &tWriter{
|
2016-02-11 16:16:52 +02:00
|
|
|
t: t,
|
|
|
|
fd: fd,
|
|
|
|
w: fw,
|
|
|
|
tw: table.NewWriter(fw, t.s.o.Options),
|
2015-02-16 15:28:33 +02:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Builds table from src iterator. Returns the finished table file and
// the number of entries written. On any error the partially-written
// table is dropped and its file number recycled.
func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
	w, err := t.create()
	if err != nil {
		return
	}

	// Drop the table if anything below fails; err is the named result,
	// so this sees errors from the loop and from finish as well.
	defer func() {
		if err != nil {
			w.drop()
		}
	}()

	for src.Next() {
		err = w.append(src.Key(), src.Value())
		if err != nil {
			return
		}
	}
	// Next() returning false may hide an iterator error; surface it.
	err = src.Error()
	if err != nil {
		return
	}

	n = w.tw.EntriesLen()
	f, err = w.finish()
	return
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Opens table. It returns a cache handle, which should
// be released after use. The table.Reader is created on first use and
// cached by file number thereafter.
func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
	ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) {
		// Cache miss: open the file and build a reader. Failures escape
		// through the captured err; returning (0, nil) tells the cache
		// not to store anything.
		var r storage.Reader
		r, err = t.s.stor.Open(f.fd)
		if err != nil {
			return 0, nil
		}

		// Give the table its own namespace in the shared block cache.
		var bcache *cache.NamespaceGetter
		if t.bcache != nil {
			bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)}
		}

		var tr *table.Reader
		tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options)
		if err != nil {
			r.Close()
			return 0, nil
		}
		return 1, tr

	})
	if ch == nil && err == nil {
		// Nil handle without an error means the cache has been closed.
		err = ErrClosed
	}
	return
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Finds key/value pair whose key is greater than or equal to the
|
|
|
|
// given key.
|
|
|
|
func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) {
|
|
|
|
ch, err := t.open(f)
|
2015-02-16 15:28:33 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
2015-04-28 12:18:01 +03:00
|
|
|
defer ch.Release()
|
|
|
|
return ch.Value().(*table.Reader).Find(key, true, ro)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finds key that is greater than or equal to the given key.
|
|
|
|
func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) {
|
|
|
|
ch, err := t.open(f)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
defer ch.Release()
|
|
|
|
return ch.Value().(*table.Reader).FindKey(key, true, ro)
|
2015-02-16 15:28:33 +02:00
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Returns approximate offset of the given key.
|
2016-03-10 11:39:20 +02:00
|
|
|
func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) {
|
2015-04-28 12:18:01 +03:00
|
|
|
ch, err := t.open(f)
|
2015-02-16 15:28:33 +02:00
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
2015-04-28 12:18:01 +03:00
|
|
|
defer ch.Release()
|
2016-03-10 11:39:20 +02:00
|
|
|
return ch.Value().(*table.Reader).OffsetOf(key)
|
2015-02-16 15:28:33 +02:00
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Creates an iterator from the given table.
|
2015-02-16 15:28:33 +02:00
|
|
|
func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
|
2015-04-28 12:18:01 +03:00
|
|
|
ch, err := t.open(f)
|
2015-02-16 15:28:33 +02:00
|
|
|
if err != nil {
|
|
|
|
return iterator.NewEmptyIterator(err)
|
|
|
|
}
|
2015-04-28 12:18:01 +03:00
|
|
|
iter := ch.Value().(*table.Reader).NewIterator(slice, ro)
|
|
|
|
iter.SetReleaser(ch)
|
2015-02-16 15:28:33 +02:00
|
|
|
return iter
|
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Removes table from persistent storage. It waits until
// no one uses the table: deletion is deferred by the cache until every
// handle to this table has been released.
func (t *tOps) remove(f *tFile) {
	t.cache.Delete(0, uint64(f.fd.Num), func() {
		if err := t.s.stor.Remove(f.fd); err != nil {
			t.s.logf("table@remove removing @%d %q", f.fd.Num, err)
		} else {
			t.s.logf("table@remove removed @%d", f.fd.Num)
		}
		// Evict any cached blocks belonging to this table.
		if t.bcache != nil {
			t.bcache.EvictNS(uint64(f.fd.Num))
		}
	})
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Closes the table ops instance. It will close all tables,
// regardless still used or not.
func (t *tOps) close() {
	t.bpool.Close()
	t.cache.Close()
	if t.bcache != nil {
		// NOTE(review): CloseWeak presumably lets outstanding block-cache
		// references drain before final teardown — confirm in cache package.
		t.bcache.CloseWeak()
	}
}
|
|
|
|
|
|
|
|
// Creates new initialized table ops instance.
|
|
|
|
func newTableOps(s *session) *tOps {
|
|
|
|
var (
|
|
|
|
cacher cache.Cacher
|
|
|
|
bcache *cache.Cache
|
2016-02-11 16:16:52 +02:00
|
|
|
bpool *util.BufferPool
|
2015-04-28 12:18:01 +03:00
|
|
|
)
|
|
|
|
if s.o.GetOpenFilesCacheCapacity() > 0 {
|
|
|
|
cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
|
|
|
|
}
|
2016-02-11 16:16:52 +02:00
|
|
|
if !s.o.GetDisableBlockCache() {
|
2015-04-28 12:18:01 +03:00
|
|
|
var bcacher cache.Cacher
|
|
|
|
if s.o.GetBlockCacheCapacity() > 0 {
|
|
|
|
bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
|
|
|
|
}
|
|
|
|
bcache = cache.NewCache(bcacher)
|
|
|
|
}
|
2016-02-11 16:16:52 +02:00
|
|
|
if !s.o.GetDisableBufferPool() {
|
|
|
|
bpool = util.NewBufferPool(s.o.GetBlockSize() + 5)
|
|
|
|
}
|
2015-04-28 12:18:01 +03:00
|
|
|
return &tOps{
|
|
|
|
s: s,
|
2016-02-11 16:16:52 +02:00
|
|
|
noSync: s.o.GetNoSync(),
|
2015-04-28 12:18:01 +03:00
|
|
|
cache: cache.NewCache(cacher),
|
|
|
|
bcache: bcache,
|
2016-02-11 16:16:52 +02:00
|
|
|
bpool: bpool,
|
2015-04-28 12:18:01 +03:00
|
|
|
}
|
2015-02-16 15:28:33 +02:00
|
|
|
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// tWriter wraps the table writer. It keeps track of file descriptor
// and added key range.
type tWriter struct {
	t *tOps

	fd storage.FileDesc // descriptor of the table file being written
	w  storage.Writer   // underlying storage writer
	tw *table.Writer    // table-format writer layered on w

	first, last []byte // smallest and largest keys appended so far; first==nil means empty
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Append key/value pair to the table. Keys must be appended in
// ascending order (enforced by the underlying table.Writer).
func (w *tWriter) append(key, value []byte) error {
	if w.first == nil {
		// Copy defensively; the caller's key buffer may be reused.
		w.first = append([]byte{}, key...)
	}
	w.last = append(w.last[:0], key...)
	return w.tw.Append(key, value)
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Returns true if the table is empty.
func (w *tWriter) empty() bool {
	// first is only set by append, so nil means nothing was written.
	return w.first == nil
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Closes the storage.Writer.
|
|
|
|
func (w *tWriter) close() {
|
|
|
|
if w.w != nil {
|
|
|
|
w.w.Close()
|
|
|
|
w.w = nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finalizes the table and returns table file. The storage writer is
// closed regardless of outcome.
func (w *tWriter) finish() (f *tFile, err error) {
	defer w.close()
	err = w.tw.Close()
	if err != nil {
		return
	}
	if !w.t.noSync {
		// Make the table durable before it can be referenced by the manifest.
		err = w.w.Sync()
		if err != nil {
			return
		}
	}
	f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last))
	return
}
|
|
|
|
|
2015-04-28 12:18:01 +03:00
|
|
|
// Drops the table: closes the writer, removes the partially-written
// file, and recycles its file number for a future table.
func (w *tWriter) drop() {
	w.close()
	w.t.s.stor.Remove(w.fd)
	w.t.s.reuseFileNum(w.fd.Num)
	// Clear state so the writer cannot be reused accidentally.
	w.tw = nil
	w.first = nil
	w.last = nil
}
|