// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	ch "github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
)

type testDbStore struct {
	*LDBStore
	dir string
}

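// newTestDbStore creates an LDBStore backed by a fresh temporary directory.
// With mock set, chunk data is kept in an in-memory global mock store node.
// The returned cleanup func closes the store and removes the directory.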
func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
	dir, err := ioutil.TempDir("", "bzz-storage-test")
	if err != nil {
		return nil, func() {}, err
	}

	var db *LDBStore
	storeparams := NewDefaultStoreParams()
	params := NewLDBStoreParams(storeparams, dir)
	params.Po = testPoFunc

	if mock {
		globalStore := mem.NewGlobalStore()
		addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
		mockStore := globalStore.NewNodeStore(addr)

		db, err = NewMockDbStore(params, mockStore)
	} else {
		db, err = NewLDBStore(params)
	}

	cleanup := func() {
		if db != nil {
			db.Close()
		}
		err = os.RemoveAll(dir)
		if err != nil {
			panic(fmt.Sprintf("db cleanup failed: %v", err))
		}
	}

	return &testDbStore{db, dir}, cleanup, err
}

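// testPoFunc computes the proximity order of an address relative to the
// all-zero base key.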
func testPoFunc(k Address) (ret uint8) {
	basekey := make([]byte, 32)
	return uint8(Proximity(basekey, k[:]))
}

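// testDbStoreRandom stores n random chunks and verifies retrieval via the
// shared testStoreRandom helper.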
func testDbStoreRandom(n int, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreRandom(db, n, t)
}

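// testDbStoreCorrect stores n chunks and verifies their integrity via the
// shared testStoreCorrect helper.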
func testDbStoreCorrect(n int, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreCorrect(db, n, t)
}

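// TestMarkAccessed verifies that MarkAccessed bumps a chunk's access counter
// in the index once the current batch is flushed.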
func TestMarkAccessed(t *testing.T) {
	db, cleanup, err := newTestDbStore(false, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	h := GenerateRandomChunk(ch.DefaultSize)

	db.Put(context.Background(), h)

	var index dpaDBIndex
	addr := h.Address()
	idxk := getIndexKey(addr)

	idata, err := db.db.Get(idxk)
	if err != nil {
		t.Fatal(err)
	}
	decodeIndex(idata, &index)

	if index.Access != 0 {
		t.Fatalf("Expected the access index to be %d, but it is %d", 0, index.Access)
	}

	db.MarkAccessed(addr)
	db.writeCurrentBatch()

	idata, err = db.db.Get(idxk)
	if err != nil {
		t.Fatal(err)
	}
	decodeIndex(idata, &index)

	if index.Access != 1 {
		t.Fatalf("Expected the access index to be %d, but it is %d", 1, index.Access)
	}
}

func TestDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, false, t)
}

func TestDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, false, t)
}

func TestDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, false, t)
}

func TestDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, false, t)
}

func TestMockDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, true, t)
}

func TestMockDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, true, t)
}

func TestMockDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, true, t)
}

func TestMockDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, true, t)
}

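// testDbStoreNotFound checks that fetching an address that was never stored
// returns ErrChunkNotFound.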
func testDbStoreNotFound(t *testing.T, mock bool) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	_, err = db.Get(context.TODO(), ZeroAddr)
	if err != ErrChunkNotFound {
		t.Errorf("Expected ErrChunkNotFound, got %v", err)
	}
}

func TestDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, false)
}

func TestMockDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, true)
}

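// testIterator puts a batch of random chunks and walks every proximity bin
// with SyncIterator, checking that each stored key is visited.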
func testIterator(t *testing.T, mock bool) {
	chunkcount := 32
	var i int
	var poc uint
	chunkkeys := NewAddressCollection(chunkcount)
	chunkkeysResults := NewAddressCollection(chunkcount)

	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	chunks := GenerateRandomChunks(ch.DefaultSize, chunkcount)

	for i = 0; i < len(chunks); i++ {
		chunkkeys[i] = chunks[i].Address()
		err := db.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatalf("dbStore.Put failed: %v", err)
		}
	}

	for i = 0; i < len(chunkkeys); i++ {
		log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
	}
	i = 0
	for poc = 0; poc <= 255; poc++ {
		err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
			log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
			chunkkeysResults[n] = k
			i++
			return true
		})
		if err != nil {
			t.Fatalf("Iterator call failed: %v", err)
		}
	}

	for i = 0; i < chunkcount; i++ {
		if !bytes.Equal(chunkkeys[i], chunkkeysResults[i]) {
			t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeysResults[i])
		}
	}
}

func TestIterator(t *testing.T) {
	testIterator(t, false)
}

func TestMockIterator(t *testing.T) {
	testIterator(t, true)
}

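// benchmarkDbStorePut and benchmarkDbStoreGet time chunk writes and reads
// against a fresh (optionally mock-backed) store via the shared
// benchmarkStorePut and benchmarkStoreGet helpers.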
func benchmarkDbStorePut(n int, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStorePut(db, n, b)
}

func benchmarkDbStoreGet(n int, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStoreGet(db, n, b)
}

func BenchmarkDbStorePut_500(b *testing.B) {
	benchmarkDbStorePut(500, false, b)
}

func BenchmarkDbStoreGet_500(b *testing.B) {
	benchmarkDbStoreGet(500, false, b)
}

func BenchmarkMockDbStorePut_500(b *testing.B) {
	benchmarkDbStorePut(500, true, b)
}

func BenchmarkMockDbStoreGet_500(b *testing.B) {
	benchmarkDbStoreGet(500, true, b)
}

// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and
// retrieve them, provided we don't hit the garbage collection
func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
	capacity := 50
	n := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err.Error())
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for _, ch := range chunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
	}

	if ldb.entryCnt != uint64(n) {
		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
	}

	if ldb.accessCnt != uint64(2*n) {
		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
	}
}

// TestLDBStoreCollectGarbage tests that we can put more chunks than LevelDB's capacity, and
// retrieve only some of them, because garbage collection must have partially cleared the store
// Also tests that we can delete chunks and that we can trigger garbage collection
func TestLDBStoreCollectGarbage(t *testing.T) {

	// below max round
	initialCap := defaultMaxGCRound / 100
	cap := initialCap / 2
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// at max round
	cap = initialCap
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// more than max round, not on threshold
	cap = initialCap + 500
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
}

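// testLDBStoreCollectGarbage reads its parameters from the subtest name:
// t.Name() has the form "TestLDBStoreCollectGarbage/A/<capacity>/<n>", so
// params[2] is the store capacity and params[3] the number of chunks to put.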
func testLDBStoreCollectGarbage(t *testing.T) {
	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	// split put counts to gc target count threshold, and wait for gc to finish in between
	var allChunks []Chunk
	remaining := n
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		chunks, err := mputRandomChunks(ldb, putCount)
		if err != nil {
			t.Fatal(err.Error())
		}
		allChunks = append(allChunks, chunks...)
		ldb.lock.RLock()
		log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)
		ldb.lock.RUnlock()

		waitGc(ldb)
	}

	// attempt gets on all put chunks
	var missing int
	for _, ch := range allChunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			missing++
			continue
		}
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}

		log.Trace("got back chunk", "chunk", ret)
	}

	// all surplus chunks should be missing
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	if missing != expectMissing {
		t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", expectMissing, missing)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

// TestLDBStoreAddRemove tests that we can put and then delete a given chunk
func TestLDBStoreAddRemove(t *testing.T) {
	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(200)
	defer cleanup()

	n := 100
	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err.Error())
	}

	for i := 0; i < n; i++ {
		// delete all even index chunks
		if i%2 == 0 {
			ldb.Delete(chunks[i].Address())
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())

		if i%2 == 0 {
			// expect even chunks to be missing
			if err == nil {
				t.Fatal("expected chunk to be missing, but got no error")
			}
		} else {
			// expect odd chunks to be retrieved successfully
			if err != nil {
				t.Fatalf("expected no error, but got %s", err)
			}

			if !bytes.Equal(ret.Data(), chunks[i].Data()) {
				t.Fatal("expected to get the same data back, but got something else")
			}
		}
	}
}

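// testLDBStoreRemoveThenCollectGarbage fills the store, deletes everything,
// then refills it and checks that garbage collection evicts the chunks with
// the smallest access counts first. Parameters come from the subtest name,
// as in testLDBStoreCollectGarbage.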
func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
	t.Skip("flaky with -race flag")

	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	defer cleanup()
	ldb.setCapacity(uint64(capacity))

	// put capacity count number of chunks
	chunks := make([]Chunk, n)
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(ch.DefaultSize)
		chunks[i] = c
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		err := ldb.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatal(err)
		}
	}

	waitGc(ldb)

	// delete all chunks
	// (only count the ones actually deleted, the rest will have been gc'd)
	deletes := 0
	for i := 0; i < n; i++ {
		if ldb.Delete(chunks[i].Address()) == nil {
			deletes++
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	if ldb.entryCnt != 0 {
		t.Fatalf("ldb.entryCnt expected 0 got %v", ldb.entryCnt)
	}

	// the manual deletes will have increased accesscnt, so we need to add this when we verify the current count
	expAccessCnt := uint64(n)
	if ldb.accessCnt != expAccessCnt {
		t.Fatalf("ldb.accessCnt expected %v got %v", expAccessCnt, ldb.accessCnt)
	}

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	remaining := n
	var puts int
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		for putCount > 0 {
			ldb.Put(context.TODO(), chunks[puts])
			ldb.lock.RLock()
			log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
			ldb.lock.RUnlock()
			puts++
			putCount--
		}

		waitGc(ldb)
	}

	// expect first surplus chunks to be missing, because they have the smallest access value
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	for i := 0; i < expectMissing; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err == nil {
			t.Fatalf("expected surplus chunk %d to be missing, but got no error", i)
		}
	}

	// expect last chunks to be present, as they have the largest access value
	for i := expectMissing; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("chunk %v: expected no error, but got %s", i, err)
		}
		if !bytes.Equal(ret.Data(), chunks[i].Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
	}
}

// TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where accesscount differs from indexcount
func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {
	capacity := defaultMaxGCRound / 100 * 2
	n := capacity - 1

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err.Error())
	}
	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	// set first added capacity/2 chunks to highest accesscount
	for i := 0; i < capacity/2; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("fail get chunk #%d - %s: %v", i, chunks[i].Address(), err)
		}
	}
	_, err = mputRandomChunks(ldb, 2)
	if err != nil {
		t.Fatal(err.Error())
	}

	// wait for garbage collection to kick in on the responsible actor
	waitGc(ldb)

	var missing int
	for i, ch := range chunks[2 : capacity/2] {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			t.Fatalf("fail find chunk #%d - %s: %v", i, ch.Address(), err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
		log.Trace("got back chunk", "chunk", ret)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

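// TestCleanIndex deliberately corrupts the data, index and gc rows of a few
// stored chunks, then verifies that CleanGCIndex prunes orphaned index
// entries, repairs damaged gc rows while leaving intact ones untouched, and
// keeps the entry and per-bin counters consistent.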
func TestCleanIndex(t *testing.T) {
	capacity := 5000
	n := 3

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err)
	}

	// remove the data of the first chunk
	po := ldb.po(chunks[0].Address()[:])
	dataKey := make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	// dataKey[2:10] = first chunk has storageIdx 0 on [2:10]
	if _, err := ldb.db.Get(dataKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Delete(dataKey); err != nil {
		t.Fatal(err)
	}

	// remove the gc index row for the first chunk
	gcFirstCorrectKey := make([]byte, 9)
	gcFirstCorrectKey[0] = keyGCIdx
	if err := ldb.db.Delete(gcFirstCorrectKey); err != nil {
		t.Fatal(err)
	}

	// warp the gc data of the second chunk
	// this data should be correct again after the clean
	gcSecondCorrectKey := make([]byte, 9)
	gcSecondCorrectKey[0] = keyGCIdx
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(1))
	gcSecondCorrectVal, err := ldb.db.Get(gcSecondCorrectKey)
	if err != nil {
		t.Fatal(err)
	}
	warpedGCVal := make([]byte, len(gcSecondCorrectVal)+1)
	copy(warpedGCVal[1:], gcSecondCorrectVal)
	if err := ldb.db.Delete(gcSecondCorrectKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Put(gcSecondCorrectKey, warpedGCVal); err != nil {
		t.Fatal(err)
	}

	if err := ldb.CleanGCIndex(); err != nil {
		t.Fatal(err)
	}

	// the index without corresponding data should have been deleted
	idxKey := make([]byte, 33)
	idxKey[0] = keyIndex
	copy(idxKey[1:], chunks[0].Address())
	if _, err := ldb.db.Get(idxKey); err == nil {
		t.Fatalf("expected chunk 0 idx to be pruned: %v", idxKey)
	}

	// the two other indices should be present
	copy(idxKey[1:], chunks[1].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 1 idx to be present: %v", idxKey)
	}

	copy(idxKey[1:], chunks[2].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 2 idx to be present: %v", idxKey)
	}

	// first gc index should still be gone
	if _, err := ldb.db.Get(gcFirstCorrectKey); err == nil {
		t.Fatalf("expected gc 0 idx to be pruned: %v", idxKey)
	}

	// second gc index should still be fixed
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 1 idx to be present: %v", idxKey)
	}

	// third gc index should be unchanged
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(2))
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
	}

	c, err := ldb.db.Get(keyEntryCnt)
	if err != nil {
		t.Fatalf("expected entrycnt to be present: %v", err)
	}

	// entrycount should now be one less
	entryCount := binary.BigEndian.Uint64(c)
	if entryCount != 2 {
		t.Fatalf("expected entrycnt to be 2, was %d", entryCount)
	}

	// the chunks might accidentally be in the same bin
	// if so that bin counter will now be 2 - the highest added index.
	// if not, the total of them will be 3
	poBins := []uint8{ldb.po(chunks[1].Address()), ldb.po(chunks[2].Address())}
	if poBins[0] == poBins[1] {
		poBins = poBins[:1]
	}

	var binTotal uint64
	var currentBin [2]byte
	currentBin[0] = keyDistanceCnt
	if len(poBins) == 1 {
		currentBin[1] = poBins[0]
		c, err := ldb.db.Get(currentBin[:])
		if err != nil {
			t.Fatalf("expected bin counter to be present: %v", err)
		}
		binCount := binary.BigEndian.Uint64(c)
		if binCount != 2 {
			t.Fatalf("expected bin counter to be 2, was %d", binCount)
		}
	} else {
		for _, bin := range poBins {
			currentBin[1] = bin
			c, err := ldb.db.Get(currentBin[:])
			if err != nil {
				t.Fatalf("expected bin counter to be present: %v", err)
			}
			binCount := binary.BigEndian.Uint64(c)
			binTotal += binCount
		}
		if binTotal != 3 {
			t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal)
		}
	}

	// check that the iterator quits properly
	chunks, err = mputRandomChunks(ldb, 4100)
	if err != nil {
		t.Fatal(err)
	}

	po = ldb.po(chunks[4099].Address()[:])
	dataKey = make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	binary.BigEndian.PutUint64(dataKey[2:], 4099+3)
	if _, err := ldb.db.Get(dataKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Delete(dataKey); err != nil {
		t.Fatal(err)
	}

	if err := ldb.CleanGCIndex(); err != nil {
		t.Fatal(err)
	}

	// entrycount should now be one less of added chunks
	c, err = ldb.db.Get(keyEntryCnt)
	if err != nil {
		t.Fatalf("expected entrycnt to be present: %v", err)
	}
	entryCount = binary.BigEndian.Uint64(c)
	if entryCount != 4099+2 {
		t.Fatalf("expected entrycnt to be %d, was %d", 4099+2, entryCount)
	}
}

// Note: waitGc does not guarantee that we wait 1 GC round; it only
// guarantees that if the GC is running we wait for that run to finish
// ticket: https://github.com/ethersphere/go-ethereum/issues/1151
func waitGc(ldb *LDBStore) {
	<-ldb.gc.runC
	ldb.gc.runC <- struct{}{}
}