// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"context"
	"crypto/rand"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/log"
	ch "github.com/ethereum/go-ethereum/swarm/chunk"
	colorable "github.com/mattn/go-colorable"
)

var (
	loglevel   = flag.Int("loglevel", 3, "verbosity of logs")
	getTimeout = 30 * time.Second
)

func init() {
	flag.Parse()
	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}

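// brokenLimitedReader wraps an io.Reader and starts failing once more than
// errAt bytes have been requested, letting tests simulate an upstream reader
// that breaks midway. Illustrative usage (an added sketch, not original code):
//
//	r := brokenLimitReader(testDataReader(10000), 10000, 1000)
//	_, err := ioutil.ReadAll(r) // expected to fail once reads pass errAt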
type brokenLimitedReader struct {
	lr    io.Reader
	errAt int
	off   int
	size  int
}

func brokenLimitReader(data io.Reader, size int, errAt int) *brokenLimitedReader {
	return &brokenLimitedReader{
		lr:    data,
		errAt: errAt,
		size:  size,
	}
}

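// newLDBStore creates an LDBStore backed by a fresh temporary directory and
// returns it together with a cleanup function that closes the store and
// removes the directory. Callers are expected to defer the cleanup, e.g.:
//
//	db, cleanup := newLDBStore(t)
//	defer cleanup()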
func newLDBStore(t *testing.T) (*LDBStore, func()) {
	dir, err := ioutil.TempDir("", "bzz-storage-test")
	if err != nil {
		t.Fatal(err)
	}
	log.Trace("memstore.tempdir", "dir", dir)

	ldbparams := NewLDBStoreParams(NewDefaultStoreParams(), dir)
	db, err := NewLDBStore(ldbparams)
	if err != nil {
		t.Fatal(err)
	}

	cleanup := func() {
		db.Close()
		err := os.RemoveAll(dir)
		if err != nil {
			t.Fatal(err)
		}
	}

	return db, cleanup
}

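// mputRandomChunks stores n randomly generated chunks in store and returns
// them; the generated chunks use the default chunk size, so the chunksize
// argument is currently unused.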
func mputRandomChunks(store ChunkStore, n int, chunksize int64) ([]Chunk, error) {
	return mput(store, n, GenerateRandomChunk)
}

func mput(store ChunkStore, n int, f func(i int64) Chunk) (hs []Chunk, err error) {
	// put to localstore and wait for stored channel
	// does not check delivery error state
	errc := make(chan error)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	for i := int64(0); i < int64(n); i++ {
		chunk := f(ch.DefaultSize)
		go func() {
			select {
			case errc <- store.Put(ctx, chunk):
			case <-ctx.Done():
			}
		}()
		hs = append(hs, chunk)
	}

	// wait for all chunks to be stored
	for i := 0; i < n; i++ {
		err := <-errc
		if err != nil {
			return nil, err
		}
	}
	return hs, nil
}

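// mget retrieves the chunks at the given addresses concurrently; when f is not
// nil it is applied to every retrieved chunk. The first error encountered is
// returned, or a timeout error if the whole batch does not finish within five
// seconds.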
func mget(store ChunkStore, hs []Address, f func(h Address, chunk Chunk) error) error {
	wg := sync.WaitGroup{}
	wg.Add(len(hs))
	errc := make(chan error)

	for _, k := range hs {
		go func(h Address) {
			defer wg.Done()
			// TODO: write timeout with context
			chunk, err := store.Get(context.TODO(), h)
			if err != nil {
				errc <- err
				return
			}
			if f != nil {
				err = f(h, chunk)
				if err != nil {
					errc <- err
					return
				}
			}
		}(k)
	}
	go func() {
		wg.Wait()
		close(errc)
	}()
	var err error
	select {
	case err = <-errc:
	case <-time.NewTimer(5 * time.Second).C:
		err = fmt.Errorf("timed out after 5 seconds")
	}
	return err
}

func testDataReader(l int) (r io.Reader) {
	return io.LimitReader(rand.Reader, int64(l))
}

func (r *brokenLimitedReader) Read(buf []byte) (int, error) {
	if r.off+len(buf) > r.errAt {
		return 0, fmt.Errorf("Broken reader")
	}
	r.off += len(buf)
	return r.lr.Read(buf)
}

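// testStoreRandom stores n random chunks in m and checks that every one of
// them can be retrieved again.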
func testStoreRandom(m ChunkStore, n int, chunksize int64, t *testing.T) {
	chunks, err := mputRandomChunks(m, n, chunksize)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
	err = mget(m, chunkAddresses(chunks), nil)
	if err != nil {
		t.Fatalf("testStore failed: %v", err)
	}
}

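// testStoreCorrect stores n random chunks in m and verifies, for every chunk
// retrieved, that the key matches the chunk's address and that the address is
// the hash of the chunk's span and payload.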
func testStoreCorrect(m ChunkStore, n int, chunksize int64, t *testing.T) {
	chunks, err := mputRandomChunks(m, n, chunksize)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
	f := func(h Address, chunk Chunk) error {
		if !bytes.Equal(h, chunk.Address()) {
			return fmt.Errorf("key does not match retrieved chunk Address")
		}
		hasher := MakeHashFunc(DefaultHash)()
		hasher.ResetWithLength(chunk.SpanBytes())
		hasher.Write(chunk.Payload())
		exp := hasher.Sum(nil)
		if !bytes.Equal(h, exp) {
			return fmt.Errorf("key is not hash of chunk data")
		}
		return nil
	}
	err = mget(m, chunkAddresses(chunks), f)
	if err != nil {
		t.Fatalf("testStore failed: %v", err)
	}
}

func benchmarkStorePut(store ChunkStore, n int, chunksize int64, b *testing.B) {
	chunks := make([]Chunk, n)
	i := 0
	f := func(dataSize int64) Chunk {
		chunk := GenerateRandomChunk(dataSize)
		chunks[i] = chunk
		i++
		return chunk
	}

	mput(store, n, f)

	f = func(dataSize int64) Chunk {
		chunk := chunks[i]
		i++
		return chunk
	}

	b.ReportAllocs()
	b.ResetTimer()

	for j := 0; j < b.N; j++ {
		i = 0
		mput(store, n, f)
	}
}

func benchmarkStoreGet(store ChunkStore, n int, chunksize int64, b *testing.B) {
	chunks, err := mputRandomChunks(store, n, chunksize)
	if err != nil {
		b.Fatalf("expected no error, got %v", err)
	}
	b.ReportAllocs()
	b.ResetTimer()
	addrs := chunkAddresses(chunks)
	for i := 0; i < b.N; i++ {
		err := mget(store, addrs, nil)
		if err != nil {
			b.Fatalf("mget failed: %v", err)
		}
	}
}

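// Illustrative sketch only (not part of the original suite): the benchmark
// helpers above take a ChunkStore, so a per-backend benchmark can simply wrap
// them, for example:
//
//	func BenchmarkMapChunkStorePut1k(b *testing.B) {
//		benchmarkStorePut(NewMapChunkStore(), 1000, ch.DefaultSize, b)
//	}
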
// MapChunkStore is a very simple ChunkStore implementation to store chunks in a map in memory.
type MapChunkStore struct {
	chunks map[string]Chunk
	mu     sync.RWMutex
}

func NewMapChunkStore() *MapChunkStore {
	return &MapChunkStore{
		chunks: make(map[string]Chunk),
	}
}

func (m *MapChunkStore) Put(_ context.Context, ch Chunk) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.chunks[ch.Address().Hex()] = ch
	return nil
}

func (m *MapChunkStore) Get(_ context.Context, ref Address) (Chunk, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	chunk := m.chunks[ref.Hex()]
	if chunk == nil {
		return nil, ErrChunkNotFound
	}
	return chunk, nil
}

func (m *MapChunkStore) Close() {
}

func chunkAddresses(chunks []Chunk) []Address {
	addrs := make([]Address, len(chunks))
	for i, ch := range chunks {
		addrs[i] = ch.Address()
	}
	return addrs
}
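
// TestMapChunkStoreRoundTrip is an illustrative sketch added here for clarity
// and not part of the original file: it wires the in-memory MapChunkStore into
// the shared helpers above to show how a backend is exercised end to end. The
// test name and chunk count are assumptions made for the example.
func TestMapChunkStoreRoundTrip(t *testing.T) {
	store := NewMapChunkStore()
	defer store.Close()

	// store random chunks and read them back
	testStoreRandom(store, 10, ch.DefaultSize, t)
	// verify retrieved chunks hash back to their addresses
	testStoreCorrect(store, 10, ch.DefaultSize, t)
}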