go-ethereum/swarm/storage/common_test.go


// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"io"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/mattn/go-colorable"
)

var (
	loglevel   = flag.Int("loglevel", 3, "verbosity of logs")
	getTimeout = 30 * time.Second
)

func init() {
	flag.Parse()
	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}
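
// brokenLimitedReader wraps an io.Reader and returns an error as soon as a
// Read would take the cumulative offset past errAt, simulating a broken
// input stream in tests.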
type brokenLimitedReader struct {
	lr    io.Reader
	errAt int
	off   int
	size  int
}
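
// brokenLimitReader returns a brokenLimitedReader reading from data that
// starts failing once errAt bytes have been requested.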
func brokenLimitReader(data io.Reader, size int, errAt int) *brokenLimitedReader {
	return &brokenLimitedReader{
		lr:    data,
		errAt: errAt,
		size:  size,
	}
}
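
// mputRandomChunks stores n randomly generated chunks of the default chunk
// size in store.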
func mputRandomChunks(store ChunkStore, n int) ([]Chunk, error) {
	return mput(store, n, GenerateRandomChunk)
}
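
// mput generates n chunks with f and stores them concurrently, returning the
// stored chunks. It waits for every Put to report back, or gives up when the
// one-minute context expires.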
func mput(store ChunkStore, n int, f func(i int64) Chunk) (hs []Chunk, err error) {
	// put to localstore and wait for stored channel
	// does not check delivery error state
	errc := make(chan error)
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancel()
	for i := int64(0); i < int64(n); i++ {
		ch := f(chunk.DefaultSize)
		go func() {
			_, err := store.Put(ctx, chunk.ModePutUpload, ch)
			select {
			case errc <- err:
			case <-ctx.Done():
			}
		}()
		hs = append(hs, ch)
	}

	// wait for all chunks to be stored
	for i := 0; i < n; i++ {
		err := <-errc
		if err != nil {
			return nil, err
		}
	}
	return hs, nil
}
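
// mget retrieves all addresses in hs concurrently, optionally applying f to
// each retrieved chunk. It fails if any retrieval returns an error or if the
// whole batch does not finish within the timeout.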
func mget(store ChunkStore, hs []Address, f func(h Address, chunk Chunk) error) error {
	wg := sync.WaitGroup{}
	wg.Add(len(hs))
	errc := make(chan error)
	for _, k := range hs {
		go func(h Address) {
			defer wg.Done()
			// TODO: write timeout with context
			ch, err := store.Get(context.TODO(), chunk.ModeGetRequest, h)
			if err != nil {
				errc <- err
				return
			}
			if f != nil {
				err = f(h, ch)
				if err != nil {
					errc <- err
					return
				}
			}
		}(k)
	}
	go func() {
		wg.Wait()
		close(errc)
	}()
	var err error
	timeout := 20 * time.Second
	select {
	case err = <-errc:
	case <-time.NewTimer(timeout).C:
		err = fmt.Errorf("timed out after %v", timeout)
	}
	return err
}
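
// Read implements io.Reader, forwarding to the wrapped reader until the
// cumulative requested length would exceed errAt, at which point it returns
// an error.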
func (r *brokenLimitedReader) Read(buf []byte) (int, error) {
	if r.off+len(buf) > r.errAt {
		return 0, fmt.Errorf("broken reader")
	}
	r.off += len(buf)
	return r.lr.Read(buf)
}
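
// testStoreRandom stores n random chunks and checks that each of them can be
// retrieved afterwards.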
func testStoreRandom(m ChunkStore, n int, t *testing.T) {
	chunks, err := mputRandomChunks(m, n)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
	err = mget(m, chunkAddresses(chunks), nil)
	if err != nil {
		t.Fatalf("testStore failed: %v", err)
	}
}
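
// testStoreCorrect stores n random chunks and verifies on retrieval that each
// lookup key matches both the chunk's reported address and the hash of the
// chunk data.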
func testStoreCorrect(m ChunkStore, n int, t *testing.T) {
	chunks, err := mputRandomChunks(m, n)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
	f := func(h Address, chunk Chunk) error {
		if !bytes.Equal(h, chunk.Address()) {
			return fmt.Errorf("key does not match retrieved chunk Address")
		}
		hasher := MakeHashFunc(DefaultHash)()
		data := chunk.Data()
		hasher.ResetWithLength(data[:8])
		hasher.Write(data[8:])
		exp := hasher.Sum(nil)
		if !bytes.Equal(h, exp) {
			return fmt.Errorf("key is not hash of chunk data")
		}
		return nil
	}
	err = mget(m, chunkAddresses(chunks), f)
	if err != nil {
		t.Fatalf("testStore failed: %v", err)
	}
}
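
// benchmarkStorePut generates and stores n chunks once to fill the chunks
// slice, then measures b.N rounds of re-putting the same chunks.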
func benchmarkStorePut(store ChunkStore, n int, b *testing.B) {
	chunks := make([]Chunk, n)
	i := 0
	f := func(dataSize int64) Chunk {
		chunk := GenerateRandomChunk(dataSize)
		chunks[i] = chunk
		i++
		return chunk
	}
	mput(store, n, f)

	f = func(dataSize int64) Chunk {
		chunk := chunks[i]
		i++
		return chunk
	}

	b.ReportAllocs()
	b.ResetTimer()
	for j := 0; j < b.N; j++ {
		i = 0
		mput(store, n, f)
	}
}
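
// benchmarkStoreGet stores n random chunks, then measures b.N rounds of
// retrieving all of them.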
func benchmarkStoreGet(store ChunkStore, n int, b *testing.B) {
	chunks, err := mputRandomChunks(store, n)
	if err != nil {
		b.Fatalf("expected no error, got %v", err)
	}

	b.ReportAllocs()
	b.ResetTimer()
	addrs := chunkAddresses(chunks)
	for i := 0; i < b.N; i++ {
		err := mget(store, addrs, nil)
		if err != nil {
			b.Fatalf("mget failed: %v", err)
		}
	}
}

// MapChunkStore is a very simple ChunkStore implementation to store chunks in a map in memory.
type MapChunkStore struct {
	chunks map[string]Chunk
	mu     sync.RWMutex
}
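
// NewMapChunkStore creates a MapChunkStore with an initialized backing map.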
func NewMapChunkStore() *MapChunkStore {
	return &MapChunkStore{
		chunks: make(map[string]Chunk),
	}
}
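
// Put stores the chunk under its hex-encoded address, reporting whether a
// chunk with the same address already existed.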
func (m *MapChunkStore) Put(_ context.Context, _ chunk.ModePut, ch Chunk) (bool, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	_, exists := m.chunks[ch.Address().Hex()]
	m.chunks[ch.Address().Hex()] = ch
	return exists, nil
}
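
// Get returns the chunk stored under ref, or ErrChunkNotFound if it is not
// in the store.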
func (m *MapChunkStore) Get(_ context.Context, _ chunk.ModeGet, ref Address) (Chunk, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	chunk := m.chunks[ref.Hex()]
	if chunk == nil {
		return nil, ErrChunkNotFound
	}
	return chunk, nil
}

// Need to implement Has from SyncChunkStore
func (m *MapChunkStore) Has(ctx context.Context, ref Address) (has bool, err error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	_, has = m.chunks[ref.Hex()]
	return has, nil
}
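
// The remaining methods are no-op stubs that only exist so MapChunkStore
// satisfies the chunk store interfaces used by the tests.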
func (m *MapChunkStore) Set(ctx context.Context, mode chunk.ModeSet, addr chunk.Address) (err error) {
	return nil
}

func (m *MapChunkStore) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
	return 0, nil
}

func (m *MapChunkStore) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan chunk.Descriptor, stop func()) {
	return nil, nil
}

func (m *MapChunkStore) Close() error {
	return nil
}
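
// chunkAddresses extracts the addresses of the given chunks into a slice.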
func chunkAddresses(chunks []Chunk) []Address {
	addrs := make([]Address, len(chunks))
	for i, ch := range chunks {
		addrs[i] = ch.Address()
	}
	return addrs
}