swarm/storage: Get all chunk references for a given file (#19002)

This commit is contained in:
holisticode 2019-02-06 06:16:43 -05:00 committed by Anton Evangelatov
parent 75c9570c31
commit 3eff652a7b
2 changed files with 76 additions and 0 deletions

@@ -19,6 +19,7 @@ package storage
import (
"context"
"io"
"sort"
)
/*
@@ -96,3 +97,42 @@ func (f *FileStore) Store(ctx context.Context, data io.Reader, size int64, toEnc
func (f *FileStore) HashSize() int {
return f.hashFunc().Size()
}
// GetAllReferences is a public API. This endpoint returns all chunk hashes (only) for a given file
func (f *FileStore) GetAllReferences(ctx context.Context, data io.Reader, toEncrypt bool) (addrs AddressCollection, err error) {
// create a special kind of putter, which will only store the references
putter := &HashExplorer{
hasherStore: NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt),
References: make([]Reference, 0),
}
// the actual splitting still has to be done to obtain the references; there is no way around it
_, _, err = PyramidSplit(ctx, data, putter, putter)
if err != nil {
return nil, err
}
// collect all references
addrs = NewAddressCollection(0)
for _, ref := range putter.References {
addrs = append(addrs, Address(ref))
}
sort.Sort(addrs)
return addrs, nil
}
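A quick caller-side sketch of how the new endpoint might be used, assuming an already constructed *storage.FileStore and an io.Reader over the file's content; the package and helper name below are illustrative only, not part of this change:
package example
import (
	"context"
	"fmt"
	"io"
	"github.com/ethereum/go-ethereum/swarm/storage"
)
// listReferences prints every chunk reference the splitter would produce for r,
// without keeping the resulting root address around.
func listReferences(ctx context.Context, fileStore *storage.FileStore, r io.Reader) error {
	addrs, err := fileStore.GetAllReferences(ctx, r, false)
	if err != nil {
		return err
	}
	for _, addr := range addrs {
		fmt.Printf("%x\n", addr)
	}
	return nil
}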
// HashExplorer is a special kind of putter which will only store chunk references
type HashExplorer struct {
*hasherStore
References []Reference
}
// Put delegates to the underlying hasherStore's Put and additionally records the returned chunk reference in `References`
func (he *HashExplorer) Put(ctx context.Context, chunkData ChunkData) (Reference, error) {
// need to do the actual Put, which returns the reference
ref, err := he.hasherStore.Put(ctx, chunkData)
if err != nil {
return nil, err
}
// internally store the reference
he.References = append(he.References, ref)
return ref, nil
}
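Why HashExplorer embeds *hasherStore rather than holding it in a named field: with embedding, the type inherits every method the splitter expects from its putter, and only Put is shadowed to add the bookkeeping. A minimal, standalone illustration of that embed-and-override pattern (the types below are hypothetical, not from the swarm codebase):
package main
import "fmt"
// base plays the role of the underlying store: it knows how to Put and Close.
type base struct{}
func (base) Put(data []byte) string { return fmt.Sprintf("ref(%d bytes)", len(data)) }
func (base) Close()                 {}
// recorder embeds base, inheriting Close unchanged, and shadows Put to
// remember every reference while still delegating the real work.
type recorder struct {
	base
	seen []string
}
func (r *recorder) Put(data []byte) string {
	ref := r.base.Put(data)
	r.seen = append(r.seen, ref)
	return ref
}
func main() {
	r := &recorder{}
	r.Put([]byte("hello"))
	r.Close() // promoted from base
	fmt.Println(r.seen)
}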

@@ -173,3 +173,39 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
t.Fatalf("Comparison error after clearing memStore.")
}
}
// TestGetAllReferences only checks that GetAllReferences returns the expected
// number of references for a given file
func TestGetAllReferences(t *testing.T) {
tdb, cleanup, err := newTestDbStore(false, false)
defer cleanup()
if err != nil {
t.Fatalf("init dbStore failed: %v", err)
}
db := tdb.LDBStore
memStore := NewMemStore(NewDefaultStoreParams(), db)
localStore := &LocalStore{
memStore: memStore,
DbStore: db,
}
fileStore := NewFileStore(localStore, NewFileStoreParams())
checkRefs := func(dataSize int, expectedLen int) {
slice := testutil.RandomBytes(1, dataSize)
addrs, err := fileStore.GetAllReferences(context.Background(), bytes.NewReader(slice), false)
if err != nil {
t.Fatal(err)
}
if len(addrs) != expectedLen {
t.Fatalf("Expected reference array length to be %d, but is %d", expectedLen, len(addrs))
}
}
// testRuns[i] and expectedLens[i] are the data size and the expected number of references respectively
testRuns := []int{1024, 8192, 16000, 30000, 1000000}
expectedLens := []int{1, 3, 5, 9, 248}
for i, r := range testRuns {
checkRefs(r, expectedLens[i])
}
}
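For context on where the expectedLens values come from, a back-of-the-envelope sketch, assuming swarm's default 4096-byte chunk size and 128 references per intermediate chunk (both assumptions here, not stated in this diff): a file that fits in one chunk yields a single reference, while larger files contribute one reference per data chunk plus the intermediate and root chunks of the tree.
package main
import "fmt"
// expectedRefs estimates the total number of chunk references for a file of
// dataSize bytes, under the assumed 4096-byte chunk size and 128-way branching.
func expectedRefs(dataSize int) int {
	const chunkSize, branches = 4096, 128
	leaves := (dataSize + chunkSize - 1) / chunkSize
	if leaves <= 1 {
		return 1 // a single-chunk file is its own root
	}
	total := leaves
	// walk up the tree: each level packs up to `branches` references per chunk
	for n := leaves; n > 1; n = (n + branches - 1) / branches {
		total += (n + branches - 1) / branches
	}
	return total
}
func main() {
	for _, size := range []int{1024, 8192, 16000, 30000, 1000000} {
		fmt.Println(size, expectedRefs(size)) // 1, 3, 5, 9, 248
	}
}
This reproduces the expected counts 1, 3, 5, 9 and 248 used in the test above.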