swarm: schemas and migrations (#17813)

This commit is contained in:
Anton Evangelatov 2018-10-03 14:31:59 +02:00 committed by GitHub
parent 14bef9a2db
commit 303b99663e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 81 additions and 34 deletions

@ -93,21 +93,6 @@ func dbImport(ctx *cli.Context) {
log.Info(fmt.Sprintf("successfully imported %d chunks", count)) log.Info(fmt.Sprintf("successfully imported %d chunks", count))
} }
// dbClean removes corrupt entries from a local chunk database.
// It requires exactly two CLI arguments — the path to the chunk database
// and the hex-encoded base key — and terminates the process via
// utils.Fatalf on any failure.
func dbClean(ctx *cli.Context) {
	args := ctx.Args()
	if len(args) != 2 {
		utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database) and the base key")
	}

	basekey := common.Hex2Bytes(args[1])
	store, err := openLDBStore(args[0], basekey)
	if err != nil {
		utils.Fatalf("error opening local chunk database: %s", err)
	}
	defer store.Close()

	store.Cleanup()
}
func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) { func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil { if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
return nil, fmt.Errorf("invalid chunkdb path: %s", err) return nil, fmt.Errorf("invalid chunkdb path: %s", err)

@ -537,14 +537,6 @@ pv(1) tool to get a progress bar:
pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`, pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`,
}, },
{
Action: dbClean,
CustomHelpTemplate: helpTemplate,
Name: "clean",
Usage: "remove corrupt entries from a local chunk database",
ArgsUsage: "<chunkdb>",
Description: "Remove corrupt entries from a local chunk database",
},
}, },
}, },

@ -20,8 +20,6 @@ package storage
// no need for queueing/caching // no need for queueing/caching
import ( import (
"fmt"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/iterator"
@ -46,13 +44,10 @@ func NewLDBDatabase(file string) (*LDBDatabase, error) {
return database, nil return database, nil
} }
func (db *LDBDatabase) Put(key []byte, value []byte) { func (db *LDBDatabase) Put(key []byte, value []byte) error {
metrics.GetOrRegisterCounter("ldbdatabase.put", nil).Inc(1) metrics.GetOrRegisterCounter("ldbdatabase.put", nil).Inc(1)
err := db.db.Put(key, value, nil) return db.db.Put(key, value, nil)
if err != nil {
fmt.Println("Error put", err)
}
} }
func (db *LDBDatabase) Get(key []byte) ([]byte, error) { func (db *LDBDatabase) Get(key []byte) ([]byte, error) {

@ -37,7 +37,6 @@ import (
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
ch "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/storage/mock" "github.com/ethereum/go-ethereum/swarm/storage/mock"
"github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb"
@ -61,6 +60,7 @@ var (
keyDataIdx = []byte{4} keyDataIdx = []byte{4}
keyData = byte(6) keyData = byte(6)
keyDistanceCnt = byte(7) keyDistanceCnt = byte(7)
keySchema = []byte{8}
) )
var ( var (
@ -418,8 +418,8 @@ func (s *LDBStore) Import(in io.Reader) (int64, error) {
} }
} }
func (s *LDBStore) Cleanup() { //Cleanup iterates over the database and deletes chunks if they pass the `f` condition
//Iterates over the database and checks that there are no chunks bigger than 4kb func (s *LDBStore) Cleanup(f func(*chunk) bool) {
var errorsFound, removed, total int var errorsFound, removed, total int
it := s.db.NewIterator() it := s.db.NewIterator()
@ -471,7 +471,8 @@ func (s *LDBStore) Cleanup() {
cs := int64(binary.LittleEndian.Uint64(c.sdata[:8])) cs := int64(binary.LittleEndian.Uint64(c.sdata[:8]))
log.Trace("chunk", "key", fmt.Sprintf("%x", key), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.sdata), "size", cs) log.Trace("chunk", "key", fmt.Sprintf("%x", key), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.sdata), "size", cs)
if len(c.sdata) > ch.DefaultSize+8 { // if chunk is to be removed
if f(c) {
log.Warn("chunk for cleanup", "key", fmt.Sprintf("%x", key), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.sdata), "size", cs) log.Warn("chunk for cleanup", "key", fmt.Sprintf("%x", key), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.sdata), "size", cs)
s.delete(index.Idx, getIndexKey(key[1:]), po) s.delete(index.Idx, getIndexKey(key[1:]), po)
removed++ removed++
@ -730,6 +731,30 @@ func (s *LDBStore) tryAccessIdx(ikey []byte, index *dpaDBIndex) bool {
return true return true
} }
// GetSchema is returning the current named schema of the datastore as read from LevelDB
func (s *LDBStore) GetSchema() (string, error) {
	s.lock.Lock()
	defer s.lock.Unlock()

	data, err := s.db.Get(keySchema)
	if err == leveldb.ErrNotFound {
		// No schema entry yet: the store predates schemas. Report the
		// empty schema rather than an error so callers can migrate.
		return "", nil
	}
	if err != nil {
		return "", err
	}
	return string(data), nil
}
// PutSchema is saving a named schema to the LevelDB datastore
func (s *LDBStore) PutSchema(schema string) error {
	// Serialize against concurrent schema reads/writes.
	s.lock.Lock()
	defer s.lock.Unlock()

	// Persist the schema name under the reserved keySchema entry.
	return s.db.Put(keySchema, []byte(schema))
}
func (s *LDBStore) Get(_ context.Context, addr Address) (chunk Chunk, err error) { func (s *LDBStore) Get(_ context.Context, addr Address) (chunk Chunk, err error) {
metrics.GetOrRegisterCounter("ldbstore.get", nil).Inc(1) metrics.GetOrRegisterCounter("ldbstore.get", nil).Inc(1)
log.Trace("ldbstore.get", "key", addr) log.Trace("ldbstore.get", "key", addr)

@ -184,3 +184,42 @@ func (ls *LocalStore) Iterator(from uint64, to uint64, po uint8, f func(Address,
func (ls *LocalStore) Close() { func (ls *LocalStore) Close() {
ls.DbStore.Close() ls.DbStore.Close()
} }
// Migrate checks the datastore schema vs the runtime schema, and runs migrations if they don't match
func (ls *LocalStore) Migrate() error {
	schema, err := ls.DbStore.GetSchema()
	if err != nil {
		log.Error(err.Error())
		return err
	}

	log.Debug("found schema", "schema", schema, "runtime-schema", CurrentDbSchema)
	if schema == CurrentDbSchema {
		// Store already matches the runtime schema; nothing to migrate.
		return nil
	}

	if schema == "" {
		log.Debug("running migrations for", "schema", schema, "runtime-schema", CurrentDbSchema)
		// A chunk survives cleanup iff at least one registered validator
		// accepts it — i.e. it is a valid chunk of some known type.
		isValid := func(c *chunk) bool {
			for _, v := range ls.Validators {
				if v.Validate(c.Address(), c.Data()) {
					return true
				}
			}
			return false
		}
		ls.DbStore.Cleanup(isValid)
		if err := ls.DbStore.PutSchema(DbSchemaPurity); err != nil {
			log.Error(err.Error())
			return err
		}
	}
	return nil
}

6
swarm/storage/schema.go Normal file

@ -0,0 +1,6 @@
package storage
const (
	// DbSchemaPurity — "purity" is the first formal schema of LevelDB we
	// release together with Swarm 0.3.5.
	DbSchemaPurity = "purity"

	// CurrentDbSchema is the schema the running code expects on disk.
	CurrentDbSchema = DbSchemaPurity
)

@ -197,6 +197,11 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
resourceHandler, resourceHandler,
} }
err = lstore.Migrate()
if err != nil {
return nil, err
}
log.Debug("Setup local storage") log.Debug("Setup local storage")
self.bzz = network.NewBzz(bzzconfig, to, stateStore, stream.Spec, self.streamer.Run) self.bzz = network.NewBzz(bzzconfig, to, stateStore, stream.Spec, self.streamer.Run)