go-ethereum/trie/cache.go

55 lines
1.0 KiB
Go
Raw Normal View History

2015-01-08 12:47:04 +02:00
package trie
2014-11-19 17:35:57 +02:00
import "github.com/ethereum/go-ethereum/logger/glog"
// Backend is the minimal persistent key/value store that the trie
// cache reads through to on a miss and writes back to on Flush.
type Backend interface {
	// Get returns the value stored under key, or an error.
	Get([]byte) ([]byte, error)
	// Put stores value under key.
	Put([]byte, []byte) error
}
// Cache is a write-back, in-memory overlay on top of a Backend:
// reads fall through to the backend on a miss, writes stay in the
// map until Flush pushes them down.
type Cache struct {
	store   map[string][]byte // pending/cached entries, keyed by stringified bytes
	backend Backend           // persistent store behind the cache
}
// NewCache wraps the given backend with an empty in-memory cache layer.
func NewCache(backend Backend) *Cache {
	return &Cache{
		store:   make(map[string][]byte),
		backend: backend,
	}
}
func (self *Cache) Get(key []byte) []byte {
data := self.store[string(key)]
if data == nil {
2014-11-19 17:56:01 +02:00
data, _ = self.backend.Get(key)
2014-11-19 17:35:57 +02:00
}
return data
}
2014-11-19 17:56:01 +02:00
func (self *Cache) Put(key []byte, data []byte) {
2014-11-19 17:35:57 +02:00
self.store[string(key)] = data
}
func (self *Cache) Flush() {
for k, v := range self.store {
if err := self.backend.Put([]byte(k), v); err != nil {
glog.Fatal("db write err:", err)
}
2014-11-19 17:35:57 +02:00
}
// This will eventually grow too large. We'd could
// do a make limit on storage and push out not-so-popular nodes.
//self.Reset()
}
// Copy returns a new Cache that shares the same backend but has its
// own map holding a duplicate of the current in-memory entries.
func (self *Cache) Copy() *Cache {
	dup := NewCache(self.backend)
	for key, value := range self.store {
		dup.store[key] = value
	}
	return dup
}
2014-11-19 17:35:57 +02:00
func (self *Cache) Reset() {
//self.store = make(map[string][]byte)
2014-11-19 17:35:57 +02:00
}