2019-10-04 15:24:01 +02:00
|
|
|
// Copyright 2019 The go-ethereum Authors
|
|
|
|
// This file is part of the go-ethereum library.
|
|
|
|
//
|
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
package snapshot
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2023-02-16 14:36:58 -05:00
|
|
|
crand "crypto/rand"
|
2019-10-04 15:24:01 +02:00
|
|
|
"math/rand"
|
|
|
|
"testing"
|
|
|
|
|
2019-11-26 09:48:29 +02:00
|
|
|
"github.com/VictoriaMetrics/fastcache"
|
2019-10-04 15:24:01 +02:00
|
|
|
"github.com/ethereum/go-ethereum/common"
|
2019-12-02 09:31:07 +01:00
|
|
|
"github.com/ethereum/go-ethereum/crypto"
|
2019-11-26 09:48:29 +02:00
|
|
|
"github.com/ethereum/go-ethereum/ethdb/memorydb"
|
2019-10-04 15:24:01 +02:00
|
|
|
)
|
|
|
|
|
2020-03-04 15:06:04 +02:00
|
|
|
func copyDestructs(destructs map[common.Hash]struct{}) map[common.Hash]struct{} {
|
|
|
|
copy := make(map[common.Hash]struct{})
|
|
|
|
for hash := range destructs {
|
|
|
|
copy[hash] = struct{}{}
|
|
|
|
}
|
|
|
|
return copy
|
|
|
|
}
|
|
|
|
|
|
|
|
func copyAccounts(accounts map[common.Hash][]byte) map[common.Hash][]byte {
|
|
|
|
copy := make(map[common.Hash][]byte)
|
|
|
|
for hash, blob := range accounts {
|
|
|
|
copy[hash] = blob
|
|
|
|
}
|
|
|
|
return copy
|
|
|
|
}
|
|
|
|
|
|
|
|
func copyStorage(storage map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
|
|
|
|
copy := make(map[common.Hash]map[common.Hash][]byte)
|
|
|
|
for accHash, slots := range storage {
|
|
|
|
copy[accHash] = make(map[common.Hash][]byte)
|
|
|
|
for slotHash, blob := range slots {
|
|
|
|
copy[accHash][slotHash] = blob
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return copy
|
|
|
|
}
|
|
|
|
|
2019-10-04 15:24:01 +02:00
|
|
|
// TestMergeBasics tests some simple merges
func TestMergeBasics(t *testing.T) {
	var (
		destructs = make(map[common.Hash]struct{})
		accounts  = make(map[common.Hash][]byte)
		storage   = make(map[common.Hash]map[common.Hash][]byte)
	)
	// Fill up a parent
	for i := 0; i < 100; i++ {
		h := randomHash()
		data := randomAccount()

		accounts[h] = data
		// Roughly a quarter of the accounts are also marked destructed...
		if rand.Intn(4) == 0 {
			destructs[h] = struct{}{}
		}
		// ...and roughly half get a single random storage slot.
		if rand.Intn(2) == 0 {
			accStorage := make(map[common.Hash][]byte)
			value := make([]byte, 32)
			crand.Read(value)
			accStorage[randomHash()] = value
			storage[h] = accStorage
		}
	}
	// Add some (identical) layers on top. The copy helpers are used so each
	// layer owns its own maps, mimicking independent state updates.
	parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	// And flatten
	merged := (child.flatten()).(*diffLayer)

	{ // Check account lists
		// The sorted account list is expected to be built lazily: it must be
		// empty before AccountList() is invoked and fully populated after.
		// The assertion order below is therefore significant.
		if have, want := len(merged.accountList), 0; have != want {
			t.Errorf("accountList wrong: have %v, want %v", have, want)
		}
		if have, want := len(merged.AccountList()), len(accounts); have != want {
			t.Errorf("AccountList() wrong: have %v, want %v", have, want)
		}
		if have, want := len(merged.accountList), len(accounts); have != want {
			t.Errorf("accountList [2] wrong: have %v, want %v", have, want)
		}
	}
	{ // Check account drops
		// All destructs from the (identical) layers should survive the merge.
		if have, want := len(merged.destructSet), len(destructs); have != want {
			t.Errorf("accountDrop wrong: have %v, want %v", have, want)
		}
	}
	{ // Check storage lists
		// Storage lists are also expected to be materialized on demand, one
		// account at a time: before probing the i-th account, exactly i
		// entries should exist in the cache.
		i := 0
		for aHash, sMap := range storage {
			if have, want := len(merged.storageList), i; have != want {
				t.Errorf("[1] storageList wrong: have %v, want %v", have, want)
			}
			list, _ := merged.StorageList(aHash)
			if have, want := len(list), len(sMap); have != want {
				t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want)
			}
			if have, want := len(merged.storageList[aHash]), len(sMap); have != want {
				t.Errorf("storageList wrong: have %v, want %v", have, want)
			}
			i++
		}
	}
}
|
|
|
|
|
|
|
|
// TestMergeDelete tests some deletion
|
|
|
|
func TestMergeDelete(t *testing.T) {
|
|
|
|
var (
|
|
|
|
storage = make(map[common.Hash]map[common.Hash][]byte)
|
|
|
|
)
|
|
|
|
// Fill up a parent
|
|
|
|
h1 := common.HexToHash("0x01")
|
|
|
|
h2 := common.HexToHash("0x02")
|
|
|
|
|
2020-03-03 15:52:00 +02:00
|
|
|
flipDrops := func() map[common.Hash]struct{} {
|
|
|
|
return map[common.Hash]struct{}{
|
2020-04-22 16:25:36 +08:00
|
|
|
h2: {},
|
2020-03-03 15:52:00 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
flipAccs := func() map[common.Hash][]byte {
|
|
|
|
return map[common.Hash][]byte{
|
|
|
|
h1: randomAccount(),
|
|
|
|
}
|
2019-10-04 15:24:01 +02:00
|
|
|
}
|
2020-03-03 15:52:00 +02:00
|
|
|
flopDrops := func() map[common.Hash]struct{} {
|
|
|
|
return map[common.Hash]struct{}{
|
2020-04-22 16:25:36 +08:00
|
|
|
h1: {},
|
2020-03-03 15:52:00 +02:00
|
|
|
}
|
2019-10-04 15:24:01 +02:00
|
|
|
}
|
2020-03-03 15:52:00 +02:00
|
|
|
flopAccs := func() map[common.Hash][]byte {
|
|
|
|
return map[common.Hash][]byte{
|
|
|
|
h2: randomAccount(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Add some flipAccs-flopping layers on top
|
|
|
|
parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage)
|
|
|
|
child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
|
|
|
|
child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
|
|
|
|
child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
|
|
|
|
child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
|
|
|
|
child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
|
|
|
|
child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
|
2019-10-04 15:24:01 +02:00
|
|
|
|
|
|
|
if data, _ := child.Account(h1); data == nil {
|
2020-03-03 15:52:00 +02:00
|
|
|
t.Errorf("last diff layer: expected %x account to be non-nil", h1)
|
2019-10-04 15:24:01 +02:00
|
|
|
}
|
|
|
|
if data, _ := child.Account(h2); data != nil {
|
2020-03-03 15:52:00 +02:00
|
|
|
t.Errorf("last diff layer: expected %x account to be nil", h2)
|
|
|
|
}
|
|
|
|
if _, ok := child.destructSet[h1]; ok {
|
|
|
|
t.Errorf("last diff layer: expected %x drop to be missing", h1)
|
|
|
|
}
|
|
|
|
if _, ok := child.destructSet[h2]; !ok {
|
|
|
|
t.Errorf("last diff layer: expected %x drop to be present", h1)
|
2019-10-04 15:24:01 +02:00
|
|
|
}
|
|
|
|
// And flatten
|
|
|
|
merged := (child.flatten()).(*diffLayer)
|
|
|
|
|
|
|
|
if data, _ := merged.Account(h1); data == nil {
|
2020-03-03 15:52:00 +02:00
|
|
|
t.Errorf("merged layer: expected %x account to be non-nil", h1)
|
2019-10-04 15:24:01 +02:00
|
|
|
}
|
|
|
|
if data, _ := merged.Account(h2); data != nil {
|
2020-03-03 15:52:00 +02:00
|
|
|
t.Errorf("merged layer: expected %x account to be nil", h2)
|
|
|
|
}
|
|
|
|
if _, ok := merged.destructSet[h1]; !ok { // Note, drops stay alive until persisted to disk!
|
|
|
|
t.Errorf("merged diff layer: expected %x drop to be present", h1)
|
|
|
|
}
|
|
|
|
if _, ok := merged.destructSet[h2]; !ok { // Note, drops stay alive until persisted to disk!
|
|
|
|
t.Errorf("merged diff layer: expected %x drop to be present", h1)
|
2019-10-04 15:24:01 +02:00
|
|
|
}
|
|
|
|
// If we add more granular metering of memory, we can enable this again,
|
|
|
|
// but it's not implemented for now
|
2020-03-04 15:06:04 +02:00
|
|
|
//if have, want := merged.memory, child.memory; have != want {
|
|
|
|
// t.Errorf("mem wrong: have %d, want %d", have, want)
|
2019-10-04 15:24:01 +02:00
|
|
|
//}
|
|
|
|
}
|
|
|
|
|
|
|
|
// This tests that if we create a new account, and set a slot, and then merge
|
|
|
|
// it, the lists will be correct.
|
|
|
|
func TestInsertAndMerge(t *testing.T) {
|
|
|
|
// Fill up a parent
|
|
|
|
var (
|
|
|
|
acc = common.HexToHash("0x01")
|
|
|
|
slot = common.HexToHash("0x02")
|
|
|
|
parent *diffLayer
|
|
|
|
child *diffLayer
|
|
|
|
)
|
|
|
|
{
|
2020-03-03 15:52:00 +02:00
|
|
|
var (
|
|
|
|
destructs = make(map[common.Hash]struct{})
|
|
|
|
accounts = make(map[common.Hash][]byte)
|
|
|
|
storage = make(map[common.Hash]map[common.Hash][]byte)
|
|
|
|
)
|
|
|
|
parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
|
2019-10-04 15:24:01 +02:00
|
|
|
}
|
|
|
|
{
|
2020-03-03 15:52:00 +02:00
|
|
|
var (
|
|
|
|
destructs = make(map[common.Hash]struct{})
|
|
|
|
accounts = make(map[common.Hash][]byte)
|
|
|
|
storage = make(map[common.Hash]map[common.Hash][]byte)
|
|
|
|
)
|
2019-10-04 15:24:01 +02:00
|
|
|
accounts[acc] = randomAccount()
|
2020-03-03 15:52:00 +02:00
|
|
|
storage[acc] = make(map[common.Hash][]byte)
|
2019-10-04 15:24:01 +02:00
|
|
|
storage[acc][slot] = []byte{0x01}
|
2020-03-03 15:52:00 +02:00
|
|
|
child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
|
2019-10-04 15:24:01 +02:00
|
|
|
}
|
|
|
|
// And flatten
|
|
|
|
merged := (child.flatten()).(*diffLayer)
|
|
|
|
{ // Check that slot value is present
|
2020-03-04 15:06:04 +02:00
|
|
|
have, _ := merged.Storage(acc, slot)
|
|
|
|
if want := []byte{0x01}; !bytes.Equal(have, want) {
|
|
|
|
t.Errorf("merged slot value wrong: have %x, want %x", have, want)
|
2019-10-04 15:24:01 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-26 09:48:29 +02:00
|
|
|
func emptyLayer() *diskLayer {
|
|
|
|
return &diskLayer{
|
|
|
|
diskdb: memorydb.New(),
|
|
|
|
cache: fastcache.New(500 * 1024),
|
|
|
|
}
|
2019-10-04 15:24:01 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// BenchmarkSearch checks how long it takes to find a non-existing key
|
|
|
|
// BenchmarkSearch-6 200000 10481 ns/op (1K per layer)
|
|
|
|
// BenchmarkSearch-6 200000 10760 ns/op (10K per layer)
|
|
|
|
// BenchmarkSearch-6 100000 17866 ns/op
|
|
|
|
//
|
|
|
|
// BenchmarkSearch-6 500000 3723 ns/op (10k per layer, only top-level RLock()
|
|
|
|
func BenchmarkSearch(b *testing.B) {
|
|
|
|
// First, we set up 128 diff layers, with 1K items each
|
|
|
|
fill := func(parent snapshot) *diffLayer {
|
2020-03-03 15:52:00 +02:00
|
|
|
var (
|
|
|
|
destructs = make(map[common.Hash]struct{})
|
|
|
|
accounts = make(map[common.Hash][]byte)
|
|
|
|
storage = make(map[common.Hash]map[common.Hash][]byte)
|
|
|
|
)
|
2019-10-04 15:24:01 +02:00
|
|
|
for i := 0; i < 10000; i++ {
|
|
|
|
accounts[randomHash()] = randomAccount()
|
|
|
|
}
|
2020-03-03 15:52:00 +02:00
|
|
|
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
|
2019-10-04 15:24:01 +02:00
|
|
|
}
|
|
|
|
var layer snapshot
|
2019-11-26 09:48:29 +02:00
|
|
|
layer = emptyLayer()
|
2019-10-04 15:24:01 +02:00
|
|
|
for i := 0; i < 128; i++ {
|
|
|
|
layer = fill(layer)
|
|
|
|
}
|
2019-12-02 09:31:07 +01:00
|
|
|
key := crypto.Keccak256Hash([]byte{0x13, 0x38})
|
2019-10-04 15:24:01 +02:00
|
|
|
b.ResetTimer()
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
layer.AccountRLP(key)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// BenchmarkSearchSlot checks how long it takes to find a non-existing key
// - Number of layers: 128
// - Each layers contains the account, with a couple of storage slots
// BenchmarkSearchSlot-6   	  100000	     14554 ns/op
// BenchmarkSearchSlot-6   	  100000	     22254 ns/op (when checking parent root using mutex)
// BenchmarkSearchSlot-6   	  100000	     14551 ns/op (when checking parent number using atomic)
// With bloom filter:
// BenchmarkSearchSlot-6   	 3467835	       351 ns/op
func BenchmarkSearchSlot(b *testing.B) {
	// First, we set up 128 diff layers, with 1K items each
	accountKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
	// NOTE(review): storageKey hashes the same preimage as accountKey, so the
	// two hashes are identical — presumably intentional, since the stored
	// slots all use random hashes and the probed slot therefore still does
	// not exist; confirm this was not meant to be a distinct preimage.
	storageKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
	accountRLP := randomAccount()
	fill := func(parent snapshot) *diffLayer {
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		// Every layer re-writes the same account, plus 5 random slots for it.
		accounts[accountKey] = accountRLP

		accStorage := make(map[common.Hash][]byte)
		for i := 0; i < 5; i++ {
			value := make([]byte, 32)
			crand.Read(value)
			accStorage[randomHash()] = value
			storage[accountKey] = accStorage
		}
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
	}
	var layer snapshot
	layer = emptyLayer()
	for i := 0; i < 128; i++ {
		layer = fill(layer)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// The account exists in every layer, but the slot does not, so each
		// lookup walks the full stack.
		layer.Storage(accountKey, storageKey)
	}
}
|
|
|
|
|
|
|
|
// With accountList and sorting
|
2020-03-03 15:52:00 +02:00
|
|
|
// BenchmarkFlatten-6 50 29890856 ns/op
|
2019-10-04 15:24:01 +02:00
|
|
|
//
|
2021-01-07 15:36:21 +09:00
|
|
|
// Without sorting and tracking accountList
|
2019-10-04 15:24:01 +02:00
|
|
|
// BenchmarkFlatten-6 300 5511511 ns/op
|
|
|
|
func BenchmarkFlatten(b *testing.B) {
|
2019-11-22 13:23:49 +02:00
|
|
|
fill := func(parent snapshot) *diffLayer {
|
2020-03-03 15:52:00 +02:00
|
|
|
var (
|
|
|
|
destructs = make(map[common.Hash]struct{})
|
|
|
|
accounts = make(map[common.Hash][]byte)
|
|
|
|
storage = make(map[common.Hash]map[common.Hash][]byte)
|
|
|
|
)
|
2019-10-04 15:24:01 +02:00
|
|
|
for i := 0; i < 100; i++ {
|
|
|
|
accountKey := randomHash()
|
|
|
|
accounts[accountKey] = randomAccount()
|
|
|
|
|
|
|
|
accStorage := make(map[common.Hash][]byte)
|
|
|
|
for i := 0; i < 20; i++ {
|
|
|
|
value := make([]byte, 32)
|
2023-02-16 14:36:58 -05:00
|
|
|
crand.Read(value)
|
2019-10-04 15:24:01 +02:00
|
|
|
accStorage[randomHash()] = value
|
|
|
|
}
|
|
|
|
storage[accountKey] = accStorage
|
|
|
|
}
|
2020-03-03 15:52:00 +02:00
|
|
|
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
|
2019-10-04 15:24:01 +02:00
|
|
|
}
|
|
|
|
b.ResetTimer()
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
b.StopTimer()
|
|
|
|
var layer snapshot
|
2019-11-26 09:48:29 +02:00
|
|
|
layer = emptyLayer()
|
2019-10-04 15:24:01 +02:00
|
|
|
for i := 1; i < 128; i++ {
|
2019-11-22 13:23:49 +02:00
|
|
|
layer = fill(layer)
|
2019-10-04 15:24:01 +02:00
|
|
|
}
|
|
|
|
b.StartTimer()
|
|
|
|
|
|
|
|
for i := 1; i < 128; i++ {
|
|
|
|
dl, ok := layer.(*diffLayer)
|
|
|
|
if !ok {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
layer = dl.flatten()
|
|
|
|
}
|
|
|
|
b.StopTimer()
|
|
|
|
}
|
|
|
|
}
|
2019-10-23 15:19:02 +02:00
|
|
|
|
|
|
|
// This test writes ~324M of diff layers to disk, spread over
// - 128 individual layers,
// - each with 200 accounts
// - containing 200 slots
//
// BenchmarkJournal-6   	       1	1471373923 ns/ops
// BenchmarkJournal-6   	       1	1208083335 ns/op // bufio writer
func BenchmarkJournal(b *testing.B) {
	// fill produces one diff layer with 200 random accounts, each carrying
	// 200 random 32-byte storage slots.
	fill := func(parent snapshot) *diffLayer {
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		for i := 0; i < 200; i++ {
			accountKey := randomHash()
			accounts[accountKey] = randomAccount()

			accStorage := make(map[common.Hash][]byte)
			for i := 0; i < 200; i++ {
				value := make([]byte, 32)
				crand.Read(value)
				accStorage[randomHash()] = value
			}
			storage[accountKey] = accStorage
		}
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
	}
	// Build the 127-deep diff stack once; only journalling is measured.
	layer := snapshot(emptyLayer())
	for i := 1; i < 128; i++ {
		layer = fill(layer)
	}
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		// Journal into a throwaway buffer; the returned error is irrelevant
		// for the timing measurement.
		layer.Journal(new(bytes.Buffer))
	}
}
|