// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"hash"
	"io/ioutil"
	"math/big"
	"math/rand"
	"os"
	"reflect"
	"testing"
	"testing/quick"

	"github.com/davecgh/go-spew/spew"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/leveldb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/rlp"
	"golang.org/x/crypto/sha3"
)

func init() {
	spew.Config.Indent = " "
	spew.Config.DisableMethods = false
}

// Used for testing
func newEmpty() *Trie {
	trie, _ := New(common.Hash{}, NewDatabase(memorydb.New()))
	return trie
}

func TestEmptyTrie(t *testing.T) {
	var trie Trie
	res := trie.Hash()
	exp := emptyRoot
	if res != exp {
		t.Errorf("expected %x got %x", exp, res)
	}
}

func TestNull(t *testing.T) {
	var trie Trie
	key := make([]byte, 32)
	value := []byte("test")
	trie.Update(key, value)
	if !bytes.Equal(trie.Get(key), value) {
		t.Fatal("wrong value")
	}
}

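// TestMissingRoot checks that New returns a MissingNodeError (and no trie) when
// the requested root node is not present in the database.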
func TestMissingRoot(t *testing.T) {
	trie, err := New(common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), NewDatabase(memorydb.New()))
	if trie != nil {
		t.Error("New returned non-nil trie for invalid root")
	}
	if _, ok := err.(*MissingNodeError); !ok {
		t.Errorf("New returned wrong error: %v", err)
	}
}

func TestMissingNodeDisk(t *testing.T)    { testMissingNode(t, false) }
func TestMissingNodeMemonly(t *testing.T) { testMissingNode(t, true) }

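// testMissingNode checks that trie reads and writes fail with a MissingNodeError
// once a node is removed from the backing store, either from the in-memory cache
// (memonly) or from the disk database.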
func testMissingNode(t *testing.T, memonly bool) {
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)

	trie, _ := New(common.Hash{}, triedb)
	updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
	updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
	root, _ := trie.Commit(nil)
	if !memonly {
		triedb.Commit(root, true, nil)
	}

	trie, _ = New(root, triedb)
	_, err := trie.TryGet([]byte("120000"))
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	trie, _ = New(root, triedb)
	_, err = trie.TryGet([]byte("120099"))
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	trie, _ = New(root, triedb)
	_, err = trie.TryGet([]byte("123456"))
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	trie, _ = New(root, triedb)
	err = trie.TryUpdate([]byte("120099"), []byte("zxcvzxcvzxcvzxcvzxcvzxcvzxcvzxcv"))
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	trie, _ = New(root, triedb)
	err = trie.TryDelete([]byte("123456"))
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	hash := common.HexToHash("0xe1d943cc8f061a0c0b98162830b970395ac9315654824bf21b73b891365262f9")
	if memonly {
		delete(triedb.dirties, hash)
	} else {
		diskdb.Delete(hash[:])
	}

	trie, _ = New(root, triedb)
	_, err = trie.TryGet([]byte("120000"))
	if _, ok := err.(*MissingNodeError); !ok {
		t.Errorf("Wrong error: %v", err)
	}
	trie, _ = New(root, triedb)
	_, err = trie.TryGet([]byte("120099"))
	if _, ok := err.(*MissingNodeError); !ok {
		t.Errorf("Wrong error: %v", err)
	}
	trie, _ = New(root, triedb)
	_, err = trie.TryGet([]byte("123456"))
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	trie, _ = New(root, triedb)
	err = trie.TryUpdate([]byte("120099"), []byte("zxcv"))
	if _, ok := err.(*MissingNodeError); !ok {
		t.Errorf("Wrong error: %v", err)
	}
	trie, _ = New(root, triedb)
	err = trie.TryDelete([]byte("123456"))
	if _, ok := err.(*MissingNodeError); !ok {
		t.Errorf("Wrong error: %v", err)
	}
}

func TestInsert(t *testing.T) {
	trie := newEmpty()

	updateString(trie, "doe", "reindeer")
	updateString(trie, "dog", "puppy")
	updateString(trie, "dogglesworth", "cat")

	exp := common.HexToHash("8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3")
	root := trie.Hash()
	if root != exp {
		t.Errorf("case 1: exp %x got %x", exp, root)
	}

	trie = newEmpty()
	updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")

	exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab")
	root, err := trie.Commit(nil)
	if err != nil {
		t.Fatalf("commit error: %v", err)
	}
	if root != exp {
		t.Errorf("case 2: exp %x got %x", exp, root)
	}
}

func TestGet(t *testing.T) {
	trie := newEmpty()
	updateString(trie, "doe", "reindeer")
	updateString(trie, "dog", "puppy")
	updateString(trie, "dogglesworth", "cat")

	for i := 0; i < 2; i++ {
		res := getString(trie, "dog")
		if !bytes.Equal(res, []byte("puppy")) {
			t.Errorf("expected puppy got %x", res)
		}

		unknown := getString(trie, "unknown")
		if unknown != nil {
			t.Errorf("expected nil got %x", unknown)
		}

		if i == 1 {
			return
		}
		trie.Commit(nil)
	}
}

func TestDelete(t *testing.T) {
	trie := newEmpty()
	vals := []struct{ k, v string }{
		{"do", "verb"},
		{"ether", "wookiedoo"},
		{"horse", "stallion"},
		{"shaman", "horse"},
		{"doge", "coin"},
		{"ether", ""},
		{"dog", "puppy"},
		{"shaman", ""},
	}
	for _, val := range vals {
		if val.v != "" {
			updateString(trie, val.k, val.v)
		} else {
			deleteString(trie, val.k)
		}
	}

	hash := trie.Hash()
	exp := common.HexToHash("5991bb8c6514148a29db676a14ac506cd2cd5775ace63c30a4fe457715e9ac84")
	if hash != exp {
		t.Errorf("expected %x got %x", exp, hash)
	}
}

func TestEmptyValues(t *testing.T) {
	trie := newEmpty()

	vals := []struct{ k, v string }{
		{"do", "verb"},
		{"ether", "wookiedoo"},
		{"horse", "stallion"},
		{"shaman", "horse"},
		{"doge", "coin"},
		{"ether", ""},
		{"dog", "puppy"},
		{"shaman", ""},
	}
	for _, val := range vals {
		updateString(trie, val.k, val.v)
	}

	hash := trie.Hash()
	exp := common.HexToHash("5991bb8c6514148a29db676a14ac506cd2cd5775ace63c30a4fe457715e9ac84")
	if hash != exp {
		t.Errorf("expected %x got %x", exp, hash)
	}
}

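// TestReplication commits a trie to its database, reopens it from the committed
// root, and checks that the contents and root hash survive the round trip.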
func TestReplication(t *testing.T) {
	trie := newEmpty()
	vals := []struct{ k, v string }{
		{"do", "verb"},
		{"ether", "wookiedoo"},
		{"horse", "stallion"},
		{"shaman", "horse"},
		{"doge", "coin"},
		{"dog", "puppy"},
		{"somethingveryoddindeedthis is", "myothernodedata"},
	}
	for _, val := range vals {
		updateString(trie, val.k, val.v)
	}
	exp, err := trie.Commit(nil)
	if err != nil {
		t.Fatalf("commit error: %v", err)
	}

	// create a new trie on top of the database and check that lookups work.
	trie2, err := New(exp, trie.db)
	if err != nil {
		t.Fatalf("can't recreate trie at %x: %v", exp, err)
	}
	for _, kv := range vals {
		if string(getString(trie2, kv.k)) != kv.v {
			t.Errorf("trie2 doesn't have %q => %q", kv.k, kv.v)
		}
	}
	hash, err := trie2.Commit(nil)
	if err != nil {
		t.Fatalf("commit error: %v", err)
	}
	if hash != exp {
		t.Errorf("root failure. expected %x got %x", exp, hash)
	}

	// perform some insertions on the new trie.
	vals2 := []struct{ k, v string }{
		{"do", "verb"},
		{"ether", "wookiedoo"},
		{"horse", "stallion"},
		// {"shaman", "horse"},
		// {"doge", "coin"},
		// {"ether", ""},
		// {"dog", "puppy"},
		// {"somethingveryoddindeedthis is", "myothernodedata"},
		// {"shaman", ""},
	}
	for _, val := range vals2 {
		updateString(trie2, val.k, val.v)
	}
	if hash := trie2.Hash(); hash != exp {
		t.Errorf("root failure. expected %x got %x", exp, hash)
	}
}

func TestLargeValue(t *testing.T) {
	trie := newEmpty()
	trie.Update([]byte("key1"), []byte{99, 99, 99, 99})
	trie.Update([]byte("key2"), bytes.Repeat([]byte{1}, 32))
	trie.Hash()
}

// TestRandomCases tests some cases that were found via random fuzzing
func TestRandomCases(t *testing.T) {
	var rt = []randTestStep{
		{op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 0
		{op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 1
		{op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000002")}, // step 2
		{op: 2, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("")}, // step 3
		{op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 4
		{op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 5
		{op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 6
		{op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 7
		{op: 0, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("0000000000000008")}, // step 8
		{op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000009")}, // step 9
		{op: 2, key: common.Hex2Bytes("fd"), value: common.Hex2Bytes("")}, // step 10
		{op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 11
		{op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 12
		{op: 0, key: common.Hex2Bytes("fd"), value: common.Hex2Bytes("000000000000000d")}, // step 13
		{op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 14
		{op: 1, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("")}, // step 15
		{op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 16
		{op: 0, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("0000000000000011")}, // step 17
		{op: 5, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 18
		{op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 19
		{op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000014")}, // step 20
		{op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000015")}, // step 21
		{op: 0, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("0000000000000016")}, // step 22
		{op: 5, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 23
		{op: 1, key: common.Hex2Bytes("980c393656413a15c8da01978ed9f89feb80b502f58f2d640e3a2f5f7a99a7018f1b573befd92053ac6f78fca4a87268"), value: common.Hex2Bytes("")}, // step 24
		{op: 1, key: common.Hex2Bytes("fd"), value: common.Hex2Bytes("")}, // step 25
	}
	runRandTest(rt)
}

// randTest performs random trie operations.
// Instances of this test are created by Generate.
type randTest []randTestStep

type randTestStep struct {
	op    int
	key   []byte // for opUpdate, opDelete, opGet
	value []byte // for opUpdate
	err   error  // for debugging
}

const (
	opUpdate = iota
	opDelete
	opGet
	opCommit
	opHash
	opReset
	opItercheckhash
	opMax // boundary value, not an actual op
)

func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
	var allKeys [][]byte
	genKey := func() []byte {
		if len(allKeys) < 2 || r.Intn(100) < 10 {
			// new key
			key := make([]byte, r.Intn(50))
			r.Read(key)
			allKeys = append(allKeys, key)
			return key
		}
		// use existing key
		return allKeys[r.Intn(len(allKeys))]
	}

	var steps randTest
	for i := 0; i < size; i++ {
		step := randTestStep{op: r.Intn(opMax)}
		switch step.op {
		case opUpdate:
			step.key = genKey()
			step.value = make([]byte, 8)
			binary.BigEndian.PutUint64(step.value, uint64(i))
		case opGet, opDelete:
			step.key = genKey()
		}
		steps = append(steps, step)
	}
	return reflect.ValueOf(steps)
}

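// runRandTest executes a sequence of random trie operations, mirroring every
// update in a plain map, and reports whether all steps completed without error.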
func runRandTest(rt randTest) bool {
	triedb := NewDatabase(memorydb.New())

	tr, _ := New(common.Hash{}, triedb)
	values := make(map[string]string) // tracks content of the trie

	for i, step := range rt {
		fmt.Printf("{op: %d, key: common.Hex2Bytes(\"%x\"), value: common.Hex2Bytes(\"%x\")}, // step %d\n",
			step.op, step.key, step.value, i)
		switch step.op {
		case opUpdate:
			tr.Update(step.key, step.value)
			values[string(step.key)] = string(step.value)
		case opDelete:
			tr.Delete(step.key)
			delete(values, string(step.key))
		case opGet:
			v := tr.Get(step.key)
			want := values[string(step.key)]
			if string(v) != want {
				rt[i].err = fmt.Errorf("mismatch for key 0x%x, got 0x%x want 0x%x", step.key, v, want)
			}
		case opCommit:
			_, rt[i].err = tr.Commit(nil)
		case opHash:
			tr.Hash()
		case opReset:
			hash, err := tr.Commit(nil)
			if err != nil {
				rt[i].err = err
				return false
			}
			newtr, err := New(hash, triedb)
			if err != nil {
				rt[i].err = err
				return false
			}
			tr = newtr
		case opItercheckhash:
			checktr, _ := New(common.Hash{}, triedb)
			it := NewIterator(tr.NodeIterator(nil))
			for it.Next() {
				checktr.Update(it.Key, it.Value)
			}
			if tr.Hash() != checktr.Hash() {
				rt[i].err = fmt.Errorf("hash mismatch in opItercheckhash")
			}
		}
		// Abort the test on error.
		if rt[i].err != nil {
			return false
		}
	}
	return true
}

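// TestRandom drives runRandTest through testing/quick with generated operation
// sequences and dumps the failing sequence if a check fails.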
func TestRandom(t *testing.T) {
	if err := quick.Check(runRandTest, nil); err != nil {
		if cerr, ok := err.(*quick.CheckError); ok {
			t.Fatalf("random test iteration %d failed: %s", cerr.Count, spew.Sdump(cerr.In))
		}
		t.Fatal(err)
	}
}

func BenchmarkGet(b *testing.B)      { benchGet(b, false) }
func BenchmarkGetDB(b *testing.B)    { benchGet(b, true) }
func BenchmarkUpdateBE(b *testing.B) { benchUpdate(b, binary.BigEndian) }
func BenchmarkUpdateLE(b *testing.B) { benchUpdate(b, binary.LittleEndian) }

const benchElemCount = 20000

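// benchGet measures Get on a trie pre-filled with benchElemCount entries,
// optionally backed by a committed temporary on-disk database.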
func benchGet(b *testing.B, commit bool) {
	trie := new(Trie)
	if commit {
		_, tmpdb := tempDB()
		trie, _ = New(common.Hash{}, tmpdb)
	}
	k := make([]byte, 32)
	for i := 0; i < benchElemCount; i++ {
		binary.LittleEndian.PutUint64(k, uint64(i))
		trie.Update(k, k)
	}
	binary.LittleEndian.PutUint64(k, benchElemCount/2)
	if commit {
		trie.Commit(nil)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		trie.Get(k)
	}
	b.StopTimer()

	if commit {
		ldb := trie.db.diskdb.(*leveldb.Database)
		ldb.Close()
		os.RemoveAll(ldb.Path())
	}
}

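// benchUpdate measures Update throughput with 32-byte keys encoded in the given
// byte order.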
func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
	trie := newEmpty()
	k := make([]byte, 32)
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		e.PutUint64(k, uint64(i))
		trie.Update(k, k)
	}
	return trie
}

// Benchmarks the trie hashing. Since the trie caches the result of any operation,
// we cannot use b.N as the number of hashing rounds, since all rounds apart from
// the first one will be NOOP. As such, we'll use b.N as the number of accounts to
// insert into the trie before measuring the hashing.
// BenchmarkHash-6   288680   4561 ns/op   682 B/op   9 allocs/op
// BenchmarkHash-6   275095   4800 ns/op   685 B/op   9 allocs/op
// pure hasher:
// BenchmarkHash-6   319362   4230 ns/op   675 B/op   9 allocs/op
// BenchmarkHash-6   257460   4674 ns/op   689 B/op   9 allocs/op
// With hashing in-between and pure hasher:
// BenchmarkHash-6   225417   7150 ns/op   982 B/op   12 allocs/op
// BenchmarkHash-6   220378   6197 ns/op   983 B/op   12 allocs/op
// same with old hasher
// BenchmarkHash-6   229758   6437 ns/op   981 B/op   12 allocs/op
// BenchmarkHash-6   212610   7137 ns/op   986 B/op   12 allocs/op
func BenchmarkHash(b *testing.B) {
	// Create a realistic account trie to hash. We're first adding and hashing N
	// entries, then adding N more.
	addresses, accounts := makeAccounts(2 * b.N)
	// Insert the accounts into the trie and hash it
	trie := newEmpty()
	i := 0
	for ; i < len(addresses)/2; i++ {
		trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
	}
	trie.Hash()
	for ; i < len(addresses); i++ {
		trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
	}
	b.ResetTimer()
	b.ReportAllocs()
	//trie.hashRoot(nil, nil)
	trie.Hash()
}

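// account is a basic RLP-encodable account, used to generate realistic leaf
// payloads for the benchmarks and tests below.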
type account struct {
	Nonce   uint64
	Balance *big.Int
	Root    common.Hash
	Code    []byte
}

// Benchmarks the trie Commit following a Hash. Since the trie caches the result of any operation,
// we cannot use b.N as the number of hashing rounds, since all rounds apart from
// the first one will be NOOP. As such, we'll use b.N as the number of accounts to
// insert into the trie before measuring the hashing.
func BenchmarkCommitAfterHash(b *testing.B) {
	b.Run("no-onleaf", func(b *testing.B) {
		benchmarkCommitAfterHash(b, nil)
	})
	var a account
	onleaf := func(path []byte, leaf []byte, parent common.Hash) error {
		rlp.DecodeBytes(leaf, &a)
		return nil
	}
	b.Run("with-onleaf", func(b *testing.B) {
		benchmarkCommitAfterHash(b, onleaf)
	})
}

func benchmarkCommitAfterHash(b *testing.B, onleaf LeafCallback) {
	// Make the random benchmark deterministic
	addresses, accounts := makeAccounts(b.N)
	trie := newEmpty()
	for i := 0; i < len(addresses); i++ {
		trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
	}
	// Insert the accounts into the trie and hash it
	trie.Hash()
	b.ResetTimer()
	b.ReportAllocs()
	trie.Commit(onleaf)
}

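// TestTinyTrie checks the root hash after each of a handful of inserts and then
// verifies that rebuilding the trie from its iterator reproduces the same root.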
func TestTinyTrie(t *testing.T) {
	// Create a realistic account trie to hash
	_, accounts := makeAccounts(5)
	trie := newEmpty()
	trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3])
	if exp, root := common.HexToHash("8c6a85a4d9fda98feff88450299e574e5378e32391f75a055d470ac0653f1005"), trie.Hash(); exp != root {
		t.Errorf("1: got %x, exp %x", root, exp)
	}
	trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001338"), accounts[4])
	if exp, root := common.HexToHash("ec63b967e98a5720e7f720482151963982890d82c9093c0d486b7eb8883a66b1"), trie.Hash(); exp != root {
		t.Errorf("2: got %x, exp %x", root, exp)
	}
	trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001339"), accounts[4])
	if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root {
		t.Errorf("3: got %x, exp %x", root, exp)
	}
	checktr, _ := New(common.Hash{}, trie.db)
	it := NewIterator(trie.NodeIterator(nil))
	for it.Next() {
		checktr.Update(it.Key, it.Value)
	}
	if troot, itroot := trie.Hash(), checktr.Hash(); troot != itroot {
		t.Fatalf("hash mismatch in opItercheckhash, trie: %x, check: %x", troot, itroot)
	}
}

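// TestCommitAfterHash checks that committing a previously hashed trie produces
// the expected root, and that committing again returns the same root.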
func TestCommitAfterHash(t *testing.T) {
	// Create a realistic account trie to hash
	addresses, accounts := makeAccounts(1000)
	trie := newEmpty()
	for i := 0; i < len(addresses); i++ {
		trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
	}
	// Insert the accounts into the trie and hash it
	trie.Hash()
	trie.Commit(nil)
	root := trie.Hash()
	exp := common.HexToHash("72f9d3f3fe1e1dd7b8936442e7642aef76371472d94319900790053c493f3fe6")
	if exp != root {
		t.Errorf("got %x, exp %x", root, exp)
	}
	root, _ = trie.Commit(nil)
	if exp != root {
		t.Errorf("got %x, exp %x", root, exp)
	}
}

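// makeAccounts returns size deterministic, pseudo-random addresses together with
// matching RLP-encoded accounts.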
func makeAccounts(size int) (addresses [][20]byte, accounts [][]byte) {
	// Make the random benchmark deterministic
	random := rand.New(rand.NewSource(0))
	// Create a realistic account trie to hash
	addresses = make([][20]byte, size)
	for i := 0; i < len(addresses); i++ {
		data := make([]byte, 20)
		random.Read(data)
		copy(addresses[i][:], data)
	}
	accounts = make([][]byte, len(addresses))
	for i := 0; i < len(accounts); i++ {
		var (
			nonce = uint64(random.Int63())
			root  = emptyRoot
			code  = crypto.Keccak256(nil)
		)
		// The big.Rand function is not deterministic with regards to 64 vs 32 bit systems,
		// and will consume different amounts of data from the rand source.
		//balance = new(big.Int).Rand(random, new(big.Int).Exp(common.Big2, common.Big256, nil))
		// Therefore, we instead just read via byte buffer
		numBytes := random.Uint32() % 33 // [0, 32] bytes
		balanceBytes := make([]byte, numBytes)
		random.Read(balanceBytes)
		balance := new(big.Int).SetBytes(balanceBytes)
		data, _ := rlp.EncodeToBytes(&account{nonce, balance, root, code})
		accounts[i] = data
	}
	return addresses, accounts
}

// spongeDb is a dummy db backend which accumulates writes in a sponge
type spongeDb struct {
	sponge  hash.Hash
	id      string
	journal []string
}

func (s *spongeDb) Has(key []byte) (bool, error)             { panic("implement me") }
func (s *spongeDb) Get(key []byte) ([]byte, error)           { return nil, errors.New("no such elem") }
func (s *spongeDb) Delete(key []byte) error                  { panic("implement me") }
func (s *spongeDb) NewBatch() ethdb.Batch                    { return &spongeBatch{s} }
func (s *spongeDb) Stat(property string) (string, error)     { panic("implement me") }
func (s *spongeDb) Compact(start []byte, limit []byte) error { panic("implement me") }
func (s *spongeDb) Close() error                             { return nil }
func (s *spongeDb) Put(key []byte, value []byte) error {
	valbrief := value
	if len(valbrief) > 8 {
		valbrief = valbrief[:8]
	}
	s.journal = append(s.journal, fmt.Sprintf("%v: PUT([%x...], [%d bytes] %x...)\n", s.id, key[:8], len(value), valbrief))
	s.sponge.Write(key)
	s.sponge.Write(value)
	return nil
}
func (s *spongeDb) NewIterator(prefix []byte, start []byte) ethdb.Iterator { panic("implement me") }

// spongeBatch is a dummy batch which immediately writes to the underlying spongedb
type spongeBatch struct {
	db *spongeDb
}

func (b *spongeBatch) Put(key, value []byte) error {
	b.db.Put(key, value)
	return nil
}
func (b *spongeBatch) Delete(key []byte) error             { panic("implement me") }
func (b *spongeBatch) ValueSize() int                      { return 100 }
func (b *spongeBatch) Write() error                        { return nil }
func (b *spongeBatch) Reset()                              {}
func (b *spongeBatch) Replay(w ethdb.KeyValueWriter) error { return nil }

// TestCommitSequence tests that the trie.Commit operation writes the elements of the trie
// in the expected order, and calls the callbacks in the expected order.
// The test data was based on the 'master' code, and is basically random. It can be used
// to check whether changes to the trie modify the write order or data in any way.
func TestCommitSequence(t *testing.T) {
	for i, tc := range []struct {
		count              int
		expWriteSeqHash    []byte
		expCallbackSeqHash []byte
	}{
		{20, common.FromHex("873c78df73d60e59d4a2bcf3716e8bfe14554549fea2fc147cb54129382a8066"),
			common.FromHex("ff00f91ac05df53b82d7f178d77ada54fd0dca64526f537034a5dbe41b17df2a")},
		{200, common.FromHex("ba03d891bb15408c940eea5ee3d54d419595102648d02774a0268d892add9c8e"),
			common.FromHex("f3cd509064c8d319bbdd1c68f511850a902ad275e6ed5bea11547e23d492a926")},
		{2000, common.FromHex("f7a184f20df01c94f09537401d11e68d97ad0c00115233107f51b9c287ce60c7"),
			common.FromHex("ff795ea898ba1e4cfed4a33b4cf5535a347a02cf931f88d88719faf810f9a1c9")},
	} {
		addresses, accounts := makeAccounts(tc.count)
		// This spongeDb is used to check the sequence of disk-db-writes
		s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
		db := NewDatabase(s)
		trie, _ := New(common.Hash{}, db)
		// Another sponge is used to check the callback-sequence
		callbackSponge := sha3.NewLegacyKeccak256()
		// Fill the trie with elements
		for i := 0; i < tc.count; i++ {
			trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
		}
		// Flush trie -> database
		root, _ := trie.Commit(nil)
		// Flush memdb -> disk (sponge)
		db.Commit(root, false, func(c common.Hash) {
			// And spongify the callback-order
			callbackSponge.Write(c[:])
		})
		if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
			t.Errorf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
		}
		if got, exp := callbackSponge.Sum(nil), tc.expCallbackSeqHash; !bytes.Equal(got, exp) {
			t.Errorf("test %d, call back sequence wrong:\ngot: %x exp %x\n", i, got, exp)
		}
	}
}

// TestCommitSequenceRandomBlobs is identical to TestCommitSequence
// but uses random blobs instead of 'accounts'
func TestCommitSequenceRandomBlobs(t *testing.T) {
	for i, tc := range []struct {
		count              int
		expWriteSeqHash    []byte
		expCallbackSeqHash []byte
	}{
		{20, common.FromHex("8e4a01548551d139fa9e833ebc4e66fc1ba40a4b9b7259d80db32cff7b64ebbc"),
			common.FromHex("450238d73bc36dc6cc6f926987e5428535e64be403877c4560e238a52749ba24")},
		{200, common.FromHex("6869b4e7b95f3097a19ddb30ff735f922b915314047e041614df06958fc50554"),
			common.FromHex("0ace0b03d6cb8c0b82f6289ef5b1a1838306b455a62dafc63cada8e2924f2550")},
		{2000, common.FromHex("444200e6f4e2df49f77752f629a96ccf7445d4698c164f962bbd85a0526ef424"),
			common.FromHex("117d30dafaa62a1eed498c3dfd70982b377ba2b46dd3e725ed6120c80829e518")},
	} {
		prng := rand.New(rand.NewSource(int64(i)))
		// This spongeDb is used to check the sequence of disk-db-writes
		s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
		db := NewDatabase(s)
		trie, _ := New(common.Hash{}, db)
		// Another sponge is used to check the callback-sequence
		callbackSponge := sha3.NewLegacyKeccak256()
		// Fill the trie with elements
		for i := 0; i < tc.count; i++ {
			key := make([]byte, 32)
			var val []byte
			// 50% short elements, 50% large elements
			if prng.Intn(2) == 0 {
				val = make([]byte, 1+prng.Intn(32))
			} else {
				val = make([]byte, 1+prng.Intn(4096))
			}
			prng.Read(key)
			prng.Read(val)
			trie.Update(key, val)
		}
		// Flush trie -> database
		root, _ := trie.Commit(nil)
		// Flush memdb -> disk (sponge)
		db.Commit(root, false, func(c common.Hash) {
			// And spongify the callback-order
			callbackSponge.Write(c[:])
		})
		if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
			t.Fatalf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
		}
		if got, exp := callbackSponge.Sum(nil), tc.expCallbackSeqHash; !bytes.Equal(got, exp) {
			t.Fatalf("test %d, call back sequence wrong:\ngot: %x exp %x\n", i, got, exp)
		}
	}
}

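// TestCommitSequenceStackTrie compares a regular trie commit against a stacktrie
// commit over the same ordered key/value set: both the resulting root and the
// sequence of database writes must match.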
func TestCommitSequenceStackTrie(t *testing.T) {
	for count := 1; count < 200; count++ {
		prng := rand.New(rand.NewSource(int64(count)))
		// This spongeDb is used to check the sequence of disk-db-writes
		s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"}
		db := NewDatabase(s)
		trie, _ := New(common.Hash{}, db)
		// Another sponge is used for the stacktrie commits
		stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
		stTrie := NewStackTrie(stackTrieSponge)
		// Fill the trie with elements
		for i := 1; i < count; i++ {
			// For the stack trie, we need to do inserts in proper order
			key := make([]byte, 32)
			binary.BigEndian.PutUint64(key, uint64(i))
			var val []byte
			// 50% short elements, 50% large elements
			if prng.Intn(2) == 0 {
				val = make([]byte, 1+prng.Intn(32))
			} else {
				val = make([]byte, 1+prng.Intn(1024))
			}
			prng.Read(val)
			trie.TryUpdate(key, common.CopyBytes(val))
			stTrie.TryUpdate(key, common.CopyBytes(val))
		}
		// Flush trie -> database
		root, _ := trie.Commit(nil)
		// Flush memdb -> disk (sponge)
		db.Commit(root, false, nil)
		// And flush stacktrie -> disk
		stRoot, err := stTrie.Commit()
		if err != nil {
			t.Fatalf("Failed to commit stack trie %v", err)
		}
		if stRoot != root {
			t.Fatalf("root wrong, got %x exp %x", stRoot, root)
		}
		if got, exp := stackTrieSponge.sponge.Sum(nil), s.sponge.Sum(nil); !bytes.Equal(got, exp) {
			// Show the journal
			t.Logf("Expected:")
			for i, v := range s.journal {
				t.Logf("op %d: %v", i, v)
			}
			t.Logf("Stacktrie:")
			for i, v := range stackTrieSponge.journal {
				t.Logf("op %d: %v", i, v)
			}
			t.Fatalf("test %d, disk write sequence wrong:\ngot %x exp %x\n", count, got, exp)
		}
	}
}

// TestCommitSequenceSmallRoot tests that a trie which is essentially only a
// small (<32 byte) shortnode with an included value is properly committed to a
// database.
// This case might not matter, since in practice, all keys are 32 bytes, which means
// that even a small trie which contains a leaf will have an extension making it
// not fit into 32 bytes, rlp-encoded. However, it's still the correct thing to do.
func TestCommitSequenceSmallRoot(t *testing.T) {
	s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"}
	db := NewDatabase(s)
	trie, _ := New(common.Hash{}, db)
	// Another sponge is used for the stacktrie commits
	stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
	stTrie := NewStackTrie(stackTrieSponge)
	// Add a single small-element to the trie(s)
	key := make([]byte, 5)
	key[0] = 1
	trie.TryUpdate(key, []byte{0x1})
	stTrie.TryUpdate(key, []byte{0x1})
	// Flush trie -> database
	root, _ := trie.Commit(nil)
	// Flush memdb -> disk (sponge)
	db.Commit(root, false, nil)
	// And flush stacktrie -> disk
	stRoot, err := stTrie.Commit()
	if err != nil {
		t.Fatalf("Failed to commit stack trie %v", err)
	}
	if stRoot != root {
		t.Fatalf("root wrong, got %x exp %x", stRoot, root)
	}
	fmt.Printf("root: %x\n", stRoot)
	if got, exp := stackTrieSponge.sponge.Sum(nil), s.sponge.Sum(nil); !bytes.Equal(got, exp) {
		t.Fatalf("test, disk write sequence wrong:\ngot %x exp %x\n", got, exp)
	}
}

// BenchmarkHashFixedSize benchmarks the hashing of a fixed number of updates to a trie.
// This benchmark is meant to capture the difference on efficiency of small versus large changes. Typically,
// storage tries are small (a couple of entries), whereas the full post-block account trie update is large (a couple
// of thousand entries)
func BenchmarkHashFixedSize(b *testing.B) {
	b.Run("10", func(b *testing.B) {
		b.StopTimer()
		acc, add := makeAccounts(20)
		for i := 0; i < b.N; i++ {
			benchmarkHashFixedSize(b, acc, add)
		}
	})
	b.Run("100", func(b *testing.B) {
		b.StopTimer()
		acc, add := makeAccounts(100)
		for i := 0; i < b.N; i++ {
			benchmarkHashFixedSize(b, acc, add)
		}
	})

	b.Run("1K", func(b *testing.B) {
		b.StopTimer()
		acc, add := makeAccounts(1000)
		for i := 0; i < b.N; i++ {
			benchmarkHashFixedSize(b, acc, add)
		}
	})
	b.Run("10K", func(b *testing.B) {
		b.StopTimer()
		acc, add := makeAccounts(10000)
		for i := 0; i < b.N; i++ {
			benchmarkHashFixedSize(b, acc, add)
		}
	})
	b.Run("100K", func(b *testing.B) {
		b.StopTimer()
		acc, add := makeAccounts(100000)
		for i := 0; i < b.N; i++ {
			benchmarkHashFixedSize(b, acc, add)
		}
	})
}

func benchmarkHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
	b.ReportAllocs()
	trie := newEmpty()
	for i := 0; i < len(addresses); i++ {
		trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
	}
	// Insert the accounts into the trie and hash it
	b.StartTimer()
	trie.Hash()
	b.StopTimer()
}

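// BenchmarkCommitAfterHashFixedSize benchmarks the Commit (after Hash) of a fixed
// number of updates to a trie; see BenchmarkHashFixedSize for the rationale behind
// the fixed sizes.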
func BenchmarkCommitAfterHashFixedSize(b *testing.B) {
	b.Run("10", func(b *testing.B) {
		b.StopTimer()
		acc, add := makeAccounts(20)
		for i := 0; i < b.N; i++ {
			benchmarkCommitAfterHashFixedSize(b, acc, add)
		}
	})
	b.Run("100", func(b *testing.B) {
		b.StopTimer()
		acc, add := makeAccounts(100)
		for i := 0; i < b.N; i++ {
			benchmarkCommitAfterHashFixedSize(b, acc, add)
		}
	})

	b.Run("1K", func(b *testing.B) {
		b.StopTimer()
		acc, add := makeAccounts(1000)
		for i := 0; i < b.N; i++ {
			benchmarkCommitAfterHashFixedSize(b, acc, add)
		}
	})
	b.Run("10K", func(b *testing.B) {
		b.StopTimer()
		acc, add := makeAccounts(10000)
		for i := 0; i < b.N; i++ {
			benchmarkCommitAfterHashFixedSize(b, acc, add)
		}
	})
	b.Run("100K", func(b *testing.B) {
		b.StopTimer()
		acc, add := makeAccounts(100000)
		for i := 0; i < b.N; i++ {
			benchmarkCommitAfterHashFixedSize(b, acc, add)
		}
	})
}

func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
	b.ReportAllocs()
	trie := newEmpty()
	for i := 0; i < len(addresses); i++ {
		trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
	}
	// Insert the accounts into the trie and hash it
	trie.Hash()
	b.StartTimer()
	trie.Commit(nil)
	b.StopTimer()
}

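// BenchmarkDerefRootFixedSize benchmarks dereferencing a committed root for the
// same fixed trie sizes as the benchmarks above.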
func BenchmarkDerefRootFixedSize(b *testing.B) {
	b.Run("10", func(b *testing.B) {
		b.StopTimer()
		acc, add := makeAccounts(20)
		for i := 0; i < b.N; i++ {
			benchmarkDerefRootFixedSize(b, acc, add)
		}
	})
	b.Run("100", func(b *testing.B) {
		b.StopTimer()
		acc, add := makeAccounts(100)
		for i := 0; i < b.N; i++ {
			benchmarkDerefRootFixedSize(b, acc, add)
		}
	})

	b.Run("1K", func(b *testing.B) {
		b.StopTimer()
		acc, add := makeAccounts(1000)
		for i := 0; i < b.N; i++ {
			benchmarkDerefRootFixedSize(b, acc, add)
		}
	})
	b.Run("10K", func(b *testing.B) {
		b.StopTimer()
		acc, add := makeAccounts(10000)
		for i := 0; i < b.N; i++ {
			benchmarkDerefRootFixedSize(b, acc, add)
		}
	})
	b.Run("100K", func(b *testing.B) {
		b.StopTimer()
		acc, add := makeAccounts(100000)
		for i := 0; i < b.N; i++ {
			benchmarkDerefRootFixedSize(b, acc, add)
		}
	})
}

func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
	b.ReportAllocs()
	trie := newEmpty()
	for i := 0; i < len(addresses); i++ {
		trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
	}
	h := trie.Hash()
	trie.Commit(nil)
	b.StartTimer()
	trie.db.Dereference(h)
	b.StopTimer()
}

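// tempDB creates a LevelDB-backed trie database in a fresh temporary directory;
// callers are responsible for removing the directory once done.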
func tempDB() (string, *Database) {
	dir, err := ioutil.TempDir("", "trie-bench")
	if err != nil {
		panic(fmt.Sprintf("can't create temporary directory: %v", err))
	}
	diskdb, err := leveldb.New(dir, 256, 0, "", false)
	if err != nil {
		panic(fmt.Sprintf("can't create temporary database: %v", err))
	}
	return dir, NewDatabase(diskdb)
}

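// String-keyed convenience wrappers around the trie API, used throughout the tests.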
func getString(trie *Trie, k string) []byte {
	return trie.Get([]byte(k))
}

func updateString(trie *Trie, k, v string) {
	trie.Update([]byte(k), []byte(v))
}

func deleteString(trie *Trie, k string) {
	trie.Delete([]byte(k))
}

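// TestDecodeNode feeds random data into decodeNode to make sure it copes with
// arbitrary input without panicking.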
func TestDecodeNode(t *testing.T) {
	t.Parallel()
	var (
		hash  = make([]byte, 20)
		elems = make([]byte, 20)
	)
	for i := 0; i < 5000000; i++ {
		rand.Read(hash)
		rand.Read(elems)
		decodeNode(hash, elems)
	}
}