2020-12-14 11:27:15 +02:00
|
|
|
// Copyright 2020 The go-ethereum Authors
|
|
|
|
// This file is part of the go-ethereum library.
|
|
|
|
//
|
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
package rangeproof
|
|
|
|
|
|
|
|
import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/trie"
	"golang.org/x/exp/slices"
)
|
|
|
|
|
|
|
|
// kv is a trie key/value tuple used as fuzzing test data. The t flag is not
// read by this fuzzer; it is kept for parity with the trie package's test
// helpers that share this shape.
type kv struct {
	k, v []byte
	t    bool
}
|
|
|
|
|
|
|
|
type fuzzer struct {
|
|
|
|
input io.Reader
|
|
|
|
exhausted bool
|
|
|
|
}
|
|
|
|
|
|
|
|
func (f *fuzzer) randBytes(n int) []byte {
|
|
|
|
r := make([]byte, n)
|
|
|
|
if _, err := f.input.Read(r); err != nil {
|
|
|
|
f.exhausted = true
|
|
|
|
}
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
|
|
|
|
func (f *fuzzer) readInt() uint64 {
|
|
|
|
var x uint64
|
|
|
|
if err := binary.Read(f.input, binary.LittleEndian, &x); err != nil {
|
|
|
|
f.exhausted = true
|
|
|
|
}
|
|
|
|
return x
|
|
|
|
}
|
|
|
|
|
|
|
|
func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) {
|
all: activate pbss as experimental feature (#26274)
* all: activate pbss
* core/rawdb: fix compilation error
* cma, core, eth, les, trie: address comments
* cmd, core, eth, trie: polish code
* core, cmd, eth: address comments
* cmd, core, eth, les, light, tests: address comment
* cmd/utils: shorten log message
* trie/triedb/pathdb: limit node buffer size to 1gb
* cmd/utils: fix opening non-existing db
* cmd/utils: rename flag name
* cmd, core: group chain history flags and fix tests
* core, eth, trie: fix memory leak in snapshot generation
* cmd, eth, internal: deprecate flags
* all: enable state tests for pathdb, fixes
* cmd, core: polish code
* trie/triedb/pathdb: limit the node buffer size to 256mb
---------
Co-authored-by: Martin Holst Swende <martin@swende.se>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2023-08-11 03:21:36 +08:00
|
|
|
trie := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))
|
2020-12-14 11:27:15 +02:00
|
|
|
vals := make(map[string]*kv)
|
|
|
|
size := f.readInt()
|
|
|
|
// Fill it with some fluff
|
|
|
|
for i := byte(0); i < byte(size); i++ {
|
|
|
|
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
|
|
|
|
value2 := &kv{common.LeftPadBytes([]byte{i + 10}, 32), []byte{i}, false}
|
2023-04-20 18:57:24 +08:00
|
|
|
trie.MustUpdate(value.k, value.v)
|
|
|
|
trie.MustUpdate(value2.k, value2.v)
|
2020-12-14 11:27:15 +02:00
|
|
|
vals[string(value.k)] = value
|
|
|
|
vals[string(value2.k)] = value2
|
|
|
|
}
|
|
|
|
if f.exhausted {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
// And now fill with some random
|
|
|
|
for i := 0; i < n; i++ {
|
|
|
|
k := f.randBytes(32)
|
|
|
|
v := f.randBytes(20)
|
|
|
|
value := &kv{k, v, false}
|
2023-04-20 18:57:24 +08:00
|
|
|
trie.MustUpdate(k, v)
|
2020-12-14 11:27:15 +02:00
|
|
|
vals[string(k)] = value
|
|
|
|
if f.exhausted {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return trie, vals
|
|
|
|
}
|
|
|
|
|
|
|
|
func (f *fuzzer) fuzz() int {
|
|
|
|
maxSize := 200
|
|
|
|
tr, vals := f.randomTrie(1 + int(f.readInt())%maxSize)
|
|
|
|
if f.exhausted {
|
|
|
|
return 0 // input too short
|
|
|
|
}
|
2023-06-19 05:41:31 -04:00
|
|
|
var entries []*kv
|
2020-12-14 11:27:15 +02:00
|
|
|
for _, kv := range vals {
|
|
|
|
entries = append(entries, kv)
|
|
|
|
}
|
|
|
|
if len(entries) <= 1 {
|
|
|
|
return 0
|
|
|
|
}
|
2023-08-12 01:04:12 +03:00
|
|
|
slices.SortFunc(entries, func(a, b *kv) int {
|
|
|
|
return bytes.Compare(a.k, b.k)
|
2023-06-19 05:41:31 -04:00
|
|
|
})
|
2020-12-14 11:27:15 +02:00
|
|
|
|
|
|
|
var ok = 0
|
|
|
|
for {
|
|
|
|
start := int(f.readInt() % uint64(len(entries)))
|
|
|
|
end := 1 + int(f.readInt()%uint64(len(entries)-1))
|
|
|
|
testcase := int(f.readInt() % uint64(6))
|
|
|
|
index := int(f.readInt() & 0xFFFFFFFF)
|
|
|
|
index2 := int(f.readInt() & 0xFFFFFFFF)
|
|
|
|
if f.exhausted {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
proof := memorydb.New()
|
2023-06-19 22:28:40 +08:00
|
|
|
if err := tr.Prove(entries[start].k, proof); err != nil {
|
2020-12-14 11:27:15 +02:00
|
|
|
panic(fmt.Sprintf("Failed to prove the first node %v", err))
|
|
|
|
}
|
2023-06-19 22:28:40 +08:00
|
|
|
if err := tr.Prove(entries[end-1].k, proof); err != nil {
|
2020-12-14 11:27:15 +02:00
|
|
|
panic(fmt.Sprintf("Failed to prove the last node %v", err))
|
|
|
|
}
|
|
|
|
var keys [][]byte
|
|
|
|
var vals [][]byte
|
|
|
|
for i := start; i < end; i++ {
|
|
|
|
keys = append(keys, entries[i].k)
|
|
|
|
vals = append(vals, entries[i].v)
|
|
|
|
}
|
|
|
|
if len(keys) == 0 {
|
|
|
|
return 0
|
|
|
|
}
|
trie: make rhs-proof align with last key in range proofs (#28311)
During snap-sync, we request ranges of values: either a range of accounts or a range of storage values. For any large trie, e.g. the main account trie or a large storage trie, we cannot fetch everything at once.
Short version; we split it up and request in multiple stages. To do so, we use an origin field, to say "Give me all storage key/values where key > 0x20000000000000000". When the server fulfils this, the server provides the first key after origin, let's say 0x2e030000000000000 -- never providing the exact origin. However, the client-side needs to be able to verify that the 0x2e03.. indeed is the first one after 0x2000.., and therefore the attached proof concerns the origin, not the first key.
So, short-short version: the left-hand side of the proof relates to the origin, and is free-standing from the first leaf.
On the other hand, (pun intended), the right-hand side, there's no such 'gap' between "along what path does the proof walk" and the last provided leaf. The proof must prove the last element (unless there are no elements).
Therefore, we can simplify the semantics for trie.VerifyRangeProof by removing an argument. This doesn't make much difference in practice, but makes it so that we can remove some tests. The reason I am raising this is that the upcoming stacktrie-based verifier does not support such fancy features as standalone right-hand borders.
2023-10-13 16:05:29 +02:00
|
|
|
var first = keys[0]
|
2020-12-14 11:27:15 +02:00
|
|
|
testcase %= 6
|
|
|
|
switch testcase {
|
|
|
|
case 0:
|
|
|
|
// Modified key
|
|
|
|
keys[index%len(keys)] = f.randBytes(32) // In theory it can't be same
|
|
|
|
case 1:
|
|
|
|
// Modified val
|
|
|
|
vals[index%len(vals)] = f.randBytes(20) // In theory it can't be same
|
|
|
|
case 2:
|
|
|
|
// Gapped entry slice
|
|
|
|
index = index % len(keys)
|
|
|
|
keys = append(keys[:index], keys[index+1:]...)
|
|
|
|
vals = append(vals[:index], vals[index+1:]...)
|
|
|
|
case 3:
|
|
|
|
// Out of order
|
|
|
|
index1 := index % len(keys)
|
|
|
|
index2 := index2 % len(keys)
|
|
|
|
keys[index1], keys[index2] = keys[index2], keys[index1]
|
|
|
|
vals[index1], vals[index2] = vals[index2], vals[index1]
|
|
|
|
case 4:
|
|
|
|
// Set random key to nil, do nothing
|
|
|
|
keys[index%len(keys)] = nil
|
|
|
|
case 5:
|
|
|
|
// Set random value to nil, deletion
|
|
|
|
vals[index%len(vals)] = nil
|
|
|
|
|
|
|
|
// Other cases:
|
|
|
|
// Modify something in the proof db
|
|
|
|
// add stuff to proof db
|
|
|
|
// drop stuff from proof db
|
|
|
|
}
|
|
|
|
if f.exhausted {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
ok = 1
|
|
|
|
//nodes, subtrie
|
trie: make rhs-proof align with last key in range proofs (#28311)
During snap-sync, we request ranges of values: either a range of accounts or a range of storage values. For any large trie, e.g. the main account trie or a large storage trie, we cannot fetch everything at once.
Short version; we split it up and request in multiple stages. To do so, we use an origin field, to say "Give me all storage key/values where key > 0x20000000000000000". When the server fulfils this, the server provides the first key after origin, let's say 0x2e030000000000000 -- never providing the exact origin. However, the client-side needs to be able to verify that the 0x2e03.. indeed is the first one after 0x2000.., and therefore the attached proof concerns the origin, not the first key.
So, short-short version: the left-hand side of the proof relates to the origin, and is free-standing from the first leaf.
On the other hand, (pun intended), the right-hand side, there's no such 'gap' between "along what path does the proof walk" and the last provided leaf. The proof must prove the last element (unless there are no elements).
Therefore, we can simplify the semantics for trie.VerifyRangeProof by removing an argument. This doesn't make much difference in practice, but makes it so that we can remove some tests. The reason I am raising this is that the upcoming stacktrie-based verifier does not support such fancy features as standalone right-hand borders.
2023-10-13 16:05:29 +02:00
|
|
|
hasMore, err := trie.VerifyRangeProof(tr.Hash(), first, keys, vals, proof)
|
2020-12-14 11:27:15 +02:00
|
|
|
if err != nil {
|
|
|
|
if hasMore {
|
|
|
|
panic("err != nil && hasMore == true")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ok
|
|
|
|
}
|
|
|
|
|
2022-10-11 09:37:00 +02:00
|
|
|
// Fuzz is the fuzzing entry-point.
|
2020-12-14 11:27:15 +02:00
|
|
|
// The function must return
|
2022-09-10 13:25:40 +02:00
|
|
|
//
|
|
|
|
// - 1 if the fuzzer should increase priority of the
|
|
|
|
// given input during subsequent fuzzing (for example, the input is lexically
|
|
|
|
// correct and was parsed successfully);
|
|
|
|
// - -1 if the input must not be added to corpus even if gives new coverage; and
|
|
|
|
// - 0 otherwise
|
|
|
|
//
|
|
|
|
// other values are reserved for future use.
|
2023-10-18 15:01:16 +02:00
|
|
|
func fuzz(input []byte) int {
|
2020-12-14 11:27:15 +02:00
|
|
|
if len(input) < 100 {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
r := bytes.NewReader(input)
|
|
|
|
f := fuzzer{
|
|
|
|
input: r,
|
|
|
|
exhausted: false,
|
|
|
|
}
|
|
|
|
return f.fuzz()
|
|
|
|
}
|