// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
)

// Prove constructs a merkle proof for key. The result contains all encoded nodes
// on the path to the value at key. The value itself is also included in the last
// node and can be retrieved by verifying the proof.
//
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root node), ending
// with the node that proves the absence of the key.
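//
// Illustrative sketch of the intended usage (not part of the API contract; it
// assumes tr is a *Trie and that the proof is collected into an in-memory
// key/value store such as the one in ethdb/memorydb, then checked with
// VerifyProof):
//
//	proofDb := memorydb.New()
//	if err := tr.Prove(key, proofDb); err != nil {
//		return err
//	}
//	val, err := VerifyProof(tr.Hash(), key, proofDb)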
func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
	// Short circuit if the trie is already committed and not usable.
	if t.committed {
		return ErrCommitted
	}
	// Collect all nodes on the path to key.
	var (
		prefix []byte
		nodes  []node
		tn     = t.root
	)
	key = keybytesToHex(key)
	for len(key) > 0 && tn != nil {
		switch n := tn.(type) {
		case *shortNode:
			if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) {
				// The trie doesn't contain the key.
				tn = nil
			} else {
				tn = n.Val
				prefix = append(prefix, n.Key...)
				key = key[len(n.Key):]
			}
			nodes = append(nodes, n)
		case *fullNode:
			tn = n.Children[key[0]]
			prefix = append(prefix, key[0])
			key = key[1:]
			nodes = append(nodes, n)
		case hashNode:
			// Retrieve the specified node from the underlying node reader.
			// trie.resolveAndTrack is not used since that function tracks the
			// loaded blob, which is not required here: the loaded nodes are
			// never linked to the trie, and tracking them could lead to
			// out-of-memory issues.
			blob, err := t.reader.node(prefix, common.BytesToHash(n))
			if err != nil {
				log.Error("Unhandled trie error in Trie.Prove", "err", err)
				return err
			}
			// The raw-blob format nodes are loaded either from the
			// clean cache or the database; they are all independent
			// copies and are safe to decode with the unsafe decoder.
			tn = mustDecodeNodeUnsafe(n, blob)
		default:
			panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
		}
	}
	hasher := newHasher(false)
	defer returnHasherToPool(hasher)

	for i, n := range nodes {
		var hn node
		n, hn = hasher.proofHash(n)
		if hash, ok := hn.(hashNode); ok || i == 0 {
			// If the node's database encoding is a hash (or is the
			// root node), it becomes a proof element.
			enc := nodeToBytes(n)
			if !ok {
				hash = hasher.hashData(enc)
			}
			proofDb.Put(hash, enc)
		}
	}
	return nil
}

// Prove constructs a merkle proof for key. The result contains all encoded nodes
// on the path to the value at key. The value itself is also included in the last
// node and can be retrieved by verifying the proof.
//
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root node), ending
// with the node that proves the absence of the key.
func (t *StateTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
	return t.trie.Prove(key, proofDb)
}

// VerifyProof checks merkle proofs. The given proof must contain the value for
// key in a trie with the given root hash. VerifyProof returns an error if the
// proof contains invalid trie nodes or the wrong value.
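//
// Illustrative sketch (assumes proofDb was filled by a prior Prove call for the
// same key against a trie whose root hash is root):
//
//	val, err := VerifyProof(root, key, proofDb)
//	if err != nil {
//		// the proof is malformed or doesn't match root
//	}
//	if val == nil {
//		// valid proof of absence: the trie doesn't contain key
//	}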
func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.KeyValueReader) (value []byte, err error) {
	key = keybytesToHex(key)
	wantHash := rootHash
	for i := 0; ; i++ {
		buf, _ := proofDb.Get(wantHash[:])
		if buf == nil {
			return nil, fmt.Errorf("proof node %d (hash %064x) missing", i, wantHash)
		}
		n, err := decodeNode(wantHash[:], buf)
		if err != nil {
			return nil, fmt.Errorf("bad proof node %d: %v", i, err)
		}
		keyrest, cld := get(n, key, true)
		switch cld := cld.(type) {
		case nil:
			// The trie doesn't contain the key.
			return nil, nil
		case hashNode:
			key = keyrest
			copy(wantHash[:], cld)
		case valueNode:
			return cld, nil
		}
	}
}

// proofToPath converts a merkle proof to a trie node path. The main purpose of
// this function is recovering a node path from the merkle proof stream. All
// necessary nodes will be resolved and the remaining parts are left as hashnodes.
//
// The given edge proof is allowed to be an existent or non-existent proof.
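//
// On success it returns the root of the partially resolved trie together with
// the value found at key; for a non-existence proof (only accepted when
// allowNonExistent is true) the returned value is nil.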
func proofToPath(rootHash common.Hash, root node, key []byte, proofDb ethdb.KeyValueReader, allowNonExistent bool) (node, []byte, error) {
	// resolveNode retrieves and resolves a trie node from the merkle proof stream.
	resolveNode := func(hash common.Hash) (node, error) {
		buf, _ := proofDb.Get(hash[:])
		if buf == nil {
			return nil, fmt.Errorf("proof node (hash %064x) missing", hash)
		}
		n, err := decodeNode(hash[:], buf)
		if err != nil {
			return nil, fmt.Errorf("bad proof node %v", err)
		}
		return n, err
	}
	// If the root node is not given, resolve it first.
	// The root node must be included in the proof.
	if root == nil {
		n, err := resolveNode(rootHash)
		if err != nil {
			return nil, nil, err
		}
		root = n
	}
	var (
		err           error
		child, parent node
		keyrest       []byte
		valnode       []byte
	)
	key, parent = keybytesToHex(key), root
	for {
		keyrest, child = get(parent, key, false)
		switch cld := child.(type) {
		case nil:
			// The trie doesn't contain the key. It's possible
			// the proof is a non-existence proof, but at least
			// we can prove all resolved nodes are correct, which
			// is enough for range proving.
			if allowNonExistent {
				return root, nil, nil
			}
			return nil, nil, errors.New("the node is not contained in trie")
		case *shortNode:
			key, parent = keyrest, child // Already resolved
			continue
		case *fullNode:
			key, parent = keyrest, child // Already resolved
			continue
		case hashNode:
			child, err = resolveNode(common.BytesToHash(cld))
			if err != nil {
				return nil, nil, err
			}
		case valueNode:
			valnode = cld
		}
		// Link the parent and child.
		switch pnode := parent.(type) {
		case *shortNode:
			pnode.Val = child
		case *fullNode:
			pnode.Children[key[0]] = child
		default:
			panic(fmt.Sprintf("%T: invalid node: %v", pnode, pnode))
		}
		if len(valnode) > 0 {
			return root, valnode, nil // The whole path is resolved
		}
		key, parent = keyrest, child
	}
}

// unsetInternal removes all internal node references (hashnode, embedded node).
// It should be called after a trie is constructed with two edge paths. Also
// the given boundary keys must be the ones used to construct the edge paths.
//
// It's the key step for range proof. All visited nodes should be marked dirty
// since the node content might be modified. Besides, it can happen that some
// fullnodes only have one child, which is disallowed. But if the proof is valid,
// the missing children will be filled; otherwise it will be thrown out anyway.
//
// Note we have the assumption here that the given boundary keys are different
// and right is larger than left.
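//
// Illustrative sketch (hypothetical nibbles): if left and right first diverge
// at a fullnode, say at nibbles 0x1 and 0x4, then children 0x2 and 0x3 are
// cleared outright and the two edge subtrees under 0x1 and 0x4 are trimmed
// further by unset, so everything strictly inside the boundaries has to be
// re-supplied by the given leaf range itself.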
func unsetInternal(n node, left []byte, right []byte) (bool, error) {
	left, right = keybytesToHex(left), keybytesToHex(right)

	// Step down to the fork point. There are two scenarios that can happen:
	// - the fork point is a shortnode: either the key of left proof or
	//   right proof doesn't match with shortnode's key.
	// - the fork point is a fullnode: both two edge proofs are allowed
	//   to point to a non-existent key.
	var (
		pos    = 0
		parent node

		// fork indicator, 0 means no fork, -1 means proof is less, 1 means proof is greater
		shortForkLeft, shortForkRight int
	)
findFork:
	for {
		switch rn := (n).(type) {
		case *shortNode:
			rn.flags = nodeFlag{dirty: true}

			// If either the key of left proof or right proof doesn't match with
			// the shortnode, stop here and the fork point is the shortnode.
			if len(left)-pos < len(rn.Key) {
				shortForkLeft = bytes.Compare(left[pos:], rn.Key)
			} else {
				shortForkLeft = bytes.Compare(left[pos:pos+len(rn.Key)], rn.Key)
			}
			if len(right)-pos < len(rn.Key) {
				shortForkRight = bytes.Compare(right[pos:], rn.Key)
			} else {
				shortForkRight = bytes.Compare(right[pos:pos+len(rn.Key)], rn.Key)
			}
			if shortForkLeft != 0 || shortForkRight != 0 {
				break findFork
			}
			parent = n
			n, pos = rn.Val, pos+len(rn.Key)
		case *fullNode:
			rn.flags = nodeFlag{dirty: true}

			// If either the node pointed to by left proof or right proof is nil,
			// stop here and the fork point is the fullnode.
			leftnode, rightnode := rn.Children[left[pos]], rn.Children[right[pos]]
			if leftnode == nil || rightnode == nil || leftnode != rightnode {
				break findFork
			}
			parent = n
			n, pos = rn.Children[left[pos]], pos+1
		default:
			panic(fmt.Sprintf("%T: invalid node: %v", n, n))
		}
	}
	switch rn := n.(type) {
	case *shortNode:
		// There are five possible scenarios:
		// - both proofs are less than the trie path => no valid range
		// - both proofs are greater than the trie path => no valid range
		// - left proof is less and right proof is greater => valid range, unset the shortnode entirely
		// - left proof points to the shortnode, but right proof is greater
		// - right proof points to the shortnode, but left proof is less
		if shortForkLeft == -1 && shortForkRight == -1 {
			return false, errors.New("empty range")
		}
		if shortForkLeft == 1 && shortForkRight == 1 {
			return false, errors.New("empty range")
		}
		if shortForkLeft != 0 && shortForkRight != 0 {
			// The fork point is the root node, unset the entire trie
			if parent == nil {
				return true, nil
			}
			parent.(*fullNode).Children[left[pos-1]] = nil
			return false, nil
		}
		// Only one proof points to a non-existent key.
		if shortForkRight != 0 {
			if _, ok := rn.Val.(valueNode); ok {
				// The fork point is the root node, unset the entire trie
				if parent == nil {
					return true, nil
				}
				parent.(*fullNode).Children[left[pos-1]] = nil
				return false, nil
			}
			return false, unset(rn, rn.Val, left[pos:], len(rn.Key), false)
		}
		if shortForkLeft != 0 {
			if _, ok := rn.Val.(valueNode); ok {
				// The fork point is the root node, unset the entire trie
				if parent == nil {
					return true, nil
				}
				parent.(*fullNode).Children[right[pos-1]] = nil
				return false, nil
			}
			return false, unset(rn, rn.Val, right[pos:], len(rn.Key), true)
		}
		return false, nil
	case *fullNode:
		// Unset all internal nodes within the fork point.
		for i := left[pos] + 1; i < right[pos]; i++ {
			rn.Children[i] = nil
		}
		if err := unset(rn, rn.Children[left[pos]], left[pos:], 1, false); err != nil {
			return false, err
		}
		if err := unset(rn, rn.Children[right[pos]], right[pos:], 1, true); err != nil {
			return false, err
		}
		return false, nil
	default:
		panic(fmt.Sprintf("%T: invalid node: %v", n, n))
	}
}

// unset removes all internal node references on either the leftmost or rightmost side.
// It can encounter these scenarios:
//
//   - The given path exists in the trie: unset the associated nodes in the
//     specified direction
//   - The given path does not exist in the trie:
//     - the fork point is a fullnode, the corresponding child pointed to by the
//       path is nil, return
//     - the fork point is a shortnode, the shortnode is included in the range,
//       keep the entire branch and return.
//     - the fork point is a shortnode, the shortnode is excluded from the range,
//       unset the entire branch.
func unset(parent node, child node, key []byte, pos int, removeLeft bool) error {
	switch cld := child.(type) {
	case *fullNode:
		if removeLeft {
			for i := 0; i < int(key[pos]); i++ {
				cld.Children[i] = nil
			}
			cld.flags = nodeFlag{dirty: true}
		} else {
			for i := key[pos] + 1; i < 16; i++ {
				cld.Children[i] = nil
			}
			cld.flags = nodeFlag{dirty: true}
		}
		return unset(cld, cld.Children[key[pos]], key, pos+1, removeLeft)
	case *shortNode:
		if len(key[pos:]) < len(cld.Key) || !bytes.Equal(cld.Key, key[pos:pos+len(cld.Key)]) {
			// Found the fork point, it's a non-existent branch.
			if removeLeft {
				if bytes.Compare(cld.Key, key[pos:]) < 0 {
					// The key of the fork shortnode is less than the path
					// (it belongs to the range), unset the entire
					// branch. The parent must be a fullnode.
					fn := parent.(*fullNode)
					fn.Children[key[pos-1]] = nil
				}
				//else {
				// The key of the fork shortnode is greater than the
				// path (it doesn't belong to the range), keep
				// it with the cached hash available.
				//}
			} else {
				if bytes.Compare(cld.Key, key[pos:]) > 0 {
					// The key of the fork shortnode is greater than the
					// path (it belongs to the range), unset the entire
					// branch. The parent must be a fullnode.
					fn := parent.(*fullNode)
					fn.Children[key[pos-1]] = nil
				}
				//else {
				// The key of the fork shortnode is less than the
				// path (it doesn't belong to the range), keep
				// it with the cached hash available.
				//}
			}
			return nil
		}
		if _, ok := cld.Val.(valueNode); ok {
			fn := parent.(*fullNode)
			fn.Children[key[pos-1]] = nil
			return nil
		}
		cld.flags = nodeFlag{dirty: true}
		return unset(cld, cld.Val, key, pos+len(cld.Key), removeLeft)
	case nil:
		// If the node is nil, then it's a child of the fork point
		// fullnode (it's a non-existent branch).
		return nil
	default:
		panic("it shouldn't happen") // hashNode, valueNode
	}
}

// hasRightElement returns whether there are more elements
// on the right side of the given path. The given path can point to an existent
// key or a non-existent one. This function assumes that the whole path has
// already been resolved.
func hasRightElement(node node, key []byte) bool {
	pos, key := 0, keybytesToHex(key)
	for node != nil {
		switch rn := node.(type) {
		case *fullNode:
			for i := key[pos] + 1; i < 16; i++ {
				if rn.Children[i] != nil {
					return true
				}
			}
			node, pos = rn.Children[key[pos]], pos+1
		case *shortNode:
			if len(key)-pos < len(rn.Key) || !bytes.Equal(rn.Key, key[pos:pos+len(rn.Key)]) {
				return bytes.Compare(rn.Key, key[pos:]) > 0
			}
			node, pos = rn.Val, pos+len(rn.Key)
		case valueNode:
			return false // We have resolved the whole path
		default:
			panic(fmt.Sprintf("%T: invalid node: %v", node, node)) // hashnode
		}
	}
	return false
}

// VerifyRangeProof checks whether the given leaf nodes and edge proof
// can prove the given trie leaves range is matched with the specific root.
// Besides, the range should be consecutive (no gap inside) and monotonically
// increasing.
//
// Note the given proof actually contains two edge proofs. Both of them can
// be non-existence proofs. For example the first proof is for a non-existent
// key 0x03, the last proof is for a non-existent key 0x10. The given batch
// of leaves is [0x04, 0x05, .. 0x09]. It's still feasible to prove the given
// batch is valid.
//
// The firstKey is the origin key the first edge proof was generated for; it is
// not necessarily the same as keys[0] (unless the first proof is an existence
// proof). The last edge proof is always generated for the last key in keys.
//
// Apart from the normal case, this function can also be used to verify the
// following range proofs:
//
//   - All elements proof. In this case the proof can be nil, but the range should
//     be all the leaves in the trie.
//
//   - One element proof. In this case, no matter whether the edge proof is a
//     non-existence proof or not, we can always verify the correctness of the proof.
//
//   - Zero element proof. In this case a single non-existence proof is enough to prove.
//     Besides, if there are still some other leaves available on the right side, then
//     an error will be returned.
//
// Besides returning an error to indicate whether the proof is valid or not, the function
// also returns a flag to indicate whether there exist more accounts/slots in the trie.
//
// Note: This method does not verify that the proof is of minimal form. If the input
// proofs are 'bloated' with neighbour leaves or random data, aside from the 'useful'
// data, then the proof will still be accepted.
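//
// Illustrative sketch of a caller-side check (hypothetical snap-sync style
// usage; origin, keys and vals are assumed inputs, and proof is assumed to
// hold both edge proofs, e.g. a memorydb filled via Prove for the origin key
// and for the last returned key):
//
//	more, err := VerifyRangeProof(root, origin, keys, vals, proof)
//	if err != nil {
//		// reject the response: the range doesn't match root
//	}
//	if more {
//		// continue fetching from after keys[len(keys)-1]
//	}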
func VerifyRangeProof(rootHash common.Hash, firstKey []byte, keys [][]byte, values [][]byte, proof ethdb.KeyValueReader) (bool, error) {
	if len(keys) != len(values) {
		return false, fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values))
	}
	// Ensure the received batch is monotonically increasing and contains no deletions
	for i := 0; i < len(keys)-1; i++ {
		if bytes.Compare(keys[i], keys[i+1]) >= 0 {
			return false, errors.New("range is not monotonically increasing")
		}
	}
	for _, value := range values {
		if len(value) == 0 {
			return false, errors.New("range contains deletion")
		}
	}
	// Special case, there is no edge proof at all. The given range is expected
	// to be the whole leaf-set in the trie.
	if proof == nil {
		tr := NewStackTrie(nil)
		for index, key := range keys {
			tr.Update(key, values[index])
		}
		if have, want := tr.Hash(), rootHash; have != want {
			return false, fmt.Errorf("invalid proof, want hash %x, got %x", want, have)
		}
		return false, nil // No more elements
	}
	// Special case, there is a provided edge proof but zero key/value
	// pairs, ensure there are no more accounts / slots in the trie.
	if len(keys) == 0 {
		root, val, err := proofToPath(rootHash, nil, firstKey, proof, true)
		if err != nil {
			return false, err
		}
		if val != nil || hasRightElement(root, firstKey) {
			return false, errors.New("more entries available")
		}
		return false, nil
	}
	var lastKey = keys[len(keys)-1]
	// Special case, there is only one element and the two edge keys are the same.
	// In this case, we can't construct two edge paths. So handle it here.
	if len(keys) == 1 && bytes.Equal(firstKey, lastKey) {
		root, val, err := proofToPath(rootHash, nil, firstKey, proof, false)
		if err != nil {
			return false, err
		}
		if !bytes.Equal(firstKey, keys[0]) {
			return false, errors.New("correct proof but invalid key")
		}
		if !bytes.Equal(val, values[0]) {
			return false, errors.New("correct proof but invalid data")
		}
		return hasRightElement(root, firstKey), nil
	}
	// Ok, in all other cases, we require two edge paths available.
	// First check the validity of edge keys.
	if bytes.Compare(firstKey, lastKey) >= 0 {
		return false, errors.New("invalid edge keys")
	}
	// todo(rjl493456442) different length edge keys should be supported
	if len(firstKey) != len(lastKey) {
		return false, errors.New("inconsistent edge keys")
	}
	// Convert the edge proofs to edge trie paths. Then we can
	// have the same tree architecture as the original one.
	// For the first edge proof, a non-existence proof is allowed.
	root, _, err := proofToPath(rootHash, nil, firstKey, proof, true)
	if err != nil {
		return false, err
	}
	// Pass the root node here, the second path will be merged
	// with the first one. For the last edge proof, a non-existence
	// proof is also allowed.
	root, _, err = proofToPath(rootHash, root, lastKey, proof, true)
	if err != nil {
		return false, err
	}
	// Remove all internal references. All the removed parts should
	// be re-filled (or re-constructed) by the given leaves range.
	empty, err := unsetInternal(root, firstKey, lastKey)
	if err != nil {
		return false, err
	}
	// Rebuild the trie with the leaf stream, the shape of the trie
	// should be the same as the original one.
	tr := &Trie{root: root, reader: newEmptyReader(), tracer: newTracer()}
	if empty {
		tr.root = nil
	}
	for index, key := range keys {
		tr.Update(key, values[index])
	}
	if tr.Hash() != rootHash {
		return false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash())
	}
	return hasRightElement(tr.root, keys[len(keys)-1]), nil
}

// get returns the child of the given node. It returns nil if the
// node with the specified key doesn't exist at all.
//
// There is an additional flag `skipResolved`. If it's set, then all
// resolved nodes won't be returned.
func get(tn node, key []byte, skipResolved bool) ([]byte, node) {
	for {
		switch n := tn.(type) {
		case *shortNode:
			if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) {
				return nil, nil
			}
			tn = n.Val
			key = key[len(n.Key):]
			if !skipResolved {
				return key, tn
			}
		case *fullNode:
			tn = n.Children[key[0]]
			key = key[1:]
			if !skipResolved {
				return key, tn
			}
		case hashNode:
			return key, n
		case nil:
			return key, nil
		case valueNode:
			return nil, n
		default:
			panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
		}
	}
}