// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
)

// Prove constructs a merkle proof for key. The result contains all encoded nodes
// on the path to the value at key. The value itself is also included in the last
// node and can be retrieved by verifying the proof.
//
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root node), ending
// with the node that proves the absence of the key.
func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
	// Collect all nodes on the path to key.
	key = keybytesToHex(key)
	var nodes []node
	tn := t.root
	for len(key) > 0 && tn != nil {
		switch n := tn.(type) {
		case *shortNode:
			if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) {
				// The trie doesn't contain the key.
				tn = nil
			} else {
				tn = n.Val
				key = key[len(n.Key):]
			}
			nodes = append(nodes, n)
		case *fullNode:
			tn = n.Children[key[0]]
			key = key[1:]
			nodes = append(nodes, n)
		case hashNode:
			var err error
			tn, err = t.resolveHash(n, nil)
			if err != nil {
				log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
				return err
			}
		default:
			panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
		}
	}
	hasher := newHasher(false)
	defer returnHasherToPool(hasher)

	for i, n := range nodes {
		if fromLevel > 0 {
			fromLevel--
			continue
		}
		var hn node
		n, hn = hasher.proofHash(n)
		if hash, ok := hn.(hashNode); ok || i == 0 {
			// If the node's database encoding is a hash (or is the
			// root node), it becomes a proof element.
			enc, _ := rlp.EncodeToBytes(n)
			if !ok {
				hash = hasher.hashData(enc)
			}
			proofDb.Put(hash, enc)
		}
	}
	return nil
}
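
// Illustrative usage (a sketch, not part of the library API): a caller that
// wants a standalone proof can hand Prove any ethdb.KeyValueWriter, for
// example an in-memory database. The variables t and key are hypothetical.
//
//	proofDb := memorydb.New()
//	if err := t.Prove(key, 0, proofDb); err != nil {
//		// a node on the path could not be resolved
//	}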

// Prove constructs a merkle proof for key. The result contains all encoded nodes
// on the path to the value at key. The value itself is also included in the last
// node and can be retrieved by verifying the proof.
//
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root node), ending
// with the node that proves the absence of the key.
func (t *SecureTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
	return t.trie.Prove(key, fromLevel, proofDb)
}

// VerifyProof checks merkle proofs. The given proof must contain the value for
// key in a trie with the given root hash. VerifyProof returns an error if the
// proof contains invalid trie nodes or the wrong value.
func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.KeyValueReader) (value []byte, err error) {
	key = keybytesToHex(key)
	wantHash := rootHash
	for i := 0; ; i++ {
		buf, _ := proofDb.Get(wantHash[:])
		if buf == nil {
			return nil, fmt.Errorf("proof node %d (hash %064x) missing", i, wantHash)
		}
		n, err := decodeNode(wantHash[:], buf)
		if err != nil {
			return nil, fmt.Errorf("bad proof node %d: %v", i, err)
		}
		keyrest, cld := get(n, key, true)
		switch cld := cld.(type) {
		case nil:
			// The trie doesn't contain the key.
			return nil, nil
		case hashNode:
			key = keyrest
			copy(wantHash[:], cld)
		case valueNode:
			return cld, nil
		}
	}
}
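
// Illustrative round trip (a sketch; assumes proofDb was filled by Trie.Prove
// for the same key, and rootHash is the root hash of that trie):
//
//	val, err := VerifyProof(rootHash, key, proofDb)
//	if err != nil {
//		// the proof is malformed or inconsistent with rootHash
//	}
//	// val == nil (with a nil error) means the proof shows the key is absent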

// proofToPath converts a merkle proof to a trie node path. The main purpose of
// this function is recovering a node path from the merkle proof stream. All
// necessary nodes will be resolved and the remaining ones are left as hashnodes.
//
// The given edge proof is allowed to be an existent or non-existent proof.
func proofToPath(rootHash common.Hash, root node, key []byte, proofDb ethdb.KeyValueReader, allowNonExistent bool) (node, []byte, error) {
	// resolveNode retrieves and resolves a trie node from the merkle proof stream.
	resolveNode := func(hash common.Hash) (node, error) {
		buf, _ := proofDb.Get(hash[:])
		if buf == nil {
			return nil, fmt.Errorf("proof node (hash %064x) missing", hash)
		}
		n, err := decodeNode(hash[:], buf)
		if err != nil {
			return nil, fmt.Errorf("bad proof node %v", err)
		}
		return n, err
	}
	// If the root node is empty, resolve it first.
	// The root node must be included in the proof.
	if root == nil {
		n, err := resolveNode(rootHash)
		if err != nil {
			return nil, nil, err
		}
		root = n
	}
	var (
		err           error
		child, parent node
		keyrest       []byte
		valnode       []byte
	)
	key, parent = keybytesToHex(key), root
	for {
		keyrest, child = get(parent, key, false)
		switch cld := child.(type) {
		case nil:
			// The trie doesn't contain the key. It's possible
			// the proof is a non-existent proof, but at least
			// we can prove all resolved nodes are correct, which
			// is enough for range proving.
			if allowNonExistent {
				return root, nil, nil
			}
			return nil, nil, errors.New("the node is not contained in trie")
		case *shortNode:
			key, parent = keyrest, child // Already resolved
			continue
		case *fullNode:
			key, parent = keyrest, child // Already resolved
			continue
		case hashNode:
			child, err = resolveNode(common.BytesToHash(cld))
			if err != nil {
				return nil, nil, err
			}
		case valueNode:
			valnode = cld
		}
		// Link the parent and child.
		switch pnode := parent.(type) {
		case *shortNode:
			pnode.Val = child
		case *fullNode:
			pnode.Children[key[0]] = child
		default:
			panic(fmt.Sprintf("%T: invalid node: %v", pnode, pnode))
		}
		if len(valnode) > 0 {
			return root, valnode, nil // The whole path is resolved
		}
		key, parent = keyrest, child
	}
}

// unsetInternal removes all internal node references (hashnode, embedded node).
// It should be called after a trie is constructed with two edge paths. Also
// the given boundary keys must be the ones used to construct the edge paths.
//
// It's the key step for range proofs. All visited nodes should be marked dirty
// since the node content might be modified. Besides, it can happen that some
// fullnodes only have one child, which is disallowed. But if the proof is valid,
// the missing children will be filled, otherwise the proof will be rejected anyway.
//
// Note this function assumes the given boundary keys are different and that
// right is larger than left.
func unsetInternal(n node, left []byte, right []byte) error {
	left, right = keybytesToHex(left), keybytesToHex(right)

	// Step down to the fork point. There are two scenarios that can happen:
	// - the fork point is a shortnode: either the key of the left proof or
	//   the right proof doesn't match the shortnode's key.
	// - the fork point is a fullnode: both edge proofs are allowed to point
	//   to a non-existent key.
	var (
		pos    = 0
		parent node

		// fork indicator, 0 means no fork, -1 means proof is less, 1 means proof is greater
		shortForkLeft, shortForkRight int
	)
findFork:
	for {
		switch rn := (n).(type) {
		case *shortNode:
			rn.flags = nodeFlag{dirty: true}

			// If either the key of the left proof or the right proof doesn't
			// match the shortnode, stop here and the forkpoint is the shortnode.
			if len(left)-pos < len(rn.Key) {
				shortForkLeft = bytes.Compare(left[pos:], rn.Key)
			} else {
				shortForkLeft = bytes.Compare(left[pos:pos+len(rn.Key)], rn.Key)
			}
			if len(right)-pos < len(rn.Key) {
				shortForkRight = bytes.Compare(right[pos:], rn.Key)
			} else {
				shortForkRight = bytes.Compare(right[pos:pos+len(rn.Key)], rn.Key)
			}
			if shortForkLeft != 0 || shortForkRight != 0 {
				break findFork
			}
			parent = n
			n, pos = rn.Val, pos+len(rn.Key)
		case *fullNode:
			rn.flags = nodeFlag{dirty: true}

			// If either the node pointed to by the left proof or the right
			// proof is nil, stop here and the forkpoint is the fullnode.
			leftnode, rightnode := rn.Children[left[pos]], rn.Children[right[pos]]
			if leftnode == nil || rightnode == nil || leftnode != rightnode {
				break findFork
			}
			parent = n
			n, pos = rn.Children[left[pos]], pos+1
		default:
			panic(fmt.Sprintf("%T: invalid node: %v", n, n))
		}
	}
	switch rn := n.(type) {
	case *shortNode:
		// There are five possible scenarios:
		// - both proofs are less than the trie path => no valid range
		// - both proofs are greater than the trie path => no valid range
		// - left proof is less and right proof is greater => valid range, unset the shortnode entirely
		// - left proof points to the shortnode, but right proof is greater
		// - right proof points to the shortnode, but left proof is less
		if shortForkLeft == -1 && shortForkRight == -1 {
			return errors.New("empty range")
		}
		if shortForkLeft == 1 && shortForkRight == 1 {
			return errors.New("empty range")
		}
		if shortForkLeft != 0 && shortForkRight != 0 {
			parent.(*fullNode).Children[left[pos-1]] = nil
			return nil
		}
		// Only one proof points to a non-existent key.
		if shortForkRight != 0 {
			// Unset the left proof's path.
			if _, ok := rn.Val.(valueNode); ok {
				parent.(*fullNode).Children[left[pos-1]] = nil
				return nil
			}
			return unset(rn, rn.Val, left[pos:], len(rn.Key), false)
		}
		if shortForkLeft != 0 {
			// Unset the right proof's path.
			if _, ok := rn.Val.(valueNode); ok {
				parent.(*fullNode).Children[right[pos-1]] = nil
				return nil
			}
			return unset(rn, rn.Val, right[pos:], len(rn.Key), true)
		}
		return nil
	case *fullNode:
		// Unset all internal nodes within the forkpoint.
		for i := left[pos] + 1; i < right[pos]; i++ {
			rn.Children[i] = nil
		}
		if err := unset(rn, rn.Children[left[pos]], left[pos:], 1, false); err != nil {
			return err
		}
		if err := unset(rn, rn.Children[right[pos]], right[pos:], 1, true); err != nil {
			return err
		}
		return nil
	default:
		panic(fmt.Sprintf("%T: invalid node: %v", n, n))
	}
}

// unset removes all internal node references either to the left or to the right
// of the given path. The following scenarios can occur:
//
// - The given path is existent in the trie, unset the associated nodes with the
//   specific direction
// - The given path is non-existent in the trie
//   - the fork point is a fullnode, the corresponding child pointed to by the
//     path is nil, return
//   - the fork point is a shortnode, the shortnode is included in the range,
//     keep the entire branch and return.
//   - the fork point is a shortnode, the shortnode is excluded from the range,
//     unset the entire branch.
func unset(parent node, child node, key []byte, pos int, removeLeft bool) error {
	switch cld := child.(type) {
	case *fullNode:
		if removeLeft {
			for i := 0; i < int(key[pos]); i++ {
				cld.Children[i] = nil
			}
			cld.flags = nodeFlag{dirty: true}
		} else {
			for i := key[pos] + 1; i < 16; i++ {
				cld.Children[i] = nil
			}
			cld.flags = nodeFlag{dirty: true}
		}
		return unset(cld, cld.Children[key[pos]], key, pos+1, removeLeft)
	case *shortNode:
		if len(key[pos:]) < len(cld.Key) || !bytes.Equal(cld.Key, key[pos:pos+len(cld.Key)]) {
			// We have found the fork point; it's a non-existent branch.
			if removeLeft {
				if bytes.Compare(cld.Key, key[pos:]) < 0 {
					// The key of the fork shortnode is less than the path
					// (it belongs to the range), unset the entire branch.
					// The parent must be a fullnode.
					fn := parent.(*fullNode)
					fn.Children[key[pos-1]] = nil
				} else {
					// The key of the fork shortnode is greater than the
					// path (it doesn't belong to the range), keep it
					// with the cached hash available.
				}
			} else {
				if bytes.Compare(cld.Key, key[pos:]) > 0 {
					// The key of the fork shortnode is greater than the
					// path (it belongs to the range), unset the entire
					// branch. The parent must be a fullnode.
					fn := parent.(*fullNode)
					fn.Children[key[pos-1]] = nil
				} else {
					// The key of the fork shortnode is less than the
					// path (it doesn't belong to the range), keep it
					// with the cached hash available.
				}
			}
			return nil
		}
		if _, ok := cld.Val.(valueNode); ok {
			fn := parent.(*fullNode)
			fn.Children[key[pos-1]] = nil
			return nil
		}
		cld.flags = nodeFlag{dirty: true}
		return unset(cld, cld.Val, key, pos+len(cld.Key), removeLeft)
	case nil:
		// If the node is nil, then it's a child of the fork point
		// fullnode (i.e. a non-existent branch).
		return nil
	default:
		panic("it shouldn't happen") // hashNode, valueNode
	}
}

// hasRightElement returns an indicator of whether there exist more elements
// on the right side of the given path. The given path can point to an existent
// key or a non-existent one. This function assumes that the whole path has
// already been resolved.
func hasRightElement(node node, key []byte) bool {
	pos, key := 0, keybytesToHex(key)
	for node != nil {
		switch rn := node.(type) {
		case *fullNode:
			for i := key[pos] + 1; i < 16; i++ {
				if rn.Children[i] != nil {
					return true
				}
			}
			node, pos = rn.Children[key[pos]], pos+1
		case *shortNode:
			if len(key)-pos < len(rn.Key) || !bytes.Equal(rn.Key, key[pos:pos+len(rn.Key)]) {
				return bytes.Compare(rn.Key, key[pos:]) > 0
			}
			node, pos = rn.Val, pos+len(rn.Key)
		case valueNode:
			return false // We have resolved the whole path
		default:
			panic(fmt.Sprintf("%T: invalid node: %v", node, node)) // hashnode
		}
	}
	return false
}

// VerifyRangeProof checks whether the given leaf nodes and edge proof
// can prove that the given range of trie leaves matches the specified root.
// Besides, the range should be consecutive (no gap inside) and monotonically
// increasing.
//
// Note the given proof actually contains two edge proofs. Both of them can
// be non-existent proofs. For example the first proof is for a non-existent
// key 0x03, the last proof is for a non-existent key 0x10. The given batch
// of leaves is [0x04, 0x05, .. 0x09]. It's still feasible to prove the given
// batch is valid.
//
// The firstKey is paired with firstProof, not necessarily the same as keys[0]
// (unless firstProof is an existent proof). Similarly, lastKey and lastProof
// are paired.
//
// Apart from the normal case, this function can also be used to verify the
// following range proofs:
//
// - All elements proof. In this case the proof can be nil, but the range should
//   be all the leaves in the trie.
//
// - One element proof. In this case no matter whether the edge proof is a
//   non-existent proof or not, we can always verify the correctness of the proof.
//
// - Zero element proof. In this case a single non-existent proof is enough to prove.
//   Besides, if there are still some other leaves available on the right side, then
//   an error will be returned.
//
// Besides returning an error to indicate whether the proof is valid or not, the
// function also returns a flag to indicate whether there exist more accounts/slots
// in the trie.
func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, keys [][]byte, values [][]byte, proof ethdb.KeyValueReader) (ethdb.KeyValueStore, *Trie, *KeyValueNotary, bool, error) {
	if len(keys) != len(values) {
		return nil, nil, nil, false, fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values))
	}
	// Ensure the received batch is monotonically increasing.
	for i := 0; i < len(keys)-1; i++ {
		if bytes.Compare(keys[i], keys[i+1]) >= 0 {
			return nil, nil, nil, false, errors.New("range is not monotonically increasing")
		}
	}
	// Create a key-value notary to track which items from the given proof the
	// range prover actually needed to verify the data.
	notary := NewKeyValueNotary(proof)

	// Special case, there is no edge proof at all. The given range is expected
	// to be the whole leaf-set in the trie.
	if proof == nil {
		var (
			diskdb = memorydb.New()
			triedb = NewDatabase(diskdb)
		)
		tr, err := New(common.Hash{}, triedb)
		if err != nil {
			return nil, nil, nil, false, err
		}
		for index, key := range keys {
			tr.TryUpdate(key, values[index])
		}
		if tr.Hash() != rootHash {
			return nil, nil, nil, false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash())
		}
		// Proof seems valid, serialize all the nodes into the database
		if _, err := tr.Commit(nil); err != nil {
			return nil, nil, nil, false, err
		}
		if err := triedb.Commit(rootHash, false, nil); err != nil {
			return nil, nil, nil, false, err
		}
		return diskdb, tr, notary, false, nil // No more elements
	}
	// Special case, there is a provided edge proof but zero key/value
	// pairs, ensure there are no more accounts / slots in the trie.
	if len(keys) == 0 {
		root, val, err := proofToPath(rootHash, nil, firstKey, notary, true)
		if err != nil {
			return nil, nil, nil, false, err
		}
		if val != nil || hasRightElement(root, firstKey) {
			return nil, nil, nil, false, errors.New("more entries available")
		}
		// Since the entire proof is a single path, we can construct a trie and a
		// node database directly out of the inputs, no need to generate them.
		diskdb := notary.Accessed()
		tr := &Trie{
			db:   NewDatabase(diskdb),
			root: root,
		}
		return diskdb, tr, notary, hasRightElement(root, firstKey), nil
	}
	// Special case, there is only one element and the two edge keys are the same.
	// In this case, we can't construct two edge paths. So handle it here.
	if len(keys) == 1 && bytes.Equal(firstKey, lastKey) {
		root, val, err := proofToPath(rootHash, nil, firstKey, notary, false)
		if err != nil {
			return nil, nil, nil, false, err
		}
		if !bytes.Equal(firstKey, keys[0]) {
			return nil, nil, nil, false, errors.New("correct proof but invalid key")
		}
		if !bytes.Equal(val, values[0]) {
			return nil, nil, nil, false, errors.New("correct proof but invalid data")
		}
		// Since the entire proof is a single path, we can construct a trie and a
		// node database directly out of the inputs, no need to generate them.
		diskdb := notary.Accessed()
		tr := &Trie{
			db:   NewDatabase(diskdb),
			root: root,
		}
		return diskdb, tr, notary, hasRightElement(root, firstKey), nil
	}
	// Ok, in all other cases, we require two edge paths available.
	// First check the validity of the edge keys.
	if bytes.Compare(firstKey, lastKey) >= 0 {
		return nil, nil, nil, false, errors.New("invalid edge keys")
	}
	// todo(rjl493456442) different length edge keys should be supported
	if len(firstKey) != len(lastKey) {
		return nil, nil, nil, false, errors.New("inconsistent edge keys")
	}
	// Convert the edge proofs to edge trie paths. Then we can
	// have the same tree architecture as the original one.
	// For the first edge proof, a non-existent proof is allowed.
	root, _, err := proofToPath(rootHash, nil, firstKey, notary, true)
	if err != nil {
		return nil, nil, nil, false, err
	}
	// Pass the root node here, the second path will be merged
	// with the first one. For the last edge proof, a non-existent
	// proof is also allowed.
	root, _, err = proofToPath(rootHash, root, lastKey, notary, true)
	if err != nil {
		return nil, nil, nil, false, err
	}
	// Remove all internal references. All the removed parts should
	// be re-filled (or re-constructed) by the given leaf range.
	if err := unsetInternal(root, firstKey, lastKey); err != nil {
		return nil, nil, nil, false, err
	}
	// Rebuild the trie with the leaf stream, the shape of the trie
	// should be the same as the original one.
	var (
		diskdb = memorydb.New()
		triedb = NewDatabase(diskdb)
	)
	tr := &Trie{root: root, db: triedb}
	for index, key := range keys {
		tr.TryUpdate(key, values[index])
	}
	if tr.Hash() != rootHash {
		return nil, nil, nil, false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash())
	}
	// Proof seems valid, serialize all the nodes into the database
	if _, err := tr.Commit(nil); err != nil {
		return nil, nil, nil, false, err
	}
	if err := triedb.Commit(rootHash, false, nil); err != nil {
		return nil, nil, nil, false, err
	}
	return diskdb, tr, notary, hasRightElement(root, keys[len(keys)-1]), nil
}
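
// Illustrative call (a sketch; assumes keys/values are a sorted, contiguous
// slice of leaves from a trie with root rootHash, and proof holds the edge
// proofs for firstKey and lastKey produced by Trie.Prove):
//
//	_, _, _, more, err := VerifyRangeProof(rootHash, firstKey, lastKey, keys, values, proof)
//	if err != nil {
//		// the range does not reconstruct rootHash
//	}
//	// more == true means the trie has further leaves to the right of lastKey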

// get returns the child of the given node. It returns nil if the node
// with the specified key doesn't exist at all.
//
// There is an additional flag `skipResolved`. If it's set, then all resolved
// nodes won't be returned.
func get(tn node, key []byte, skipResolved bool) ([]byte, node) {
	for {
		switch n := tn.(type) {
		case *shortNode:
			if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) {
				return nil, nil
			}
			tn = n.Val
			key = key[len(n.Key):]
			if !skipResolved {
				return key, tn
			}
		case *fullNode:
			tn = n.Children[key[0]]
			key = key[1:]
			if !skipResolved {
				return key, tn
			}
		case hashNode:
			return key, n
		case nil:
			return key, nil
		case valueNode:
			return nil, n
		default:
			panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
		}
	}
}