diff --git a/core/state/statedb.go b/core/state/statedb.go index a36d65fce7..e945ab5950 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -940,7 +940,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { // The onleaf func is called _serially_, so we can reuse the same account // for unmarshalling every time. var account types.StateAccount - root, accountCommitted, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash) error { + root, accountCommitted, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash, _ []byte) error { if err := rlp.DecodeBytes(leaf, &account); err != nil { return nil } diff --git a/core/state/sync.go b/core/state/sync.go index cc7d01a218..00a4c67aa3 100644 --- a/core/state/sync.go +++ b/core/state/sync.go @@ -27,20 +27,20 @@ import ( ) // NewStateSync create a new state trie download scheduler. -func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(paths [][]byte, leaf []byte) error) *trie.Sync { +func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(keys [][]byte, leaf []byte) error) *trie.Sync { // Register the storage slot callback if the external callback is specified. - var onSlot func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error + var onSlot func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error if onLeaf != nil { - onSlot = func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error { - return onLeaf(paths, leaf) + onSlot = func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error { + return onLeaf(keys, leaf) } } // Register the account callback to connect the state trie and the storage // trie belongs to the contract. 
 	var syncer *trie.Sync
-	onAccount := func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error {
+	onAccount := func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error {
 		if onLeaf != nil {
-			if err := onLeaf(paths, leaf); err != nil {
+			if err := onLeaf(keys, leaf); err != nil {
 				return err
 			}
 		}
@@ -48,8 +48,8 @@ func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(p
 		if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil {
 			return err
 		}
-		syncer.AddSubTrie(obj.Root, hexpath, parent, onSlot)
-		syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), hexpath, parent)
+		syncer.AddSubTrie(obj.Root, path, parent, parentPath, onSlot)
+		syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), path, parent, parentPath)
 		return nil
 	}
 	syncer = trie.NewSync(root, database, onAccount)
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index 83c5aa2df7..95c79eaf36 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -134,8 +134,8 @@ func checkStateConsistency(db ethdb.Database, root common.Hash) error {
 func TestEmptyStateSync(t *testing.T) {
 	empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
 	sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), nil)
-	if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
-		t.Errorf(" content requested for empty state: %v, %v, %v", nodes, paths, codes)
+	if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 {
+		t.Errorf("content requested for empty state: %v, %v, %v", nodes, paths, codes)
 	}
 }
 
@@ -160,6 +160,14 @@ func TestIterativeStateSyncBatchedByPath(t *testing.T) {
 	testIterativeStateSync(t, 100, false, true)
 }
 
+// stateElement represents an element in the state trie (bytecode or trie node).
+type stateElement struct {
+	path     string
+	hash     common.Hash
+	code     common.Hash
+	syncPath trie.SyncPath
+}
+
 func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
 	// Create a random state to copy
 	srcDb, srcRoot, srcAccounts := makeTestState()
@@ -172,54 +180,73 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
 	dstDb := rawdb.NewMemoryDatabase()
 	sched := NewStateSync(srcRoot, dstDb, nil)
 
-	nodes, paths, codes := sched.Missing(count)
 	var (
-		hashQueue []common.Hash
-		pathQueue []trie.SyncPath
+		nodeElements []stateElement
+		codeElements []stateElement
 	)
-	if !bypath {
-		hashQueue = append(append(hashQueue[:0], nodes...), codes...)
-	} else {
-		hashQueue = append(hashQueue[:0], codes...)
-		pathQueue = append(pathQueue[:0], paths...)
+	paths, nodes, codes := sched.Missing(count)
+	for i := 0; i < len(paths); i++ {
+		nodeElements = append(nodeElements, stateElement{
+			path:     paths[i],
+			hash:     nodes[i],
+			syncPath: trie.NewSyncPath([]byte(paths[i])),
+		})
 	}
-	for len(hashQueue)+len(pathQueue) > 0 {
-		results := make([]trie.SyncResult, len(hashQueue)+len(pathQueue))
-		for i, hash := range hashQueue {
-			data, err := srcDb.TrieDB().Node(hash)
+	for i := 0; i < len(codes); i++ {
+		codeElements = append(codeElements, stateElement{
+			code: codes[i],
+		})
+	}
+	for len(nodeElements)+len(codeElements) > 0 {
+		var (
+			nodeResults = make([]trie.NodeSyncResult, len(nodeElements))
+			codeResults = make([]trie.CodeSyncResult, len(codeElements))
+		)
+		for i, element := range codeElements {
+			data, err := srcDb.ContractCode(common.Hash{}, element.code)
 			if err != nil {
-				data, err = srcDb.ContractCode(common.Hash{}, hash)
+				t.Fatalf("failed to retrieve contract bytecode for hash %x", element.code)
 			}
-			if err != nil {
-				t.Fatalf("failed to retrieve node data for hash %x", hash)
-			}
-			results[i] = trie.SyncResult{Hash: hash, Data: data}
+			codeResults[i] = trie.CodeSyncResult{Hash: element.code, Data: data}
 		}
-		for i, path := range pathQueue {
-			if len(path) == 1 {
-				data, _, err := srcTrie.TryGetNode(path[0])
-				if err != nil {
-					t.Fatalf("failed to retrieve node data for path %x: %v", path, err)
+		for i, node := range nodeElements {
+			if bypath {
+				if len(node.syncPath) == 1 {
+					data, _, err := srcTrie.TryGetNode(node.syncPath[0])
+					if err != nil {
+						t.Fatalf("failed to retrieve node data for path %x: %v", node.syncPath[0], err)
+					}
+					nodeResults[i] = trie.NodeSyncResult{Path: node.path, Data: data}
+				} else {
+					var acc types.StateAccount
+					if err := rlp.DecodeBytes(srcTrie.Get(node.syncPath[0]), &acc); err != nil {
+						t.Fatalf("failed to decode account on path %x: %v", node.syncPath[0], err)
+					}
+					stTrie, err := trie.New(common.BytesToHash(node.syncPath[0]), acc.Root, srcDb.TrieDB())
+					if err != nil {
+						t.Fatalf("failed to retrieve storage trie for path %x: %v", node.syncPath[1], err)
+					}
+					data, _, err := stTrie.TryGetNode(node.syncPath[1])
+					if err != nil {
+						t.Fatalf("failed to retrieve node data for path %x: %v", node.syncPath[1], err)
+					}
+					nodeResults[i] = trie.NodeSyncResult{Path: node.path, Data: data}
 				}
-				results[len(hashQueue)+i] = trie.SyncResult{Hash: crypto.Keccak256Hash(data), Data: data}
 			} else {
-				var acc types.StateAccount
-				if err := rlp.DecodeBytes(srcTrie.Get(path[0]), &acc); err != nil {
-					t.Fatalf("failed to decode account on path %x: %v", path, err)
-				}
-				stTrie, err := trie.New(common.BytesToHash(path[0]), acc.Root, srcDb.TrieDB())
+				data, err := srcDb.TrieDB().Node(node.hash)
 				if err != nil {
-					t.Fatalf("failed to retriev storage trie for path %x: %v", path, err)
+					t.Fatalf("failed to retrieve node data for key %v", []byte(node.path))
 				}
-				data, _, err := stTrie.TryGetNode(path[1])
-				if err != nil {
-					t.Fatalf("failed to retrieve node data for path %x: %v", path, err)
-				}
-				results[len(hashQueue)+i] = trie.SyncResult{Hash: crypto.Keccak256Hash(data), Data: data}
+				nodeResults[i] = trie.NodeSyncResult{Path: node.path, Data: data}
 			}
 		}
-		for _, result := range results {
-			if err := sched.Process(result); err != nil {
+		for _, result := range codeResults {
+			if err := sched.ProcessCode(result); err != nil {
+				t.Errorf("failed to process result %v", err)
+			}
+		}
+		for _, result := range nodeResults {
+			if err := sched.ProcessNode(result); err != nil {
 				t.Errorf("failed to process result %v", err)
 			}
 		}
@@ -229,12 +256,20 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
 		}
 		batch.Write()
 
-		nodes, paths, codes = sched.Missing(count)
-		if !bypath {
-			hashQueue = append(append(hashQueue[:0], nodes...), codes...)
-		} else {
-			hashQueue = append(hashQueue[:0], codes...)
-			pathQueue = append(pathQueue[:0], paths...)
+		paths, nodes, codes = sched.Missing(count)
+		nodeElements = nodeElements[:0]
+		for i := 0; i < len(paths); i++ {
+			nodeElements = append(nodeElements, stateElement{
+				path:     paths[i],
+				hash:     nodes[i],
+				syncPath: trie.NewSyncPath([]byte(paths[i])),
+			})
+		}
+		codeElements = codeElements[:0]
+		for i := 0; i < len(codes); i++ {
+			codeElements = append(codeElements, stateElement{
+				code: codes[i],
+			})
 		}
 	}
 	// Cross check that the two states are in sync
@@ -251,26 +286,58 @@ func TestIterativeDelayedStateSync(t *testing.T) {
 	dstDb := rawdb.NewMemoryDatabase()
 	sched := NewStateSync(srcRoot, dstDb, nil)
 
-	nodes, _, codes := sched.Missing(0)
-	queue := append(append([]common.Hash{}, nodes...), codes...)
-
-	for len(queue) > 0 {
+	var (
+		nodeElements []stateElement
+		codeElements []stateElement
+	)
+	paths, nodes, codes := sched.Missing(0)
+	for i := 0; i < len(paths); i++ {
+		nodeElements = append(nodeElements, stateElement{
+			path:     paths[i],
+			hash:     nodes[i],
+			syncPath: trie.NewSyncPath([]byte(paths[i])),
+		})
+	}
+	for i := 0; i < len(codes); i++ {
+		codeElements = append(codeElements, stateElement{
+			code: codes[i],
+		})
+	}
+	for len(nodeElements)+len(codeElements) > 0 {
 		// Sync only half of the scheduled nodes
-		results := make([]trie.SyncResult, len(queue)/2+1)
-		for i, hash := range queue[:len(results)] {
-			data, err := srcDb.TrieDB().Node(hash)
-			if err != nil {
-				data, err = srcDb.ContractCode(common.Hash{}, hash)
+		var nodeProcessed int
+		var codeProcessed int
+		if len(codeElements) > 0 {
+			codeResults := make([]trie.CodeSyncResult, len(codeElements)/2+1)
+			for i, element := range codeElements[:len(codeResults)] {
+				data, err := srcDb.ContractCode(common.Hash{}, element.code)
+				if err != nil {
+					t.Fatalf("failed to retrieve contract bytecode for %x", element.code)
+				}
+				codeResults[i] = trie.CodeSyncResult{Hash: element.code, Data: data}
 			}
-			if err != nil {
-				t.Fatalf("failed to retrieve node data for %x", hash)
+			for _, result := range codeResults {
+				if err := sched.ProcessCode(result); err != nil {
+					t.Fatalf("failed to process result %v", err)
+				}
 			}
-			results[i] = trie.SyncResult{Hash: hash, Data: data}
+			codeProcessed = len(codeResults)
 		}
-		for _, result := range results {
-			if err := sched.Process(result); err != nil {
-				t.Fatalf("failed to process result %v", err)
+		if len(nodeElements) > 0 {
+			nodeResults := make([]trie.NodeSyncResult, len(nodeElements)/2+1)
+			for i, element := range nodeElements[:len(nodeResults)] {
+				data, err := srcDb.TrieDB().Node(element.hash)
+				if err != nil {
+					t.Fatalf("failed to retrieve node data for %x", element.hash)
+				}
+				nodeResults[i] = trie.NodeSyncResult{Path: element.path, Data: data}
			}
+			for _, result := range nodeResults {
+				if err := sched.ProcessNode(result); err != nil {
+					t.Fatalf("failed to process result %v", err)
+				}
+			}
+			nodeProcessed = len(nodeResults)
 		}
 		batch := dstDb.NewBatch()
 		if err := sched.Commit(batch); err != nil {
@@ -278,8 +345,21 @@ func TestIterativeDelayedStateSync(t *testing.T) {
 		}
 		batch.Write()
 
-		nodes, _, codes = sched.Missing(0)
-		queue = append(append(queue[len(results):], nodes...), codes...)
+		paths, nodes, codes = sched.Missing(0)
+		nodeElements = nodeElements[nodeProcessed:]
+		for i := 0; i < len(paths); i++ {
+			nodeElements = append(nodeElements, stateElement{
+				path:     paths[i],
+				hash:     nodes[i],
+				syncPath: trie.NewSyncPath([]byte(paths[i])),
+			})
+		}
+		codeElements = codeElements[codeProcessed:]
+		for i := 0; i < len(codes); i++ {
+			codeElements = append(codeElements, stateElement{
+				code: codes[i],
+			})
+		}
 	}
 	// Cross check that the two states are in sync
 	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
@@ -299,40 +379,70 @@ func testIterativeRandomStateSync(t *testing.T, count int) {
 	dstDb := rawdb.NewMemoryDatabase()
 	sched := NewStateSync(srcRoot, dstDb, nil)
 
-	queue := make(map[common.Hash]struct{})
-	nodes, _, codes := sched.Missing(count)
-	for _, hash := range append(nodes, codes...) {
-		queue[hash] = struct{}{}
+	nodeQueue := make(map[string]stateElement)
+	codeQueue := make(map[common.Hash]struct{})
+	paths, nodes, codes := sched.Missing(count)
+	for i, path := range paths {
+		nodeQueue[path] = stateElement{
+			path:     path,
+			hash:     nodes[i],
+			syncPath: trie.NewSyncPath([]byte(path)),
+		}
 	}
-	for len(queue) > 0 {
+	for _, hash := range codes {
+		codeQueue[hash] = struct{}{}
+	}
+	for len(nodeQueue)+len(codeQueue) > 0 {
 		// Fetch all the queued nodes in a random order
-		results := make([]trie.SyncResult, 0, len(queue))
-		for hash := range queue {
-			data, err := srcDb.TrieDB().Node(hash)
-			if err != nil {
-				data, err = srcDb.ContractCode(common.Hash{}, hash)
+		if len(codeQueue) > 0 {
+			results := make([]trie.CodeSyncResult, 0, len(codeQueue))
+			for hash := range codeQueue {
+				data, err := srcDb.ContractCode(common.Hash{}, hash)
+				if err != nil {
+					t.Fatalf("failed to retrieve contract bytecode for %x", hash)
+				}
+				results = append(results, trie.CodeSyncResult{Hash: hash, Data: data})
 			}
-			if err != nil {
-				t.Fatalf("failed to retrieve node data for %x", hash)
+			for _, result := range results {
+				if err := sched.ProcessCode(result); err != nil {
+					t.Fatalf("failed to process result %v", err)
+				}
+			}
+		}
+		if len(nodeQueue) > 0 {
+			results := make([]trie.NodeSyncResult, 0, len(nodeQueue))
+			for path, element := range nodeQueue {
+				data, err := srcDb.TrieDB().Node(element.hash)
+				if err != nil {
+					t.Fatalf("failed to retrieve node data for %x %v %v", element.hash, []byte(element.path), element.path)
+				}
+				results = append(results, trie.NodeSyncResult{Path: path, Data: data})
+			}
+			for _, result := range results {
+				if err := sched.ProcessNode(result); err != nil {
+					t.Fatalf("failed to process result %v", err)
+				}
			}
-			results = append(results, trie.SyncResult{Hash: hash, Data: data})
 		}
 		// Feed the retrieved results back and queue new tasks
-		for _, result := range results {
-			if err := sched.Process(result); err != nil {
-				t.Fatalf("failed to process result %v", err)
-			}
-		}
 		batch := dstDb.NewBatch()
 		if err := sched.Commit(batch); err != nil {
 			t.Fatalf("failed to commit data: %v", err)
 		}
 		batch.Write()
-		queue = make(map[common.Hash]struct{})
-		nodes, _, codes = sched.Missing(count)
-		for _, hash := range append(nodes, codes...) {
-			queue[hash] = struct{}{}
+		nodeQueue = make(map[string]stateElement)
+		codeQueue = make(map[common.Hash]struct{})
+		paths, nodes, codes := sched.Missing(count)
+		for i, path := range paths {
+			nodeQueue[path] = stateElement{
+				path:     path,
+				hash:     nodes[i],
+				syncPath: trie.NewSyncPath([]byte(path)),
+			}
+		}
+		for _, hash := range codes {
+			codeQueue[hash] = struct{}{}
 		}
 	}
 	// Cross check that the two states are in sync
@@ -349,34 +459,62 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
 	dstDb := rawdb.NewMemoryDatabase()
 	sched := NewStateSync(srcRoot, dstDb, nil)
 
-	queue := make(map[common.Hash]struct{})
-	nodes, _, codes := sched.Missing(0)
-	for _, hash := range append(nodes, codes...) {
-		queue[hash] = struct{}{}
+	nodeQueue := make(map[string]stateElement)
+	codeQueue := make(map[common.Hash]struct{})
+	paths, nodes, codes := sched.Missing(0)
+	for i, path := range paths {
+		nodeQueue[path] = stateElement{
+			path:     path,
+			hash:     nodes[i],
+			syncPath: trie.NewSyncPath([]byte(path)),
+		}
 	}
-	for len(queue) > 0 {
+	for _, hash := range codes {
+		codeQueue[hash] = struct{}{}
+	}
+	for len(nodeQueue)+len(codeQueue) > 0 {
 		// Sync only half of the scheduled nodes, even those in random order
-		results := make([]trie.SyncResult, 0, len(queue)/2+1)
-		for hash := range queue {
-			delete(queue, hash)
+		if len(codeQueue) > 0 {
+			results := make([]trie.CodeSyncResult, 0, len(codeQueue)/2+1)
+			for hash := range codeQueue {
+				delete(codeQueue, hash)
 
-			data, err := srcDb.TrieDB().Node(hash)
-			if err != nil {
-				data, err = srcDb.ContractCode(common.Hash{}, hash)
-			}
-			if err != nil {
-				t.Fatalf("failed to retrieve node data for %x", hash)
-			}
-			results = append(results, trie.SyncResult{Hash: hash, Data: data})
+				data, err := srcDb.ContractCode(common.Hash{}, hash)
+				if err != nil {
+					t.Fatalf("failed to retrieve contract bytecode for %x", hash)
+				}
+				results = append(results, trie.CodeSyncResult{Hash: hash, Data: data})
 
-			if len(results) >= cap(results) {
-				break
+				if len(results) >= cap(results) {
+					break
+				}
+			}
+			for _, result := range results {
+				if err := sched.ProcessCode(result); err != nil {
+					t.Fatalf("failed to process result %v", err)
+				}
 			}
 		}
-		// Feed the retrieved results back and queue new tasks
-		for _, result := range results {
-			if err := sched.Process(result); err != nil {
-				t.Fatalf("failed to process result %v", err)
+		if len(nodeQueue) > 0 {
+			results := make([]trie.NodeSyncResult, 0, len(nodeQueue)/2+1)
+			for path, element := range nodeQueue {
+				delete(nodeQueue, path)
+
+				data, err := srcDb.TrieDB().Node(element.hash)
+				if err != nil {
+					t.Fatalf("failed to retrieve node data for %x", element.hash)
+				}
+				results = append(results, trie.NodeSyncResult{Path: path, Data: data})
+
+				if len(results) >= cap(results) {
+					break
+				}
+			}
+			// Feed the retrieved results back and queue new tasks
+			for _, result := range results {
+				if err := sched.ProcessNode(result); err != nil {
+					t.Fatalf("failed to process result %v", err)
+				}
			}
 		}
 		batch := dstDb.NewBatch()
@@ -384,12 +522,17 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
 			t.Fatalf("failed to commit data: %v", err)
 		}
 		batch.Write()
-		for _, result := range results {
-			delete(queue, result.Hash)
+
+		paths, nodes, codes := sched.Missing(0)
+		for i, path := range paths {
+			nodeQueue[path] = stateElement{
+				path:     path,
+				hash:     nodes[i],
+				syncPath: trie.NewSyncPath([]byte(path)),
+			}
 		}
-		nodes, _, codes = sched.Missing(0)
-		for _, hash := range append(nodes, codes...) {
-			queue[hash] = struct{}{}
+		for _, hash := range codes {
+			codeQueue[hash] = struct{}{}
 		}
 	}
 	// Cross check that the two states are in sync
@@ -416,28 +559,62 @@ func TestIncompleteStateSync(t *testing.T) {
 	dstDb := rawdb.NewMemoryDatabase()
 	sched := NewStateSync(srcRoot, dstDb, nil)
 
-	var added []common.Hash
-
-	nodes, _, codes := sched.Missing(1)
-	queue := append(append([]common.Hash{}, nodes...), codes...)
-
-	for len(queue) > 0 {
-		// Fetch a batch of state nodes
-		results := make([]trie.SyncResult, len(queue))
-		for i, hash := range queue {
-			data, err := srcDb.TrieDB().Node(hash)
-			if err != nil {
-				data, err = srcDb.ContractCode(common.Hash{}, hash)
-			}
-			if err != nil {
-				t.Fatalf("failed to retrieve node data for %x", hash)
-			}
-			results[i] = trie.SyncResult{Hash: hash, Data: data}
+	var (
+		addedCodes []common.Hash
+		addedNodes []common.Hash
+	)
+	nodeQueue := make(map[string]stateElement)
+	codeQueue := make(map[common.Hash]struct{})
+	paths, nodes, codes := sched.Missing(1)
+	for i, path := range paths {
+		nodeQueue[path] = stateElement{
+			path:     path,
+			hash:     nodes[i],
+			syncPath: trie.NewSyncPath([]byte(path)),
 		}
-		// Process each of the state nodes
-		for _, result := range results {
-			if err := sched.Process(result); err != nil {
-				t.Fatalf("failed to process result %v", err)
+	}
+	for _, hash := range codes {
+		codeQueue[hash] = struct{}{}
+	}
+	for len(nodeQueue)+len(codeQueue) > 0 {
+		// Fetch a batch of state nodes
+		if len(codeQueue) > 0 {
+			results := make([]trie.CodeSyncResult, 0, len(codeQueue))
+			for hash := range codeQueue {
+				data, err := srcDb.ContractCode(common.Hash{}, hash)
+				if err != nil {
+					t.Fatalf("failed to retrieve contract bytecode for %x", hash)
+				}
+				results = append(results, trie.CodeSyncResult{Hash: hash, Data: data})
+				addedCodes = append(addedCodes, hash)
+			}
+			// Process each of the state nodes
+			for _, result := range results {
+				if err := sched.ProcessCode(result); err != nil {
+					t.Fatalf("failed to process result %v", err)
+				}
+			}
+		}
+		var nodehashes []common.Hash
+		if len(nodeQueue) > 0 {
+			results := make([]trie.NodeSyncResult, 0, len(nodeQueue))
+			for key, element := range nodeQueue {
+				data, err := srcDb.TrieDB().Node(element.hash)
+				if err != nil {
+					t.Fatalf("failed to retrieve node data for %x", element.hash)
+				}
+				results = append(results, trie.NodeSyncResult{Path: key, Data: data})
+
+				if element.hash != srcRoot {
+					addedNodes = append(addedNodes, element.hash)
+				}
+				nodehashes = append(nodehashes, element.hash)
+			}
+			// Process each of the state nodes
+			for _, result := range results {
+				if err := sched.ProcessNode(result); err != nil {
+					t.Fatalf("failed to process result %v", err)
+				}
			}
 		}
 		batch := dstDb.NewBatch()
@@ -445,43 +622,44 @@ func TestIncompleteStateSync(t *testing.T) {
 			t.Fatalf("failed to commit data: %v", err)
 		}
 		batch.Write()
-		for _, result := range results {
-			added = append(added, result.Hash)
-			// Check that all known sub-tries added so far are complete or missing entirely.
-			if _, ok := isCode[result.Hash]; ok {
-				continue
-			}
+
+		for _, root := range nodehashes {
 			// Can't use checkStateConsistency here because subtrie keys may have odd
 			// length and crash in LeafKey.
-			if err := checkTrieConsistency(dstDb, result.Hash); err != nil {
+			if err := checkTrieConsistency(dstDb, root); err != nil {
 				t.Fatalf("state inconsistent: %v", err)
 			}
 		}
 		// Fetch the next batch to retrieve
-		nodes, _, codes = sched.Missing(1)
-		queue = append(append(queue[:0], nodes...), codes...)
+ nodeQueue = make(map[string]stateElement) + codeQueue = make(map[common.Hash]struct{}) + paths, nodes, codes := sched.Missing(1) + for i, path := range paths { + nodeQueue[path] = stateElement{ + path: path, + hash: nodes[i], + syncPath: trie.NewSyncPath([]byte(path)), + } + } + for _, hash := range codes { + codeQueue[hash] = struct{}{} + } } // Sanity check that removing any node from the database is detected - for _, node := range added[1:] { - var ( - key = node.Bytes() - _, code = isCode[node] - val []byte - ) - if code { - val = rawdb.ReadCode(dstDb, node) - rawdb.DeleteCode(dstDb, node) - } else { - val = rawdb.ReadTrieNode(dstDb, node) - rawdb.DeleteTrieNode(dstDb, node) + for _, node := range addedCodes { + val := rawdb.ReadCode(dstDb, node) + rawdb.DeleteCode(dstDb, node) + if err := checkStateConsistency(dstDb, srcRoot); err == nil { + t.Errorf("trie inconsistency not caught, missing: %x", node) } - if err := checkStateConsistency(dstDb, added[0]); err == nil { - t.Fatalf("trie inconsistency not caught, missing: %x", key) - } - if code { - rawdb.WriteCode(dstDb, node, val) - } else { - rawdb.WriteTrieNode(dstDb, node, val) + rawdb.WriteCode(dstDb, node, val) + } + for _, node := range addedNodes { + val := rawdb.ReadTrieNode(dstDb, node) + rawdb.DeleteTrieNode(dstDb, node) + if err := checkStateConsistency(dstDb, srcRoot); err == nil { + t.Errorf("trie inconsistency not caught, missing: %v", node.Hex()) } + rawdb.WriteTrieNode(dstDb, node, val) } } diff --git a/eth/downloader/skeleton_test.go b/eth/downloader/skeleton_test.go index 836efabebc..7d0b78dca6 100644 --- a/eth/downloader/skeleton_test.go +++ b/eth/downloader/skeleton_test.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "math/big" - "os" "sync/atomic" "testing" "time" @@ -515,7 +514,7 @@ func TestSkeletonSyncExtend(t *testing.T) { // Tests that the skeleton sync correctly retrieves headers from one or more // peers without duplicates or other strange side effects. func TestSkeletonSyncRetrievals(t *testing.T) { - log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) // Since skeleton headers don't need to be meaningful, beyond a parent hash // progression, create a long fake chain to test with. 
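The reworked tests above all exercise the same consumer contract: trie nodes are now requested and delivered by path, bytecode by hash, through the split ProcessNode/ProcessCode entry points. A minimal drain loop distilled from those tests may help when reviewing; this is a sketch only, assuming the fixtures from sync_test.go (sched, srcDb, dstDb) and with error handling elided:

	for {
		paths, hashes, codes := sched.Missing(100) // parallel slices: paths[i] identifies hashes[i]
		if len(paths)+len(codes) == 0 {
			break // scheduler drained
		}
		for _, hash := range codes { // bytecode is still requested and verified by hash
			blob, _ := srcDb.ContractCode(common.Hash{}, hash)
			if err := sched.ProcessCode(trie.CodeSyncResult{Hash: hash, Data: blob}); err != nil {
				// handle ErrNotRequested / ErrAlreadyProcessed
			}
		}
		for i, path := range paths { // trie nodes are now keyed by their hex path
			blob, _ := srcDb.TrieDB().Node(hashes[i])
			if err := sched.ProcessNode(trie.NodeSyncResult{Path: path, Data: blob}); err != nil {
				// handle ErrNotRequested / ErrAlreadyProcessed
			}
		}
		batch := dstDb.NewBatch() // flush the accumulated membatch to persistent storage
		if err := sched.Commit(batch); err != nil {
			// handle commit failure
		}
		batch.Write()
	}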
diff --git a/eth/protocols/snap/sort_test.go b/eth/protocols/snap/sort_test.go index 49730c886e..be0a8c5706 100644 --- a/eth/protocols/snap/sort_test.go +++ b/eth/protocols/snap/sort_test.go @@ -22,7 +22,6 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/trie" ) func hexToNibbles(s string) []byte { @@ -38,22 +37,17 @@ func hexToNibbles(s string) []byte { } func TestRequestSorting(t *testing.T) { - // - Path 0x9 -> {0x19} // - Path 0x99 -> {0x0099} // - Path 0x01234567890123456789012345678901012345678901234567890123456789019 -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x19} // - Path 0x012345678901234567890123456789010123456789012345678901234567890199 -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x0099} - var f = func(path string) (trie.SyncPath, TrieNodePathSet, common.Hash) { + var f = func(path string) string { data := hexToNibbles(path) - sp := trie.NewSyncPath(data) - tnps := TrieNodePathSet([][]byte(sp)) - hash := common.Hash{} - return sp, tnps, hash + return string(data) } var ( - hashes []common.Hash - paths []trie.SyncPath - pathsets []TrieNodePathSet + hashes []common.Hash + paths []string ) for _, x := range []string{ "0x9", @@ -67,15 +61,14 @@ func TestRequestSorting(t *testing.T) { "0x01234567890123456789012345678901012345678901234567890123456789010", "0x01234567890123456789012345678901012345678901234567890123456789011", } { - sp, _, hash := f(x) - hashes = append(hashes, hash) - paths = append(paths, sp) + paths = append(paths, f(x)) + hashes = append(hashes, common.Hash{}) } - _, paths, pathsets = sortByAccountPath(hashes, paths) + _, _, syncPaths, pathsets := sortByAccountPath(paths, hashes) { var b = new(bytes.Buffer) - for i := 0; i < len(paths); i++ { - fmt.Fprintf(b, "\n%d. paths %x", i, paths[i]) + for i := 0; i < len(syncPaths); i++ { + fmt.Fprintf(b, "\n%d. paths %x", i, syncPaths[i]) } want := ` 0. paths [0099] diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index d68e728ff2..b2462f5f89 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -230,8 +230,8 @@ type trienodeHealRequest struct { timeout *time.Timer // Timer to track delivery timeout stale chan struct{} // Channel to signal the request was dropped - hashes []common.Hash // Trie node hashes to validate responses - paths []trie.SyncPath // Trie node paths requested for rescheduling + paths []string // Trie node paths for identifying trie node + hashes []common.Hash // Trie node hashes to validate responses task *healTask // Task which this request is filling (only access fields through the runloop!!) 
} @@ -240,9 +240,9 @@ type trienodeHealRequest struct { type trienodeHealResponse struct { task *healTask // Task which this request is filling - hashes []common.Hash // Hashes of the trie nodes to avoid double hashing - paths []trie.SyncPath // Trie node paths requested for rescheduling missing ones - nodes [][]byte // Actual trie nodes to store into the database (nil = missing) + paths []string // Paths of the trie nodes + hashes []common.Hash // Hashes of the trie nodes to avoid double hashing + nodes [][]byte // Actual trie nodes to store into the database (nil = missing) } // bytecodeHealRequest tracks a pending bytecode request to ensure responses are to @@ -321,8 +321,8 @@ type storageTask struct { type healTask struct { scheduler *trie.Sync // State trie sync scheduler defining the tasks - trieTasks map[common.Hash]trie.SyncPath // Set of trie node tasks currently queued for retrieval - codeTasks map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval + trieTasks map[string]common.Hash // Set of trie node tasks currently queued for retrieval, indexed by node path + codeTasks map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval, indexed by code hash } // SyncProgress is a database entry to allow suspending and resuming a snapshot state @@ -540,7 +540,7 @@ func (s *Syncer) Unregister(id string) error { return nil } -// Sync starts (or resumes a previous) sync cycle to iterate over an state trie +// Sync starts (or resumes a previous) sync cycle to iterate over a state trie // with the given root and reconstruct the nodes based on the snapshot leaves. // Previously downloaded segments will not be redownloaded of fixed, rather any // errors will be healed after the leaves are fully accumulated. @@ -551,7 +551,7 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error { s.root = root s.healer = &healTask{ scheduler: state.NewStateSync(root, s.db, s.onHealState), - trieTasks: make(map[common.Hash]trie.SyncPath), + trieTasks: make(map[string]common.Hash), codeTasks: make(map[common.Hash]struct{}), } s.statelessPeers = make(map[string]struct{}) @@ -743,7 +743,7 @@ func (s *Syncer) loadSyncStatus() { return } } - // Either we've failed to decode the previus state, or there was none. + // Either we've failed to decode the previous state, or there was none. // Start a fresh sync by chunking up the account range and scheduling // them for retrieval. 
s.tasks = nil @@ -1280,9 +1280,9 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai want = maxTrieRequestCount + maxCodeRequestCount ) if have < want { - nodes, paths, codes := s.healer.scheduler.Missing(want - have) - for i, hash := range nodes { - s.healer.trieTasks[hash] = paths[i] + paths, hashes, codes := s.healer.scheduler.Missing(want - have) + for i, path := range paths { + s.healer.trieTasks[path] = hashes[i] } for _, hash := range codes { s.healer.codeTasks[hash] = struct{}{} @@ -1323,21 +1323,20 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai } var ( hashes = make([]common.Hash, 0, cap) - paths = make([]trie.SyncPath, 0, cap) + paths = make([]string, 0, cap) pathsets = make([]TrieNodePathSet, 0, cap) ) - for hash, pathset := range s.healer.trieTasks { - delete(s.healer.trieTasks, hash) + for path, hash := range s.healer.trieTasks { + delete(s.healer.trieTasks, path) + paths = append(paths, path) hashes = append(hashes, hash) - paths = append(paths, pathset) - - if len(hashes) >= cap { + if len(paths) >= cap { break } } // Group requests by account hash - hashes, paths, pathsets = sortByAccountPath(hashes, paths) + paths, hashes, _, pathsets = sortByAccountPath(paths, hashes) req := &trienodeHealRequest{ peer: idle, id: reqid, @@ -1346,8 +1345,8 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai revert: fail, cancel: cancel, stale: make(chan struct{}), - hashes: hashes, paths: paths, + hashes: hashes, task: s.healer, } req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() { @@ -1405,9 +1404,9 @@ func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fai want = maxTrieRequestCount + maxCodeRequestCount ) if have < want { - nodes, paths, codes := s.healer.scheduler.Missing(want - have) - for i, hash := range nodes { - s.healer.trieTasks[hash] = paths[i] + paths, hashes, codes := s.healer.scheduler.Missing(want - have) + for i, path := range paths { + s.healer.trieTasks[path] = hashes[i] } for _, hash := range codes { s.healer.codeTasks[hash] = struct{}{} @@ -1703,10 +1702,10 @@ func (s *Syncer) revertTrienodeHealRequest(req *trienodeHealRequest) { s.lock.Unlock() // If there's a timeout timer still running, abort it and mark the trie node - // retrievals as not-pending, ready for resheduling + // retrievals as not-pending, ready for rescheduling req.timeout.Stop() - for i, hash := range req.hashes { - req.task.trieTasks[hash] = req.paths[i] + for i, path := range req.paths { + req.task.trieTasks[path] = req.hashes[i] } } @@ -2096,14 +2095,14 @@ func (s *Syncer) processTrienodeHealResponse(res *trienodeHealResponse) { // If the trie node was not delivered, reschedule it if node == nil { - res.task.trieTasks[hash] = res.paths[i] + res.task.trieTasks[res.paths[i]] = res.hashes[i] continue } // Push the trie node into the state syncer s.trienodeHealSynced++ s.trienodeHealBytes += common.StorageSize(len(node)) - err := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: node}) + err := s.healer.scheduler.ProcessNode(trie.NodeSyncResult{Path: res.paths[i], Data: node}) switch err { case nil: case trie.ErrAlreadyProcessed: @@ -2139,7 +2138,7 @@ func (s *Syncer) processBytecodeHealResponse(res *bytecodeHealResponse) { s.bytecodeHealSynced++ s.bytecodeHealBytes += common.StorageSize(len(node)) - err := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: node}) + err := s.healer.scheduler.ProcessCode(trie.CodeSyncResult{Hash: hash, Data: node}) 
switch err { case nil: case trie.ErrAlreadyProcessed: @@ -2666,9 +2665,9 @@ func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error } // Response validated, send it to the scheduler for filling response := &trienodeHealResponse{ + paths: req.paths, task: req.task, hashes: req.hashes, - paths: req.paths, nodes: nodes, } select { @@ -2913,8 +2912,9 @@ func (s *capacitySort) Swap(i, j int) { // healRequestSort implements the Sort interface, allowing sorting trienode // heal requests, which is a prerequisite for merging storage-requests. type healRequestSort struct { - hashes []common.Hash - paths []trie.SyncPath + paths []string + hashes []common.Hash + syncPaths []trie.SyncPath } func (t *healRequestSort) Len() int { @@ -2922,8 +2922,8 @@ func (t *healRequestSort) Len() int { } func (t *healRequestSort) Less(i, j int) bool { - a := t.paths[i] - b := t.paths[j] + a := t.syncPaths[i] + b := t.syncPaths[j] switch bytes.Compare(a[0], b[0]) { case -1: return true @@ -2944,8 +2944,9 @@ func (t *healRequestSort) Less(i, j int) bool { } func (t *healRequestSort) Swap(i, j int) { - t.hashes[i], t.hashes[j] = t.hashes[j], t.hashes[i] t.paths[i], t.paths[j] = t.paths[j], t.paths[i] + t.hashes[i], t.hashes[j] = t.hashes[j], t.hashes[i] + t.syncPaths[i], t.syncPaths[j] = t.syncPaths[j], t.syncPaths[i] } // Merge merges the pathsets, so that several storage requests concerning the @@ -2953,7 +2954,7 @@ func (t *healRequestSort) Swap(i, j int) { // OBS: This operation is moot if t has not first been sorted. func (t *healRequestSort) Merge() []TrieNodePathSet { var result []TrieNodePathSet - for _, path := range t.paths { + for _, path := range t.syncPaths { pathset := TrieNodePathSet([][]byte(path)) if len(path) == 1 { // It's an account reference. @@ -2962,7 +2963,7 @@ func (t *healRequestSort) Merge() []TrieNodePathSet { // It's a storage reference. end := len(result) - 1 if len(result) == 0 || !bytes.Equal(pathset[0], result[end][0]) { - // The account doesn't doesn't match last, create a new entry. + // The account doesn't match last, create a new entry. result = append(result, pathset) } else { // It's the same account as the previous one, add to the storage @@ -2976,9 +2977,13 @@ func (t *healRequestSort) Merge() []TrieNodePathSet { // sortByAccountPath takes hashes and paths, and sorts them. After that, it generates // the TrieNodePaths and merges paths which belongs to the same account path. -func sortByAccountPath(hashes []common.Hash, paths []trie.SyncPath) ([]common.Hash, []trie.SyncPath, []TrieNodePathSet) { - n := &healRequestSort{hashes, paths} +func sortByAccountPath(paths []string, hashes []common.Hash) ([]string, []common.Hash, []trie.SyncPath, []TrieNodePathSet) { + var syncPaths []trie.SyncPath + for _, path := range paths { + syncPaths = append(syncPaths, trie.NewSyncPath([]byte(path))) + } + n := &healRequestSort{paths, hashes, syncPaths} sort.Sort(n) pathsets := n.Merge() - return n.hashes, n.paths, pathsets + return n.paths, n.hashes, n.syncPaths, pathsets } diff --git a/les/downloader/statesync.go b/les/downloader/statesync.go index fd24c5150b..22f952155f 100644 --- a/les/downloader/statesync.go +++ b/les/downloader/statesync.go @@ -34,7 +34,7 @@ import ( // a single data retrieval network packet. 
type stateReq struct { nItems uint16 // Number of items requested for download (max is 384, so uint16 is sufficient) - trieTasks map[common.Hash]*trieTask // Trie node download tasks to track previous attempts + trieTasks map[string]*trieTask // Trie node download tasks to track previous attempts codeTasks map[common.Hash]*codeTask // Byte code download tasks to track previous attempts timeout time.Duration // Maximum round trip time for this to complete timer *time.Timer // Timer to fire when the RTT timeout expires @@ -263,8 +263,8 @@ type stateSync struct { sched *trie.Sync // State trie sync scheduler defining the tasks keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with - trieTasks map[common.Hash]*trieTask // Set of trie node tasks currently queued for retrieval - codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval + trieTasks map[string]*trieTask // Set of trie node tasks currently queued for retrieval, indexed by path + codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval, indexed by hash numUncommitted int bytesUncommitted int @@ -281,6 +281,7 @@ type stateSync struct { // trieTask represents a single trie node download task, containing a set of // peers already attempted retrieval from to detect stalled syncs and abort. type trieTask struct { + hash common.Hash path [][]byte attempts map[string]struct{} } @@ -299,7 +300,7 @@ func newStateSync(d *Downloader, root common.Hash) *stateSync { root: root, sched: state.NewStateSync(root, d.stateDB, nil), keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState), - trieTasks: make(map[common.Hash]*trieTask), + trieTasks: make(map[string]*trieTask), codeTasks: make(map[common.Hash]*codeTask), deliver: make(chan *stateReq), cancel: make(chan struct{}), @@ -455,10 +456,11 @@ func (s *stateSync) assignTasks() { func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) { // Refill available tasks from the scheduler. 
if fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 { - nodes, paths, codes := s.sched.Missing(fill) - for i, hash := range nodes { - s.trieTasks[hash] = &trieTask{ - path: paths[i], + paths, hashes, codes := s.sched.Missing(fill) + for i, path := range paths { + s.trieTasks[path] = &trieTask{ + hash: hashes[i], + path: trie.NewSyncPath([]byte(path)), attempts: make(map[string]struct{}), } } @@ -474,7 +476,7 @@ func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths paths = make([]trie.SyncPath, 0, n) codes = make([]common.Hash, 0, n) - req.trieTasks = make(map[common.Hash]*trieTask, n) + req.trieTasks = make(map[string]*trieTask, n) req.codeTasks = make(map[common.Hash]*codeTask, n) for hash, t := range s.codeTasks { @@ -492,7 +494,7 @@ func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths req.codeTasks[hash] = t delete(s.codeTasks, hash) } - for hash, t := range s.trieTasks { + for path, t := range s.trieTasks { // Stop when we've gathered enough requests if len(nodes)+len(codes) == n { break @@ -504,11 +506,11 @@ func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths // Assign the request to this peer t.attempts[req.peer.id] = struct{}{} - nodes = append(nodes, hash) + nodes = append(nodes, t.hash) paths = append(paths, t.path) - req.trieTasks[hash] = t - delete(s.trieTasks, hash) + req.trieTasks[path] = t + delete(s.trieTasks, path) } req.nItems = uint16(len(nodes) + len(codes)) return nodes, paths, codes @@ -530,7 +532,7 @@ func (s *stateSync) process(req *stateReq) (int, error) { // Iterate over all the delivered data and inject one-by-one into the trie for _, blob := range req.response { - hash, err := s.processNodeData(blob) + hash, err := s.processNodeData(req.trieTasks, req.codeTasks, blob) switch err { case nil: s.numUncommitted++ @@ -543,13 +545,10 @@ func (s *stateSync) process(req *stateReq) (int, error) { default: return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err) } - // Delete from both queues (one delivery is enough for the syncer) - delete(req.trieTasks, hash) - delete(req.codeTasks, hash) } // Put unfulfilled tasks back into the retry queue npeers := s.d.peers.Len() - for hash, task := range req.trieTasks { + for path, task := range req.trieTasks { // If the node did deliver something, missing items may be due to a protocol // limit or a previous timeout + delayed delivery. Both cases should permit // the node to retry the missing items (to avoid single-peer stalls). @@ -559,10 +558,10 @@ func (s *stateSync) process(req *stateReq) (int, error) { // If we've requested the node too many times already, it may be a malicious // sync where nobody has the right data. Abort. if len(task.attempts) >= npeers { - return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers) + return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", task.hash.TerminalString(), len(task.attempts), npeers) } // Missing item, place into the retry queue. 
- s.trieTasks[hash] = task + s.trieTasks[path] = task } for hash, task := range req.codeTasks { // If the node did deliver something, missing items may be due to a protocol @@ -585,13 +584,35 @@ func (s *stateSync) process(req *stateReq) (int, error) { // processNodeData tries to inject a trie node data blob delivered from a remote // peer into the state trie, returning whether anything useful was written or any // error occurred. -func (s *stateSync) processNodeData(blob []byte) (common.Hash, error) { - res := trie.SyncResult{Data: blob} +// +// If multiple requests correspond to the same hash, this method will inject the +// blob as a result for the first one only, leaving the remaining duplicates to +// be fetched again. +func (s *stateSync) processNodeData(nodeTasks map[string]*trieTask, codeTasks map[common.Hash]*codeTask, blob []byte) (common.Hash, error) { + var hash common.Hash s.keccak.Reset() s.keccak.Write(blob) - s.keccak.Read(res.Hash[:]) - err := s.sched.Process(res) - return res.Hash, err + s.keccak.Read(hash[:]) + + if _, present := codeTasks[hash]; present { + err := s.sched.ProcessCode(trie.CodeSyncResult{ + Hash: hash, + Data: blob, + }) + delete(codeTasks, hash) + return hash, err + } + for path, task := range nodeTasks { + if task.hash == hash { + err := s.sched.ProcessNode(trie.NodeSyncResult{ + Path: path, + Data: blob, + }) + delete(nodeTasks, path) + return hash, err + } + } + return common.Hash{}, trie.ErrNotRequested } // updateStats bumps the various state sync progress counters and displays a log diff --git a/trie/committer.go b/trie/committer.go index 9b7ecbf5fc..7a392abab7 100644 --- a/trie/committer.go +++ b/trie/committer.go @@ -33,6 +33,7 @@ type leaf struct { size int // size of the rlp data (estimate) hash common.Hash // hash of rlp data node node // the node to commit + path []byte // the path from the root node } // committer is a type used for the trie Commit operation. A committer has some @@ -69,7 +70,7 @@ func (c *committer) Commit(n node, db *Database) (hashNode, int, error) { if db == nil { return nil, 0, errors.New("no db provided") } - h, committed, err := c.commit(n, db) + h, committed, err := c.commit(nil, n, db) if err != nil { return nil, 0, err } @@ -77,7 +78,7 @@ func (c *committer) Commit(n node, db *Database) (hashNode, int, error) { } // commit collapses a node down into a hash node and inserts it into the database -func (c *committer) commit(n node, db *Database) (node, int, error) { +func (c *committer) commit(path []byte, n node, db *Database) (node, int, error) { // if this path is clean, use available cached data hash, dirty := n.cache() if hash != nil && !dirty { @@ -93,7 +94,7 @@ func (c *committer) commit(n node, db *Database) (node, int, error) { // otherwise it can only be hashNode or valueNode. 
var childCommitted int if _, ok := cn.Val.(*fullNode); ok { - childV, committed, err := c.commit(cn.Val, db) + childV, committed, err := c.commit(append(path, cn.Key...), cn.Val, db) if err != nil { return nil, 0, err } @@ -101,20 +102,20 @@ func (c *committer) commit(n node, db *Database) (node, int, error) { } // The key needs to be copied, since we're delivering it to database collapsed.Key = hexToCompact(cn.Key) - hashedNode := c.store(collapsed, db) + hashedNode := c.store(path, collapsed, db) if hn, ok := hashedNode.(hashNode); ok { return hn, childCommitted + 1, nil } return collapsed, childCommitted, nil case *fullNode: - hashedKids, childCommitted, err := c.commitChildren(cn, db) + hashedKids, childCommitted, err := c.commitChildren(path, cn, db) if err != nil { return nil, 0, err } collapsed := cn.copy() collapsed.Children = hashedKids - hashedNode := c.store(collapsed, db) + hashedNode := c.store(path, collapsed, db) if hn, ok := hashedNode.(hashNode); ok { return hn, childCommitted + 1, nil } @@ -128,7 +129,7 @@ func (c *committer) commit(n node, db *Database) (node, int, error) { } // commitChildren commits the children of the given fullnode -func (c *committer) commitChildren(n *fullNode, db *Database) ([17]node, int, error) { +func (c *committer) commitChildren(path []byte, n *fullNode, db *Database) ([17]node, int, error) { var ( committed int children [17]node @@ -148,7 +149,7 @@ func (c *committer) commitChildren(n *fullNode, db *Database) ([17]node, int, er // Commit the child recursively and store the "hashed" value. // Note the returned node can be some embedded nodes, so it's // possible the type is not hashNode. - hashed, childCommitted, err := c.commit(child, db) + hashed, childCommitted, err := c.commit(append(path, byte(i)), child, db) if err != nil { return children, 0, err } @@ -165,7 +166,7 @@ func (c *committer) commitChildren(n *fullNode, db *Database) ([17]node, int, er // store hashes the node n and if we have a storage layer specified, it writes // the key/value pair to it and tracks any node->child references as well as any // node->external trie references. -func (c *committer) store(n node, db *Database) node { +func (c *committer) store(path []byte, n node, db *Database) node { // Larger nodes are replaced by their hash and stored in the database. var ( hash, _ = n.cache() @@ -189,6 +190,7 @@ func (c *committer) store(n node, db *Database) node { size: size, hash: common.BytesToHash(hash), node: n, + path: path, } } else if db != nil { // No leaf-callback used, but there's still a database. Do serial @@ -213,13 +215,13 @@ func (c *committer) commitLoop(db *Database) { switch n := n.(type) { case *shortNode: if child, ok := n.Val.(valueNode); ok { - c.onleaf(nil, nil, child, hash) + c.onleaf(nil, nil, child, hash, nil) } case *fullNode: // For children in range [0, 15], it's impossible // to contain valueNode. Only check the 17th child. 
 			if n.Children[16] != nil {
-				c.onleaf(nil, nil, n.Children[16].(valueNode), hash)
+				c.onleaf(nil, nil, n.Children[16].(valueNode), hash, nil)
 			}
 		}
 	}
diff --git a/trie/sync.go b/trie/sync.go
index db51dd4b03..7f4e67dbfe 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -24,6 +24,7 @@ import (
 	"github.com/ethereum/go-ethereum/common/prque"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 // ErrNotRequested is returned by the trie sync when it's requested to process a
@@ -39,19 +40,6 @@ var ErrAlreadyProcessed = errors.New("already processed")
 // memory if the node was configured with a significant number of peers.
 const maxFetchesPerDepth = 16384
 
-// request represents a scheduled or already in-flight state retrieval request.
-type request struct {
-	path []byte      // Merkle path leading to this node for prioritization
-	hash common.Hash // Hash of the node data content to retrieve
-	data []byte      // Data content of the node, cached until all subtrees complete
-	code bool        // Whether this is a code entry
-
-	parents []*request // Parent state nodes referencing this entry (notify all upon completion)
-	deps    int        // Number of dependencies before allowed to commit this node
-
-	callback LeafCallback // Callback to invoke if a leaf node it reached on this branch
-}
-
 // SyncPath is a path tuple identifying a particular trie node either in a single
 // trie (account) or a layered trie (account -> storage).
 //
@@ -85,30 +73,57 @@ func NewSyncPath(path []byte) SyncPath {
 	return SyncPath{hexToKeybytes(path[:64]), hexToCompact(path[64:])}
 }
 
-// SyncResult is a response with requested data along with it's hash.
-type SyncResult struct {
-	Hash common.Hash // Hash of the originally unknown trie node
-	Data []byte      // Data content of the retrieved node
+// nodeRequest represents a scheduled or already in-flight trie node retrieval request.
+type nodeRequest struct {
+	hash common.Hash // Hash of the trie node to retrieve
+	path []byte      // Merkle path leading to this node for prioritization
+	data []byte      // Data content of the node, cached until all subtrees complete
+
+	parent   *nodeRequest // Parent state node referencing this entry
+	deps     int          // Number of dependencies before allowed to commit this node
+	callback LeafCallback // Callback to invoke if a leaf node is reached on this branch
+}
+
+// codeRequest represents a scheduled or already in-flight bytecode retrieval request.
+type codeRequest struct {
+	hash    common.Hash    // Hash of the contract bytecode to retrieve
+	path    []byte         // Merkle path leading to this node for prioritization
+	data    []byte         // Data content of the node, cached until all subtrees complete
+	parents []*nodeRequest // Parent state nodes referencing this entry (notify all upon completion)
+}
+
+// NodeSyncResult is a response with requested trie node along with its node path.
+type NodeSyncResult struct {
+	Path string // Path of the originally unknown trie node
+	Data []byte // Data content of the retrieved trie node
+}
+
+// CodeSyncResult is a response with requested bytecode along with its hash.
+type CodeSyncResult struct {
+	Hash common.Hash // Hash of the originally unknown bytecode
+	Data []byte      // Data content of the retrieved bytecode
 }
 
 // syncMemBatch is an in-memory buffer of successfully downloaded but not yet
 // persisted data items.
 type syncMemBatch struct {
-	nodes map[common.Hash][]byte // In-memory membatch of recently completed nodes
-	codes map[common.Hash][]byte // In-memory membatch of recently completed codes
+	nodes  map[string][]byte      // In-memory membatch of recently completed nodes
+	hashes map[string]common.Hash // Hashes of recently completed nodes
+	codes  map[common.Hash][]byte // In-memory membatch of recently completed codes
 }
 
 // newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes.
 func newSyncMemBatch() *syncMemBatch {
 	return &syncMemBatch{
-		nodes: make(map[common.Hash][]byte),
-		codes: make(map[common.Hash][]byte),
+		nodes:  make(map[string][]byte),
+		hashes: make(map[string]common.Hash),
+		codes:  make(map[common.Hash][]byte),
 	}
 }
 
-// hasNode reports the trie node with specific hash is already cached.
-func (batch *syncMemBatch) hasNode(hash common.Hash) bool {
-	_, ok := batch.nodes[hash]
+// hasNode reports whether the trie node with the specific path is already cached.
+func (batch *syncMemBatch) hasNode(path []byte) bool {
+	_, ok := batch.nodes[string(path)]
 	return ok
 }
 
@@ -122,12 +137,12 @@ func (batch *syncMemBatch) hasCode(hash common.Hash) bool {
 // unknown trie hashes to retrieve, accepts node data associated with said hashes
 // and reconstructs the trie step by step until all is done.
 type Sync struct {
-	database ethdb.KeyValueReader     // Persistent database to check for existing entries
-	membatch *syncMemBatch            // Memory buffer to avoid frequent database writes
-	nodeReqs map[common.Hash]*request // Pending requests pertaining to a trie node hash
-	codeReqs map[common.Hash]*request // Pending requests pertaining to a code hash
-	queue    *prque.Prque             // Priority queue with the pending requests
-	fetches  map[int]int              // Number of active fetches per trie node depth
+	database ethdb.KeyValueReader         // Persistent database to check for existing entries
+	membatch *syncMemBatch                // Memory buffer to avoid frequent database writes
+	nodeReqs map[string]*nodeRequest      // Pending requests pertaining to a trie node path
+	codeReqs map[common.Hash]*codeRequest // Pending requests pertaining to a code hash
+	queue    *prque.Prque                 // Priority queue with the pending requests
+	fetches  map[int]int                  // Number of active fetches per trie node depth
 }
 
 // NewSync creates a new trie data download scheduler.
@@ -135,51 +150,51 @@ func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallb
 	ts := &Sync{
 		database: database,
 		membatch: newSyncMemBatch(),
-		nodeReqs: make(map[common.Hash]*request),
-		codeReqs: make(map[common.Hash]*request),
+		nodeReqs: make(map[string]*nodeRequest),
+		codeReqs: make(map[common.Hash]*codeRequest),
 		queue:    prque.New(nil),
 		fetches:  make(map[int]int),
 	}
-	ts.AddSubTrie(root, nil, common.Hash{}, callback)
+	ts.AddSubTrie(root, nil, common.Hash{}, nil, callback)
 	return ts
 }
 
-// AddSubTrie registers a new trie to the sync code, rooted at the designated parent.
-func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, callback LeafCallback) {
+// AddSubTrie registers a new trie to the sync code, rooted at the designated
+// parent for completion tracking. The given path is a unique node path in
+// hex format and contains all the parent paths if it is a layered trie node.
+func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, parentPath []byte, callback LeafCallback) { // Short circuit if the trie is empty or already known if root == emptyRoot { return } - if s.membatch.hasNode(root) { + if s.membatch.hasNode(path) { return } - // If database says this is a duplicate, then at least the trie node is - // present, and we hold the assumption that it's NOT legacy contract code. if rawdb.HasTrieNode(s.database, root) { return } // Assemble the new sub-trie sync request - req := &request{ - path: path, + req := &nodeRequest{ hash: root, + path: path, callback: callback, } // If this sub-trie has a designated parent, link them together if parent != (common.Hash{}) { - ancestor := s.nodeReqs[parent] + ancestor := s.nodeReqs[string(parentPath)] if ancestor == nil { panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent)) } ancestor.deps++ - req.parents = append(req.parents, ancestor) + req.parent = ancestor } - s.schedule(req) + s.scheduleNodeRequest(req) } // AddCodeEntry schedules the direct retrieval of a contract code that should not // be interpreted as a trie node, but rather accepted and stored into the database // as is. -func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash) { +func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash, parentPath []byte) { // Short circuit if the entry is empty or already known if hash == emptyState { return @@ -196,30 +211,29 @@ func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash) { return } // Assemble the new sub-trie sync request - req := &request{ + req := &codeRequest{ path: path, hash: hash, - code: true, } // If this sub-trie has a designated parent, link them together if parent != (common.Hash{}) { - ancestor := s.nodeReqs[parent] // the parent of codereq can ONLY be nodereq + ancestor := s.nodeReqs[string(parentPath)] // the parent of codereq can ONLY be nodereq if ancestor == nil { panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent)) } ancestor.deps++ req.parents = append(req.parents, ancestor) } - s.schedule(req) + s.scheduleCodeRequest(req) } // Missing retrieves the known missing nodes from the trie for retrieval. To aid // both eth/6x style fast sync and snap/1x style state sync, the paths of trie // nodes are returned too, as well as separate hash list for codes. 
-func (s *Sync) Missing(max int) (nodes []common.Hash, paths []SyncPath, codes []common.Hash) {
+func (s *Sync) Missing(max int) ([]string, []common.Hash, []common.Hash) {
 	var (
+		nodePaths  []string
 		nodeHashes []common.Hash
-		nodePaths  []SyncPath
 		codeHashes []common.Hash
 	)
 	for !s.queue.Empty() && (max == 0 || len(nodeHashes)+len(codeHashes) < max) {
@@ -235,62 +249,77 @@
 		s.queue.Pop()
 		s.fetches[depth]++
 
-		hash := item.(common.Hash)
-		if req, ok := s.nodeReqs[hash]; ok {
-			nodeHashes = append(nodeHashes, hash)
-			nodePaths = append(nodePaths, NewSyncPath(req.path))
-		} else {
-			codeHashes = append(codeHashes, hash)
+		switch item.(type) {
+		case common.Hash:
+			codeHashes = append(codeHashes, item.(common.Hash))
+		case string:
+			path := item.(string)
+			req, ok := s.nodeReqs[path]
+			if !ok {
+				log.Error("Missing node request", "path", path)
+				continue // System very wrong, shouldn't happen
+			}
+			nodePaths = append(nodePaths, path)
+			nodeHashes = append(nodeHashes, req.hash)
 		}
 	}
-	return nodeHashes, nodePaths, codeHashes
+	return nodePaths, nodeHashes, codeHashes
 }
 
-// Process injects the received data for requested item. Note it can
+// ProcessCode injects the received data for the requested item. Note it can
 // happpen that the single response commits two pending requests(e.g.
 // there are two requests one for code and one for node but the hash
 // is same). In this case the second response for the same hash will
 // be treated as "non-requested" item or "already-processed" item but
 // there is no downside.
-func (s *Sync) Process(result SyncResult) error {
-	// If the item was not requested either for code or node, bail out
-	if s.nodeReqs[result.Hash] == nil && s.codeReqs[result.Hash] == nil {
+func (s *Sync) ProcessCode(result CodeSyncResult) error {
+	// If the code was not requested or it's already processed, bail out
+	req := s.codeReqs[result.Hash]
+	if req == nil {
 		return ErrNotRequested
 	}
-	// There is an pending code request for this data, commit directly
-	var filled bool
-	if req := s.codeReqs[result.Hash]; req != nil && req.data == nil {
-		filled = true
-		req.data = result.Data
-		s.commit(req)
-	}
-	// There is an pending node request for this data, fill it.
-	if req := s.nodeReqs[result.Hash]; req != nil && req.data == nil {
-		filled = true
-		// Decode the node data content and update the request
-		node, err := decodeNode(result.Hash[:], result.Data)
-		if err != nil {
-			return err
-		}
-		req.data = result.Data
-
-		// Create and schedule a request for all the children nodes
-		requests, err := s.children(req, node)
-		if err != nil {
-			return err
-		}
-		if len(requests) == 0 && req.deps == 0 {
-			s.commit(req)
-		} else {
-			req.deps += len(requests)
-			for _, child := range requests {
-				s.schedule(child)
-			}
-		}
-	}
-	if !filled {
+	if req.data != nil {
 		return ErrAlreadyProcessed
 	}
+	req.data = result.Data
+	return s.commitCodeRequest(req)
+}
+
+// ProcessNode injects the received data for the requested item. Note it can
+// happen that a single response commits two pending requests (e.g. there are
+// two requests, one for code and one for node, but the hash is the same). In
+// this case the second response for the same hash will be treated as a
+// "non-requested" or an "already-processed" item, but there is no downside.
+
+// ProcessNode injects the received data for requested item. Note it can
+// happen that a single response commits two pending requests (e.g.
+// there are two requests, one for code and one for a trie node, but
+// the hash is the same). In this case the second response for the same
+// hash will be treated as a "non-requested" or "already-processed" item,
+// but there is no downside.
+func (s *Sync) ProcessNode(result NodeSyncResult) error {
+	// If the trie node was not requested or it's already processed, bail out
+	req := s.nodeReqs[result.Path]
+	if req == nil {
+		return ErrNotRequested
+	}
+	if req.data != nil {
+		return ErrAlreadyProcessed
+	}
+	// Decode the node data content and update the request
+	node, err := decodeNode(req.hash.Bytes(), result.Data)
+	if err != nil {
+		return err
+	}
+	req.data = result.Data
+
+	// Create and schedule a request for all the children nodes
+	requests, err := s.children(req, node)
+	if err != nil {
+		return err
+	}
+	if len(requests) == 0 && req.deps == 0 {
+		s.commitNodeRequest(req)
+	} else {
+		req.deps += len(requests)
+		for _, child := range requests {
+			s.scheduleNodeRequest(child)
+		}
+	}
 	return nil
 }

@@ -298,11 +327,11 @@ func (s *Sync) Process(result SyncResult) error {
 // storage, returning any occurred error.
 func (s *Sync) Commit(dbw ethdb.Batch) error {
 	// Dump the membatch into a database dbw
-	for key, value := range s.membatch.nodes {
-		rawdb.WriteTrieNode(dbw, key, value)
+	for path, value := range s.membatch.nodes {
+		rawdb.WriteTrieNode(dbw, s.membatch.hashes[path], value)
 	}
-	for key, value := range s.membatch.codes {
-		rawdb.WriteCode(dbw, key, value)
+	for hash, value := range s.membatch.codes {
+		rawdb.WriteCode(dbw, hash, value)
 	}
 	// Drop the membatch data and return
 	s.membatch = newSyncMemBatch()
@@ -317,23 +346,31 @@ func (s *Sync) Pending() int {
 // schedule inserts a new state retrieval request into the fetch queue. If there
 // is already a pending request for this node, the new request will be discarded
 // and only a parent reference added to the old one.
-func (s *Sync) schedule(req *request) {
-	var reqset = s.nodeReqs
-	if req.code {
-		reqset = s.codeReqs
+func (s *Sync) scheduleNodeRequest(req *nodeRequest) {
+	s.nodeReqs[string(req.path)] = req
+
+	// Schedule the request for future retrieval. This queue is shared
+	// by both node requests and code requests.
+	prio := int64(len(req.path)) << 56 // depth >= 128 will never happen, storage leaves will be included in their parents
+	for i := 0; i < 14 && i < len(req.path); i++ {
+		prio |= int64(15-req.path[i]) << (52 - i*4) // 15-nibble => lexicographic order
 	}
+	s.queue.Push(string(req.path), prio)
+}
+
+// scheduleCodeRequest inserts a new code retrieval request into the fetch queue. If
+// there is already a pending request for this code hash, the new request will be
+// discarded and only a parent reference added to the old one.
+func (s *Sync) scheduleCodeRequest(req *codeRequest) {
 	// If we're already requesting this node, add a new reference and stop
-	if old, ok := reqset[req.hash]; ok {
+	if old, ok := s.codeReqs[req.hash]; ok {
 		old.parents = append(old.parents, req.parents...)
 		return
 	}
-	reqset[req.hash] = req
+	s.codeReqs[req.hash] = req

 	// Schedule the request for future retrieval. This queue is shared
-	// by both node requests and code requests. It can happen that there
-	// is a trie node and code has same hash. In this case two elements
-	// with same hash and same or different depth will be pushed. But it's
-	// ok the worst case is the second response will be treated as duplicated.
+	// by both node requests and code requests.
 	prio := int64(len(req.path)) << 56 // depth >= 128 will never happen, storage leaves will be included in their parents
 	for i := 0; i < 14 && i < len(req.path); i++ {
 		prio |= int64(15-req.path[i]) << (52 - i*4) // 15-nibble => lexicographic order
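The priority packing above puts the request depth in the top byte and the first 14 path nibbles, bitwise-inverted, below it, so a max-priority queue (geth's `prque` pops the highest priority first) retrieves deeper nodes first and, within one depth, paths in ascending lexicographic order. A standalone sketch of the encoding; the helper name is made up for illustration:

```go
package main

import "fmt"

// encodePrio mirrors the scheduler's priority packing: depth in the top
// byte, then up to 14 path nibbles, each inverted (15-n) so that smaller
// nibbles yield numerically larger priorities.
func encodePrio(path []byte) int64 {
	prio := int64(len(path)) << 56
	for i := 0; i < 14 && i < len(path); i++ {
		prio |= int64(15-path[i]) << (52 - i*4)
	}
	return prio
}

func main() {
	a := encodePrio([]byte{0x0, 0x1}) // path nibbles 0,1
	b := encodePrio([]byte{0x0, 0x2}) // path nibbles 0,2
	c := encodePrio([]byte{0x0})      // shallower path
	fmt.Println(a > b) // true: 0x01 pops before 0x02 (lexicographic order)
	fmt.Println(a > c) // true: deeper requests pop before shallower ones
}
```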
@@ -343,7 +380,7 @@

 // children retrieves all the missing children of a state trie entry for future
 // retrieval scheduling.
-func (s *Sync) children(req *request, object node) ([]*request, error) {
+func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
 	// Gather all the children of the node, irrelevant whether known or not
 	type child struct {
 		path []byte
@@ -374,7 +411,7 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
 		panic(fmt.Sprintf("unknown node: %+v", node))
 	}
 	// Iterate over the children, and request all unknown ones
-	requests := make([]*request, 0, len(children))
+	requests := make([]*nodeRequest, 0, len(children))
 	for _, child := range children {
 		// Notify any external watcher of a new key/value node
 		if req.callback != nil {
@@ -386,7 +423,7 @@
 				paths = append(paths, hexToKeybytes(child.path[:2*common.HashLength]))
 				paths = append(paths, hexToKeybytes(child.path[2*common.HashLength:]))
 			}
-			if err := req.callback(paths, child.path, node, req.hash); err != nil {
+			if err := req.callback(paths, child.path, node, req.hash, req.path); err != nil {
 				return nil, err
 			}
 		}
@@ -394,20 +431,20 @@
 		// If the child references another node, resolve or schedule
 		if node, ok := (child.node).(hashNode); ok {
 			// Try to resolve the node from the local database
-			hash := common.BytesToHash(node)
-			if s.membatch.hasNode(hash) {
+			if s.membatch.hasNode(child.path) {
 				continue
 			}
 			// If database says duplicate, then at least the trie node is present
 			// and we hold the assumption that it's NOT legacy contract code.
-			if rawdb.HasTrieNode(s.database, hash) {
+			chash := common.BytesToHash(node)
+			if rawdb.HasTrieNode(s.database, chash) {
 				continue
 			}
 			// Locally unknown node, schedule for retrieval
-			requests = append(requests, &request{
+			requests = append(requests, &nodeRequest{
 				path:     child.path,
-				hash:     hash,
-				parents:  []*request{req},
+				hash:     chash,
+				parent:   req,
 				callback: req.callback,
 			})
 		}
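Completion now propagates upward through a single `parent` link instead of a `parents` slice: each fulfilled child decrements its parent's `deps` counter, and a parent that reaches zero commits in turn, as the commit functions in the next hunk show. A toy model of that cascade; the types here are illustrative only:

```go
package main

import "fmt"

// node is a toy stand-in for a nodeRequest: one parent link and a count
// of children still outstanding.
type node struct {
	name   string
	parent *node
	deps   int
}

// commit finalizes n and cascades to any parent whose last outstanding
// dependency this was, mirroring commitNodeRequest's recursion.
func commit(n *node) {
	fmt.Println("committed", n.name)
	if n.parent != nil {
		n.parent.deps--
		if n.parent.deps == 0 {
			commit(n.parent)
		}
	}
}

func main() {
	root := &node{name: "root", deps: 2}
	left := &node{name: "left", parent: root}
	right := &node{name: "right", parent: root}

	commit(left)  // root still has one dependency outstanding
	commit(right) // root's counter hits zero, so it commits too
}
```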
@@ -418,22 +455,40 @@
 // commit finalizes a retrieval request and stores it into the membatch. If any
 // of the referencing parent requests complete due to this commit, they are also
 // committed themselves.
-func (s *Sync) commit(req *request) (err error) {
+func (s *Sync) commitNodeRequest(req *nodeRequest) error {
 	// Write the node content to the membatch
-	if req.code {
-		s.membatch.codes[req.hash] = req.data
-		delete(s.codeReqs, req.hash)
-		s.fetches[len(req.path)]--
-	} else {
-		s.membatch.nodes[req.hash] = req.data
-		delete(s.nodeReqs, req.hash)
-		s.fetches[len(req.path)]--
-	}
-	// Check all parents for completion
-	for _, parent := range req.parents {
-		parent.deps--
-		if parent.deps == 0 {
-			if err := s.commit(parent); err != nil {
+	s.membatch.nodes[string(req.path)] = req.data
+	s.membatch.hashes[string(req.path)] = req.hash
+
+	delete(s.nodeReqs, string(req.path))
+	s.fetches[len(req.path)]--
+
+	// Check parent for completion
+	if req.parent != nil {
+		req.parent.deps--
+		if req.parent.deps == 0 {
+			if err := s.commitNodeRequest(req.parent); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// commitCodeRequest finalizes a retrieval request and stores it into the membatch. If
+// any of the referencing parent requests complete due to this commit, they are also
+// committed themselves.
+func (s *Sync) commitCodeRequest(req *codeRequest) error {
+	// Write the node content to the membatch
+	s.membatch.codes[req.hash] = req.data
+	delete(s.codeReqs, req.hash)
+	s.fetches[len(req.path)]--
+
+	// Check all parents for completion
+	for _, parent := range req.parents {
+		parent.deps--
+		if parent.deps == 0 {
+			if err := s.commitNodeRequest(parent); err != nil {
 				return err
 			}
 		}
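Because the membatch is keyed by path after this change, it has to carry a parallel path-to-hash index so that `Commit` can still write database entries under hash keys. A minimal sketch of that two-map flush, with made-up names standing in for the real membatch and database types:

```go
package main

import "fmt"

// memBatch mimics the sync membatch: node blobs keyed by path, plus a
// parallel path->hash index consulted when flushing to the hash-keyed
// database.
type memBatch struct {
	nodes  map[string][]byte // path -> RLP blob
	hashes map[string]string // path -> hash (hex string here for brevity)
}

func main() {
	batch := memBatch{
		nodes:  map[string][]byte{"\x01\x02": []byte("node-rlp")},
		hashes: map[string]string{"\x01\x02": "0xabc123"},
	}
	db := map[string][]byte{}

	// Flush: iterate by path, but store under the recorded hash.
	for path, blob := range batch.nodes {
		db[batch.hashes[path]] = blob
	}
	fmt.Printf("%q\n", db["0xabc123"]) // "node-rlp"
}
```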
diff --git a/trie/sync_test.go b/trie/sync_test.go
index 4c2c50d7a1..472c31a63b 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -87,6 +87,13 @@ func checkTrieConsistency(db *Database, root common.Hash) error {
 	return it.Error()
 }

+// trieElement represents the element in the state trie (bytecode or trie node).
+type trieElement struct {
+	path     string
+	hash     common.Hash
+	syncPath SyncPath
+}
+
 // Tests that an empty trie is not scheduled for syncing.
 func TestEmptySync(t *testing.T) {
 	dbA := NewDatabase(memorydb.New())
@@ -96,8 +103,8 @@
 	for i, trie := range []*Trie{emptyA, emptyB} {
 		sync := NewSync(trie.Hash(), memorydb.New(), nil)
-		if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
-			t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, nodes, paths, codes)
+		if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 {
+			t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, paths, nodes, codes)
 		}
 	}
 }
@@ -118,35 +125,38 @@ func testIterativeSync(t *testing.T, count int, bypath bool) {
 	triedb := NewDatabase(diskdb)

 	sched := NewSync(srcTrie.Hash(), diskdb, nil)
-	nodes, paths, codes := sched.Missing(count)
-	var (
-		hashQueue []common.Hash
-		pathQueue []SyncPath
-	)
-	if !bypath {
-		hashQueue = append(append(hashQueue[:0], nodes...), codes...)
-	} else {
-		hashQueue = append(hashQueue[:0], codes...)
-		pathQueue = append(pathQueue[:0], paths...)
+	// The code requests are ignored here since there is no code
+	// at the testing trie.
+	paths, nodes, _ := sched.Missing(count)
+	var elements []trieElement
+	for i := 0; i < len(paths); i++ {
+		elements = append(elements, trieElement{
+			path:     paths[i],
+			hash:     nodes[i],
+			syncPath: NewSyncPath([]byte(paths[i])),
+		})
 	}
-	for len(hashQueue)+len(pathQueue) > 0 {
-		results := make([]SyncResult, len(hashQueue)+len(pathQueue))
-		for i, hash := range hashQueue {
-			data, err := srcDb.Node(hash)
-			if err != nil {
-				t.Fatalf("failed to retrieve node data for hash %x: %v", hash, err)
+	for len(elements) > 0 {
+		results := make([]NodeSyncResult, len(elements))
+		if !bypath {
+			for i, element := range elements {
+				data, err := srcDb.Node(element.hash)
+				if err != nil {
+					t.Fatalf("failed to retrieve node data for hash %x: %v", element.hash, err)
+				}
+				results[i] = NodeSyncResult{element.path, data}
 			}
-			results[i] = SyncResult{hash, data}
-		}
-		for i, path := range pathQueue {
-			data, _, err := srcTrie.TryGetNode(path[0])
-			if err != nil {
-				t.Fatalf("failed to retrieve node data for path %x: %v", path, err)
+		} else {
+			for i, element := range elements {
+				data, _, err := srcTrie.TryGetNode(element.syncPath[len(element.syncPath)-1])
+				if err != nil {
+					t.Fatalf("failed to retrieve node data for path %x: %v", element.path, err)
+				}
+				results[i] = NodeSyncResult{element.path, data}
 			}
-			results[len(hashQueue)+i] = SyncResult{crypto.Keccak256Hash(data), data}
 		}
 		for _, result := range results {
-			if err := sched.Process(result); err != nil {
+			if err := sched.ProcessNode(result); err != nil {
 				t.Fatalf("failed to process result %v", err)
 			}
 		}
@@ -156,12 +166,14 @@ func testIterativeSync(t *testing.T, count int, bypath bool) {
 		}
 		batch.Write()

-		nodes, paths, codes = sched.Missing(count)
-		if !bypath {
-			hashQueue = append(append(hashQueue[:0], nodes...), codes...)
-		} else {
-			hashQueue = append(hashQueue[:0], codes...)
-			pathQueue = append(pathQueue[:0], paths...)
+		paths, nodes, _ = sched.Missing(count)
+		elements = elements[:0]
+		for i := 0; i < len(paths); i++ {
+			elements = append(elements, trieElement{
+				path:     paths[i],
+				hash:     nodes[i],
+				syncPath: NewSyncPath([]byte(paths[i])),
+			})
 		}
 	}
 	// Cross check that the two tries are in sync
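Each test below repeats the same conversion: call `Missing`, then zip the parallel path and hash slices into `trieElement` values before draining them. A hypothetical package-internal helper that a follow-up could factor out (it assumes the `trie` package's own `Sync`, `trieElement`, and `NewSyncPath`; it is not part of this diff):

```go
// missingElements drains up to max scheduled requests from sched and
// packages the parallel path/hash slices into trieElement values,
// mirroring the inline loops repeated throughout this test file.
func missingElements(sched *Sync, max int) []trieElement {
	paths, nodes, _ := sched.Missing(max)
	elements := make([]trieElement, 0, len(paths))
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
	return elements
}
```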
@@ -179,21 +191,29 @@ func TestIterativeDelayedSync(t *testing.T) {
 	triedb := NewDatabase(diskdb)

 	sched := NewSync(srcTrie.Hash(), diskdb, nil)
-	nodes, _, codes := sched.Missing(10000)
-	queue := append(append([]common.Hash{}, nodes...), codes...)
-
-	for len(queue) > 0 {
+	// The code requests are ignored here since there is no code
+	// at the testing trie.
+	paths, nodes, _ := sched.Missing(10000)
+	var elements []trieElement
+	for i := 0; i < len(paths); i++ {
+		elements = append(elements, trieElement{
+			path:     paths[i],
+			hash:     nodes[i],
+			syncPath: NewSyncPath([]byte(paths[i])),
+		})
+	}
+	for len(elements) > 0 {
 		// Sync only half of the scheduled nodes
-		results := make([]SyncResult, len(queue)/2+1)
-		for i, hash := range queue[:len(results)] {
-			data, err := srcDb.Node(hash)
+		results := make([]NodeSyncResult, len(elements)/2+1)
+		for i, element := range elements[:len(results)] {
+			data, err := srcDb.Node(element.hash)
 			if err != nil {
-				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
 			}
-			results[i] = SyncResult{hash, data}
+			results[i] = NodeSyncResult{element.path, data}
 		}
 		for _, result := range results {
-			if err := sched.Process(result); err != nil {
+			if err := sched.ProcessNode(result); err != nil {
 				t.Fatalf("failed to process result %v", err)
 			}
 		}
@@ -203,8 +223,15 @@ func TestIterativeDelayedSync(t *testing.T) {
 		}
 		batch.Write()

-		nodes, _, codes = sched.Missing(10000)
-		queue = append(append(queue[len(results):], nodes...), codes...)
+		paths, nodes, _ = sched.Missing(10000)
+		elements = elements[len(results):]
+		for i := 0; i < len(paths); i++ {
+			elements = append(elements, trieElement{
+				path:     paths[i],
+				hash:     nodes[i],
+				syncPath: NewSyncPath([]byte(paths[i])),
+			})
+		}
 	}
 	// Cross check that the two tries are in sync
 	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
@@ -225,24 +252,30 @@ func testIterativeRandomSync(t *testing.T, count int) {
 	triedb := NewDatabase(diskdb)

 	sched := NewSync(srcTrie.Hash(), diskdb, nil)
-	queue := make(map[common.Hash]struct{})
-	nodes, _, codes := sched.Missing(count)
-	for _, hash := range append(nodes, codes...) {
-		queue[hash] = struct{}{}
+	// The code requests are ignored here since there is no code
+	// at the testing trie.
+	paths, nodes, _ := sched.Missing(count)
+	queue := make(map[string]trieElement)
+	for i, path := range paths {
+		queue[path] = trieElement{
+			path:     paths[i],
+			hash:     nodes[i],
+			syncPath: NewSyncPath([]byte(paths[i])),
+		}
 	}
 	for len(queue) > 0 {
 		// Fetch all the queued nodes in a random order
-		results := make([]SyncResult, 0, len(queue))
-		for hash := range queue {
-			data, err := srcDb.Node(hash)
+		results := make([]NodeSyncResult, 0, len(queue))
+		for path, element := range queue {
+			data, err := srcDb.Node(element.hash)
 			if err != nil {
-				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
 			}
-			results = append(results, SyncResult{hash, data})
+			results = append(results, NodeSyncResult{path, data})
 		}
 		// Feed the retrieved results back and queue new tasks
 		for _, result := range results {
-			if err := sched.Process(result); err != nil {
+			if err := sched.ProcessNode(result); err != nil {
 				t.Fatalf("failed to process result %v", err)
 			}
 		}
@@ -252,10 +285,14 @@ func testIterativeRandomSync(t *testing.T, count int) {
 		}
 		batch.Write()

-		queue = make(map[common.Hash]struct{})
-		nodes, _, codes = sched.Missing(count)
-		for _, hash := range append(nodes, codes...) {
-			queue[hash] = struct{}{}
+		paths, nodes, _ = sched.Missing(count)
+		queue = make(map[string]trieElement)
+		for i, path := range paths {
+			queue[path] = trieElement{
+				path:     path,
+				hash:     nodes[i],
+				syncPath: NewSyncPath([]byte(path)),
+			}
 		}
 	}
 	// Cross check that the two tries are in sync
@@ -273,20 +310,26 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
 	triedb := NewDatabase(diskdb)

 	sched := NewSync(srcTrie.Hash(), diskdb, nil)
-	queue := make(map[common.Hash]struct{})
-	nodes, _, codes := sched.Missing(10000)
-	for _, hash := range append(nodes, codes...) {
-		queue[hash] = struct{}{}
+	// The code requests are ignored here since there is no code
+	// at the testing trie.
+	paths, nodes, _ := sched.Missing(10000)
+	queue := make(map[string]trieElement)
+	for i, path := range paths {
+		queue[path] = trieElement{
+			path:     path,
+			hash:     nodes[i],
+			syncPath: NewSyncPath([]byte(path)),
+		}
 	}
 	for len(queue) > 0 {
 		// Sync only half of the scheduled nodes, even those in random order
-		results := make([]SyncResult, 0, len(queue)/2+1)
-		for hash := range queue {
-			data, err := srcDb.Node(hash)
+		results := make([]NodeSyncResult, 0, len(queue)/2+1)
+		for path, element := range queue {
+			data, err := srcDb.Node(element.hash)
 			if err != nil {
-				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
 			}
-			results = append(results, SyncResult{hash, data})
+			results = append(results, NodeSyncResult{path, data})

 			if len(results) >= cap(results) {
 				break
@@ -294,7 +337,7 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
 		}
 		// Feed the retrieved results back and queue new tasks
 		for _, result := range results {
-			if err := sched.Process(result); err != nil {
+			if err := sched.ProcessNode(result); err != nil {
 				t.Fatalf("failed to process result %v", err)
 			}
 		}
@@ -304,11 +347,15 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
 		}
 		batch.Write()
 		for _, result := range results {
-			delete(queue, result.Hash)
+			delete(queue, result.Path)
 		}
-		nodes, _, codes = sched.Missing(10000)
-		for _, hash := range append(nodes, codes...) {
-			queue[hash] = struct{}{}
+		paths, nodes, _ = sched.Missing(10000)
+		for i, path := range paths {
+			queue[path] = trieElement{
+				path:     path,
+				hash:     nodes[i],
+				syncPath: NewSyncPath([]byte(path)),
+			}
 		}
 	}
 	// Cross check that the two tries are in sync
@@ -326,26 +373,35 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
 	triedb := NewDatabase(diskdb)

 	sched := NewSync(srcTrie.Hash(), diskdb, nil)
-	nodes, _, codes := sched.Missing(0)
-	queue := append(append([]common.Hash{}, nodes...), codes...)
+	// The code requests are ignored here since there is no code
+	// at the testing trie.
+	paths, nodes, _ := sched.Missing(0)
+	var elements []trieElement
+	for i := 0; i < len(paths); i++ {
+		elements = append(elements, trieElement{
+			path:     paths[i],
+			hash:     nodes[i],
+			syncPath: NewSyncPath([]byte(paths[i])),
+		})
+	}
 	requested := make(map[common.Hash]struct{})

-	for len(queue) > 0 {
-		results := make([]SyncResult, len(queue))
-		for i, hash := range queue {
-			data, err := srcDb.Node(hash)
+	for len(elements) > 0 {
+		results := make([]NodeSyncResult, len(elements))
+		for i, element := range elements {
+			data, err := srcDb.Node(element.hash)
 			if err != nil {
-				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
 			}
-			if _, ok := requested[hash]; ok {
-				t.Errorf("hash %x already requested once", hash)
+			if _, ok := requested[element.hash]; ok {
+				t.Errorf("hash %x already requested once", element.hash)
 			}
-			requested[hash] = struct{}{}
+			requested[element.hash] = struct{}{}

-			results[i] = SyncResult{hash, data}
+			results[i] = NodeSyncResult{element.path, data}
 		}
 		for _, result := range results {
-			if err := sched.Process(result); err != nil {
+			if err := sched.ProcessNode(result); err != nil {
 				t.Fatalf("failed to process result %v", err)
 			}
 		}
@@ -355,8 +411,15 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
 		}
 		batch.Write()

-		nodes, _, codes = sched.Missing(0)
-		queue = append(append(queue[:0], nodes...), codes...)
+		paths, nodes, _ = sched.Missing(0)
+		elements = elements[:0]
+		for i := 0; i < len(paths); i++ {
+			elements = append(elements, trieElement{
+				path:     paths[i],
+				hash:     nodes[i],
+				syncPath: NewSyncPath([]byte(paths[i])),
+			})
+		}
 	}
 	// Cross check that the two tries are in sync
 	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
@@ -373,23 +436,34 @@ func TestIncompleteSync(t *testing.T) {
 	triedb := NewDatabase(diskdb)

 	sched := NewSync(srcTrie.Hash(), diskdb, nil)
-	var added []common.Hash
-
-	nodes, _, codes := sched.Missing(1)
-	queue := append(append([]common.Hash{}, nodes...), codes...)
-	for len(queue) > 0 {
+	// The code requests are ignored here since there is no code
+	// at the testing trie.
+	var (
+		added    []common.Hash
+		elements []trieElement
+		root     = srcTrie.Hash()
+	)
+	paths, nodes, _ := sched.Missing(1)
+	for i := 0; i < len(paths); i++ {
+		elements = append(elements, trieElement{
+			path:     paths[i],
+			hash:     nodes[i],
+			syncPath: NewSyncPath([]byte(paths[i])),
+		})
+	}
+	for len(elements) > 0 {
 		// Fetch a batch of trie nodes
-		results := make([]SyncResult, len(queue))
-		for i, hash := range queue {
-			data, err := srcDb.Node(hash)
+		results := make([]NodeSyncResult, len(elements))
+		for i, element := range elements {
+			data, err := srcDb.Node(element.hash)
 			if err != nil {
-				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
 			}
-			results[i] = SyncResult{hash, data}
+			results[i] = NodeSyncResult{element.path, data}
 		}
 		// Process each of the trie nodes
 		for _, result := range results {
-			if err := sched.Process(result); err != nil {
+			if err := sched.ProcessNode(result); err != nil {
 				t.Fatalf("failed to process result %v", err)
 			}
 		}
@@ -398,27 +472,36 @@ func TestIncompleteSync(t *testing.T) {
 			t.Fatalf("failed to commit data: %v", err)
 		}
 		batch.Write()
+
 		for _, result := range results {
-			added = append(added, result.Hash)
+			hash := crypto.Keccak256Hash(result.Data)
+			if hash != root {
+				added = append(added, hash)
+			}
 			// Check that all known sub-tries in the synced trie are complete
-			if err := checkTrieConsistency(triedb, result.Hash); err != nil {
+			if err := checkTrieConsistency(triedb, hash); err != nil {
 				t.Fatalf("trie inconsistent: %v", err)
 			}
 		}
 		// Fetch the next batch to retrieve
-		nodes, _, codes = sched.Missing(1)
-		queue = append(append(queue[:0], nodes...), codes...)
+		paths, nodes, _ = sched.Missing(1)
+		elements = elements[:0]
+		for i := 0; i < len(paths); i++ {
+			elements = append(elements, trieElement{
+				path:     paths[i],
+				hash:     nodes[i],
+				syncPath: NewSyncPath([]byte(paths[i])),
+			})
+		}
 	}
 	// Sanity check that removing any node from the database is detected
-	for _, node := range added[1:] {
-		key := node.Bytes()
-		value, _ := diskdb.Get(key)
-
-		diskdb.Delete(key)
-		if err := checkTrieConsistency(triedb, added[0]); err == nil {
-			t.Fatalf("trie inconsistency not caught, missing: %x", key)
+	for _, hash := range added {
+		value, _ := diskdb.Get(hash.Bytes())
+		diskdb.Delete(hash.Bytes())
+		if err := checkTrieConsistency(triedb, root); err == nil {
+			t.Fatalf("trie inconsistency not caught, missing: %x", hash)
 		}
-		diskdb.Put(key, value)
+		diskdb.Put(hash.Bytes(), value)
 	}
 }

@@ -433,21 +516,33 @@ func TestSyncOrdering(t *testing.T) {
 	triedb := NewDatabase(diskdb)

 	sched := NewSync(srcTrie.Hash(), diskdb, nil)
-	nodes, paths, _ := sched.Missing(1)
-	queue := append([]common.Hash{}, nodes...)
-	reqs := append([]SyncPath{}, paths...)
+	// The code requests are ignored here since there is no code
+	// at the testing trie.
+	var (
+		reqs     []SyncPath
+		elements []trieElement
+	)
+	paths, nodes, _ := sched.Missing(1)
+	for i := 0; i < len(paths); i++ {
+		elements = append(elements, trieElement{
+			path:     paths[i],
+			hash:     nodes[i],
+			syncPath: NewSyncPath([]byte(paths[i])),
+		})
+		reqs = append(reqs, NewSyncPath([]byte(paths[i])))
+	}

-	for len(queue) > 0 {
-		results := make([]SyncResult, len(queue))
-		for i, hash := range queue {
-			data, err := srcDb.Node(hash)
+	for len(elements) > 0 {
+		results := make([]NodeSyncResult, len(elements))
+		for i, element := range elements {
+			data, err := srcDb.Node(element.hash)
 			if err != nil {
-				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
 			}
-			results[i] = SyncResult{hash, data}
+			results[i] = NodeSyncResult{element.path, data}
 		}
 		for _, result := range results {
-			if err := sched.Process(result); err != nil {
+			if err := sched.ProcessNode(result); err != nil {
 				t.Fatalf("failed to process result %v", err)
 			}
 		}
@@ -457,9 +552,16 @@ func TestSyncOrdering(t *testing.T) {
 		}
 		batch.Write()

-		nodes, paths, _ = sched.Missing(1)
-		queue = append(queue[:0], nodes...)
-		reqs = append(reqs, paths...)
+		paths, nodes, _ = sched.Missing(1)
+		elements = elements[:0]
+		for i := 0; i < len(paths); i++ {
+			elements = append(elements, trieElement{
+				path:     paths[i],
+				hash:     nodes[i],
+				syncPath: NewSyncPath([]byte(paths[i])),
+			})
+			reqs = append(reqs, NewSyncPath([]byte(paths[i])))
+		}
 	}
 	// Cross check that the two tries are in sync
 	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
diff --git a/trie/trie.go b/trie/trie.go
index 0c81cb2c39..1e168402ad 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -42,18 +42,18 @@ var (
 // LeafCallback is a callback type invoked when a trie operation reaches a leaf
 // node.
 //
-// The paths is a path tuple identifying a particular trie node either in a single
-// trie (account) or a layered trie (account -> storage). Each path in the tuple
+// The keys form a path tuple identifying a particular trie node either in a single
+// trie (account) or a layered trie (account -> storage). Each key in the tuple
 // is in the raw format(32 bytes).
 //
-// The hexpath is a composite hexary path identifying the trie node. All the key
+// The path is a composite hexary path identifying the trie node. All the key
 // bytes are converted to the hexary nibbles and composited with the parent path
 // if the trie node is in a layered trie.
 //
 // It's used by state sync and commit to allow handling external references
 // between account and storage tries. And also it's used in the state healing
 // for extracting the raw states(leaf nodes) with corresponding paths.
-type LeafCallback func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error
+type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error

 // Trie is a Merkle Patricia Trie.
 // The zero value is an empty trie with no database.
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 371bdf01c0..7baae88aea 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -625,7 +625,7 @@ func BenchmarkCommitAfterHash(b *testing.B) {
 		benchmarkCommitAfterHash(b, nil)
 	})
 	var a types.StateAccount
-	onleaf := func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error {
+	onleaf := func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash, parentPath []byte) error {
 		rlp.DecodeBytes(leaf, &a)
 		return nil
 	}
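For downstream callers, the practical upshot of the widened `LeafCallback` is the trailing `parentPath` argument, which identifies the parent request by path rather than only by hash. A hedged sketch of a callback under the new five-argument signature, assuming the post-refactor geth packages; the logging is illustrative, and a real callback (like `onAccount` in core/state/sync.go above) would schedule storage and code retrievals instead:

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// onAccount decodes each account leaf and reports where it hangs in
	// the trie; note the extra parentPath argument at the end.
	var onAccount trie.LeafCallback = func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error {
		var acc types.StateAccount
		if err := rlp.DecodeBytes(leaf, &acc); err != nil {
			return err
		}
		log.Printf("account leaf at path %x, parent %x (path %x), storage root %x", path, parent, parentPath, acc.Root)
		return nil
	}
	_ = onAccount // would be passed to trie.NewSync or Trie.Commit
}
```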