// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package discover implements the Node Discovery Protocol.
//
// The Node Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
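//
// A minimal usage sketch (hypothetical caller; a Table is normally
// created for you by the UDP listener in this package):
//
//	var target NodeID
//	rand.Read(target[:])
//	for _, n := range tab.Lookup(target) {
//		fmt.Println("found node:", n)
//	}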
package discover

import (
	"crypto/rand"
	"encoding/binary"
	"errors"
	"fmt"
	"net"
	"sort"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
)

const (
	alpha      = 3  // Kademlia concurrency factor
	bucketSize = 16 // Kademlia bucket size
	hashBits   = len(common.Hash{}) * 8
	nBuckets   = hashBits + 1 // Number of buckets

	maxBondingPingPongs = 16
	maxFindnodeFailures = 5

	autoRefreshInterval = 1 * time.Hour
	seedCount           = 30
	seedMaxAge          = 5 * 24 * time.Hour
)

type Table struct {
	mutex   sync.Mutex        // protects buckets, their content, and nursery
	buckets [nBuckets]*bucket // index of known nodes by distance
	nursery []*Node           // bootstrap nodes
	db      *nodeDB           // database of known nodes

	refreshReq chan chan struct{}
	closeReq   chan struct{}
	closed     chan struct{}

	bondmu    sync.Mutex
	bonding   map[NodeID]*bondproc
	bondslots chan struct{} // limits total number of active bonding processes

	nodeAddedHook func(*Node) // for testing

	net  transport
	self *Node // metadata of the local node
}

type bondproc struct {
	err  error
	n    *Node
	done chan struct{}
}

// transport is implemented by the UDP transport.
// It is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
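//
// A minimal test double might look like this (a sketch along the lines
// of the stub used in this package's tests; hypothetical here):
//
//	type pingRecorder struct{ pinged map[NodeID]bool }
//
//	func (t *pingRecorder) ping(id NodeID, addr *net.UDPAddr) error {
//		t.pinged[id] = true
//		return nil
//	}
//	func (t *pingRecorder) waitping(id NodeID) error { return nil }
//	func (t *pingRecorder) findnode(toid NodeID, addr *net.UDPAddr, target NodeID) ([]*Node, error) {
//		return nil, nil
//	}
//	func (t *pingRecorder) close() {}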
type transport interface {
	ping(NodeID, *net.UDPAddr) error
	waitping(NodeID) error
	findnode(toid NodeID, addr *net.UDPAddr, target NodeID) ([]*Node, error)
	close()
}

// bucket contains nodes, ordered by their last activity. The entry
// that was most recently active is the first element in entries.
type bucket struct{ entries []*Node }

func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string) (*Table, error) {
	// If no node database was given, use an in-memory one.
	db, err := newNodeDB(nodeDBPath, Version, ourID)
	if err != nil {
		return nil, err
	}
	tab := &Table{
		net:        t,
		db:         db,
		self:       NewNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port)),
		bonding:    make(map[NodeID]*bondproc),
		bondslots:  make(chan struct{}, maxBondingPingPongs),
		refreshReq: make(chan chan struct{}),
		closeReq:   make(chan struct{}),
		closed:     make(chan struct{}),
	}
	// Fill the bonding slot channel; each token allows one concurrent
	// ping/pong bonding exchange.
	for i := 0; i < cap(tab.bondslots); i++ {
		tab.bondslots <- struct{}{}
	}
	for i := range tab.buckets {
		tab.buckets[i] = new(bucket)
	}
	go tab.refreshLoop()
	return tab, nil
}

// Self returns the local node.
// The returned node should not be modified by the caller.
func (tab *Table) Self() *Node {
	return tab.self
}

// ReadRandomNodes fills the given slice with random nodes from the
// table. It will not write the same node more than once. The nodes in
// the slice are copies and can be modified by the caller.
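//
// Usage sketch (hypothetical caller):
//
//	buf := make([]*Node, 16)
//	n := tab.ReadRandomNodes(buf)
//	for _, node := range buf[:n] {
//		fmt.Println(node)
//	}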
func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	// TODO: tree-based buckets would help here
	// Find all non-empty buckets and get a fresh slice of their entries.
	var buckets [][]*Node
	for _, b := range tab.buckets {
		if len(b.entries) > 0 {
			buckets = append(buckets, b.entries[:])
		}
	}
	if len(buckets) == 0 {
		return 0
	}
	// Shuffle the buckets.
	for i := uint32(len(buckets)) - 1; i > 0; i-- {
		j := randUint(i)
		buckets[i], buckets[j] = buckets[j], buckets[i]
	}
	// Move head of each bucket into buf, removing buckets that become empty.
	var i, j int
	for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
		b := buckets[j]
		// Copy the node so the caller can modify it freely, as the
		// doc comment promises. (&(*b[0]) would alias the table's entry.)
		cpy := *b[0]
		buf[i] = &cpy
		buckets[j] = b[1:]
		if len(b) == 1 {
			buckets = append(buckets[:j], buckets[j+1:]...)
		}
		if len(buckets) == 0 {
			// buf[i] was written in this iteration; count it before
			// leaving the loop early.
			i++
			break
		}
	}
	return i
}

func randUint(max uint32) uint32 {
	if max == 0 {
		return 0
	}
	var b [4]byte
	rand.Read(b[:])
	return binary.BigEndian.Uint32(b[:]) % max
}

// Close terminates the network listener and flushes the node database.
func (tab *Table) Close() {
	select {
	case <-tab.closed:
		// already closed.
	case tab.closeReq <- struct{}{}:
		<-tab.closed // wait for refreshLoop to end.
	}
}

// SetFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
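//
// Sketch of a typical call (the enode URL is an illustrative
// placeholder; MustParseNode is the parsing helper from this package):
//
//	fallbacks := []*Node{
//		MustParseNode("enode://<hex-node-id>@203.0.113.1:30303"),
//	}
//	if err := tab.SetFallbackNodes(fallbacks); err != nil {
//		// handle invalid fallback nodes
//	}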
func (tab *Table) SetFallbackNodes(nodes []*Node) error {
	for _, n := range nodes {
		if err := n.validateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
		}
	}
	tab.mutex.Lock()
	tab.nursery = make([]*Node, 0, len(nodes))
	for _, n := range nodes {
		cpy := *n
		// Recompute cpy.sha because the node might not have been
		// created by NewNode or ParseNode.
		cpy.sha = crypto.Keccak256Hash(n.ID[:])
		tab.nursery = append(tab.nursery, &cpy)
	}
	tab.mutex.Unlock()
	tab.refresh()
	return nil
}

// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (tab *Table) Resolve(targetID NodeID) *Node {
	// If the node is present in the local table, no
	// network interaction is required.
	hash := crypto.Keccak256Hash(targetID[:])
	tab.mutex.Lock()
	cl := tab.closest(hash, 1)
	tab.mutex.Unlock()
	if len(cl.entries) > 0 && cl.entries[0].ID == targetID {
		return cl.entries[0]
	}
	// Otherwise, do a network lookup.
	result := tab.Lookup(targetID)
	for _, n := range result {
		if n.ID == targetID {
			return n
		}
	}
	return nil
}

// Lookup performs a network search for nodes close
// to the given target. It approaches the target by querying
// nodes that are closer to it on each iteration.
// The given target does not need to be an actual node
// identifier.
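//
// For example, the refresh logic below performs a lookup with a
// freshly generated random target (sketch):
//
//	var target NodeID
//	rand.Read(target[:])
//	nodes := tab.Lookup(target)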
func (tab *Table) Lookup(targetID NodeID) []*Node {
	return tab.lookup(targetID, true)
}

func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
	var (
		target         = crypto.Keccak256Hash(targetID[:])
		asked          = make(map[NodeID]bool)
		seen           = make(map[NodeID]bool)
		reply          = make(chan []*Node, alpha)
		pendingQueries = 0
		result         *nodesByDistance
	)
	// don't query further if we hit ourselves.
	// unlikely to happen often in practice.
	asked[tab.self.ID] = true

	for {
		tab.mutex.Lock()
		// generate initial result set
		result = tab.closest(target, bucketSize)
		tab.mutex.Unlock()
		if len(result.entries) > 0 || !refreshIfEmpty {
			break
		}
		// The result set is empty, all nodes were dropped, refresh.
		// We actually wait for the refresh to complete here. The very
		// first query will hit this case and run the bootstrapping
		// logic.
		<-tab.refresh()
		refreshIfEmpty = false
	}

	for {
		// ask the alpha closest nodes that we haven't asked yet
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID] {
				asked[n.ID] = true
				pendingQueries++
				go func() {
					// Find potential neighbors to bond with
					r, err := tab.net.findnode(n.ID, n.addr(), targetID)
					if err != nil {
						// Bump the failure counter to detect and evacuate non-bonded entries
						fails := tab.db.findFails(n.ID) + 1
						tab.db.updateFindFails(n.ID, fails)
						glog.V(logger.Detail).Infof("Bumping failures for %x: %d", n.ID[:8], fails)

						if fails >= maxFindnodeFailures {
							glog.V(logger.Detail).Infof("Evacuating node %x: %d findnode failures", n.ID[:8], fails)
							tab.delete(n)
						}
					}
					reply <- tab.bondall(r)
				}()
			}
		}
		if pendingQueries == 0 {
			// we have asked all closest nodes, stop the search
			break
		}
		// wait for the next reply
		for _, n := range <-reply {
			if n != nil && !seen[n.ID] {
				seen[n.ID] = true
				result.push(n, bucketSize)
			}
		}
		pendingQueries--
	}
	return result.entries
}

// refresh requests a table refresh and returns a channel that is
// closed when the refresh has completed.
func (tab *Table) refresh() <-chan struct{} {
	done := make(chan struct{})
	select {
	case tab.refreshReq <- done:
	case <-tab.closed:
		close(done)
	}
	return done
}

// refreshLoop schedules doRefresh runs and coordinates shutdown.
func (tab *Table) refreshLoop() {
	var (
		timer   = time.NewTicker(autoRefreshInterval)
		waiting []chan struct{} // accumulates waiting callers while doRefresh runs
		done    chan struct{}   // where doRefresh reports completion
	)
	defer timer.Stop()
loop:
	for {
		select {
		case <-timer.C:
			if done == nil {
				done = make(chan struct{})
				go tab.doRefresh(done)
			}
		case req := <-tab.refreshReq:
			waiting = append(waiting, req)
			if done == nil {
				done = make(chan struct{})
				go tab.doRefresh(done)
			}
		case <-done:
			for _, ch := range waiting {
				close(ch)
			}
			waiting = nil
			done = nil
		case <-tab.closeReq:
			break loop
		}
	}

	if tab.net != nil {
		tab.net.close()
	}
	if done != nil {
		<-done
	}
	for _, ch := range waiting {
		close(ch)
	}
	tab.db.close()
	close(tab.closed)
}

// doRefresh performs a lookup for a random target to keep buckets
// full. Seed nodes are inserted if the table is empty (initial
// bootstrap or discarded faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512-bit value
	// (not hash-sized) and it is not easily possible to generate a
	// sha3 preimage that falls into a chosen bucket.
	// We perform a lookup with a random target instead.
	var target NodeID
	rand.Read(target[:])
	result := tab.lookup(target, false)
	if len(result) > 0 {
		return
	}

	// The table is empty. Load nodes from the database and insert
	// them. This should yield a few previously seen nodes that are
	// (hopefully) still alive.
	seeds := tab.db.querySeeds(seedCount, seedMaxAge)
	seeds = tab.bondall(append(seeds, tab.nursery...))
	if glog.V(logger.Debug) {
		if len(seeds) == 0 {
			glog.Infof("no seed nodes found")
		}
		for _, n := range seeds {
			age := time.Since(tab.db.lastPong(n.ID))
			glog.Infof("seed node (age %v): %v", age, n)
		}
	}
	tab.mutex.Lock()
	tab.stuff(seeds)
	tab.mutex.Unlock()

	// Finally, do a self lookup to fill up the buckets.
	tab.lookup(tab.self.ID, false)
}

// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
	// This is a very wasteful way to find the closest nodes but
	// obviously correct. I believe that tree-based buckets would make
	// this easier to implement efficiently.
	close := &nodesByDistance{target: target}
	for _, b := range tab.buckets {
		for _, n := range b.entries {
			close.push(n, nresults)
		}
	}
	return close
}

func (tab *Table) len() (n int) {
	for _, b := range tab.buckets {
		n += len(b.entries)
	}
	return n
}

// bondall bonds with all given nodes concurrently and returns
// those nodes for which bonding has probably succeeded.
func (tab *Table) bondall(nodes []*Node) (result []*Node) {
	rc := make(chan *Node, len(nodes))
	for i := range nodes {
		go func(n *Node) {
			nn, _ := tab.bond(false, n.ID, n.addr(), uint16(n.TCP))
			rc <- nn
		}(nodes[i])
	}
	for range nodes {
		if n := <-rc; n != nil {
			result = append(result, n)
		}
	}
	return result
}

// bond ensures the local node has a bond with the given remote node.
// It also attempts to insert the node into the table if bonding succeeds.
// The caller must not hold tab.mutex.
//
// A bond must be established before sending findnode requests.
// Both sides must have completed a ping/pong exchange for a bond to
// exist. The total number of active bonding processes is limited in
// order to restrain network use.
//
// bond is meant to operate idempotently in that bonding with a remote
// node which still remembers a previously established bond will work.
// The remote node will simply not send a ping back, causing waitping
// to time out.
//
// If pinged is true, the remote node has just pinged us and one half
// of the process can be skipped.
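//
// As an illustrative sketch, the message flow for a full bond
// (pinged == false) is roughly:
//
//	us -> remote: ping
//	remote -> us: pong   (tab.ping returns)
//	remote -> us: ping   (waitping returns)
//	us -> remote: pong   (sent by the UDP transport, not by this file)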
func (tab *Table) bond(pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16) (*Node, error) {
	if id == tab.self.ID {
		return nil, errors.New("is self")
	}
	// Retrieve a previously known node and any recent findnode failures
	node, fails := tab.db.node(id), 0
	if node != nil {
		fails = tab.db.findFails(id)
	}
	// If the node is unknown (non-bonded) or failed (remotely unknown), bond from scratch
	var result error
	age := time.Since(tab.db.lastPong(id))
	if node == nil || fails > 0 || age > nodeDBNodeExpiration {
		glog.V(logger.Detail).Infof("Bonding %x: known=%t, fails=%d age=%v", id[:8], node != nil, fails, age)

		tab.bondmu.Lock()
		w := tab.bonding[id]
		if w != nil {
			// Wait for an existing bonding process to complete.
			tab.bondmu.Unlock()
			<-w.done
		} else {
			// Register a new bonding process.
			w = &bondproc{done: make(chan struct{})}
			tab.bonding[id] = w
			tab.bondmu.Unlock()
			// Do the ping/pong. The result goes into w.
			tab.pingpong(w, pinged, id, addr, tcpPort)
			// Unregister the process after it's done.
			tab.bondmu.Lock()
			delete(tab.bonding, id)
			tab.bondmu.Unlock()
		}
		// Retrieve the bonding results
		result = w.err
		if result == nil {
			node = w.n
		}
	}
	if node != nil {
		// Add the node to the table even if the bonding ping/pong
		// fails. It will be replaced quickly if it continues to be
		// unresponsive.
		tab.add(node)
		tab.db.updateFindFails(id, 0)
	}
	return node, result
}

func (tab *Table) pingpong(w *bondproc, pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16) {
	// Request a bonding slot to limit network usage
	<-tab.bondslots
	defer func() { tab.bondslots <- struct{}{} }()

	// Ping the remote side and wait for a pong.
	if w.err = tab.ping(id, addr); w.err != nil {
		close(w.done)
		return
	}
	if !pinged {
		// Give the remote node a chance to ping us before we start
		// sending findnode requests. If they still remember us,
		// waitping will simply time out.
		tab.net.waitping(id)
	}
	// Bonding succeeded, update the node database.
	w.n = NewNode(id, addr.IP, uint16(addr.Port), tcpPort)
	tab.db.updateNode(w.n)
	close(w.done)
}

// ping a remote endpoint and wait for a reply, also updating the node
// database accordingly.
func (tab *Table) ping(id NodeID, addr *net.UDPAddr) error {
	tab.db.updateLastPing(id, time.Now())
	if err := tab.net.ping(id, addr); err != nil {
		return err
	}
	tab.db.updateLastPong(id, time.Now())

	// Start the background expiration goroutine after the first
	// successful communication. Subsequent calls have no effect if it
	// is already running. We do this here instead of somewhere else
	// so that the search for seed nodes also considers older nodes
	// that would otherwise be removed by the expiration.
	tab.db.ensureExpirer()
	return nil
}

// add attempts to add the given node to its corresponding bucket. If
// the bucket has space available, adding the node succeeds immediately.
// Otherwise, the node is added if the least recently active node in
// the bucket does not respond to a ping packet.
//
// The caller must not hold tab.mutex.
func (tab *Table) add(new *Node) {
	b := tab.buckets[logdist(tab.self.sha, new.sha)]
	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	if b.bump(new) {
		return
	}
	var oldest *Node
	if len(b.entries) == bucketSize {
		oldest = b.entries[bucketSize-1]
		if oldest.contested {
			// The node is already being replaced, don't attempt
			// to replace it.
			return
		}
		oldest.contested = true
		// Let go of the mutex so other goroutines can access
		// the table while we ping the least recently active node.
		tab.mutex.Unlock()
		err := tab.ping(oldest.ID, oldest.addr())
		tab.mutex.Lock()
		oldest.contested = false
		if err == nil {
			// The node responded, don't replace it.
			return
		}
	}
	added := b.replace(new, oldest)
	if added && tab.nodeAddedHook != nil {
		tab.nodeAddedHook(new)
	}
}

// stuff adds nodes to the table, appending them to the end of their
// corresponding bucket if the bucket is not full. The caller must hold
// tab.mutex.
func (tab *Table) stuff(nodes []*Node) {
outer:
	for _, n := range nodes {
		if n.ID == tab.self.ID {
			continue // don't add self
		}
		bucket := tab.buckets[logdist(tab.self.sha, n.sha)]
		for i := range bucket.entries {
			if bucket.entries[i].ID == n.ID {
				continue outer // already in bucket
			}
		}
		if len(bucket.entries) < bucketSize {
			bucket.entries = append(bucket.entries, n)
			if tab.nodeAddedHook != nil {
				tab.nodeAddedHook(n)
			}
		}
	}
}

// delete removes an entry from the node table (used to evacuate
// failed/non-bonded discovery peers).
func (tab *Table) delete(node *Node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	bucket := tab.buckets[logdist(tab.self.sha, node.sha)]
	for i := range bucket.entries {
		if bucket.entries[i].ID == node.ID {
			bucket.entries = append(bucket.entries[:i], bucket.entries[i+1:]...)
			return
		}
	}
}

func (b *bucket) replace(n *Node, last *Node) bool {
	// Don't add if b already contains n.
	for i := range b.entries {
		if b.entries[i].ID == n.ID {
			return false
		}
	}
	// Replace last if it is still the last entry or just add n if b
	// isn't full. If last is no longer the last entry, it has either
	// been replaced with someone else or became active.
	if len(b.entries) == bucketSize && (last == nil || b.entries[bucketSize-1].ID != last.ID) {
		return false
	}
	if len(b.entries) < bucketSize {
		b.entries = append(b.entries, nil)
	}
	copy(b.entries[1:], b.entries)
	b.entries[0] = n
	return true
}

func (b *bucket) bump(n *Node) bool {
	for i := range b.entries {
		if b.entries[i].ID == n.ID {
			// move it to the front
			copy(b.entries[1:], b.entries[:i])
			b.entries[0] = n
			return true
		}
	}
	return false
}

// nodesByDistance is a list of nodes, ordered by
// distance to target.
type nodesByDistance struct {
	entries []*Node
	target  common.Hash
}

// push adds the given node to the list, keeping the total size below maxElems.
func (h *nodesByDistance) push(n *Node, maxElems int) {
	ix := sort.Search(len(h.entries), func(i int) bool {
		return distcmp(h.target, h.entries[i].sha, n.sha) > 0
	})
	if len(h.entries) < maxElems {
		h.entries = append(h.entries, n)
	}
	if ix == len(h.entries) {
		// farther away than all nodes we already have.
		// if there was room for it, the node is now the last element.
	} else {
		// slide existing entries down to make room
		// this will overwrite the entry we just appended.
		copy(h.entries[ix+1:], h.entries[ix:])
		h.entries[ix] = n
	}
}