// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"errors"
	"fmt"
	"net"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/internal/flags"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/urfave/cli/v2"
)

var (
	discv4Command = &cli.Command{
		Name:  "discv4",
		Usage: "Node Discovery v4 tools",
		Subcommands: []*cli.Command{
			discv4PingCommand,
			discv4RequestRecordCommand,
			discv4ResolveCommand,
			discv4ResolveJSONCommand,
			discv4CrawlCommand,
			discv4TestCommand,
			discv4ListenCommand,
		},
	}
	discv4PingCommand = &cli.Command{
		Name:      "ping",
		Usage:     "Sends ping to a node",
		Action:    discv4Ping,
		ArgsUsage: "<node>",
		Flags:     discoveryNodeFlags,
	}
	discv4RequestRecordCommand = &cli.Command{
		Name:      "requestenr",
		Usage:     "Requests a node record using EIP-868 enrRequest",
		Action:    discv4RequestRecord,
		ArgsUsage: "<node>",
		Flags:     discoveryNodeFlags,
	}
	discv4ResolveCommand = &cli.Command{
		Name:      "resolve",
		Usage:     "Finds a node in the DHT",
		Action:    discv4Resolve,
		ArgsUsage: "<node>",
		Flags:     discoveryNodeFlags,
	}
	discv4ResolveJSONCommand = &cli.Command{
		Name:      "resolve-json",
		Usage:     "Re-resolves nodes in a nodes.json file",
		Action:    discv4ResolveJSON,
		Flags:     discoveryNodeFlags,
		ArgsUsage: "<nodes.json file>",
	}
	discv4ListenCommand = &cli.Command{
		Name:   "listen",
		Usage:  "Runs a discovery node",
		Action: discv4Listen,
		Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{
			httpAddrFlag,
		}),
	}
	discv4CrawlCommand = &cli.Command{
		Name:   "crawl",
		Usage:  "Updates a nodes.json file with random nodes found in the DHT",
		Action: discv4Crawl,
		Flags:  flags.Merge(discoveryNodeFlags, []cli.Flag{crawlTimeoutFlag, crawlParallelismFlag}),
	}
	discv4TestCommand = &cli.Command{
		Name:   "test",
		Usage:  "Runs tests against a node",
		Action: discv4Test,
		Flags: []cli.Flag{
			remoteEnodeFlag,
			testPatternFlag,
			testTAPFlag,
			testListen1Flag,
			testListen2Flag,
		},
	}
)

var (
	bootnodesFlag = &cli.StringFlag{
		Name:  "bootnodes",
		Usage: "Comma separated nodes used for bootstrapping",
	}
	nodekeyFlag = &cli.StringFlag{
		Name:  "nodekey",
		Usage: "Hex-encoded node key",
	}
	nodedbFlag = &cli.StringFlag{
		Name:  "nodedb",
		Usage: "Nodes database location",
	}
	listenAddrFlag = &cli.StringFlag{
		Name:  "addr",
		Usage: "Listening address",
	}
	extAddrFlag = &cli.StringFlag{
		Name:  "extaddr",
		Usage: "UDP endpoint announced in ENR. You can provide a bare IP address or IP:port as the value of this flag.",
	}
	crawlTimeoutFlag = &cli.DurationFlag{
		Name:  "timeout",
		Usage: "Time limit for the crawl.",
		Value: 30 * time.Minute,
	}
	crawlParallelismFlag = &cli.IntFlag{
		Name:  "parallel",
		Usage: "How many parallel discoveries to attempt.",
		Value: 16,
	}
	remoteEnodeFlag = &cli.StringFlag{
		Name:    "remote",
		Usage:   "Enode of the remote node under test",
		EnvVars: []string{"REMOTE_ENODE"},
	}
	httpAddrFlag = &cli.StringFlag{
		Name:  "rpc",
		Usage: "HTTP server listening address",
	}
)

var discoveryNodeFlags = []cli.Flag{
	bootnodesFlag,
	nodekeyFlag,
	nodedbFlag,
	listenAddrFlag,
	extAddrFlag,
}
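
// discv4Ping sends a ping packet to the node given on the command line and
// reports the round-trip time.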
func discv4Ping(ctx *cli.Context) error {
	n := getNodeArg(ctx)
	disc, _ := startV4(ctx)
	defer disc.Close()

	start := time.Now()
	if err := disc.Ping(n); err != nil {
		return fmt.Errorf("node didn't respond: %v", err)
	}
	fmt.Printf("node responded to ping (RTT %v).\n", time.Since(start))
	return nil
}
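
// discv4Listen runs a standalone discovery v4 node. When the --rpc flag is
// set, the node table is also exposed over HTTP JSON-RPC. A quick sanity
// check (assuming --rpc 127.0.0.1:8545 was passed; the address is only
// illustrative):
//
//	curl -s -H 'Content-Type: application/json' \
//	     -d '{"jsonrpc":"2.0","id":1,"method":"discv4_self","params":[]}' \
//	     http://127.0.0.1:8545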
func discv4Listen(ctx *cli.Context) error {
	disc, _ := startV4(ctx)
	defer disc.Close()

	fmt.Println(disc.Self())

	httpAddr := ctx.String(httpAddrFlag.Name)
	if httpAddr == "" {
		// Non-HTTP mode.
		select {}
	}

	api := &discv4API{disc}
	log.Info("Starting RPC API server", "addr", httpAddr)
	srv := rpc.NewServer()
	srv.RegisterName("discv4", api)
	http.DefaultServeMux.Handle("/", srv)
	httpsrv := http.Server{Addr: httpAddr, Handler: http.DefaultServeMux}
	return httpsrv.ListenAndServe()
}
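
// discv4RequestRecord fetches the node record of the given node using an
// EIP-868 enrRequest packet and prints it.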
func discv4RequestRecord(ctx *cli.Context) error {
	n := getNodeArg(ctx)
	disc, _ := startV4(ctx)
	defer disc.Close()

	respN, err := disc.RequestENR(n)
	if err != nil {
		return fmt.Errorf("can't retrieve record: %v", err)
	}
	fmt.Println(respN.String())
	return nil
}
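
// discv4Resolve looks up the given node in the DHT and prints the result.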
func discv4Resolve(ctx *cli.Context) error {
	n := getNodeArg(ctx)
	disc, _ := startV4(ctx)
	defer disc.Close()

	fmt.Println(disc.Resolve(n).String())
	return nil
}
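
// discv4ResolveJSON re-resolves the nodes in a nodes.json file and writes the
// updated set back to the file. Extra nodes can be supplied as additional
// command-line arguments.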
func discv4ResolveJSON(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return errors.New("need nodes file as argument")
	}
	nodesFile := ctx.Args().Get(0)
	inputSet := make(nodeSet)
	if common.FileExist(nodesFile) {
		inputSet = loadNodesJSON(nodesFile)
	}

	// Add extra nodes from command line arguments.
	var nodeargs []*enode.Node
	for i := 1; i < ctx.NArg(); i++ {
		n, err := parseNode(ctx.Args().Get(i))
		if err != nil {
			exit(err)
		}
		nodeargs = append(nodeargs, n)
	}

	disc, config := startV4(ctx)
	defer disc.Close()

	c, err := newCrawler(inputSet, config.Bootnodes, disc, enode.IterNodes(nodeargs))
	if err != nil {
		return err
	}
	c.revalidateInterval = 0
	output := c.run(0, 1)
	writeNodesJSON(nodesFile, output)
	return nil
}
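
// discv4Crawl crawls the DHT and updates the given nodes.json file with the
// nodes it finds.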
func discv4Crawl(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return errors.New("need nodes file as argument")
	}
	nodesFile := ctx.Args().First()
	inputSet := make(nodeSet)
	if common.FileExist(nodesFile) {
		inputSet = loadNodesJSON(nodesFile)
	}

	disc, config := startV4(ctx)
	defer disc.Close()

	c, err := newCrawler(inputSet, config.Bootnodes, disc, disc.RandomNodes())
	if err != nil {
		return err
	}
	c.revalidateInterval = 10 * time.Minute
	output := c.run(ctx.Duration(crawlTimeoutFlag.Name), ctx.Int(crawlParallelismFlag.Name))
	writeNodesJSON(nodesFile, output)
	return nil
}

// discv4Test runs the protocol test suite.
func discv4Test(ctx *cli.Context) error {
	// Configure test package globals.
	if !ctx.IsSet(remoteEnodeFlag.Name) {
		return fmt.Errorf("missing -%v", remoteEnodeFlag.Name)
	}
	v4test.Remote = ctx.String(remoteEnodeFlag.Name)
	v4test.Listen1 = ctx.String(testListen1Flag.Name)
	v4test.Listen2 = ctx.String(testListen2Flag.Name)
	return runTests(ctx, v4test.AllTests)
}

// startV4 starts an ephemeral discovery V4 node.
func startV4(ctx *cli.Context) (*discover.UDPv4, discover.Config) {
	ln, config := makeDiscoveryConfig(ctx)
	socket := listen(ctx, ln)
	disc, err := discover.ListenV4(socket, ln, config)
	if err != nil {
		exit(err)
	}
	return disc, config
}
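
// makeDiscoveryConfig builds the discovery configuration from the
// command-line flags: node key, bootstrap nodes and node database.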
func makeDiscoveryConfig(ctx *cli.Context) (*enode.LocalNode, discover.Config) {
	var cfg discover.Config

	if ctx.IsSet(nodekeyFlag.Name) {
		key, err := crypto.HexToECDSA(ctx.String(nodekeyFlag.Name))
		if err != nil {
			exit(fmt.Errorf("-%s: %v", nodekeyFlag.Name, err))
		}
		cfg.PrivateKey = key
	} else {
		cfg.PrivateKey, _ = crypto.GenerateKey()
	}

	if commandHasFlag(ctx, bootnodesFlag) {
		bn, err := parseBootnodes(ctx)
		if err != nil {
			exit(err)
		}
		cfg.Bootnodes = bn
	}

	dbpath := ctx.String(nodedbFlag.Name)
	db, err := enode.OpenDB(dbpath)
	if err != nil {
		exit(err)
	}
	ln := enode.NewLocalNode(db, cfg.PrivateKey)
	return ln, cfg
}
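
// parseExtAddr parses the value of the --extaddr flag, accepting either a
// bare IP address or an IP:port combination.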
func parseExtAddr(spec string) (ip net.IP, port int, ok bool) {
	ip = net.ParseIP(spec)
	if ip != nil {
		return ip, 0, true
	}
	host, portstr, err := net.SplitHostPort(spec)
	if err != nil {
		return nil, 0, false
	}
	ip = net.ParseIP(host)
	if ip == nil {
		return nil, 0, false
	}
	port, err = strconv.Atoi(portstr)
	if err != nil {
		return nil, 0, false
	}
	return ip, port, true
}
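
// listen opens the UDP listening socket and configures the local node's ENR
// endpoint from the listener address, or from --extaddr if that flag is set.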
func listen(ctx *cli.Context, ln *enode.LocalNode) *net.UDPConn {
	addr := ctx.String(listenAddrFlag.Name)
	if addr == "" {
		addr = "0.0.0.0:0"
	}
	socket, err := net.ListenPacket("udp4", addr)
	if err != nil {
		exit(err)
	}

	// Configure UDP endpoint in ENR from listener address.
	usocket := socket.(*net.UDPConn)
	uaddr := socket.LocalAddr().(*net.UDPAddr)
	if uaddr.IP.IsUnspecified() {
		ln.SetFallbackIP(net.IP{127, 0, 0, 1})
	} else {
		ln.SetFallbackIP(uaddr.IP)
	}
	ln.SetFallbackUDP(uaddr.Port)

	// If an ENR endpoint is set explicitly on the command-line, override
	// the information from the listening address. Note this is careful not
	// to set the UDP port if the external address doesn't have it.
	extAddr := ctx.String(extAddrFlag.Name)
	if extAddr != "" {
		ip, port, ok := parseExtAddr(extAddr)
		if !ok {
			exit(fmt.Errorf("-%s: invalid external address %q", extAddrFlag.Name, extAddr))
		}
		ln.SetStaticIP(ip)
		if port != 0 {
			ln.SetFallbackUDP(port)
		}
	}

	return usocket
}
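
// parseBootnodes returns the bootstrap nodes given with --bootnodes. If the
// flag is not set, the mainnet bootnodes are used.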
func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) {
	s := params.MainnetBootnodes
	if ctx.IsSet(bootnodesFlag.Name) {
		input := ctx.String(bootnodesFlag.Name)
		if input == "" {
			return nil, nil
		}
		s = strings.Split(input, ",")
	}
	nodes := make([]*enode.Node, len(s))
	var err error
	for i, record := range s {
		nodes[i], err = parseNode(record)
		if err != nil {
			return nil, fmt.Errorf("invalid bootstrap node: %v", err)
		}
	}
	return nodes, nil
}
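
// discv4API is the RPC API served by the 'listen' command. It gives access to
// the node table of the running discovery node.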
type discv4API struct {
	host *discover.UDPv4
}
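
// LookupRandom walks the DHT at random and returns up to n of the nodes
// encountered.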
func (api *discv4API) LookupRandom(n int) (ns []*enode.Node) {
	it := api.host.RandomNodes()
	for len(ns) < n && it.Next() {
		ns = append(ns, it.Node())
	}
	return ns
}
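
// Buckets returns the contents of the node table, grouped by bucket.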
func (api *discv4API) Buckets() [][]discover.BucketNode {
	return api.host.TableBuckets()
}
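
// Self returns the local node record.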
func (api *discv4API) Self() *enode.Node {
	return api.host.Self()
}