cmd/geth: rename --whitelist to --eth.requiredblocks (#24505)
* cmd, eth: Rename whitelist argument to peer.requiredblocks
* eth/ethconfig: document PeerRequiredBlocks better
* cmd/utils: rename new flag to --eth.requiredblocks

Co-authored-by: Felix Lange <fjl@twurst.com>
parent 6cd72660d0
commit dbfd397262
cmd/geth/main.go
@@ -107,7 +107,8 @@ var (
         utils.UltraLightFractionFlag,
         utils.UltraLightOnlyAnnounceFlag,
         utils.LightNoSyncServeFlag,
-        utils.WhitelistFlag,
+        utils.EthPeerRequiredBlocksFlag,
+        utils.LegacyWhitelistFlag,
         utils.BloomFilterSizeFlag,
         utils.CacheFlag,
         utils.CacheDatabaseFlag,
cmd/geth/usage.go
@@ -53,7 +53,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
             utils.EthStatsURLFlag,
             utils.IdentityFlag,
             utils.LightKDFFlag,
-            utils.WhitelistFlag,
+            utils.EthPeerRequiredBlocksFlag,
         },
     },
     {
@@ -225,6 +225,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
         Name: "ALIASED (deprecated)",
         Flags: []cli.Flag{
             utils.NoUSBFlag,
+            utils.LegacyWhitelistFlag,
         },
     },
     {
cmd/utils/flags.go
@@ -237,9 +237,13 @@ var (
         Name: "lightkdf",
         Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
     }
-    WhitelistFlag = cli.StringFlag{
+    EthPeerRequiredBlocksFlag = cli.StringFlag{
+        Name: "eth.requiredblocks",
+        Usage: "Comma separated block number-to-hash mappings to require for peering (<number>=<hash>)",
+    }
+    LegacyWhitelistFlag = cli.StringFlag{
         Name: "whitelist",
-        Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>)",
+        Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>) (deprecated in favor of --eth.requiredblocks)",
     }
     BloomFilterSizeFlag = cli.Uint64Flag{
         Name: "bloomfilter.size",
@@ -1447,26 +1451,33 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) {
     }
 }
 
-func setWhitelist(ctx *cli.Context, cfg *ethconfig.Config) {
-    whitelist := ctx.GlobalString(WhitelistFlag.Name)
-    if whitelist == "" {
+func setPeerRequiredBlocks(ctx *cli.Context, cfg *ethconfig.Config) {
+    peerRequiredBlocks := ctx.GlobalString(EthPeerRequiredBlocksFlag.Name)
+
+    if peerRequiredBlocks == "" {
+        if ctx.GlobalIsSet(LegacyWhitelistFlag.Name) {
+            log.Warn("The flag --whitelist is deprecated and will be removed, please use --eth.requiredblocks")
+            peerRequiredBlocks = ctx.GlobalString(LegacyWhitelistFlag.Name)
+        } else {
             return
         }
-    cfg.Whitelist = make(map[uint64]common.Hash)
-    for _, entry := range strings.Split(whitelist, ",") {
+    }
+
+    cfg.PeerRequiredBlocks = make(map[uint64]common.Hash)
+    for _, entry := range strings.Split(peerRequiredBlocks, ",") {
         parts := strings.Split(entry, "=")
         if len(parts) != 2 {
-            Fatalf("Invalid whitelist entry: %s", entry)
+            Fatalf("Invalid peer required block entry: %s", entry)
         }
         number, err := strconv.ParseUint(parts[0], 0, 64)
         if err != nil {
-            Fatalf("Invalid whitelist block number %s: %v", parts[0], err)
+            Fatalf("Invalid peer required block number %s: %v", parts[0], err)
         }
         var hash common.Hash
         if err = hash.UnmarshalText([]byte(parts[1])); err != nil {
-            Fatalf("Invalid whitelist hash %s: %v", parts[1], err)
+            Fatalf("Invalid peer required block hash %s: %v", parts[1], err)
         }
-        cfg.Whitelist[number] = hash
+        cfg.PeerRequiredBlocks[number] = hash
     }
 }
 
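Note: the accepted value format is a comma separated list of <number>=<hash> pairs. The sketch below mirrors the same parsing logic as a standalone function; it is illustrative only (not part of this commit), the block number and hash are placeholders, and it returns an error instead of calling Fatalf so it can run outside cmd/utils. It assumes the go-ethereum module is available for the common package.

package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/ethereum/go-ethereum/common"
)

// parseRequiredBlocks turns a "<number>=<hash>,<number>=<hash>" string
// into a block-number -> hash map, as the flag handler above does.
func parseRequiredBlocks(s string) (map[uint64]common.Hash, error) {
	blocks := make(map[uint64]common.Hash)
	for _, entry := range strings.Split(s, ",") {
		parts := strings.Split(entry, "=")
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid entry: %s", entry)
		}
		number, err := strconv.ParseUint(parts[0], 0, 64)
		if err != nil {
			return nil, fmt.Errorf("invalid block number %s: %v", parts[0], err)
		}
		var hash common.Hash
		if err := hash.UnmarshalText([]byte(parts[1])); err != nil {
			return nil, fmt.Errorf("invalid block hash %s: %v", parts[1], err)
		}
		blocks[number] = hash
	}
	return blocks, nil
}

func main() {
	// Placeholder number and hash, for illustration only.
	value := "1234=0x00000000000000000000000000000000000000000000000000000000deadbeef"
	blocks, err := parseRequiredBlocks(value)
	if err != nil {
		panic(err)
	}
	fmt.Println(blocks)
}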
@@ -1533,7 +1544,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
     setTxPool(ctx, &cfg.TxPool)
     setEthash(ctx, cfg)
     setMiner(ctx, &cfg.Miner)
-    setWhitelist(ctx, cfg)
+    setPeerRequiredBlocks(ctx, cfg)
     setLes(ctx, cfg)
 
     // Cap the cache allowance and tune the garbage collector
eth/backend.go
@@ -229,7 +229,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
         BloomCache: uint64(cacheLimit),
         EventMux: eth.eventMux,
         Checkpoint: checkpoint,
-        Whitelist: config.Whitelist,
+        PeerRequiredBlocks: config.PeerRequiredBlocks,
     }); err != nil {
         return nil, err
     }
eth/ethconfig/config.go
@@ -138,8 +138,10 @@ type Config struct {
 
     TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
 
-    // Whitelist of required block number -> hash values to accept
-    Whitelist map[uint64]common.Hash `toml:"-"`
+    // PeerRequiredBlocks is a set of block number -> hash mappings which must be in the
+    // canonical chain of all remote peers. Setting the option makes geth verify the
+    // presence of these blocks for every new peer connection.
+    PeerRequiredBlocks map[uint64]common.Hash `toml:"-"`
 
     // Light client options
     LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests
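Note: the field keeps the `toml:"-"` tag, so it is skipped by the TOML codec; embedders configure it in Go rather than via the config file. A minimal sketch (illustrative only; the block number and hash are placeholders, and the go-ethereum module is assumed to be on the module path):

package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
)

func main() {
	// Start from the library defaults and pin a required block.
	cfg := ethconfig.Defaults
	cfg.PeerRequiredBlocks = map[uint64]common.Hash{
		// Peers whose canonical chain does not contain this exact
		// block will be dropped after the sync challenge.
		1234: common.HexToHash("0x00000000000000000000000000000000000000000000000000000000deadbeef"),
	}
	_ = cfg // cfg would then be handed to eth.New together with a node.Node
}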
eth/ethconfig/gen_config.go
@@ -26,7 +26,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
         NoPruning bool
         NoPrefetch bool
         TxLookupLimit uint64 `toml:",omitempty"`
-        Whitelist map[uint64]common.Hash `toml:"-"`
+        PeerRequiredBlocks map[uint64]common.Hash `toml:"-"`
         LightServ int `toml:",omitempty"`
         LightIngress int `toml:",omitempty"`
         LightEgress int `toml:",omitempty"`
@@ -71,7 +71,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
     enc.NoPruning = c.NoPruning
     enc.NoPrefetch = c.NoPrefetch
     enc.TxLookupLimit = c.TxLookupLimit
-    enc.Whitelist = c.Whitelist
+    enc.PeerRequiredBlocks = c.PeerRequiredBlocks
     enc.LightServ = c.LightServ
     enc.LightIngress = c.LightIngress
     enc.LightEgress = c.LightEgress
@@ -120,7 +120,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
         NoPruning *bool
         NoPrefetch *bool
         TxLookupLimit *uint64 `toml:",omitempty"`
-        Whitelist map[uint64]common.Hash `toml:"-"`
+        PeerRequiredBlocks map[uint64]common.Hash `toml:"-"`
         LightServ *int `toml:",omitempty"`
         LightIngress *int `toml:",omitempty"`
         LightEgress *int `toml:",omitempty"`
@@ -184,8 +184,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
     if dec.TxLookupLimit != nil {
         c.TxLookupLimit = *dec.TxLookupLimit
     }
-    if dec.Whitelist != nil {
-        c.Whitelist = dec.Whitelist
+    if dec.PeerRequiredBlocks != nil {
+        c.PeerRequiredBlocks = dec.PeerRequiredBlocks
     }
     if dec.LightServ != nil {
         c.LightServ = *dec.LightServ
eth/handler.go
@@ -86,7 +86,8 @@ type handlerConfig struct {
     BloomCache uint64 // Megabytes to alloc for snap sync bloom
     EventMux *event.TypeMux // Legacy event mux, deprecate for `feed`
     Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges
-    Whitelist map[uint64]common.Hash // Hard coded whitelist for sync challenged
+
+    PeerRequiredBlocks map[uint64]common.Hash // Hard coded map of required block hashes for sync challenges
 }
 
 type handler struct {
@@ -115,7 +116,7 @@ type handler struct {
     txsSub event.Subscription
     minedBlockSub *event.TypeMuxSubscription
 
-    whitelist map[uint64]common.Hash
+    peerRequiredBlocks map[uint64]common.Hash
 
     // channels for fetcher, syncer, txsyncLoop
     quitSync chan struct{}
@@ -140,7 +141,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
         chain: config.Chain,
         peers: newPeerSet(),
         merger: config.Merger,
-        whitelist: config.Whitelist,
+        peerRequiredBlocks: config.PeerRequiredBlocks,
         quitSync: make(chan struct{}),
     }
     if config.Sync == downloader.FullSync {
@@ -423,8 +424,8 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
         }
     }()
 }
-    // If we have any explicit whitelist block hashes, request them
-    for number, hash := range h.whitelist {
+    // If we have any explicit peer required block hashes, request them
+    for number, hash := range h.peerRequiredBlocks {
         resCh := make(chan *eth.Response)
         if _, err := peer.RequestHeadersByNumber(number, 1, 0, false, resCh); err != nil {
             return err
@@ -437,25 +438,25 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
             case res := <-resCh:
                 headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket))
                 if len(headers) == 0 {
-                    // Whitelisted blocks are allowed to be missing if the remote
+                    // Required blocks are allowed to be missing if the remote
                     // node is not yet synced
                     res.Done <- nil
                     return
                 }
                 // Validate the header and either drop the peer or continue
                 if len(headers) > 1 {
-                    res.Done <- errors.New("too many headers in whitelist response")
+                    res.Done <- errors.New("too many headers in required block response")
                     return
                 }
                 if headers[0].Number.Uint64() != number || headers[0].Hash() != hash {
-                    peer.Log().Info("Whitelist mismatch, dropping peer", "number", number, "hash", headers[0].Hash(), "want", hash)
-                    res.Done <- errors.New("whitelist block mismatch")
+                    peer.Log().Info("Required block mismatch, dropping peer", "number", number, "hash", headers[0].Hash(), "want", hash)
+                    res.Done <- errors.New("required block mismatch")
                     return
                 }
-                peer.Log().Debug("Whitelist block verified", "number", number, "hash", hash)
+                peer.Log().Debug("Peer required block verified", "number", number, "hash", hash)
                 res.Done <- nil
             case <-timeout.C:
-                peer.Log().Warn("Whitelist challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
+                peer.Log().Warn("Required block challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
                 h.removePeer(peer.ID())
             }
         }(number, hash)
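Note: the challenge amounts to one GetBlockHeaders request per required block number; an empty response is tolerated (the peer may still be syncing), while extra headers or a mismatching header drop the peer. A standalone sketch of just that comparison (illustrative only; verifyRequiredBlock is a hypothetical helper, not the handler's actual API):

package main

import (
	"errors"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// verifyRequiredBlock mirrors the handler's check: the returned header must
// sit at the required number and carry the required hash.
func verifyRequiredBlock(header *types.Header, number uint64, want common.Hash) error {
	if header.Number.Uint64() != number || header.Hash() != want {
		return errors.New("required block mismatch")
	}
	return nil
}

func main() {
	// Placeholder header; in practice the operator supplies the expected
	// hash via --eth.requiredblocks and the header comes from the peer.
	header := &types.Header{Number: big.NewInt(1234)}
	fmt.Println(verifyRequiredBlock(header, 1234, header.Hash())) // <nil>: matches
}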