2015-07-07 03:54:22 +03:00
|
|
|
// Copyright 2015 The go-ethereum Authors
|
2015-07-22 19:48:40 +03:00
|
|
|
// This file is part of the go-ethereum library.
|
2015-07-07 03:54:22 +03:00
|
|
|
//
|
2015-07-23 19:35:11 +03:00
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
2015-07-07 03:54:22 +03:00
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
2015-07-22 19:48:40 +03:00
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
2015-07-07 03:54:22 +03:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
2015-07-22 19:48:40 +03:00
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2015-07-07 03:54:22 +03:00
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
2015-07-22 19:48:40 +03:00
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
2015-07-07 03:54:22 +03:00
|
|
|
|
2015-04-18 02:11:09 +03:00
|
|
|
package eth
|
|
|
|
|
|
|
|
import (
|
2015-05-18 21:33:37 +03:00
|
|
|
"errors"
|
2015-04-18 02:11:09 +03:00
|
|
|
"fmt"
|
|
|
|
"math/big"
|
2015-05-18 21:33:37 +03:00
|
|
|
"sync"
|
2015-10-22 23:22:04 +03:00
|
|
|
"time"
|
2015-04-18 02:11:09 +03:00
|
|
|
|
2018-07-16 10:54:19 +03:00
|
|
|
mapset "github.com/deckarep/golang-set"
|
2015-04-18 02:11:09 +03:00
|
|
|
"github.com/ethereum/go-ethereum/common"
|
2019-09-30 21:28:50 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/forkid"
|
2015-04-18 02:11:09 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
2019-10-28 14:59:07 +03:00
|
|
|
"github.com/ethereum/go-ethereum/eth/fetcher"
|
2015-04-18 02:11:09 +03:00
|
|
|
"github.com/ethereum/go-ethereum/p2p"
|
2015-09-07 20:43:01 +03:00
|
|
|
"github.com/ethereum/go-ethereum/rlp"
|
2015-04-18 02:11:09 +03:00
|
|
|
)
|
|
|
|
|
2015-05-18 21:33:37 +03:00
|
|
|
// Errors returned by the peerSet registration/lookup methods.
var (
	errClosed            = errors.New("peer set is closed")
	errAlreadyRegistered = errors.New("peer is already registered")
	errNotRegistered     = errors.New("peer is not registered")
)
|
|
|
|
|
2015-06-29 17:06:07 +03:00
|
|
|
// Hard limits on per-peer bookkeeping, protecting the node from misbehaving
// or malicious remote peers.
const (
	maxKnownTxs    = 32768 // Maximum transactions hashes to keep in the known list (prevent DOS)
	maxKnownBlocks = 1024  // Maximum block hashes to keep in the known list (prevent DOS)

	// maxQueuedTxs is the maximum number of transactions to queue up before dropping
	// broadcasts.
	maxQueuedTxs = 4096

	// maxQueuedTxAnns is the maximum number of transaction announcements to queue up
	// before dropping broadcasts.
	maxQueuedTxAnns = 4096

	// maxQueuedTxRetrieval is the maximum number of tx retrieval requests to queue up
	// before dropping requests.
	maxQueuedTxRetrieval = 4096

	// maxQueuedBlocks is the maximum number of block propagations to queue up before
	// dropping broadcasts. There's not much point in queueing stale blocks, so a few
	// that might cover uncles should be enough.
	maxQueuedBlocks = 4

	// maxQueuedBlockAnns is the maximum number of block announcements to queue up before
	// dropping broadcasts. Similarly to block propagations, there's no point to queue
	// above some healthy uncle limit, so use that.
	maxQueuedBlockAnns = 4

	// handshakeTimeout bounds how long the status exchange may take before the
	// connection is dropped with a read timeout.
	handshakeTimeout = 5 * time.Second
)
|
|
|
|
|
2019-10-28 14:59:07 +03:00
|
|
|
// max is a helper function which returns the larger of the two given integers.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
|
|
|
|
2015-10-27 16:10:30 +03:00
|
|
|
// PeerInfo represents a short summary of the Ethereum sub-protocol metadata known
// about a connected peer.
type PeerInfo struct {
	Version    int      `json:"version"`    // Ethereum protocol version negotiated
	Difficulty *big.Int `json:"difficulty"` // Total difficulty of the peer's blockchain
	Head       string   `json:"head"`       // SHA3 hash of the peer's best owned block
}
|
|
|
|
|
2018-05-21 11:32:42 +03:00
|
|
|
// propEvent is a block propagation, waiting for its turn in the broadcast queue.
type propEvent struct {
	block *types.Block // Block to propagate to the remote peer
	td    *big.Int     // Total difficulty of the chain up to and including the block
}
|
|
|
|
|
2015-04-18 02:11:09 +03:00
|
|
|
// peer wraps a raw devp2p connection with the eth-protocol state tracked for
// the remote node: its head/td, the hashes it is already assumed to know, and
// the async queues feeding the broadcast/announce/retrieval write loops.
type peer struct {
	id string // Truncated hex node ID, used as the lookup key in peerSet

	*p2p.Peer
	rw p2p.MsgReadWriter

	version  int         // Protocol version negotiated
	syncDrop *time.Timer // Timed connection dropper if sync progress isn't validated in time

	head common.Hash  // Latest advertised head block hash (guarded by lock)
	td   *big.Int     // Total difficulty of the advertised head (guarded by lock)
	lock sync.RWMutex // Protects head and td

	knownTxs        mapset.Set                           // Set of transaction hashes known to be known by this peer
	knownBlocks     mapset.Set                           // Set of block hashes known to be known by this peer
	queuedBlocks    chan *propEvent                      // Queue of blocks to broadcast to the peer
	queuedBlockAnns chan *types.Block                    // Queue of blocks to announce to the peer
	txPropagation   chan []common.Hash                   // Channel used to queue transaction propagation requests
	txAnnounce      chan []common.Hash                   // Channel used to queue transaction announcement requests
	txRetrieval     chan []common.Hash                   // Channel used to queue transaction retrieval requests
	getPooledTx     func(common.Hash) *types.Transaction // Callback used to retrieve transaction from txpool
	term            chan struct{}                        // Termination channel to stop the broadcaster
}
|
|
|
|
|
2019-10-28 14:59:07 +03:00
|
|
|
func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter, getPooledTx func(hash common.Hash) *types.Transaction) *peer {
|
2015-04-18 02:11:09 +03:00
|
|
|
return &peer{
|
2019-10-28 14:59:07 +03:00
|
|
|
Peer: p,
|
|
|
|
rw: rw,
|
|
|
|
version: version,
|
|
|
|
id: fmt.Sprintf("%x", p.ID().Bytes()[:8]),
|
|
|
|
knownTxs: mapset.NewSet(),
|
|
|
|
knownBlocks: mapset.NewSet(),
|
|
|
|
queuedBlocks: make(chan *propEvent, maxQueuedBlocks),
|
|
|
|
queuedBlockAnns: make(chan *types.Block, maxQueuedBlockAnns),
|
|
|
|
txPropagation: make(chan []common.Hash),
|
|
|
|
txAnnounce: make(chan []common.Hash),
|
|
|
|
txRetrieval: make(chan []common.Hash),
|
|
|
|
getPooledTx: getPooledTx,
|
|
|
|
term: make(chan struct{}),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// broadcastBlocks is a write loop that multiplexes block propagations,
// announcements into the remote peer. The goal is to have an async writer
// that does not lock up node internals. The loop exits on the first network
// send error or when the peer is torn down via close().
func (p *peer) broadcastBlocks() {
	for {
		select {
		case prop := <-p.queuedBlocks:
			// Full block propagation: ship the entire block plus its td.
			if err := p.SendNewBlock(prop.block, prop.td); err != nil {
				return
			}
			p.Log().Trace("Propagated block", "number", prop.block.Number(), "hash", prop.block.Hash(), "td", prop.td)

		case block := <-p.queuedBlockAnns:
			// Lightweight announcement: only hash and number are sent.
			if err := p.SendNewBlockHashes([]common.Hash{block.Hash()}, []uint64{block.NumberU64()}); err != nil {
				return
			}
			p.Log().Trace("Announced block", "number", block.Number(), "hash", block.Hash())

		case <-p.term:
			return
		}
	}
}
|
|
|
|
|
2019-10-28 14:59:07 +03:00
|
|
|
// broadcastTxs is a write loop that multiplexes transaction propagations,
// announcements into the remote peer. The goal is to have an async writer
// that does not lock up node internals. At most one network send is in
// flight at a time; excess queued hashes beyond the maxQueuedTxs /
// maxQueuedTxAnns limits are silently dropped.
func (p *peer) broadcastTxs() {
	var (
		txProps []common.Hash        // Queue of transaction propagations to the peer
		txAnnos []common.Hash        // Queue of transaction announcements to the peer
		done    chan struct{}        // Non-nil if background network sender routine is active.
		errch   = make(chan error)   // Channel used to receive network error
	)
	// scheduleTask spins up at most one background sender, preferring full
	// transaction propagation over hash announcements.
	scheduleTask := func() {
		// Short circuit if there already has a inflight task.
		if done != nil {
			return
		}
		// Spin up transaction propagation task if there is any
		// queued hashes.
		if len(txProps) > 0 {
			var (
				hashes []common.Hash
				txs    []*types.Transaction
				size   common.StorageSize
			)
			for i := 0; i < len(txProps) && size < txsyncPackSize; i++ {
				if tx := p.getPooledTx(txProps[i]); tx != nil {
					txs = append(txs, tx)
					size += tx.Size()
				}
				// Hashes whose transaction has since left the pool are still
				// consumed from the queue, just not sent.
				hashes = append(hashes, txProps[i])
			}
			// Shift the consumed hashes off the front of the queue.
			txProps = txProps[:copy(txProps, txProps[len(hashes):])]
			if len(txs) > 0 {
				done = make(chan struct{})
				go func() {
					if err := p.SendNewTransactions(txs); err != nil {
						errch <- err
						return
					}
					close(done)
					p.Log().Trace("Sent transactions", "count", len(txs))
				}()
				return
			}
		}
		// Spin up transaction announcement task if there is any
		// queued hashes.
		if len(txAnnos) > 0 {
			var (
				hashes  []common.Hash
				pending []common.Hash
				size    common.StorageSize
			)
			for i := 0; i < len(txAnnos) && size < txsyncPackSize; i++ {
				if tx := p.getPooledTx(txAnnos[i]); tx != nil {
					pending = append(pending, txAnnos[i])
					size += common.HashLength
				}
				hashes = append(hashes, txAnnos[i])
			}
			// Shift the consumed hashes off the front of the queue.
			txAnnos = txAnnos[:copy(txAnnos, txAnnos[len(hashes):])]
			if len(pending) > 0 {
				done = make(chan struct{})
				go func() {
					if err := p.SendNewTransactionHashes(pending); err != nil {
						errch <- err
						return
					}
					close(done)
					p.Log().Trace("Sent transaction announcements", "count", len(pending))
				}()
			}
		}
	}

	for {
		scheduleTask()
		select {
		case hashes := <-p.txPropagation:
			// Drop or truncate new arrivals once the queue limit is reached.
			if len(txProps) == maxQueuedTxs {
				continue
			}
			if len(txProps)+len(hashes) > maxQueuedTxs {
				hashes = hashes[:maxQueuedTxs-len(txProps)]
			}
			txProps = append(txProps, hashes...)

		case hashes := <-p.txAnnounce:
			// Drop or truncate new arrivals once the queue limit is reached.
			if len(txAnnos) == maxQueuedTxAnns {
				continue
			}
			if len(txAnnos)+len(hashes) > maxQueuedTxAnns {
				hashes = hashes[:maxQueuedTxAnns-len(txAnnos)]
			}
			txAnnos = append(txAnnos, hashes...)

		case <-done:
			// Background sender finished; allow the next task to be scheduled.
			done = nil

		case <-errch:
			return

		case <-p.term:
			return
		}
	}
}
|
|
|
|
|
|
|
|
// retrievalTxs is a write loop which is responsible for retrieving transaction
|
|
|
|
// from the remote peer. The goal is to have an async writer that does not lock
|
|
|
|
// up node internals. If there are too many requests queued, then new arrival
|
|
|
|
// requests will be dropped silently so that we can ensure the memory assumption
|
|
|
|
// is fixed for each peer.
|
|
|
|
func (p *peer) retrievalTxs() {
|
|
|
|
var (
|
|
|
|
requests []common.Hash // Queue of transaction requests to the peer
|
|
|
|
done chan struct{} // Non-nil if background network sender routine is active.
|
|
|
|
errch = make(chan error) // Channel used to receive network error
|
|
|
|
)
|
|
|
|
// pick chooses a reasonble number of transaction hashes for retrieval.
|
|
|
|
pick := func() []common.Hash {
|
|
|
|
var ret []common.Hash
|
|
|
|
if len(requests) > fetcher.MaxTransactionFetch {
|
|
|
|
ret = requests[:fetcher.MaxTransactionFetch]
|
|
|
|
} else {
|
|
|
|
ret = requests[:]
|
|
|
|
}
|
|
|
|
requests = requests[:copy(requests, requests[len(ret):])]
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
// send sends transactions retrieval request.
|
|
|
|
send := func(hashes []common.Hash, done chan struct{}) {
|
|
|
|
if err := p.RequestTxs(hashes); err != nil {
|
|
|
|
errch <- err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
close(done)
|
|
|
|
p.Log().Trace("Sent transaction retrieval request", "count", len(hashes))
|
|
|
|
}
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case hashes := <-p.txRetrieval:
|
|
|
|
if len(requests) == maxQueuedTxRetrieval {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if len(requests)+len(hashes) > maxQueuedTxRetrieval {
|
|
|
|
hashes = hashes[:maxQueuedTxRetrieval-len(requests)]
|
|
|
|
}
|
|
|
|
requests = append(requests, hashes...)
|
|
|
|
if done == nil {
|
|
|
|
done = make(chan struct{})
|
|
|
|
go send(pick(), done)
|
|
|
|
}
|
|
|
|
|
|
|
|
case <-done:
|
|
|
|
done = nil
|
|
|
|
if pending := pick(); len(pending) > 0 {
|
|
|
|
done = make(chan struct{})
|
|
|
|
go send(pending, done)
|
|
|
|
}
|
|
|
|
|
|
|
|
case <- errch:
|
|
|
|
return
|
|
|
|
|
|
|
|
case <-p.term:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-21 11:32:42 +03:00
|
|
|
// close signals the broadcast goroutine to terminate. Closing p.term unblocks
// every write loop selecting on it; it must be called at most once.
func (p *peer) close() {
	close(p.term)
}
|
|
|
|
|
2015-10-27 16:10:30 +03:00
|
|
|
// Info gathers and returns a collection of metadata known about a peer.
|
|
|
|
func (p *peer) Info() *PeerInfo {
|
2016-07-25 15:14:14 +03:00
|
|
|
hash, td := p.Head()
|
|
|
|
|
2015-10-27 16:10:30 +03:00
|
|
|
return &PeerInfo{
|
|
|
|
Version: p.version,
|
2016-07-25 15:14:14 +03:00
|
|
|
Difficulty: td,
|
|
|
|
Head: hash.Hex(),
|
2015-10-27 16:10:30 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-25 15:14:14 +03:00
|
|
|
// Head retrieves a copy of the current head hash and total difficulty of the
// peer. Both return values are copies, so callers may mutate them freely.
func (p *peer) Head() (hash common.Hash, td *big.Int) {
	p.lock.RLock()
	defer p.lock.RUnlock()

	copy(hash[:], p.head[:])
	return hash, new(big.Int).Set(p.td)
}
|
|
|
|
|
2016-07-25 15:14:14 +03:00
|
|
|
// SetHead updates the head hash and total difficulty of the peer.
// The values are copied in, so the caller retains ownership of its arguments.
func (p *peer) SetHead(hash common.Hash, td *big.Int) {
	p.lock.Lock()
	defer p.lock.Unlock()

	copy(p.head[:], hash[:])
	p.td.Set(td)
}
|
|
|
|
|
2015-06-29 12:44:00 +03:00
|
|
|
// MarkBlock marks a block as known for the peer, ensuring that the block will
// never be propagated to this particular peer.
func (p *peer) MarkBlock(hash common.Hash) {
	// If we reached the memory allowance, drop a previously known block hash
	for p.knownBlocks.Cardinality() >= maxKnownBlocks {
		p.knownBlocks.Pop()
	}
	p.knownBlocks.Add(hash)
}
|
|
|
|
|
|
|
|
// MarkTransaction marks a transaction as known for the peer, ensuring that it
// will never be propagated to this particular peer.
func (p *peer) MarkTransaction(hash common.Hash) {
	// If we reached the memory allowance, drop a previously known transaction hash
	for p.knownTxs.Cardinality() >= maxKnownTxs {
		p.knownTxs.Pop()
	}
	p.knownTxs.Add(hash)
}
|
|
|
|
|
2019-10-28 14:59:07 +03:00
|
|
|
// SendNewTransactionHashes sends a batch of transaction hashes to the peer and
// includes the hashes in its transaction hash set for future reference.
// This is a synchronous network write; use the async variants to avoid blocking.
func (p *peer) SendNewTransactionHashes(hashes []common.Hash) error {
	// Mark all the transactions as known, but ensure we don't overflow our limits
	for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
		p.knownTxs.Pop()
	}
	for _, hash := range hashes {
		p.knownTxs.Add(hash)
	}
	return p2p.Send(p.rw, NewPooledTransactionHashesMsg, hashes)
}
|
|
|
|
|
|
|
|
// SendNewTransactions sends transactions to the peer and includes the hashes
// in its transaction hash set for future reference.
// This is a synchronous network write; use the async variants to avoid blocking.
func (p *peer) SendNewTransactions(txs types.Transactions) error {
	// Mark all the transactions as known, but ensure we don't overflow our limits
	for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(txs)) {
		p.knownTxs.Pop()
	}
	for _, tx := range txs {
		p.knownTxs.Add(tx.Hash())
	}
	return p2p.Send(p.rw, TxMsg, txs)
}
|
|
|
|
|
|
|
|
// SendTransactionRLP sends a batch of already RLP-encoded transactions to the
// peer. NOTE(review): unlike SendNewTransactions, this does not update the
// knownTxs set — presumably the caller marks them; confirm at call sites.
func (p *peer) SendTransactionRLP(txs []rlp.RawValue) error {
	return p2p.Send(p.rw, TxMsg, txs)
}
|
|
|
|
|
2018-05-21 11:32:42 +03:00
|
|
|
// AsyncSendTransactions queues list of transactions propagation to a remote
// peer. If the peer's broadcast queue is full, the event is silently dropped.
func (p *peer) AsyncSendTransactions(hashes []common.Hash) {
	select {
	case p.txPropagation <- hashes:
		// Mark all the transactions as known, but ensure we don't overflow our limits
		for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
			p.knownTxs.Pop()
		}
		for _, hash := range hashes {
			p.knownTxs.Add(hash)
		}
	case <-p.term:
		p.Log().Debug("Dropping transaction propagation", "count", len(hashes))
	}
}
|
|
|
|
|
|
|
|
// AsyncSendTransactionHashes queues a list of transaction hash announcements
// to a remote peer. If the peer's broadcast queue is full, the event is
// silently dropped.
func (p *peer) AsyncSendTransactionHashes(hashes []common.Hash) {
	select {
	case p.txAnnounce <- hashes:
		// Mark all the transactions as known, but ensure we don't overflow our limits
		for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
			p.knownTxs.Pop()
		}
		for _, hash := range hashes {
			p.knownTxs.Add(hash)
		}
	case <-p.term:
		p.Log().Debug("Dropping transaction announcement", "count", len(hashes))
	}
}
|
|
|
|
|
2015-08-14 21:25:41 +03:00
|
|
|
// SendNewBlockHashes announces the availability of a number of blocks through
// a hash notification. hashes and numbers must be the same length, pairing
// each block hash with its block number.
func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error {
	// Mark all the block hashes as known, but ensure we don't overflow our limits
	for p.knownBlocks.Cardinality() > max(0, maxKnownBlocks-len(hashes)) {
		p.knownBlocks.Pop()
	}
	for _, hash := range hashes {
		p.knownBlocks.Add(hash)
	}
	request := make(newBlockHashesData, len(hashes))
	for i := 0; i < len(hashes); i++ {
		request[i].Hash = hashes[i]
		request[i].Number = numbers[i]
	}
	return p2p.Send(p.rw, NewBlockHashesMsg, request)
}
|
|
|
|
|
2018-05-21 11:32:42 +03:00
|
|
|
// AsyncSendNewBlockHash queues the availability of a block for propagation to a
// remote peer. If the peer's broadcast queue is full, the event is silently
// dropped.
func (p *peer) AsyncSendNewBlockHash(block *types.Block) {
	select {
	case p.queuedBlockAnns <- block:
		// Mark all the block hash as known, but ensure we don't overflow our limits
		for p.knownBlocks.Cardinality() >= maxKnownBlocks {
			p.knownBlocks.Pop()
		}
		p.knownBlocks.Add(block.Hash())
	default:
		p.Log().Debug("Dropping block announcement", "number", block.NumberU64(), "hash", block.Hash())
	}
}
|
|
|
|
|
2015-06-29 12:44:00 +03:00
|
|
|
// SendNewBlock propagates an entire block to a remote peer, along with the
// total difficulty of the chain including the block itself.
func (p *peer) SendNewBlock(block *types.Block, td *big.Int) error {
	// Mark all the block hash as known, but ensure we don't overflow our limits
	for p.knownBlocks.Cardinality() >= maxKnownBlocks {
		p.knownBlocks.Pop()
	}
	p.knownBlocks.Add(block.Hash())
	return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, td})
}
|
|
|
|
|
2018-05-21 11:32:42 +03:00
|
|
|
// AsyncSendNewBlock queues an entire block for propagation to a remote peer. If
// the peer's broadcast queue is full, the event is silently dropped.
func (p *peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {
	select {
	case p.queuedBlocks <- &propEvent{block: block, td: td}:
		// Mark all the block hash as known, but ensure we don't overflow our limits
		for p.knownBlocks.Cardinality() >= maxKnownBlocks {
			p.knownBlocks.Pop()
		}
		p.knownBlocks.Add(block.Hash())
	default:
		p.Log().Debug("Dropping block propagation", "number", block.NumberU64(), "hash", block.Hash())
	}
}
|
|
|
|
|
2015-07-02 14:13:46 +03:00
|
|
|
// SendBlockHeaders sends a batch of block headers to the remote peer.
func (p *peer) SendBlockHeaders(headers []*types.Header) error {
	return p2p.Send(p.rw, BlockHeadersMsg, headers)
}
|
|
|
|
|
2015-07-02 19:55:18 +03:00
|
|
|
// SendBlockBodies sends a batch of block contents to the remote peer.
func (p *peer) SendBlockBodies(bodies []*blockBody) error {
	return p2p.Send(p.rw, BlockBodiesMsg, blockBodiesData(bodies))
}
|
|
|
|
|
2015-08-31 20:21:02 +03:00
|
|
|
// SendBlockBodiesRLP sends a batch of block contents to the remote peer from
// an already RLP encoded format.
func (p *peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error {
	return p2p.Send(p.rw, BlockBodiesMsg, bodies)
}
|
|
|
|
|
2015-10-05 19:37:56 +03:00
|
|
|
// SendNodeData sends a batch of arbitrary internal data, corresponding to the
// hashes requested.
func (p *peer) SendNodeData(data [][]byte) error {
	return p2p.Send(p.rw, NodeDataMsg, data)
}
|
|
|
|
|
2015-09-28 19:27:31 +03:00
|
|
|
// SendReceiptsRLP sends a batch of transaction receipts, corresponding to the
// ones requested from an already RLP encoded format.
func (p *peer) SendReceiptsRLP(receipts []rlp.RawValue) error {
	return p2p.Send(p.rw, ReceiptsMsg, receipts)
}
|
|
|
|
|
2017-03-03 12:41:52 +03:00
|
|
|
// RequestOneHeader is a wrapper around the header query functions to fetch a
// single header. It is used solely by the fetcher.
func (p *peer) RequestOneHeader(hash common.Hash) error {
	p.Log().Debug("Fetching single header", "hash", hash)
	return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), Reverse: false})
}
|
|
|
|
|
|
|
|
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block.
func (p *peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
	return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}
|
|
|
|
|
|
|
|
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block.
func (p *peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
	return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}
|
|
|
|
|
|
|
|
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified.
func (p *peer) RequestBodies(hashes []common.Hash) error {
	p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
	return p2p.Send(p.rw, GetBlockBodiesMsg, hashes)
}
|
|
|
|
|
|
|
|
// RequestNodeData fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *peer) RequestNodeData(hashes []common.Hash) error {
	p.Log().Debug("Fetching batch of state data", "count", len(hashes))
	return p2p.Send(p.rw, GetNodeDataMsg, hashes)
}
|
|
|
|
|
|
|
|
// RequestReceipts fetches a batch of transaction receipts from a remote node.
func (p *peer) RequestReceipts(hashes []common.Hash) error {
	p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
	return p2p.Send(p.rw, GetReceiptsMsg, hashes)
}
|
|
|
|
|
2019-10-28 14:59:07 +03:00
|
|
|
// RequestTxs fetches a batch of transactions from a remote node.
func (p *peer) RequestTxs(hashes []common.Hash) error {
	p.Log().Debug("Fetching batch of transactions", "count", len(hashes))
	return p2p.Send(p.rw, GetPooledTransactionsMsg, hashes)
}
|
|
|
|
|
|
|
|
// AsyncRequestTxs queues a tx retrieval request to a remote peer. If
// the peer's retrieval queue is full, the event is silently dropped.
func (p *peer) AsyncRequestTxs(hashes []common.Hash) {
	select {
	case p.txRetrieval <- hashes:
	case <-p.term:
		p.Log().Debug("Dropping transaction retrieval request", "count", len(hashes))
	}
}
|
|
|
|
|
2015-06-29 12:44:00 +03:00
|
|
|
// Handshake executes the eth protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks. The send and receive
// halves run concurrently; both must succeed within handshakeTimeout.
func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) error {
	// Send out own handshake in a new thread
	errc := make(chan error, 2)

	var (
		status63 statusData63 // safe to read after two values have been received from errc
		status   statusData   // safe to read after two values have been received from errc
	)
	go func() {
		switch {
		case p.version == eth63:
			// eth/63 uses the legacy status message without fork ID.
			errc <- p2p.Send(p.rw, StatusMsg, &statusData63{
				ProtocolVersion: uint32(p.version),
				NetworkId:       network,
				TD:              td,
				CurrentBlock:    head,
				GenesisBlock:    genesis,
			})
		case p.version >= eth64:
			// eth/64+ carries the EIP-2124 fork ID for chain compatibility checks.
			errc <- p2p.Send(p.rw, StatusMsg, &statusData{
				ProtocolVersion: uint32(p.version),
				NetworkID:       network,
				TD:              td,
				Head:            head,
				Genesis:         genesis,
				ForkID:          forkID,
			})
		default:
			panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
		}
	}()
	go func() {
		switch {
		case p.version == eth63:
			errc <- p.readStatusLegacy(network, &status63, genesis)
		case p.version >= eth64:
			errc <- p.readStatus(network, &status, genesis, forkFilter)
		default:
			panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
		}
	}()
	timeout := time.NewTimer(handshakeTimeout)
	defer timeout.Stop()
	// Wait for both the send and the receive goroutine to finish.
	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err != nil {
				return err
			}
		case <-timeout.C:
			return p2p.DiscReadTimeout
		}
	}
	// Both halves succeeded; adopt the remote head state.
	switch {
	case p.version == eth63:
		p.td, p.head = status63.TD, status63.CurrentBlock
	case p.version >= eth64:
		p.td, p.head = status.TD, status.Head
	default:
		panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
	}
	return nil
}
|
|
|
|
|
2019-09-30 21:28:50 +03:00
|
|
|
// readStatusLegacy reads and validates the remote eth/63 status message,
// checking genesis, network ID and protocol version against our own. The
// decoded status is written into the caller-provided struct.
func (p *peer) readStatusLegacy(network uint64, status *statusData63, genesis common.Hash) error {
	msg, err := p.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Code != StatusMsg {
		return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
	}
	if msg.Size > protocolMaxMsgSize {
		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize)
	}
	// Decode the handshake and make sure everything matches
	if err := msg.Decode(&status); err != nil {
		return errResp(ErrDecode, "msg %v: %v", msg, err)
	}
	if status.GenesisBlock != genesis {
		return errResp(ErrGenesisMismatch, "%x (!= %x)", status.GenesisBlock[:8], genesis[:8])
	}
	if status.NetworkId != network {
		return errResp(ErrNetworkIDMismatch, "%d (!= %d)", status.NetworkId, network)
	}
	if int(status.ProtocolVersion) != p.version {
		return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version)
	}
	return nil
}
|
|
|
|
|
|
|
|
func (p *peer) readStatus(network uint64, status *statusData, genesis common.Hash, forkFilter forkid.Filter) error {
|
|
|
|
msg, err := p.rw.ReadMsg()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if msg.Code != StatusMsg {
|
|
|
|
return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
|
|
|
|
}
|
|
|
|
if msg.Size > protocolMaxMsgSize {
|
|
|
|
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize)
|
|
|
|
}
|
|
|
|
// Decode the handshake and make sure everything matches
|
|
|
|
if err := msg.Decode(&status); err != nil {
|
|
|
|
return errResp(ErrDecode, "msg %v: %v", msg, err)
|
|
|
|
}
|
|
|
|
if status.NetworkID != network {
|
|
|
|
return errResp(ErrNetworkIDMismatch, "%d (!= %d)", status.NetworkID, network)
|
2015-04-18 02:11:09 +03:00
|
|
|
}
|
2015-06-26 20:42:27 +03:00
|
|
|
if int(status.ProtocolVersion) != p.version {
|
|
|
|
return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version)
|
2015-04-18 02:11:09 +03:00
|
|
|
}
|
2019-09-30 21:28:50 +03:00
|
|
|
if status.Genesis != genesis {
|
|
|
|
return errResp(ErrGenesisMismatch, "%x (!= %x)", status.Genesis, genesis)
|
|
|
|
}
|
|
|
|
if err := forkFilter(status.ForkID); err != nil {
|
|
|
|
return errResp(ErrForkIDRejected, "%v", err)
|
|
|
|
}
|
2015-10-22 23:22:04 +03:00
|
|
|
return nil
|
2015-04-18 02:11:09 +03:00
|
|
|
}
|
2015-05-18 21:33:37 +03:00
|
|
|
|
2015-06-26 20:42:27 +03:00
|
|
|
// String implements fmt.Stringer.
|
|
|
|
func (p *peer) String() string {
|
|
|
|
return fmt.Sprintf("Peer %s [%s]", p.id,
|
|
|
|
fmt.Sprintf("eth/%2d", p.version),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2015-05-18 21:33:37 +03:00
|
|
|
// peerSet represents the collection of active peers currently participating in
// the Ethereum sub-protocol.
type peerSet struct {
	peers  map[string]*peer // Active peers, keyed by the peer's id string (see Register)
	lock   sync.RWMutex     // Protects peers and closed
	closed bool             // Set by Close; makes any further Register calls fail
}
|
|
|
|
|
|
|
|
// newPeerSet creates a new peer set to track the active participants.
|
|
|
|
func newPeerSet() *peerSet {
|
|
|
|
return &peerSet{
|
|
|
|
peers: make(map[string]*peer),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Register injects a new peer into the working set, or returns an error if the
|
2018-05-21 11:32:42 +03:00
|
|
|
// peer is already known. If a new peer it registered, its broadcast loop is also
|
|
|
|
// started.
|
2015-05-18 21:33:37 +03:00
|
|
|
func (ps *peerSet) Register(p *peer) error {
|
|
|
|
ps.lock.Lock()
|
|
|
|
defer ps.lock.Unlock()
|
|
|
|
|
2016-03-29 04:08:16 +03:00
|
|
|
if ps.closed {
|
|
|
|
return errClosed
|
|
|
|
}
|
2015-05-18 21:33:37 +03:00
|
|
|
if _, ok := ps.peers[p.id]; ok {
|
|
|
|
return errAlreadyRegistered
|
|
|
|
}
|
|
|
|
ps.peers[p.id] = p
|
2019-10-28 14:59:07 +03:00
|
|
|
go p.broadcastBlocks()
|
|
|
|
go p.broadcastTxs()
|
|
|
|
go p.retrievalTxs()
|
2018-05-21 11:32:42 +03:00
|
|
|
|
2015-05-18 21:33:37 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Unregister removes a remote peer from the active set, disabling any further
|
|
|
|
// actions to/from that particular entity.
|
|
|
|
func (ps *peerSet) Unregister(id string) error {
|
|
|
|
ps.lock.Lock()
|
|
|
|
defer ps.lock.Unlock()
|
|
|
|
|
2018-05-21 11:32:42 +03:00
|
|
|
p, ok := ps.peers[id]
|
|
|
|
if !ok {
|
2015-05-18 21:33:37 +03:00
|
|
|
return errNotRegistered
|
|
|
|
}
|
|
|
|
delete(ps.peers, id)
|
2018-05-21 11:32:42 +03:00
|
|
|
p.close()
|
|
|
|
|
2015-05-18 21:33:37 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Peer retrieves the registered peer with the given id.
|
|
|
|
func (ps *peerSet) Peer(id string) *peer {
|
|
|
|
ps.lock.RLock()
|
|
|
|
defer ps.lock.RUnlock()
|
|
|
|
|
|
|
|
return ps.peers[id]
|
|
|
|
}
|
|
|
|
|
|
|
|
// Len returns if the current number of peers in the set.
|
|
|
|
func (ps *peerSet) Len() int {
|
|
|
|
ps.lock.RLock()
|
|
|
|
defer ps.lock.RUnlock()
|
|
|
|
|
|
|
|
return len(ps.peers)
|
|
|
|
}
|
|
|
|
|
2015-05-20 10:34:45 +03:00
|
|
|
// PeersWithoutBlock retrieves a list of peers that do not have a given block in
|
|
|
|
// their set of known hashes.
|
|
|
|
func (ps *peerSet) PeersWithoutBlock(hash common.Hash) []*peer {
|
2015-05-18 21:33:37 +03:00
|
|
|
ps.lock.RLock()
|
|
|
|
defer ps.lock.RUnlock()
|
|
|
|
|
|
|
|
list := make([]*peer, 0, len(ps.peers))
|
|
|
|
for _, p := range ps.peers {
|
2018-07-16 10:54:19 +03:00
|
|
|
if !p.knownBlocks.Contains(hash) {
|
2015-05-18 21:33:37 +03:00
|
|
|
list = append(list, p)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return list
|
|
|
|
}
|
|
|
|
|
2015-05-20 10:34:45 +03:00
|
|
|
// PeersWithoutTx retrieves a list of peers that do not have a given transaction
|
2015-05-18 21:33:37 +03:00
|
|
|
// in their set of known hashes.
|
2015-05-20 10:34:45 +03:00
|
|
|
func (ps *peerSet) PeersWithoutTx(hash common.Hash) []*peer {
|
2015-05-18 21:33:37 +03:00
|
|
|
ps.lock.RLock()
|
|
|
|
defer ps.lock.RUnlock()
|
|
|
|
|
|
|
|
list := make([]*peer, 0, len(ps.peers))
|
|
|
|
for _, p := range ps.peers {
|
2018-07-16 10:54:19 +03:00
|
|
|
if !p.knownTxs.Contains(hash) {
|
2015-05-18 21:33:37 +03:00
|
|
|
list = append(list, p)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return list
|
|
|
|
}
|
|
|
|
|
|
|
|
// BestPeer retrieves the known peer with the currently highest total difficulty.
|
|
|
|
func (ps *peerSet) BestPeer() *peer {
|
|
|
|
ps.lock.RLock()
|
|
|
|
defer ps.lock.RUnlock()
|
|
|
|
|
2015-06-09 14:56:27 +03:00
|
|
|
var (
|
|
|
|
bestPeer *peer
|
|
|
|
bestTd *big.Int
|
|
|
|
)
|
2015-05-18 21:33:37 +03:00
|
|
|
for _, p := range ps.peers {
|
2016-07-25 15:14:14 +03:00
|
|
|
if _, td := p.Head(); bestPeer == nil || td.Cmp(bestTd) > 0 {
|
2015-06-09 14:56:27 +03:00
|
|
|
bestPeer, bestTd = p, td
|
2015-05-18 21:33:37 +03:00
|
|
|
}
|
|
|
|
}
|
2015-06-09 14:56:27 +03:00
|
|
|
return bestPeer
|
2015-05-18 21:33:37 +03:00
|
|
|
}
|
2016-03-29 04:08:16 +03:00
|
|
|
|
|
|
|
// Close disconnects all peers.
|
|
|
|
// No new peers can be registered after Close has returned.
|
|
|
|
func (ps *peerSet) Close() {
|
|
|
|
ps.lock.Lock()
|
|
|
|
defer ps.lock.Unlock()
|
|
|
|
|
|
|
|
for _, p := range ps.peers {
|
|
|
|
p.Disconnect(p2p.DiscQuitting)
|
|
|
|
}
|
|
|
|
ps.closed = true
|
|
|
|
}
|