Merge pull request #20234 from rjl493456442/newtxhashes_2
core, eth: announce based transaction propagation
commit eddcecc160
@@ -18,7 +18,6 @@ package core
 import (
 	"errors"
-	"fmt"
 	"math"
 	"math/big"
 	"sort"
@@ -53,6 +52,10 @@ const (
 )
 
 var (
+	// ErrAlreadyKnown is returned if the transactions is already contained
+	// within the pool.
+	ErrAlreadyKnown = errors.New("already known")
+
 	// ErrInvalidSender is returned if the transaction contains an invalid signature.
 	ErrInvalidSender = errors.New("invalid sender")
 
@@ -579,7 +582,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
 	if pool.all.Get(hash) != nil {
 		log.Trace("Discarding already known transaction", "hash", hash)
 		knownTxMeter.Mark(1)
-		return false, fmt.Errorf("known transaction: %x", hash)
+		return false, ErrAlreadyKnown
 	}
 	// If the transaction fails basic validation, discard it
 	if err := pool.validateTx(tx, local); err != nil {
@@ -786,7 +789,7 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
 	for i, tx := range txs {
 		// If the transaction is known, pre-set the error slot
 		if pool.all.Get(tx.Hash()) != nil {
-			errs[i] = fmt.Errorf("known transaction: %x", tx.Hash())
+			errs[i] = ErrAlreadyKnown
 			knownTxMeter.Mark(1)
 			continue
 		}
@@ -864,6 +867,12 @@ func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
 	return pool.all.Get(hash)
 }
 
+// Has returns an indicator whether txpool has a transaction cached with the
+// given hash.
+func (pool *TxPool) Has(hash common.Hash) bool {
+	return pool.all.Get(hash) != nil
+}
+
 // removeTx removes a single transaction from the queue, moving all subsequent
 // transactions back to the future queue.
 func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
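These tx_pool.go hunks replace the formatted `known transaction: %x` errors with the exported `ErrAlreadyKnown` sentinel and expose a lightweight `Has` lookup; both are what the new transaction fetcher below builds on. A minimal sketch of the kind of caller this enables (the `classify` helper is illustrative, not part of the commit):

```go
package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
)

// classify shows how a sentinel error lets callers branch on the rejection
// reason instead of parsing formatted error strings.
func classify(pool *core.TxPool, tx *types.Transaction) string {
	switch err := pool.AddRemote(tx); err {
	case nil:
		return "accepted"
	case core.ErrAlreadyKnown:
		return "duplicate" // harmless: the pool already tracks this tx
	case core.ErrUnderpriced, core.ErrReplaceUnderpriced:
		return "underpriced" // worth remembering so the hash isn't re-fetched
	default:
		return fmt.Sprintf("rejected: %v", err)
	}
}
```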
@@ -470,7 +470,7 @@ func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) {
 		defer p.lock.RUnlock()
 		return p.headerThroughput
 	}
-	return ps.idlePeers(62, 64, idle, throughput)
+	return ps.idlePeers(62, 65, idle, throughput)
 }
 
 // BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
@@ -484,7 +484,7 @@ func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) {
 		defer p.lock.RUnlock()
 		return p.blockThroughput
 	}
-	return ps.idlePeers(62, 64, idle, throughput)
+	return ps.idlePeers(62, 65, idle, throughput)
 }
 
 // ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers
@@ -498,7 +498,7 @@ func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) {
 		defer p.lock.RUnlock()
 		return p.receiptThroughput
 	}
-	return ps.idlePeers(63, 64, idle, throughput)
+	return ps.idlePeers(63, 65, idle, throughput)
 }
 
 // NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle
@@ -512,7 +512,7 @@ func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) {
 		defer p.lock.RUnlock()
 		return p.stateThroughput
 	}
-	return ps.idlePeers(63, 64, idle, throughput)
+	return ps.idlePeers(63, 65, idle, throughput)
 }
 
 // idlePeers retrieves a flat list of all currently idle peers satisfying the
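The only change across these four downloader hunks is the upper bound passed to `idlePeers`, bumped from 64 to 65 so request scheduling also considers peers running the new eth/65 protocol (the version that introduces transaction announcements). Reading the two numeric arguments as an inclusive protocol-version window, the selection amounts to something like this sketch (the `Peer` type is a stand-in, not the downloader's real `peerConnection`):

```go
package example

// Peer is a simplified stand-in for illustration only.
type Peer struct {
	ID      string
	Version int  // eth protocol version, e.g. 62..65
	Idle    bool // no request currently in flight
}

// pickIdle mirrors the version filtering implied by idlePeers(min, max, ...):
// only idle peers whose protocol version falls inside [min, max] are eligible.
func pickIdle(peers []Peer, min, max int) []Peer {
	var idle []Peer
	for _, p := range peers {
		if p.Idle && p.Version >= min && p.Version <= max {
			idle = append(idle, p)
		}
	}
	return idle
}
```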
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-// Package fetcher contains the block announcement based synchronisation.
+// Package fetcher contains the announcement based blocks or transaction synchronisation.
 package fetcher
 
 import (
@@ -27,18 +27,42 @@ import (
 	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
 )
 
 const (
-	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
+	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
 	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
-	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block
+	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block/transaction
+)
+
+const (
 	maxUncleDist = 7   // Maximum allowed backward distance from the chain head
 	maxQueueDist = 32  // Maximum allowed distance from the chain head to queue
 	hashLimit    = 256 // Maximum number of unique blocks a peer may have announced
 	blockLimit   = 64  // Maximum number of unique blocks a peer may have delivered
 )
 
+var (
+	blockAnnounceInMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/announces/in", nil)
+	blockAnnounceOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/block/announces/out", nil)
+	blockAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/drop", nil)
+	blockAnnounceDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/announces/dos", nil)
+
+	blockBroadcastInMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/in", nil)
+	blockBroadcastOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/block/broadcasts/out", nil)
+	blockBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/drop", nil)
+	blockBroadcastDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/dos", nil)
+
+	headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/headers", nil)
+	bodyFetchMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/bodies", nil)
+
+	headerFilterInMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/in", nil)
+	headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/out", nil)
+	bodyFilterInMeter    = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/in", nil)
+	bodyFilterOutMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil)
+)
+
 var (
 	errTerminated = errors.New("terminated")
 )
@@ -67,9 +91,9 @@ type chainInsertFn func(types.Blocks) (int, error)
 // peerDropFn is a callback type for dropping a peer detected as malicious.
 type peerDropFn func(id string)
 
-// announce is the hash notification of the availability of a new block in the
+// blockAnnounce is the hash notification of the availability of a new block in the
 // network.
-type announce struct {
+type blockAnnounce struct {
 	hash   common.Hash   // Hash of the block being announced
 	number uint64        // Number of the block being announced (0 = unknown | old protocol)
 	header *types.Header // Header of the block partially reassembled (new protocol)
@@ -97,18 +121,18 @@ type bodyFilterTask struct {
 	time  time.Time // Arrival time of the blocks' contents
 }
 
-// inject represents a schedules import operation.
-type inject struct {
+// blockInject represents a schedules import operation.
+type blockInject struct {
 	origin string
 	block  *types.Block
 }
 
-// Fetcher is responsible for accumulating block announcements from various peers
+// BlockFetcher is responsible for accumulating block announcements from various peers
 // and scheduling them for retrieval.
-type Fetcher struct {
+type BlockFetcher struct {
 	// Various event channels
-	notify chan *announce
-	inject chan *inject
+	notify chan *blockAnnounce
+	inject chan *blockInject
 
 	headerFilter chan chan *headerFilterTask
 	bodyFilter   chan chan *bodyFilterTask
@@ -117,16 +141,16 @@ type Fetcher struct {
 	quit chan struct{}
 
 	// Announce states
-	announces  map[string]int              // Per peer announce counts to prevent memory exhaustion
-	announced  map[common.Hash][]*announce // Announced blocks, scheduled for fetching
-	fetching   map[common.Hash]*announce   // Announced blocks, currently fetching
-	fetched    map[common.Hash][]*announce // Blocks with headers fetched, scheduled for body retrieval
-	completing map[common.Hash]*announce   // Blocks with headers, currently body-completing
+	announces  map[string]int                   // Per peer blockAnnounce counts to prevent memory exhaustion
+	announced  map[common.Hash][]*blockAnnounce // Announced blocks, scheduled for fetching
+	fetching   map[common.Hash]*blockAnnounce   // Announced blocks, currently fetching
+	fetched    map[common.Hash][]*blockAnnounce // Blocks with headers fetched, scheduled for body retrieval
+	completing map[common.Hash]*blockAnnounce   // Blocks with headers, currently body-completing
 
 	// Block cache
 	queue  *prque.Prque            // Queue containing the import operations (block number sorted)
 	queues map[string]int          // Per peer block counts to prevent memory exhaustion
-	queued map[common.Hash]*inject // Set of already queued blocks (to dedupe imports)
+	queued map[common.Hash]*blockInject // Set of already queued blocks (to dedupe imports)
 
 	// Callbacks
 	getBlock blockRetrievalFn // Retrieves a block from the local chain
@@ -137,30 +161,30 @@ type Fetcher struct {
 	dropPeer       peerDropFn         // Drops a peer for misbehaving
 
 	// Testing hooks
-	announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the announce list
+	announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the blockAnnounce list
 	queueChangeHook    func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
 	fetchingHook       func([]common.Hash)     // Method to call upon starting a block (eth/61) or header (eth/62) fetch
 	completingHook     func([]common.Hash)     // Method to call upon starting a block body fetch (eth/62)
 	importedHook       func(*types.Block)      // Method to call upon successful block import (both eth/61 and eth/62)
 }
 
-// New creates a block fetcher to retrieve blocks based on hash announcements.
-func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher {
-	return &Fetcher{
-		notify:         make(chan *announce),
-		inject:         make(chan *inject),
+// NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
+func NewBlockFetcher(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {
+	return &BlockFetcher{
+		notify:         make(chan *blockAnnounce),
+		inject:         make(chan *blockInject),
 		headerFilter:   make(chan chan *headerFilterTask),
 		bodyFilter:     make(chan chan *bodyFilterTask),
 		done:           make(chan common.Hash),
 		quit:           make(chan struct{}),
 		announces:      make(map[string]int),
-		announced:      make(map[common.Hash][]*announce),
-		fetching:       make(map[common.Hash]*announce),
-		fetched:        make(map[common.Hash][]*announce),
-		completing:     make(map[common.Hash]*announce),
+		announced:      make(map[common.Hash][]*blockAnnounce),
+		fetching:       make(map[common.Hash]*blockAnnounce),
+		fetched:        make(map[common.Hash][]*blockAnnounce),
+		completing:     make(map[common.Hash]*blockAnnounce),
 		queue:          prque.New(nil),
 		queues:         make(map[string]int),
-		queued:         make(map[common.Hash]*inject),
+		queued:         make(map[common.Hash]*blockInject),
 		getBlock:       getBlock,
 		verifyHeader:   verifyHeader,
 		broadcastBlock: broadcastBlock,
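The `Fetcher`→`BlockFetcher` and `announce`→`blockAnnounce` renames are mechanical, freeing the short names for the transaction fetcher added below. Construction is unchanged apart from the name; a sketch with no-op callbacks, mirroring what the test harness later in this diff does:

```go
package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/fetcher"
)

func main() {
	f := fetcher.NewBlockFetcher(
		func(hash common.Hash) *types.Block { return nil },       // getBlock: local chain lookup stub
		func(header *types.Header) error { return nil },          // verifyHeader: accept everything
		func(block *types.Block, propagate bool) {},              // broadcastBlock: no-op
		func() uint64 { return 0 },                               // chainHeight
		func(blocks types.Blocks) (int, error) { return 0, nil }, // insertChain: discard
		func(id string) {},                                       // dropPeer: no-op
	)
	f.Start()
	defer f.Stop()
}
```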
@@ -172,21 +196,21 @@ func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBloc
 
 // Start boots up the announcement based synchroniser, accepting and processing
 // hash notifications and block fetches until termination requested.
-func (f *Fetcher) Start() {
+func (f *BlockFetcher) Start() {
 	go f.loop()
 }
 
 // Stop terminates the announcement based synchroniser, canceling all pending
 // operations.
-func (f *Fetcher) Stop() {
+func (f *BlockFetcher) Stop() {
 	close(f.quit)
 }
 
 // Notify announces the fetcher of the potential availability of a new block in
 // the network.
-func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
+func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
 	headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
-	block := &announce{
+	block := &blockAnnounce{
 		hash:   hash,
 		number: number,
 		time:   time,
@@ -203,8 +227,8 @@ func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time
 }
 
 // Enqueue tries to fill gaps the fetcher's future import queue.
-func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
-	op := &inject{
+func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error {
+	op := &blockInject{
 		origin: peer,
 		block:  block,
 	}
@@ -218,7 +242,7 @@ func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
 
 // FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
 // returning those that should be handled differently.
-func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
+func (f *BlockFetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
 	log.Trace("Filtering headers", "peer", peer, "headers", len(headers))
 
 	// Send the filter channel to the fetcher
@@ -246,7 +270,7 @@ func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.
 
 // FilterBodies extracts all the block bodies that were explicitly requested by
 // the fetcher, returning those that should be handled differently.
-func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
+func (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
 	log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles))
 
 	// Send the filter channel to the fetcher
@@ -274,7 +298,7 @@ func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction,
 
 // Loop is the main fetcher loop, checking and processing various notification
 // events.
-func (f *Fetcher) loop() {
+func (f *BlockFetcher) loop() {
 	// Iterate the block fetching until a quit is requested
 	fetchTimer := time.NewTimer(0)
 	completeTimer := time.NewTimer(0)
@@ -289,7 +313,7 @@ func (f *Fetcher) loop() {
 		// Import any queued blocks that could potentially fit
 		height := f.chainHeight()
 		for !f.queue.Empty() {
-			op := f.queue.PopItem().(*inject)
+			op := f.queue.PopItem().(*blockInject)
 			hash := op.block.Hash()
 			if f.queueChangeHook != nil {
 				f.queueChangeHook(hash, false)
@@ -313,24 +337,24 @@ func (f *Fetcher) loop() {
 		// Wait for an outside event to occur
 		select {
 		case <-f.quit:
-			// Fetcher terminating, abort all operations
+			// BlockFetcher terminating, abort all operations
 			return
 
 		case notification := <-f.notify:
 			// A block was announced, make sure the peer isn't DOSing us
-			propAnnounceInMeter.Mark(1)
+			blockAnnounceInMeter.Mark(1)
 
 			count := f.announces[notification.origin] + 1
 			if count > hashLimit {
 				log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
-				propAnnounceDOSMeter.Mark(1)
+				blockAnnounceDOSMeter.Mark(1)
 				break
 			}
 			// If we have a valid block number, check that it's potentially useful
 			if notification.number > 0 {
 				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
 					log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
-					propAnnounceDropMeter.Mark(1)
+					blockAnnounceDropMeter.Mark(1)
 					break
 				}
 			}
@@ -352,7 +376,7 @@ func (f *Fetcher) loop() {
 
 		case op := <-f.inject:
 			// A direct block insertion was requested, try and fill any pending gaps
-			propBroadcastInMeter.Mark(1)
+			blockBroadcastInMeter.Mark(1)
 			f.enqueue(op.origin, op.block)
 
 		case hash := <-f.done:
@@ -439,7 +463,7 @@ func (f *Fetcher) loop() {
 
 			// Split the batch of headers into unknown ones (to return to the caller),
 			// known incomplete ones (requiring body retrievals) and completed blocks.
-			unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{}
+			unknown, incomplete, complete := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}
 			for _, header := range task.headers {
 				hash := header.Hash()
 
@@ -475,7 +499,7 @@ func (f *Fetcher) loop() {
 					f.forgetHash(hash)
 				}
 			} else {
-				// Fetcher doesn't know about it, add to the return list
+				// BlockFetcher doesn't know about it, add to the return list
 				unknown = append(unknown, header)
 			}
 		}
@@ -562,8 +586,8 @@ func (f *Fetcher) loop() {
 	}
 }
 
-// rescheduleFetch resets the specified fetch timer to the next announce timeout.
-func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
+// rescheduleFetch resets the specified fetch timer to the next blockAnnounce timeout.
+func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) {
 	// Short circuit if no blocks are announced
 	if len(f.announced) == 0 {
 		return
@@ -579,7 +603,7 @@ func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
 }
 
 // rescheduleComplete resets the specified completion timer to the next fetch timeout.
-func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
+func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) {
 	// Short circuit if no headers are fetched
 	if len(f.fetched) == 0 {
 		return
@@ -596,27 +620,27 @@ func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
 
 // enqueue schedules a new future import operation, if the block to be imported
 // has not yet been seen.
-func (f *Fetcher) enqueue(peer string, block *types.Block) {
+func (f *BlockFetcher) enqueue(peer string, block *types.Block) {
 	hash := block.Hash()
 
 	// Ensure the peer isn't DOSing us
 	count := f.queues[peer] + 1
 	if count > blockLimit {
 		log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit)
-		propBroadcastDOSMeter.Mark(1)
+		blockBroadcastDOSMeter.Mark(1)
 		f.forgetHash(hash)
 		return
 	}
 	// Discard any past or too distant blocks
 	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
 		log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist)
-		propBroadcastDropMeter.Mark(1)
+		blockBroadcastDropMeter.Mark(1)
 		f.forgetHash(hash)
 		return
 	}
 	// Schedule the block for future importing
 	if _, ok := f.queued[hash]; !ok {
-		op := &inject{
+		op := &blockInject{
 			origin: peer,
 			block:  block,
 		}
@@ -633,7 +657,7 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
 // insert spawns a new goroutine to run a block insertion into the chain. If the
 // block's number is at the same height as the current import phase, it updates
 // the phase states accordingly.
-func (f *Fetcher) insert(peer string, block *types.Block) {
+func (f *BlockFetcher) insert(peer string, block *types.Block) {
 	hash := block.Hash()
 
 	// Run the import on a new thread
@@ -651,7 +675,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
 		switch err := f.verifyHeader(block.Header()); err {
 		case nil:
 			// All ok, quickly propagate to our peers
-			propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
+			blockBroadcastOutTimer.UpdateSince(block.ReceivedAt)
 			go f.broadcastBlock(block, true)
 
 		case consensus.ErrFutureBlock:
@@ -669,7 +693,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
 			return
 		}
 		// If import succeeded, broadcast the block
-		propAnnounceOutTimer.UpdateSince(block.ReceivedAt)
+		blockAnnounceOutTimer.UpdateSince(block.ReceivedAt)
 		go f.broadcastBlock(block, false)
 
 		// Invoke the testing hook if needed
@@ -681,7 +705,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
 
 // forgetHash removes all traces of a block announcement from the fetcher's
 // internal state.
-func (f *Fetcher) forgetHash(hash common.Hash) {
+func (f *BlockFetcher) forgetHash(hash common.Hash) {
 	// Remove all pending announces and decrement DOS counters
 	for _, announce := range f.announced[hash] {
 		f.announces[announce.origin]--
@@ -723,7 +747,7 @@ func (f *Fetcher) forgetHash(hash common.Hash) {
 
 // forgetBlock removes all traces of a queued block from the fetcher's internal
 // state.
-func (f *Fetcher) forgetBlock(hash common.Hash) {
+func (f *BlockFetcher) forgetBlock(hash common.Hash) {
 	if insert := f.queued[hash]; insert != nil {
 		f.queues[insert.origin]--
 		if f.queues[insert.origin] == 0 {
@@ -76,7 +76,7 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common
 
 // fetcherTester is a test simulator for mocking out local block chain.
 type fetcherTester struct {
-	fetcher *Fetcher
+	fetcher *BlockFetcher
 
 	hashes []common.Hash                // Hash chain belonging to the tester
 	blocks map[common.Hash]*types.Block // Blocks belonging to the tester
@@ -92,7 +92,7 @@ func newTester() *fetcherTester {
 		blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
 		drops:  make(map[string]bool),
 	}
-	tester.fetcher = New(tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
+	tester.fetcher = NewBlockFetcher(tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
 	tester.fetcher.Start()
 
 	return tester
@@ -1,43 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Contains the metrics collected by the fetcher.
-
-package fetcher
-
-import (
-	"github.com/ethereum/go-ethereum/metrics"
-)
-
-var (
-	propAnnounceInMeter   = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/in", nil)
-	propAnnounceOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/prop/announces/out", nil)
-	propAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/drop", nil)
-	propAnnounceDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/dos", nil)
-
-	propBroadcastInMeter   = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/in", nil)
-	propBroadcastOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/prop/broadcasts/out", nil)
-	propBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/drop", nil)
-	propBroadcastDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/dos", nil)
-
-	headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/fetch/headers", nil)
-	bodyFetchMeter   = metrics.NewRegisteredMeter("eth/fetcher/fetch/bodies", nil)
-
-	headerFilterInMeter  = metrics.NewRegisteredMeter("eth/fetcher/filter/headers/in", nil)
-	headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/filter/headers/out", nil)
-	bodyFilterInMeter    = metrics.NewRegisteredMeter("eth/fetcher/filter/bodies/in", nil)
-	bodyFilterOutMeter   = metrics.NewRegisteredMeter("eth/fetcher/filter/bodies/out", nil)
-)
eth/fetcher/tx_fetcher.go (new file, 894 lines)
@@ -0,0 +1,894 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package fetcher
+
+import (
+	"bytes"
+	"fmt"
+	mrand "math/rand"
+	"sort"
+	"time"
+
+	mapset "github.com/deckarep/golang-set"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
+)
+
+const (
+	// maxTxAnnounces is the maximum number of unique transaction a peer
+	// can announce in a short time.
+	maxTxAnnounces = 4096
+
+	// maxTxRetrievals is the maximum transaction number can be fetched in one
+	// request. The rationale to pick 256 is:
+	//   - In eth protocol, the softResponseLimit is 2MB. Nowadays according to
+	//     Etherscan the average transaction size is around 200B, so in theory
+	//     we can include lots of transaction in a single protocol packet.
+	//   - However the maximum size of a single transaction is raised to 128KB,
+	//     so pick a middle value here to ensure we can maximize the efficiency
+	//     of the retrieval and response size overflow won't happen in most cases.
+	maxTxRetrievals = 256
+
+	// maxTxUnderpricedSetSize is the size of the underpriced transaction set that
+	// is used to track recent transactions that have been dropped so we don't
+	// re-request them.
+	maxTxUnderpricedSetSize = 32768
+
+	// txArriveTimeout is the time allowance before an announced transaction is
+	// explicitly requested.
+	txArriveTimeout = 500 * time.Millisecond
+
+	// txGatherSlack is the interval used to collate almost-expired announces
+	// with network fetches.
+	txGatherSlack = 100 * time.Millisecond
+)
+
+var (
+	// txFetchTimeout is the maximum allotted time to return an explicitly
+	// requested transaction.
+	txFetchTimeout = 5 * time.Second
+)
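Spelling out the sizing rationale in the `maxTxRetrievals` comment above, using the comment's own figures (2MB soft limit, ~200B average, 128KB maximum), the arithmetic works out roughly like this:

```go
package main

import "fmt"

func main() {
	const (
		softResponseLimit = 2 * 1024 * 1024 // eth soft cap on one response packet
		avgTxSize         = 200             // average tx size cited in the comment
		maxTxRetrievals   = 256
	)
	// A typical full request stays far below the soft cap...
	fmt.Println(maxTxRetrievals*avgTxSize, "bytes for an average batch") // 51200 (~50KB)
	// ...while the cap itself could hold ~10k average transactions, so 256
	// is a deliberate middle ground against 128KB worst-case transactions.
	fmt.Println(softResponseLimit/avgTxSize, "average txs would fit under the cap") // 10485
}
```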
+
+var (
+	txAnnounceInMeter          = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/in", nil)
+	txAnnounceKnownMeter       = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/known", nil)
+	txAnnounceUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/underpriced", nil)
+	txAnnounceDOSMeter         = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/dos", nil)
+
+	txBroadcastInMeter          = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/in", nil)
+	txBroadcastKnownMeter       = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/known", nil)
+	txBroadcastUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/underpriced", nil)
+	txBroadcastOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/otherreject", nil)
+
+	txRequestOutMeter     = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/out", nil)
+	txRequestFailMeter    = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/fail", nil)
+	txRequestDoneMeter    = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/done", nil)
+	txRequestTimeoutMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/timeout", nil)
+
+	txReplyInMeter          = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/in", nil)
+	txReplyKnownMeter       = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/known", nil)
+	txReplyUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/underpriced", nil)
+	txReplyOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/otherreject", nil)
+
+	txFetcherWaitingPeers   = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/peers", nil)
+	txFetcherWaitingHashes  = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/hashes", nil)
+	txFetcherQueueingPeers  = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/peers", nil)
+	txFetcherQueueingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/hashes", nil)
+	txFetcherFetchingPeers  = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/peers", nil)
+	txFetcherFetchingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/hashes", nil)
+)
+
// txAnnounce is the notification of the availability of a batch
|
||||||
|
// of new transactions in the network.
|
||||||
|
type txAnnounce struct {
|
||||||
|
origin string // Identifier of the peer originating the notification
|
||||||
|
hashes []common.Hash // Batch of transaction hashes being announced
|
||||||
|
}
|
||||||
|
|
||||||
|
// txRequest represents an in-flight transaction retrieval request destined to
|
||||||
|
// a specific peers.
|
||||||
|
type txRequest struct {
|
||||||
|
hashes []common.Hash // Transactions having been requested
|
||||||
|
stolen map[common.Hash]struct{} // Deliveries by someone else (don't re-request)
|
||||||
|
time mclock.AbsTime // Timestamp of the request
|
||||||
|
}
|
||||||
|
|
||||||
|
// txDelivery is the notification that a batch of transactions have been added
|
||||||
|
// to the pool and should be untracked.
|
||||||
|
type txDelivery struct {
|
||||||
|
origin string // Identifier of the peer originating the notification
|
||||||
|
hashes []common.Hash // Batch of transaction hashes having been delivered
|
||||||
|
direct bool // Whether this is a direct reply or a broadcast
|
||||||
|
}
|
||||||
|
|
||||||
|
// txDrop is the notiication that a peer has disconnected.
|
||||||
|
type txDrop struct {
|
||||||
|
peer string
|
||||||
|
}
|
||||||
|
|
||||||
|
// TxFetcher is responsible for retrieving new transaction based on announcements.
|
||||||
|
//
|
||||||
|
// The fetcher operates in 3 stages:
|
||||||
|
// - Transactions that are newly discovered are moved into a wait list.
|
||||||
|
// - After ~500ms passes, transactions from the wait list that have not been
|
||||||
|
// broadcast to us in whole are moved into a queueing area.
|
||||||
|
// - When a connected peer doesn't have in-flight retrieval requests, any
|
||||||
|
// transaction queued up (and announced by the peer) are allocated to the
|
||||||
|
// peer and moved into a fetching status until it's fulfilled or fails.
|
||||||
|
//
|
||||||
|
// The invariants of the fetcher are:
|
||||||
|
// - Each tracked transaction (hash) must only be present in one of the
|
||||||
|
// three stages. This ensures that the fetcher operates akin to a finite
|
||||||
|
// state automata and there's do data leak.
|
||||||
|
// - Each peer that announced transactions may be scheduled retrievals, but
|
||||||
|
// only ever one concurrently. This ensures we can immediately know what is
|
||||||
|
// missing from a reply and reschedule it.
|
||||||
|
type TxFetcher struct {
|
||||||
|
notify chan *txAnnounce
|
||||||
|
cleanup chan *txDelivery
|
||||||
|
drop chan *txDrop
|
||||||
|
quit chan struct{}
|
||||||
|
|
||||||
|
underpriced mapset.Set // Transactions discarded as too cheap (don't re-fetch)
|
||||||
|
|
||||||
|
// Stage 1: Waiting lists for newly discovered transactions that might be
|
||||||
|
// broadcast without needing explicit request/reply round trips.
|
||||||
|
waitlist map[common.Hash]map[string]struct{} // Transactions waiting for an potential broadcast
|
||||||
|
waittime map[common.Hash]mclock.AbsTime // Timestamps when transactions were added to the waitlist
|
||||||
|
waitslots map[string]map[common.Hash]struct{} // Waiting announcement sgroupped by peer (DoS protection)
|
||||||
|
|
||||||
|
// Stage 2: Queue of transactions that waiting to be allocated to some peer
|
||||||
|
// to be retrieved directly.
|
||||||
|
announces map[string]map[common.Hash]struct{} // Set of announced transactions, grouped by origin peer
|
||||||
|
announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash
|
||||||
|
|
||||||
|
// Stage 3: Set of transactions currently being retrieved, some which may be
|
||||||
|
// fulfilled and some rescheduled. Note, this step shares 'announces' from the
|
||||||
|
// previous stage to avoid having to duplicate (need it for DoS checks).
|
||||||
|
fetching map[common.Hash]string // Transaction set currently being retrieved
|
||||||
|
requests map[string]*txRequest // In-flight transaction retrievals
|
||||||
|
alternates map[common.Hash]map[string]struct{} // In-flight transaction alternate origins if retrieval fails
|
||||||
|
|
||||||
|
// Callbacks
|
||||||
|
hasTx func(common.Hash) bool // Retrieves a tx from the local txpool
|
||||||
|
addTxs func([]*types.Transaction) []error // Insert a batch of transactions into local txpool
|
||||||
|
fetchTxs func(string, []common.Hash) error // Retrieves a set of txs from a remote peer
|
||||||
|
|
||||||
|
step chan struct{} // Notification channel when the fetcher loop iterates
|
||||||
|
clock mclock.Clock // Time wrapper to simulate in tests
|
||||||
|
rand *mrand.Rand // Randomizer to use in tests instead of map range loops (soft-random)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTxFetcher creates a transaction fetcher to retrieve transaction
|
||||||
|
// based on hash announcements.
|
||||||
|
func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher {
|
||||||
|
return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTxFetcherForTests is a testing method to mock out the realtime clock with
|
||||||
|
// a simulated version and the internal randomness with a deterministic one.
|
||||||
|
func NewTxFetcherForTests(
|
||||||
|
hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error,
|
||||||
|
clock mclock.Clock, rand *mrand.Rand) *TxFetcher {
|
||||||
|
return &TxFetcher{
|
||||||
|
notify: make(chan *txAnnounce),
|
||||||
|
cleanup: make(chan *txDelivery),
|
||||||
|
drop: make(chan *txDrop),
|
||||||
|
quit: make(chan struct{}),
|
||||||
|
waitlist: make(map[common.Hash]map[string]struct{}),
|
||||||
|
waittime: make(map[common.Hash]mclock.AbsTime),
|
||||||
|
waitslots: make(map[string]map[common.Hash]struct{}),
|
||||||
|
announces: make(map[string]map[common.Hash]struct{}),
|
||||||
|
announced: make(map[common.Hash]map[string]struct{}),
|
||||||
|
fetching: make(map[common.Hash]string),
|
||||||
|
requests: make(map[string]*txRequest),
|
||||||
|
alternates: make(map[common.Hash]map[string]struct{}),
|
||||||
|
underpriced: mapset.NewSet(),
|
||||||
|
hasTx: hasTx,
|
||||||
|
addTxs: addTxs,
|
||||||
|
fetchTxs: fetchTxs,
|
||||||
|
clock: clock,
|
||||||
|
rand: rand,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
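A sketch of how the eth handler might wire the three callbacks to the pool and network layer (this helper and the `request` parameter are assumptions for illustration; in the real handler the equivalent callback sends a GetPooledTransactions request to the named peer):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/eth/fetcher"
)

// newPoolBackedFetcher glues a TxFetcher to a transaction pool. Has is the
// method added to TxPool earlier in this commit; AddRemotes already existed
// and matches the addTxs callback signature exactly.
func newPoolBackedFetcher(pool *core.TxPool, request func(peer string, hashes []common.Hash) error) *fetcher.TxFetcher {
	return fetcher.NewTxFetcher(
		pool.Has,        // hasTx: do we already have this transaction?
		pool.AddRemotes, // addTxs: import a batch into the pool
		request,         // fetchTxs: ask one peer for a batch of hashes
	)
}
```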
+// Notify announces the fetcher of the potential availability of a new batch of
+// transactions in the network.
+func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
+	// Keep track of all the announced transactions
+	txAnnounceInMeter.Mark(int64(len(hashes)))
+
+	// Skip any transaction announcements that we already know of, or that we've
+	// previously marked as cheap and discarded. This check is of course racey,
+	// because multiple concurrent notifies will still manage to pass it, but it's
+	// still valuable to check here because it runs concurrent to the internal
+	// loop, so anything caught here is time saved internally.
+	var (
+		unknowns               = make([]common.Hash, 0, len(hashes))
+		duplicate, underpriced int64
+	)
+	for _, hash := range hashes {
+		switch {
+		case f.hasTx(hash):
+			duplicate++
+
+		case f.underpriced.Contains(hash):
+			underpriced++
+
+		default:
+			unknowns = append(unknowns, hash)
+		}
+	}
+	txAnnounceKnownMeter.Mark(duplicate)
+	txAnnounceUnderpricedMeter.Mark(underpriced)
+
+	// If anything's left to announce, push it into the internal loop
+	if len(unknowns) == 0 {
+		return nil
+	}
+	announce := &txAnnounce{
+		origin: peer,
+		hashes: unknowns,
+	}
+	select {
+	case f.notify <- announce:
+		return nil
+	case <-f.quit:
+		return errTerminated
+	}
+}
+
+// Enqueue imports a batch of received transaction into the transaction pool
+// and the fetcher. This method may be called by both transaction broadcasts and
+// direct request replies. The differentiation is important so the fetcher can
+// re-shedule missing transactions as soon as possible.
+func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) error {
+	// Keep track of all the propagated transactions
+	if direct {
+		txReplyInMeter.Mark(int64(len(txs)))
+	} else {
+		txBroadcastInMeter.Mark(int64(len(txs)))
+	}
+	// Push all the transactions into the pool, tracking underpriced ones to avoid
+	// re-requesting them and dropping the peer in case of malicious transfers.
+	var (
+		added       = make([]common.Hash, 0, len(txs))
+		duplicate   int64
+		underpriced int64
+		otherreject int64
+	)
+	errs := f.addTxs(txs)
+	for i, err := range errs {
+		if err != nil {
+			// Track the transaction hash if the price is too low for us.
+			// Avoid re-request this transaction when we receive another
+			// announcement.
+			if err == core.ErrUnderpriced || err == core.ErrReplaceUnderpriced {
+				for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize {
+					f.underpriced.Pop()
+				}
+				f.underpriced.Add(txs[i].Hash())
+			}
+			// Track a few interesting failure types
+			switch err {
+			case nil: // Noop, but need to handle to not count these
+
+			case core.ErrAlreadyKnown:
+				duplicate++
+
+			case core.ErrUnderpriced, core.ErrReplaceUnderpriced:
+				underpriced++
+
+			default:
+				otherreject++
+			}
+		}
+		added = append(added, txs[i].Hash())
+	}
+	if direct {
+		txReplyKnownMeter.Mark(duplicate)
+		txReplyUnderpricedMeter.Mark(underpriced)
+		txReplyOtherRejectMeter.Mark(otherreject)
+	} else {
+		txBroadcastKnownMeter.Mark(duplicate)
+		txBroadcastUnderpricedMeter.Mark(underpriced)
+		txBroadcastOtherRejectMeter.Mark(otherreject)
+	}
+	select {
+	case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}:
+		return nil
+	case <-f.quit:
+		return errTerminated
+	}
+}
+
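The `direct` flag is how the fetcher tells a solicited reply from a spontaneous broadcast, which matters because only a reply proves what the peer failed to send. A sketch of the two call sites a handler would have (the function names are illustrative, not the handler's real ones):

```go
package example

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/fetcher"
)

// onTxBroadcast handles a spontaneous Transactions message: it is not tied
// to any request of ours, so the fetcher only untracks what arrived.
func onTxBroadcast(f *fetcher.TxFetcher, peer string, txs []*types.Transaction) error {
	return f.Enqueue(peer, txs, false)
}

// onTxReply handles a PooledTransactions response: a direct reply, so the
// fetcher can immediately reschedule whatever the peer left out.
func onTxReply(f *fetcher.TxFetcher, peer string, txs []*types.Transaction) error {
	return f.Enqueue(peer, txs, true)
}
```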
+// Drop should be called when a peer disconnects. It cleans up all the internal
+// data structures of the given node.
+func (f *TxFetcher) Drop(peer string) error {
+	select {
+	case f.drop <- &txDrop{peer: peer}:
+		return nil
+	case <-f.quit:
+		return errTerminated
+	}
+}
+
+// Start boots up the announcement based synchroniser, accepting and processing
+// hash notifications and block fetches until termination requested.
+func (f *TxFetcher) Start() {
+	go f.loop()
+}
+
+// Stop terminates the announcement based synchroniser, canceling all pending
+// operations.
+func (f *TxFetcher) Stop() {
+	close(f.quit)
+}
+
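Start, Drop and Stop bracket the fetcher's lifetime; a minimal usage sketch under stated assumptions (the fetcher is wired as in the earlier sketch, and `peerID` is a placeholder supplied by the session handler):

```go
package example

import (
	"github.com/ethereum/go-ethereum/eth/fetcher"
	"github.com/ethereum/go-ethereum/log"
)

// runFetcher sketches the lifecycle around a peer session.
func runFetcher(f *fetcher.TxFetcher, peerID string) {
	f.Start()
	defer f.Stop()

	// When a peer session ends, release everything attributed to it so any
	// in-flight request it held is rescheduled to an alternate origin.
	if err := f.Drop(peerID); err != nil {
		log.Warn("Failed to drop peer from tx fetcher", "err", err)
	}
}
```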
|
func (f *TxFetcher) loop() {
|
||||||
|
var (
|
||||||
|
waitTimer = new(mclock.Timer)
|
||||||
|
timeoutTimer = new(mclock.Timer)
|
||||||
|
|
||||||
|
waitTrigger = make(chan struct{}, 1)
|
||||||
|
timeoutTrigger = make(chan struct{}, 1)
|
||||||
|
)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case ann := <-f.notify:
|
||||||
|
// Drop part of the new announcements if there are too many accumulated.
|
||||||
|
// Note, we could but do not filter already known transactions here as
|
||||||
|
// the probability of something arriving between this call and the pre-
|
||||||
|
// filter outside is essentially zero.
|
||||||
|
used := len(f.waitslots[ann.origin]) + len(f.announces[ann.origin])
|
||||||
|
if used >= maxTxAnnounces {
|
||||||
|
// This can happen if a set of transactions are requested but not
|
||||||
|
// all fulfilled, so the remainder are rescheduled without the cap
|
||||||
|
// check. Should be fine as the limit is in the thousands and the
|
||||||
|
// request size in the hundreds.
|
||||||
|
txAnnounceDOSMeter.Mark(int64(len(ann.hashes)))
|
||||||
|
break
|
||||||
|
}
|
||||||
|
want := used + len(ann.hashes)
|
||||||
|
if want > maxTxAnnounces {
|
||||||
|
txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces))
|
||||||
|
ann.hashes = ann.hashes[:want-maxTxAnnounces]
|
||||||
|
}
|
||||||
|
			// All is well, schedule the remainder of the transactions
			idleWait := len(f.waittime) == 0
			_, oldPeer := f.announces[ann.origin]

			for _, hash := range ann.hashes {
				// If the transaction is already downloading, add it to the list
				// of possible alternates (in case the current retrieval fails) and
				// also account it for the peer.
				if f.alternates[hash] != nil {
					f.alternates[hash][ann.origin] = struct{}{}

					// Stage 2 and 3 share the set of origins per tx
					if announces := f.announces[ann.origin]; announces != nil {
						announces[hash] = struct{}{}
					} else {
						f.announces[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
					}
					continue
				}
				// If the transaction is not downloading, but is already queued
				// from a different peer, track it for the new peer too.
				if f.announced[hash] != nil {
					f.announced[hash][ann.origin] = struct{}{}

					// Stage 2 and 3 share the set of origins per tx
					if announces := f.announces[ann.origin]; announces != nil {
						announces[hash] = struct{}{}
					} else {
						f.announces[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
					}
					continue
				}
				// If the transaction is already known to the fetcher, but not
				// yet downloading, add the peer as an alternate origin in the
				// waiting list.
				if f.waitlist[hash] != nil {
					f.waitlist[hash][ann.origin] = struct{}{}

					if waitslots := f.waitslots[ann.origin]; waitslots != nil {
						waitslots[hash] = struct{}{}
					} else {
						f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
					}
					continue
				}
				// Transaction unknown to the fetcher, insert it into the waiting list
				f.waitlist[hash] = map[string]struct{}{ann.origin: struct{}{}}
				f.waittime[hash] = f.clock.Now()

				if waitslots := f.waitslots[ann.origin]; waitslots != nil {
					waitslots[hash] = struct{}{}
				} else {
					f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
				}
			}
			// If a new item was added to the waitlist, schedule it into the fetcher
			if idleWait && len(f.waittime) > 0 {
				f.rescheduleWait(waitTimer, waitTrigger)
			}
			// If this peer is new and announced something already queued, maybe
			// request transactions from them
			if !oldPeer && len(f.announces[ann.origin]) > 0 {
				f.scheduleFetches(timeoutTimer, timeoutTrigger, map[string]struct{}{ann.origin: struct{}{}})
			}

		case <-waitTrigger:
			// At least one transaction's waiting time ran out, push all expired
			// ones into the retrieval queues
			actives := make(map[string]struct{})
			for hash, instance := range f.waittime {
				if time.Duration(f.clock.Now()-instance)+txGatherSlack > txArriveTimeout {
					// Transaction expired without propagation, schedule for retrieval
					if f.announced[hash] != nil {
						panic("announce tracker already contains waitlist item")
					}
					f.announced[hash] = f.waitlist[hash]
					for peer := range f.waitlist[hash] {
						if announces := f.announces[peer]; announces != nil {
							announces[hash] = struct{}{}
						} else {
							f.announces[peer] = map[common.Hash]struct{}{hash: struct{}{}}
						}
						delete(f.waitslots[peer], hash)
						if len(f.waitslots[peer]) == 0 {
							delete(f.waitslots, peer)
						}
						actives[peer] = struct{}{}
					}
					delete(f.waittime, hash)
					delete(f.waitlist, hash)
				}
			}
			// If transactions are still waiting for propagation, reschedule the wait timer
			if len(f.waittime) > 0 {
				f.rescheduleWait(waitTimer, waitTrigger)
			}
			// If any peers became active and are idle, request transactions from them
			if len(actives) > 0 {
				f.scheduleFetches(timeoutTimer, timeoutTrigger, actives)
			}

		case <-timeoutTrigger:
			// Clean up any expired retrievals and avoid re-requesting them from the
			// same peer (either overloaded or malicious, useless in both cases). We
			// could also penalize (Drop), but there's nothing to gain, and it could
			// possibly further increase the load on it.
			for peer, req := range f.requests {
				if time.Duration(f.clock.Now()-req.time)+txGatherSlack > txFetchTimeout {
					txRequestTimeoutMeter.Mark(int64(len(req.hashes)))

					// Reschedule all the not-yet-delivered fetches to alternate peers
					for _, hash := range req.hashes {
						// Skip rescheduling hashes already delivered by someone else
						if req.stolen != nil {
							if _, ok := req.stolen[hash]; ok {
								continue
							}
						}
						// Move the delivery back from fetching to queued
						if _, ok := f.announced[hash]; ok {
							panic("announced tracker already contains alternate item")
						}
						if f.alternates[hash] != nil { // nil if tx was broadcast during fetch
							f.announced[hash] = f.alternates[hash]
						}
						delete(f.announced[hash], peer)
						if len(f.announced[hash]) == 0 {
							delete(f.announced, hash)
						}
						delete(f.announces[peer], hash)
						delete(f.alternates, hash)
						delete(f.fetching, hash)
					}
					if len(f.announces[peer]) == 0 {
						delete(f.announces, peer)
					}
					// Keep track of the request as dangling, but never expire
					f.requests[peer].hashes = nil
				}
			}
			// Schedule a new transaction retrieval
			f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)

			// No idea if we scheduled something or not, trigger the timer if needed
			// TODO(karalabe): this is kind of lame, can't we dump it into scheduleFetches somehow?
			f.rescheduleTimeout(timeoutTimer, timeoutTrigger)

		case delivery := <-f.cleanup:
			// Regardless of whether the delivery was direct or via broadcast, remove
			// all traces of the hash from the internal trackers
			for _, hash := range delivery.hashes {
				if _, ok := f.waitlist[hash]; ok {
					for peer, txset := range f.waitslots {
						delete(txset, hash)
						if len(txset) == 0 {
							delete(f.waitslots, peer)
						}
					}
					delete(f.waitlist, hash)
					delete(f.waittime, hash)
				} else {
					for peer, txset := range f.announces {
						delete(txset, hash)
						if len(txset) == 0 {
							delete(f.announces, peer)
						}
					}
					delete(f.announced, hash)
					delete(f.alternates, hash)

					// If a transaction currently being fetched from a different
					// origin was delivered (delivery stolen), mark it so the
					// actual delivery won't double schedule it.
					if origin, ok := f.fetching[hash]; ok && (origin != delivery.origin || !delivery.direct) {
						stolen := f.requests[origin].stolen
						if stolen == nil {
							f.requests[origin].stolen = make(map[common.Hash]struct{})
							stolen = f.requests[origin].stolen
						}
						stolen[hash] = struct{}{}
					}
					delete(f.fetching, hash)
				}
			}
			// In case of a direct delivery, also reschedule anything missing
			// from the original query
			if delivery.direct {
				// Mark the request successful (independent of individual status)
				txRequestDoneMeter.Mark(int64(len(delivery.hashes)))

				// Make sure something was pending, nuke it
				req := f.requests[delivery.origin]
				if req == nil {
					log.Warn("Unexpected transaction delivery", "peer", delivery.origin)
					break
				}
				delete(f.requests, delivery.origin)

				// Anything not delivered should be re-scheduled (with or without
				// this peer, depending on the response cutoff)
				delivered := make(map[common.Hash]struct{})
				for _, hash := range delivery.hashes {
					delivered[hash] = struct{}{}
				}
				cutoff := len(req.hashes) // If nothing is delivered, assume everything is missing, don't retry!!!
				for i, hash := range req.hashes {
					if _, ok := delivered[hash]; ok {
						cutoff = i
					}
				}
				// Reschedule missing hashes from alternates, not-fulfilled from alt+self
				for i, hash := range req.hashes {
					// Skip rescheduling hashes already delivered by someone else
					if req.stolen != nil {
						if _, ok := req.stolen[hash]; ok {
							continue
						}
					}
					if _, ok := delivered[hash]; !ok {
						if i < cutoff {
							delete(f.alternates[hash], delivery.origin)
							delete(f.announces[delivery.origin], hash)
							if len(f.announces[delivery.origin]) == 0 {
								delete(f.announces, delivery.origin)
							}
						}
						if len(f.alternates[hash]) > 0 {
							if _, ok := f.announced[hash]; ok {
								panic(fmt.Sprintf("announced tracker already contains alternate item: %v", f.announced[hash]))
							}
							f.announced[hash] = f.alternates[hash]
						}
					}
					delete(f.alternates, hash)
					delete(f.fetching, hash)
				}
				// Something was delivered, try to reschedule requests
				f.scheduleFetches(timeoutTimer, timeoutTrigger, nil) // Partial delivery may enable others to deliver too
			}

		case drop := <-f.drop:
			// A peer was dropped, remove all traces of it
			if _, ok := f.waitslots[drop.peer]; ok {
				for hash := range f.waitslots[drop.peer] {
					delete(f.waitlist[hash], drop.peer)
					if len(f.waitlist[hash]) == 0 {
						delete(f.waitlist, hash)
						delete(f.waittime, hash)
					}
				}
				delete(f.waitslots, drop.peer)
				if len(f.waitlist) > 0 {
					f.rescheduleWait(waitTimer, waitTrigger)
				}
			}
			// Clean up any active requests
			var request *txRequest
			if request = f.requests[drop.peer]; request != nil {
				for _, hash := range request.hashes {
					// Skip rescheduling hashes already delivered by someone else
					if request.stolen != nil {
						if _, ok := request.stolen[hash]; ok {
							continue
						}
					}
					// Undelivered hash, reschedule if there's an alternative origin available
					delete(f.alternates[hash], drop.peer)
					if len(f.alternates[hash]) == 0 {
						delete(f.alternates, hash)
					} else {
						f.announced[hash] = f.alternates[hash]
						delete(f.alternates, hash)
					}
					delete(f.fetching, hash)
				}
				delete(f.requests, drop.peer)
			}
			// Clean up general announcement tracking
			if _, ok := f.announces[drop.peer]; ok {
				for hash := range f.announces[drop.peer] {
					delete(f.announced[hash], drop.peer)
					if len(f.announced[hash]) == 0 {
						delete(f.announced, hash)
					}
				}
				delete(f.announces, drop.peer)
			}
			// If a request was cancelled, check if anything needs to be rescheduled
			if request != nil {
				f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)
				f.rescheduleTimeout(timeoutTimer, timeoutTrigger)
			}

		case <-f.quit:
			return
		}
		// No idea what happened, but bump some sanity metrics
		txFetcherWaitingPeers.Update(int64(len(f.waitslots)))
		txFetcherWaitingHashes.Update(int64(len(f.waitlist)))
		txFetcherQueueingPeers.Update(int64(len(f.announces) - len(f.requests)))
		txFetcherQueueingHashes.Update(int64(len(f.announced)))
		txFetcherFetchingPeers.Update(int64(len(f.requests)))
		txFetcherFetchingHashes.Update(int64(len(f.fetching)))

		// Loop did something, ping the step notifier if needed (tests)
		if f.step != nil {
			f.step <- struct{}{}
		}
	}
}

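The announcement cap in the notify case above is the piece of arithmetic most easily gotten backwards: the DOS meter records the overflow want - maxTxAnnounces, and exactly that many hashes must be truncated so the peer's tracked total lands on the cap. A standalone check of that arithmetic, with the constant shrunk to 8 and hypothetical hash labels:

package main

import "fmt"

func main() {
	const maxTxAnnounces = 8 // shrunk from the real (thousands-sized) limit

	used := 5                                      // hashes already tracked for this peer
	hashes := []string{"a", "b", "c", "d", "e"}    // new announcement of 5 more
	want := used + len(hashes)                     // 10 > 8, so two must be dropped
	if want > maxTxAnnounces {
		dropped := want - maxTxAnnounces           // 2, the amount the meter records
		hashes = hashes[:len(hashes)-dropped]      // keep 3, total stays exactly at 8
		fmt.Println(dropped, hashes)               // 2 [a b c]
	}
}
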
// rescheduleWait iterates over all the transactions currently in the waitlist
// and schedules the movement into the fetcher for the earliest.
//
// The method has a granularity of 'gatherSlack', since there's not much point in
// spinning over all the transactions just to maybe find one that should trigger
// a few ms earlier.
func (f *TxFetcher) rescheduleWait(timer *mclock.Timer, trigger chan struct{}) {
	if *timer != nil {
		(*timer).Stop()
	}
	now := f.clock.Now()

	earliest := now
	for _, instance := range f.waittime {
		if earliest > instance {
			earliest = instance
			if txArriveTimeout-time.Duration(now-earliest) < gatherSlack {
				break
			}
		}
	}
	*timer = f.clock.AfterFunc(txArriveTimeout-time.Duration(now-earliest), func() {
		trigger <- struct{}{}
	})
}

// rescheduleTimeout iterates over all the transactions currently in flight and
// schedules a cleanup run when the first would trigger.
//
// The method has a granularity of 'gatherSlack', since there's not much point in
// spinning over all the transactions just to maybe find one that should trigger
// a few ms earlier.
//
// This method is a bit "flaky" by design. In theory the timeout timer only ever
// should be rescheduled if some request is pending. In practice, a timeout will
// cause the timer to be rescheduled every 5 secs (until the peer comes through or
// disconnects). This is a limitation of the fetcher code because we don't track
// pending requests and timed out requests separately. Without double tracking, if
// we simply didn't reschedule the timer on all-timeout then the timer would never
// be set again since len(request) > 0 => something's running.
func (f *TxFetcher) rescheduleTimeout(timer *mclock.Timer, trigger chan struct{}) {
	if *timer != nil {
		(*timer).Stop()
	}
	now := f.clock.Now()

	earliest := now
	for _, req := range f.requests {
		// If this request already timed out, skip it altogether
		if req.hashes == nil {
			continue
		}
		if earliest > req.time {
			earliest = req.time
			if txFetchTimeout-time.Duration(now-earliest) < gatherSlack {
				break
			}
		}
	}
	*timer = f.clock.AfterFunc(txFetchTimeout-time.Duration(now-earliest), func() {
		trigger <- struct{}{}
	})
}

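Both reschedule helpers rotate a single timer instead of keeping one per deadline: stop whatever is armed, find the earliest outstanding deadline, and re-arm for just that one. A compressed, runnable illustration of that pattern against the real mclock package (the 50ms timeout is a stand-in for txFetchTimeout; the buffered trigger channel matches the fetcher's own):

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
)

func main() {
	var (
		clock   mclock.Clock = mclock.System{}
		timer                = new(mclock.Timer)
		trigger              = make(chan struct{}, 1)
		timeout              = 50 * time.Millisecond // stand-in for txFetchTimeout
	)
	reschedule := func(earliest mclock.AbsTime) {
		if *timer != nil {
			(*timer).Stop() // only ever one armed timer, re-aimed at the earliest deadline
		}
		*timer = clock.AfterFunc(timeout-time.Duration(clock.Now()-earliest), func() {
			select {
			case trigger <- struct{}{}: // buffered: never blocks the timer goroutine
			default:
			}
		})
	}
	reschedule(clock.Now()) // one pending "request" started just now
	<-trigger
	fmt.Println("deadline expired, scan the trackers and re-arm")
}
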
// scheduleFetches starts a batch of retrievals for all available idle peers.
func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{}, whitelist map[string]struct{}) {
	// Gather the set of peers we want to retrieve from (default to all)
	actives := whitelist
	if actives == nil {
		actives = make(map[string]struct{})
		for peer := range f.announces {
			actives[peer] = struct{}{}
		}
	}
	if len(actives) == 0 {
		return
	}
	// For each active peer, try to schedule some transaction fetches
	idle := len(f.requests) == 0

	f.forEachPeer(actives, func(peer string) {
		if f.requests[peer] != nil {
			return // continue in the for-each
		}
		if len(f.announces[peer]) == 0 {
			return // continue in the for-each
		}
		hashes := make([]common.Hash, 0, maxTxRetrievals)
		f.forEachHash(f.announces[peer], func(hash common.Hash) bool {
			if _, ok := f.fetching[hash]; !ok {
				// Mark the hash as fetching and stash away possible alternates
				f.fetching[hash] = peer

				if _, ok := f.alternates[hash]; ok {
					panic(fmt.Sprintf("alternate tracker already contains fetching item: %v", f.alternates[hash]))
				}
				f.alternates[hash] = f.announced[hash]
				delete(f.announced, hash)

				// Accumulate the hash and stop if the limit was reached
				hashes = append(hashes, hash)
				if len(hashes) >= maxTxRetrievals {
					return false // break in the for-each
				}
			}
			return true // continue in the for-each
		})
		// If any hashes were allocated, request them from the peer
		if len(hashes) > 0 {
			f.requests[peer] = &txRequest{hashes: hashes, time: f.clock.Now()}
			txRequestOutMeter.Mark(int64(len(hashes)))

			go func(peer string, hashes []common.Hash) {
				// Try to fetch the transactions, but in case of a request
				// failure (e.g. peer disconnected), reschedule the hashes.
				if err := f.fetchTxs(peer, hashes); err != nil {
					txRequestFailMeter.Mark(int64(len(hashes)))
					f.Drop(peer)
				}
			}(peer, hashes)
		}
	})
	// If a new request was fired, schedule a timeout timer
	if idle && len(f.requests) > 0 {
		f.rescheduleTimeout(timer, timeout)
	}
}

// forEachPeer does a range loop over a map of peers in production, but during
// testing it iterates in a deterministic (sorted, then rotated) order to allow
// reproducing issues.
func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string)) {
	// If we're running production, use whatever Go's map gives us
	if f.rand == nil {
		for peer := range peers {
			do(peer)
		}
		return
	}
	// We're running the test suite, make iteration deterministic
	list := make([]string, 0, len(peers))
	for peer := range peers {
		list = append(list, peer)
	}
	sort.Strings(list)
	rotateStrings(list, f.rand.Intn(len(list)))
	for _, peer := range list {
		do(peer)
	}
}

// forEachHash does a range loop over a map of hashes in production, but during
// testing it iterates in a deterministic (sorted, then rotated) order to allow
// reproducing issues.
func (f *TxFetcher) forEachHash(hashes map[common.Hash]struct{}, do func(hash common.Hash) bool) {
	// If we're running production, use whatever Go's map gives us
	if f.rand == nil {
		for hash := range hashes {
			if !do(hash) {
				return
			}
		}
		return
	}
	// We're running the test suite, make iteration deterministic
	list := make([]common.Hash, 0, len(hashes))
	for hash := range hashes {
		list = append(list, hash)
	}
	sortHashes(list)
	rotateHashes(list, f.rand.Intn(len(list)))
	for _, hash := range list {
		if !do(hash) {
			return
		}
	}
}

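The sort-then-rotate trick above is what makes fetcher test failures replayable: sorting first removes Go's map-iteration randomness, and a seeded rotation reintroduces variety in a reproducible way. A standalone sketch of the same trick (the seed 42 is arbitrary, not the one the actual test suite uses):

package main

import (
	"fmt"
	"math/rand"
	"sort"
)

// rotateStrings mirrors the helper defined below, copied here so the sketch
// is self-contained.
func rotateStrings(slice []string, n int) {
	orig := make([]string, len(slice))
	copy(orig, slice)
	for i := 0; i < len(orig); i++ {
		slice[i] = orig[(i+n)%len(orig)]
	}
}

func main() {
	rng := rand.New(rand.NewSource(42)) // fixed seed => identical order on every run

	peers := map[string]struct{}{"peer-b": {}, "peer-a": {}, "peer-c": {}}
	list := make([]string, 0, len(peers))
	for peer := range peers {
		list = append(list, peer)
	}
	sort.Strings(list)                       // kill the map's randomized order
	rotateStrings(list, rng.Intn(len(list))) // reintroduce controlled "randomness"
	fmt.Println(list)
}
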
// rotateStrings rotates the contents of a slice by n steps. This method is only
// used in tests to simulate random map iteration but keep it deterministic.
func rotateStrings(slice []string, n int) {
	orig := make([]string, len(slice))
	copy(orig, slice)

	for i := 0; i < len(orig); i++ {
		slice[i] = orig[(i+n)%len(orig)]
	}
}

// sortHashes sorts a slice of hashes. This method is only used in tests in order
// to simulate random map iteration but keep it deterministic.
func sortHashes(slice []common.Hash) {
	for i := 0; i < len(slice); i++ {
		for j := i + 1; j < len(slice); j++ {
			if bytes.Compare(slice[i][:], slice[j][:]) > 0 {
				slice[i], slice[j] = slice[j], slice[i]
			}
		}
	}
}

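sortHashes above is a deliberately dependency-free quadratic sort, which is fine for test-sized inputs. For anything larger, the stdlib equivalent with the identical byte-wise ordering would look like this sketch:

package main

import (
	"bytes"
	"fmt"
	"sort"

	"github.com/ethereum/go-ethereum/common"
)

// sortHashesFast produces the same ordering as sortHashes, in O(n log n)
// instead of O(n²), by delegating to sort.Slice.
func sortHashesFast(slice []common.Hash) {
	sort.Slice(slice, func(i, j int) bool {
		return bytes.Compare(slice[i][:], slice[j][:]) < 0
	})
}

func main() {
	hashes := []common.Hash{{0x02}, {0x01}, {0x03}}
	sortHashesFast(hashes)
	fmt.Println(hashes[0][0], hashes[1][0], hashes[2][0]) // 1 2 3
}
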
// rotateHashes rotates the contents of a slice by n steps. This method is only
// used in tests to simulate random map iteration but keep it deterministic.
func rotateHashes(slice []common.Hash, n int) {
	orig := make([]common.Hash, len(slice))
	copy(orig, slice)

	for i := 0; i < len(orig); i++ {
		slice[i] = orig[(i+n)%len(orig)]
	}
}
eth/fetcher/tx_fetcher_test.go (new file, 1528 lines; diff suppressed because it is too large)

eth/handler.go (150 lines changed)
@@ -51,7 +51,7 @@ const (
 	// The number is referenced from the size of tx pool.
 	txChanSize = 4096

-	// minimim number of peers to broadcast new blocks to
+	// minimum number of peers to broadcast entire blocks and transactions to
 	minBroadcastPeers = 4
 )

@@ -78,7 +78,8 @@ type ProtocolManager struct {
 	maxPeers int

 	downloader *downloader.Downloader
-	fetcher    *fetcher.Fetcher
+	blockFetcher *fetcher.BlockFetcher
+	txFetcher    *fetcher.TxFetcher
 	peers      *peerSet

 	eventMux *event.TypeMux

@@ -97,6 +98,9 @@ type ProtocolManager struct {
 	// wait group is used for graceful shutdowns during downloading
 	// and processing
 	wg sync.WaitGroup
+
+	// Test fields or hooks
+	broadcastTxAnnouncesOnly bool // Testing field, disable transaction propagation
 }

 // NewProtocolManager returns a new Ethereum sub protocol manager. The Ethereum sub protocol manages peers capable

@@ -187,7 +191,16 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh
 		}
 		return n, err
 	}
-	manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
+	manager.blockFetcher = fetcher.NewBlockFetcher(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
+
+	fetchTx := func(peer string, hashes []common.Hash) error {
+		p := manager.peers.Peer(peer)
+		if p == nil {
+			return errors.New("unknown peer")
+		}
+		return p.RequestTxs(hashes)
+	}
+	manager.txFetcher = fetcher.NewTxFetcher(txpool.Has, txpool.AddRemotes, fetchTx)
+
 	return manager, nil
 }

@@ -203,7 +216,7 @@ func (pm *ProtocolManager) makeProtocol(version uint) p2p.Protocol {
 		Version: version,
 		Length:  length,
 		Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
-			peer := pm.newPeer(int(version), p, rw)
+			peer := pm.newPeer(int(version), p, rw, pm.txpool.Get)
 			select {
 			case pm.newPeerCh <- peer:
 				pm.wg.Add(1)

@@ -235,6 +248,8 @@ func (pm *ProtocolManager) removePeer(id string) {

 	// Unregister the peer from the downloader and Ethereum peer set
 	pm.downloader.UnregisterPeer(id)
+	pm.txFetcher.Drop(id)
+
 	if err := pm.peers.Unregister(id); err != nil {
 		log.Error("Peer removal failed", "peer", id, "err", err)
 	}

@@ -258,7 +273,7 @@ func (pm *ProtocolManager) Start(maxPeers int) {

 	// start sync handlers
 	go pm.syncer()
-	go pm.txsyncLoop()
+	go pm.txsyncLoop64() // TODO(karalabe): Legacy initial tx exchange, drop with eth/64.
 }

 func (pm *ProtocolManager) Stop() {

@@ -286,8 +301,8 @@ func (pm *ProtocolManager) Stop() {
 	log.Info("Ethereum protocol stopped")
 }

-func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
-	return newPeer(pv, p, newMeteredMsgWriter(rw))
+func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter, getPooledTx func(hash common.Hash) *types.Transaction) *peer {
+	return newPeer(pv, p, rw, getPooledTx)
 }

 // handle is the callback invoked to manage the life cycle of an eth peer. When

@@ -311,9 +326,6 @@ func (pm *ProtocolManager) handle(p *peer) error {
 		p.Log().Debug("Ethereum handshake failed", "err", err)
 		return err
 	}
-	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
-		rw.Init(p.version)
-	}
 	// Register the peer locally
 	if err := pm.peers.Register(p); err != nil {
 		p.Log().Error("Ethereum peer registration failed", "err", err)

@@ -514,7 +526,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			p.Log().Debug("Whitelist block verified", "number", headers[0].Number.Uint64(), "hash", want)
 		}
 		// Irrelevant of the fork checks, send the header to the fetcher just in case
-		headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
+		headers = pm.blockFetcher.FilterHeaders(p.id, headers, time.Now())
 	}
 	if len(headers) > 0 || !filter {
 		err := pm.downloader.DeliverHeaders(p.id, headers)

@@ -567,7 +579,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 	// Filter out any explicitly requested bodies, deliver the rest to the downloader
 	filter := len(transactions) > 0 || len(uncles) > 0
 	if filter {
-		transactions, uncles = pm.fetcher.FilterBodies(p.id, transactions, uncles, time.Now())
+		transactions, uncles = pm.blockFetcher.FilterBodies(p.id, transactions, uncles, time.Now())
 	}
 	if len(transactions) > 0 || len(uncles) > 0 || !filter {
 		err := pm.downloader.DeliverBodies(p.id, transactions, uncles)

@@ -678,7 +690,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			}
 		}
 		for _, block := range unknown {
-			pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
+			pm.blockFetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
 		}

 	case msg.Code == NewBlockMsg:

@@ -703,7 +715,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {

 		// Mark the peer as owning the block and schedule it for import
 		p.MarkBlock(request.Block.Hash())
-		pm.fetcher.Enqueue(p.id, request.Block)
+		pm.blockFetcher.Enqueue(p.id, request.Block)

 		// Assuming the block is importable by the peer, but possibly not yet done so,
 		// calculate the head hash and TD that the peer truly must have.

@@ -724,7 +736,59 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			}
 		}

-	case msg.Code == TxMsg:
+	case msg.Code == NewPooledTransactionHashesMsg && p.version >= eth65:
+		// New transaction announcement arrived, make sure we have
+		// a valid and fresh chain to handle them
+		if atomic.LoadUint32(&pm.acceptTxs) == 0 {
+			break
+		}
+		var hashes []common.Hash
+		if err := msg.Decode(&hashes); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		// Schedule all the unknown hashes for retrieval
+		for _, hash := range hashes {
+			p.MarkTransaction(hash)
+		}
+		pm.txFetcher.Notify(p.id, hashes)
+
+	case msg.Code == GetPooledTransactionsMsg && p.version >= eth65:
+		// Decode the retrieval message
+		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
+		if _, err := msgStream.List(); err != nil {
+			return err
+		}
+		// Gather transactions until the fetch or network limits are reached
+		var (
+			hash   common.Hash
+			bytes  int
+			hashes []common.Hash
+			txs    []rlp.RawValue
+		)
+		for bytes < softResponseLimit {
+			// Retrieve the hash of the next transaction
+			if err := msgStream.Decode(&hash); err == rlp.EOL {
+				break
+			} else if err != nil {
+				return errResp(ErrDecode, "msg %v: %v", msg, err)
+			}
+			// Retrieve the requested transaction, skipping if unknown to us
+			tx := pm.txpool.Get(hash)
+			if tx == nil {
+				continue
+			}
+			// If known, encode and queue for response packet
+			if encoded, err := rlp.EncodeToBytes(tx); err != nil {
+				log.Error("Failed to encode transaction", "err", err)
+			} else {
+				hashes = append(hashes, hash)
+				txs = append(txs, encoded)
+				bytes += len(encoded)
+			}
+		}
+		return p.SendPooledTransactionsRLP(hashes, txs)
+
+	case msg.Code == TransactionMsg || (msg.Code == PooledTransactionsMsg && p.version >= eth65):
 		// Transactions arrived, make sure we have a valid and fresh chain to handle them
 		if atomic.LoadUint32(&pm.acceptTxs) == 0 {
 			break
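The GetPooledTransactions handler above decodes its payload element by element instead of all at once, which is what lets it stop cleanly at softResponseLimit. A standalone sketch of that rlp streaming pattern, with a synthetic payload in place of a network message:

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Encode a list of hashes the way a GetPooledTransactionsMsg payload is framed.
	payload, _ := rlp.EncodeToBytes([]common.Hash{{0x01}, {0x02}})

	// Stream-decode element by element, stopping at end-of-list, mirroring
	// the handler's bounded gathering loop.
	s := rlp.NewStream(bytes.NewReader(payload), uint64(len(payload)))
	if _, err := s.List(); err != nil { // enter the outer list
		panic(err)
	}
	var hash common.Hash
	for {
		if err := s.Decode(&hash); err == rlp.EOL {
			break // no more elements
		} else if err != nil {
			panic(err)
		}
		fmt.Printf("requested: %x\n", hash[:4])
	}
}
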
@@ -741,7 +805,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 		p.MarkTransaction(tx.Hash())
 	}
-	pm.txpool.AddRemotes(txs)
+	pm.txFetcher.Enqueue(p.id, txs, msg.Code == PooledTransactionsMsg)

 	default:
 		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
@@ -789,22 +853,50 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
 		}
 	}

-// BroadcastTxs will propagate a batch of transactions to all peers which are not known to
+// BroadcastTransactions will propagate a batch of transactions to all peers which are not known to
 // already have the given transaction.
-func (pm *ProtocolManager) BroadcastTxs(txs types.Transactions) {
-	var txset = make(map[*peer]types.Transactions)
+func (pm *ProtocolManager) BroadcastTransactions(txs types.Transactions, propagate bool) {
+	var (
+		txset = make(map[*peer][]common.Hash)
+		annos = make(map[*peer][]common.Hash)
+	)
 	// Broadcast transactions to a batch of peers not knowing about it
+	if propagate {
 		for _, tx := range txs {
 			peers := pm.peers.PeersWithoutTx(tx.Hash())
-		for _, peer := range peers {
-			txset[peer] = append(txset[peer], tx)
+
+			// Send the transaction to a subset of our peers
+			transferLen := int(math.Sqrt(float64(len(peers))))
+			if transferLen < minBroadcastPeers {
+				transferLen = minBroadcastPeers
+			}
+			if transferLen > len(peers) {
+				transferLen = len(peers)
+			}
+			transfer := peers[:transferLen]
+			for _, peer := range transfer {
+				txset[peer] = append(txset[peer], tx.Hash())
+			}
 			log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
 		}
-	// FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
-	for peer, txs := range txset {
-		peer.AsyncSendTransactions(txs)
+		for peer, hashes := range txset {
+			peer.AsyncSendTransactions(hashes)
+		}
+		return
+	}
+	// Otherwise only broadcast the announcement to peers
+	for _, tx := range txs {
+		peers := pm.peers.PeersWithoutTx(tx.Hash())
+		for _, peer := range peers {
+			annos[peer] = append(annos[peer], tx.Hash())
+		}
+	}
+	for peer, hashes := range annos {
+		if peer.version >= eth65 {
+			peer.AsyncSendPooledTransactionHashes(hashes)
+		} else {
+			peer.AsyncSendTransactions(hashes)
+		}
 	}
 }
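The propagation path above sends full transactions to only sqrt(len(peers)) peers, floored at minBroadcastPeers and capped at the peer count, leaving the cheap hash announcement for everyone else. A standalone sketch of how that fanout behaves at a few illustrative peer counts:

package main

import (
	"fmt"
	"math"
)

func main() {
	const minBroadcastPeers = 4 // same constant as in the hunk above

	for _, peers := range []int{2, 4, 16, 25, 100} {
		transferLen := int(math.Sqrt(float64(peers)))
		if transferLen < minBroadcastPeers {
			transferLen = minBroadcastPeers
		}
		if transferLen > peers {
			transferLen = peers
		}
		fmt.Printf("%3d peers -> %2d full broadcasts, %2d announcements\n",
			peers, transferLen, peers-transferLen)
	}
	// 100 peers -> 10 full broadcasts, 90 announcements, and so on.
}
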
@@ -823,7 +915,13 @@ func (pm *ProtocolManager) txBroadcastLoop() {
 	for {
 		select {
 		case event := <-pm.txsCh:
-			pm.BroadcastTxs(event.Txs)
+			// For testing purposes only, disable propagation
+			if pm.broadcastTxAnnouncesOnly {
+				pm.BroadcastTransactions(event.Txs, false)
+				continue
+			}
+			pm.BroadcastTransactions(event.Txs, true)  // First propagate transactions to peers
+			pm.BroadcastTransactions(event.Txs, false) // Only then announce to the rest

 		// Err() channel will be closed when unsubscribing.
 		case <-pm.txsSub.Err():
@@ -495,7 +495,7 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo
 	if err != nil {
 		t.Fatalf("failed to create new blockchain: %v", err)
 	}
-	pm, err := NewProtocolManager(config, cht, syncmode, DefaultConfig.NetworkId, new(event.TypeMux), new(testTxPool), ethash.NewFaker(), blockchain, db, 1, nil)
+	pm, err := NewProtocolManager(config, cht, syncmode, DefaultConfig.NetworkId, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, ethash.NewFaker(), blockchain, db, 1, nil)
 	if err != nil {
 		t.Fatalf("failed to start test protocol manager: %v", err)
 	}

@@ -582,7 +582,7 @@ func testBroadcastBlock(t *testing.T, totalPeers, broadcastExpected int) {
 	if err != nil {
 		t.Fatalf("failed to create new blockchain: %v", err)
 	}
-	pm, err := NewProtocolManager(config, nil, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db, 1, nil)
+	pm, err := NewProtocolManager(config, nil, downloader.FullSync, DefaultConfig.NetworkId, evmux, &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, pow, blockchain, db, 1, nil)
 	if err != nil {
 		t.Fatalf("failed to start test protocol manager: %v", err)
 	}
@@ -68,7 +68,7 @@ func newTestProtocolManager(mode downloader.SyncMode, blocks int, generator func
 	if _, err := blockchain.InsertChain(chain); err != nil {
 		panic(err)
 	}
-	pm, err := NewProtocolManager(gspec.Config, nil, mode, DefaultConfig.NetworkId, evmux, &testTxPool{added: newtx}, engine, blockchain, db, 1, nil)
+	pm, err := NewProtocolManager(gspec.Config, nil, mode, DefaultConfig.NetworkId, evmux, &testTxPool{added: newtx, pool: make(map[common.Hash]*types.Transaction)}, engine, blockchain, db, 1, nil)
 	if err != nil {
 		return nil, nil, err
 	}

@@ -91,22 +91,43 @@ func newTestProtocolManagerMust(t *testing.T, mode downloader.SyncMode, blocks i
 // testTxPool is a fake, helper transaction pool for testing purposes
 type testTxPool struct {
 	txFeed event.Feed
-	pool   []*types.Transaction // Collection of all transactions
+	pool   map[common.Hash]*types.Transaction // Hash map of collected transactions
 	added  chan<- []*types.Transaction // Notification channel for new transactions

 	lock sync.RWMutex // Protects the transaction pool
 }

+// Has returns an indicator whether txpool has a transaction
+// cached with the given hash.
+func (p *testTxPool) Has(hash common.Hash) bool {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	return p.pool[hash] != nil
+}
+
+// Get retrieves the transaction from local txpool with given
+// tx hash.
+func (p *testTxPool) Get(hash common.Hash) *types.Transaction {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	return p.pool[hash]
+}
+
 // AddRemotes appends a batch of transactions to the pool, and notifies any
 // listeners if the addition channel is non nil
 func (p *testTxPool) AddRemotes(txs []*types.Transaction) []error {
 	p.lock.Lock()
 	defer p.lock.Unlock()

-	p.pool = append(p.pool, txs...)
+	for _, tx := range txs {
+		p.pool[tx.Hash()] = tx
+	}
 	if p.added != nil {
 		p.added <- txs
 	}
+	p.txFeed.Send(core.NewTxsEvent{Txs: txs})
 	return make([]error, len(txs))
 }

@@ -153,7 +174,7 @@ func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*te
 	var id enode.ID
 	rand.Read(id[:])

-	peer := pm.newPeer(version, p2p.NewPeer(id, name, nil), net)
+	peer := pm.newPeer(version, p2p.NewPeer(id, name, nil), net, pm.txpool.Get)

 	// Start the peer on a new thread
 	errc := make(chan error, 1)

@@ -191,7 +212,7 @@ func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, genesi
 			CurrentBlock: head,
 			GenesisBlock: genesis,
 		}
-	case p.version == eth64:
+	case p.version >= eth64:
 		msg = &statusData{
 			ProtocolVersion: uint32(p.version),
 			NetworkID:       DefaultConfig.NetworkId,
eth/metrics.go (139 lines deleted)

@@ -1,139 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package eth
-
-import (
-	"github.com/ethereum/go-ethereum/metrics"
-	"github.com/ethereum/go-ethereum/p2p"
-)
-
-var (
-	propTxnInPacketsMeter     = metrics.NewRegisteredMeter("eth/prop/txns/in/packets", nil)
-	propTxnInTrafficMeter     = metrics.NewRegisteredMeter("eth/prop/txns/in/traffic", nil)
-	propTxnOutPacketsMeter    = metrics.NewRegisteredMeter("eth/prop/txns/out/packets", nil)
-	propTxnOutTrafficMeter    = metrics.NewRegisteredMeter("eth/prop/txns/out/traffic", nil)
-	propHashInPacketsMeter    = metrics.NewRegisteredMeter("eth/prop/hashes/in/packets", nil)
-	propHashInTrafficMeter    = metrics.NewRegisteredMeter("eth/prop/hashes/in/traffic", nil)
-	propHashOutPacketsMeter   = metrics.NewRegisteredMeter("eth/prop/hashes/out/packets", nil)
-	propHashOutTrafficMeter   = metrics.NewRegisteredMeter("eth/prop/hashes/out/traffic", nil)
-	propBlockInPacketsMeter   = metrics.NewRegisteredMeter("eth/prop/blocks/in/packets", nil)
-	propBlockInTrafficMeter   = metrics.NewRegisteredMeter("eth/prop/blocks/in/traffic", nil)
-	propBlockOutPacketsMeter  = metrics.NewRegisteredMeter("eth/prop/blocks/out/packets", nil)
-	propBlockOutTrafficMeter  = metrics.NewRegisteredMeter("eth/prop/blocks/out/traffic", nil)
-	reqHeaderInPacketsMeter   = metrics.NewRegisteredMeter("eth/req/headers/in/packets", nil)
-	reqHeaderInTrafficMeter   = metrics.NewRegisteredMeter("eth/req/headers/in/traffic", nil)
-	reqHeaderOutPacketsMeter  = metrics.NewRegisteredMeter("eth/req/headers/out/packets", nil)
-	reqHeaderOutTrafficMeter  = metrics.NewRegisteredMeter("eth/req/headers/out/traffic", nil)
-	reqBodyInPacketsMeter     = metrics.NewRegisteredMeter("eth/req/bodies/in/packets", nil)
-	reqBodyInTrafficMeter     = metrics.NewRegisteredMeter("eth/req/bodies/in/traffic", nil)
-	reqBodyOutPacketsMeter    = metrics.NewRegisteredMeter("eth/req/bodies/out/packets", nil)
-	reqBodyOutTrafficMeter    = metrics.NewRegisteredMeter("eth/req/bodies/out/traffic", nil)
-	reqStateInPacketsMeter    = metrics.NewRegisteredMeter("eth/req/states/in/packets", nil)
-	reqStateInTrafficMeter    = metrics.NewRegisteredMeter("eth/req/states/in/traffic", nil)
-	reqStateOutPacketsMeter   = metrics.NewRegisteredMeter("eth/req/states/out/packets", nil)
-	reqStateOutTrafficMeter   = metrics.NewRegisteredMeter("eth/req/states/out/traffic", nil)
-	reqReceiptInPacketsMeter  = metrics.NewRegisteredMeter("eth/req/receipts/in/packets", nil)
-	reqReceiptInTrafficMeter  = metrics.NewRegisteredMeter("eth/req/receipts/in/traffic", nil)
-	reqReceiptOutPacketsMeter = metrics.NewRegisteredMeter("eth/req/receipts/out/packets", nil)
-	reqReceiptOutTrafficMeter = metrics.NewRegisteredMeter("eth/req/receipts/out/traffic", nil)
-	miscInPacketsMeter        = metrics.NewRegisteredMeter("eth/misc/in/packets", nil)
-	miscInTrafficMeter        = metrics.NewRegisteredMeter("eth/misc/in/traffic", nil)
-	miscOutPacketsMeter       = metrics.NewRegisteredMeter("eth/misc/out/packets", nil)
-	miscOutTrafficMeter       = metrics.NewRegisteredMeter("eth/misc/out/traffic", nil)
-)
-
-// meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of
-// accumulating the above defined metrics based on the data stream contents.
-type meteredMsgReadWriter struct {
-	p2p.MsgReadWriter     // Wrapped message stream to meter
-	version           int // Protocol version to select correct meters
-}
-
-// newMeteredMsgWriter wraps a p2p MsgReadWriter with metering support. If the
-// metrics system is disabled, this function returns the original object.
-func newMeteredMsgWriter(rw p2p.MsgReadWriter) p2p.MsgReadWriter {
-	if !metrics.Enabled {
-		return rw
-	}
-	return &meteredMsgReadWriter{MsgReadWriter: rw}
-}
-
-// Init sets the protocol version used by the stream to know which meters to
-// increment in case of overlapping message ids between protocol versions.
-func (rw *meteredMsgReadWriter) Init(version int) {
-	rw.version = version
-}
-
-func (rw *meteredMsgReadWriter) ReadMsg() (p2p.Msg, error) {
-	// Read the message and short circuit in case of an error
-	msg, err := rw.MsgReadWriter.ReadMsg()
-	if err != nil {
-		return msg, err
-	}
-	// Account for the data traffic
-	packets, traffic := miscInPacketsMeter, miscInTrafficMeter
-	switch {
-	case msg.Code == BlockHeadersMsg:
-		packets, traffic = reqHeaderInPacketsMeter, reqHeaderInTrafficMeter
-	case msg.Code == BlockBodiesMsg:
-		packets, traffic = reqBodyInPacketsMeter, reqBodyInTrafficMeter
-
-	case rw.version >= eth63 && msg.Code == NodeDataMsg:
-		packets, traffic = reqStateInPacketsMeter, reqStateInTrafficMeter
-	case rw.version >= eth63 && msg.Code == ReceiptsMsg:
-		packets, traffic = reqReceiptInPacketsMeter, reqReceiptInTrafficMeter
-
-	case msg.Code == NewBlockHashesMsg:
-		packets, traffic = propHashInPacketsMeter, propHashInTrafficMeter
-	case msg.Code == NewBlockMsg:
-		packets, traffic = propBlockInPacketsMeter, propBlockInTrafficMeter
-	case msg.Code == TxMsg:
-		packets, traffic = propTxnInPacketsMeter, propTxnInTrafficMeter
-	}
-	packets.Mark(1)
-	traffic.Mark(int64(msg.Size))
-
-	return msg, err
-}
-
-func (rw *meteredMsgReadWriter) WriteMsg(msg p2p.Msg) error {
-	// Account for the data traffic
-	packets, traffic := miscOutPacketsMeter, miscOutTrafficMeter
-	switch {
-	case msg.Code == BlockHeadersMsg:
-		packets, traffic = reqHeaderOutPacketsMeter, reqHeaderOutTrafficMeter
-	case msg.Code == BlockBodiesMsg:
-		packets, traffic = reqBodyOutPacketsMeter, reqBodyOutTrafficMeter
-
-	case rw.version >= eth63 && msg.Code == NodeDataMsg:
-		packets, traffic = reqStateOutPacketsMeter, reqStateOutTrafficMeter
-	case rw.version >= eth63 && msg.Code == ReceiptsMsg:
-		packets, traffic = reqReceiptOutPacketsMeter, reqReceiptOutTrafficMeter
-
-	case msg.Code == NewBlockHashesMsg:
-		packets, traffic = propHashOutPacketsMeter, propHashOutTrafficMeter
-	case msg.Code == NewBlockMsg:
-		packets, traffic = propBlockOutPacketsMeter, propBlockOutTrafficMeter
-	case msg.Code == TxMsg:
-		packets, traffic = propTxnOutPacketsMeter, propTxnOutTrafficMeter
-	}
-	packets.Mark(1)
-	traffic.Mark(int64(msg.Size))
-
-	// Send the packet to the p2p layer
-	return rw.MsgReadWriter.WriteMsg(msg)
-}
eth/peer.go (325 lines changed)
@@ -41,24 +41,35 @@ const (
 	maxKnownTxs    = 32768 // Maximum transaction hashes to keep in the known list (prevent DOS)
 	maxKnownBlocks = 1024  // Maximum block hashes to keep in the known list (prevent DOS)

-	// maxQueuedTxs is the maximum number of transaction lists to queue up before
-	// dropping broadcasts. This is a sensitive number as a transaction list might
-	// contain a single transaction, or thousands.
-	maxQueuedTxs = 128
+	// maxQueuedTxs is the maximum number of transactions to queue up before dropping
+	// older broadcasts.
+	maxQueuedTxs = 4096

-	// maxQueuedProps is the maximum number of block propagations to queue up before
+	// maxQueuedTxAnns is the maximum number of transaction announcements to queue up
+	// before dropping older announcements.
+	maxQueuedTxAnns = 4096
+
+	// maxQueuedBlocks is the maximum number of block propagations to queue up before
 	// dropping broadcasts. There's not much point in queueing stale blocks, so a few
 	// that might cover uncles should be enough.
-	maxQueuedProps = 4
+	maxQueuedBlocks = 4

-	// maxQueuedAnns is the maximum number of block announcements to queue up before
+	// maxQueuedBlockAnns is the maximum number of block announcements to queue up before
 	// dropping broadcasts. Similarly to block propagations, there's no point to queue
 	// above some healthy uncle limit, so use that.
-	maxQueuedAnns = 4
+	maxQueuedBlockAnns = 4

 	handshakeTimeout = 5 * time.Second
 )

+// max is a helper function which returns the larger of the two given integers.
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
 // PeerInfo represents a short summary of the Ethereum sub-protocol metadata known
 // about a connected peer.
 type PeerInfo struct {
@@ -86,15 +97,19 @@ type peer struct {
 	td   *big.Int
 	lock sync.RWMutex

-	knownTxs    mapset.Set                // Set of transaction hashes known to be known by this peer
 	knownBlocks mapset.Set                // Set of block hashes known to be known by this peer
-	queuedTxs   chan []*types.Transaction // Queue of transactions to broadcast to the peer
-	queuedProps chan *propEvent           // Queue of blocks to broadcast to the peer
-	queuedAnns  chan *types.Block         // Queue of blocks to announce to the peer
+	queuedBlocks    chan *propEvent   // Queue of blocks to broadcast to the peer
+	queuedBlockAnns chan *types.Block // Queue of blocks to announce to the peer
+
+	knownTxs    mapset.Set                           // Set of transaction hashes known to be known by this peer
+	txBroadcast chan []common.Hash                   // Channel used to queue transaction propagation requests
+	txAnnounce  chan []common.Hash                   // Channel used to queue transaction announcement requests
+	getPooledTx func(common.Hash) *types.Transaction // Callback used to retrieve transaction from txpool

 	term chan struct{} // Termination channel to stop the broadcaster
 }

-func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
+func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter, getPooledTx func(hash common.Hash) *types.Transaction) *peer {
 	return &peer{
 		Peer: p,
 		rw:   rw,
@@ -102,32 +117,28 @@ func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter, getPooledTx func(hash common.Hash) *types.Transaction) *peer {
 		id:          fmt.Sprintf("%x", p.ID().Bytes()[:8]),
 		knownTxs:    mapset.NewSet(),
 		knownBlocks: mapset.NewSet(),
-		queuedTxs:   make(chan []*types.Transaction, maxQueuedTxs),
-		queuedProps: make(chan *propEvent, maxQueuedProps),
-		queuedAnns:  make(chan *types.Block, maxQueuedAnns),
+		queuedBlocks:    make(chan *propEvent, maxQueuedBlocks),
+		queuedBlockAnns: make(chan *types.Block, maxQueuedBlockAnns),
+		txBroadcast:     make(chan []common.Hash),
+		txAnnounce:      make(chan []common.Hash),
+		getPooledTx:     getPooledTx,
 		term:        make(chan struct{}),
 	}
 }

-// broadcast is a write loop that multiplexes block propagations, announcements
-// and transaction broadcasts into the remote peer. The goal is to have an async
-// writer that does not lock up node internals.
-func (p *peer) broadcast() {
+// broadcastBlocks is a write loop that multiplexes blocks and block announcements
+// to the remote peer. The goal is to have an async writer that does not lock up
+// node internals and at the same time rate limits queued data.
+func (p *peer) broadcastBlocks() {
 	for {
 		select {
-		case txs := <-p.queuedTxs:
-			if err := p.SendTransactions(txs); err != nil {
-				return
-			}
-			p.Log().Trace("Broadcast transactions", "count", len(txs))
-
-		case prop := <-p.queuedProps:
+		case prop := <-p.queuedBlocks:
 			if err := p.SendNewBlock(prop.block, prop.td); err != nil {
 				return
 			}
 			p.Log().Trace("Propagated block", "number", prop.block.Number(), "hash", prop.block.Hash(), "td", prop.td)

-		case block := <-p.queuedAnns:
+		case block := <-p.queuedBlockAnns:
 			if err := p.SendNewBlockHashes([]common.Hash{block.Hash()}, []uint64{block.NumberU64()}); err != nil {
 				return
 			}
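The hunk below leans on a copy-and-trim idiom in two places: queue[:copy(queue, queue[len(queue)-maxQueuedTxs:])] keeps only the newest entries while reusing the existing backing array, so the queue never grows without bound. A standalone demonstration with the cap shrunk to 4:

package main

import "fmt"

func main() {
	const maxQueuedTxs = 4 // shrunk from the real 4096 for the demo

	queue := []string{"t1", "t2", "t3"}
	queue = append(queue, "t4", "t5", "t6") // 6 queued, 2 over the cap

	// Keep only the newest maxQueuedTxs entries: copy shifts the tail to the
	// front of the same backing array and returns how many items were kept.
	if len(queue) > maxQueuedTxs {
		queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxs:])]
	}
	fmt.Println(queue) // [t3 t4 t5 t6]
}
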
@ -139,6 +150,130 @@ func (p *peer) broadcast() {
|
|||||||
}
|
}
|
||||||
}
|
}
|

+// broadcastTransactions is a write loop that schedules transaction broadcasts
+// to the remote peer. The goal is to have an async writer that does not lock up
+// node internals and at the same time rate limits queued data.
+func (p *peer) broadcastTransactions() {
+	var (
+		queue []common.Hash      // Queue of hashes to broadcast as full transactions
+		done  chan struct{}      // Non-nil if background broadcaster is running
+		fail  = make(chan error) // Channel used to receive network error
+	)
+	for {
+		// If there's no in-flight broadcast running, check if a new one is needed
+		if done == nil && len(queue) > 0 {
+			// Pile transactions until we reach our allowed network limit
+			var (
+				hashes []common.Hash
+				txs    []*types.Transaction
+				size   common.StorageSize
+			)
+			for i := 0; i < len(queue) && size < txsyncPackSize; i++ {
+				if tx := p.getPooledTx(queue[i]); tx != nil {
+					txs = append(txs, tx)
+					size += tx.Size()
+				}
+				hashes = append(hashes, queue[i])
+			}
+			queue = queue[:copy(queue, queue[len(hashes):])]
+
+			// If there's anything available to transfer, fire up an async writer
+			if len(txs) > 0 {
+				done = make(chan struct{})
+				go func() {
+					if err := p.sendTransactions(txs); err != nil {
+						fail <- err
+						return
+					}
+					close(done)
+					p.Log().Trace("Sent transactions", "count", len(txs))
+				}()
+			}
+		}
+		// Transfer goroutine may or may not have been started, listen for events
+		select {
+		case hashes := <-p.txBroadcast:
+			// New batch of transactions to be broadcast, queue them (with cap)
+			queue = append(queue, hashes...)
+			if len(queue) > maxQueuedTxs {
+				// Fancy copy and resize to ensure buffer doesn't grow indefinitely
+				queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxs:])]
+			}
+
+		case <-done:
+			done = nil
+
+		case <-fail:
+			return
+
+		case <-p.term:
+			return
+		}
+	}
+}
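
The "fancy copy and resize" above deserves a closer look: queue[:copy(queue, queue[len(queue)-maxQueuedTxs:])] shifts the newest entries to the front of the existing backing array and reslices, so the queue is capped without a fresh allocation. A minimal runnable sketch of the same idiom (the capQueue helper and the string payloads are illustrative only, not part of the PR):

package main

import "fmt"

// capQueue keeps only the most recent limit items, reusing the backing
// array instead of allocating a new slice -- the same trick the broadcast
// and announce loops use to bound their queues.
func capQueue(queue []string, limit int) []string {
	if len(queue) > limit {
		// copy moves the last limit items to the front and returns limit,
		// so the reslice drops everything older in place.
		queue = queue[:copy(queue, queue[len(queue)-limit:])]
	}
	return queue
}

func main() {
	q := []string{"a", "b", "c", "d", "e"}
	q = capQueue(q, 3)
	fmt.Println(q) // [c d e]
}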

+// announceTransactions is a write loop that schedules transaction announcements
+// to the remote peer. The goal is to have an async writer that does not lock up
+// node internals and at the same time rate limits queued data.
+func (p *peer) announceTransactions() {
+	var (
+		queue []common.Hash      // Queue of hashes to announce as transaction stubs
+		done  chan struct{}      // Non-nil if background announcer is running
+		fail  = make(chan error) // Channel used to receive network error
+	)
+	for {
+		// If there's no in-flight announce running, check if a new one is needed
+		if done == nil && len(queue) > 0 {
+			// Pile transaction hashes until we reach our allowed network limit
+			var (
+				hashes  []common.Hash
+				pending []common.Hash
+				size    common.StorageSize
+			)
+			for i := 0; i < len(queue) && size < txsyncPackSize; i++ {
+				if p.getPooledTx(queue[i]) != nil {
+					pending = append(pending, queue[i])
+					size += common.HashLength
+				}
+				hashes = append(hashes, queue[i])
+			}
+			queue = queue[:copy(queue, queue[len(hashes):])]
+
+			// If there's anything available to transfer, fire up an async writer
+			if len(pending) > 0 {
+				done = make(chan struct{})
+				go func() {
+					if err := p.sendPooledTransactionHashes(pending); err != nil {
+						fail <- err
+						return
+					}
+					close(done)
+					p.Log().Trace("Sent transaction announcements", "count", len(pending))
+				}()
+			}
+		}
+		// Transfer goroutine may or may not have been started, listen for events
+		select {
+		case hashes := <-p.txAnnounce:
+			// New batch of transactions to be announced, queue them (with cap)
+			queue = append(queue, hashes...)
+			if len(queue) > maxQueuedTxAnns {
+				// Fancy copy and resize to ensure buffer doesn't grow indefinitely
+				queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxAnns:])]
+			}
+
+		case <-done:
+			done = nil
+
+		case <-fail:
+			return
+
+		case <-p.term:
+			return
+		}
+	}
+}

 // close signals the broadcast goroutines to terminate.
 func (p *peer) close() {
 	close(p.term)
 }

@@ -194,46 +329,111 @@ func (p *peer) MarkTransaction(hash common.Hash) {
 	p.knownTxs.Add(hash)
 }

-// SendTransactions sends transactions to the peer and includes the hashes
+// SendTransactions64 sends transactions to the peer and includes the hashes
 // in its transaction hash set for future reference.
-func (p *peer) SendTransactions(txs types.Transactions) error {
-	// Mark all the transactions as known, but ensure we don't overflow our limits
-	for _, tx := range txs {
-		p.knownTxs.Add(tx.Hash())
-	}
-	for p.knownTxs.Cardinality() >= maxKnownTxs {
-		p.knownTxs.Pop()
-	}
-	return p2p.Send(p.rw, TxMsg, txs)
+//
+// This method is legacy support for initial transaction exchange in eth/64 and
+// prior. For eth/65 and higher use SendPooledTransactionHashes.
+func (p *peer) SendTransactions64(txs types.Transactions) error {
+	return p.sendTransactions(txs)
+}
+
+// sendTransactions sends transactions to the peer and includes the hashes
+// in its transaction hash set for future reference.
+//
+// This method is a helper used by the async transaction sender. Don't call it
+// directly as the queueing (memory) and transmission (bandwidth) costs should
+// not be managed directly.
+func (p *peer) sendTransactions(txs types.Transactions) error {
+	// Mark all the transactions as known, but ensure we don't overflow our limits
+	for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(txs)) {
+		p.knownTxs.Pop()
+	}
+	for _, tx := range txs {
+		p.knownTxs.Add(tx.Hash())
+	}
+	return p2p.Send(p.rw, TransactionMsg, txs)
 }

-// AsyncSendTransactions queues list of transactions propagation to a remote
-// peer. If the peer's broadcast queue is full, the event is silently dropped.
-func (p *peer) AsyncSendTransactions(txs []*types.Transaction) {
+// AsyncSendTransactions queues a list of transactions (by hash) to eventually
+// propagate to a remote peer. The number of pending sends is capped (new ones
+// will force old sends to be dropped).
+func (p *peer) AsyncSendTransactions(hashes []common.Hash) {
 	select {
-	case p.queuedTxs <- txs:
+	case p.txBroadcast <- hashes:
 		// Mark all the transactions as known, but ensure we don't overflow our limits
-		for _, tx := range txs {
-			p.knownTxs.Add(tx.Hash())
-		}
-		for p.knownTxs.Cardinality() >= maxKnownTxs {
+		for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
 			p.knownTxs.Pop()
 		}
-	default:
-		p.Log().Debug("Dropping transaction propagation", "count", len(txs))
+		for _, hash := range hashes {
+			p.knownTxs.Add(hash)
+		}
+	case <-p.term:
+		p.Log().Debug("Dropping transaction propagation", "count", len(hashes))
 	}
 }
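
Note the eviction order in sendTransactions: old hashes are popped until there is room for the whole incoming batch, and only then are the new ones added, so a batch can never evict its own entries. A self-contained sketch of the same pattern, assuming the deckarep/golang-set package this file already uses and a package-local max helper (Go had no builtin integer max at the time):

package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

const maxKnown = 4 // toy stand-in for maxKnownTxs (the real limit is far larger)

// markKnown evicts arbitrary old entries first so the whole batch fits,
// then records the new hashes -- mirroring sendTransactions above.
func markKnown(known mapset.Set, hashes []string) {
	for known.Cardinality() > max(0, maxKnown-len(hashes)) {
		known.Pop() // drop an arbitrary old entry
	}
	for _, h := range hashes {
		known.Add(h)
	}
}

// max is the small helper assumed by the snippets above.
func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func main() {
	known := mapset.NewSet()
	markKnown(known, []string{"h1", "h2", "h3"})
	markKnown(known, []string{"h4", "h5"})
	fmt.Println(known.Cardinality()) // never exceeds maxKnown
}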

+// sendPooledTransactionHashes sends transaction hashes to the peer and includes
+// them in its transaction hash set for future reference.
+//
+// This method is a helper used by the async transaction announcer. Don't call it
+// directly as the queueing (memory) and transmission (bandwidth) costs should
+// not be managed directly.
+func (p *peer) sendPooledTransactionHashes(hashes []common.Hash) error {
+	// Mark all the transactions as known, but ensure we don't overflow our limits
+	for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
+		p.knownTxs.Pop()
+	}
+	for _, hash := range hashes {
+		p.knownTxs.Add(hash)
+	}
+	return p2p.Send(p.rw, NewPooledTransactionHashesMsg, hashes)
+}
+
+// AsyncSendPooledTransactionHashes queues a list of transaction hashes to eventually
+// announce to a remote peer. The number of pending sends is capped (new ones
+// will force old sends to be dropped).
+func (p *peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) {
+	select {
+	case p.txAnnounce <- hashes:
+		// Mark all the transactions as known, but ensure we don't overflow our limits
+		for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
+			p.knownTxs.Pop()
+		}
+		for _, hash := range hashes {
+			p.knownTxs.Add(hash)
+		}
+	case <-p.term:
+		p.Log().Debug("Dropping transaction announcement", "count", len(hashes))
+	}
+}
+
+// SendPooledTransactionsRLP sends requested transactions to the peer and adds the
+// hashes in its transaction hash set for future reference.
+//
+// Note, the method assumes the hashes are correct and correspond to the list of
+// transactions being sent.
+func (p *peer) SendPooledTransactionsRLP(hashes []common.Hash, txs []rlp.RawValue) error {
+	// Mark all the transactions as known, but ensure we don't overflow our limits
+	for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
+		p.knownTxs.Pop()
+	}
+	for _, hash := range hashes {
+		p.knownTxs.Add(hash)
+	}
+	return p2p.Send(p.rw, PooledTransactionsMsg, txs)
+}
 // SendNewBlockHashes announces the availability of a number of blocks through
 // a hash notification.
 func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error {
 	// Mark all the block hashes as known, but ensure we don't overflow our limits
+	for p.knownBlocks.Cardinality() > max(0, maxKnownBlocks-len(hashes)) {
+		p.knownBlocks.Pop()
+	}
 	for _, hash := range hashes {
 		p.knownBlocks.Add(hash)
 	}
-	for p.knownBlocks.Cardinality() >= maxKnownBlocks {
-		p.knownBlocks.Pop()
-	}
 	request := make(newBlockHashesData, len(hashes))
 	for i := 0; i < len(hashes); i++ {
 		request[i].Hash = hashes[i]
@@ -247,12 +447,12 @@ func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error
 // dropped.
 func (p *peer) AsyncSendNewBlockHash(block *types.Block) {
 	select {
-	case p.queuedAnns <- block:
+	case p.queuedBlockAnns <- block:
 		// Mark the block hash as known, but ensure we don't overflow our limits
-		p.knownBlocks.Add(block.Hash())
 		for p.knownBlocks.Cardinality() >= maxKnownBlocks {
 			p.knownBlocks.Pop()
 		}
+		p.knownBlocks.Add(block.Hash())
 	default:
 		p.Log().Debug("Dropping block announcement", "number", block.NumberU64(), "hash", block.Hash())
 	}
@@ -261,10 +461,10 @@
 // SendNewBlock propagates an entire block to a remote peer.
 func (p *peer) SendNewBlock(block *types.Block, td *big.Int) error {
 	// Mark the block hash as known, but ensure we don't overflow our limits
-	p.knownBlocks.Add(block.Hash())
 	for p.knownBlocks.Cardinality() >= maxKnownBlocks {
 		p.knownBlocks.Pop()
 	}
+	p.knownBlocks.Add(block.Hash())
 	return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, td})
 }

@@ -272,12 +472,12 @@
 // the peer's broadcast queue is full, the event is silently dropped.
 func (p *peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {
 	select {
-	case p.queuedProps <- &propEvent{block: block, td: td}:
+	case p.queuedBlocks <- &propEvent{block: block, td: td}:
 		// Mark the block hash as known, but ensure we don't overflow our limits
-		p.knownBlocks.Add(block.Hash())
 		for p.knownBlocks.Cardinality() >= maxKnownBlocks {
 			p.knownBlocks.Pop()
 		}
+		p.knownBlocks.Add(block.Hash())
 	default:
 		p.Log().Debug("Dropping block propagation", "number", block.NumberU64(), "hash", block.Hash())
 	}
@@ -352,6 +552,12 @@ func (p *peer) RequestReceipts(hashes []common.Hash) error {
 	return p2p.Send(p.rw, GetReceiptsMsg, hashes)
 }

+// RequestTxs fetches a batch of transactions from a remote node.
+func (p *peer) RequestTxs(hashes []common.Hash) error {
+	p.Log().Debug("Fetching batch of transactions", "count", len(hashes))
+	return p2p.Send(p.rw, GetPooledTransactionsMsg, hashes)
+}
 // Handshake executes the eth protocol handshake, negotiating version number,
 // network IDs, difficulties, head and genesis blocks.
 func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) error {
@@ -372,7 +578,7 @@
 			CurrentBlock:    head,
 			GenesisBlock:    genesis,
 		})
-	case p.version == eth64:
+	case p.version >= eth64:
 		errc <- p2p.Send(p.rw, StatusMsg, &statusData{
 			ProtocolVersion: uint32(p.version),
 			NetworkID:       network,
@@ -389,7 +595,7 @@
 	switch {
 	case p.version == eth63:
 		errc <- p.readStatusLegacy(network, &status63, genesis)
-	case p.version == eth64:
+	case p.version >= eth64:
 		errc <- p.readStatus(network, &status, genesis, forkFilter)
 	default:
 		panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
@@ -410,7 +616,7 @@
 	switch {
 	case p.version == eth63:
 		p.td, p.head = status63.TD, status63.CurrentBlock
-	case p.version == eth64:
+	case p.version >= eth64:
 		p.td, p.head = status.TD, status.Head
 	default:
 		panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
@@ -511,7 +717,10 @@ func (ps *peerSet) Register(p *peer) error {
 		return errAlreadyRegistered
 	}
 	ps.peers[p.id] = p
-	go p.broadcast()
+
+	go p.broadcastBlocks()
+	go p.broadcastTransactions()
+	go p.announceTransactions()

 	return nil
 }
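
Register now spawns three independent writer goroutines per peer, and all of them exit through the same two doors: a network error on their fail channel, or the shared term channel closed by peer.close. A stdlib-only sketch of that one-close-stops-all pattern (the names are illustrative):

package main

import (
	"fmt"
	"sync"
	"time"
)

// writer is a stand-in for one of the per-peer write loops: it works
// until the shared term channel is closed.
func writer(name string, term <-chan struct{}, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		case <-time.After(10 * time.Millisecond):
			// pretend to drain a queue here
		case <-term:
			fmt.Println(name, "stopped")
			return
		}
	}
}

func main() {
	term := make(chan struct{})
	var wg sync.WaitGroup
	for _, n := range []string{"broadcastBlocks", "broadcastTransactions", "announceTransactions"} {
		wg.Add(1)
		go writer(n, term, &wg)
	}
	time.Sleep(50 * time.Millisecond)
	close(term) // peer.close() does exactly this
	wg.Wait()
}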

@@ -33,16 +33,17 @@
 const (
 	eth63 = 63
 	eth64 = 64
+	eth65 = 65
 )

 // protocolName is the official short name of the protocol used during capability negotiation.
 const protocolName = "eth"

 // ProtocolVersions are the supported versions of the eth protocol (first is primary).
-var ProtocolVersions = []uint{eth64, eth63}
+var ProtocolVersions = []uint{eth65, eth64, eth63}

 // protocolLengths are the number of implemented messages corresponding to different protocol versions.
-var protocolLengths = map[uint]uint64{eth64: 17, eth63: 17}
+var protocolLengths = map[uint]uint64{eth65: 17, eth64: 17, eth63: 17}

 const protocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message

@@ -50,7 +51,7 @@
 const (
 	StatusMsg          = 0x00
 	NewBlockHashesMsg  = 0x01
-	TxMsg              = 0x02
+	TransactionMsg     = 0x02
 	GetBlockHeadersMsg = 0x03
 	BlockHeadersMsg    = 0x04
 	GetBlockBodiesMsg  = 0x05
@@ -60,6 +61,14 @@
 	NodeDataMsg        = 0x0e
 	GetReceiptsMsg     = 0x0f
 	ReceiptsMsg        = 0x10
+
+	// New protocol message codes introduced in eth65.
+	//
+	// Previously these message ids were used by some legacy and unsupported
+	// eth protocols, so we reclaim them here.
+	NewPooledTransactionHashesMsg = 0x08
+	GetPooledTransactionsMsg      = 0x09
+	PooledTransactionsMsg         = 0x0a
 )
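
The wire format behind these new codes explains why announcing is cheap: a NewPooledTransactionHashesMsg body is just an RLP list of 32-byte hashes, roughly 33 bytes per transaction instead of a full transaction body that can run to kilobytes. A runnable round-trip sketch using go-ethereum's rlp package (illustrative only; in the live code p2p.Send performs the encoding):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// An eth/65 announcement payload is simply an RLP list of 32-byte hashes.
	hashes := []common.Hash{
		common.HexToHash("0x01"),
		common.HexToHash("0x02"),
	}
	enc, err := rlp.EncodeToBytes(hashes)
	if err != nil {
		panic(err)
	}
	fmt.Printf("payload: %d bytes for %d hashes\n", len(enc), len(hashes))

	// Decoding recovers the exact same hash list on the receiving side.
	var dec []common.Hash
	if err := rlp.DecodeBytes(enc, &dec); err != nil {
		panic(err)
	}
	fmt.Println("roundtrip ok:", dec[0] == hashes[0] && dec[1] == hashes[1])
}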

 type errCode int

@@ -94,6 +103,14 @@ var errorToString = map[int]string{
 }

 type txPool interface {
+	// Has returns an indicator whether txpool has a transaction
+	// cached with the given hash.
+	Has(hash common.Hash) bool
+
+	// Get retrieves the transaction from the local txpool with the given
+	// tx hash.
+	Get(hash common.Hash) *types.Transaction
+
 	// AddRemotes should add the given transactions to the pool.
 	AddRemotes([]*types.Transaction) []error
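
For reference, a toy implementation of the three methods shown above might look like the following. It mirrors the map-backed testTxPool used by the protocol tests further down; the real interface carries further methods (Pending, SubscribeNewTxsEvent, ...) that are omitted here, and memPool is a hypothetical name:

package main

import (
	"fmt"
	"math/big"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// memPool is a minimal, map-backed stand-in for the txPool interface.
type memPool struct {
	mu   sync.Mutex
	pool map[common.Hash]*types.Transaction
}

func newMemPool() *memPool {
	return &memPool{pool: make(map[common.Hash]*types.Transaction)}
}

// Has reports whether a transaction with the given hash is cached.
func (p *memPool) Has(hash common.Hash) bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.pool[hash] != nil
}

// Get returns the cached transaction, or nil if unknown.
func (p *memPool) Get(hash common.Hash) *types.Transaction {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.pool[hash]
}

// AddRemotes inserts the given transactions, reporting per-tx errors.
func (p *memPool) AddRemotes(txs []*types.Transaction) []error {
	p.mu.Lock()
	defer p.mu.Unlock()
	for _, tx := range txs {
		p.pool[tx.Hash()] = tx
	}
	return make([]error, len(txs))
}

func main() {
	p := newMemPool()
	tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), 21000, big.NewInt(1), nil)
	p.AddRemotes([]*types.Transaction{tx})
	fmt.Println(p.Has(tx.Hash())) // true
}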

@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"math/big"
 	"sync"
+	"sync/atomic"
 	"testing"
 	"time"
@@ -61,7 +62,7 @@ func TestStatusMsgErrors63(t *testing.T) {
 		wantError error
 	}{
 		{
-			code: TxMsg, data: []interface{}{},
+			code: TransactionMsg, data: []interface{}{},
 			wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"),
 		},
 		{
@@ -113,7 +114,7 @@ func TestStatusMsgErrors64(t *testing.T) {
 		wantError error
 	}{
 		{
-			code: TxMsg, data: []interface{}{},
+			code: TransactionMsg, data: []interface{}{},
 			wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"),
 		},
 		{
@@ -180,16 +181,16 @@ func TestForkIDSplit(t *testing.T) {
 		blocksNoFork, _  = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil)
 		blocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, dbProFork, 2, nil)

-		ethNoFork, _  = NewProtocolManager(configNoFork, nil, downloader.FullSync, 1, new(event.TypeMux), new(testTxPool), engine, chainNoFork, dbNoFork, 1, nil)
-		ethProFork, _ = NewProtocolManager(configProFork, nil, downloader.FullSync, 1, new(event.TypeMux), new(testTxPool), engine, chainProFork, dbProFork, 1, nil)
+		ethNoFork, _  = NewProtocolManager(configNoFork, nil, downloader.FullSync, 1, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, engine, chainNoFork, dbNoFork, 1, nil)
+		ethProFork, _ = NewProtocolManager(configProFork, nil, downloader.FullSync, 1, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, engine, chainProFork, dbProFork, 1, nil)
 	)
 	ethNoFork.Start(1000)
 	ethProFork.Start(1000)

 	// Both nodes should allow the other to connect (same genesis, next fork is the same)
 	p2pNoFork, p2pProFork := p2p.MsgPipe()
-	peerNoFork := newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork)
-	peerProFork := newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork)
+	peerNoFork := newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
+	peerProFork := newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)

 	errc := make(chan error, 2)
 	go func() { errc <- ethNoFork.handle(peerProFork) }()
@@ -207,8 +208,8 @@
 	chainProFork.InsertChain(blocksProFork[:1])

 	p2pNoFork, p2pProFork = p2p.MsgPipe()
-	peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork)
-	peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork)
+	peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
+	peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)

 	errc = make(chan error, 2)
 	go func() { errc <- ethNoFork.handle(peerProFork) }()
@@ -226,8 +227,8 @@
 	chainProFork.InsertChain(blocksProFork[1:2])

 	p2pNoFork, p2pProFork = p2p.MsgPipe()
-	peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork)
-	peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork)
+	peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
+	peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)

 	errc = make(chan error, 2)
 	go func() { errc <- ethNoFork.handle(peerProFork) }()
@@ -246,6 +247,7 @@
 // This test checks that received transactions are added to the local pool.
 func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) }
 func TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) }
+func TestRecvTransactions65(t *testing.T) { testRecvTransactions(t, 65) }

 func testRecvTransactions(t *testing.T, protocol int) {
 	txAdded := make(chan []*types.Transaction)
@@ -256,7 +258,7 @@ func testRecvTransactions(t *testing.T, protocol int) {
 	defer p.close()

 	tx := newTestTransaction(testAccount, 0, 0)
-	if err := p2p.Send(p.app, TxMsg, []interface{}{tx}); err != nil {
+	if err := p2p.Send(p.app, TransactionMsg, []interface{}{tx}); err != nil {
 		t.Fatalf("send error: %v", err)
 	}
 	select {
@@ -274,18 +276,22 @@
 // This test checks that pending transactions are sent.
 func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) }
 func TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) }
+func TestSendTransactions65(t *testing.T) { testSendTransactions(t, 65) }

 func testSendTransactions(t *testing.T, protocol int) {
 	pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
 	defer pm.Stop()

-	// Fill the pool with big transactions.
+	// Fill the pool with big transactions (use a subscription to wait until all
+	// the transactions are announced to avoid spurious events causing extra
+	// broadcasts).
 	const txsize = txsyncPackSize / 10
 	alltxs := make([]*types.Transaction, 100)
 	for nonce := range alltxs {
 		alltxs[nonce] = newTestTransaction(testAccount, uint64(nonce), txsize)
 	}
 	pm.txpool.AddRemotes(alltxs)
+	time.Sleep(100 * time.Millisecond) // Wait until the new tx event gets out of the system (lame)

 	// Connect several peers. They should all receive the pending transactions.
 	var wg sync.WaitGroup
@@ -297,18 +303,50 @@
 			seen[tx.Hash()] = false
 		}
 		for n := 0; n < len(alltxs) && !t.Failed(); {
-			var txs []*types.Transaction
-			msg, err := p.app.ReadMsg()
-			if err != nil {
-				t.Errorf("%v: read error: %v", p.Peer, err)
-			} else if msg.Code != TxMsg {
-				t.Errorf("%v: got code %d, want TxMsg", p.Peer, msg.Code)
-			}
-			if err := msg.Decode(&txs); err != nil {
-				t.Errorf("%v: %v", p.Peer, err)
-			}
-			for _, tx := range txs {
-				hash := tx.Hash()
+			var forAllHashes func(callback func(hash common.Hash))
+			switch protocol {
+			case 63:
+				fallthrough
+			case 64:
+				msg, err := p.app.ReadMsg()
+				if err != nil {
+					t.Errorf("%v: read error: %v", p.Peer, err)
+					continue
+				} else if msg.Code != TransactionMsg {
+					t.Errorf("%v: got code %d, want TransactionMsg", p.Peer, msg.Code)
+					continue
+				}
+				var txs []*types.Transaction
+				if err := msg.Decode(&txs); err != nil {
+					t.Errorf("%v: %v", p.Peer, err)
+					continue
+				}
+				forAllHashes = func(callback func(hash common.Hash)) {
+					for _, tx := range txs {
+						callback(tx.Hash())
+					}
+				}
+			case 65:
+				msg, err := p.app.ReadMsg()
+				if err != nil {
+					t.Errorf("%v: read error: %v", p.Peer, err)
+					continue
+				} else if msg.Code != NewPooledTransactionHashesMsg {
+					t.Errorf("%v: got code %d, want NewPooledTransactionHashesMsg", p.Peer, msg.Code)
+					continue
+				}
+				var hashes []common.Hash
+				if err := msg.Decode(&hashes); err != nil {
+					t.Errorf("%v: %v", p.Peer, err)
+					continue
+				}
+				forAllHashes = func(callback func(hash common.Hash)) {
+					for _, h := range hashes {
+						callback(h)
+					}
+				}
+			}
+			forAllHashes(func(hash common.Hash) {
 				seentx, want := seen[hash]
 				if seentx {
 					t.Errorf("%v: got tx more than once: %x", p.Peer, hash)
@@ -318,7 +356,7 @@
 				}
 				seen[hash] = true
 				n++
-			}
+			})
 		}
 	}
 	for i := 0; i < 3; i++ {
@@ -329,6 +367,53 @@ func testSendTransactions(t *testing.T, protocol int) {
 	wg.Wait()
 }

+func TestTransactionPropagation(t *testing.T)  { testSyncTransaction(t, true) }
+func TestTransactionAnnouncement(t *testing.T) { testSyncTransaction(t, false) }
+
+func testSyncTransaction(t *testing.T, propagation bool) {
+	// Create a protocol manager for transaction fetcher and sender
+	pmFetcher, _ := newTestProtocolManagerMust(t, downloader.FastSync, 0, nil, nil)
+	defer pmFetcher.Stop()
+	pmSender, _ := newTestProtocolManagerMust(t, downloader.FastSync, 1024, nil, nil)
+	pmSender.broadcastTxAnnouncesOnly = !propagation
+	defer pmSender.Stop()
+
+	// Sync up the two peers
+	io1, io2 := p2p.MsgPipe()
+
+	go pmSender.handle(pmSender.newPeer(65, p2p.NewPeer(enode.ID{}, "sender", nil), io2, pmSender.txpool.Get))
+	go pmFetcher.handle(pmFetcher.newPeer(65, p2p.NewPeer(enode.ID{}, "fetcher", nil), io1, pmFetcher.txpool.Get))
+
+	time.Sleep(250 * time.Millisecond)
+	pmFetcher.synchronise(pmFetcher.peers.BestPeer())
+	atomic.StoreUint32(&pmFetcher.acceptTxs, 1)
+
+	newTxs := make(chan core.NewTxsEvent, 1024)
+	sub := pmFetcher.txpool.SubscribeNewTxsEvent(newTxs)
+	defer sub.Unsubscribe()
+
+	// Fill the pool with new transactions
+	alltxs := make([]*types.Transaction, 1024)
+	for nonce := range alltxs {
+		alltxs[nonce] = newTestTransaction(testAccount, uint64(nonce), 0)
+	}
+	pmSender.txpool.AddRemotes(alltxs)
+
+	var got int
+loop:
+	for {
+		select {
+		case ev := <-newTxs:
+			got += len(ev.Txs)
+			if got == 1024 {
+				break loop
+			}
+		case <-time.NewTimer(time.Second).C:
+			t.Fatal("Failed to retrieve all transactions")
+		}
+	}
+}
// Tests that the custom union field encoder and decoder works correctly.
|
// Tests that the custom union field encoder and decoder works correctly.
|
||||||
func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
|
func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
|
||||||
// Create a "random" hash for testing
|
// Create a "random" hash for testing
|
||||||
|
36
eth/sync.go
36
eth/sync.go
@@ -44,6 +44,12 @@ type txsync struct {

 // syncTransactions starts sending all currently pending transactions to the given peer.
 func (pm *ProtocolManager) syncTransactions(p *peer) {
+	// Assemble the set of transactions to broadcast or announce to the remote
+	// peer. Fun fact, this is quite an expensive operation as it needs to sort
+	// the transactions if the sorting is not cached yet. However, with a random
+	// order, insertions could overflow the non-executable queues and get dropped.
+	//
+	// TODO(karalabe): Figure out if we could get away with random order somehow
 	var txs types.Transactions
 	pending, _ := pm.txpool.Pending()
 	for _, batch := range pending {
@@ -52,26 +58,40 @@
 	if len(txs) == 0 {
 		return
 	}
+	// The eth/65 protocol introduces proper transaction announcements, so instead
+	// of dripping transactions across multiple peers, just send the entire list as
+	// an announcement and let the remote side decide what they need (likely nothing).
+	if p.version >= eth65 {
+		hashes := make([]common.Hash, len(txs))
+		for i, tx := range txs {
+			hashes[i] = tx.Hash()
+		}
+		p.AsyncSendPooledTransactionHashes(hashes)
+		return
+	}
+	// Out of luck, the peer is running a legacy protocol, drop the txs over
 	select {
-	case pm.txsyncCh <- &txsync{p, txs}:
+	case pm.txsyncCh <- &txsync{p: p, txs: txs}:
 	case <-pm.quitSync:
 	}
 }

-// txsyncLoop takes care of the initial transaction sync for each new
+// txsyncLoop64 takes care of the initial transaction sync for each new
 // connection. When a new peer appears, we relay all currently pending
 // transactions. In order to minimise egress bandwidth usage, we send
 // the transactions in small packs to one peer at a time.
-func (pm *ProtocolManager) txsyncLoop() {
+func (pm *ProtocolManager) txsyncLoop64() {
 	var (
 		pending = make(map[enode.ID]*txsync)
 		sending = false               // whether a send is active
 		pack    = new(txsync)         // the pack that is being sent
 		done    = make(chan error, 1) // result of the send
 	)
 	// send starts sending a pack of transactions from the sync.
 	send := func(s *txsync) {
+		if s.p.version >= eth65 {
+			panic("initial transaction syncer running on eth/65+")
+		}
 		// Fill pack with transactions up to the target size.
 		size := common.StorageSize(0)
 		pack.p = s.p
@@ -88,7 +108,7 @@
 		// Send the pack in the background.
 		s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
 		sending = true
-		go func() { done <- pack.p.SendTransactions(pack.txs) }()
+		go func() { done <- pack.p.SendTransactions64(pack.txs) }()
 	}

 	// pick chooses the next pending sync.
@@ -133,8 +153,10 @@
 // downloading hashes and blocks as well as handling the announcement handler.
 func (pm *ProtocolManager) syncer() {
 	// Start and ensure cleanup of sync mechanisms
-	pm.fetcher.Start()
-	defer pm.fetcher.Stop()
+	pm.blockFetcher.Start()
+	pm.txFetcher.Start()
+	defer pm.blockFetcher.Stop()
+	defer pm.txFetcher.Stop()
 	defer pm.downloader.Terminate()

 	// Wait for different events to fire synchronisation operations

@@ -26,9 +26,13 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/enode"
 )

+func TestFastSyncDisabling63(t *testing.T) { testFastSyncDisabling(t, 63) }
+func TestFastSyncDisabling64(t *testing.T) { testFastSyncDisabling(t, 64) }
+func TestFastSyncDisabling65(t *testing.T) { testFastSyncDisabling(t, 65) }
+
 // Tests that fast sync gets disabled as soon as a real block is successfully
 // imported into the blockchain.
-func TestFastSyncDisabling(t *testing.T) {
+func testFastSyncDisabling(t *testing.T, protocol int) {
 	// Create a pristine protocol manager, check that fast sync is left enabled
 	pmEmpty, _ := newTestProtocolManagerMust(t, downloader.FastSync, 0, nil, nil)
 	if atomic.LoadUint32(&pmEmpty.fastSync) == 0 {
@@ -42,8 +46,8 @@
 	// Sync up the two peers
 	io1, io2 := p2p.MsgPipe()

-	go pmFull.handle(pmFull.newPeer(63, p2p.NewPeer(enode.ID{}, "empty", nil), io2))
-	go pmEmpty.handle(pmEmpty.newPeer(63, p2p.NewPeer(enode.ID{}, "full", nil), io1))
+	go pmFull.handle(pmFull.newPeer(protocol, p2p.NewPeer(enode.ID{}, "empty", nil), io2, pmFull.txpool.Get))
+	go pmEmpty.handle(pmEmpty.newPeer(protocol, p2p.NewPeer(enode.ID{}, "full", nil), io1, pmEmpty.txpool.Get))

 	time.Sleep(250 * time.Millisecond)
 	pmEmpty.synchronise(pmEmpty.peers.BestPeer())

@@ -26,6 +26,14 @@ targets:
       function: Fuzz
       package: github.com/ethereum/go-ethereum/tests/fuzzers/trie
     checkout: github.com/ethereum/go-ethereum/
+  - name: txfetcher
+    language: go
+    version: "1.13"
+    corpus: ./fuzzers/txfetcher/corpus
+    harness:
+      function: Fuzz
+      package: github.com/ethereum/go-ethereum/tests/fuzzers/txfetcher
+    checkout: github.com/ethereum/go-ethereum/
   - name: whisperv6
     language: go
     version: "1.13"
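
The harness section above points the fuzzer at a Fuzz function inside the new tests/fuzzers/txfetcher package. Harnesses of this kind follow the go-fuzz convention of a single entry point taking raw bytes; a hypothetical minimal skeleton (not the actual harness from this PR) looks like:

package txfetcher

// Fuzz is the go-fuzz style entry point referenced by the fuzzbuzz target
// above. Returning 1 marks the input as interesting for corpus expansion,
// 0 as uninteresting.
func Fuzz(data []byte) int {
	if len(data) == 0 {
		return 0
	}
	// The real harness would feed data into the transaction fetcher's
	// state machine here and check its invariants.
	return 1
}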
[The remaining files in the diff are new fuzzing seeds added under tests/fuzzers/txfetcher/corpus: several dozen small binary and text fragments (truncated RSA test keys, long digit strings, random byte sequences) whose garbled contents are not meaningful to reproduce here.]