// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package fetcher contains the block announcement based synchronisation.
package fetcher

import (
	"errors"
	"fmt"
	"math/rand"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

const (
	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block
	maxUncleDist  = 7                      // Maximum allowed backward distance from the chain head
	maxQueueDist  = 32                     // Maximum allowed distance from the chain head to queue
	hashLimit     = 256                    // Maximum number of unique blocks a peer may have announced
	blockLimit    = 64                     // Maximum number of unique blocks a peer may have delivered
)

var (
	errTerminated = errors.New("terminated")
)

// blockRetrievalFn is a callback type for retrieving a block from the local chain.
type blockRetrievalFn func(common.Hash) *types.Block

// blockRequesterFn is a callback type for sending a block retrieval request.
type blockRequesterFn func([]common.Hash) error

// headerRequesterFn is a callback type for sending a header retrieval request.
type headerRequesterFn func(common.Hash) error

// bodyRequesterFn is a callback type for sending a body retrieval request.
type bodyRequesterFn func([]common.Hash) error

// blockValidatorFn is a callback type to verify a block's header for fast propagation.
type blockValidatorFn func(block *types.Block, parent *types.Block) error

// blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
type blockBroadcasterFn func(block *types.Block, propagate bool)

// chainHeightFn is a callback type to retrieve the current chain height.
type chainHeightFn func() uint64

// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
type chainInsertFn func(types.Blocks) (int, error)

// peerDropFn is a callback type for dropping a peer detected as malicious.
type peerDropFn func(id string)

// announce is the hash notification of the availability of a new block in the
// network.
type announce struct {
	hash   common.Hash   // Hash of the block being announced
	number uint64        // Number of the block being announced (0 = unknown | old protocol)
	header *types.Header // Header of the block partially reassembled (new protocol)
	time   time.Time     // Timestamp of the announcement

	origin string // Identifier of the peer originating the notification

	fetch61     blockRequesterFn  // [eth/61] Fetcher function to retrieve an announced block
	fetchHeader headerRequesterFn // [eth/62] Fetcher function to retrieve the header of an announced block
	fetchBodies bodyRequesterFn   // [eth/62] Fetcher function to retrieve the body of an announced block
}

// headerFilterTask represents a batch of headers needing fetcher filtering.
type headerFilterTask struct {
	headers []*types.Header // Collection of headers to filter
	time    time.Time       // Arrival time of the headers
}

// bodyFilterTask represents a batch of block bodies (transactions and uncles)
// needing fetcher filtering.
type bodyFilterTask struct {
	transactions [][]*types.Transaction // Collection of transactions per block bodies
	uncles       [][]*types.Header      // Collection of uncles per block bodies
	time         time.Time              // Arrival time of the blocks' contents
}

// inject represents a scheduled import operation.
type inject struct {
	origin string
	block  *types.Block
}

// Fetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.
type Fetcher struct {
	// Various event channels
	notify chan *announce
	inject chan *inject

	blockFilter  chan chan []*types.Block
	headerFilter chan chan *headerFilterTask
	bodyFilter   chan chan *bodyFilterTask

	done chan common.Hash
	quit chan struct{}

	// Announce states
	announces  map[string]int              // Per peer announce counts to prevent memory exhaustion
	announced  map[common.Hash][]*announce // Announced blocks, scheduled for fetching
	fetching   map[common.Hash]*announce   // Announced blocks, currently fetching
	fetched    map[common.Hash][]*announce // Blocks with headers fetched, scheduled for body retrieval
	completing map[common.Hash]*announce   // Blocks with headers, currently body-completing

	// Block cache
	queue  *prque.Prque            // Queue containing the import operations (block number sorted)
	queues map[string]int          // Per peer block counts to prevent memory exhaustion
	queued map[common.Hash]*inject // Set of already queued blocks (to dedup imports)

	// Callbacks
	getBlock       blockRetrievalFn   // Retrieves a block from the local chain
	validateBlock  blockValidatorFn   // Checks if a block's headers have a valid proof of work
	broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
	chainHeight    chainHeightFn      // Retrieves the current chain's height
	insertChain    chainInsertFn      // Injects a batch of blocks into the chain
	dropPeer       peerDropFn         // Drops a peer for misbehaving

	// Testing hooks
	fetchingHook   func([]common.Hash) // Method to call upon starting a block (eth/61) or header (eth/62) fetch
	completingHook func([]common.Hash) // Method to call upon starting a block body fetch (eth/62)
	importedHook   func(*types.Block)  // Method to call upon successful block import (both eth/61 and eth/62)
}

// New creates a block fetcher to retrieve blocks based on hash announcements.
func New(getBlock blockRetrievalFn, validateBlock blockValidatorFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher {
	return &Fetcher{
		notify:         make(chan *announce),
		inject:         make(chan *inject),
		blockFilter:    make(chan chan []*types.Block),
		headerFilter:   make(chan chan *headerFilterTask),
		bodyFilter:     make(chan chan *bodyFilterTask),
		done:           make(chan common.Hash),
		quit:           make(chan struct{}),
		announces:      make(map[string]int),
		announced:      make(map[common.Hash][]*announce),
		fetching:       make(map[common.Hash]*announce),
		fetched:        make(map[common.Hash][]*announce),
		completing:     make(map[common.Hash]*announce),
		queue:          prque.New(),
		queues:         make(map[string]int),
		queued:         make(map[common.Hash]*inject),
		getBlock:       getBlock,
		validateBlock:  validateBlock,
		broadcastBlock: broadcastBlock,
		chainHeight:    chainHeight,
		insertChain:    insertChain,
		dropPeer:       dropPeer,
	}
}

// Start boots up the announcement based synchroniser, accepting and processing
// hash notifications and block fetches until termination is requested.
func (f *Fetcher) Start() {
	go f.loop()
}

// Stop terminates the announcement based synchroniser, canceling all pending
// operations.
func (f *Fetcher) Stop() {
	close(f.quit)
}

// Notify announces to the fetcher the potential availability of a new block in
// the network.
func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
	blockFetcher blockRequesterFn, // eth/61 specific whole block fetcher
	headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
	block := &announce{
		hash:        hash,
		number:      number,
		time:        time,
		origin:      peer,
		fetch61:     blockFetcher,
		fetchHeader: headerFetcher,
		fetchBodies: bodyFetcher,
	}
	select {
	case f.notify <- block:
		return nil
	case <-f.quit:
		return errTerminated
	}
}
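
// As a usage sketch, an eth/62 handler receiving a NewBlockHashes announcement
// might forward it roughly like this (p, p.requestOneHeader and p.requestBodies
// are hypothetical peer helpers, not defined in this package); an eth/61
// handler would instead pass its whole-block fetcher and nil header/body
// fetchers:
//
//	err := f.Notify(p.id, hash, number, time.Now(), nil,
//		func(h common.Hash) error { return p.requestOneHeader(h) },
//		func(hs []common.Hash) error { return p.requestBodies(hs) })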

// Enqueue tries to fill gaps in the fetcher's future import queue.
func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
	op := &inject{
		origin: peer,
		block:  block,
	}
	select {
	case f.inject <- op:
		return nil
	case <-f.quit:
		return errTerminated
	}
}
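
// A directly propagated block (e.g. from a NewBlock message) is handed straight
// to the fetcher for ordered import (sketch; p and request are hypothetical
// handler values):
//
//	if err := f.Enqueue(p.id, request.Block); err != nil {
//		return err
//	}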

// FilterBlocks extracts all the blocks that were explicitly requested by the fetcher,
// returning those that should be handled differently.
func (f *Fetcher) FilterBlocks(blocks types.Blocks) types.Blocks {
	glog.V(logger.Detail).Infof("[eth/61] filtering %d blocks", len(blocks))

	// Send the filter channel to the fetcher
	filter := make(chan []*types.Block)
	select {
	case f.blockFilter <- filter:
	case <-f.quit:
		return nil
	}
	// Request the filtering of the block list
	select {
	case filter <- blocks:
	case <-f.quit:
		return nil
	}
	// Retrieve the blocks remaining after filtering
	select {
	case blocks := <-filter:
		return blocks
	case <-f.quit:
		return nil
	}
}
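
// A sketch of how an eth/61 block delivery would pass through this filter, with
// only the unclaimed remainder handed onwards (pm and the bulk synchroniser are
// hypothetical handler names):
//
//	if blocks := pm.fetcher.FilterBlocks(blocks); len(blocks) > 0 {
//		// deliver the remainder to the bulk synchroniser (e.g. the downloader)
//	}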

// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
// returning those that should be handled differently.
func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*types.Header {
	glog.V(logger.Detail).Infof("[eth/62] filtering %d headers", len(headers))

	// Send the filter channel to the fetcher
	filter := make(chan *headerFilterTask)
	select {
	case f.headerFilter <- filter:
	case <-f.quit:
		return nil
	}
	// Request the filtering of the header list
	select {
	case filter <- &headerFilterTask{headers: headers, time: time}:
	case <-f.quit:
		return nil
	}
	// Retrieve the headers remaining after filtering
	select {
	case task := <-filter:
		return task.headers
	case <-f.quit:
		return nil
	}
}

// FilterBodies extracts all the block bodies that were explicitly requested by
// the fetcher, returning those that should be handled differently.
func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
	glog.V(logger.Detail).Infof("[eth/62] filtering %d:%d bodies", len(transactions), len(uncles))

	// Send the filter channel to the fetcher
	filter := make(chan *bodyFilterTask)
	select {
	case f.bodyFilter <- filter:
	case <-f.quit:
		return nil, nil
	}
	// Request the filtering of the body list
	select {
	case filter <- &bodyFilterTask{transactions: transactions, uncles: uncles, time: time}:
	case <-f.quit:
		return nil, nil
	}
	// Retrieve the bodies remaining after filtering
	select {
	case task := <-filter:
		return task.transactions, task.uncles
	case <-f.quit:
		return nil, nil
	}
}
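
// The eth/62 flow mirrors the block filter above, but in two steps: delivered
// headers and bodies are first offered to the fetcher, and only what it does
// not claim moves on (sketch; pm is a hypothetical handler name):
//
//	headers = pm.fetcher.FilterHeaders(headers, time.Now())
//	txs, uncles = pm.fetcher.FilterBodies(txs, uncles, time.Now())
//	// anything left in headers/txs/uncles belongs to other sync components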

// loop is the main fetcher loop, checking and processing various notification
// events.
func (f *Fetcher) loop() {
	// Iterate the block fetching until a quit is requested
	fetchTimer := time.NewTimer(0)
	completeTimer := time.NewTimer(0)

	for {
		// Clean up any expired block fetches
		for hash, announce := range f.fetching {
			if time.Since(announce.time) > fetchTimeout {
				f.forgetHash(hash)
			}
		}
		// Import any queued blocks that could potentially fit
		height := f.chainHeight()
		for !f.queue.Empty() {
			op := f.queue.PopItem().(*inject)

			// If too high up the chain or phase, continue later
			number := op.block.NumberU64()
			if number > height+1 {
				f.queue.Push(op, -float32(op.block.NumberU64()))
				break
			}
			// Otherwise if fresh and still unknown, try and import
			hash := op.block.Hash()
			if number+maxUncleDist < height || f.getBlock(hash) != nil {
				f.forgetBlock(hash)
				continue
			}
			f.insert(op.origin, op.block)
		}
		// Wait for an outside event to occur
		select {
		case <-f.quit:
			// Fetcher terminating, abort all operations
			return

		case notification := <-f.notify:
			// A block was announced, make sure the peer isn't DOSing us
			propAnnounceInMeter.Mark(1)

			count := f.announces[notification.origin] + 1
			if count > hashLimit {
				glog.V(logger.Debug).Infof("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit)
				propAnnounceDOSMeter.Mark(1)
				break
			}
			// If we have a valid block number, check that it's potentially useful
			if notification.number > 0 {
				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
					glog.V(logger.Debug).Infof("[eth/62] Peer %s: discarded announcement #%d [%x…], distance %d", notification.origin, notification.number, notification.hash[:4], dist)
					propAnnounceDropMeter.Mark(1)
					break
				}
			}
			// All is well, schedule the announce if block's not yet downloading
			if _, ok := f.fetching[notification.hash]; ok {
				break
			}
			if _, ok := f.completing[notification.hash]; ok {
				break
			}
			f.announces[notification.origin] = count
			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
			if len(f.announced) == 1 {
				f.rescheduleFetch(fetchTimer)
			}

		case op := <-f.inject:
			// A direct block insertion was requested, try and fill any pending gaps
			propBroadcastInMeter.Mark(1)
			f.enqueue(op.origin, op.block)

		case hash := <-f.done:
			// A pending import finished, remove all traces of the notification
			f.forgetHash(hash)
			f.forgetBlock(hash)

		case <-fetchTimer.C:
			// At least one block's timer ran out, check for needing retrieval
			request := make(map[string][]common.Hash)

			for hash, announces := range f.announced {
				if time.Since(announces[0].time) > arriveTimeout-gatherSlack {
					// Pick a random peer to retrieve from, reset all others
					announce := announces[rand.Intn(len(announces))]
					f.forgetHash(hash)

					// If the block still didn't arrive, queue for fetching
					if f.getBlock(hash) == nil {
						request[announce.origin] = append(request[announce.origin], hash)
						f.fetching[hash] = announce
					}
				}
			}
			// Send out all block (eth/61) or header (eth/62) requests
			for peer, hashes := range request {
				if glog.V(logger.Detail) && len(hashes) > 0 {
					list := "["
					for _, hash := range hashes {
						list += fmt.Sprintf("%x…, ", hash[:4])
					}
					list = list[:len(list)-2] + "]"

					if f.fetching[hashes[0]].fetch61 != nil {
						glog.V(logger.Detail).Infof("[eth/61] Peer %s: fetching blocks %s", peer, list)
					} else {
						glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching headers %s", peer, list)
					}
				}
				// Create a closure of the fetch and schedule it on a new thread
				fetchBlocks, fetchHeader, hashes := f.fetching[hashes[0]].fetch61, f.fetching[hashes[0]].fetchHeader, hashes
				go func() {
					if f.fetchingHook != nil {
						f.fetchingHook(hashes)
					}
					if fetchBlocks != nil {
						// Use old eth/61 protocol to retrieve whole blocks
						blockFetchMeter.Mark(int64(len(hashes)))
						fetchBlocks(hashes)
					} else {
						// Use new eth/62 protocol to retrieve headers first
						for _, hash := range hashes {
							headerFetchMeter.Mark(1)
							fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
						}
					}
				}()
			}
			// Schedule the next fetch if blocks are still pending
			f.rescheduleFetch(fetchTimer)

		case <-completeTimer.C:
			// At least one header's timer ran out, retrieve everything
			request := make(map[string][]common.Hash)

			for hash, announces := range f.fetched {
				// Pick a random peer to retrieve from, reset all others
				announce := announces[rand.Intn(len(announces))]
				f.forgetHash(hash)

				// If the block still didn't arrive, queue for completion
				if f.getBlock(hash) == nil {
					request[announce.origin] = append(request[announce.origin], hash)
					f.completing[hash] = announce
				}
			}
			// Send out all block body requests
			for peer, hashes := range request {
				if glog.V(logger.Detail) && len(hashes) > 0 {
					list := "["
					for _, hash := range hashes {
						list += fmt.Sprintf("%x…, ", hash[:4])
					}
					list = list[:len(list)-2] + "]"

					glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching bodies %s", peer, list)
				}
				// Create a closure of the fetch and schedule it on a new thread
				if f.completingHook != nil {
					f.completingHook(hashes)
				}
				bodyFetchMeter.Mark(int64(len(hashes)))
				go f.completing[hashes[0]].fetchBodies(hashes)
			}
			// Schedule the next completion if blocks are still pending
			f.rescheduleComplete(completeTimer)

		case filter := <-f.blockFilter:
			// Blocks arrived, extract any explicit fetches, return all else
			var blocks types.Blocks
			select {
			case blocks = <-filter:
			case <-f.quit:
				return
			}
			blockFilterInMeter.Mark(int64(len(blocks)))

			explicit, download := []*types.Block{}, []*types.Block{}
			for _, block := range blocks {
				hash := block.Hash()

				// Filter explicitly requested blocks from hash announcements
				if f.fetching[hash] != nil && f.queued[hash] == nil {
					// Discard if already imported by other means
					if f.getBlock(hash) == nil {
						explicit = append(explicit, block)
					} else {
						f.forgetHash(hash)
					}
				} else {
					download = append(download, block)
				}
			}
			blockFilterOutMeter.Mark(int64(len(download)))
			select {
			case filter <- download:
			case <-f.quit:
				return
			}
			// Schedule the retrieved blocks for ordered import
			for _, block := range explicit {
				if announce := f.fetching[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}

		case filter := <-f.headerFilter:
			// Headers arrived from a remote peer. Extract those that were explicitly
			// requested by the fetcher, and return everything else so it's delivered
			// to other parts of the system.
			var task *headerFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			headerFilterInMeter.Mark(int64(len(task.headers)))

			// Split the batch of headers into unknown ones (to return to the caller),
			// known incomplete ones (requiring body retrievals) and completed blocks.
			unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{}
			for _, header := range task.headers {
				hash := header.Hash()

				// Filter fetcher-requested headers from other synchronisation algorithms
				if announce := f.fetching[hash]; announce != nil && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
					// If the delivered header does not match the promised number, drop the announcer
					if header.Number.Uint64() != announce.number {
						glog.V(logger.Detail).Infof("[eth/62] Peer %s: invalid block number for [%x…]: announced %d, provided %d", announce.origin, header.Hash().Bytes()[:4], announce.number, header.Number.Uint64())
						f.dropPeer(announce.origin)
						f.forgetHash(hash)
						continue
					}
					// Only keep if not imported by other means
					if f.getBlock(hash) == nil {
						announce.header = header
						announce.time = task.time

						// If the block is empty (header only), short circuit into the final import queue
						if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
							glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4])

							block := types.NewBlockWithHeader(header)
							block.ReceivedAt = task.time

							complete = append(complete, block)
							f.completing[hash] = announce
							continue
						}
						// Otherwise add to the list of blocks needing completion
						incomplete = append(incomplete, announce)
					} else {
						glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4])
						f.forgetHash(hash)
					}
				} else {
					// Fetcher doesn't know about it, add to the return list
					unknown = append(unknown, header)
				}
			}
			headerFilterOutMeter.Mark(int64(len(unknown)))
			select {
			case filter <- &headerFilterTask{headers: unknown, time: task.time}:
			case <-f.quit:
				return
			}
			// Schedule the retrieved headers for body completion
			for _, announce := range incomplete {
				hash := announce.header.Hash()
				if _, ok := f.completing[hash]; ok {
					continue
				}
				f.fetched[hash] = append(f.fetched[hash], announce)
				if len(f.fetched) == 1 {
					f.rescheduleComplete(completeTimer)
				}
			}
			// Schedule the header-only blocks for import
			for _, block := range complete {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}

		case filter := <-f.bodyFilter:
			// Block bodies arrived, extract any explicitly requested blocks, return the rest
			var task *bodyFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			bodyFilterInMeter.Mark(int64(len(task.transactions)))

			blocks := []*types.Block{}
			for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
				// Match up a body to any possible completion request
				matched := false

				for hash, announce := range f.completing {
					if f.queued[hash] == nil {
						txnHash := types.DeriveSha(types.Transactions(task.transactions[i]))
						uncleHash := types.CalcUncleHash(task.uncles[i])

						if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash {
							// Mark the body matched, reassemble if still unknown
							matched = true

							if f.getBlock(hash) == nil {
								block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
								block.ReceivedAt = task.time

								blocks = append(blocks, block)
							} else {
								f.forgetHash(hash)
							}
						}
					}
				}
				if matched {
					task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
					task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
					i--
					continue
				}
			}
			bodyFilterOutMeter.Mark(int64(len(task.transactions)))
			select {
			case filter <- task:
			case <-f.quit:
				return
			}
			// Schedule the retrieved blocks for ordered import
			for _, block := range blocks {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}
		}
	}
}

// rescheduleFetch resets the specified fetch timer to the next announce timeout.
func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
	// Short circuit if no blocks are announced
	if len(f.announced) == 0 {
		return
	}
	// Otherwise find the earliest expiring announcement
	earliest := time.Now()
	for _, announces := range f.announced {
		if earliest.After(announces[0].time) {
			earliest = announces[0].time
		}
	}
	fetch.Reset(arriveTimeout - time.Since(earliest))
}
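
// To illustrate the reset arithmetic above: with arriveTimeout at 500ms, if the
// earliest pending announcement arrived 400ms ago, the fetch timer is reset to
// fire in roughly 100ms, keeping the explicit request close to the 500ms
// allowance.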

// rescheduleComplete resets the specified completion timer to the next fetch timeout.
func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
	// Short circuit if no headers are fetched
	if len(f.fetched) == 0 {
		return
	}
	// Otherwise find the earliest expiring announcement
	earliest := time.Now()
	for _, announces := range f.fetched {
		if earliest.After(announces[0].time) {
			earliest = announces[0].time
		}
	}
	complete.Reset(gatherSlack - time.Since(earliest))
}

// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
func (f *Fetcher) enqueue(peer string, block *types.Block) {
	hash := block.Hash()

	// Ensure the peer isn't DOSing us
	count := f.queues[peer] + 1
	if count > blockLimit {
		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
		propBroadcastDOSMeter.Mark(1)
		f.forgetHash(hash)
		return
	}
	// Discard any past or too distant blocks
	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
		propBroadcastDropMeter.Mark(1)
		f.forgetHash(hash)
		return
	}
	// Schedule the block for future importing
	if _, ok := f.queued[hash]; !ok {
		op := &inject{
			origin: peer,
			block:  block,
		}
		f.queues[peer] = count
		f.queued[hash] = op
		f.queue.Push(op, -float32(block.NumberU64()))

		if glog.V(logger.Debug) {
			glog.Infof("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
		}
	}
}

// insert spawns a new goroutine to run a block insertion into the chain. If the
// block's number is at the same height as the current import phase, it updates
// the phase states accordingly.
func (f *Fetcher) insert(peer string, block *types.Block) {
	hash := block.Hash()

	// Run the import on a new thread
	glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x…]", peer, block.NumberU64(), hash[:4])
	go func() {
		defer func() { f.done <- hash }()

		// If the parent's unknown, abort insertion
		parent := f.getBlock(block.ParentHash())
		if parent == nil {
			glog.V(logger.Debug).Infof("Peer %s: parent [%x…] of block #%d [%x…] unknown", peer, block.ParentHash().Bytes()[:4], block.NumberU64(), hash[:4])
			return
		}
		// Quickly validate the header and propagate the block if it passes
		switch err := f.validateBlock(block, parent); err {
		case nil:
			// All ok, quickly propagate to our peers
			propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
			go f.broadcastBlock(block, true)

		case core.BlockFutureErr:
			// Weird future block, don't fail, but neither propagate

		default:
			// Something went very wrong, drop the peer
			glog.V(logger.Debug).Infof("Peer %s: block #%d [%x…] verification failed: %v", peer, block.NumberU64(), hash[:4], err)
			f.dropPeer(peer)
			return
		}
		// Run the actual import and log any issues
		if _, err := f.insertChain(types.Blocks{block}); err != nil {
			glog.V(logger.Warn).Infof("Peer %s: block #%d [%x…] import failed: %v", peer, block.NumberU64(), hash[:4], err)
			return
		}
		// If import succeeded, broadcast the block
		propAnnounceOutTimer.UpdateSince(block.ReceivedAt)
		go f.broadcastBlock(block, false)

		// Invoke the testing hook if needed
		if f.importedHook != nil {
			f.importedHook(block)
		}
	}()
}

// forgetHash removes all traces of a block announcement from the fetcher's
// internal state.
func (f *Fetcher) forgetHash(hash common.Hash) {
	// Remove all pending announces and decrement DOS counters
	for _, announce := range f.announced[hash] {
		f.announces[announce.origin]--
		if f.announces[announce.origin] == 0 {
			delete(f.announces, announce.origin)
		}
	}
	delete(f.announced, hash)

	// Remove any pending fetches and decrement the DOS counters
	if announce := f.fetching[hash]; announce != nil {
		f.announces[announce.origin]--
		if f.announces[announce.origin] == 0 {
			delete(f.announces, announce.origin)
		}
		delete(f.fetching, hash)
	}
	// Remove any pending completion requests and decrement the DOS counters
	for _, announce := range f.fetched[hash] {
		f.announces[announce.origin]--
		if f.announces[announce.origin] == 0 {
			delete(f.announces, announce.origin)
		}
	}
	delete(f.fetched, hash)

	// Remove any pending completions and decrement the DOS counters
	if announce := f.completing[hash]; announce != nil {
		f.announces[announce.origin]--
		if f.announces[announce.origin] == 0 {
			delete(f.announces, announce.origin)
		}
		delete(f.completing, hash)
	}
}

// forgetBlock removes all traces of a queued block from the fetcher's internal
// state.
func (f *Fetcher) forgetBlock(hash common.Hash) {
	if insert := f.queued[hash]; insert != nil {
		f.queues[insert.origin]--
		if f.queues[insert.origin] == 0 {
			delete(f.queues, insert.origin)
		}
		delete(f.queued, hash)
	}
}