package eth

import (
	"math"
	"math/rand"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/p2p/discover"
)

const (
	forceSyncCycle      = 10 * time.Second       // Time interval to force syncs, even if few peers are available
	blockProcCycle      = 500 * time.Millisecond // Time interval to check for new blocks to process
	notifyCheckCycle    = 100 * time.Millisecond // Time interval to allow hash notifies to fulfill before hard fetching
	notifyArriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
	notifyFetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block
	minDesiredPeerCount = 5                      // Number of peers desired to start syncing
	blockProcAmount     = 256                    // Number of downloaded blocks to import per batch

	// This is the target size for the packs of transactions sent by txsyncLoop.
	// A pack can get larger than this if a single transaction exceeds this size.
	txsyncPackSize = 100 * 1024
)
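
// For scale (illustrative only): with the 100KB pack target above, a peer
// holding roughly 1MB of pending transactions would receive them in about
// ten packs, assembled and paced one at a time by txsyncLoop below.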

// blockAnnounce is the hash notification of the availability of a new block in
// the network.
type blockAnnounce struct {
	hash common.Hash
	peer *peer
	time time.Time
}

// txsync represents a pending batch of transactions to relay to a single peer.
type txsync struct {
	p   *peer
	txs []*types.Transaction
}

// syncTransactions starts sending all currently pending transactions to the given peer.
func (pm *ProtocolManager) syncTransactions(p *peer) {
	txs := pm.txpool.GetTransactions()
	if len(txs) == 0 {
		return
	}
	// Hand the batch to txsyncLoop, aborting if we're shutting down.
	select {
	case pm.txsyncCh <- &txsync{p, txs}:
	case <-pm.quitSync:
	}
}

// txsyncLoop takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
func (pm *ProtocolManager) txsyncLoop() {
	var (
		pending = make(map[discover.NodeID]*txsync)
		sending = false               // whether a send is active
		pack    = new(txsync)         // the pack that is being sent
		done    = make(chan error, 1) // result of the send
	)

	// send starts sending a pack of transactions from the sync.
	send := func(s *txsync) {
		// Fill pack with transactions up to the target size.
		size := common.StorageSize(0)
		pack.p = s.p
		pack.txs = pack.txs[:0]
		for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
			pack.txs = append(pack.txs, s.txs[i])
			size += s.txs[i].Size()
		}
		// Remove the transactions that will be sent: shift the unsent
		// remainder to the front of the slice and reslice to its length.
		s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
		if len(s.txs) == 0 {
			delete(pending, s.p.ID())
		}
		// Send the pack in the background.
		glog.V(logger.Detail).Infof("%v: sending %d transactions (%v)", s.p.Peer, len(pack.txs), size)
		sending = true
		go func() { done <- pack.p.sendTransactions(pack.txs) }()
	}

	// pick chooses the next pending sync.
	pick := func() *txsync {
		if len(pending) == 0 {
			return nil
		}
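		// Walk a random number of steps into the map: combined with Go's
		// unspecified iteration order, each pending sync is equally likely.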
		n := rand.Intn(len(pending)) + 1
		for _, s := range pending {
			if n--; n == 0 {
				return s
			}
		}
		return nil
	}

	for {
		select {
		case s := <-pm.txsyncCh:
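			// A new batch arrived: track it and start sending if idle.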
			pending[s.p.ID()] = s
			if !sending {
				send(s)
			}
		case err := <-done:
			sending = false
			// Stop tracking peers that cause send failures.
			if err != nil {
				glog.V(logger.Debug).Infof("%v: tx send failed: %v", pack.p.Peer, err)
				delete(pending, pack.p.ID())
			}
			// Schedule the next send.
			if s := pick(); s != nil {
				send(s)
			}
		case <-pm.quitSync:
			return
		}
	}
}
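
// For orientation: elsewhere in this package the protocol handler invokes
// syncTransactions when a new peer finishes its handshake; the resulting
// txsync lands on txsyncCh and txsyncLoop above drains it in bounded packs.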

// fetcher is responsible for collecting hash notifications, and periodically
// checking all unknown ones and individually fetching them.
func (pm *ProtocolManager) fetcher() {
	announces := make(map[common.Hash][]*blockAnnounce) // announced blocks, with every peer that announced each hash
	request := make(map[*peer][]common.Hash)            // hashes to request explicitly, grouped by source peer
	pending := make(map[common.Hash]*blockAnnounce)     // blocks explicitly requested, awaiting arrival or import
	cycle := time.Tick(notifyCheckCycle)                // ticker driving the periodic expiry and fetch checks
	done := make(chan common.Hash)                      // signals the completion of a pending block import

	// Iterate the block fetching until a quit is requested
	for {
		select {
		case notifications := <-pm.newHashCh:
			// A batch of hashes was notified; schedule them for retrieval
			glog.V(logger.Debug).Infof("Scheduling %d hash announcements from %s", len(notifications), notifications[0].peer.id)
			for _, announce := range notifications {
				// Skip if it's already pending fetch
				if _, ok := pending[announce.hash]; ok {
					continue
				}
				// Otherwise queue up the peer as a potential source
				announces[announce.hash] = append(announces[announce.hash], announce)
			}

		case hash := <-done:
			// A pending import finished, remove all traces
			delete(pending, hash)

		case <-cycle:
			// Clean up any expired block fetches
			for hash, announce := range pending {
				if time.Since(announce.time) > notifyFetchTimeout {
					delete(pending, hash)
				}
			}
			// Check if any notified blocks failed to arrive
			for hash, all := range announces {
				if time.Since(all[0].time) > notifyArriveTimeout {
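					// Pick one of the announcing peers at random as the source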
					announce := all[rand.Intn(len(all))]
					if !pm.chainman.HasBlock(hash) {
						request[announce.peer] = append(request[announce.peer], hash)
						pending[hash] = announce
					}
					delete(announces, hash)
				}
			}
			if len(request) == 0 {
				break
			}
			// Send out all block requests
			for peer, hashes := range request {
				glog.V(logger.Debug).Infof("Explicitly fetching %d blocks from %s", len(hashes), peer.id)
				peer.requestBlocks(hashes)
			}
			request = make(map[*peer][]common.Hash)

		case filter := <-pm.newBlockCh:
			// Blocks arrived, extract any explicit fetches, return all else
			var blocks types.Blocks
			select {
			case blocks = <-filter:
			case <-pm.quitSync:
				return
			}
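
			// Partition the arrivals: blocks we explicitly requested are kept
			// for import here, everything else is returned to the caller.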
			explicit, download := []*types.Block{}, []*types.Block{}
			for _, block := range blocks {
				hash := block.Hash()

				// Filter explicitly requested blocks from hash announcements
				if _, ok := pending[hash]; ok {
					// Discard if already imported by other means
					if !pm.chainman.HasBlock(hash) {
						explicit = append(explicit, block)
					} else {
						delete(pending, hash)
					}
				} else {
					download = append(download, block)
				}
			}

			select {
			case filter <- download:
			case <-pm.quitSync:
				return
			}
			// Create a closure with the retrieved blocks and origin peers
			peers := make([]*peer, 0, len(explicit))
			blocks = make([]*types.Block, 0, len(explicit))
			for _, block := range explicit {
				hash := block.Hash()
				if announce := pending[hash]; announce != nil {
					// Drop the block if it surely cannot fit (already known,
					// or its parent is missing from our chain)
					if pm.chainman.HasBlock(hash) || !pm.chainman.HasBlock(block.ParentHash()) {
						delete(pending, hash)
						continue
					}
					// Otherwise accumulate for import
					peers = append(peers, announce.peer)
					blocks = append(blocks, block)
				}
			}
			// If any explicit fetches were replied to, import them
			if count := len(blocks); count > 0 {
				glog.V(logger.Debug).Infof("Importing %d explicitly fetched blocks", count)
				go func() {
					// Make sure all hashes are cleaned up when the goroutine
					// exits, whether the imports succeed or not
					for _, block := range blocks {
						hash := block.Hash()
						defer func() { done <- hash }()
					}
					// Try and actually import the blocks
					for i := 0; i < len(blocks); i++ {
						if err := pm.importBlock(peers[i], blocks[i], nil); err != nil {
							glog.V(logger.Detail).Infof("Failed to import explicitly fetched block: %v", err)
							return
						}
					}
				}()
			}

		case <-pm.quitSync:
			return
		}
	}
}

// syncer is responsible for periodically synchronising with the network, both
// downloading hashes and blocks as well as retrieving cached ones.
func (pm *ProtocolManager) syncer() {
	forceSync := time.Tick(forceSyncCycle) // periodic fallback trigger for a network sync
	blockProc := time.Tick(blockProcCycle) // periodic trigger for importing downloaded blocks
	blockProcPend := int32(0)              // atomic flag marking an import already in flight

	for {
		select {
		case <-pm.newPeerCh:
			// Make sure we have peers to select from, then sync
			if pm.peers.Len() < minDesiredPeerCount {
				break
			}
			go pm.synchronise(pm.peers.BestPeer())

		case <-forceSync:
			// Force a sync even if not enough peers are present
			go pm.synchronise(pm.peers.BestPeer())

		case <-blockProc:
			// Try to pull some blocks from the downloader
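			// Only spawn a new import if none is running: the atomic flag
			// serialises access to processBlocks, whose block order matters.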
			if atomic.CompareAndSwapInt32(&blockProcPend, 0, 1) {
				go func() {
					pm.processBlocks()
					atomic.StoreInt32(&blockProcPend, 0)
				}()
			}

		case <-pm.quitSync:
			return
		}
	}
}

// processBlocks retrieves downloaded blocks from the download cache and tries
// to construct the local block chain with them. Note, since the block retrieval
// order matters, access to this function *must* be synchronized/serialized.
func (pm *ProtocolManager) processBlocks() error {
	pm.wg.Add(1)
	defer pm.wg.Done()

	// Short circuit if no blocks are available for insertion
	blocks := pm.downloader.TakeBlocks()
	if len(blocks) == 0 {
		return nil
	}
	glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].RawBlock.Number(), blocks[len(blocks)-1].RawBlock.Number())

	for len(blocks) != 0 && !pm.quit {
		// Retrieve the first batch of blocks to insert
		max := int(math.Min(float64(len(blocks)), float64(blockProcAmount)))
		raw := make(types.Blocks, 0, max)
		for _, block := range blocks[:max] {
			raw = append(raw, block.RawBlock)
		}
		// Try to insert the blocks, drop the originating peer if there's an error
		index, err := pm.chainman.InsertChain(raw)
		if err != nil {
			glog.V(logger.Debug).Infoln("Downloaded block import failed:", err)
			pm.removePeer(blocks[index].OriginPeer)
			pm.downloader.Cancel()
			return err
		}
		blocks = blocks[max:]
	}
	return nil
}

// synchronise tries to sync up our local block chain with a remote peer, both
// adding various sanity checks as well as wrapping it with various log entries.
func (pm *ProtocolManager) synchronise(peer *peer) {
	// Short circuit if no peers are available
	if peer == nil {
		return
	}
	// Make sure the peer's TD is higher than our own. If not, drop.
	if peer.Td().Cmp(pm.chainman.Td()) <= 0 {
		return
	}
	// FIXME if we have the hash in our chain and the TD of the peer is
	// much higher than ours, something is wrong with us or the peer.
	// Check if the hash is on our own chain
	head := peer.Head()
	if pm.chainman.HasBlock(head) {
		glog.V(logger.Debug).Infoln("Synchronisation canceled: head already known")
		return
	}
	// Get the hashes from the peer (synchronously)
	glog.V(logger.Detail).Infof("Attempting synchronisation: %v, 0x%x", peer.id, head)

	err := pm.downloader.Synchronise(peer.id, head)
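	// Handle the outcome: completion and an already-running sync are benign,
	// protocol violations cost the peer its connection, the rest just log.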
	switch err {
	case nil:
		glog.V(logger.Detail).Infof("Synchronisation completed")

	case downloader.ErrBusy:
		glog.V(logger.Detail).Infof("Synchronisation already in progress")

	case downloader.ErrTimeout, downloader.ErrBadPeer, downloader.ErrEmptyHashSet, downloader.ErrInvalidChain, downloader.ErrCrossCheckFailed:
		glog.V(logger.Debug).Infof("Removing peer %v: %v", peer.id, err)
		pm.removePeer(peer.id)

	case downloader.ErrPendingQueue:
		glog.V(logger.Debug).Infoln("Synchronisation aborted:", err)

	default:
		glog.V(logger.Warn).Infof("Synchronisation failed: %v", err)
	}
}