all: update light logs (and a few others) to the new model
This commit is contained in:
parent
faf713632c
commit
e7030c4bf5
@ -1016,12 +1016,10 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
||||
case SplitStatTy:
|
||||
events = append(events, ChainSplitEvent{block, logs})
|
||||
}
|
||||
|
||||
stats.processed++
|
||||
stats.usedGas += usedGas.Uint64()
|
||||
stats.report(chain, i)
|
||||
}
|
||||
|
||||
go self.postChainEvents(events, coalescedLogs)
|
||||
|
||||
return 0, nil
|
||||
|
@ -308,7 +308,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
|
||||
// main loop. handle incoming messages.
|
||||
for {
|
||||
if err := pm.handleMsg(p); err != nil {
|
||||
p.Log().Debug("Message handling failed", "err", err)
|
||||
p.Log().Debug("Ethereum message handling failed", "err", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -187,7 +187,7 @@ func (p *peer) SendReceiptsRLP(receipts []rlp.RawValue) error {
|
||||
return p2p.Send(p.rw, ReceiptsMsg, receipts)
|
||||
}
|
||||
|
||||
// RequestHeaders is a wrapper around the header query functions to fetch a
|
||||
// RequestOneHeader is a wrapper around the header query functions to fetch a
|
||||
// single header. It is used solely by the fetcher.
|
||||
func (p *peer) RequestOneHeader(hash common.Hash) error {
|
||||
p.Log().Debug("Fetching single header", "hash", hash)
|
||||
|
@ -95,13 +95,13 @@ func (s *Service) Start(server *p2p.Server) error {
|
||||
s.server = server
|
||||
go s.loop()
|
||||
|
||||
log.Info(fmt.Sprint("Stats daemon started"))
|
||||
log.Info("Stats daemon started")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop implements node.Service, terminating the monitoring and reporting daemon.
|
||||
func (s *Service) Stop() error {
|
||||
log.Info(fmt.Sprint("Stats daemon stopped"))
|
||||
log.Info("Stats daemon stopped")
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -130,7 +130,7 @@ func (s *Service) loop() {
|
||||
}
|
||||
conn, err := websocket.Dial(url, "", "http://localhost/")
|
||||
if err != nil {
|
||||
log.Warn(fmt.Sprintf("Stats server unreachable: %v", err))
|
||||
log.Warn("Stats server unreachable", "err", err)
|
||||
time.Sleep(10 * time.Second)
|
||||
continue
|
||||
}
|
||||
@ -138,7 +138,7 @@ func (s *Service) loop() {
|
||||
out := json.NewEncoder(conn)
|
||||
|
||||
if err = s.login(in, out); err != nil {
|
||||
log.Warn(fmt.Sprintf("Stats login failed: %v", err))
|
||||
log.Warn("Stats login failed", "err", err)
|
||||
conn.Close()
|
||||
time.Sleep(10 * time.Second)
|
||||
continue
|
||||
@ -147,12 +147,12 @@ func (s *Service) loop() {
|
||||
|
||||
// Send the initial stats so our node looks decent from the get go
|
||||
if err = s.report(out); err != nil {
|
||||
log.Warn(fmt.Sprintf("Initial stats report failed: %v", err))
|
||||
log.Warn("Initial stats report failed", "err", err)
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
if err = s.reportHistory(out, nil); err != nil {
|
||||
log.Warn(fmt.Sprintf("History report failed: %v", err))
|
||||
log.Warn("Initial history report failed", "err", err)
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
@ -163,11 +163,11 @@ func (s *Service) loop() {
|
||||
select {
|
||||
case <-fullReport.C:
|
||||
if err = s.report(out); err != nil {
|
||||
log.Warn(fmt.Sprintf("Full stats report failed: %v", err))
|
||||
log.Warn("Full stats report failed", "err", err)
|
||||
}
|
||||
case list := <-s.histCh:
|
||||
if err = s.reportHistory(out, list); err != nil {
|
||||
log.Warn(fmt.Sprintf("Block history report failed: %v", err))
|
||||
log.Warn("Requested history report failed", "err", err)
|
||||
}
|
||||
case head, ok := <-headSub.Chan():
|
||||
if !ok { // node stopped
|
||||
@ -175,10 +175,10 @@ func (s *Service) loop() {
|
||||
return
|
||||
}
|
||||
if err = s.reportBlock(out, head.Data.(core.ChainHeadEvent).Block); err != nil {
|
||||
log.Warn(fmt.Sprintf("Block stats report failed: %v", err))
|
||||
log.Warn("Block stats report failed", "err", err)
|
||||
}
|
||||
if err = s.reportPending(out); err != nil {
|
||||
log.Warn(fmt.Sprintf("Post-block transaction stats report failed: %v", err))
|
||||
log.Warn("Post-block transaction stats report failed", "err", err)
|
||||
}
|
||||
case _, ok := <-txSub.Chan():
|
||||
if !ok { // node stopped
|
||||
@ -194,7 +194,7 @@ func (s *Service) loop() {
|
||||
}
|
||||
}
|
||||
if err = s.reportPending(out); err != nil {
|
||||
log.Warn(fmt.Sprintf("Transaction stats report failed: %v", err))
|
||||
log.Warn("Transaction stats report failed", "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -215,16 +215,16 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
|
||||
// Retrieve the next generic network packet and bail out on error
|
||||
var msg map[string][]interface{}
|
||||
if err := in.Decode(&msg); err != nil {
|
||||
log.Warn(fmt.Sprintf("Failed to decode stats server message: %v", err))
|
||||
log.Warn("Failed to decode stats server message", "err", err)
|
||||
return
|
||||
}
|
||||
if len(msg["emit"]) == 0 {
|
||||
log.Warn(fmt.Sprintf("Stats server sent non-broadcast: %v", msg))
|
||||
log.Warn("Stats server sent non-broadcast", "msg", msg)
|
||||
return
|
||||
}
|
||||
command, ok := msg["emit"][0].(string)
|
||||
if !ok {
|
||||
log.Warn(fmt.Sprintf("Invalid stats server message type: %v", msg["emit"][0]))
|
||||
log.Warn("Invalid stats server message type", "type", msg["emit"][0])
|
||||
return
|
||||
}
|
||||
// If the message is a ping reply, deliver (someone must be listening!)
|
||||
@ -235,7 +235,7 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
|
||||
continue
|
||||
default:
|
||||
// Ping routine dead, abort
|
||||
log.Warn(fmt.Sprintf("Stats server pinger seems to have died"))
|
||||
log.Warn("Stats server pinger seems to have died")
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -244,12 +244,12 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
|
||||
// Make sure the request is valid and doesn't crash us
|
||||
request, ok := msg["emit"][1].(map[string]interface{})
|
||||
if !ok {
|
||||
log.Warn(fmt.Sprintf("Invalid history request: %v", msg["emit"][1]))
|
||||
log.Warn("Invalid history request", "msg", msg["emit"][1])
|
||||
return
|
||||
}
|
||||
list, ok := request["list"].([]interface{})
|
||||
if !ok {
|
||||
log.Warn(fmt.Sprintf("Invalid history block list: %v", request["list"]))
|
||||
log.Warn("Invalid history block list", "list", request["list"])
|
||||
return
|
||||
}
|
||||
// Convert the block number list to an integer list
|
||||
@ -257,7 +257,7 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
|
||||
for i, num := range list {
|
||||
n, ok := num.(float64)
|
||||
if !ok {
|
||||
log.Warn(fmt.Sprintf("Invalid history block number: %v", num))
|
||||
log.Warn("Invalid history block number", "number", num)
|
||||
return
|
||||
}
|
||||
numbers[i] = uint64(n)
|
||||
@ -269,7 +269,7 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
|
||||
}
|
||||
}
|
||||
// Report anything else and continue
|
||||
log.Info(fmt.Sprintf("Unknown stats message: %v", msg))
|
||||
log.Info("Unknown stats message", "msg", msg)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -22,7 +22,6 @@ package debug
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/user"
|
||||
@ -111,7 +110,7 @@ func (h *HandlerT) StartCPUProfile(file string) error {
|
||||
}
|
||||
h.cpuW = f
|
||||
h.cpuFile = file
|
||||
log.Info(fmt.Sprint("CPU profiling started, writing to", h.cpuFile))
|
||||
log.Info("CPU profiling started", "dump", h.cpuFile)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -123,7 +122,7 @@ func (h *HandlerT) StopCPUProfile() error {
|
||||
if h.cpuW == nil {
|
||||
return errors.New("CPU profiling not in progress")
|
||||
}
|
||||
log.Info(fmt.Sprint("done writing CPU profile to", h.cpuFile))
|
||||
log.Info("Done writing CPU profile", "dump", h.cpuFile)
|
||||
h.cpuW.Close()
|
||||
h.cpuW = nil
|
||||
h.cpuFile = ""
|
||||
@ -179,7 +178,7 @@ func (*HandlerT) Stacks() string {
|
||||
|
||||
func writeProfile(name, file string) error {
|
||||
p := pprof.Lookup(name)
|
||||
log.Info(fmt.Sprintf("writing %d %s profile records to %s", p.Count(), name, file))
|
||||
log.Info("Writing profile records", "count", p.Count(), "type", name, "dump", file)
|
||||
f, err := os.Create(expandHome(file))
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -129,8 +129,10 @@ func Setup(ctx *cli.Context) error {
|
||||
if ctx.GlobalBool(pprofFlag.Name) {
|
||||
address := fmt.Sprintf("%s:%d", ctx.GlobalString(pprofAddrFlag.Name), ctx.GlobalInt(pprofPortFlag.Name))
|
||||
go func() {
|
||||
log.Info(fmt.Sprintf("starting pprof server at http://%s/debug/pprof", address))
|
||||
log.Error(fmt.Sprint(http.ListenAndServe(address, nil)))
|
||||
log.Info("Starting pprof server", "addr", fmt.Sprintf("http://%s/debug/pprof", address))
|
||||
if err := http.ListenAndServe(address, nil); err != nil {
|
||||
log.Error("Failure in running pprof server", "err", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
return nil
|
||||
|
@ -20,7 +20,6 @@ package debug
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime/trace"
|
||||
|
||||
@ -44,7 +43,7 @@ func (h *HandlerT) StartGoTrace(file string) error {
|
||||
}
|
||||
h.traceW = f
|
||||
h.traceFile = file
|
||||
log.Info(fmt.Sprint("trace started, writing to", h.traceFile))
|
||||
log.Info("Go tracing started", "dump", h.traceFile)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -56,7 +55,7 @@ func (h *HandlerT) StopGoTrace() error {
|
||||
if h.traceW == nil {
|
||||
return errors.New("trace not in progress")
|
||||
}
|
||||
log.Info(fmt.Sprint("done writing trace to", h.traceFile))
|
||||
log.Info("Done writing Go trace", "dump", h.traceFile)
|
||||
h.traceW.Close()
|
||||
h.traceW = nil
|
||||
h.traceFile = ""
|
||||
|
@ -477,7 +477,7 @@ func (s *PublicBlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context,
|
||||
if block != nil {
|
||||
uncles := block.Uncles()
|
||||
if index >= hexutil.Uint(len(uncles)) {
|
||||
log.Debug(fmt.Sprintf("uncle block on index %d not found for block #%d", index, blockNr))
|
||||
log.Debug("Requested uncle not found", "number", blockNr, "hash", block.Hash(), "index", index)
|
||||
return nil, nil
|
||||
}
|
||||
block = types.NewBlockWithHeader(uncles[index])
|
||||
@ -493,7 +493,7 @@ func (s *PublicBlockChainAPI) GetUncleByBlockHashAndIndex(ctx context.Context, b
|
||||
if block != nil {
|
||||
uncles := block.Uncles()
|
||||
if index >= hexutil.Uint(len(uncles)) {
|
||||
log.Debug(fmt.Sprintf("uncle block on index %d not found for block %s", index, blockHash.Hex()))
|
||||
log.Debug("Requested uncle not found", "number", block.Number(), "hash", blockHash, "index", index)
|
||||
return nil, nil
|
||||
}
|
||||
block = types.NewBlockWithHeader(uncles[index])
|
||||
@ -579,7 +579,7 @@ type CallArgs struct {
|
||||
}
|
||||
|
||||
func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config) ([]byte, *big.Int, error) {
|
||||
defer func(start time.Time) { log.Debug(fmt.Sprintf("call took %v", time.Since(start))) }(time.Now())
|
||||
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
|
||||
|
||||
state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
|
||||
if state == nil || err != nil {
|
||||
@ -999,42 +999,40 @@ func getTransactionBlockData(chainDb ethdb.Database, txHash common.Hash) (common
|
||||
}
|
||||
|
||||
// GetTransactionByHash returns the transaction for the given hash
|
||||
func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, txHash common.Hash) (*RPCTransaction, error) {
|
||||
func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) {
|
||||
var tx *types.Transaction
|
||||
var isPending bool
|
||||
var err error
|
||||
|
||||
if tx, isPending, err = getTransaction(s.b.ChainDb(), s.b, txHash); err != nil {
|
||||
log.Debug(fmt.Sprintf("%v\n", err))
|
||||
if tx, isPending, err = getTransaction(s.b.ChainDb(), s.b, hash); err != nil {
|
||||
log.Debug("Failed to retrieve transaction", "hash", hash, "err", err)
|
||||
return nil, nil
|
||||
} else if tx == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if isPending {
|
||||
return newRPCPendingTransaction(tx), nil
|
||||
}
|
||||
|
||||
blockHash, _, _, err := getTransactionBlockData(s.b.ChainDb(), txHash)
|
||||
blockHash, _, _, err := getTransactionBlockData(s.b.ChainDb(), hash)
|
||||
if err != nil {
|
||||
log.Debug(fmt.Sprintf("%v\n", err))
|
||||
log.Debug("Failed to retrieve transaction block", "hash", hash, "err", err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if block, _ := s.b.GetBlock(ctx, blockHash); block != nil {
|
||||
return newRPCTransaction(block, txHash)
|
||||
return newRPCTransaction(block, hash)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// GetRawTransactionByHash returns the bytes of the transaction for the given hash.
|
||||
func (s *PublicTransactionPoolAPI) GetRawTransactionByHash(ctx context.Context, txHash common.Hash) (hexutil.Bytes, error) {
|
||||
func (s *PublicTransactionPoolAPI) GetRawTransactionByHash(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) {
|
||||
var tx *types.Transaction
|
||||
var err error
|
||||
|
||||
if tx, _, err = getTransaction(s.b.ChainDb(), s.b, txHash); err != nil {
|
||||
log.Debug(fmt.Sprintf("%v\n", err))
|
||||
if tx, _, err = getTransaction(s.b.ChainDb(), s.b, hash); err != nil {
|
||||
log.Debug("Failed to retrieve transaction", "hash", hash, "err", err)
|
||||
return nil, nil
|
||||
} else if tx == nil {
|
||||
return nil, nil
|
||||
@ -1044,22 +1042,22 @@ func (s *PublicTransactionPoolAPI) GetRawTransactionByHash(ctx context.Context,
|
||||
}
|
||||
|
||||
// GetTransactionReceipt returns the transaction receipt for the given transaction hash.
|
||||
func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (map[string]interface{}, error) {
|
||||
receipt := core.GetReceipt(s.b.ChainDb(), txHash)
|
||||
func (s *PublicTransactionPoolAPI) GetTransactionReceipt(hash common.Hash) (map[string]interface{}, error) {
|
||||
receipt := core.GetReceipt(s.b.ChainDb(), hash)
|
||||
if receipt == nil {
|
||||
log.Debug(fmt.Sprintf("receipt not found for transaction %s", txHash.Hex()))
|
||||
log.Debug("Receipt not found for transaction", "hash", hash)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
tx, _, err := getTransaction(s.b.ChainDb(), s.b, txHash)
|
||||
tx, _, err := getTransaction(s.b.ChainDb(), s.b, hash)
|
||||
if err != nil {
|
||||
log.Debug(fmt.Sprintf("%v\n", err))
|
||||
log.Debug("Failed to retrieve transaction", "hash", hash, "err", err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
txBlock, blockIndex, index, err := getTransactionBlockData(s.b.ChainDb(), txHash)
|
||||
txBlock, blockIndex, index, err := getTransactionBlockData(s.b.ChainDb(), hash)
|
||||
if err != nil {
|
||||
log.Debug(fmt.Sprintf("%v\n", err))
|
||||
log.Debug("Failed to retrieve transaction block", "hash", hash, "err", err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@ -1073,7 +1071,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (ma
|
||||
"root": hexutil.Bytes(receipt.PostState),
|
||||
"blockHash": txBlock,
|
||||
"blockNumber": hexutil.Uint64(blockIndex),
|
||||
"transactionHash": txHash,
|
||||
"transactionHash": hash,
|
||||
"transactionIndex": hexutil.Uint64(index),
|
||||
"from": from,
|
||||
"to": tx.To(),
|
||||
@ -1162,9 +1160,9 @@ func submitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c
|
||||
signer := types.MakeSigner(b.ChainConfig(), b.CurrentBlock().Number())
|
||||
from, _ := types.Sender(signer, tx)
|
||||
addr := crypto.CreateAddress(from, tx.Nonce())
|
||||
log.Info(fmt.Sprintf("Tx(%s) created: %s\n", tx.Hash().Hex(), addr.Hex()))
|
||||
log.Info("Submitted contract creation", "fullhash", tx.Hash().Hex(), "contract", addr.Hex())
|
||||
} else {
|
||||
log.Info(fmt.Sprintf("Tx(%s) to: %s\n", tx.Hash().Hex(), tx.To().Hex()))
|
||||
log.Info("Submitted transaction", "fullhash", tx.Hash().Hex(), "recipient", tx.To())
|
||||
}
|
||||
return tx.Hash(), nil
|
||||
}
|
||||
@ -1216,9 +1214,9 @@ func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, encod
|
||||
return "", err
|
||||
}
|
||||
addr := crypto.CreateAddress(from, tx.Nonce())
|
||||
log.Info(fmt.Sprintf("Tx(%x) created: %x\n", tx.Hash(), addr))
|
||||
log.Info("Submitted contract creation", "fullhash", tx.Hash().Hex(), "contract", addr.Hex())
|
||||
} else {
|
||||
log.Info(fmt.Sprintf("Tx(%x) to: %x\n", tx.Hash(), tx.To()))
|
||||
log.Info("Submitted transaction", "fullhash", tx.Hash().Hex(), "recipient", tx.To())
|
||||
}
|
||||
|
||||
return tx.Hash().Hex(), nil
|
||||
@ -1423,10 +1421,10 @@ func (api *PrivateDebugAPI) ChaindbCompact() error {
|
||||
return fmt.Errorf("chaindbCompact does not work for memory databases")
|
||||
}
|
||||
for b := byte(0); b < 255; b++ {
|
||||
log.Info(fmt.Sprintf("compacting chain DB range 0x%0.2X-0x%0.2X", b, b+1))
|
||||
log.Info("Compacting chain database", "range", fmt.Sprintf("0x%0.2X-0x%0.2X", b, b+1))
|
||||
err := ldb.LDB().CompactRange(util.Range{Start: []byte{b}, Limit: []byte{b + 1}})
|
||||
if err != nil {
|
||||
log.Error(fmt.Sprintf("compaction error: %v", err))
|
||||
log.Error("Database compaction failed", "err", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -187,7 +187,7 @@ func (s *LightEthereum) Protocols() []p2p.Protocol {
|
||||
// Start implements node.Service, starting all internal goroutines needed by the
|
||||
// Ethereum protocol implementation.
|
||||
func (s *LightEthereum) Start(srvr *p2p.Server) error {
|
||||
log.Info(fmt.Sprintf("WARNING: light client mode is an experimental feature"))
|
||||
log.Warn("Light client mode is an experimental feature")
|
||||
s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.netVersionId)
|
||||
s.protocolManager.Start(srvr)
|
||||
return nil
|
||||
|
@ -18,7 +18,6 @@
|
||||
package les
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
@ -174,7 +173,7 @@ func (f *lightFetcher) syncLoop() {
|
||||
f.reqMu.Unlock()
|
||||
if ok {
|
||||
f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
|
||||
log.Debug(fmt.Sprintf("hard timeout by peer %v", req.peer.id))
|
||||
req.peer.Log().Debug("Fetching data timed out hard")
|
||||
go f.pm.removePeer(req.peer.id)
|
||||
}
|
||||
case resp := <-f.deliverChn:
|
||||
@ -192,13 +191,13 @@ func (f *lightFetcher) syncLoop() {
|
||||
}
|
||||
f.lock.Lock()
|
||||
if !ok || !(f.syncing || f.processResponse(req, resp)) {
|
||||
log.Debug(fmt.Sprintf("failed processing response by peer %v", resp.peer.id))
|
||||
resp.peer.Log().Debug("Failed processing response")
|
||||
go f.pm.removePeer(resp.peer.id)
|
||||
}
|
||||
f.lock.Unlock()
|
||||
case p := <-f.syncDone:
|
||||
f.lock.Lock()
|
||||
log.Debug(fmt.Sprintf("done synchronising with peer %v", p.id))
|
||||
p.Log().Debug("Done synchronising with peer")
|
||||
f.checkSyncedHeaders(p)
|
||||
f.syncing = false
|
||||
f.lock.Unlock()
|
||||
@ -239,17 +238,17 @@ func (f *lightFetcher) removePeer(p *peer) {
|
||||
func (f *lightFetcher) announce(p *peer, head *announceData) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
log.Debug(fmt.Sprintf("received announce from peer %v #%d %016x reorg: %d", p.id, head.Number, head.Hash[:8], head.ReorgDepth))
|
||||
p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)
|
||||
|
||||
fp := f.peers[p]
|
||||
if fp == nil {
|
||||
log.Debug(fmt.Sprintf("announce: unknown peer"))
|
||||
p.Log().Debug("Announcement from unknown peer")
|
||||
return
|
||||
}
|
||||
|
||||
if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
|
||||
// announced tds should be strictly monotonic
|
||||
log.Debug(fmt.Sprintf("non-monotonic Td from peer %v", p.id))
|
||||
p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)
|
||||
go f.pm.removePeer(p.id)
|
||||
return
|
||||
}
|
||||
@ -355,14 +354,14 @@ func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64) bo
|
||||
func (f *lightFetcher) request(p *peer, reqID uint64, n *fetcherTreeNode, amount uint64) (uint64, bool) {
|
||||
fp := f.peers[p]
|
||||
if fp == nil {
|
||||
log.Debug(fmt.Sprintf("request: unknown peer"))
|
||||
p.Log().Debug("Requesting from unknown peer")
|
||||
p.fcServer.DeassignRequest(reqID)
|
||||
return 0, false
|
||||
}
|
||||
if fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root) {
|
||||
f.syncing = true
|
||||
go func() {
|
||||
log.Debug(fmt.Sprintf("synchronising with peer %v", p.id))
|
||||
p.Log().Debug("Synchronisation started")
|
||||
f.pm.synchronise(p)
|
||||
f.syncDone <- p
|
||||
}()
|
||||
@ -457,7 +456,7 @@ func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types
|
||||
// processResponse processes header download request responses, returns true if successful
|
||||
func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
|
||||
if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
|
||||
log.Debug(fmt.Sprintf("response mismatch %v %016x != %v %016x", len(resp.headers), resp.headers[0].Hash().Bytes()[:8], req.amount, req.hash[:8]))
|
||||
req.peer.Log().Debug("Response content mismatch", "requested", len(resp.headers), "reqfrom", resp.headers[0], "delivered", req.amount, "delfrom", req.hash)
|
||||
return false
|
||||
}
|
||||
headers := make([]*types.Header, req.amount)
|
||||
@ -468,14 +467,14 @@ func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) boo
|
||||
if err == core.BlockFutureErr {
|
||||
return true
|
||||
}
|
||||
log.Debug(fmt.Sprintf("InsertHeaderChain error: %v", err))
|
||||
log.Debug("Failed to insert header chain", "err", err)
|
||||
return false
|
||||
}
|
||||
tds := make([]*big.Int, len(headers))
|
||||
for i, header := range headers {
|
||||
td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
|
||||
if td == nil {
|
||||
log.Debug(fmt.Sprintf("TD not found for header %v of %v", i+1, len(headers)))
|
||||
log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())
|
||||
return false
|
||||
}
|
||||
tds[i] = td
|
||||
@ -490,7 +489,7 @@ func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
|
||||
var maxTd *big.Int
|
||||
for p, fp := range f.peers {
|
||||
if !f.checkAnnouncedHeaders(fp, headers, tds) {
|
||||
log.Debug(fmt.Sprintf("announce inconsistency by peer %v", p.id))
|
||||
p.Log().Debug("Inconsistent announcement")
|
||||
go f.pm.removePeer(p.id)
|
||||
}
|
||||
if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
|
||||
@ -576,7 +575,7 @@ func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*typ
|
||||
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
|
||||
fp := f.peers[p]
|
||||
if fp == nil {
|
||||
log.Debug(fmt.Sprintf("checkSyncedHeaders: unknown peer"))
|
||||
p.Log().Debug("Unknown peer to check sync headers")
|
||||
return
|
||||
}
|
||||
n := fp.lastAnnounced
|
||||
@ -589,7 +588,7 @@ func (f *lightFetcher) checkSyncedHeaders(p *peer) {
|
||||
}
|
||||
// now n is the latest downloaded header after syncing
|
||||
if n == nil {
|
||||
log.Debug(fmt.Sprintf("synchronisation failed with peer %v", p.id))
|
||||
p.Log().Debug("Synchronisation failed")
|
||||
go f.pm.removePeer(p.id)
|
||||
} else {
|
||||
header := f.chain.GetHeader(n.hash, n.number)
|
||||
@ -610,12 +609,12 @@ func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
|
||||
|
||||
fp := f.peers[p]
|
||||
if fp == nil {
|
||||
log.Debug(fmt.Sprintf("checkKnownNode: unknown peer"))
|
||||
p.Log().Debug("Unknown peer to check known nodes")
|
||||
return false
|
||||
}
|
||||
header := f.chain.GetHeader(n.hash, n.number)
|
||||
if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
|
||||
log.Debug(fmt.Sprintf("announce inconsistency by peer %v", p.id))
|
||||
p.Log().Debug("Inconsistent announcement")
|
||||
go f.pm.removePeer(p.id)
|
||||
}
|
||||
if fp.confirmedTd != nil {
|
||||
@ -700,7 +699,7 @@ func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {
|
||||
now := mclock.Now()
|
||||
fp := f.peers[p]
|
||||
if fp == nil {
|
||||
log.Debug(fmt.Sprintf("checkUpdateStats: unknown peer"))
|
||||
p.Log().Debug("Unknown peer to check update stats")
|
||||
return
|
||||
}
|
||||
if newEntry != nil && fp.firstUpdateStats == nil {
|
||||
|
@ -198,7 +198,6 @@ func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, network
|
||||
}
|
||||
|
||||
if lightSync {
|
||||
log.Debug(fmt.Sprintf("LES: create downloader"))
|
||||
manager.downloader = downloader.New(downloader.LightSync, chainDb, manager.eventMux, blockchain.HasHeader, nil, blockchain.GetHeaderByHash,
|
||||
nil, blockchain.CurrentHeader, nil, nil, nil, blockchain.GetTdByHash,
|
||||
blockchain.InsertHeaderChain, nil, nil, blockchain.Rollback, removePeer)
|
||||
@ -225,16 +224,13 @@ func (pm *ProtocolManager) removePeer(id string) {
|
||||
if peer == nil {
|
||||
return
|
||||
}
|
||||
log.Debug("Removing light Ethereum peer", "peer", id)
|
||||
if err := pm.peers.Unregister(id); err != nil {
|
||||
if err == errNotRegistered {
|
||||
return
|
||||
}
|
||||
log.Error(fmt.Sprint("Removal failed:", err))
|
||||
}
|
||||
log.Debug(fmt.Sprint("Removing peer", id))
|
||||
|
||||
// Unregister the peer from the downloader and Ethereum peer set
|
||||
log.Debug(fmt.Sprintf("LES: unregister peer %v", id))
|
||||
if pm.lightSync {
|
||||
pm.downloader.UnregisterPeer(id)
|
||||
if pm.txrelay != nil {
|
||||
@ -267,9 +263,11 @@ func (pm *ProtocolManager) Start(srvr *p2p.Server) {
|
||||
} else {
|
||||
if topicDisc != nil {
|
||||
go func() {
|
||||
log.Info(fmt.Sprint("Starting registering topic", string(lesTopic)))
|
||||
logger := log.New("topic", lesTopic)
|
||||
logger.Info("Starting topic registration")
|
||||
defer logger.Info("Terminated topic registration")
|
||||
|
||||
topicDisc.RegisterTopic(lesTopic, pm.quitSync)
|
||||
log.Info(fmt.Sprint("Stopped registering topic", string(lesTopic)))
|
||||
}()
|
||||
}
|
||||
go func() {
|
||||
@ -282,7 +280,7 @@ func (pm *ProtocolManager) Start(srvr *p2p.Server) {
|
||||
func (pm *ProtocolManager) Stop() {
|
||||
// Showing a log message. During download / process this could actually
|
||||
// take between 5 to 10 seconds and therefor feedback is required.
|
||||
log.Info(fmt.Sprint("Stopping light ethereum protocol handler..."))
|
||||
log.Info("Stopping light Ethereum protocol")
|
||||
|
||||
// Quit the sync loop.
|
||||
// After this send has completed, no new peers will be accepted.
|
||||
@ -299,7 +297,7 @@ func (pm *ProtocolManager) Stop() {
|
||||
// Wait for any process action
|
||||
pm.wg.Wait()
|
||||
|
||||
log.Info(fmt.Sprint("Light ethereum protocol handler stopped"))
|
||||
log.Info("Light Ethereum protocol stopped")
|
||||
}
|
||||
|
||||
func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
|
||||
@ -309,22 +307,21 @@ func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter
|
||||
// handle is the callback invoked to manage the life cycle of a les peer. When
|
||||
// this function terminates, the peer is disconnected.
|
||||
func (pm *ProtocolManager) handle(p *peer) error {
|
||||
log.Debug(fmt.Sprintf("%v: peer connected [%s]", p, p.Name()))
|
||||
p.Log().Debug("Light Ethereum peer connected", "name", p.Name())
|
||||
|
||||
// Execute the LES handshake
|
||||
td, head, genesis := pm.blockchain.Status()
|
||||
headNum := core.GetBlockNumber(pm.chainDb, head)
|
||||
if err := p.Handshake(td, head, headNum, genesis, pm.server); err != nil {
|
||||
log.Debug(fmt.Sprintf("%v: handshake failed: %v", p, err))
|
||||
p.Log().Debug("Light Ethereum handshake failed", "err", err)
|
||||
return err
|
||||
}
|
||||
if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
|
||||
rw.Init(p.version)
|
||||
}
|
||||
// Register the peer locally
|
||||
log.Trace(fmt.Sprintf("%v: adding peer", p))
|
||||
if err := pm.peers.Register(p); err != nil {
|
||||
log.Error(fmt.Sprintf("%v: addition failed: %v", p, err))
|
||||
p.Log().Error("Light Ethereum peer registration failed", "err", err)
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
@ -333,9 +330,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
|
||||
}
|
||||
pm.removePeer(p.id)
|
||||
}()
|
||||
|
||||
// Register the peer in the downloader. If the downloader considers it banned, we disconnect
|
||||
log.Debug(fmt.Sprintf("LES: register peer %v", p.id))
|
||||
if pm.lightSync {
|
||||
requestHeadersByHash := func(origin common.Hash, amount int, skip int, reverse bool) error {
|
||||
reqID := getNextReqID()
|
||||
@ -389,7 +384,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
|
||||
// main loop. handle incoming messages.
|
||||
for {
|
||||
if err := pm.handleMsg(p); err != nil {
|
||||
log.Debug(fmt.Sprintf("%v: message handling failed: %v", p, err))
|
||||
p.Log().Debug("Light Ethereum message handling failed", "err", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -405,8 +400,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debug(fmt.Sprint("msg:", msg.Code, msg.Size))
|
||||
p.Log().Trace("Light Ethereum message arrived", "code", msg.Code, "bytes", msg.Size)
|
||||
|
||||
costs := p.fcCosts[msg.Code]
|
||||
reject := func(reqCnt, maxCnt uint64) bool {
|
||||
@ -419,7 +413,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
cost = pm.server.defParams.BufLimit
|
||||
}
|
||||
if cost > bufValue {
|
||||
log.Error(fmt.Sprintf("Request from %v came %v too early", p.id, time.Duration((cost-bufValue)*1000000/pm.server.defParams.MinRecharge)))
|
||||
recharge := time.Duration((cost - bufValue) * 1000000 / pm.server.defParams.MinRecharge)
|
||||
p.Log().Error("Request came too early", "recharge", common.PrettyDuration(recharge))
|
||||
return true
|
||||
}
|
||||
return false
|
||||
@ -435,25 +430,25 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
// Handle the message depending on its contents
|
||||
switch msg.Code {
|
||||
case StatusMsg:
|
||||
log.Debug(fmt.Sprintf("<=== StatusMsg from peer %v", p.id))
|
||||
p.Log().Trace("Received status message")
|
||||
// Status messages should never arrive after the handshake
|
||||
return errResp(ErrExtraStatusMsg, "uncontrolled status message")
|
||||
|
||||
// Block header query, collect the requested headers and reply
|
||||
case AnnounceMsg:
|
||||
log.Debug(fmt.Sprintf("<=== AnnounceMsg from peer %v:", p.id))
|
||||
p.Log().Trace("Received announce message")
|
||||
|
||||
var req announceData
|
||||
if err := msg.Decode(&req); err != nil {
|
||||
return errResp(ErrDecode, "%v: %v", msg, err)
|
||||
}
|
||||
log.Trace(fmt.Sprint("AnnounceMsg:", req.Number, req.Hash, req.Td, req.ReorgDepth))
|
||||
p.Log().Trace("Announce message content", "number", req.Number, "hash", req.Hash, "td", req.Td, "reorg", req.ReorgDepth)
|
||||
if pm.fetcher != nil {
|
||||
pm.fetcher.announce(p, &req)
|
||||
}
|
||||
|
||||
case GetBlockHeadersMsg:
|
||||
log.Debug(fmt.Sprintf("<=== GetBlockHeadersMsg from peer %v", p.id))
|
||||
p.Log().Trace("Received block header request")
|
||||
// Decode the complex header query
|
||||
var req struct {
|
||||
ReqID uint64
|
||||
@ -538,7 +533,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
return errResp(ErrUnexpectedResponse, "")
|
||||
}
|
||||
|
||||
log.Debug(fmt.Sprintf("<=== BlockHeadersMsg from peer %v", p.id))
|
||||
p.Log().Trace("Received block header response message")
|
||||
// A batch of headers arrived to one of our previous requests
|
||||
var resp struct {
|
||||
ReqID, BV uint64
|
||||
@ -558,7 +553,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
}
|
||||
|
||||
case GetBlockBodiesMsg:
|
||||
log.Debug(fmt.Sprintf("<=== GetBlockBodiesMsg from peer %v", p.id))
|
||||
p.Log().Trace("Received block bodies request")
|
||||
// Decode the retrieval message
|
||||
var req struct {
|
||||
ReqID uint64
|
||||
@ -595,7 +590,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
return errResp(ErrUnexpectedResponse, "")
|
||||
}
|
||||
|
||||
log.Debug(fmt.Sprintf("<=== BlockBodiesMsg from peer %v", p.id))
|
||||
p.Log().Trace("Received block bodies response")
|
||||
// A batch of block bodies arrived to one of our previous requests
|
||||
var resp struct {
|
||||
ReqID, BV uint64
|
||||
@ -612,7 +607,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
}
|
||||
|
||||
case GetCodeMsg:
|
||||
log.Debug(fmt.Sprintf("<=== GetCodeMsg from peer %v", p.id))
|
||||
p.Log().Trace("Received code request")
|
||||
// Decode the retrieval message
|
||||
var req struct {
|
||||
ReqID uint64
|
||||
@ -656,7 +651,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
return errResp(ErrUnexpectedResponse, "")
|
||||
}
|
||||
|
||||
log.Debug(fmt.Sprintf("<=== CodeMsg from peer %v", p.id))
|
||||
p.Log().Trace("Received code response")
|
||||
// A batch of node state data arrived to one of our previous requests
|
||||
var resp struct {
|
||||
ReqID, BV uint64
|
||||
@ -673,7 +668,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
}
|
||||
|
||||
case GetReceiptsMsg:
|
||||
log.Debug(fmt.Sprintf("<=== GetReceiptsMsg from peer %v", p.id))
|
||||
p.Log().Trace("Received receipts request")
|
||||
// Decode the retrieval message
|
||||
var req struct {
|
||||
ReqID uint64
|
||||
@ -704,7 +699,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
}
|
||||
// If known, encode and queue for response packet
|
||||
if encoded, err := rlp.EncodeToBytes(results); err != nil {
|
||||
log.Error(fmt.Sprintf("failed to encode receipt: %v", err))
|
||||
log.Error("Failed to encode receipt", "err", err)
|
||||
} else {
|
||||
receipts = append(receipts, encoded)
|
||||
bytes += len(encoded)
|
||||
@ -719,7 +714,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
return errResp(ErrUnexpectedResponse, "")
|
||||
}
|
||||
|
||||
log.Debug(fmt.Sprintf("<=== ReceiptsMsg from peer %v", p.id))
|
||||
p.Log().Trace("Received receipts response")
|
||||
// A batch of receipts arrived to one of our previous requests
|
||||
var resp struct {
|
||||
ReqID, BV uint64
|
||||
@ -736,7 +731,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
}
|
||||
|
||||
case GetProofsMsg:
|
||||
log.Debug(fmt.Sprintf("<=== GetProofsMsg from peer %v", p.id))
|
||||
p.Log().Trace("Received proofs request")
|
||||
// Decode the retrieval message
|
||||
var req struct {
|
||||
ReqID uint64
|
||||
@ -786,7 +781,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
return errResp(ErrUnexpectedResponse, "")
|
||||
}
|
||||
|
||||
log.Debug(fmt.Sprintf("<=== ProofsMsg from peer %v", p.id))
|
||||
p.Log().Trace("Received proofs response")
|
||||
// A batch of merkle proofs arrived to one of our previous requests
|
||||
var resp struct {
|
||||
ReqID, BV uint64
|
||||
@ -803,7 +798,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
}
|
||||
|
||||
case GetHeaderProofsMsg:
|
||||
log.Debug(fmt.Sprintf("<=== GetHeaderProofsMsg from peer %v", p.id))
|
||||
p.Log().Trace("Received headers proof request")
|
||||
// Decode the retrieval message
|
||||
var req struct {
|
||||
ReqID uint64
|
||||
@ -847,7 +842,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
return errResp(ErrUnexpectedResponse, "")
|
||||
}
|
||||
|
||||
log.Debug(fmt.Sprintf("<=== HeaderProofsMsg from peer %v", p.id))
|
||||
p.Log().Trace("Received headers proof response")
|
||||
var resp struct {
|
||||
ReqID, BV uint64
|
||||
Data []ChtResp
|
||||
@ -884,14 +879,13 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
|
||||
|
||||
default:
|
||||
log.Debug(fmt.Sprintf("<=== unknown message with code %d from peer %v", msg.Code, p.id))
|
||||
p.Log().Trace("Received unknown message", "code", msg.Code)
|
||||
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
|
||||
}
|
||||
|
||||
if deliverMsg != nil {
|
||||
return pm.odr.Deliver(p, deliverMsg)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
33
les/odr.go
33
les/odr.go
@ -19,7 +19,6 @@ package les
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@ -70,9 +69,8 @@ func (odr *LesOdr) Database() ethdb.Database {
|
||||
return odr.db
|
||||
}
|
||||
|
||||
// validatorFunc is a function that processes a message and returns true if
|
||||
// it was a meaningful answer to a given request
|
||||
type validatorFunc func(ethdb.Database, *Msg) bool
|
||||
// validatorFunc is a function that processes a message.
|
||||
type validatorFunc func(ethdb.Database, *Msg) error
|
||||
|
||||
// sentReq is a request waiting for an answer that satisfies its valFunc
|
||||
type sentReq struct {
|
||||
@ -113,18 +111,19 @@ func (self *LesOdr) Deliver(peer *peer, msg *Msg) error {
|
||||
return errResp(ErrUnexpectedResponse, "reqID = %v", msg.ReqID)
|
||||
}
|
||||
|
||||
if req.valFunc(self.db, msg) {
|
||||
close(delivered)
|
||||
req.lock.Lock()
|
||||
delete(req.sentTo, peer)
|
||||
if req.answered != nil {
|
||||
close(req.answered)
|
||||
req.answered = nil
|
||||
}
|
||||
req.lock.Unlock()
|
||||
return nil
|
||||
if err := req.valFunc(self.db, msg); err != nil {
|
||||
peer.Log().Warn("Invalid odr response", "err", err)
|
||||
return errResp(ErrInvalidResponse, "reqID = %v", msg.ReqID)
|
||||
}
|
||||
return errResp(ErrInvalidResponse, "reqID = %v", msg.ReqID)
|
||||
close(delivered)
|
||||
req.lock.Lock()
|
||||
delete(req.sentTo, peer)
|
||||
if req.answered != nil {
|
||||
close(req.answered)
|
||||
req.answered = nil
|
||||
}
|
||||
req.lock.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *LesOdr) requestPeer(req *sentReq, peer *peer, delivered, timeout chan struct{}, reqWg *sync.WaitGroup) {
|
||||
@ -151,7 +150,7 @@ func (self *LesOdr) requestPeer(req *sentReq, peer *peer, delivered, timeout cha
|
||||
select {
|
||||
case <-delivered:
|
||||
case <-time.After(hardRequestTimeout):
|
||||
log.Debug(fmt.Sprintf("ODR hard request timeout from peer %v", peer.id))
|
||||
peer.Log().Debug("Request timed out hard")
|
||||
go self.removePeer(peer.id)
|
||||
case <-self.stop:
|
||||
return
|
||||
@ -237,7 +236,7 @@ func (self *LesOdr) Retrieve(ctx context.Context, req light.OdrRequest) (err err
|
||||
// retrieved from network, store in db
|
||||
req.StoreResult(self.db)
|
||||
} else {
|
||||
log.Debug(fmt.Sprintf("networkRequest err = %v", err))
|
||||
log.Debug("Failed to retrieve data from network", "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -19,8 +19,8 @@
|
||||
package les
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -34,11 +34,22 @@ import (
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
)
|
||||
|
||||
var (
|
||||
errInvalidMessageType = errors.New("invalid message type")
|
||||
errMultipleEntries = errors.New("multiple response entries")
|
||||
errHeaderUnavailable = errors.New("header unavailable")
|
||||
errTxHashMismatch = errors.New("transaction hash mismatch")
|
||||
errUncleHashMismatch = errors.New("uncle hash mismatch")
|
||||
errReceiptHashMismatch = errors.New("receipt hash mismatch")
|
||||
errDataHashMismatch = errors.New("data hash mismatch")
|
||||
errCHTHashMismatch = errors.New("cht hash mismatch")
|
||||
)
|
||||
|
||||
type LesOdrRequest interface {
|
||||
GetCost(*peer) uint64
|
||||
CanSend(*peer) bool
|
||||
Request(uint64, *peer) error
|
||||
Valid(ethdb.Database, *Msg) bool // if true, keeps the retrieved object
|
||||
Valid(ethdb.Database, *Msg) error // if true, keeps the retrieved object
|
||||
}
|
||||
|
||||
func LesRequest(req light.OdrRequest) LesOdrRequest {
|
||||
@ -63,59 +74,55 @@ type BlockRequest light.BlockRequest
|
||||
|
||||
// GetCost returns the cost of the given ODR request according to the serving
|
||||
// peer's cost table (implementation of LesOdrRequest)
|
||||
func (self *BlockRequest) GetCost(peer *peer) uint64 {
|
||||
func (r *BlockRequest) GetCost(peer *peer) uint64 {
|
||||
return peer.GetRequestCost(GetBlockBodiesMsg, 1)
|
||||
}
|
||||
|
||||
// CanSend tells if a certain peer is suitable for serving the given request
|
||||
func (self *BlockRequest) CanSend(peer *peer) bool {
|
||||
return peer.HasBlock(self.Hash, self.Number)
|
||||
func (r *BlockRequest) CanSend(peer *peer) bool {
|
||||
return peer.HasBlock(r.Hash, r.Number)
|
||||
}
|
||||
|
||||
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
||||
func (self *BlockRequest) Request(reqID uint64, peer *peer) error {
|
||||
log.Debug(fmt.Sprintf("ODR: requesting body of block %08x from peer %v", self.Hash[:4], peer.id))
|
||||
return peer.RequestBodies(reqID, self.GetCost(peer), []common.Hash{self.Hash})
|
||||
func (r *BlockRequest) Request(reqID uint64, peer *peer) error {
|
||||
peer.Log().Debug("Requesting block body", "hash", r.Hash)
|
||||
return peer.RequestBodies(reqID, r.GetCost(peer), []common.Hash{r.Hash})
|
||||
}
|
||||
|
||||
// Valid processes an ODR request reply message from the LES network
|
||||
// returns true and stores results in memory if the message was a valid reply
|
||||
// to the request (implementation of LesOdrRequest)
|
||||
func (self *BlockRequest) Valid(db ethdb.Database, msg *Msg) bool {
|
||||
log.Debug(fmt.Sprintf("ODR: validating body of block %08x", self.Hash[:4]))
|
||||
func (r *BlockRequest) Valid(db ethdb.Database, msg *Msg) error {
|
||||
log.Debug("Validating block body", "hash", r.Hash)
|
||||
|
||||
// Ensure we have a correct message with a single block body
|
||||
if msg.MsgType != MsgBlockBodies {
|
||||
log.Debug(fmt.Sprintf("ODR: invalid message type"))
|
||||
return false
|
||||
return errInvalidMessageType
|
||||
}
|
||||
bodies := msg.Obj.([]*types.Body)
|
||||
if len(bodies) != 1 {
|
||||
log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(bodies)))
|
||||
return false
|
||||
return errMultipleEntries
|
||||
}
|
||||
body := bodies[0]
|
||||
header := core.GetHeader(db, self.Hash, self.Number)
|
||||
|
||||
// Retrieve our stored header and validate block content against it
|
||||
header := core.GetHeader(db, r.Hash, r.Number)
|
||||
if header == nil {
|
||||
log.Debug(fmt.Sprintf("ODR: header not found for block %08x", self.Hash[:4]))
|
||||
return false
|
||||
return errHeaderUnavailable
|
||||
}
|
||||
txHash := types.DeriveSha(types.Transactions(body.Transactions))
|
||||
if header.TxHash != txHash {
|
||||
log.Debug(fmt.Sprintf("ODR: header.TxHash %08x does not match received txHash %08x", header.TxHash[:4], txHash[:4]))
|
||||
return false
|
||||
if header.TxHash != types.DeriveSha(types.Transactions(body.Transactions)) {
|
||||
return errTxHashMismatch
|
||||
}
|
||||
uncleHash := types.CalcUncleHash(body.Uncles)
|
||||
if header.UncleHash != uncleHash {
|
||||
log.Debug(fmt.Sprintf("ODR: header.UncleHash %08x does not match received uncleHash %08x", header.UncleHash[:4], uncleHash[:4]))
|
||||
return false
|
||||
if header.UncleHash != types.CalcUncleHash(body.Uncles) {
|
||||
return errUncleHashMismatch
|
||||
}
|
||||
// Validations passed, encode and store RLP
|
||||
data, err := rlp.EncodeToBytes(body)
|
||||
if err != nil {
|
||||
log.Debug(fmt.Sprintf("ODR: body RLP encode error: %v", err))
|
||||
return false
|
||||
return err
|
||||
}
|
||||
self.Rlp = data
|
||||
log.Debug(fmt.Sprintf("ODR: validation successful"))
|
||||
return true
|
||||
r.Rlp = data
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiptsRequest is the ODR request type for block receipts by block hash
|
||||
@ -123,48 +130,48 @@ type ReceiptsRequest light.ReceiptsRequest
|
||||
|
||||
// GetCost returns the cost of the given ODR request according to the serving
|
||||
// peer's cost table (implementation of LesOdrRequest)
|
||||
func (self *ReceiptsRequest) GetCost(peer *peer) uint64 {
|
||||
func (r *ReceiptsRequest) GetCost(peer *peer) uint64 {
|
||||
return peer.GetRequestCost(GetReceiptsMsg, 1)
|
||||
}
|
||||
|
||||
// CanSend tells if a certain peer is suitable for serving the given request
|
||||
func (self *ReceiptsRequest) CanSend(peer *peer) bool {
|
||||
return peer.HasBlock(self.Hash, self.Number)
|
||||
func (r *ReceiptsRequest) CanSend(peer *peer) bool {
|
||||
return peer.HasBlock(r.Hash, r.Number)
|
||||
}
|
||||
|
||||
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
||||
func (self *ReceiptsRequest) Request(reqID uint64, peer *peer) error {
|
||||
log.Debug(fmt.Sprintf("ODR: requesting receipts for block %08x from peer %v", self.Hash[:4], peer.id))
|
||||
return peer.RequestReceipts(reqID, self.GetCost(peer), []common.Hash{self.Hash})
|
||||
func (r *ReceiptsRequest) Request(reqID uint64, peer *peer) error {
|
||||
peer.Log().Debug("Requesting block receipts", "hash", r.Hash)
|
||||
return peer.RequestReceipts(reqID, r.GetCost(peer), []common.Hash{r.Hash})
|
||||
}
|
||||
|
||||
// Valid processes an ODR request reply message from the LES network
|
||||
// returns true and stores results in memory if the message was a valid reply
|
||||
// to the request (implementation of LesOdrRequest)
|
||||
func (self *ReceiptsRequest) Valid(db ethdb.Database, msg *Msg) bool {
|
||||
log.Debug(fmt.Sprintf("ODR: validating receipts for block %08x", self.Hash[:4]))
|
||||
func (r *ReceiptsRequest) Valid(db ethdb.Database, msg *Msg) error {
|
||||
log.Debug("Validating block receipts", "hash", r.Hash)
|
||||
|
||||
// Ensure we have a correct message with a single block receipt
|
||||
if msg.MsgType != MsgReceipts {
|
||||
log.Debug(fmt.Sprintf("ODR: invalid message type"))
|
||||
return false
|
||||
return errInvalidMessageType
|
||||
}
|
||||
receipts := msg.Obj.([]types.Receipts)
|
||||
if len(receipts) != 1 {
|
||||
log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(receipts)))
|
||||
return false
|
||||
return errMultipleEntries
|
||||
}
|
||||
hash := types.DeriveSha(receipts[0])
|
||||
header := core.GetHeader(db, self.Hash, self.Number)
|
||||
receipt := receipts[0]
|
||||
|
||||
// Retrieve our stored header and validate receipt content against it
|
||||
header := core.GetHeader(db, r.Hash, r.Number)
|
||||
if header == nil {
|
||||
log.Debug(fmt.Sprintf("ODR: header not found for block %08x", self.Hash[:4]))
|
||||
return false
|
||||
return errHeaderUnavailable
|
||||
}
|
||||
if !bytes.Equal(header.ReceiptHash[:], hash[:]) {
|
||||
log.Debug(fmt.Sprintf("ODR: header receipts hash %08x does not match calculated RLP hash %08x", header.ReceiptHash[:4], hash[:4]))
|
||||
return false
|
||||
if header.ReceiptHash != types.DeriveSha(receipt) {
|
||||
return errReceiptHashMismatch
|
||||
}
|
||||
self.Receipts = receipts[0]
|
||||
log.Debug(fmt.Sprintf("ODR: validation successful"))
|
||||
return true
|
||||
// Validations passed, store and return
|
||||
r.Receipts = receipt
|
||||
return nil
|
||||
}
|
||||
|
||||
type ProofReq struct {
|
||||
@ -178,49 +185,46 @@ type TrieRequest light.TrieRequest
|
||||
|
||||
// GetCost returns the cost of the given ODR request according to the serving
|
||||
// peer's cost table (implementation of LesOdrRequest)
|
||||
func (self *TrieRequest) GetCost(peer *peer) uint64 {
|
||||
func (r *TrieRequest) GetCost(peer *peer) uint64 {
|
||||
return peer.GetRequestCost(GetProofsMsg, 1)
|
||||
}
|
||||
|
||||
// CanSend tells if a certain peer is suitable for serving the given request
|
||||
func (self *TrieRequest) CanSend(peer *peer) bool {
|
||||
return peer.HasBlock(self.Id.BlockHash, self.Id.BlockNumber)
|
||||
func (r *TrieRequest) CanSend(peer *peer) bool {
|
||||
return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber)
|
||||
}
|
||||
|
||||
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
||||
func (self *TrieRequest) Request(reqID uint64, peer *peer) error {
|
||||
log.Debug(fmt.Sprintf("ODR: requesting trie root %08x key %08x from peer %v", self.Id.Root[:4], self.Key[:4], peer.id))
|
||||
func (r *TrieRequest) Request(reqID uint64, peer *peer) error {
|
||||
peer.Log().Debug("Requesting trie proof", "root", r.Id.Root, "key", r.Key)
|
||||
req := &ProofReq{
|
||||
BHash: self.Id.BlockHash,
|
||||
AccKey: self.Id.AccKey,
|
||||
Key: self.Key,
|
||||
BHash: r.Id.BlockHash,
|
||||
AccKey: r.Id.AccKey,
|
||||
Key: r.Key,
|
||||
}
|
||||
return peer.RequestProofs(reqID, self.GetCost(peer), []*ProofReq{req})
|
||||
return peer.RequestProofs(reqID, r.GetCost(peer), []*ProofReq{req})
|
||||
}
|
||||
|
||||
// Valid processes an ODR request reply message from the LES network
|
||||
// returns true and stores results in memory if the message was a valid reply
|
||||
// to the request (implementation of LesOdrRequest)
|
||||
func (self *TrieRequest) Valid(db ethdb.Database, msg *Msg) bool {
|
||||
log.Debug(fmt.Sprintf("ODR: validating trie root %08x key %08x", self.Id.Root[:4], self.Key[:4]))
|
||||
func (r *TrieRequest) Valid(db ethdb.Database, msg *Msg) error {
|
||||
log.Debug("Validating trie proof", "root", r.Id.Root, "key", r.Key)
|
||||
|
||||
// Ensure we have a correct message with a single proof
|
||||
if msg.MsgType != MsgProofs {
|
||||
log.Debug(fmt.Sprintf("ODR: invalid message type"))
|
||||
return false
|
||||
return errInvalidMessageType
|
||||
}
|
||||
proofs := msg.Obj.([][]rlp.RawValue)
|
||||
if len(proofs) != 1 {
|
||||
log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(proofs)))
|
||||
return false
|
||||
return errMultipleEntries
|
||||
}
|
||||
_, err := trie.VerifyProof(self.Id.Root, self.Key, proofs[0])
|
||||
if err != nil {
|
||||
log.Debug(fmt.Sprintf("ODR: merkle proof verification error: %v", err))
|
||||
return false
|
||||
// Verify the proof and store if checks out
|
||||
if _, err := trie.VerifyProof(r.Id.Root, r.Key, proofs[0]); err != nil {
|
||||
return fmt.Errorf("merkle proof verification failed: %v", err)
|
||||
}
|
||||
self.Proof = proofs[0]
|
||||
log.Debug(fmt.Sprintf("ODR: validation successful"))
|
||||
return true
|
||||
r.Proof = proofs[0]
|
||||
return nil
|
||||
}
|
||||
|
||||
type CodeReq struct {
|
||||
@ -233,47 +237,47 @@ type CodeRequest light.CodeRequest
|
||||
|
||||
// GetCost returns the cost of the given ODR request according to the serving
|
||||
// peer's cost table (implementation of LesOdrRequest)
|
||||
func (self *CodeRequest) GetCost(peer *peer) uint64 {
|
||||
func (r *CodeRequest) GetCost(peer *peer) uint64 {
|
||||
return peer.GetRequestCost(GetCodeMsg, 1)
|
||||
}
|
||||
|
||||
// CanSend tells if a certain peer is suitable for serving the given request
|
||||
func (self *CodeRequest) CanSend(peer *peer) bool {
|
||||
return peer.HasBlock(self.Id.BlockHash, self.Id.BlockNumber)
|
||||
func (r *CodeRequest) CanSend(peer *peer) bool {
|
||||
return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber)
|
||||
}
|
||||
|
||||
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
||||
func (self *CodeRequest) Request(reqID uint64, peer *peer) error {
|
||||
log.Debug(fmt.Sprintf("ODR: requesting node data for hash %08x from peer %v", self.Hash[:4], peer.id))
|
||||
func (r *CodeRequest) Request(reqID uint64, peer *peer) error {
|
||||
peer.Log().Debug("Requesting code data", "hash", r.Hash)
|
||||
req := &CodeReq{
|
||||
BHash: self.Id.BlockHash,
|
||||
AccKey: self.Id.AccKey,
|
||||
BHash: r.Id.BlockHash,
|
||||
AccKey: r.Id.AccKey,
|
||||
}
|
||||
return peer.RequestCode(reqID, self.GetCost(peer), []*CodeReq{req})
|
||||
return peer.RequestCode(reqID, r.GetCost(peer), []*CodeReq{req})
|
||||
}
|
||||
|
||||
// Valid processes an ODR request reply message from the LES network
|
||||
// returns true and stores results in memory if the message was a valid reply
|
||||
// to the request (implementation of LesOdrRequest)
|
||||
func (self *CodeRequest) Valid(db ethdb.Database, msg *Msg) bool {
|
||||
log.Debug(fmt.Sprintf("ODR: validating node data for hash %08x", self.Hash[:4]))
|
||||
func (r *CodeRequest) Valid(db ethdb.Database, msg *Msg) error {
|
||||
log.Debug("Validating code data", "hash", r.Hash)
|
||||
|
||||
// Ensure we have a correct message with a single code element
|
||||
if msg.MsgType != MsgCode {
|
||||
log.Debug(fmt.Sprintf("ODR: invalid message type"))
|
||||
return false
|
||||
return errInvalidMessageType
|
||||
}
|
||||
reply := msg.Obj.([][]byte)
|
||||
if len(reply) != 1 {
|
||||
log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(reply)))
|
||||
return false
|
||||
return errMultipleEntries
|
||||
}
|
||||
data := reply[0]
|
||||
if hash := crypto.Keccak256Hash(data); self.Hash != hash {
|
||||
log.Debug(fmt.Sprintf("ODR: requested hash %08x does not match received data hash %08x", self.Hash[:4], hash[:4]))
|
||||
return false
|
||||
|
||||
// Verify the data and store if checks out
|
||||
if hash := crypto.Keccak256Hash(data); r.Hash != hash {
|
||||
return errDataHashMismatch
|
||||
}
|
||||
self.Data = data
|
||||
log.Debug(fmt.Sprintf("ODR: validation successful"))
|
||||
return true
|
||||
r.Data = data
|
||||
return nil
|
||||
}
|
||||
|
||||
type ChtReq struct {
|
||||
@ -290,64 +294,63 @@ type ChtRequest light.ChtRequest
|
||||
|
||||
// GetCost returns the cost of the given ODR request according to the serving
|
||||
// peer's cost table (implementation of LesOdrRequest)
|
||||
func (self *ChtRequest) GetCost(peer *peer) uint64 {
|
||||
func (r *ChtRequest) GetCost(peer *peer) uint64 {
|
||||
return peer.GetRequestCost(GetHeaderProofsMsg, 1)
|
||||
}
|
||||
|
||||
// CanSend tells if a certain peer is suitable for serving the given request
|
||||
func (self *ChtRequest) CanSend(peer *peer) bool {
|
||||
func (r *ChtRequest) CanSend(peer *peer) bool {
|
||||
peer.lock.RLock()
|
||||
defer peer.lock.RUnlock()
|
||||
|
||||
return self.ChtNum <= (peer.headInfo.Number-light.ChtConfirmations)/light.ChtFrequency
|
||||
return r.ChtNum <= (peer.headInfo.Number-light.ChtConfirmations)/light.ChtFrequency
|
||||
}
|
||||
|
||||
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
||||
func (self *ChtRequest) Request(reqID uint64, peer *peer) error {
|
||||
log.Debug(fmt.Sprintf("ODR: requesting CHT #%d block #%d from peer %v", self.ChtNum, self.BlockNum, peer.id))
|
||||
func (r *ChtRequest) Request(reqID uint64, peer *peer) error {
|
||||
peer.Log().Debug("Requesting CHT", "cht", r.ChtNum, "block", r.BlockNum)
|
||||
req := &ChtReq{
|
||||
ChtNum: self.ChtNum,
|
||||
BlockNum: self.BlockNum,
|
||||
ChtNum: r.ChtNum,
|
||||
BlockNum: r.BlockNum,
|
||||
}
|
||||
return peer.RequestHeaderProofs(reqID, self.GetCost(peer), []*ChtReq{req})
|
||||
return peer.RequestHeaderProofs(reqID, r.GetCost(peer), []*ChtReq{req})
|
||||
}

// Valid processes an ODR request reply message from the LES network
// returns true and stores results in memory if the message was a valid reply
// to the request (implementation of LesOdrRequest)
func (self *ChtRequest) Valid(db ethdb.Database, msg *Msg) bool {
log.Debug(fmt.Sprintf("ODR: validating CHT #%d block #%d", self.ChtNum, self.BlockNum))
func (r *ChtRequest) Valid(db ethdb.Database, msg *Msg) error {
log.Debug("Validating CHT", "cht", r.ChtNum, "block", r.BlockNum)

// Ensure we have a correct message with a single proof element
if msg.MsgType != MsgHeaderProofs {
log.Debug(fmt.Sprintf("ODR: invalid message type"))
return false
return errInvalidMessageType
}
proofs := msg.Obj.([]ChtResp)
if len(proofs) != 1 {
log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(proofs)))
return false
return errMultipleEntries
}
proof := proofs[0]

// Verify the CHT
var encNumber [8]byte
binary.BigEndian.PutUint64(encNumber[:], self.BlockNum)
value, err := trie.VerifyProof(self.ChtRoot, encNumber[:], proof.Proof)
binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)

value, err := trie.VerifyProof(r.ChtRoot, encNumber[:], proof.Proof)
if err != nil {
log.Debug(fmt.Sprintf("ODR: CHT merkle proof verification error: %v", err))
return false
return err
}
var node light.ChtNode
if err := rlp.DecodeBytes(value, &node); err != nil {
log.Debug(fmt.Sprintf("ODR: error decoding CHT node: %v", err))
return false
return err
}
if node.Hash != proof.Header.Hash() {
log.Debug(fmt.Sprintf("ODR: CHT header hash does not match"))
return false
return errCHTHashMismatch
}
// Verifications passed, store and return
r.Header = proof.Header
r.Proof = proof.Proof
r.Td = node.Td

self.Proof = proof.Proof
self.Header = proof.Header
self.Td = node.Td
log.Debug(fmt.Sprintf("ODR: validation successful"))
return true
return nil
}
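Alongside the logging change, the Valid methods now return an error instead of a bool, so the caller decides what to log; sentinel values such as errInvalidMessageType, errMultipleEntries, errDataHashMismatch and errCHTHashMismatch name the failure directly. A small self-contained sketch of that pattern (the error texts and the validate function are invented for illustration):

package main

import (
	"errors"
	"fmt"
)

// Illustrative sentinel errors, mirroring the style of the les package's own
// error variables (which are defined elsewhere in the diff).
var (
	errInvalidMessageType = errors.New("invalid message type")
	errMultipleEntries    = errors.New("multiple response entries")
)

// validate returns a specific error instead of logging and returning false,
// so the caller can decide how (and whether) to report the failure.
func validate(msgType, entries int) error {
	if msgType != 1 {
		return errInvalidMessageType
	}
	if entries != 1 {
		return errMultipleEntries
	}
	return nil
}

func main() {
	if err := validate(1, 2); err != nil {
		fmt.Println("reply rejected:", err)
	}
}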
les/peer.go
@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp"
)
@ -195,51 +194,51 @@ func (p *peer) SendHeaderProofs(reqID, bv uint64, proofs []ChtResp) error {
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block.
func (p *peer) RequestHeadersByHash(reqID, cost uint64, origin common.Hash, amount int, skip int, reverse bool) error {
log.Debug(fmt.Sprintf("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse))
p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}

// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block.
func (p *peer) RequestHeadersByNumber(reqID, cost, origin uint64, amount int, skip int, reverse bool) error {
log.Debug(fmt.Sprintf("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse))
p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}

// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified.
func (p *peer) RequestBodies(reqID, cost uint64, hashes []common.Hash) error {
log.Debug(fmt.Sprintf("%v fetching %d block bodies", p, len(hashes)))
p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
return sendRequest(p.rw, GetBlockBodiesMsg, reqID, cost, hashes)
}

// RequestCode fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *peer) RequestCode(reqID, cost uint64, reqs []*CodeReq) error {
log.Debug(fmt.Sprintf("%v fetching %v state data", p, len(reqs)))
p.Log().Debug("Fetching batch of codes", "count", len(reqs))
return sendRequest(p.rw, GetCodeMsg, reqID, cost, reqs)
}

// RequestReceipts fetches a batch of transaction receipts from a remote node.
func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error {
log.Debug(fmt.Sprintf("%v fetching %v receipts", p, len(hashes)))
p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
return sendRequest(p.rw, GetReceiptsMsg, reqID, cost, hashes)
}

// RequestProofs fetches a batch of merkle proofs from a remote node.
func (p *peer) RequestProofs(reqID, cost uint64, reqs []*ProofReq) error {
log.Debug(fmt.Sprintf("%v fetching %v proofs", p, len(reqs)))
p.Log().Debug("Fetching batch of proofs", "count", len(reqs))
return sendRequest(p.rw, GetProofsMsg, reqID, cost, reqs)
}

// RequestHeaderProofs fetches a batch of header merkle proofs from a remote node.
func (p *peer) RequestHeaderProofs(reqID, cost uint64, reqs []*ChtReq) error {
log.Debug(fmt.Sprintf("%v fetching %v header proofs", p, len(reqs)))
p.Log().Debug("Fetching batch of header proofs", "count", len(reqs))
return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs)
}

func (p *peer) SendTxs(cost uint64, txs types.Transactions) error {
log.Debug(fmt.Sprintf("%v relaying %v txs", p, len(txs)))
p.Log().Debug("Fetching batch of transactions", "count", len(txs))
reqID := getNextReqID()
p.fcServer.MustAssignRequest(reqID)
p.fcServer.SendRequest(reqID, cost)
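The request helpers above no longer splice the peer into each message by hand; p.Log() hands back a logger that already carries the peer's identity. A rough, self-contained sketch of how such a contextual logger can be built with log.New (the peerStub type and the id value are invented for illustration):

package main

import "github.com/ethereum/go-ethereum/log"

// peerStub shows, in miniature, the idea behind p.Log(): the peer id is bound
// to a child logger once, and every later call inherits it. This is a sketch,
// not the real les peer implementation.
type peerStub struct {
	id  string
	log log.Logger
}

func newPeerStub(id string) *peerStub {
	return &peerStub{id: id, log: log.New("peer", id)}
}

func (p *peerStub) Log() log.Logger { return p.log }

func main() {
	log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StdoutHandler))

	p := newPeerStub("8f1a2b3c")
	// The "peer" key appears automatically in the output, no fmt.Sprintf needed.
	p.Log().Debug("Fetching batch of headers", "count", 64, "reverse", false)
}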
@ -19,7 +19,6 @@ package les

import (
"encoding/binary"
"fmt"
"math"
"sync"
"time"
@ -292,7 +291,7 @@ func (pm *ProtocolManager) blockLoop() {
lastHead = header
lastBroadcastTd = td

log.Debug(fmt.Sprint("===> ", number, hash, td, reorg))
log.Debug("Announcing block to peers", "number", number, "hash", hash, "td", td, "reorg", reorg)

announce := announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}
for _, p := range peers {
@ -396,7 +395,7 @@ func makeCht(db ethdb.Database) bool {
} else {
lastChtNum++

log.Trace(fmt.Sprintf("cht: %d %064x", lastChtNum, root))
log.Trace("Generated CHT", "number", lastChtNum, "root", root.Hex())

storeChtRoot(db, lastChtNum, root)
var data [8]byte
@ -162,7 +162,7 @@ func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry {
if entry == nil {
entry = pool.findOrNewNode(p.ID(), ip, port)
}
log.Debug(fmt.Sprintf("connecting to %v, state: %v", p.id, entry.state))
p.Log().Debug("Connecting to new peer", "state", entry.state)
if entry.state == psConnected || entry.state == psRegistered {
return nil
}
@ -184,7 +184,7 @@ func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry {

// registered should be called after a successful handshake
func (pool *serverPool) registered(entry *poolEntry) {
log.Debug(fmt.Sprintf("registered %v", entry.id.String()))
log.Debug("Registered new entry", "enode", entry.id)
pool.lock.Lock()
defer pool.lock.Unlock()

@ -202,7 +202,7 @@ func (pool *serverPool) registered(entry *poolEntry) {
// can be updated optionally (not updated if no registration happened, in this case
// only connection statistics are updated, just like in case of timeout)
func (pool *serverPool) disconnect(entry *poolEntry) {
log.Debug(fmt.Sprintf("disconnected %v", entry.id.String()))
log.Debug("Disconnected old entry", "enode", entry.id)
pool.lock.Lock()
defer pool.lock.Unlock()

@ -418,7 +418,7 @@ func (pool *serverPool) findOrNewNode(id discover.NodeID, ip net.IP, port uint16
now := mclock.Now()
entry := pool.entries[id]
if entry == nil {
log.Debug(fmt.Sprintf("discovered %v", id.String()))
log.Debug("Discovered new entry", "id", id)
entry = &poolEntry{
id: id,
addr: make(map[string]*poolEntryAddress),
@ -459,11 +459,15 @@ func (pool *serverPool) loadNodes() {
var list []*poolEntry
err = rlp.DecodeBytes(enc, &list)
if err != nil {
log.Debug(fmt.Sprintf("node list decode error: %v", err))
log.Debug("Failed to decode node list", "err", err)
return
}
for _, e := range list {
log.Debug(fmt.Sprintf("loaded server stats %016x fails: %v connStats: %v / %v delayStats: %v / %v responseStats: %v / %v timeoutStats: %v / %v", e.id[0:8], e.lastConnected.fails, e.connectStats.avg, e.connectStats.weight, time.Duration(e.delayStats.avg), e.delayStats.weight, time.Duration(e.responseStats.avg), e.responseStats.weight, e.timeoutStats.avg, e.timeoutStats.weight))
log.Debug("Loaded server stats", "id", e.id, "fails", e.lastConnected.fails,
"conn", fmt.Sprintf("%v/%v", e.connectStats.avg, e.connectStats.weight),
"delay", fmt.Sprintf("%v/%v", time.Duration(e.delayStats.avg), e.delayStats.weight),
"response", fmt.Sprintf("%v/%v", time.Duration(e.responseStats.avg), e.responseStats.weight),
"timeout", fmt.Sprintf("%v/%v", e.timeoutStats.avg, e.timeoutStats.weight))
pool.entries[e.id] = e
pool.knownQueue.setLatest(e)
pool.knownSelect.update((*knownEntry)(e))
@ -568,7 +572,7 @@ func (pool *serverPool) dial(entry *poolEntry, knownSelected bool) {
pool.newSelected++
}
addr := entry.addrSelect.choose().(*poolEntryAddress)
log.Debug(fmt.Sprintf("dialing %v out of %v, known: %v", entry.id.String()+"@"+addr.strKey(), len(entry.addr), knownSelected))
log.Debug("Dialing new peer", "lesaddr", entry.id.String()+"@"+addr.strKey(), "set", len(entry.addr), "known", knownSelected)
entry.dialed = addr
go func() {
pool.server.AddPeer(discover.NewNode(entry.id, addr.ip, addr.port, addr.port))
@ -589,7 +593,7 @@ func (pool *serverPool) checkDialTimeout(entry *poolEntry) {
if entry.state != psDialed {
return
}
log.Debug(fmt.Sprintf("timeout %v", entry.id.String()+"@"+entry.dialed.strKey()))
log.Debug("Dial timeout", "lesaddr", entry.id.String()+"@"+entry.dialed.strKey())
entry.state = psNotConnected
if entry.knownSelected {
pool.knownSelected--
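Where one logical value is really a pair, such as the avg/weight statistics in loadNodes above, the commit packs the pair into a single key with fmt.Sprintf rather than doubling the number of keys on the line. A small sketch with made-up sample numbers:

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/log"
)

func main() {
	log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StdoutHandler))

	// Invented sample stats, standing in for the server pool's avg/weight pairs.
	connAvg, connWeight := 0.82, 14.0
	delayAvg, delayWeight := 120*time.Millisecond, 9.0

	// Each related pair becomes one composite value, keeping the line readable.
	log.Debug("Loaded server stats",
		"conn", fmt.Sprintf("%v/%v", connAvg, connWeight),
		"delay", fmt.Sprintf("%v/%v", delayAvg, delayWeight))
}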
@ -17,11 +17,9 @@
package light

import (
"fmt"
"math/big"
"sync"
"sync/atomic"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
@ -101,7 +99,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
if err != nil {
return nil, err
}
log.Info(fmt.Sprint("WARNING: Wrote default ethereum genesis block"))
log.Warn("Wrote default ethereum genesis block")
}

if bc.genesisBlock.Hash() == (common.Hash{212, 229, 103, 64, 248, 118, 174, 248, 192, 16, 184, 106, 64, 213, 245, 103, 69, 161, 24, 208, 144, 106, 52, 230, 154, 236, 140, 13, 177, 203, 143, 163}) {
@ -117,7 +115,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
Root: common.HexToHash("c035076523faf514038f619715de404a65398c51899b5dccca9c05b00bc79315"),
})
}
log.Info(fmt.Sprint("Added trusted CHT for mainnet"))
log.Info("Added trusted CHT for mainnet")
} else {
if bc.genesisBlock.Hash() == (common.Hash{12, 215, 134, 162, 66, 93, 22, 241, 82, 198, 88, 49, 108, 66, 62, 108, 225, 24, 30, 21, 195, 41, 88, 38, 215, 201, 144, 76, 186, 156, 227, 3}) {
// add trusted CHT for testnet
@ -125,7 +123,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
Number: 452,
Root: common.HexToHash("511da2c88e32b14cf4a4e62f7fcbb297139faebc260a4ab5eb43cce6edcba324"),
})
log.Info(fmt.Sprint("Added trusted CHT for testnet"))
log.Info("Added trusted CHT for testnet")
} else {
DeleteTrustedCht(bc.chainDb)
}
@ -137,9 +135,9 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
for hash := range core.BadHashes {
if header := bc.GetHeaderByHash(hash); header != nil {
log.Error(fmt.Sprintf("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4]))
log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
bc.SetHead(header.Number.Uint64() - 1)
log.Error(fmt.Sprint("Chain rewind was successful, resuming normal operation"))
log.Error("Chain rewind was successful, resuming normal operation")
}
}
return bc, nil
@ -169,7 +167,7 @@ func (self *LightChain) loadLastState() error {
// Issue a status log and return
header := self.hc.CurrentHeader()
headerTd := self.GetTd(header.Hash(), header.Number.Uint64())
log.Info(fmt.Sprintf("Last header: #%d [%x…] TD=%v", self.hc.CurrentHeader().Number, self.hc.CurrentHeader().Hash().Bytes()[:4], headerTd))
log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd)

return nil
}
@ -246,10 +244,10 @@ func (bc *LightChain) ResetWithGenesisBlock(genesis *types.Block) {

// Prepare the genesis block and reinitialise the chain
if err := core.WriteTd(bc.chainDb, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
log.Crit(fmt.Sprintf("failed to write genesis block TD: %v", err))
log.Crit("Failed to write genesis block TD", "err", err)
}
if err := core.WriteBlock(bc.chainDb, genesis); err != nil {
log.Crit(fmt.Sprintf("failed to write genesis block: %v", err))
log.Crit("Failed to write genesis block", "err", err)
}
bc.genesisBlock = genesis
bc.hc.SetGenesis(bc.genesisBlock.Header())
@ -345,8 +343,7 @@ func (bc *LightChain) Stop() {
atomic.StoreInt32(&bc.procInterrupt, 1)

bc.wg.Wait()

log.Info(fmt.Sprint("Chain manager stopped"))
log.Info("Blockchain manager stopped")
}

// Rollback is designed to remove a chain of links from the database that aren't
@ -406,21 +403,16 @@ func (self *LightChain) InsertHeaderChain(chain []*types.Header, checkFreq int)

switch status {
case core.CanonStatTy:
log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("[%v] inserted header #%d (%x...).\n", time.Now().UnixNano(), header.Number, header.Hash().Bytes()[0:4])
}})
log.Debug("Inserted new header", "number", header.Number, "hash", header.Hash())
events = append(events, core.ChainEvent{Block: types.NewBlockWithHeader(header), Hash: header.Hash()})

case core.SideStatTy:
log.Trace("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("inserted forked header #%d (TD=%v) (%x...).\n", header.Number, header.Difficulty, header.Hash().Bytes()[0:4])
}})
log.Debug("Inserted forked header", "number", header.Number, "hash", header.Hash())
events = append(events, core.ChainSideEvent{Block: types.NewBlockWithHeader(header)})

case core.SplitStatTy:
events = append(events, core.ChainSplitEvent{Block: types.NewBlockWithHeader(header)})
}

return err
}
i, err := self.hc.InsertHeaderChain(chain, checkFreq, whFunc)
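The hunk above ends by handing whFunc, a per-header write callback, to the generic header-chain inserter, which calls it for every validated header. A stripped-down sketch of that callback shape (all names and types here are stand-ins, not the real core or light APIs):

package main

import "fmt"

// header is a stand-in for *types.Header; the whole example only illustrates
// the "inserter plus write callback" pattern visible in the diff above.
type header struct{ number uint64 }

type writeCallback func(h *header) error

func insertHeaderChain(chain []*header, whFunc writeCallback) (int, error) {
	for i, h := range chain {
		// In the real code the header is validated first; here we only
		// demonstrate that the callback runs once per header.
		if err := whFunc(h); err != nil {
			return i, err
		}
	}
	return 0, nil
}

func main() {
	chain := []*header{{1}, {2}, {3}}
	_, err := insertHeaderChain(chain, func(h *header) error {
		fmt.Println("inserted header", h.number)
		return nil
	})
	fmt.Println("err:", err)
}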
@ -19,7 +19,6 @@ package light
import (
"bytes"
"errors"
"fmt"
"math/big"

"github.com/ethereum/go-ethereum/common"
@ -27,7 +26,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/net/context"
)
@ -149,7 +147,6 @@ func GetBody(ctx context.Context, odr OdrBackend, hash common.Hash, number uint6
}
body := new(types.Body)
if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
log.Error(fmt.Sprintf("invalid block body RLP for hash %x: %v", hash, err))
return nil, err
}
return body, nil
@ -181,7 +178,6 @@ func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, num
r := &ReceiptsRequest{Hash: hash, Number: number}
if err := odr.Retrieve(ctx, r); err != nil {
return nil, err
} else {
return r.Receipts, nil
}
return r.Receipts, nil
}
@ -17,12 +17,10 @@
package light

import (
"fmt"
"math/big"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"golang.org/x/net/context"
)

@ -239,10 +237,6 @@ func (self *LightState) GetOrNewStateObject(ctx context.Context, addr common.Add

// newStateObject creates a state object whether it exists in the state or not
func (self *LightState) newStateObject(addr common.Address) *StateObject {
log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("(+) %x\n", addr)
}})

stateObject := NewStateObject(addr, self.odr)
self.stateObjects[addr.Str()] = stateObject

@ -23,7 +23,6 @@ import (

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/net/context"
)
@ -107,10 +106,6 @@ func NewStateObject(address common.Address, odr OdrBackend) *StateObject {
func (self *StateObject) MarkForDeletion() {
self.remove = true
self.dirty = true

log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("%x: #%d %v X\n", self.Address(), self.nonce, self.balance)
}})
}

// getAddr gets the storage value at the given address from the trie
@ -156,19 +151,11 @@ func (self *StateObject) SetState(k, value common.Hash) {
// AddBalance adds the given amount to the account balance
func (c *StateObject) AddBalance(amount *big.Int) {
c.SetBalance(new(big.Int).Add(c.balance, amount))

log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("%x: #%d %v (+ %v)\n", c.Address(), c.nonce, c.balance, amount)
}})
}

// SubBalance subtracts the given amount from the account balance
func (c *StateObject) SubBalance(amount *big.Int) {
c.SetBalance(new(big.Int).Sub(c.balance, amount))

log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("%x: #%d %v (- %v)\n", c.Address(), c.nonce, c.balance, amount)
}})
}

// SetBalance sets the account balance to the given amount
@ -320,7 +320,7 @@ func (pool *TxPool) eventLoop() {
func (pool *TxPool) Stop() {
close(pool.quit)
pool.events.Unsubscribe()
log.Info(fmt.Sprint("Transaction pool stopped"))
log.Info("Transaction pool stopped")
}

// Stats returns the number of currently pending (locally created) transactions
@ -416,20 +416,8 @@ func (self *TxPool) add(ctx context.Context, tx *types.Transaction) error {
go self.eventMux.Post(core.TxPreEvent{Tx: tx})
}

log.Debug("", "msg", log.Lazy{Fn: func() string {
var toname string
if to := tx.To(); to != nil {
toname = common.Bytes2Hex(to[:4])
} else {
toname = "[NEW_CONTRACT]"
}
// we can ignore the error here because From is
// verified in ValidateTransaction.
f, _ := types.Sender(self.signer, tx)
from := common.Bytes2Hex(f[:4])
return fmt.Sprintf("(t) %x => %s (%v) %x\n", from, toname, tx.Value(), hash)
}})

// Print a log message if low enough level is set
log.Debug("Pooled new transaction", "hash", hash, "from", log.Lazy{Fn: func() common.Address { from, _ := types.Sender(self.signer, tx); return from }}, "to", tx.To())
return nil
}

@ -462,15 +450,10 @@ func (self *TxPool) AddBatch(ctx context.Context, txs []*types.Transaction) {
var sendTx types.Transactions

for _, tx := range txs {
if err := self.add(ctx, tx); err != nil {
log.Debug(fmt.Sprint("tx error:", err))
} else {
if err := self.add(ctx, tx); err == nil {
sendTx = append(sendTx, tx)
h := tx.Hash()
log.Debug(fmt.Sprintf("tx %x\n", h[:4]))
}
}

if len(sendTx) > 0 {
self.relay.Send(sendTx)
}
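One lazily evaluated value survives the cleanup above: recovering a transaction's sender is comparatively expensive, so it stays wrapped in log.Lazy and should only be computed when the debug record actually reaches an output handler. A self-contained sketch of the idea (the expensive function is a stand-in, and the sleep only makes the deferral observable):

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/log"
)

// expensive stands in for a costly computation such as recovering a
// transaction sender from its signature.
func expensive() string {
	time.Sleep(100 * time.Millisecond)
	return "0xdeadbeef"
}

func main() {
	// With the level filter at Info, the Debug record below is dropped before
	// it reaches the output handler, so the log.Lazy function is not evaluated.
	log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StdoutHandler))

	log.Debug("Pooled new transaction", "from", log.Lazy{Fn: expensive})
	log.Info("Done")
}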
@ -18,7 +18,6 @@
package metrics

import (
"fmt"
"os"
"runtime"
"strings"
@ -41,7 +40,7 @@ var Enabled = false
func init() {
for _, arg := range os.Args {
if strings.TrimLeft(arg, "-") == MetricsEnabledFlag {
log.Info(fmt.Sprintf("Enabling metrics collection"))
log.Info("Enabling metrics collection")
Enabled = true
}
}
@ -102,7 +101,7 @@ func CollectProcessMetrics(refresh time.Duration) {
diskWrites = metrics.GetOrRegisterMeter("system/disk/writecount", metrics.DefaultRegistry)
diskWriteBytes = metrics.GetOrRegisterMeter("system/disk/writedata", metrics.DefaultRegistry)
} else {
log.Debug(fmt.Sprintf("failed to read disk metrics: %v", err))
log.Debug("Failed to read disk metrics", "err", err)
}
// Iterate loading the different stats and updating the meters
for i := 1; ; i++ {
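The metrics package needs to know whether collection is enabled before any flags are parsed, so its init scans os.Args directly; trimming leading dashes lets both "-metrics" and "--metrics" match. A small stand-alone sketch of that check (the flag name constant here is illustrative, not the real MetricsEnabledFlag value):

package main

import (
	"fmt"
	"os"
	"strings"
)

// flagName mirrors MetricsEnabledFlag in spirit; the exact name is an assumption.
const flagName = "metrics"

// enabledFromArgs reports whether the flag appears anywhere on the command line.
// Scanning the raw arguments by hand works even before the flag package runs.
func enabledFromArgs(args []string) bool {
	for _, arg := range args {
		if strings.TrimLeft(arg, "-") == flagName {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println("metrics enabled:", enabledFromArgs(os.Args[1:]))
}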