// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package ethstats implements the network stats reporting service.
package ethstats

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"math/big"
	"net/http"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	ethproto "github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/gorilla/websocket"
)

const (
	// historyUpdateRange is the number of blocks a node should report upon login or
	// history request.
	historyUpdateRange = 50

	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096

	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

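	// messageSizeLimit is the maximum size of a message accepted from the stats
	// server; it is enforced on the connection via SetReadLimit.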
	messageSizeLimit = 15 * 1024 * 1024
)

// backend encompasses the bare-minimum functionality needed for ethstats reporting
type backend interface {
	SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
	SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription
	CurrentHeader() *types.Header
	HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error)
	GetTd(ctx context.Context, hash common.Hash) *big.Int
	Stats() (pending int, queued int)
	SyncProgress() ethereum.SyncProgress
}

// fullNodeBackend encompasses the functionality necessary for a full node
// reporting to ethstats
type fullNodeBackend interface {
	backend
	BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error)
	CurrentBlock() *types.Header
	SuggestGasTipCap(ctx context.Context) (*big.Int, error)
}

// Service implements an Ethereum netstats reporting daemon that pushes local
// chain statistics up to a monitoring server.
type Service struct {
	server  *p2p.Server // Peer-to-peer server to retrieve networking infos
	backend backend
	engine  consensus.Engine // Consensus engine to retrieve variadic block fields

	node string // Name of the node to display on the monitoring page
	pass string // Password to authorize access to the monitoring page
	host string // Remote address of the monitoring service

	pongCh chan struct{} // Pong notifications are fed into this channel
	histCh chan []uint64 // History request block numbers are fed into this channel

	headSub event.Subscription
	txSub   event.Subscription
}

// connWrapper is a wrapper to prevent concurrent-write or concurrent-read on the
// websocket.
//
// From Gorilla websocket docs:
//
//	Connections support one concurrent reader and one concurrent writer. Applications are
//	responsible for ensuring that
//	 - no more than one goroutine calls the write methods
//	   NextWriter, SetWriteDeadline, WriteMessage, WriteJSON, EnableWriteCompression,
//	   SetCompressionLevel concurrently; and
//	 - that no more than one goroutine calls the
//	   read methods NextReader, SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler,
//	   SetPingHandler concurrently.
//
//	The Close and WriteControl methods can be called concurrently with all other methods.
type connWrapper struct {
	conn *websocket.Conn

	rlock sync.Mutex
	wlock sync.Mutex
}

func newConnectionWrapper(conn *websocket.Conn) *connWrapper {
	conn.SetReadLimit(messageSizeLimit)
	return &connWrapper{conn: conn}
}

// WriteJSON wraps corresponding method on the websocket but is safe for concurrent calling
func (w *connWrapper) WriteJSON(v interface{}) error {
	w.wlock.Lock()
	defer w.wlock.Unlock()

	return w.conn.WriteJSON(v)
}

// ReadJSON wraps corresponding method on the websocket but is safe for concurrent calling
func (w *connWrapper) ReadJSON(v interface{}) error {
	w.rlock.Lock()
	defer w.rlock.Unlock()

	return w.conn.ReadJSON(v)
}

// Close wraps corresponding method on the websocket but is safe for concurrent calling
func (w *connWrapper) Close() error {
	// The Close and WriteControl methods can be called concurrently with all other methods,
	// so the mutex is not used here
	return w.conn.Close()
}

// parseEthstatsURL parses the netstats connection url.
// URL argument should be of the form <nodename:secret@host:port>
// If non-erroring, the returned slice contains 3 elements: [nodename, pass, host]
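// For example (values are illustrative), "mynode:secret@stats.example.org:3000"
// parses into ["mynode", "secret", "stats.example.org:3000"].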
func parseEthstatsURL(url string) (parts []string, err error) {
	err = fmt.Errorf("invalid netstats url: \"%s\", should be nodename:secret@host:port", url)

	hostIndex := strings.LastIndex(url, "@")
	if hostIndex == -1 || hostIndex == len(url)-1 {
		return nil, err
	}
	preHost, host := url[:hostIndex], url[hostIndex+1:]

	passIndex := strings.LastIndex(preHost, ":")
	if passIndex == -1 {
		return []string{preHost, "", host}, nil
	}
	nodename, pass := preHost[:passIndex], ""
	if passIndex != len(preHost)-1 {
		pass = preHost[passIndex+1:]
	}

	return []string{nodename, pass, host}, nil
}

// New returns a monitoring service ready for stats reporting.
func New(node *node.Node, backend backend, engine consensus.Engine, url string) error {
	parts, err := parseEthstatsURL(url)
	if err != nil {
		return err
	}
	ethstats := &Service{
		backend: backend,
		engine:  engine,
		server:  node.Server(),
		node:    parts[0],
		pass:    parts[1],
		host:    parts[2],
		pongCh:  make(chan struct{}),
		histCh:  make(chan []uint64, 1),
	}

	node.RegisterLifecycle(ethstats)
	return nil
}

// Start implements node.Lifecycle, starting up the monitoring and reporting daemon.
func (s *Service) Start() error {
	// Subscribe to chain events to execute updates on
	chainHeadCh := make(chan core.ChainHeadEvent, chainHeadChanSize)
	s.headSub = s.backend.SubscribeChainHeadEvent(chainHeadCh)
	txEventCh := make(chan core.NewTxsEvent, txChanSize)
	s.txSub = s.backend.SubscribeNewTxsEvent(txEventCh)
	go s.loop(chainHeadCh, txEventCh)

	log.Info("Stats daemon started")
	return nil
}

// Stop implements node.Lifecycle, terminating the monitoring and reporting daemon.
func (s *Service) Stop() error {
	s.headSub.Unsubscribe()
	s.txSub.Unsubscribe()
	log.Info("Stats daemon stopped")
	return nil
}

// loop keeps trying to connect to the netstats server, reporting chain events
// until termination.
func (s *Service) loop(chainHeadCh chan core.ChainHeadEvent, txEventCh chan core.NewTxsEvent) {
	// Start a goroutine that exhausts the subscriptions to avoid events piling up
	var (
		quitCh = make(chan struct{})
		headCh = make(chan *types.Block, 1)
		txCh   = make(chan struct{}, 1)
	)
	go func() {
		var lastTx mclock.AbsTime

	HandleLoop:
		for {
			select {
			// Notify of chain head events, but drop if too frequent
			case head := <-chainHeadCh:
				select {
				case headCh <- head.Block:
				default:
				}

			// Notify of new transaction events, but drop if too frequent
			case <-txEventCh:
				if time.Duration(mclock.Now()-lastTx) < time.Second {
					continue
				}
				lastTx = mclock.Now()

				select {
				case txCh <- struct{}{}:
				default:
				}

			// node stopped
			case <-s.txSub.Err():
				break HandleLoop
			case <-s.headSub.Err():
				break HandleLoop
			}
		}
		close(quitCh)
	}()

	// Resolve the URL, defaulting to TLS, but falling back to none too
	path := fmt.Sprintf("%s/api", s.host)
	urls := []string{path}

	// url.Parse and url.IsAbs are unsuitable (https://github.com/golang/go/issues/19779)
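	// e.g. a bare "stats.example.org:3000" host (illustrative) is tried as
	// "wss://stats.example.org:3000/api" first and "ws://stats.example.org:3000/api" second.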
	if !strings.Contains(path, "://") {
		urls = []string{"wss://" + path, "ws://" + path}
	}

	errTimer := time.NewTimer(0)
	defer errTimer.Stop()
	// Loop reporting until termination
	for {
		select {
		case <-quitCh:
			return
		case <-errTimer.C:
			// Establish a websocket connection to the server on any supported URL
			var (
				conn *connWrapper
				err  error
			)
			dialer := websocket.Dialer{HandshakeTimeout: 5 * time.Second}
			header := make(http.Header)
			header.Set("origin", "http://localhost")
			for _, url := range urls {
				c, _, e := dialer.Dial(url, header)
				err = e
				if err == nil {
					conn = newConnectionWrapper(c)
					break
				}
			}
			if err != nil {
				log.Warn("Stats server unreachable", "err", err)
				errTimer.Reset(10 * time.Second)
				continue
			}
			// Authenticate the client with the server
			if err = s.login(conn); err != nil {
				log.Warn("Stats login failed", "err", err)
				conn.Close()
				errTimer.Reset(10 * time.Second)
				continue
			}
			go s.readLoop(conn)

			// Send the initial stats so our node looks decent from the get go
			if err = s.report(conn); err != nil {
				log.Warn("Initial stats report failed", "err", err)
				conn.Close()
				errTimer.Reset(0)
				continue
			}
			// Keep sending status updates until the connection breaks
			fullReport := time.NewTicker(15 * time.Second)

			for err == nil {
				select {
				case <-quitCh:
					fullReport.Stop()
					// Make sure the connection is closed
					conn.Close()
					return

				case <-fullReport.C:
					if err = s.report(conn); err != nil {
						log.Warn("Full stats report failed", "err", err)
					}
				case list := <-s.histCh:
					if err = s.reportHistory(conn, list); err != nil {
						log.Warn("Requested history report failed", "err", err)
					}
				case head := <-headCh:
					if err = s.reportBlock(conn, head); err != nil {
						log.Warn("Block stats report failed", "err", err)
					}
					if err = s.reportPending(conn); err != nil {
						log.Warn("Post-block transaction stats report failed", "err", err)
					}
				case <-txCh:
					if err = s.reportPending(conn); err != nil {
						log.Warn("Transaction stats report failed", "err", err)
					}
				}
			}
			fullReport.Stop()

			// Close the current connection and establish a new one
			conn.Close()
			errTimer.Reset(0)
		}
	}
}

// readLoop loops as long as the connection is alive and retrieves data packets
// from the network socket. If any of them match an active request, it forwards
// it; if they themselves are requests, it initiates a reply; and lastly it drops
// unknown packets.
func (s *Service) readLoop(conn *connWrapper) {
	// If the read loop exits, close the connection
	defer conn.Close()

	for {
		// Retrieve the next generic network packet and bail out on error
		var blob json.RawMessage
		if err := conn.ReadJSON(&blob); err != nil {
			log.Warn("Failed to retrieve stats server message", "err", err)
			return
		}
		// If the network packet is a system ping, respond to it directly
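		// The ethstats server's transport layer (Primus) sends heartbeats of the form
		// "primus::ping::<timestamp>" and expects the same payload echoed back with
		// "ping" replaced by "pong".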
		var ping string
		if err := json.Unmarshal(blob, &ping); err == nil && strings.HasPrefix(ping, "primus::ping::") {
			if err := conn.WriteJSON(strings.ReplaceAll(ping, "ping", "pong")); err != nil {
				log.Warn("Failed to respond to system ping message", "err", err)
				return
			}
			continue
		}
		// Not a system ping, try to decode an actual state message
		var msg map[string][]interface{}
		if err := json.Unmarshal(blob, &msg); err != nil {
			log.Warn("Failed to decode stats server message", "err", err)
			return
		}
		log.Trace("Received message from stats server", "msg", msg)
		if len(msg["emit"]) == 0 {
			log.Warn("Stats server sent non-broadcast", "msg", msg)
			return
		}
		command, ok := msg["emit"][0].(string)
		if !ok {
			log.Warn("Invalid stats server message type", "type", msg["emit"][0])
			return
		}
		// If the message is a ping reply, deliver (someone must be listening!)
		if len(msg["emit"]) == 2 && command == "node-pong" {
			select {
			case s.pongCh <- struct{}{}:
				// Pong delivered, continue listening
				continue
			default:
				// Ping routine dead, abort
				log.Warn("Stats server pinger seems to have died")
				return
			}
		}
		// If the message is a history request, forward to the event processor
		if len(msg["emit"]) == 2 && command == "history" {
			// Make sure the request is valid and doesn't crash us
			request, ok := msg["emit"][1].(map[string]interface{})
			if !ok {
				log.Warn("Invalid stats history request", "msg", msg["emit"][1])
				select {
				case s.histCh <- nil: // Treat it as a no-indexes request
				default:
				}
				continue
			}
			list, ok := request["list"].([]interface{})
			if !ok {
				log.Warn("Invalid stats history block list", "list", request["list"])
				return
			}
			// Convert the block number list to an integer list
			numbers := make([]uint64, len(list))
			for i, num := range list {
				n, ok := num.(float64)
				if !ok {
					log.Warn("Invalid stats history block number", "number", num)
					return
				}
				numbers[i] = uint64(n)
			}
			select {
			case s.histCh <- numbers:
				continue
			default:
			}
		}
		// Report anything else and continue
		log.Info("Unknown stats message", "msg", msg)
	}
}

// nodeInfo is the collection of meta information about a node that is displayed
// on the monitoring page.
type nodeInfo struct {
	Name     string `json:"name"`
	Node     string `json:"node"`
	Port     int    `json:"port"`
	Network  string `json:"net"`
	Protocol string `json:"protocol"`
	API      string `json:"api"`
	Os       string `json:"os"`
	OsVer    string `json:"os_v"`
	Client   string `json:"client"`
	History  bool   `json:"canUpdateHistory"`
}

// authMsg is the authentication infos needed to log in to a monitoring server.
type authMsg struct {
	ID     string   `json:"id"`
	Info   nodeInfo `json:"info"`
	Secret string   `json:"secret"`
}

// login tries to authorize the client at the remote server.
func (s *Service) login(conn *connWrapper) error {
	// Construct and send the login authentication
	infos := s.server.NodeInfo()

	var protocols []string
	for _, proto := range s.server.Protocols {
		protocols = append(protocols, fmt.Sprintf("%s/%d", proto.Name, proto.Version))
	}
	var network string
	if info := infos.Protocols["eth"]; info != nil {
		network = fmt.Sprintf("%d", info.(*ethproto.NodeInfo).Network)
	} else {
		return errors.New("no eth protocol available")
	}
	auth := &authMsg{
		ID: s.node,
		Info: nodeInfo{
			Name:     s.node,
			Node:     infos.Name,
			Port:     infos.Ports.Listener,
			Network:  network,
			Protocol: strings.Join(protocols, ", "),
			API:      "No",
			Os:       runtime.GOOS,
			OsVer:    runtime.GOARCH,
			Client:   "0.1.1",
			History:  true,
		},
		Secret: s.pass,
	}
	login := map[string][]interface{}{
		"emit": {"hello", auth},
	}
	if err := conn.WriteJSON(login); err != nil {
		return err
	}
	// Retrieve the remote ack or connection termination
	var ack map[string][]string
	if err := conn.ReadJSON(&ack); err != nil || len(ack["emit"]) != 1 || ack["emit"][0] != "ready" {
		return errors.New("unauthorized")
	}
	return nil
}

// report collects all possible data to report and send it to the stats server.
// This should only be used on reconnects or rarely to avoid overloading the
// server. Use the individual methods for reporting subscribed events.
func (s *Service) report(conn *connWrapper) error {
	if err := s.reportLatency(conn); err != nil {
		return err
	}
	if err := s.reportBlock(conn, nil); err != nil {
		return err
	}
	if err := s.reportPending(conn); err != nil {
		return err
	}
	if err := s.reportStats(conn); err != nil {
		return err
	}
	return nil
}

// reportLatency sends a ping request to the server, measures the RTT time and
// finally sends a latency update.
func (s *Service) reportLatency(conn *connWrapper) error {
	// Send the current time to the ethstats server
	start := time.Now()

	ping := map[string][]interface{}{
		"emit": {"node-ping", map[string]string{
			"id":         s.node,
			"clientTime": start.String(),
		}},
	}
	if err := conn.WriteJSON(ping); err != nil {
		return err
	}
	// Wait for the pong request to arrive back
	timer := time.NewTimer(5 * time.Second)
	defer timer.Stop()

	select {
	case <-s.pongCh:
		// Pong delivered, report the latency
	case <-timer.C:
		// Ping timeout, abort
		return errors.New("ping timed out")
	}
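	// Halve the measured round trip to approximate the one-way latency and convert it to milliseconds.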
	latency := strconv.Itoa(int((time.Since(start) / time.Duration(2)).Nanoseconds() / 1000000))

	// Send back the measured latency
	log.Trace("Sending measured latency to ethstats", "latency", latency)

	stats := map[string][]interface{}{
		"emit": {"latency", map[string]string{
			"id":      s.node,
			"latency": latency,
		}},
	}
	return conn.WriteJSON(stats)
}

// blockStats is the information to report about individual blocks.
type blockStats struct {
	Number     *big.Int       `json:"number"`
	Hash       common.Hash    `json:"hash"`
	ParentHash common.Hash    `json:"parentHash"`
	Timestamp  *big.Int       `json:"timestamp"`
	Miner      common.Address `json:"miner"`
	GasUsed    uint64         `json:"gasUsed"`
	GasLimit   uint64         `json:"gasLimit"`
	Diff       string         `json:"difficulty"`
	TotalDiff  string         `json:"totalDifficulty"`
	Txs        []txStats      `json:"transactions"`
	TxHash     common.Hash    `json:"transactionsRoot"`
	Root       common.Hash    `json:"stateRoot"`
	Uncles     uncleStats     `json:"uncles"`
}

// txStats is the information to report about individual transactions.
type txStats struct {
	Hash common.Hash `json:"hash"`
}

// uncleStats is a custom wrapper around an uncle array to force serializing
// empty arrays instead of returning null for them.
type uncleStats []*types.Header

func (s uncleStats) MarshalJSON() ([]byte, error) {
	if uncles := ([]*types.Header)(s); len(uncles) > 0 {
		return json.Marshal(uncles)
	}
	return []byte("[]"), nil
}

// reportBlock reports the given block to the stats server; if block is nil, the
// current chain head is assembled and reported instead.
func (s *Service) reportBlock(conn *connWrapper, block *types.Block) error {
	// Gather the block details from the header or block chain
	details := s.assembleBlockStats(block)

	// Short circuit if the block detail is not available.
	if details == nil {
		return nil
	}
	// Assemble the block report and send it to the server
	log.Trace("Sending new block to ethstats", "number", details.Number, "hash", details.Hash)

	stats := map[string]interface{}{
		"id":    s.node,
		"block": details,
	}
	report := map[string][]interface{}{
		"emit": {"block", stats},
	}
	return conn.WriteJSON(report)
}

// assembleBlockStats retrieves any required metadata to report a single block
// and assembles the block stats. If block is nil, the current head is processed.
func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
	// Gather the block infos from the local blockchain
	var (
		header *types.Header
		td     *big.Int
		txs    []txStats
		uncles []*types.Header
	)

	// check if backend is a full node
	fullBackend, ok := s.backend.(fullNodeBackend)
	if ok {
		// Retrieve current chain head if no block is given.
		if block == nil {
			head := fullBackend.CurrentBlock()
			block, _ = fullBackend.BlockByNumber(context.Background(), rpc.BlockNumber(head.Number.Uint64()))
		}
		// Short circuit if no block is available. It might happen when
		// the blockchain is reorging.
		if block == nil {
			return nil
		}
		header = block.Header()
		td = fullBackend.GetTd(context.Background(), header.Hash())

		txs = make([]txStats, len(block.Transactions()))
		for i, tx := range block.Transactions() {
			txs[i].Hash = tx.Hash()
		}
		uncles = block.Uncles()
	} else {
		// Light nodes would need on-demand lookups for transactions/uncles, skip
		if block != nil {
			header = block.Header()
		} else {
			header = s.backend.CurrentHeader()
		}
		td = s.backend.GetTd(context.Background(), header.Hash())
		txs = []txStats{}
	}

	// Assemble and return the block stats
	author, _ := s.engine.Author(header)

	return &blockStats{
		Number:     header.Number,
		Hash:       header.Hash(),
		ParentHash: header.ParentHash,
		Timestamp:  new(big.Int).SetUint64(header.Time),
		Miner:      author,
		GasUsed:    header.GasUsed,
		GasLimit:   header.GasLimit,
		Diff:       header.Difficulty.String(),
		TotalDiff:  td.String(),
		Txs:        txs,
		TxHash:     header.TxHash,
		Root:       header.Root,
		Uncles:     uncles,
	}
}

// reportHistory retrieves the most recent batch of blocks and reports it to the
// stats server.
func (s *Service) reportHistory(conn *connWrapper, list []uint64) error {
	// Figure out the indexes that need reporting
	indexes := make([]uint64, 0, historyUpdateRange)
	if len(list) > 0 {
		// Specific indexes requested, send them back in particular
		indexes = append(indexes, list...)
	} else {
		// No indexes requested, send back the top ones
		head := s.backend.CurrentHeader().Number.Int64()
		start := head - historyUpdateRange + 1
		if start < 0 {
			start = 0
		}
		for i := uint64(start); i <= uint64(head); i++ {
			indexes = append(indexes, i)
		}
	}
	// Gather the batch of blocks to report
	history := make([]*blockStats, len(indexes))
	for i, number := range indexes {
		fullBackend, ok := s.backend.(fullNodeBackend)
		// Retrieve the next block if it's known to us
		var block *types.Block
		if ok {
			block, _ = fullBackend.BlockByNumber(context.Background(), rpc.BlockNumber(number)) // TODO ignore error here ?
		} else {
			if header, _ := s.backend.HeaderByNumber(context.Background(), rpc.BlockNumber(number)); header != nil {
				block = types.NewBlockWithHeader(header)
			}
		}
		// If we do have the block, add to the history and continue
		if block != nil {
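			// The slice is filled from the back, so for the default ascending index
			// range the report ends up ordered newest block first.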
			history[len(history)-1-i] = s.assembleBlockStats(block)
			continue
		}
		// Ran out of blocks, cut the report short and send
		history = history[len(history)-i:]
		break
	}
	// Assemble the history report and send it to the server
	if len(history) > 0 {
		log.Trace("Sending historical blocks to ethstats", "first", history[0].Number, "last", history[len(history)-1].Number)
	} else {
		log.Trace("No history to send to stats server")
	}
	stats := map[string]interface{}{
		"id":      s.node,
		"history": history,
	}
	report := map[string][]interface{}{
		"emit": {"history", stats},
	}
	return conn.WriteJSON(report)
}

// pendStats is the information to report about pending transactions.
type pendStats struct {
	Pending int `json:"pending"`
}

// reportPending retrieves the current number of pending transactions and reports
// it to the stats server.
func (s *Service) reportPending(conn *connWrapper) error {
	// Retrieve the pending count from the local blockchain
	pending, _ := s.backend.Stats()
	// Assemble the transaction stats and send it to the server
	log.Trace("Sending pending transactions to ethstats", "count", pending)

	stats := map[string]interface{}{
		"id": s.node,
		"stats": &pendStats{
			Pending: pending,
		},
	}
	report := map[string][]interface{}{
		"emit": {"pending", stats},
	}
	return conn.WriteJSON(report)
}

// nodeStats is the information to report about the local node.
type nodeStats struct {
	Active   bool `json:"active"`
	Syncing  bool `json:"syncing"`
	Peers    int  `json:"peers"`
	GasPrice int  `json:"gasPrice"`
	Uptime   int  `json:"uptime"`
}

// reportStats retrieves various stats about the node at the networking layer
// and reports it to the stats server.
func (s *Service) reportStats(conn *connWrapper) error {
	// Gather the syncing infos from the local backend
	var (
		syncing  bool
		gasprice int
	)
	// check if backend is a full node
	if fullBackend, ok := s.backend.(fullNodeBackend); ok {
		sync := fullBackend.SyncProgress()
		syncing = !sync.Done()

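		// The reported gas price is the suggested tip plus the current base fee (when present), in wei.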
		price, _ := fullBackend.SuggestGasTipCap(context.Background())
		gasprice = int(price.Uint64())
		if basefee := fullBackend.CurrentHeader().BaseFee; basefee != nil {
			gasprice += int(basefee.Uint64())
		}
	} else {
		sync := s.backend.SyncProgress()
		syncing = !sync.Done()
	}
	// Assemble the node stats and send it to the server
	log.Trace("Sending node details to ethstats")

	stats := map[string]interface{}{
		"id": s.node,
		"stats": &nodeStats{
			Active:   true,
			Peers:    s.server.PeerCount(),
			GasPrice: gasprice,
			Syncing:  syncing,
			Uptime:   100,
		},
	}
	report := map[string][]interface{}{
		"emit": {"stats", stats},
	}
	return conn.WriteJSON(report)
}