cmd, dashboard, log: log collection and exploration (#17097)

* cmd, dashboard, internal, log, node: logging feature

* cmd, dashboard, internal, log: requested changes

* dashboard, vendor: gofmt, govendor, use vendored file watcher

* dashboard, log: gofmt -s -w, goimports

* dashboard, log: gosimple
Kurkó Mihály 2018-07-11 10:59:04 +03:00 committed by Péter Szilágyi
parent 2eedbe799f
commit a9835c1816
28 changed files with 11444 additions and 8211 deletions

@@ -199,7 +199,12 @@ func init() {
app.Before = func(ctx *cli.Context) error {
runtime.GOMAXPROCS(runtime.NumCPU())
if err := debug.Setup(ctx); err != nil {
logdir := ""
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
logdir = (&node.Config{DataDir: utils.MakeDataDir(ctx)}).ResolvePath("logs")
}
if err := debug.Setup(ctx, logdir); err != nil {
return err
}
// Cap the cache allowance and tune the garbage collector

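Note: with the dashboard enabled, the rotated log files land under the node's instance
directory. A minimal sketch of what the ResolvePath("logs") call above yields, assuming
the default "geth" instance name and a made-up data directory:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// node.Config.ResolvePath joins the data directory, the instance
	// name and the relative path (values here are hypothetical).
	dataDir := "/home/user/.ethereum"
	logdir := filepath.Join(dataDir, "geth", "logs")
	fmt.Println(logdir) // /home/user/.ethereum/geth/logs
}
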
@@ -432,7 +432,7 @@ pv(1) tool to get a progress bar:
app.Flags = append(app.Flags, swarmmetrics.Flags...)
app.Before = func(ctx *cli.Context) error {
runtime.GOMAXPROCS(runtime.NumCPU())
if err := debug.Setup(ctx); err != nil {
if err := debug.Setup(ctx, ""); err != nil {
return err
}
swarmmetrics.Setup(ctx)

@@ -193,7 +193,7 @@ var (
}
// Dashboard settings
DashboardEnabledFlag = cli.BoolFlag{
Name: "dashboard",
Name: metrics.DashboardEnabledFlag,
Usage: "Enable the dashboard",
}
DashboardAddrFlag = cli.StringFlag{
@@ -1185,7 +1185,7 @@ func RegisterEthService(stack *node.Node, cfg *eth.Config) {
// RegisterDashboardService adds a dashboard to the stack.
func RegisterDashboardService(stack *node.Node, cfg *dashboard.Config, commit string) {
stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
return dashboard.New(cfg, commit)
return dashboard.New(cfg, commit, ctx.ResolvePath("logs")), nil
})
}

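Note: the constructor registered here must produce a node.Service. A trimmed sketch of
that contract, reconstructed from the Protocols/APIs/Start/Stop methods shown further
down in this diff (illustrative, not the node package source itself):

package sketch

import (
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
)

// Service mirrors the interface the dashboard satisfies below.
type Service interface {
	Protocols() []p2p.Protocol
	APIs() []rpc.API
	Start(server *p2p.Server) error
	Stop() error
}
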
File diff suppressed because one or more lines are too long

@@ -68,4 +68,4 @@ export const styles = {
light: {
color: 'rgba(255, 255, 255, 0.54)',
},
}
};

@@ -32,11 +32,12 @@ const styles = {
};
export type Props = {
opened: boolean,
opened: boolean,
changeContent: string => void,
active: string,
content: Content,
shouldUpdate: Object,
active: string,
content: Content,
shouldUpdate: Object,
send: string => void,
};
// Body renders the body of the dashboard.
@@ -52,6 +53,7 @@ class Body extends Component<Props> {
active={this.props.active}
content={this.props.content}
shouldUpdate={this.props.shouldUpdate}
send={this.props.send}
/>
</div>
);

@@ -85,7 +85,7 @@ export type Props = {
class CustomTooltip extends Component<Props> {
render() {
const {active, payload, tooltip} = this.props;
if (!active || typeof tooltip !== 'function') {
if (!active || typeof tooltip !== 'function' || !Array.isArray(payload) || payload.length < 1) {
return null;
}
return tooltip(payload[0].value);

@@ -24,6 +24,7 @@ import Header from './Header';
import Body from './Body';
import {MENU} from '../common';
import type {Content} from '../types/content';
import {inserter as logInserter} from './Logs';
// deepUpdate updates an object corresponding to the given update data, which has
// the same structure as the original object. updater also has the same
@@ -75,8 +76,11 @@ const appender = <T>(limit: number, mapper = replacer) => (update: Array<T>, pre
...update.map(sample => mapper(sample)),
].slice(-limit);
// defaultContent is the initial value of the state content.
const defaultContent: Content = {
// defaultContent returns the initial value of the state content. It needs to be a function in order
// to instantiate the object anew, because it is used by the state and isn't automatically cleaned
// up when a new connection is established. The state is mutated during updates in order to avoid
// unnecessary operations (e.g. copying the log array).
const defaultContent: () => Content = () => ({
general: {
version: null,
commit: null,
@@ -95,10 +99,14 @@ const defaultContent: Content = {
diskRead: [],
diskWrite: [],
},
logs: {
log: [],
logs: {
chunks: [],
endTop: false,
endBottom: true,
topChanged: 0,
bottomChanged: 0,
},
};
});
// updaters contains the state updater functions for each path of the state.
//
@@ -122,9 +130,7 @@ const updaters = {
diskRead: appender(200),
diskWrite: appender(200),
},
logs: {
log: appender(200),
},
logs: logInserter(5),
};
// styles contains the constant styles of the component.
@@ -151,10 +157,11 @@ export type Props = {
};
type State = {
active: string, // active menu
sideBar: boolean, // true if the sidebar is opened
content: Content, // the visualized data
shouldUpdate: Object, // labels for the components, which need to re-render based on the incoming message
active: string, // active menu
sideBar: boolean, // true if the sidebar is opened
content: Content, // the visualized data
shouldUpdate: Object, // labels for the components, which need to re-render based on the incoming message
server: ?WebSocket,
};
// Dashboard is the main component, which renders the whole page, makes connection with the server and
@@ -165,8 +172,9 @@ class Dashboard extends Component<Props, State> {
this.state = {
active: MENU.get('home').id,
sideBar: true,
content: defaultContent,
content: defaultContent(),
shouldUpdate: {},
server: null,
};
}
@@ -181,7 +189,7 @@ class Dashboard extends Component<Props, State> {
// PROD is defined by webpack.
const server = new WebSocket(`${((window.location.protocol === 'https:') ? 'wss://' : 'ws://')}${PROD ? window.location.host : 'localhost:8080'}/api`);
server.onopen = () => {
this.setState({content: defaultContent, shouldUpdate: {}});
this.setState({content: defaultContent(), shouldUpdate: {}, server});
};
server.onmessage = (event) => {
const msg: $Shape<Content> = JSON.parse(event.data);
@@ -192,10 +200,18 @@ class Dashboard extends Component<Props, State> {
this.update(msg);
};
server.onclose = () => {
this.setState({server: null});
setTimeout(this.reconnect, 3000);
};
};
// send sends a message to the server. For safety reasons, the server connection can only be accessed through this function.
send = (msg: string) => {
if (this.state.server != null) {
this.state.server.send(msg);
}
};
// update updates the content corresponding to the incoming message.
update = (msg: $Shape<Content>) => {
this.setState(prevState => ({
@@ -226,6 +242,7 @@ class Dashboard extends Component<Props, State> {
active={this.state.active}
content={this.state.content}
shouldUpdate={this.state.shouldUpdate}
send={this.send}
/>
</div>
);

@@ -0,0 +1,310 @@
// @flow
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
import React, {Component} from 'react';
import List, {ListItem} from 'material-ui/List';
import type {Record, Content, LogsMessage, Logs as LogsType} from '../types/content';
// requestBand defines how wide the top/bottom zones are, e.g. 0.1 means 10% of the container height.
const requestBand = 0.05;
// fieldPadding is a global map of the maximum field value lengths seen so far,
// which allows log contexts to be padded in a smarter way.
const fieldPadding = new Map();
// createChunk creates an HTML-formatted chunk, which displays the given records similarly to
// the server-side terminal.
const createChunk = (records: Array<Record>) => {
let content = '';
records.forEach((record) => {
const {t, ctx} = record;
let {lvl, msg} = record;
let color = '#ce3c23';
switch (lvl) {
case 'trace':
case 'trce':
lvl = 'TRACE';
color = '#3465a4';
break;
case 'debug':
case 'dbug':
lvl = 'DEBUG';
color = '#3d989b';
break;
case 'info':
lvl = 'INFO&nbsp;';
color = '#4c8f0f';
break;
case 'warn':
lvl = 'WARN&nbsp;';
color = '#b79a22';
break;
case 'error':
case 'eror':
lvl = 'ERROR';
color = '#754b70';
break;
case 'crit':
lvl = 'CRIT&nbsp;';
color = '#ce3c23';
break;
default:
lvl = '';
}
const time = new Date(t);
if (lvl === '' || !(time instanceof Date) || isNaN(time) || typeof msg !== 'string' || !Array.isArray(ctx)) {
content += '<span style="color:#ce3c23">Invalid log record</span><br />';
return;
}
if (ctx.length > 0) {
msg += '&nbsp;'.repeat(Math.max(40 - msg.length, 0));
}
const month = `0${time.getMonth() + 1}`.slice(-2);
const date = `0${time.getDate()}`.slice(-2);
const hours = `0${time.getHours()}`.slice(-2);
const minutes = `0${time.getMinutes()}`.slice(-2);
const seconds = `0${time.getSeconds()}`.slice(-2);
content += `<span style="color:${color}">${lvl}</span>[${month}-${date}|${hours}:${minutes}:${seconds}] ${msg}`;
for (let i = 0; i < ctx.length; i += 2) {
const key = ctx[i];
const val = ctx[i + 1];
let padding = fieldPadding.get(key);
if (typeof padding !== 'number' || padding < val.length) {
padding = val.length;
fieldPadding.set(key, padding);
}
let p = '';
if (i < ctx.length - 2) {
p = '&nbsp;'.repeat(padding - val.length);
}
content += ` <span style="color:${color}">${key}</span>=${val}${p}`;
}
content += '<br />';
});
return content;
};
// inserter is a state updater function for the main component, which inserts the new log chunk into the chunk array.
// limit is the maximum length of the chunk array, used to prevent the browser from running out of memory.
export const inserter = (limit: number) => (update: LogsMessage, prev: LogsType) => {
prev.topChanged = 0;
prev.bottomChanged = 0;
if (!Array.isArray(update.chunk) || update.chunk.length < 1) {
return prev;
}
if (!Array.isArray(prev.chunks)) {
prev.chunks = [];
}
const content = createChunk(update.chunk);
if (!update.source) {
// In case of stream chunk.
if (!prev.endBottom) {
return prev;
}
if (prev.chunks.length < 1) {
// This should never happen, because the first chunk is always a non-stream chunk.
return [{content, name: '00000000000000.log'}];
}
prev.chunks[prev.chunks.length - 1].content += content;
prev.bottomChanged = 1;
return prev;
}
const chunk = {
content,
name: update.source.name,
};
if (prev.chunks.length > 0 && update.source.name < prev.chunks[0].name) {
if (update.source.last) {
prev.endTop = true;
}
if (prev.chunks.length >= limit) {
prev.endBottom = false;
prev.chunks.splice(limit - 1, prev.chunks.length - limit + 1);
prev.bottomChanged = -1;
}
prev.chunks = [chunk, ...prev.chunks];
prev.topChanged = 1;
return prev;
}
if (update.source.last) {
prev.endBottom = true;
}
if (prev.chunks.length >= limit) {
prev.endTop = false;
prev.chunks.splice(0, prev.chunks.length - limit + 1);
prev.topChanged = -1;
}
prev.chunks = [...prev.chunks, chunk];
prev.bottomChanged = 1;
return prev;
};
// styles contains the constant styles of the component.
const styles = {
logListItem: {
padding: 0,
},
logChunk: {
color: 'white',
fontFamily: 'monospace',
whiteSpace: 'nowrap',
width: 0,
},
};
export type Props = {
container: Object,
content: Content,
shouldUpdate: Object,
send: string => void,
};
type State = {
requestAllowed: boolean,
};
// Logs renders the log page.
class Logs extends Component<Props, State> {
constructor(props: Props) {
super(props);
this.content = React.createRef();
this.state = {
requestAllowed: true,
};
}
componentDidMount() {
const {container} = this.props;
container.scrollTop = container.scrollHeight - container.clientHeight;
}
// onScroll is triggered by the parent component's scroll event, and sends requests if the scroll position is
// at the top or at the bottom.
onScroll = () => {
if (!this.state.requestAllowed || typeof this.content === 'undefined') {
return;
}
const {logs} = this.props.content;
if (logs.chunks.length < 1) {
return;
}
if (this.atTop()) {
if (!logs.endTop) {
this.setState({requestAllowed: false});
this.props.send(JSON.stringify({
Logs: {
Name: logs.chunks[0].name,
Past: true,
},
}));
}
} else if (this.atBottom()) {
if (!logs.endBottom) {
this.setState({requestAllowed: false});
this.props.send(JSON.stringify({
Logs: {
Name: logs.chunks[logs.chunks.length - 1].name,
Past: false,
},
}));
}
}
};
// atTop checks if the scroll position is at the top of the container.
atTop = () => this.props.container.scrollTop <= this.props.container.scrollHeight * requestBand;
// atBottom checks if the scroll position is at the bottom of the container.
atBottom = () => {
const {container} = this.props;
return container.scrollHeight - container.scrollTop <=
container.clientHeight + container.scrollHeight * requestBand;
};
// beforeUpdate is called by the parent component; it saves the previous scroll position
// and the height of the first log chunk, which may be deleted during the insertion.
beforeUpdate = () => {
let firstHeight = 0;
if (this.content && this.content.children[0] && this.content.children[0].children[0]) {
firstHeight = this.content.children[0].children[0].clientHeight;
}
return {
scrollTop: this.props.container.scrollTop,
firstHeight,
};
};
// didUpdate is called by the parent component, which provides the container. Sends the first request if the
// visible part of the container isn't full, and resets the scroll position in order to avoid jumping when a
// new chunk is inserted.
didUpdate = (prevProps, prevState, snapshot) => {
if (typeof this.props.shouldUpdate.logs === 'undefined' || typeof this.content === 'undefined' || snapshot === null) {
return;
}
const {logs} = this.props.content;
const {container} = this.props;
if (typeof container === 'undefined' || logs.chunks.length < 1) {
return;
}
if (this.content.clientHeight < container.clientHeight) {
// Only enters here at the beginning, when there aren't enough logs to fill the container
// and the scroll bar doesn't appear.
if (!logs.endTop) {
this.setState({requestAllowed: false});
this.props.send(JSON.stringify({
Logs: {
Name: logs.chunks[0].name,
Past: true,
},
}));
}
return;
}
const chunks = this.content.children[0].children;
let {scrollTop} = snapshot;
if (logs.topChanged > 0) {
scrollTop += chunks[0].clientHeight;
} else if (logs.bottomChanged > 0) {
if (logs.topChanged < 0) {
scrollTop -= snapshot.firstHeight;
} else if (logs.endBottom && this.atBottom()) {
scrollTop = container.scrollHeight - container.clientHeight;
}
}
container.scrollTop = scrollTop;
this.setState({requestAllowed: true});
};
render() {
return (
<div ref={(ref) => { this.content = ref; }}>
<List>
{this.props.content.logs.chunks.map((c, index) => (
<ListItem style={styles.logListItem} key={index}>
<div style={styles.logChunk} dangerouslySetInnerHTML={{__html: c.content}} />
</ListItem>
))}
</List>
</div>
);
}
}
export default Logs;

@@ -21,6 +21,7 @@ import React, {Component} from 'react';
import withStyles from 'material-ui/styles/withStyles';
import {MENU} from '../common';
import Logs from './Logs';
import Footer from './Footer';
import type {Content} from '../types/content';
@@ -32,7 +33,7 @@ const styles = {
width: '100%',
},
content: {
flex: 1,
flex: 1,
overflow: 'auto',
},
};
@@ -46,14 +47,40 @@ const themeStyles = theme => ({
});
export type Props = {
classes: Object,
active: string,
content: Content,
classes: Object,
active: string,
content: Content,
shouldUpdate: Object,
send: string => void,
};
// Main renders the chosen content.
class Main extends Component<Props> {
constructor(props) {
super(props);
this.container = React.createRef();
this.content = React.createRef();
}
getSnapshotBeforeUpdate() {
if (this.content && typeof this.content.beforeUpdate === 'function') {
return this.content.beforeUpdate();
}
return null;
}
componentDidUpdate(prevProps, prevState, snapshot) {
if (this.content && typeof this.content.didUpdate === 'function') {
this.content.didUpdate(prevProps, prevState, snapshot);
}
}
onScroll = () => {
if (this.content && typeof this.content.onScroll === 'function') {
this.content.onScroll();
}
};
render() {
const {
classes, active, content, shouldUpdate,
@@ -69,12 +96,27 @@ class Main extends Component<Props> {
children = <div>Work in progress.</div>;
break;
case MENU.get('logs').id:
children = <div>{content.logs.log.map((log, index) => <div key={index}>{log}</div>)}</div>;
children = (
<Logs
ref={(ref) => { this.content = ref; }}
container={this.container}
send={this.props.send}
content={this.props.content}
shouldUpdate={shouldUpdate}
/>
);
}
return (
<div style={styles.wrapper}>
<div className={classes.content} style={styles.content}>{children}</div>
<div
className={classes.content}
style={styles.content}
ref={(ref) => { this.container = ref; }}
onScroll={this.onScroll}
>
{children}
</div>
<Footer
general={content.general}
system={content.system}

@@ -14,6 +14,9 @@
::-webkit-scrollbar-thumb {
background: #212121;
}
::-webkit-scrollbar-corner {
background: transparent;
}
</style>
</head>
<body style="height: 100%; margin: 0">

@@ -18,24 +18,24 @@
export type Content = {
general: General,
home: Home,
chain: Chain,
txpool: TxPool,
home: Home,
chain: Chain,
txpool: TxPool,
network: Network,
system: System,
logs: Logs,
system: System,
logs: Logs,
};
export type ChartEntries = Array<ChartEntry>;
export type ChartEntry = {
time: Date,
time: Date,
value: number,
};
export type General = {
version: ?string,
commit: ?string,
version: ?string,
commit: ?string,
};
export type Home = {
@@ -55,16 +55,42 @@ export type Network = {
};
export type System = {
activeMemory: ChartEntries,
virtualMemory: ChartEntries,
networkIngress: ChartEntries,
networkEgress: ChartEntries,
processCPU: ChartEntries,
systemCPU: ChartEntries,
diskRead: ChartEntries,
diskWrite: ChartEntries,
activeMemory: ChartEntries,
virtualMemory: ChartEntries,
networkIngress: ChartEntries,
networkEgress: ChartEntries,
processCPU: ChartEntries,
systemCPU: ChartEntries,
diskRead: ChartEntries,
diskWrite: ChartEntries,
};
export type Record = {
t: string,
lvl: Object,
msg: string,
ctx: Array<string>
};
export type Chunk = {
content: string,
name: string,
};
export type Logs = {
log: Array<string>,
chunks: Array<Chunk>,
endTop: boolean,
endBottom: boolean,
topChanged: number,
bottomChanged: number,
};
export type LogsMessage = {
source: ?LogFile,
chunk: Array<Record>,
};
export type LogFile = {
name: string,
last: boolean,
};

@@ -2,70 +2,77 @@
# yarn lockfile v1
"@babel/code-frame@7.0.0-beta.40", "@babel/code-frame@^7.0.0-beta.40":
version "7.0.0-beta.40"
resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.0.0-beta.40.tgz#37e2b0cf7c56026b4b21d3927cadf81adec32ac6"
"@babel/code-frame@7.0.0-beta.44":
version "7.0.0-beta.44"
resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.0.0-beta.44.tgz#2a02643368de80916162be70865c97774f3adbd9"
dependencies:
"@babel/highlight" "7.0.0-beta.40"
"@babel/highlight" "7.0.0-beta.44"
"@babel/generator@7.0.0-beta.40":
version "7.0.0-beta.40"
resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.0.0-beta.40.tgz#ab61f9556f4f71dbd1138949c795bb9a21e302ea"
"@babel/generator@7.0.0-beta.44":
version "7.0.0-beta.44"
resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.0.0-beta.44.tgz#c7e67b9b5284afcf69b309b50d7d37f3e5033d42"
dependencies:
"@babel/types" "7.0.0-beta.40"
"@babel/types" "7.0.0-beta.44"
jsesc "^2.5.1"
lodash "^4.2.0"
source-map "^0.5.0"
trim-right "^1.0.1"
"@babel/helper-function-name@7.0.0-beta.40":
version "7.0.0-beta.40"
resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.0.0-beta.40.tgz#9d033341ab16517f40d43a73f2d81fc431ccd7b6"
"@babel/helper-function-name@7.0.0-beta.44":
version "7.0.0-beta.44"
resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.0.0-beta.44.tgz#e18552aaae2231100a6e485e03854bc3532d44dd"
dependencies:
"@babel/helper-get-function-arity" "7.0.0-beta.40"
"@babel/template" "7.0.0-beta.40"
"@babel/types" "7.0.0-beta.40"
"@babel/helper-get-function-arity" "7.0.0-beta.44"
"@babel/template" "7.0.0-beta.44"
"@babel/types" "7.0.0-beta.44"
"@babel/helper-get-function-arity@7.0.0-beta.40":
version "7.0.0-beta.40"
resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.0.0-beta.40.tgz#ac0419cf067b0ec16453e1274f03878195791c6e"
"@babel/helper-get-function-arity@7.0.0-beta.44":
version "7.0.0-beta.44"
resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.0.0-beta.44.tgz#d03ca6dd2b9f7b0b1e6b32c56c72836140db3a15"
dependencies:
"@babel/types" "7.0.0-beta.40"
"@babel/types" "7.0.0-beta.44"
"@babel/highlight@7.0.0-beta.40":
version "7.0.0-beta.40"
resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.0.0-beta.40.tgz#b43d67d76bf46e1d10d227f68cddcd263786b255"
"@babel/helper-split-export-declaration@7.0.0-beta.44":
version "7.0.0-beta.44"
resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.0.0-beta.44.tgz#c0b351735e0fbcb3822c8ad8db4e583b05ebd9dc"
dependencies:
"@babel/types" "7.0.0-beta.44"
"@babel/highlight@7.0.0-beta.44":
version "7.0.0-beta.44"
resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.0.0-beta.44.tgz#18c94ce543916a80553edcdcf681890b200747d5"
dependencies:
chalk "^2.0.0"
esutils "^2.0.2"
js-tokens "^3.0.0"
"@babel/template@7.0.0-beta.40":
version "7.0.0-beta.40"
resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.0.0-beta.40.tgz#034988c6424eb5c3268fe6a608626de1f4410fc8"
"@babel/template@7.0.0-beta.44":
version "7.0.0-beta.44"
resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.0.0-beta.44.tgz#f8832f4fdcee5d59bf515e595fc5106c529b394f"
dependencies:
"@babel/code-frame" "7.0.0-beta.40"
"@babel/types" "7.0.0-beta.40"
babylon "7.0.0-beta.40"
"@babel/code-frame" "7.0.0-beta.44"
"@babel/types" "7.0.0-beta.44"
babylon "7.0.0-beta.44"
lodash "^4.2.0"
"@babel/traverse@^7.0.0-beta.40":
version "7.0.0-beta.40"
resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.0.0-beta.40.tgz#d140e449b2e093ef9fe1a2eecc28421ffb4e521e"
"@babel/traverse@7.0.0-beta.44":
version "7.0.0-beta.44"
resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.0.0-beta.44.tgz#a970a2c45477ad18017e2e465a0606feee0d2966"
dependencies:
"@babel/code-frame" "7.0.0-beta.40"
"@babel/generator" "7.0.0-beta.40"
"@babel/helper-function-name" "7.0.0-beta.40"
"@babel/types" "7.0.0-beta.40"
babylon "7.0.0-beta.40"
debug "^3.0.1"
"@babel/code-frame" "7.0.0-beta.44"
"@babel/generator" "7.0.0-beta.44"
"@babel/helper-function-name" "7.0.0-beta.44"
"@babel/helper-split-export-declaration" "7.0.0-beta.44"
"@babel/types" "7.0.0-beta.44"
babylon "7.0.0-beta.44"
debug "^3.1.0"
globals "^11.1.0"
invariant "^2.2.0"
lodash "^4.2.0"
"@babel/types@7.0.0-beta.40", "@babel/types@^7.0.0-beta.40":
version "7.0.0-beta.40"
resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.0.0-beta.40.tgz#25c3d7aae14126abe05fcb098c65a66b6d6b8c14"
"@babel/types@7.0.0-beta.44":
version "7.0.0-beta.44"
resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.0.0-beta.44.tgz#6b1b164591f77dec0a0342aca995f2d046b3a757"
dependencies:
esutils "^2.0.2"
lodash "^4.2.0"
@@ -376,8 +383,8 @@ babel-code-frame@^6.22.0, babel-code-frame@^6.26.0:
js-tokens "^3.0.2"
babel-core@^6.26.0:
version "6.26.0"
resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.26.0.tgz#af32f78b31a6fcef119c87b0fd8d9753f03a0bb8"
version "6.26.3"
resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.26.3.tgz#b2e2f09e342d0f0c88e2f02e067794125e75c207"
dependencies:
babel-code-frame "^6.26.0"
babel-generator "^6.26.0"
@@ -389,24 +396,24 @@ babel-core@^6.26.0:
babel-traverse "^6.26.0"
babel-types "^6.26.0"
babylon "^6.18.0"
convert-source-map "^1.5.0"
debug "^2.6.8"
convert-source-map "^1.5.1"
debug "^2.6.9"
json5 "^0.5.1"
lodash "^4.17.4"
minimatch "^3.0.4"
path-is-absolute "^1.0.1"
private "^0.1.7"
private "^0.1.8"
slash "^1.0.0"
source-map "^0.5.6"
source-map "^0.5.7"
babel-eslint@^8.2.1:
version "8.2.2"
resolved "https://registry.yarnpkg.com/babel-eslint/-/babel-eslint-8.2.2.tgz#1102273354c6f0b29b4ea28a65f97d122296b68b"
version "8.2.3"
resolved "https://registry.yarnpkg.com/babel-eslint/-/babel-eslint-8.2.3.tgz#1a2e6681cc9bc4473c32899e59915e19cd6733cf"
dependencies:
"@babel/code-frame" "^7.0.0-beta.40"
"@babel/traverse" "^7.0.0-beta.40"
"@babel/types" "^7.0.0-beta.40"
babylon "^7.0.0-beta.40"
"@babel/code-frame" "7.0.0-beta.44"
"@babel/traverse" "7.0.0-beta.44"
"@babel/types" "7.0.0-beta.44"
babylon "7.0.0-beta.44"
eslint-scope "~3.7.1"
eslint-visitor-keys "^1.0.0"
@@ -550,8 +557,8 @@ babel-helpers@^6.24.1:
babel-template "^6.24.1"
babel-loader@^7.1.2:
version "7.1.3"
resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-7.1.3.tgz#ff5b440da716e9153abb946251a9ab7670037b16"
version "7.1.4"
resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-7.1.4.tgz#e3463938bd4e6d55d1c174c5485d406a188ed015"
dependencies:
find-cache-dir "^1.0.0"
loader-utils "^1.0.2"
@@ -1081,9 +1088,9 @@ babel-types@^6.19.0, babel-types@^6.24.1, babel-types@^6.26.0:
lodash "^4.17.4"
to-fast-properties "^1.0.3"
babylon@7.0.0-beta.40, babylon@^7.0.0-beta.40:
version "7.0.0-beta.40"
resolved "https://registry.yarnpkg.com/babylon/-/babylon-7.0.0-beta.40.tgz#91fc8cd56d5eb98b28e6fde41045f2957779940a"
babylon@7.0.0-beta.44:
version "7.0.0-beta.44"
resolved "https://registry.yarnpkg.com/babylon/-/babylon-7.0.0-beta.44.tgz#89159e15e6e30c5096e22d738d8c0af8a0e8ca1d"
babylon@^6.18.0:
version "6.18.0"
@@ -1413,7 +1420,15 @@ chalk@^1.1.3:
strip-ansi "^3.0.0"
supports-color "^2.0.0"
chalk@^2.0.0, chalk@^2.1.0, chalk@^2.3.1:
chalk@^2.0.0:
version "2.4.1"
resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.1.tgz#18c49ab16a037b6eb0152cc83e3471338215b66e"
dependencies:
ansi-styles "^3.2.1"
escape-string-regexp "^1.0.5"
supports-color "^5.3.0"
chalk@^2.1.0, chalk@^2.3.1:
version "2.3.2"
resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.3.2.tgz#250dc96b07491bfd601e648d66ddf5f60c7a5c65"
dependencies:
@@ -1646,7 +1661,7 @@ content-type@~1.0.4:
version "1.0.4"
resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b"
convert-source-map@^1.5.0:
convert-source-map@^1.5.1:
version "1.5.1"
resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.1.tgz#b8278097b9bc229365de5c62cf5fcaed8b5599e5"
@@ -1671,8 +1686,8 @@ core-js@^1.0.0:
resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636"
core-js@^2.4.0, core-js@^2.5.0:
version "2.5.3"
resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.5.3.tgz#8acc38345824f16d8365b7c9b4259168e8ed603e"
version "2.5.7"
resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.5.7.tgz#f972608ff0cead68b841a16a932d0b183791814e"
core-util-is@1.0.2, core-util-is@~1.0.0:
version "1.0.2"
@@ -1914,7 +1929,7 @@ debug@2.6.9, debug@^2.2.0, debug@^2.3.3, debug@^2.6.6, debug@^2.6.8, debug@^2.6.
dependencies:
ms "2.0.0"
debug@^3.0.1, debug@^3.1.0:
debug@^3.1.0:
version "3.1.0"
resolved "https://registry.yarnpkg.com/debug/-/debug-3.1.0.tgz#5bb5a0672628b64149566ba16819e61518c67261"
dependencies:
@@ -2916,10 +2931,14 @@ global@~4.3.0:
min-document "^2.19.0"
process "~0.5.1"
globals@^11.0.1, globals@^11.1.0:
globals@^11.0.1:
version "11.3.0"
resolved "https://registry.yarnpkg.com/globals/-/globals-11.3.0.tgz#e04fdb7b9796d8adac9c8f64c14837b2313378b0"
globals@^11.1.0:
version "11.5.0"
resolved "https://registry.yarnpkg.com/globals/-/globals-11.5.0.tgz#6bc840de6771173b191f13d3a9c94d441ee92642"
globals@^9.18.0:
version "9.18.0"
resolved "https://registry.yarnpkg.com/globals/-/globals-9.18.0.tgz#aa3896b3e69b487f17e31ed2143d69a8e30c2d8a"
@@ -3176,10 +3195,16 @@ hyphenate-style-name@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/hyphenate-style-name/-/hyphenate-style-name-1.0.2.tgz#31160a36930adaf1fc04c6074f7eb41465d4ec4b"
iconv-lite@0.4.19, iconv-lite@^0.4.17, iconv-lite@~0.4.13:
iconv-lite@0.4.19, iconv-lite@^0.4.17:
version "0.4.19"
resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.19.tgz#f7468f60135f5e5dad3399c0a81be9a1603a082b"
iconv-lite@~0.4.13:
version "0.4.23"
resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.23.tgz#297871f63be507adcfbfca715d0cd0eed84e9a63"
dependencies:
safer-buffer ">= 2.1.2 < 3"
icss-replace-symbols@^1.1.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/icss-replace-symbols/-/icss-replace-symbols-1.1.0.tgz#06ea6f83679a7749e386cfe1fe812ae5db223ded"
@@ -3272,8 +3297,8 @@ interpret@^1.0.0:
resolved "https://registry.yarnpkg.com/interpret/-/interpret-1.1.0.tgz#7ed1b1410c6a0e0f78cf95d3b8440c63f78b8614"
invariant@^2.2.0, invariant@^2.2.2:
version "2.2.3"
resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.3.tgz#1a827dfde7dcbd7c323f0ca826be8fa7c5e9d688"
version "2.2.4"
resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6"
dependencies:
loose-envify "^1.0.0"
@@ -3863,10 +3888,14 @@ lodash.uniq@^4.5.0:
version "4.5.0"
resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773"
lodash@^4.14.0, lodash@^4.15.0, lodash@^4.17.2, lodash@^4.17.4, lodash@^4.2.0, lodash@^4.3.0, lodash@~4.17.4:
lodash@^4.14.0, lodash@^4.15.0, lodash@^4.17.2, lodash@^4.3.0, lodash@~4.17.4:
version "4.17.5"
resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.5.tgz#99a92d65c0272debe8c96b6057bc8fbfa3bed511"
lodash@^4.17.4, lodash@^4.2.0:
version "4.17.10"
resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.10.tgz#1b7793cf7259ea38fb3661d4d38b3260af8ae4e7"
loglevel@^1.4.1:
version "1.6.1"
resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.6.1.tgz#e0fc95133b6ef276cdc8887cdaf24aa6f156f8fa"
@@ -3904,8 +3933,8 @@ macaddress@^0.2.8:
resolved "https://registry.yarnpkg.com/macaddress/-/macaddress-0.2.8.tgz#5904dc537c39ec6dbefeae902327135fa8511f12"
make-dir@^1.0.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-1.2.0.tgz#6d6a49eead4aae296c53bbf3a1a008bd6c89469b"
version "1.3.0"
resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-1.3.0.tgz#79c1033b80515bd6d24ec9933e860ca75ee27f0c"
dependencies:
pify "^3.0.0"
@@ -4895,7 +4924,7 @@ preserve@^0.2.0:
version "0.2.0"
resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b"
private@^0.1.6, private@^0.1.7:
private@^0.1.6, private@^0.1.8:
version "0.1.8"
resolved "https://registry.yarnpkg.com/private/-/private-0.1.8.tgz#2381edb3689f7a53d653190060fcf822d2f368ff"
@@ -5061,8 +5090,8 @@ rc@^1.1.7:
strip-json-comments "~2.0.1"
react-dom@^16.2.0:
version "16.2.0"
resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-16.2.0.tgz#69003178601c0ca19b709b33a83369fe6124c044"
version "16.4.0"
resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-16.4.0.tgz#099f067dd5827ce36a29eaf9a6cdc7cbf6216b1e"
dependencies:
fbjs "^0.8.16"
loose-envify "^1.1.0"
@@ -5138,8 +5167,8 @@ react-transition-group@^2.2.1:
warning "^3.0.0"
react@^16.2.0:
version "16.2.0"
resolved "https://registry.yarnpkg.com/react/-/react-16.2.0.tgz#a31bd2dab89bff65d42134fa187f24d054c273ba"
version "16.4.0"
resolved "https://registry.yarnpkg.com/react/-/react-16.4.0.tgz#402c2db83335336fba1962c08b98c6272617d585"
dependencies:
fbjs "^0.8.16"
loose-envify "^1.1.0"
@@ -5471,6 +5500,10 @@ safe-regex@^1.1.0:
dependencies:
ret "~0.1.10"
"safer-buffer@>= 2.1.2 < 3":
version "2.1.2"
resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a"
sax@~1.2.1:
version "1.2.4"
resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9"
@@ -5914,12 +5947,18 @@ supports-color@^4.2.1:
dependencies:
has-flag "^2.0.0"
supports-color@^5.1.0, supports-color@^5.2.0, supports-color@^5.3.0:
supports-color@^5.1.0, supports-color@^5.2.0:
version "5.3.0"
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.3.0.tgz#5b24ac15db80fa927cf5227a4a33fd3c4c7676c0"
dependencies:
has-flag "^3.0.0"
supports-color@^5.3.0:
version "5.4.0"
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.4.0.tgz#1c6b337402c2137605efe19f10fec390f6faab54"
dependencies:
has-flag "^3.0.0"
svgo@^0.7.0:
version "0.7.2"
resolved "https://registry.yarnpkg.com/svgo/-/svgo-0.7.2.tgz#9f5772413952135c6fefbf40afe6a4faa88b4bb5"
@@ -6108,8 +6147,8 @@ typedarray@^0.0.6:
resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777"
ua-parser-js@^0.7.9:
version "0.7.17"
resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.17.tgz#e9ec5f9498b9ec910e7ae3ac626a805c4d09ecac"
version "0.7.18"
resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.18.tgz#a7bfd92f56edfb117083b69e31d2aa8882d4b1ed"
uglify-js@^2.8.29:
version "2.8.29"
@@ -6395,8 +6434,8 @@ websocket-extensions@>=0.1.1:
resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.3.tgz#5d2ff22977003ec687a4b87073dfbbac146ccf29"
whatwg-fetch@>=0.10.0:
version "2.0.3"
resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-2.0.3.tgz#9c84ec2dcf68187ff00bc64e1274b442176e1c84"
version "2.0.4"
resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-2.0.4.tgz#dde6a5df315f9d39991aa17621853d720b85566f"
whet.extend@~0.9.9:
version "0.9.9"

@@ -32,12 +32,15 @@ import (
"sync/atomic"
"time"
"io"
"github.com/elastic/gosigar"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/mohae/deepcopy"
"golang.org/x/net/websocket"
)
@@ -60,10 +63,11 @@ type Dashboard struct {
listener net.Listener
conns map[uint32]*client // Currently live websocket connections
charts *SystemMessage
commit string
history *Message
lock sync.RWMutex // Lock protecting the dashboard's internals
logdir string
quit chan chan error // Channel used for graceful exit
wg sync.WaitGroup
}
@@ -71,30 +75,39 @@ type Dashboard struct {
// client represents an active websocket connection with a remote browser.
type client struct {
conn *websocket.Conn // Particular live websocket connection
msg chan Message // Message queue for the update messages
msg chan *Message // Message queue for the update messages
logger log.Logger // Logger for the particular live websocket connection
}
// New creates a new dashboard instance with the given configuration.
func New(config *Config, commit string) (*Dashboard, error) {
func New(config *Config, commit string, logdir string) *Dashboard {
now := time.Now()
db := &Dashboard{
versionMeta := ""
if len(params.VersionMeta) > 0 {
versionMeta = fmt.Sprintf(" (%s)", params.VersionMeta)
}
return &Dashboard{
conns: make(map[uint32]*client),
config: config,
quit: make(chan chan error),
charts: &SystemMessage{
ActiveMemory: emptyChartEntries(now, activeMemorySampleLimit, config.Refresh),
VirtualMemory: emptyChartEntries(now, virtualMemorySampleLimit, config.Refresh),
NetworkIngress: emptyChartEntries(now, networkIngressSampleLimit, config.Refresh),
NetworkEgress: emptyChartEntries(now, networkEgressSampleLimit, config.Refresh),
ProcessCPU: emptyChartEntries(now, processCPUSampleLimit, config.Refresh),
SystemCPU: emptyChartEntries(now, systemCPUSampleLimit, config.Refresh),
DiskRead: emptyChartEntries(now, diskReadSampleLimit, config.Refresh),
DiskWrite: emptyChartEntries(now, diskWriteSampleLimit, config.Refresh),
history: &Message{
General: &GeneralMessage{
Commit: commit,
Version: fmt.Sprintf("v%d.%d.%d%s", params.VersionMajor, params.VersionMinor, params.VersionPatch, versionMeta),
},
System: &SystemMessage{
ActiveMemory: emptyChartEntries(now, activeMemorySampleLimit, config.Refresh),
VirtualMemory: emptyChartEntries(now, virtualMemorySampleLimit, config.Refresh),
NetworkIngress: emptyChartEntries(now, networkIngressSampleLimit, config.Refresh),
NetworkEgress: emptyChartEntries(now, networkEgressSampleLimit, config.Refresh),
ProcessCPU: emptyChartEntries(now, processCPUSampleLimit, config.Refresh),
SystemCPU: emptyChartEntries(now, systemCPUSampleLimit, config.Refresh),
DiskRead: emptyChartEntries(now, diskReadSampleLimit, config.Refresh),
DiskWrite: emptyChartEntries(now, diskWriteSampleLimit, config.Refresh),
},
},
commit: commit,
logdir: logdir,
}
return db, nil
}
// emptyChartEntries returns a ChartEntry array containing limit number of empty samples.
@@ -108,19 +121,20 @@ func emptyChartEntries(t time.Time, limit int, refresh time.Duration) ChartEntri
return ce
}
// Protocols is a meaningless implementation of node.Service.
// Protocols implements the node.Service interface.
func (db *Dashboard) Protocols() []p2p.Protocol { return nil }
// APIs is a meaningless implementation of node.Service.
// APIs implements the node.Service interface.
func (db *Dashboard) APIs() []rpc.API { return nil }
// Start implements node.Service, starting the data collection thread and the listening server of the dashboard.
// Start starts the data collection thread and the listening server of the dashboard.
// Implements the node.Service interface.
func (db *Dashboard) Start(server *p2p.Server) error {
log.Info("Starting dashboard")
db.wg.Add(2)
go db.collectData()
go db.collectLogs() // In case of removing this line change 2 back to 1 in wg.Add.
go db.streamLogs()
http.HandleFunc("/", db.webHandler)
http.Handle("/api", websocket.Handler(db.apiHandler))
@@ -136,7 +150,8 @@ func (db *Dashboard) Start(server *p2p.Server) error {
return nil
}
// Stop implements node.Service, stopping the data collection thread and the connection listener of the dashboard.
// Stop stops the data collection thread and the connection listener of the dashboard.
// Implements the node.Service interface.
func (db *Dashboard) Stop() error {
// Close the connection listener.
var errs []error
@@ -194,7 +209,7 @@ func (db *Dashboard) apiHandler(conn *websocket.Conn) {
id := atomic.AddUint32(&nextID, 1)
client := &client{
conn: conn,
msg: make(chan Message, 128),
msg: make(chan *Message, 128),
logger: log.New("id", id),
}
done := make(chan struct{})
@@ -218,29 +233,10 @@ func (db *Dashboard) apiHandler(conn *websocket.Conn) {
}
}()
versionMeta := ""
if len(params.VersionMeta) > 0 {
versionMeta = fmt.Sprintf(" (%s)", params.VersionMeta)
}
// Send the past data.
client.msg <- Message{
General: &GeneralMessage{
Version: fmt.Sprintf("v%d.%d.%d%s", params.VersionMajor, params.VersionMinor, params.VersionPatch, versionMeta),
Commit: db.commit,
},
System: &SystemMessage{
ActiveMemory: db.charts.ActiveMemory,
VirtualMemory: db.charts.VirtualMemory,
NetworkIngress: db.charts.NetworkIngress,
NetworkEgress: db.charts.NetworkEgress,
ProcessCPU: db.charts.ProcessCPU,
SystemCPU: db.charts.SystemCPU,
DiskRead: db.charts.DiskRead,
DiskWrite: db.charts.DiskWrite,
},
}
// Start tracking the connection and drop at connection loss.
db.lock.Lock()
// Send the past data.
client.msg <- deepcopy.Copy(db.history).(*Message)
// Start tracking the connection and drop at connection loss.
db.conns[id] = client
db.lock.Unlock()
defer func() {
@@ -249,29 +245,53 @@ func (db *Dashboard) apiHandler(conn *websocket.Conn) {
db.lock.Unlock()
}()
for {
fail := []byte{}
if _, err := conn.Read(fail); err != nil {
r := new(Request)
if err := websocket.JSON.Receive(conn, r); err != nil {
if err != io.EOF {
client.logger.Warn("Failed to receive request", "err", err)
}
close(done)
return
}
// Ignore all messages
if r.Logs != nil {
db.handleLogRequest(r.Logs, client)
}
}
}
// meterCollector returns a function, which retrieves a specific meter.
func meterCollector(name string) func() int64 {
if metric := metrics.DefaultRegistry.Get(name); metric != nil {
m := metric.(metrics.Meter)
return func() int64 {
return m.Count()
}
}
return func() int64 {
return 0
}
}
// collectData collects the required data to plot on the dashboard.
func (db *Dashboard) collectData() {
defer db.wg.Done()
systemCPUUsage := gosigar.Cpu{}
systemCPUUsage.Get()
var (
mem runtime.MemStats
prevNetworkIngress = metrics.DefaultRegistry.Get("p2p/InboundTraffic").(metrics.Meter).Count()
prevNetworkEgress = metrics.DefaultRegistry.Get("p2p/OutboundTraffic").(metrics.Meter).Count()
collectNetworkIngress = meterCollector("p2p/InboundTraffic")
collectNetworkEgress = meterCollector("p2p/OutboundTraffic")
collectDiskRead = meterCollector("eth/db/chaindata/disk/read")
collectDiskWrite = meterCollector("eth/db/chaindata/disk/write")
prevNetworkIngress = collectNetworkIngress()
prevNetworkEgress = collectNetworkEgress()
prevProcessCPUTime = getProcessCPUTime()
prevSystemCPUUsage = systemCPUUsage
prevDiskRead = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/read").(metrics.Meter).Count()
prevDiskWrite = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/write").(metrics.Meter).Count()
prevDiskRead = collectDiskRead()
prevDiskWrite = collectDiskWrite()
frequency = float64(db.config.Refresh / time.Second)
numCPU = float64(runtime.NumCPU())
@@ -285,12 +305,12 @@ func (db *Dashboard) collectData() {
case <-time.After(db.config.Refresh):
systemCPUUsage.Get()
var (
curNetworkIngress = metrics.DefaultRegistry.Get("p2p/InboundTraffic").(metrics.Meter).Count()
curNetworkEgress = metrics.DefaultRegistry.Get("p2p/OutboundTraffic").(metrics.Meter).Count()
curNetworkIngress = collectNetworkIngress()
curNetworkEgress = collectNetworkEgress()
curProcessCPUTime = getProcessCPUTime()
curSystemCPUUsage = systemCPUUsage
curDiskRead = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/read").(metrics.Meter).Count()
curDiskWrite = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/write").(metrics.Meter).Count()
curDiskRead = collectDiskRead()
curDiskWrite = collectDiskWrite()
deltaNetworkIngress = float64(curNetworkIngress - prevNetworkIngress)
deltaNetworkEgress = float64(curNetworkEgress - prevNetworkEgress)
@@ -341,14 +361,17 @@ func (db *Dashboard) collectData() {
Time: now,
Value: float64(deltaDiskWrite) / frequency,
}
db.charts.ActiveMemory = append(db.charts.ActiveMemory[1:], activeMemory)
db.charts.VirtualMemory = append(db.charts.VirtualMemory[1:], virtualMemory)
db.charts.NetworkIngress = append(db.charts.NetworkIngress[1:], networkIngress)
db.charts.NetworkEgress = append(db.charts.NetworkEgress[1:], networkEgress)
db.charts.ProcessCPU = append(db.charts.ProcessCPU[1:], processCPU)
db.charts.SystemCPU = append(db.charts.SystemCPU[1:], systemCPU)
db.charts.DiskRead = append(db.charts.DiskRead[1:], diskRead)
db.charts.DiskWrite = append(db.charts.DiskRead[1:], diskWrite)
sys := db.history.System
db.lock.Lock()
sys.ActiveMemory = append(sys.ActiveMemory[1:], activeMemory)
sys.VirtualMemory = append(sys.VirtualMemory[1:], virtualMemory)
sys.NetworkIngress = append(sys.NetworkIngress[1:], networkIngress)
sys.NetworkEgress = append(sys.NetworkEgress[1:], networkEgress)
sys.ProcessCPU = append(sys.ProcessCPU[1:], processCPU)
sys.SystemCPU = append(sys.SystemCPU[1:], systemCPU)
sys.DiskRead = append(sys.DiskRead[1:], diskRead)
sys.DiskWrite = append(sys.DiskWrite[1:], diskWrite)
db.lock.Unlock()
db.sendToAll(&Message{
System: &SystemMessage{
@@ -366,34 +389,12 @@ func (db *Dashboard) collectData() {
}
}
// collectLogs collects and sends the logs to the active dashboards.
func (db *Dashboard) collectLogs() {
defer db.wg.Done()
id := 1
// TODO (kurkomisi): log collection comes here.
for {
select {
case errc := <-db.quit:
errc <- nil
return
case <-time.After(db.config.Refresh / 2):
db.sendToAll(&Message{
Logs: &LogsMessage{
Log: []string{fmt.Sprintf("%-4d: This is a fake log.", id)},
},
})
id++
}
}
}
// sendToAll sends the given message to the active dashboards.
func (db *Dashboard) sendToAll(msg *Message) {
db.lock.Lock()
for _, c := range db.conns {
select {
case c.msg <- *msg:
case c.msg <- msg:
default:
c.conn.Close()
}

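Note: the meterCollector helper above replaces the direct registry lookups, which
panicked on the type assertion when a meter was never registered (e.g. metrics
disabled). A self-contained sketch of the same resolve-once pattern, with a stand-in
Meter interface instead of the metrics package:

package main

import "fmt"

// Meter is a trimmed stand-in for metrics.Meter; only Count is needed here.
type Meter interface {
	Count() int64
}

// meterCollector resolves the meter once and falls back to a constant zero
// collector if the lookup fails.
func meterCollector(registry map[string]Meter, name string) func() int64 {
	if m, ok := registry[name]; ok {
		return func() int64 { return m.Count() }
	}
	return func() int64 { return 0 }
}

func main() {
	collect := meterCollector(map[string]Meter{}, "p2p/InboundTraffic")
	fmt.Println(collect()) // 0, since no such meter was registered
}
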
dashboard/log.go (new file, 288 lines)

@@ -0,0 +1,288 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package dashboard
import (
"bytes"
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/mohae/deepcopy"
"github.com/rjeczalik/notify"
)
var emptyChunk = json.RawMessage("[]")
// prepLogs creates a JSON array from the given log record buffer.
// Returns the prepared array and the position of the last '\n'
// character in the original buffer, or -1 if it doesn't contain any.
func prepLogs(buf []byte) (json.RawMessage, int) {
b := make(json.RawMessage, 1, len(buf)+1)
b[0] = '['
b = append(b, buf...)
last := -1
for i := 1; i < len(b); i++ {
if b[i] == '\n' {
b[i] = ','
last = i
}
}
if last < 0 {
return emptyChunk, -1
}
b[last] = ']'
return b[:last+1], last - 1
}
// handleLogRequest searches for the log file specified by the timestamp of the
// request, creates a JSON array out of it and sends it to the requesting client.
func (db *Dashboard) handleLogRequest(r *LogsRequest, c *client) {
files, err := ioutil.ReadDir(db.logdir)
if err != nil {
log.Warn("Failed to open logdir", "path", db.logdir, "err", err)
return
}
re := regexp.MustCompile(`\.log$`)
fileNames := make([]string, 0, len(files))
for _, f := range files {
if f.Mode().IsRegular() && re.MatchString(f.Name()) {
fileNames = append(fileNames, f.Name())
}
}
if len(fileNames) < 1 {
log.Warn("No log files in logdir", "path", db.logdir)
return
}
idx := sort.Search(len(fileNames), func(idx int) bool {
// Returns the smallest index such that fileNames[idx] >= r.Name;
// if there is no such index, returns n.
return fileNames[idx] >= r.Name
})
switch {
case idx < 0:
return
case idx == 0 && r.Past:
return
case idx >= len(fileNames):
return
case r.Past:
idx--
case idx == len(fileNames)-1 && fileNames[idx] == r.Name:
return
case idx == len(fileNames)-1 || (idx == len(fileNames)-2 && fileNames[idx] == r.Name):
// The last file is continuously updated, and its chunks are streamed,
// so in order to avoid log record duplication on the client side, it is
// handled differently. Its current content is always kept in the history.
db.lock.Lock()
if db.history.Logs != nil {
c.msg <- &Message{
Logs: db.history.Logs,
}
}
db.lock.Unlock()
return
case fileNames[idx] == r.Name:
idx++
}
path := filepath.Join(db.logdir, fileNames[idx])
var buf []byte
if buf, err = ioutil.ReadFile(path); err != nil {
log.Warn("Failed to read file", "path", path, "err", err)
return
}
chunk, end := prepLogs(buf)
if end < 0 {
log.Warn("The file doesn't contain valid logs", "path", path)
return
}
c.msg <- &Message{
Logs: &LogsMessage{
Source: &LogFile{
Name: fileNames[idx],
Last: r.Past && idx == 0,
},
Chunk: chunk,
},
}
}
// streamLogs watches the file system, and when the logger writes
// new log records into the files, picks them up, assembles them
// into a JSON array and sends them to the clients.
func (db *Dashboard) streamLogs() {
defer db.wg.Done()
var (
err error
errc chan error
)
defer func() {
if errc == nil {
errc = <-db.quit
}
errc <- err
}()
files, err := ioutil.ReadDir(db.logdir)
if err != nil {
log.Warn("Failed to open logdir", "path", db.logdir, "err", err)
return
}
var (
opened *os.File // File descriptor for the opened active log file.
buf []byte // Contains the recently written log chunks, which are not sent to the clients yet.
)
// Log records are always written into the alphabetically last file, since the file names are derived from timestamps.
re := regexp.MustCompile(`\.log$`)
i := len(files) - 1
for i >= 0 && (!files[i].Mode().IsRegular() || !re.MatchString(files[i].Name())) {
i--
}
if i < 0 {
log.Warn("No log files in logdir", "path", db.logdir)
return
}
if opened, err = os.OpenFile(filepath.Join(db.logdir, files[i].Name()), os.O_RDONLY, 0600); err != nil {
log.Warn("Failed to open file", "name", files[i].Name(), "err", err)
return
}
defer opened.Close() // Close the most recently opened file.
fi, err := opened.Stat()
if err != nil {
log.Warn("Problem with file", "name", opened.Name(), "err", err)
return
}
db.lock.Lock()
db.history.Logs = &LogsMessage{
Source: &LogFile{
Name: fi.Name(),
Last: true,
},
Chunk: emptyChunk,
}
db.lock.Unlock()
watcher := make(chan notify.EventInfo, 10)
if err := notify.Watch(db.logdir, watcher, notify.Create); err != nil {
log.Warn("Failed to create file system watcher", "err", err)
return
}
defer notify.Stop(watcher)
ticker := time.NewTicker(db.config.Refresh)
defer ticker.Stop()
loop:
for err == nil || errc == nil {
select {
case event := <-watcher:
// Make sure that a new log file was created.
if !re.Match([]byte(event.Path())) {
break
}
if opened == nil {
log.Warn("The last log file is not opened")
break loop
}
// The new log file's name is always greater,
// because it is derived from the current log record's time.
if opened.Name() >= event.Path() {
break
}
// Read the rest of the previously opened file.
chunk, err := ioutil.ReadAll(opened)
if err != nil {
log.Warn("Failed to read file", "name", opened.Name(), "err", err)
break loop
}
buf = append(buf, chunk...)
opened.Close()
if chunk, last := prepLogs(buf); last >= 0 {
// Send the rest of the previously opened file.
db.sendToAll(&Message{
Logs: &LogsMessage{
Chunk: chunk,
},
})
}
if opened, err = os.OpenFile(event.Path(), os.O_RDONLY, 0644); err != nil {
log.Warn("Failed to open file", "name", event.Path(), "err", err)
break loop
}
buf = buf[:0]
// Change the last file in the history.
fi, err := opened.Stat()
if err != nil {
log.Warn("Problem with file", "name", opened.Name(), "err", err)
break loop
}
db.lock.Lock()
db.history.Logs.Source.Name = fi.Name()
db.history.Logs.Chunk = emptyChunk
db.lock.Unlock()
case <-ticker.C: // Send log updates to the client.
if opened == nil {
log.Warn("The last log file is not opened")
break loop
}
// Read the new logs created since the last read.
chunk, err := ioutil.ReadAll(opened)
if err != nil {
log.Warn("Failed to read file", "name", opened.Name(), "err", err)
break loop
}
b := append(buf, chunk...)
chunk, last := prepLogs(b)
if last < 0 {
break
}
// Only keep the invalid part of the buffer, which may become valid after the next read.
buf = b[last+1:]
var l *LogsMessage
// Update the history.
db.lock.Lock()
if bytes.Equal(db.history.Logs.Chunk, emptyChunk) {
db.history.Logs.Chunk = chunk
l = deepcopy.Copy(db.history.Logs).(*LogsMessage)
} else {
b = make([]byte, len(db.history.Logs.Chunk)+len(chunk)-1)
copy(b, db.history.Logs.Chunk)
b[len(db.history.Logs.Chunk)-1] = ','
copy(b[len(db.history.Logs.Chunk):], chunk[1:])
db.history.Logs.Chunk = b
l = &LogsMessage{Chunk: chunk}
}
db.lock.Unlock()
db.sendToAll(&Message{Logs: l})
case errc = <-db.quit:
break loop
}
}
}

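Note: a standalone sketch of the prepLogs transformation used by both handleLogRequest
and streamLogs (function body copied from above, with emptyChunk inlined; the sample
records are made up). Each '\n' becomes a ',', the last one becomes the closing ']',
and a trailing partial record is left for the next read, which is exactly the part
streamLogs keeps via buf = b[last+1:]:

package main

import (
	"encoding/json"
	"fmt"
)

// prepLogs is copied from dashboard/log.go above.
func prepLogs(buf []byte) (json.RawMessage, int) {
	b := make(json.RawMessage, 1, len(buf)+1)
	b[0] = '['
	b = append(b, buf...)
	last := -1
	for i := 1; i < len(b); i++ {
		if b[i] == '\n' {
			b[i] = ','
			last = i
		}
	}
	if last < 0 {
		return json.RawMessage("[]"), -1
	}
	b[last] = ']'
	return b[:last+1], last - 1
}

func main() {
	// Two complete records plus a partially written third one.
	buf := []byte("{\"msg\":\"one\"}\n{\"msg\":\"two\"}\n{\"msg\":\"par")
	chunk, last := prepLogs(buf)
	fmt.Println(string(chunk)) // [{"msg":"one"},{"msg":"two"}]
	fmt.Println(last)          // 27, the index of the last '\n' in buf
}
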
@@ -16,7 +16,10 @@
package dashboard
import "time"
import (
"encoding/json"
"time"
)
type Message struct {
General *GeneralMessage `json:"general,omitempty"`
@@ -67,6 +70,24 @@ type SystemMessage struct {
DiskWrite ChartEntries `json:"diskWrite,omitempty"`
}
// LogsMessage wraps up a log chunk. If Source isn't present, the chunk is a stream chunk.
type LogsMessage struct {
Log []string `json:"log,omitempty"`
Source *LogFile `json:"source,omitempty"` // Attributes of the log file.
Chunk json.RawMessage `json:"chunk"` // Contains log records.
}
// LogFile contains the attributes of a log file.
type LogFile struct {
Name string `json:"name"` // The name of the file.
Last bool `json:"last"` // Denotes whether this log file is the last one in the directory.
}
// Request represents the client request.
type Request struct {
Logs *LogsRequest `json:"logs,omitempty"`
}
// LogsRequest contains the attributes of the requested log file.
type LogsRequest struct {
Name string `json:"name"` // The request handler searches for a log file based on this name.
Past bool `json:"past"` // Denotes whether the client wants the previous or the next file.
}

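Note: to make the wire format above concrete, a sketch marshaling both directions of
the exchange with trimmed copies of these types (the file name values are made up):

package main

import (
	"encoding/json"
	"fmt"
)

type LogsRequest struct {
	Name string `json:"name"`
	Past bool   `json:"past"`
}

type Request struct {
	Logs *LogsRequest `json:"logs,omitempty"`
}

type LogFile struct {
	Name string `json:"name"`
	Last bool   `json:"last"`
}

type LogsMessage struct {
	Source *LogFile        `json:"source,omitempty"`
	Chunk  json.RawMessage `json:"chunk"`
}

func main() {
	// The client asks for the file preceding the named one.
	req, _ := json.Marshal(Request{Logs: &LogsRequest{Name: "18071110590412.log", Past: true}})
	fmt.Println(string(req)) // {"logs":{"name":"18071110590412.log","past":true}}

	// The server replies with the file's records as a raw JSON array.
	msg, _ := json.Marshal(LogsMessage{
		Source: &LogFile{Name: "18071110585900.log", Last: false},
		Chunk:  json.RawMessage(`[{"msg":"example"}]`),
	})
	fmt.Println(string(msg))
	// {"source":{"name":"18071110585900.log","last":false},"chunk":[{"msg":"example"}]}
}
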
@@ -95,7 +95,10 @@ var Flags = []cli.Flag{
memprofilerateFlag, blockprofilerateFlag, cpuprofileFlag, traceFlag,
}
var glogger *log.GlogHandler
var (
ostream log.Handler
glogger *log.GlogHandler
)
func init() {
usecolor := term.IsTty(os.Stderr.Fd()) && os.Getenv("TERM") != "dumb"
@@ -103,14 +106,26 @@ func init() {
if usecolor {
output = colorable.NewColorableStderr()
}
glogger = log.NewGlogHandler(log.StreamHandler(output, log.TerminalFormat(usecolor)))
ostream = log.StreamHandler(output, log.TerminalFormat(usecolor))
glogger = log.NewGlogHandler(ostream)
}
// Setup initializes profiling and logging based on the CLI flags.
// It should be called as early as possible in the program.
func Setup(ctx *cli.Context) error {
func Setup(ctx *cli.Context, logdir string) error {
// logging
log.PrintOrigins(ctx.GlobalBool(debugFlag.Name))
if logdir != "" {
rfh, err := log.RotatingFileHandler(
logdir,
262144,
log.JSONFormatOrderedEx(false, true),
)
if err != nil {
return err
}
glogger.SetHandler(log.MultiHandler(ostream, rfh))
}
glogger.Verbosity(log.Lvl(ctx.GlobalInt(verbosityFlag.Name)))
glogger.Vmodule(ctx.GlobalString(vmoduleFlag.Name))
glogger.BacktraceAt(ctx.GlobalString(backtraceAtFlag.Name))

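Note: a minimal standalone sketch of the wiring Setup performs above, using the same
constructors; the log directory here is a placeholder, and 262144 bytes is the 256KiB
chunk limit chosen above:

package main

import (
	"os"

	"github.com/ethereum/go-ethereum/log"
)

func main() {
	// Rotate 256KiB JSON chunks in a placeholder directory.
	rfh, err := log.RotatingFileHandler("/tmp/geth-logs", 262144, log.JSONFormatOrderedEx(false, true))
	if err != nil {
		panic(err)
	}
	// Keep the console stream and duplicate every record to the file chunks.
	ostream := log.StreamHandler(os.Stderr, log.TerminalFormat(false))
	log.Root().SetHandler(log.MultiHandler(ostream, rfh))
	log.Info("dashboard logging wired up", "chunk-limit", 262144)
}
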
@@ -77,11 +77,11 @@ type TerminalStringer interface {
// a terminal with color-coded level output and terser human friendly timestamp.
// This format should only be used for interactive programs or while developing.
//
// [TIME] [LEVEL] MESSAGE key=value key=value ...
// [LEVEL] [TIME] MESSAGE key=value key=value ...
//
// Example:
//
// [May 16 20:58:45] [DBUG] remove route ns=haproxy addr=127.0.0.1:50002
// [DBUG] [May 16 20:58:45] remove route ns=haproxy addr=127.0.0.1:50002
//
func TerminalFormat(usecolor bool) Format {
return FormatFunc(func(r *Record) []byte {
@@ -202,6 +202,48 @@ func JSONFormat() Format {
return JSONFormatEx(false, true)
}
// JSONFormatOrderedEx formats log records as JSON objects, keeping the log
// context as an ordered key-value array. If pretty is true, records will be
// pretty-printed. If lineSeparated is true, records will be logged with a new
// line between each record.
func JSONFormatOrderedEx(pretty, lineSeparated bool) Format {
jsonMarshal := json.Marshal
if pretty {
jsonMarshal = func(v interface{}) ([]byte, error) {
return json.MarshalIndent(v, "", " ")
}
}
return FormatFunc(func(r *Record) []byte {
props := make(map[string]interface{})
props[r.KeyNames.Time] = r.Time
props[r.KeyNames.Lvl] = r.Lvl.String()
props[r.KeyNames.Msg] = r.Msg
ctx := make([]string, len(r.Ctx))
for i := 0; i < len(r.Ctx); i += 2 {
k, ok := r.Ctx[i].(string)
if !ok {
props[errorKey] = fmt.Sprintf("%+v is not a string key", r.Ctx[i])
}
ctx[i] = k
ctx[i+1] = formatLogfmtValue(r.Ctx[i+1], true)
}
props[r.KeyNames.Ctx] = ctx
b, err := jsonMarshal(props)
if err != nil {
b, _ = jsonMarshal(map[string]string{
errorKey: err.Error(),
})
return b
}
if lineSeparated {
b = append(b, '\n')
}
return b
})
}
// JSONFormatEx formats log records as JSON objects. If pretty is true,
// records will be pretty-printed. If lineSeparated is true, records
// will be logged with a new line between each record.

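Note: a sketch of the record shape JSONFormatOrderedEx emits, which is what the
dashboard client parses: flat t/lvl/msg keys plus a ctx array of alternating keys and
formatted values (field values are made up; encoding/json sorts map keys alphabetically):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// Approximates a single record as emitted by JSONFormatOrderedEx.
	rec := map[string]interface{}{
		"t":   time.Date(2018, 7, 11, 10, 59, 4, 0, time.UTC),
		"lvl": "info",
		"msg": "Imported new chain segment",
		"ctx": []string{"blocks", "1", "txs", "5"},
	}
	b, _ := json.Marshal(rec)
	fmt.Println(string(b))
	// {"ctx":["blocks","1","txs","5"],"lvl":"info","msg":"Imported new chain segment","t":"2018-07-11T10:59:04Z"}
}
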
@@ -8,6 +8,11 @@ import (
"reflect"
"sync"
"io/ioutil"
"path/filepath"
"regexp"
"strings"
"github.com/go-stack/stack"
)
@@ -70,6 +75,111 @@ func FileHandler(path string, fmtr Format) (Handler, error) {
return closingHandler{f, StreamHandler(f, fmtr)}, nil
}
// countingWriter wraps a WriteCloser object in order to count the written bytes.
type countingWriter struct {
w io.WriteCloser // the wrapped object
count uint // number of bytes written
}
// Write increments the byte counter by the number of bytes written.
// Implements the WriteCloser interface.
func (w *countingWriter) Write(p []byte) (n int, err error) {
n, err = w.w.Write(p)
w.count += uint(n)
return n, err
}
// Close implements the WriteCloser interface.
func (w *countingWriter) Close() error {
return w.w.Close()
}
// prepFile opens the log file at the given path, and cuts off the invalid part
// from the end, because the previous execution could have been ended by an interruption.
// Assumes that every line ending with '\n' contains a valid log record.
func prepFile(path string) (*countingWriter, error) {
f, err := os.OpenFile(path, os.O_RDWR|os.O_APPEND, 0600)
if err != nil {
return nil, err
}
_, err = f.Seek(-1, io.SeekEnd)
if err != nil {
return nil, err
}
buf := make([]byte, 1)
var cut int64
// Scan backwards from the end of the file until a newline is found, counting
// how many trailing bytes belong to a partially written record.
for {
if _, err := f.Read(buf); err != nil {
return nil, err
}
if buf[0] == '\n' {
break
}
if _, err = f.Seek(-2, io.SeekCurrent); err != nil {
return nil, err
}
cut++
}
fi, err := f.Stat()
if err != nil {
return nil, err
}
// Drop the damaged tail, resuming the byte count from the last intact record.
ns := fi.Size() - cut
if err = f.Truncate(ns); err != nil {
return nil, err
}
return &countingWriter{w: f, count: uint(ns)}, nil
}
// RotatingFileHandler returns a handler which writes log records to file chunks
// at the given path. When a file's size reaches the limit, the handler creates
// a new file named after the timestamp of the first log record it will contain.
func RotatingFileHandler(path string, limit uint, formatter Format) (Handler, error) {
if err := os.MkdirAll(path, 0700); err != nil {
return nil, err
}
files, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}
re := regexp.MustCompile(`\.log$`)
last := len(files) - 1
for last >= 0 && (!files[last].Mode().IsRegular() || !re.MatchString(files[last].Name())) {
last--
}
var counter *countingWriter
if last >= 0 && files[last].Size() < int64(limit) {
// Open the last file, and continue writing into it until its size reaches the limit.
if counter, err = prepFile(filepath.Join(path, files[last].Name())); err != nil {
return nil, err
}
}
if counter == nil {
counter = new(countingWriter)
}
h := StreamHandler(counter, formatter)
return FuncHandler(func(r *Record) error {
if counter.count > limit {
counter.Close()
counter.w = nil
}
if counter.w == nil {
f, err := os.OpenFile(
filepath.Join(path, fmt.Sprintf("%s.log", strings.Replace(r.Time.Format("060102150405.00"), ".", "", 1))),
os.O_CREATE|os.O_APPEND|os.O_WRONLY,
0600,
)
if err != nil {
return err
}
counter.w = f
counter.count = 0
}
return h.Log(r)
}), nil
}
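To illustrate the chunk naming scheme, the sketch below replays the naming expression from the handler on a fixed timestamp (the date is an arbitrary example):

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	// The first record's time is rendered with the reference layout
	// "060102150405.00" (yyMMddHHmmss plus two fractional digits) and the
	// decimal point is then dropped from the resulting file name.
	ts := time.Date(2018, 7, 11, 10, 59, 4, 120000000, time.UTC)
	name := fmt.Sprintf("%s.log", strings.Replace(ts.Format("060102150405.00"), ".", "", 1))
	fmt.Println(name) // 18071110590412.log
}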
// NetHandler opens a socket to the given address and writes records
// over the connection.
func NetHandler(network, addr string, fmtr Format) (Handler, error) {

@ -57,6 +57,11 @@ func NewGlogHandler(h Handler) *GlogHandler {
}
}
// SetHandler updates the handler to write records to the specified sub-handler.
func (h *GlogHandler) SetHandler(nh Handler) {
h.origin = nh
}
// pattern contains a filter for the Vmodule option, holding a verbosity level
// and a file pattern to match.
type pattern struct {

@ -11,6 +11,7 @@ import (
const timeKey = "t"
const lvlKey = "lvl"
const msgKey = "msg"
const ctxKey = "ctx"
const errorKey = "LOG15_ERROR"
const skipLevel = 2
@ -101,6 +102,7 @@ type RecordKeyNames struct {
Time string
Msg string
Lvl string
Ctx string
}
// A Logger writes key/value pairs to a Handler
@ -139,6 +141,7 @@ func (l *logger) write(msg string, lvl Lvl, ctx []interface{}, skip int) {
Time: timeKey,
Msg: msgKey,
Lvl: lvlKey,
Ctx: ctxKey,
},
})
}

@ -179,7 +179,7 @@ func (c *Config) NodeDB() string {
if c.DataDir == "" {
return "" // ephemeral
}
return c.resolvePath(datadirNodeDatabase)
return c.ResolvePath(datadirNodeDatabase)
}
// DefaultIPCEndpoint returns the IPC path used by default.
@ -262,8 +262,8 @@ var isOldGethResource = map[string]bool{
"trusted-nodes.json": true,
}
// resolvePath resolves path in the instance directory.
func (c *Config) resolvePath(path string) string {
// ResolvePath resolves path in the instance directory.
func (c *Config) ResolvePath(path string) string {
if filepath.IsAbs(path) {
return path
}
@ -309,7 +309,7 @@ func (c *Config) NodeKey() *ecdsa.PrivateKey {
return key
}
keyfile := c.resolvePath(datadirPrivateKey)
keyfile := c.ResolvePath(datadirPrivateKey)
if key, err := crypto.LoadECDSA(keyfile); err == nil {
return key
}
@ -332,12 +332,12 @@ func (c *Config) NodeKey() *ecdsa.PrivateKey {
// StaticNodes returns a list of node enode URLs configured as static nodes.
func (c *Config) StaticNodes() []*discover.Node {
return c.parsePersistentNodes(c.resolvePath(datadirStaticNodes))
return c.parsePersistentNodes(c.ResolvePath(datadirStaticNodes))
}
// TrustedNodes returns a list of node enode URLs configured as trusted nodes.
func (c *Config) TrustedNodes() []*discover.Node {
return c.parsePersistentNodes(c.resolvePath(datadirTrustedNodes))
return c.parsePersistentNodes(c.ResolvePath(datadirTrustedNodes))
}
// parsePersistentNodes parses a list of discovery node URLs loaded from a .json

@ -570,12 +570,12 @@ func (n *Node) OpenDatabase(name string, cache, handles int) (ethdb.Database, er
if n.config.DataDir == "" {
return ethdb.NewMemDatabase(), nil
}
return ethdb.NewLDBDatabase(n.config.resolvePath(name), cache, handles)
return ethdb.NewLDBDatabase(n.config.ResolvePath(name), cache, handles)
}
// ResolvePath returns the absolute path of a resource in the instance directory.
func (n *Node) ResolvePath(x string) string {
return n.config.resolvePath(x)
return n.config.ResolvePath(x)
}
// apis returns the collection of RPC descriptors this node offers.

@ -43,7 +43,7 @@ func (ctx *ServiceContext) OpenDatabase(name string, cache int, handles int) (et
if ctx.config.DataDir == "" {
return ethdb.NewMemDatabase(), nil
}
db, err := ethdb.NewLDBDatabase(ctx.config.resolvePath(name), cache, handles)
db, err := ethdb.NewLDBDatabase(ctx.config.ResolvePath(name), cache, handles)
if err != nil {
return nil, err
}
@ -54,7 +54,7 @@ func (ctx *ServiceContext) OpenDatabase(name string, cache int, handles int) (et
// and if the user actually uses persistent storage. It will return an empty string
// for ephemeral storage and the user's own input for absolute paths.
func (ctx *ServiceContext) ResolvePath(path string) string {
return ctx.config.resolvePath(path)
return ctx.config.ResolvePath(path)
}
// Service retrieves a currently running service registered of a specific type.

21
vendor/github.com/mohae/deepcopy/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Joel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

8
vendor/github.com/mohae/deepcopy/README.md generated vendored Normal file

@ -0,0 +1,8 @@
deepCopy
========
[![GoDoc](https://godoc.org/github.com/mohae/deepcopy?status.svg)](https://godoc.org/github.com/mohae/deepcopy)[![Build Status](https://travis-ci.org/mohae/deepcopy.png)](https://travis-ci.org/mohae/deepcopy)
DeepCopy makes deep copies of things: unexported field values are not copied.
## Usage
cpy := deepcopy.Copy(orig)

125
vendor/github.com/mohae/deepcopy/deepcopy.go generated vendored Normal file

@ -0,0 +1,125 @@
// deepcopy makes deep copies of things. A standard copy will copy the
// pointers: deep copy copies the values pointed to. Unexported field
// values are not copied.
//
// Copyright (c)2014-2016, Joel Scoble (github.com/mohae), all rights reserved.
// License: MIT, for more details check the included LICENSE file.
package deepcopy
import (
"reflect"
"time"
)
// Interface for delegating copy process to type
type Interface interface {
DeepCopy() interface{}
}
// Iface is an alias to Copy; this exists for backwards compatibility reasons.
func Iface(iface interface{}) interface{} {
return Copy(iface)
}
// Copy creates a deep copy of whatever is passed to it and returns the copy
// in an interface{}. The returned value will need to be asserted to the
// correct type.
func Copy(src interface{}) interface{} {
if src == nil {
return nil
}
// Make the interface a reflect.Value
original := reflect.ValueOf(src)
// Make a copy of the same type as the original.
cpy := reflect.New(original.Type()).Elem()
// Recursively copy the original.
copyRecursive(original, cpy)
// Return the copy as an interface.
return cpy.Interface()
}
// copyRecursive does the actual copying of the interface. It currently has
// limited support for what it can handle. Add as needed.
func copyRecursive(original, cpy reflect.Value) {
// check whether the value implements deepcopy.Interface
if original.CanInterface() {
if copier, ok := original.Interface().(Interface); ok {
cpy.Set(reflect.ValueOf(copier.DeepCopy()))
return
}
}
// handle according to original's Kind
switch original.Kind() {
case reflect.Ptr:
// Get the actual value being pointed to.
originalValue := original.Elem()
// if it isn't valid, return.
if !originalValue.IsValid() {
return
}
cpy.Set(reflect.New(originalValue.Type()))
copyRecursive(originalValue, cpy.Elem())
case reflect.Interface:
// If this is a nil interface value, don't do anything
if original.IsNil() {
return
}
// Get the value for the interface, not the pointer.
originalValue := original.Elem()
// Get the value by calling Elem().
copyValue := reflect.New(originalValue.Type()).Elem()
copyRecursive(originalValue, copyValue)
cpy.Set(copyValue)
case reflect.Struct:
t, ok := original.Interface().(time.Time)
if ok {
cpy.Set(reflect.ValueOf(t))
return
}
// Go through each field of the struct and copy it.
for i := 0; i < original.NumField(); i++ {
// The Type's StructField for a given field is checked to see if StructField.PkgPath
// is set to determine if the field is exported or not because CanSet() returns false
// for settable fields. I'm not sure why. -mohae
if original.Type().Field(i).PkgPath != "" {
continue
}
copyRecursive(original.Field(i), cpy.Field(i))
}
case reflect.Slice:
if original.IsNil() {
return
}
// Make a new slice and copy each element.
cpy.Set(reflect.MakeSlice(original.Type(), original.Len(), original.Cap()))
for i := 0; i < original.Len(); i++ {
copyRecursive(original.Index(i), cpy.Index(i))
}
case reflect.Map:
if original.IsNil() {
return
}
cpy.Set(reflect.MakeMap(original.Type()))
for _, key := range original.MapKeys() {
originalValue := original.MapIndex(key)
copyValue := reflect.New(originalValue.Type()).Elem()
copyRecursive(originalValue, copyValue)
copyKey := Copy(key.Interface())
cpy.SetMapIndex(reflect.ValueOf(copyKey), copyValue)
}
default:
cpy.Set(original)
}
}
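A quick usage sketch for the newly vendored package; the chartEntry type below is a hypothetical stand-in for the dashboard's own message structs:

package main

import (
	"fmt"
	"time"

	"github.com/mohae/deepcopy"
)

func main() {
	// Copy returns an interface{} that must be asserted back to the
	// original type; unexported fields would stay at their zero values.
	type chartEntry struct {
		Time  time.Time
		Value float64
	}
	orig := []*chartEntry{{Time: time.Now(), Value: 3.14}}
	cpy := deepcopy.Copy(orig).([]*chartEntry)
	cpy[0].Value = 2.71        // mutating the copy...
	fmt.Println(orig[0].Value) // 3.14 - ...leaves the original untouched
}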

6
vendor/vendor.json vendored

@ -291,6 +291,12 @@
"revision": "ad45545899c7b13c020ea92b2072220eefad42b8",
"revisionTime": "2015-03-14T17:03:34Z"
},
{
"checksumSHA1": "2jsbDTvwxafPp7FJjJ8IIFlTLjs=",
"path": "github.com/mohae/deepcopy",
"revision": "c48cc78d482608239f6c4c92a4abd87eb8761c90",
"revisionTime": "2017-09-29T03:49:55Z"
},
{
"checksumSHA1": "FYM/8R2CqS6PSNAoKl6X5gNJ20A=",
"path": "github.com/naoina/toml",