* switch logging from the log crate to tracing

* add tracing and enable matching features on dependencies

* use just one hostname crate

* cargo upgrade

* set up the pretty logger and the sentry tracing layer
Bryan Stitt 2023-06-23 16:28:45 -07:00 committed by GitHub
parent 3fac4248d6
commit fd661689a3
59 changed files with 240 additions and 226 deletions
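
At its core this commit swaps env_logger + sentry-log for tracing-subscriber's pretty formatter plus a sentry-tracing layer (see the main.rs hunk further down). A minimal, self-contained sketch of that setup, assuming the crate versions pinned in this diff (sentry 0.31, sentry-tracing 0.31, tracing-subscriber 0.3 with the env-filter feature) and with the DSN and filter handling simplified:

use tracing::info;
use tracing_subscriber::{prelude::*, EnvFilter};

fn main() {
    // with no DSN configured, this guard wraps a disabled client and does nothing
    let _sentry_guard = sentry::init(sentry::ClientOptions {
        release: sentry::release_name!(),
        // sample only a small fraction of traces
        traces_sample_rate: 0.01,
        ..Default::default()
    });

    tracing_subscriber::fmt()
        // RUST_LOG-style level filtering
        .with_env_filter(EnvFilter::from_default_env())
        // human-friendly, multi-line terminal output
        .pretty()
        // the root subscriber is ready
        .finish()
        // forward events and spans to sentry
        .with(sentry_tracing::layer())
        // install as the global default subscriber
        .init();

    info!("tracing initialized");
}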

Cargo.lock (generated)

@ -110,6 +110,9 @@ name = "arc-swap"
version = "1.6.0" version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6"
dependencies = [
"serde",
]
[[package]] [[package]]
name = "archery" name = "archery"
@ -495,6 +498,7 @@ dependencies = [
"tower", "tower",
"tower-layer", "tower-layer",
"tower-service", "tower-service",
"tracing",
] ]
[[package]] [[package]]
@ -523,6 +527,7 @@ dependencies = [
"rustversion", "rustversion",
"tower-layer", "tower-layer",
"tower-service", "tower-service",
"tracing",
] ]
[[package]] [[package]]
@ -1141,6 +1146,7 @@ dependencies = [
"futures", "futures",
"hdrhistogram", "hdrhistogram",
"humantime", "humantime",
"parking_lot 0.12.1",
"prost-types", "prost-types",
"serde", "serde",
"serde_json", "serde_json",
@ -1445,6 +1451,7 @@ dependencies = [
"moka", "moka",
"redis-rate-limiter", "redis-rate-limiter",
"tokio", "tokio",
"tracing",
] ]
[[package]] [[package]]
@ -1577,6 +1584,12 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b"
[[package]]
name = "dyn-clone"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30"
[[package]] [[package]]
name = "ecdsa" name = "ecdsa"
version = "0.14.8" version = "0.14.8"
@ -1712,19 +1725,6 @@ dependencies = [
"syn 1.0.109", "syn 1.0.109",
] ]
[[package]]
name = "env_logger"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0"
dependencies = [
"humantime",
"is-terminal",
"log",
"regex",
"termcolor",
]
[[package]] [[package]]
name = "errno" name = "errno"
version = "0.3.1" version = "0.3.1"
@ -2073,16 +2073,6 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f20267f3a8b678b7151c0c508002e79126144a5d47badddec7f31ddc1f4c754" checksum = "2f20267f3a8b678b7151c0c508002e79126144a5d47badddec7f31ddc1f4c754"
[[package]]
name = "exponential-decay-histogram"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55d9dc1064c0b1bc8c691c0ea539385bc6f299f5e5e6050583d34fdb032e9935"
dependencies = [
"ordered-float",
"rand",
]
[[package]] [[package]]
name = "eyre" name = "eyre"
version = "0.6.8" version = "0.6.8"
@ -2318,7 +2308,6 @@ dependencies = [
"futures-core", "futures-core",
"futures-task", "futures-task",
"futures-util", "futures-util",
"num_cpus",
] ]
[[package]] [[package]]
@ -2434,16 +2423,6 @@ dependencies = [
"zeroize", "zeroize",
] ]
[[package]]
name = "gethostname"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0176e0459c2e4a1fe232f984bca6890e681076abb9934f6cea7c326f3fc47818"
dependencies = [
"libc",
"windows-targets",
]
[[package]] [[package]]
name = "getrandom" name = "getrandom"
version = "0.2.10" version = "0.2.10"
@ -3037,6 +3016,10 @@ name = "ipnet"
version = "2.7.2" version = "2.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f"
dependencies = [
"schemars",
"serde",
]
[[package]] [[package]]
name = "iri-string" name = "iri-string"
@ -3184,6 +3167,7 @@ dependencies = [
"portable-atomic", "portable-atomic",
"serde", "serde",
"tokio", "tokio",
"tracing",
"watermill", "watermill",
] ]
@ -3548,6 +3532,7 @@ dependencies = [
"autocfg", "autocfg",
"num-integer", "num-integer",
"num-traits", "num-traits",
"serde",
] ]
[[package]] [[package]]
@ -3574,6 +3559,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d"
dependencies = [ dependencies = [
"num-traits", "num-traits",
"serde",
] ]
[[package]] [[package]]
@ -3607,6 +3593,7 @@ dependencies = [
"num-bigint", "num-bigint",
"num-integer", "num-integer",
"num-traits", "num-traits",
"serde",
] ]
[[package]] [[package]]
@ -4515,6 +4502,7 @@ dependencies = [
"serde_json", "serde_json",
"slab", "slab",
"tokio", "tokio",
"tracing",
] ]
[[package]] [[package]]
@ -5020,6 +5008,30 @@ dependencies = [
"parking_lot 0.12.1", "parking_lot 0.12.1",
] ]
[[package]]
name = "schemars"
version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02c613288622e5f0c3fdc5dbd4db1c5fbe752746b1d1a56a0630b78fd00de44f"
dependencies = [
"dyn-clone",
"schemars_derive",
"serde",
"serde_json",
]
[[package]]
name = "schemars_derive"
version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "109da1e6b197438deb6db99952990c7f959572794b80ff93707d55a232545e7c"
dependencies = [
"proc-macro2",
"quote",
"serde_derive_internals",
"syn 1.0.109",
]
[[package]] [[package]]
name = "scoped-tls" name = "scoped-tls"
version = "1.0.1" version = "1.0.1"
@ -5317,9 +5329,9 @@ dependencies = [
"sentry-backtrace", "sentry-backtrace",
"sentry-contexts", "sentry-contexts",
"sentry-core", "sentry-core",
"sentry-log",
"sentry-panic", "sentry-panic",
"sentry-tracing", "sentry-tracing",
"serde_json",
"tokio", "tokio",
"ureq", "ureq",
"webpki-roots 0.22.6", "webpki-roots 0.22.6",
@ -5375,16 +5387,6 @@ dependencies = [
"serde_json", "serde_json",
] ]
[[package]]
name = "sentry-log"
version = "0.31.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2558fc4a85326e6063711b45ce82ed6b18cdacd0732580c1567da914ac1df33e"
dependencies = [
"log",
"sentry-core",
]
[[package]] [[package]]
name = "sentry-panic" name = "sentry-panic"
version = "0.31.5" version = "0.31.5"
@ -5444,6 +5446,17 @@ dependencies = [
"syn 2.0.18", "syn 2.0.18",
] ]
[[package]]
name = "serde_derive_internals"
version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]] [[package]]
name = "serde_json" name = "serde_json"
version = "1.0.97" version = "1.0.97"
@ -5646,6 +5659,7 @@ dependencies = [
"iri-string", "iri-string",
"k256 0.11.6", "k256 0.11.6",
"rand", "rand",
"serde",
"sha3", "sha3",
"thiserror", "thiserror",
"time 0.3.22", "time 0.3.22",
@ -6470,6 +6484,7 @@ dependencies = [
"pin-project-lite", "pin-project-lite",
"tower-layer", "tower-layer",
"tower-service", "tower-service",
"tracing",
] ]
[[package]] [[package]]
@ -6558,6 +6573,7 @@ dependencies = [
"matchers", "matchers",
"nu-ansi-term", "nu-ansi-term",
"once_cell", "once_cell",
"parking_lot 0.12.1",
"regex", "regex",
"sharded-slab", "sharded-slab",
"smallvec", "smallvec",
@ -7016,16 +7032,13 @@ dependencies = [
"deferred-rate-limiter", "deferred-rate-limiter",
"derive_more", "derive_more",
"entities", "entities",
"env_logger",
"ethbloom", "ethbloom",
"ethers", "ethers",
"ewma", "ewma",
"exponential-decay-histogram",
"fdlimit", "fdlimit",
"flume", "flume",
"fstrings", "fstrings",
"futures", "futures",
"gethostname",
"glob", "glob",
"handlebars", "handlebars",
"hashbrown 0.14.0", "hashbrown 0.14.0",
@ -7039,7 +7052,6 @@ dependencies = [
"itertools 0.11.0", "itertools 0.11.0",
"latency", "latency",
"listenfd", "listenfd",
"log",
"migration", "migration",
"mimalloc", "mimalloc",
"moka", "moka",
@ -7060,6 +7072,7 @@ dependencies = [
"rmp-serde", "rmp-serde",
"rust_decimal", "rust_decimal",
"sentry", "sentry",
"sentry-tracing",
"serde", "serde",
"serde_json", "serde_json",
"serde_prometheus", "serde_prometheus",

@ -12,3 +12,4 @@ hashbrown = "0.14.0"
log = "0.4.19" log = "0.4.19"
moka = { version = "0.11.2", features = ["future"] } moka = { version = "0.11.2", features = ["future"] }
tokio = "1.28.2" tokio = "1.28.2"
tracing = "0.1.37"

@ -1,5 +1,4 @@
//#![warn(missing_docs)] //#![warn(missing_docs)]
use log::error;
use moka::future::{Cache, CacheBuilder}; use moka::future::{Cache, CacheBuilder};
use redis_rate_limiter::{RedisRateLimitResult, RedisRateLimiter}; use redis_rate_limiter::{RedisRateLimitResult, RedisRateLimiter};
use std::cmp::Eq; use std::cmp::Eq;
@ -9,6 +8,7 @@ use std::sync::atomic::Ordering;
use std::sync::{atomic::AtomicU64, Arc}; use std::sync::{atomic::AtomicU64, Arc};
use tokio::sync::Mutex; use tokio::sync::Mutex;
use tokio::time::{Duration, Instant}; use tokio::time::{Duration, Instant};
use tracing::error;
/// A local cache that sits in front of a RedisRateLimiter /// A local cache that sits in front of a RedisRateLimiter
/// Generic accross the key so it is simple to use with IPs or user keys /// Generic accross the key so it is simple to use with IPs or user keys

@ -11,6 +11,7 @@ log = "0.4.19"
portable-atomic = { version = "1.3.3", features = ["float"] } portable-atomic = { version = "1.3.3", features = ["float"] }
serde = { version = "1.0.164", features = [] } serde = { version = "1.0.164", features = [] }
tokio = { version = "1.28.2", features = ["full"] } tokio = { version = "1.28.2", features = ["full"] }
tracing = "0.1.37"
watermill = "0.1.1" watermill = "0.1.1"
[dev-dependencies] [dev-dependencies]

@ -2,9 +2,9 @@ mod rtt_estimate;
use std::sync::Arc; use std::sync::Arc;
use log::{error, log_enabled, trace};
use tokio::task::JoinHandle; use tokio::task::JoinHandle;
use tokio::time::{Duration, Instant}; use tokio::time::{Duration, Instant};
use tracing::{enabled, error, trace, Level};
use self::rtt_estimate::AtomicRttEstimate; use self::rtt_estimate::AtomicRttEstimate;
use crate::util::nanos::nanos; use crate::util::nanos::nanos;
@ -59,7 +59,7 @@ impl PeakEwmaLatency {
let now = Instant::now(); let now = Instant::now();
if estimate.update_at > now { if estimate.update_at > now {
if log_enabled!(log::Level::Trace) { if enabled!(Level::TRACE) {
trace!( trace!(
"update_at is {}ns in the future", "update_at is {}ns in the future",
estimate.update_at.duration_since(now).as_nanos() estimate.update_at.duration_since(now).as_nanos()
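
Across the latency and cache crates, log_enabled!(log::Level::Trace) guards become tracing's enabled!(Level::TRACE), as in the hunk above. A small sketch of that guard pattern; the helper name and arguments here are illustrative, not taken from the repo:

use std::time::Instant;
use tracing::{enabled, trace, Level};

// only pay for the formatting work when TRACE is actually enabled
fn trace_future_update(update_at: Instant, now: Instant) {
    if enabled!(Level::TRACE) {
        trace!(
            "update_at is {}ns in the future",
            update_at.duration_since(now).as_nanos()
        );
    }
}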

@ -1,7 +1,7 @@
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
use log::trace;
use tokio::time::{Duration, Instant}; use tokio::time::{Duration, Instant};
use tracing::trace;
use crate::util::atomic_f32_pair::AtomicF32Pair; use crate::util::atomic_f32_pair::AtomicF32Pair;
use crate::util::nanos::{nanos, NANOS_PER_MILLI}; use crate::util::nanos::{nanos, NANOS_PER_MILLI};

@ -1,4 +1,4 @@
use log::{log_enabled, trace}; use tracing::{enabled, Level, trace};
use quick_cache::sync::KQCache; use quick_cache::sync::KQCache;
use quick_cache::{PlaceholderGuard, Weighter}; use quick_cache::{PlaceholderGuard, Weighter};
use serde::ser::SerializeStruct; use serde::ser::SerializeStruct;
@ -201,7 +201,7 @@ impl<
while let Ok((expire_at, key, qey)) = self.rx.recv_async().await { while let Ok((expire_at, key, qey)) = self.rx.recv_async().await {
let now = Instant::now(); let now = Instant::now();
if expire_at > now { if expire_at > now {
if log_enabled!(log::Level::Trace) { if enabled!(Level::TRACE) {
trace!( trace!(
"{}, {:?}, {:?} sleeping for {}ms.", "{}, {:?}, {:?} sleeping for {}ms.",
self.name, self.name,
@ -246,7 +246,7 @@ impl<
if weight <= self.cache.max_item_weight { if weight <= self.cache.max_item_weight {
self.inner.insert(val); self.inner.insert(val);
if log_enabled!(log::Level::Trace) { if enabled!(Level::TRACE) {
trace!( trace!(
"{}, {:?}, {:?} expiring in {}s", "{}, {:?}, {:?} expiring in {}s",
self.cache.name, self.cache.name,

@ -37,73 +37,70 @@ influxdb2-structmap = { git = "https://github.com/llamanodes/influxdb2/", rev =
# TODO: hdrhistogram for automated tiers # TODO: hdrhistogram for automated tiers
anyhow = { version = "1.0.71", features = ["backtrace"] } anyhow = { version = "1.0.71", features = ["backtrace"] }
arc-swap = "1.6.0" arc-swap = { version = "1.6.0", features = ["serde"] }
argh = "0.1.10" argh = "0.1.10"
async-trait = "0.1.68" async-trait = "0.1.68"
axum = { version = "0.6.18", features = ["headers", "ws"] } axum = { version = "0.6.18", features = ["headers", "tracing", "ws"] }
axum-client-ip = "0.4.1" axum-client-ip = "0.4.1"
axum-macros = "0.3.7" axum-macros = "0.3.7"
base64 = "0.21.2" base64 = "0.21.2"
check-if-email-exists = "0.9.0" check-if-email-exists = "0.9.0"
chrono = "0.4.26" chrono = { version = "0.4.26", features = ["serde"] }
console-subscriber = { version = "0.1.9", optional = true } console-subscriber = { version = "0.1.9", features = ["env-filter", "parking_lot"], optional = true }
counter = "0.5.7" counter = "0.5.7"
derive_more = "0.99.17" derive_more = { version = "0.99.17", features = ["nightly"] }
env_logger = "0.10.0"
ethbloom = "0.13.0" ethbloom = { version = "0.13.0", features = ["serialize"] }
ewma = "0.1.1" ewma = "0.1.1"
fdlimit = "0.2.1" fdlimit = "0.2.1"
flume = "0.10.14" flume = "0.10.14"
fstrings = "0.2" fstrings = "0.2"
futures = { version = "0.3.28", features = ["thread-pool"] } futures = { version = "0.3.28" }
gethostname = "0.4.3"
glob = "0.3.1" glob = "0.3.1"
handlebars = "4.3.7" handlebars = "4.3.7"
hashbrown = { version = "0.14.0", features = ["serde"] } hashbrown = { version = "0.14.0", features = ["nightly", "serde"] }
hdrhistogram = "7.5.2" hdrhistogram = "7.5.2"
hostname = "0.3.1" hostname = "0.3.1"
http = "0.2.9" http = "0.2.9"
hyper = { version = "0.14.26", features = ["full", "nightly"] } hyper = { version = "0.14.26", features = ["full", "nightly"] }
ipnet = "2.7.2" ipnet = { version = "2.7.2", features = ["json"] }
itertools = "0.11.0" itertools = "0.11.0"
listenfd = "1.0.1" listenfd = "1.0.1"
log = "0.4.19"
mimalloc = { version = "0.1.37", optional = true} mimalloc = { version = "0.1.37", optional = true}
moka = { version = "0.11.2", features = ["future"] } moka = { version = "0.11.2", default-features = false, features = ["atomic64", "future", "parking_lot", "quanta", "triomphe", "uuid",] }
nanorand = { version = "0.7.0", default-features = false, features = ["alloc", "std", "tls", "wyrand"] } nanorand = { version = "0.7.0", default-features = false, features = ["std", "tls", "wyrand"] }
num = "0.4.0" num = { version = "0.4.0", features = ["serde"] }
num-traits = "0.2.15" num-traits = "0.2.15"
once_cell = { version = "1.18.0" } once_cell = { version = "1.18.0" }
ordered-float = "3.7.0" ordered-float = {version = "3.7.0", features = ["serde"] }
pagerduty-rs = { version = "0.1.6", default-features = false, features = ["async", "rustls", "sync"] } pagerduty-rs = { version = "0.1.6", default-features = false, features = ["async", "rustls", "sync"] }
parking_lot = { version = "0.12.1", features = ["arc_lock", "nightly", "serde"] } parking_lot = { version = "0.12.1", features = ["arc_lock", "nightly", "serde"] }
prettytable = "0.10.0" prettytable = "0.10.0"
proctitle = "0.1.1" proctitle = "0.1.1"
rdkafka = { version = "0.32.2" } rdkafka = { version = "0.32.2", features = ["tracing"] }
regex = "1.8.4" regex = "1.8.4"
reqwest = { version = "0.11.18", default-features = false, features = ["deflate", "gzip", "json", "tokio-rustls"] } reqwest = { version = "0.11.18", default-features = false, features = ["deflate", "gzip", "json", "tokio-rustls"] }
rmp-serde = "1.1.1" rmp-serde = "1.1.1"
rust_decimal = { version = "1.30.0", features = ["maths"] } rust_decimal = { version = "1.30.0", features = ["maths"] }
sentry = { version = "0.31.5", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] } sentry = { version = "0.31.5", default-features = false, features = ["anyhow", "backtrace", "contexts", "panic", "reqwest", "rustls", "serde_json", "tracing"] }
sentry-tracing = "0.31.5"
serde = { version = "1.0.164", features = [] } serde = { version = "1.0.164" }
serde_json = { version = "1.0.97", default-features = false, features = ["alloc", "raw_value"] } serde_json = { version = "1.0.97", default-features = false, features = ["raw_value"] }
serde_prometheus = "0.2.3" serde_prometheus = "0.2.3"
siwe = "0.5.2" siwe = { version = "0.5.2", features = ["serde"] }
strum = { version = "0.25.0", features = ["derive"] } strum = { version = "0.25.0", features = ["derive"] }
time = "0.3.22" time = { version = "0.3.22", features = ["serde-well-known"] }
tokio = { version = "1.28.2", features = ["full"] } tokio = { version = "1.28.2", features = ["full", "tracing"] }
tokio-console = { version = "0.1.8", optional = true } tokio-console = { version = "0.1.8", optional = true }
tokio-stream = { version = "0.1.14", features = ["sync"] } tokio-stream = { version = "0.1.14", features = ["sync"] }
tokio-uring = { version = "0.4.0", optional = true } tokio-uring = { version = "0.4.0", optional = true }
toml = "0.7.4" toml = "0.7.4"
tower = "0.4.13" tower = { version = "0.4.13", features = ["tracing"] }
tower-http = { version = "0.4.1", features = ["cors", "sensitive-headers"] } tower-http = { version = "0.4.1", features = ["cors", "sensitive-headers", "trace"] }
tracing = "0.1.37" tracing = "0.1"
tracing-subscriber = "0.3" tracing-subscriber = { version = "0.3", features = ["env-filter"] }
ulid = { version = "1.0.0", features = ["rand", "uuid", "serde"] } ulid = { version = "1.0.0", features = ["rand", "uuid", "serde"] }
url = "2.4.0" url = { version = "2.4.0", features = ["serde"] }
uuid = { version = "1.3.4", default-features = false, features = ["fast-rng", "serde", "v4", "zerocopy"] } uuid = { version = "1.3.4", default-features = false, features = ["fast-rng", "serde", "v4", "zerocopy"] }
exponential-decay-histogram = "0.1.10"
[dev-dependencies] [dev-dependencies]
tokio = { version = "1.28.2", features = ["full", "test-util"] } tokio = { version = "1.28.2", features = ["full", "test-util"] }

@ -10,10 +10,10 @@ use axum::{
use entities::{admin, login, user, user_tier}; use entities::{admin, login, user, user_tier};
use ethers::prelude::Address; use ethers::prelude::Address;
use hashbrown::HashMap; use hashbrown::HashMap;
use log::{info, trace};
use migration::sea_orm::{ use migration::sea_orm::{
self, ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter, self, ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter,
}; };
use tracing::{info, trace};
// TODO: Add some logic to check if the operating user is an admin // TODO: Add some logic to check if the operating user is an admin
// If he is, return true // If he is, return true

@ -37,7 +37,6 @@ use ethers::utils::rlp::{Decodable, Rlp};
use futures::future::join_all; use futures::future::join_all;
use futures::stream::{FuturesUnordered, StreamExt}; use futures::stream::{FuturesUnordered, StreamExt};
use hashbrown::{HashMap, HashSet}; use hashbrown::{HashMap, HashSet};
use log::{error, info, trace, warn, Level};
use migration::sea_orm::{DatabaseTransaction, EntityTrait, PaginatorTrait, TransactionTrait}; use migration::sea_orm::{DatabaseTransaction, EntityTrait, PaginatorTrait, TransactionTrait};
use moka::future::{Cache, CacheBuilder}; use moka::future::{Cache, CacheBuilder};
use parking_lot::RwLock; use parking_lot::RwLock;
@ -55,6 +54,7 @@ use std::time::Duration;
use tokio::sync::{broadcast, watch, Semaphore}; use tokio::sync::{broadcast, watch, Semaphore};
use tokio::task::JoinHandle; use tokio::task::JoinHandle;
use tokio::time::timeout; use tokio::time::timeout;
use tracing::{error, info, trace, warn, Level};
// TODO: make this customizable? // TODO: make this customizable?
// TODO: include GIT_REF in here. i had trouble getting https://docs.rs/vergen/latest/vergen/ to work with a workspace. also .git is in .dockerignore // TODO: include GIT_REF in here. i had trouble getting https://docs.rs/vergen/latest/vergen/ to work with a workspace. also .git is in .dockerignore
@ -1072,7 +1072,7 @@ impl Web3ProxyApp {
None, None,
None, None,
Some(Duration::from_secs(30)), Some(Duration::from_secs(30)),
Some(Level::Trace.into()), Some(Level::TRACE.into()),
None, None,
true, true,
) )
@ -1103,7 +1103,7 @@ impl Web3ProxyApp {
None, None,
None, None,
Some(Duration::from_secs(30)), Some(Duration::from_secs(30)),
Some(Level::Trace.into()), Some(Level::TRACE.into()),
num_public_rpcs, num_public_rpcs,
true, true,
) )

@ -14,12 +14,12 @@ use futures::future::AbortHandle;
use futures::future::Abortable; use futures::future::Abortable;
use futures::stream::StreamExt; use futures::stream::StreamExt;
use http::StatusCode; use http::StatusCode;
use log::{error, trace};
use serde_json::json; use serde_json::json;
use std::sync::atomic::{self, AtomicU64}; use std::sync::atomic::{self, AtomicU64};
use std::sync::Arc; use std::sync::Arc;
use tokio::time::Instant; use tokio::time::Instant;
use tokio_stream::wrappers::{BroadcastStream, WatchStream}; use tokio_stream::wrappers::{BroadcastStream, WatchStream};
use tracing::{error, trace};
impl Web3ProxyApp { impl Web3ProxyApp {
pub async fn eth_subscribe<'a>( pub async fn eth_subscribe<'a>(

@ -5,14 +5,14 @@ use argh::FromArgs;
use chrono::Utc; use chrono::Utc;
use ethers::types::U64; use ethers::types::U64;
use ethers::types::{Block, TxHash}; use ethers::types::{Block, TxHash};
use log::info;
use log::warn;
use reqwest::Client; use reqwest::Client;
use serde::Deserialize; use serde::Deserialize;
use serde_json::json; use serde_json::json;
use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::atomic::{AtomicU32, Ordering};
use tokio::time::sleep; use tokio::time::sleep;
use tokio::time::Duration; use tokio::time::Duration;
use tracing::info;
use tracing::warn;
#[derive(Debug, FromArgs)] #[derive(Debug, FromArgs)]
/// Command line interface for admins to interact with web3_proxy /// Command line interface for admins to interact with web3_proxy
@ -43,7 +43,7 @@ async fn main() -> anyhow::Result<()> {
std::env::set_var("RUST_LOG", "wait_for_sync=debug"); std::env::set_var("RUST_LOG", "wait_for_sync=debug");
} }
env_logger::init(); // todo!("set up tracing");
// this probably won't matter for us in docker, but better safe than sorry // this probably won't matter for us in docker, but better safe than sorry
fdlimit::raise_fd_limit(); fdlimit::raise_fd_limit();

@ -2,11 +2,11 @@ use anyhow::Context;
use argh::FromArgs; use argh::FromArgs;
use entities::{admin, login, user}; use entities::{admin, login, user};
use ethers::types::Address; use ethers::types::Address;
use log::{debug, info};
use migration::sea_orm::{ use migration::sea_orm::{
self, ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, ModelTrait, QueryFilter, self, ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, ModelTrait, QueryFilter,
}; };
use serde_json::json; use serde_json::json;
use tracing::{debug, info};
/// change a user's admin status. eiter they are an admin, or they aren't /// change a user's admin status. eiter they are an admin, or they aren't
#[derive(FromArgs, PartialEq, Eq, Debug)] #[derive(FromArgs, PartialEq, Eq, Debug)]

@ -2,12 +2,12 @@ use anyhow::Context;
use argh::FromArgs; use argh::FromArgs;
use entities::user; use entities::user;
use ethers::types::Address; use ethers::types::Address;
use log::{debug, info};
use migration::sea_orm::{ use migration::sea_orm::{
self, ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, IntoActiveModel, self, ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, IntoActiveModel,
QueryFilter, QueryFilter,
}; };
use serde_json::json; use serde_json::json;
use tracing::{debug, info};
/// change a user's address. /// change a user's address.
#[derive(FromArgs, PartialEq, Eq, Debug)] #[derive(FromArgs, PartialEq, Eq, Debug)]

@ -1,12 +1,12 @@
use anyhow::Context; use anyhow::Context;
use argh::FromArgs; use argh::FromArgs;
use entities::user_tier; use entities::user_tier;
use log::{debug, info};
use migration::sea_orm::{ use migration::sea_orm::{
self, ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, IntoActiveModel, self, ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, IntoActiveModel,
QueryFilter, QueryFilter,
}; };
use serde_json::json; use serde_json::json;
use tracing::{debug, info};
/// change a user's tier. /// change a user's tier.
#[derive(FromArgs, PartialEq, Eq, Debug)] #[derive(FromArgs, PartialEq, Eq, Debug)]

@ -2,12 +2,12 @@ use anyhow::Context;
use argh::FromArgs; use argh::FromArgs;
use entities::{user, user_tier}; use entities::{user, user_tier};
use ethers::types::Address; use ethers::types::Address;
use log::{debug, info};
use migration::sea_orm::{ use migration::sea_orm::{
self, ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, IntoActiveModel, self, ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, IntoActiveModel,
QueryFilter, QueryFilter,
}; };
use serde_json::json; use serde_json::json;
use tracing::{debug, info};
/// change a user's tier. /// change a user's tier.
#[derive(FromArgs, PartialEq, Eq, Debug)] #[derive(FromArgs, PartialEq, Eq, Debug)]

@ -1,12 +1,12 @@
use anyhow::Context; use anyhow::Context;
use argh::FromArgs; use argh::FromArgs;
use entities::{rpc_key, user, user_tier}; use entities::{rpc_key, user, user_tier};
use log::{debug, info};
use migration::sea_orm::{ use migration::sea_orm::{
self, ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, IntoActiveModel, self, ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, IntoActiveModel,
QueryFilter, QueryFilter,
}; };
use serde_json::json; use serde_json::json;
use tracing::{debug, info};
use uuid::Uuid; use uuid::Uuid;
use web3_proxy::frontend::authorization::RpcSecretKey; use web3_proxy::frontend::authorization::RpcSecretKey;

@ -1,6 +1,6 @@
use argh::FromArgs; use argh::FromArgs;
use log::{error, info, warn};
use std::fs; use std::fs;
use tracing::{error, info, warn};
use web3_proxy::config::TopConfig; use web3_proxy::config::TopConfig;
#[derive(FromArgs, PartialEq, Eq, Debug)] #[derive(FromArgs, PartialEq, Eq, Debug)]

@ -1,7 +1,7 @@
use argh::FromArgs; use argh::FromArgs;
use entities::user; use entities::user;
use log::info;
use migration::sea_orm::{self, EntityTrait, PaginatorTrait}; use migration::sea_orm::{self, EntityTrait, PaginatorTrait};
use tracing::info;
#[derive(FromArgs, PartialEq, Debug, Eq)] #[derive(FromArgs, PartialEq, Debug, Eq)]
/// Create a new user and api key /// Create a new user and api key

@ -2,8 +2,8 @@ use anyhow::Context;
use argh::FromArgs; use argh::FromArgs;
use entities::{rpc_key, user}; use entities::{rpc_key, user};
use ethers::prelude::Address; use ethers::prelude::Address;
use log::info;
use migration::sea_orm::{self, ActiveModelTrait, ColumnTrait, EntityTrait, QueryFilter}; use migration::sea_orm::{self, ActiveModelTrait, ColumnTrait, EntityTrait, QueryFilter};
use tracing::info;
use ulid::Ulid; use ulid::Ulid;
use uuid::Uuid; use uuid::Uuid;
use web3_proxy::frontend::authorization::RpcSecretKey; use web3_proxy::frontend::authorization::RpcSecretKey;

@ -2,8 +2,8 @@ use anyhow::Context;
use argh::FromArgs; use argh::FromArgs;
use entities::{rpc_key, user}; use entities::{rpc_key, user};
use ethers::prelude::Address; use ethers::prelude::Address;
use log::info;
use migration::sea_orm::{self, ActiveModelTrait, TransactionTrait}; use migration::sea_orm::{self, ActiveModelTrait, TransactionTrait};
use tracing::info;
use ulid::Ulid; use ulid::Ulid;
use uuid::Uuid; use uuid::Uuid;
use web3_proxy::frontend::authorization::RpcSecretKey; use web3_proxy::frontend::authorization::RpcSecretKey;

@ -23,15 +23,17 @@ mod user_import;
use anyhow::Context; use anyhow::Context;
use argh::FromArgs; use argh::FromArgs;
use ethers::types::U256; use ethers::types::U256;
use log::{info, warn};
use pagerduty_rs::eventsv2async::EventsV2 as PagerdutyAsyncEventsV2; use pagerduty_rs::eventsv2async::EventsV2 as PagerdutyAsyncEventsV2;
use pagerduty_rs::eventsv2sync::EventsV2 as PagerdutySyncEventsV2; use pagerduty_rs::eventsv2sync::EventsV2 as PagerdutySyncEventsV2;
use sentry::types::Dsn;
use std::{ use std::{
fs, panic, fs, panic,
path::Path, path::Path,
sync::atomic::{self, AtomicUsize}, sync::atomic::{self, AtomicUsize},
}; };
use tokio::runtime; use tokio::runtime;
use tracing::{info, warn};
use tracing_subscriber::{prelude::*, EnvFilter};
use web3_proxy::pagerduty::panic_handler; use web3_proxy::pagerduty::panic_handler;
use web3_proxy::{ use web3_proxy::{
app::APP_USER_AGENT, app::APP_USER_AGENT,
@ -66,7 +68,7 @@ pub struct Web3ProxyCli {
/// if no config, what sentry url should the client should connect to /// if no config, what sentry url should the client should connect to
#[argh(option)] #[argh(option)]
pub sentry_url: Option<String>, pub sentry_url: Option<Dsn>,
/// this one cli can do multiple things /// this one cli can do multiple things
#[argh(subcommand)] #[argh(subcommand)]
@ -126,11 +128,11 @@ fn main() -> anyhow::Result<()> {
}); });
} }
// if RUST_LOG isn't set, configure a default // TODO: can we run tokio_console and have our normal logs?
// TODO: is there a better way to do this?
#[cfg(feature = "tokio_console")] #[cfg(feature = "tokio_console")]
console_subscriber::init(); console_subscriber::init();
// if RUST_LOG isn't set, configure a default
#[cfg(not(feature = "tokio_console"))] #[cfg(not(feature = "tokio_console"))]
let rust_log = match std::env::var("RUST_LOG") { let rust_log = match std::env::var("RUST_LOG") {
Ok(x) => x, Ok(x) => x,
@ -213,44 +215,36 @@ fn main() -> anyhow::Result<()> {
(None, None) (None, None)
}; };
removed:

{
    let logger = env_logger::builder().parse_filters(&rust_log).build();

    let max_level = logger.filter();

    // connect to sentry for error reporting
    // if no sentry, only log to stdout
    let _sentry_guard = if let Some(sentry_url) = cli_config.sentry_url.clone() {
        let logger = sentry::integrations::log::SentryLogger::with_dest(logger);

        log::set_boxed_logger(Box::new(logger)).unwrap();

        let guard = sentry::init((
            sentry_url,
            sentry::ClientOptions {
                release: sentry::release_name!(),
                // TODO: Set this a to lower value (from config) in production
                traces_sample_rate: 1.0,
                ..Default::default()
            },
        ));

        Some(guard)
    } else {
        log::set_boxed_logger(Box::new(logger)).unwrap();

        None
    };

    log::set_max_level(max_level);

    info!("RUST_LOG={}", rust_log);
}

info!("{}", APP_USER_AGENT);

added:

// set up sentry connection
// this guard does nothing is sentry_url is None
let _sentry_guard = sentry::init(sentry::ClientOptions {
    dsn: cli_config.sentry_url.clone(),
    release: sentry::release_name!(),
    // Enable capturing of traces
    // TODO: make this configurable!
    traces_sample_rate: 0.01,
    ..Default::default()
});

tracing_subscriber::fmt()
    // create a subscriber that uses the RUST_LOG env var for filtering levels
    .with_env_filter(EnvFilter::builder().parse(rust_log)?)
    // .with_env_filter(EnvFilter::from_default_env())
    // print a pretty output to the terminal
    // TODO: this might be too verbose. have a config setting for this, too
    .pretty()
    // the root subscriber is ready
    .finish()
    // attach tracing layer.
    .with(sentry_tracing::layer())
    // register as the default global subscriber
    .init();

info!(%APP_USER_AGENT);
// optionally connect to pagerduty // optionally connect to pagerduty
// TODO: fix this nested result // TODO: fix this nested result
// TODO: get this out of the config file instead of the environment
let (pagerduty_async, pagerduty_sync) = if let Ok(pagerduty_key) = let (pagerduty_async, pagerduty_sync) = if let Ok(pagerduty_key) =
std::env::var("PAGERDUTY_INTEGRATION_KEY") std::env::var("PAGERDUTY_INTEGRATION_KEY")
{ {

@ -3,7 +3,6 @@ use argh::FromArgs;
use entities::{rpc_accounting, rpc_key}; use entities::{rpc_accounting, rpc_key};
use futures::stream::FuturesUnordered; use futures::stream::FuturesUnordered;
use futures::StreamExt; use futures::StreamExt;
use log::{error, info};
use migration::sea_orm::QueryOrder; use migration::sea_orm::QueryOrder;
use migration::sea_orm::{ use migration::sea_orm::{
ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter, QuerySelect, UpdateResult, ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter, QuerySelect, UpdateResult,
@ -14,6 +13,7 @@ use std::num::NonZeroU64;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::broadcast; use tokio::sync::broadcast;
use tokio::time::Instant; use tokio::time::Instant;
use tracing::{error, info};
use ulid::Ulid; use ulid::Ulid;
use web3_proxy::app::BILLING_PERIOD_SECONDS; use web3_proxy::app::BILLING_PERIOD_SECONDS;
use web3_proxy::config::TopConfig; use web3_proxy::config::TopConfig;

@ -1,7 +1,7 @@
use argh::FromArgs; use argh::FromArgs;
use log::{error, info};
use pagerduty_rs::{eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, types::Event}; use pagerduty_rs::{eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, types::Event};
use serde_json::json; use serde_json::json;
use tracing::{error, info};
use web3_proxy::{ use web3_proxy::{
config::TopConfig, config::TopConfig,
pagerduty::{pagerduty_alert, pagerduty_alert_for_config}, pagerduty::{pagerduty_alert, pagerduty_alert_for_config},

@ -1,12 +1,12 @@
#![forbid(unsafe_code)] #![forbid(unsafe_code)]
use argh::FromArgs; use argh::FromArgs;
use futures::StreamExt; use futures::StreamExt;
use log::{error, info, trace, warn};
use num::Zero; use num::Zero;
use std::path::PathBuf; use std::path::PathBuf;
use std::time::Duration; use std::time::Duration;
use std::{fs, thread}; use std::{fs, thread};
use tokio::sync::broadcast; use tokio::sync::broadcast;
use tracing::{error, info, trace, warn};
use web3_proxy::app::{flatten_handle, flatten_handles, Web3ProxyApp}; use web3_proxy::app::{flatten_handle, flatten_handles, Web3ProxyApp};
use web3_proxy::config::TopConfig; use web3_proxy::config::TopConfig;
use web3_proxy::{frontend, prometheus}; use web3_proxy::{frontend, prometheus};
@ -280,7 +280,8 @@ mod tests {
"info,ethers_providers::rpc=off,web3_proxy=debug", "info,ethers_providers::rpc=off,web3_proxy=debug",
); );
let _ = env_logger::builder().is_test(true).try_init(); // TODO: how should we do test logging setup with tracing?
// let _ = env_logger::builder().is_test(true).try_init();
let anvil = Anvil::new().spawn(); let anvil = Anvil::new().spawn();

@ -3,7 +3,6 @@ use anyhow::Context;
use argh::FromArgs; use argh::FromArgs;
use entities::{rpc_accounting, rpc_key, user}; use entities::{rpc_accounting, rpc_key, user};
use ethers::types::Address; use ethers::types::Address;
use log::info;
use migration::{ use migration::{
sea_orm::{ sea_orm::{
self, self,
@ -14,6 +13,7 @@ use migration::{
}; };
use serde::Serialize; use serde::Serialize;
use serde_json::json; use serde_json::json;
use tracing::info;
/// count requests /// count requests
#[derive(FromArgs, PartialEq, Debug, Eq)] #[derive(FromArgs, PartialEq, Debug, Eq)]

@ -2,13 +2,13 @@ use anyhow::Context;
use argh::FromArgs; use argh::FromArgs;
use entities::rpc_key; use entities::rpc_key;
use futures::TryStreamExt; use futures::TryStreamExt;
use log::info;
use migration::sea_orm::{ColumnTrait, EntityTrait, QueryFilter}; use migration::sea_orm::{ColumnTrait, EntityTrait, QueryFilter};
use rdkafka::{ use rdkafka::{
consumer::{Consumer, StreamConsumer}, consumer::{Consumer, StreamConsumer},
ClientConfig, Message, ClientConfig, Message,
}; };
use std::num::NonZeroU64; use std::num::NonZeroU64;
use tracing::info;
use uuid::Uuid; use uuid::Uuid;
use web3_proxy::{config::TopConfig, frontend::authorization::RpcSecretKey, relational_db::get_db}; use web3_proxy::{config::TopConfig, frontend::authorization::RpcSecretKey, relational_db::get_db};

@ -2,9 +2,9 @@ use anyhow::{anyhow, Context};
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use ethers::types::{Block, TxHash, H256}; use ethers::types::{Block, TxHash, H256};
use futures::{stream::FuturesUnordered, StreamExt}; use futures::{stream::FuturesUnordered, StreamExt};
use log::{debug, warn};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use serde_json::json; use serde_json::json;
use tracing::{debug, warn};
use web3_proxy::jsonrpc::JsonRpcErrorData; use web3_proxy::jsonrpc::JsonRpcErrorData;
use super::{SentrydErrorBuilder, SentrydResult}; use super::{SentrydErrorBuilder, SentrydResult};

@ -7,12 +7,12 @@ use futures::{
stream::{FuturesUnordered, StreamExt}, stream::{FuturesUnordered, StreamExt},
Future, Future,
}; };
use log::{error, info};
use pagerduty_rs::{eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, types::Event}; use pagerduty_rs::{eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, types::Event};
use serde_json::json; use serde_json::json;
use std::time::Duration; use std::time::Duration;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use tokio::time::{interval, MissedTickBehavior}; use tokio::time::{interval, MissedTickBehavior};
use tracing::{debug, error, info, warn, Level};
use web3_proxy::{config::TopConfig, pagerduty::pagerduty_alert}; use web3_proxy::{config::TopConfig, pagerduty::pagerduty_alert};
#[derive(FromArgs, PartialEq, Debug, Eq)] #[derive(FromArgs, PartialEq, Debug, Eq)]
@ -53,7 +53,7 @@ pub struct SentrydError {
/// The class/type of the event, for example ping failure or cpu load /// The class/type of the event, for example ping failure or cpu load
class: String, class: String,
/// Errors will send a pagerduty alert. others just give log messages /// Errors will send a pagerduty alert. others just give log messages
level: log::Level, level: Level,
/// A short summary that should be mostly static /// A short summary that should be mostly static
summary: String, summary: String,
/// Lots of detail about the error /// Lots of detail about the error
@ -64,7 +64,7 @@ pub struct SentrydError {
#[derive(Clone)] #[derive(Clone)]
pub struct SentrydErrorBuilder { pub struct SentrydErrorBuilder {
class: String, class: String,
level: log::Level, level: Level,
} }
impl SentrydErrorBuilder { impl SentrydErrorBuilder {
@ -125,9 +125,9 @@ impl SentrydSubCommand {
} }
while let Some(err) = error_receiver.recv().await { while let Some(err) = error_receiver.recv().await {
log::log!(err.level, "check failed: {:#?}", err); if matches!(err.level, Level::ERROR) {
warn!("check failed: {:#?}", err);
if matches!(err.level, log::Level::Error) {
let alert = pagerduty_alert( let alert = pagerduty_alert(
Some(chain_id), Some(chain_id),
Some(err.class), Some(err.class),
@ -150,6 +150,8 @@ impl SentrydSubCommand {
error!("Failed sending to pagerduty: {:#?}", err); error!("Failed sending to pagerduty: {:#?}", err);
} }
} }
} else {
debug!("check failed ({:?}): {:#?}", err.level, err);
} }
} }
@ -178,7 +180,7 @@ impl SentrydSubCommand {
let loop_f = a_loop( let loop_f = a_loop(
"main /health", "main /health",
seconds, seconds,
log::Level::Error, Level::ERROR,
error_sender, error_sender,
move |error_builder| simple::main(error_builder, url.clone(), timeout), move |error_builder| simple::main(error_builder, url.clone(), timeout),
); );
@ -203,7 +205,7 @@ impl SentrydSubCommand {
let loop_f = a_loop( let loop_f = a_loop(
"other /health", "other /health",
seconds, seconds,
log::Level::Warn, Level::WARN,
error_sender, error_sender,
move |error_builder| simple::main(error_builder, url.clone(), timeout), move |error_builder| simple::main(error_builder, url.clone(), timeout),
); );
@ -225,7 +227,7 @@ impl SentrydSubCommand {
let loop_f = a_loop( let loop_f = a_loop(
"head block comparison", "head block comparison",
seconds, seconds,
log::Level::Error, Level::ERROR,
error_sender, error_sender,
move |error_builder| { move |error_builder| {
compare::main( compare::main(
@ -254,7 +256,7 @@ impl SentrydSubCommand {
async fn a_loop<T>( async fn a_loop<T>(
class: &str, class: &str,
seconds: u64, seconds: u64,
error_level: log::Level, error_level: Level,
error_sender: mpsc::Sender<SentrydError>, error_sender: mpsc::Sender<SentrydError>,
f: impl Fn(SentrydErrorBuilder) -> T, f: impl Fn(SentrydErrorBuilder) -> T,
) -> anyhow::Result<()> ) -> anyhow::Result<()>
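
The sentryd error type above switches its level field from log::Level to tracing::Level, whose levels are associated constants (Level::ERROR) rather than enum variants (log::Level::Error). A hedged sketch of the mapping, assuming some code still receives log::Level values and both crates are available (this helper is not part of the repo):

use tracing::Level;

// convert a log crate level to the equivalent tracing level
fn to_tracing_level(level: log::Level) -> Level {
    match level {
        log::Level::Error => Level::ERROR,
        log::Level::Warn => Level::WARN,
        log::Level::Info => Level::INFO,
        log::Level::Debug => Level::DEBUG,
        log::Level::Trace => Level::TRACE,
    }
}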

@ -2,8 +2,8 @@ use std::time::Duration;
use super::{SentrydErrorBuilder, SentrydResult}; use super::{SentrydErrorBuilder, SentrydResult};
use anyhow::Context; use anyhow::Context;
use log::{debug, trace};
use tokio::time::Instant; use tokio::time::Instant;
use tracing::{debug, trace};
/// GET the url and return an error if it wasn't a success /// GET the url and return an error if it wasn't a success
pub async fn main( pub async fn main(

@ -2,12 +2,12 @@ use anyhow::Context;
use argh::FromArgs; use argh::FromArgs;
use entities::{rpc_key, user}; use entities::{rpc_key, user};
use ethers::types::Address; use ethers::types::Address;
use log::{debug, info};
use migration::sea_orm::{ use migration::sea_orm::{
self, ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, IntoActiveModel, self, ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, IntoActiveModel,
QueryFilter, QueryFilter,
}; };
use sea_orm::prelude::Uuid; use sea_orm::prelude::Uuid;
use tracing::{debug, info};
use web3_proxy::frontend::authorization::RpcSecretKey; use web3_proxy::frontend::authorization::RpcSecretKey;
/// change a key's owner. /// change a key's owner.

@ -1,9 +1,9 @@
use argh::FromArgs; use argh::FromArgs;
use entities::{rpc_key, user}; use entities::{rpc_key, user};
use log::info;
use migration::sea_orm::{DatabaseConnection, EntityTrait, PaginatorTrait}; use migration::sea_orm::{DatabaseConnection, EntityTrait, PaginatorTrait};
use std::fs::{self, create_dir_all}; use std::fs::{self, create_dir_all};
use std::path::Path; use std::path::Path;
use tracing::info;
#[derive(FromArgs, PartialEq, Eq, Debug)] #[derive(FromArgs, PartialEq, Eq, Debug)]
/// Export users from the database. /// Export users from the database.

@ -3,7 +3,6 @@ use argh::FromArgs;
use entities::{rpc_key, user}; use entities::{rpc_key, user};
use glob::glob; use glob::glob;
use hashbrown::HashMap; use hashbrown::HashMap;
use log::{info, warn};
use migration::sea_orm::ActiveValue::NotSet; use migration::sea_orm::ActiveValue::NotSet;
use migration::sea_orm::{ use migration::sea_orm::{
ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, IntoActiveModel, QueryFilter, ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, IntoActiveModel, QueryFilter,
@ -11,6 +10,7 @@ use migration::sea_orm::{
}; };
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::{fs::File, io::BufReader}; use std::{fs::File, io::BufReader};
use tracing::{info, warn};
#[derive(FromArgs, PartialEq, Eq, Debug)] #[derive(FromArgs, PartialEq, Eq, Debug)]
/// Import users from another database. /// Import users from another database.

@ -5,9 +5,9 @@ use ethers::{
prelude::{BlockNumber, U64}, prelude::{BlockNumber, U64},
types::H256, types::H256,
}; };
use log::{trace, warn};
use serde_json::json; use serde_json::json;
use std::sync::Arc; use std::sync::Arc;
use tracing::{trace, warn};
use crate::{frontend::authorization::Authorization, rpcs::many::Web3Rpcs}; use crate::{frontend::authorization::Authorization, rpcs::many::Web3Rpcs};

@ -6,9 +6,9 @@
//! TODO: pricing on compute units //! TODO: pricing on compute units
//! TODO: script that queries influx and calculates observed relative costs //! TODO: script that queries influx and calculates observed relative costs
use log::warn;
use migration::sea_orm::prelude::Decimal; use migration::sea_orm::prelude::Decimal;
use std::str::FromStr; use std::str::FromStr;
use tracing::warn;
pub struct ComputeUnit(Decimal); pub struct ComputeUnit(Decimal);

@ -5,11 +5,12 @@ use argh::FromArgs;
use ethers::prelude::{Address, TxHash, H256}; use ethers::prelude::{Address, TxHash, H256};
use ethers::types::{U256, U64}; use ethers::types::{U256, U64};
use hashbrown::HashMap; use hashbrown::HashMap;
use log::warn;
use migration::sea_orm::DatabaseConnection; use migration::sea_orm::DatabaseConnection;
use sentry::types::Dsn;
use serde::Deserialize; use serde::Deserialize;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use tracing::warn;
pub type BlockAndRpc = (Option<Web3ProxyBlock>, Arc<Web3Rpc>); pub type BlockAndRpc = (Option<Web3ProxyBlock>, Arc<Web3Rpc>);
pub type TxHashAndRpc = (TxHash, Arc<Web3Rpc>); pub type TxHashAndRpc = (TxHash, Arc<Web3Rpc>);
@ -165,7 +166,7 @@ pub struct AppConfig {
pub redirect_rpc_key_url: Option<String>, pub redirect_rpc_key_url: Option<String>,
/// Optionally send errors to <https://sentry.io> /// Optionally send errors to <https://sentry.io>
pub sentry_url: Option<String>, pub sentry_url: Option<Dsn>,
/// Track rate limits in a redis (or compatible backend) /// Track rate limits in a redis (or compatible backend)
/// It is okay if this data is lost. /// It is okay if this data is lost.

@ -15,7 +15,6 @@ use derive_more::{Display, Error, From};
use ethers::prelude::ContractError; use ethers::prelude::ContractError;
use http::header::InvalidHeaderValue; use http::header::InvalidHeaderValue;
use ipnet::AddrParseError; use ipnet::AddrParseError;
use log::{debug, error, trace, warn};
use migration::sea_orm::DbErr; use migration::sea_orm::DbErr;
use redis_rate_limiter::redis::RedisError; use redis_rate_limiter::redis::RedisError;
use reqwest::header::ToStrError; use reqwest::header::ToStrError;
@ -25,6 +24,7 @@ use serde_json::value::RawValue;
use std::sync::Arc; use std::sync::Arc;
use std::{borrow::Cow, net::IpAddr}; use std::{borrow::Cow, net::IpAddr};
use tokio::{sync::AcquireError, task::JoinError, time::Instant}; use tokio::{sync::AcquireError, task::JoinError, time::Instant};
use tracing::{debug, error, trace, warn};
pub type Web3ProxyResult<T> = Result<T, Web3ProxyError>; pub type Web3ProxyResult<T> = Result<T, Web3ProxyError>;
// TODO: take "IntoResponse" instead of Response? // TODO: take "IntoResponse" instead of Response?

@ -24,7 +24,6 @@ use entities::{
use ethers::{prelude::Address, types::Bytes}; use ethers::{prelude::Address, types::Bytes};
use hashbrown::HashMap; use hashbrown::HashMap;
use http::StatusCode; use http::StatusCode;
use log::{debug, info, warn};
use migration::sea_orm::prelude::{Decimal, Uuid}; use migration::sea_orm::prelude::{Decimal, Uuid};
use migration::sea_orm::{ use migration::sea_orm::{
self, ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter, self, ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter,
@ -36,6 +35,7 @@ use std::ops::Add;
use std::str::FromStr; use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use time::{Duration, OffsetDateTime}; use time::{Duration, OffsetDateTime};
use tracing::{debug, info, warn};
use ulid::Ulid; use ulid::Ulid;
/// `GET /admin/increase_balance` -- As an admin, modify a user's user-tier /// `GET /admin/increase_balance` -- As an admin, modify a user's user-tier

@ -21,7 +21,6 @@ use futures::TryFutureExt;
use hashbrown::HashMap; use hashbrown::HashMap;
use http::HeaderValue; use http::HeaderValue;
use ipnet::IpNet; use ipnet::IpNet;
use log::{error, trace, warn};
use migration::sea_orm::prelude::Decimal; use migration::sea_orm::prelude::Decimal;
use migration::sea_orm::{self, ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter}; use migration::sea_orm::{self, ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};
use migration::{Expr, OnConflict}; use migration::{Expr, OnConflict};
@ -43,6 +42,7 @@ use std::{net::IpAddr, str::FromStr, sync::Arc};
use tokio::sync::{OwnedSemaphorePermit, Semaphore}; use tokio::sync::{OwnedSemaphorePermit, Semaphore};
use tokio::task::JoinHandle; use tokio::task::JoinHandle;
use tokio::time::Instant; use tokio::time::Instant;
use tracing::{error, trace, warn};
use ulid::Ulid; use ulid::Ulid;
use uuid::Uuid; use uuid::Uuid;

@ -17,7 +17,6 @@ use axum::{
}; };
use http::{header::AUTHORIZATION, StatusCode}; use http::{header::AUTHORIZATION, StatusCode};
use listenfd::ListenFd; use listenfd::ListenFd;
use log::info;
use moka::future::{Cache, CacheBuilder}; use moka::future::{Cache, CacheBuilder};
use std::net::SocketAddr; use std::net::SocketAddr;
use std::sync::Arc; use std::sync::Arc;
@ -26,6 +25,7 @@ use strum::{EnumCount, EnumIter};
use tokio::sync::broadcast; use tokio::sync::broadcast;
use tower_http::cors::CorsLayer; use tower_http::cors::CorsLayer;
use tower_http::sensitive_headers::SetSensitiveRequestHeadersLayer; use tower_http::sensitive_headers::SetSensitiveRequestHeadersLayer;
use tracing::info;
use crate::errors::Web3ProxyResult; use crate::errors::Web3ProxyResult;

@ -29,13 +29,13 @@ use futures::{
use handlebars::Handlebars; use handlebars::Handlebars;
use hashbrown::HashMap; use hashbrown::HashMap;
use http::{HeaderMap, StatusCode}; use http::{HeaderMap, StatusCode};
use log::{info, trace};
use serde_json::json; use serde_json::json;
use std::net::IpAddr; use std::net::IpAddr;
use std::str::from_utf8_mut; use std::str::from_utf8_mut;
use std::sync::atomic::AtomicU64; use std::sync::atomic::AtomicU64;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::{broadcast, OwnedSemaphorePermit, RwLock}; use tokio::sync::{broadcast, OwnedSemaphorePermit, RwLock};
use tracing::{info, trace};
/// How to select backend servers for a request /// How to select backend servers for a request
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]

@ -15,12 +15,12 @@ use axum_client_ip::InsecureClientIp;
use axum_macros::debug_handler; use axum_macros::debug_handler;
use hashbrown::HashMap; use hashbrown::HashMap;
use http::HeaderMap; use http::HeaderMap;
use log::trace;
use moka::future::Cache; use moka::future::Cache;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use serde::{ser::SerializeStruct, Serialize}; use serde::{ser::SerializeStruct, Serialize};
use serde_json::json; use serde_json::json;
use std::sync::Arc; use std::sync::Arc;
use tracing::trace;
static HEALTH_OK: Lazy<Bytes> = Lazy::new(|| Bytes::from("OK\n")); static HEALTH_OK: Lazy<Bytes> = Lazy::new(|| Bytes::from("OK\n"));
static HEALTH_NOT_OK: Lazy<Bytes> = Lazy::new(|| Bytes::from(":(\n")); static HEALTH_NOT_OK: Lazy<Bytes> = Lazy::new(|| Bytes::from(":(\n"));

@ -18,7 +18,6 @@ use entities::{balance, login, pending_login, referee, referrer, rpc_key, user};
use ethers::{prelude::Address, types::Bytes}; use ethers::{prelude::Address, types::Bytes};
use hashbrown::HashMap; use hashbrown::HashMap;
use http::StatusCode; use http::StatusCode;
use log::{trace, warn};
use migration::sea_orm::prelude::{Decimal, Uuid}; use migration::sea_orm::prelude::{Decimal, Uuid};
use migration::sea_orm::{ use migration::sea_orm::{
self, ActiveModelTrait, ColumnTrait, DatabaseTransaction, EntityTrait, IntoActiveModel, self, ActiveModelTrait, ColumnTrait, DatabaseTransaction, EntityTrait, IntoActiveModel,
@ -30,6 +29,7 @@ use std::ops::Add;
use std::str::FromStr; use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use time::{Duration, OffsetDateTime}; use time::{Duration, OffsetDateTime};
use tracing::{trace, warn};
use ulid::Ulid; use ulid::Ulid;
/// `GET /user/login/:user_address` or `GET /user/login/:user_address/:message_eip` -- Start the "Sign In with Ethereum" (siwe) login flow. /// `GET /user/login/:user_address` or `GET /user/login/:user_address/:message_eip` -- Start the "Sign In with Ethereum" (siwe) login flow.

@ -18,7 +18,6 @@ use ethers::abi::AbiEncode;
use ethers::types::{Address, Block, TransactionReceipt, TxHash, H256}; use ethers::types::{Address, Block, TransactionReceipt, TxHash, H256};
use hashbrown::{HashMap, HashSet}; use hashbrown::{HashMap, HashSet};
use http::StatusCode; use http::StatusCode;
use log::{debug, info, trace};
use migration::sea_orm::prelude::Decimal; use migration::sea_orm::prelude::Decimal;
use migration::sea_orm::{ use migration::sea_orm::{
self, ActiveModelTrait, ActiveValue, ColumnTrait, EntityTrait, IntoActiveModel, ModelTrait, self, ActiveModelTrait, ActiveValue, ColumnTrait, EntityTrait, IntoActiveModel, ModelTrait,
@ -30,6 +29,7 @@ use payment_contracts::payment_factory::{self, PaymentFactory};
use serde_json::json; use serde_json::json;
use std::num::NonZeroU64; use std::num::NonZeroU64;
use std::sync::Arc; use std::sync::Arc;
use tracing::{debug, info, trace};
/// Implements any logic related to payments /// Implements any logic related to payments
/// Removed this mainly from "user" as this was getting clogged /// Removed this mainly from "user" as this was getting clogged

@ -15,7 +15,6 @@ use entities::{balance, rpc_key, secondary_user, user};
use ethers::types::Address; use ethers::types::Address;
use hashbrown::HashMap; use hashbrown::HashMap;
use http::StatusCode; use http::StatusCode;
use log::trace;
use migration::sea_orm; use migration::sea_orm;
use migration::sea_orm::ActiveModelTrait; use migration::sea_orm::ActiveModelTrait;
use migration::sea_orm::ColumnTrait; use migration::sea_orm::ColumnTrait;
@ -25,6 +24,7 @@ use migration::sea_orm::QueryFilter;
use migration::sea_orm::TransactionTrait; use migration::sea_orm::TransactionTrait;
use serde_json::json; use serde_json::json;
use std::sync::Arc; use std::sync::Arc;
use tracing::trace;
use ulid::{self, Ulid}; use ulid::{self, Ulid};
pub async fn get_keys_as_subuser( pub async fn get_keys_as_subuser(

@ -9,9 +9,9 @@ use axum::{
use chrono::{NaiveDateTime, Utc}; use chrono::{NaiveDateTime, Utc};
use entities::login; use entities::login;
use hashbrown::HashMap; use hashbrown::HashMap;
use log::{trace, warn};
use migration::sea_orm::{ColumnTrait, EntityTrait, QueryFilter}; use migration::sea_orm::{ColumnTrait, EntityTrait, QueryFilter};
use redis_rate_limiter::{redis::AsyncCommands, RedisConnection}; use redis_rate_limiter::{redis::AsyncCommands, RedisConnection};
use tracing::{trace, warn};
/// get the attached address for the given bearer token. /// get the attached address for the given bearer token.
/// First checks redis. Then checks the database. /// First checks redis. Then checks the database.

@ -1,6 +1,4 @@
use crate::config::TopConfig; use crate::config::TopConfig;
use gethostname::gethostname;
use log::{debug, error, warn};
use pagerduty_rs::eventsv2sync::EventsV2 as PagerdutySyncEventsV2; use pagerduty_rs::eventsv2sync::EventsV2 as PagerdutySyncEventsV2;
use pagerduty_rs::types::{AlertTrigger, AlertTriggerPayload, Event}; use pagerduty_rs::types::{AlertTrigger, AlertTriggerPayload, Event};
use serde::Serialize; use serde::Serialize;
@ -11,6 +9,7 @@ use std::{
panic::PanicInfo, panic::PanicInfo,
}; };
use time::OffsetDateTime; use time::OffsetDateTime;
use tracing::{debug, error, warn};
/* /*
@ -24,7 +23,7 @@ use time::OffsetDateTime;
.and_then(|x| x.app.redirect_public_url.clone()); .and_then(|x| x.app.redirect_public_url.clone());
panic::set_hook(Box::new(move |x| { panic::set_hook(Box::new(move |x| {
let hostname = gethostname().into_string().unwrap_or("unknown".to_string()); let hostname = hostname.get().into_string().unwrap_or("unknown".to_string());
let panic_msg = format!("{} {:?}", x, x); let panic_msg = format!("{} {:?}", x, x);
if panic_msg.starts_with("panicked at 'WS Server panic") { if panic_msg.starts_with("panicked at 'WS Server panic") {
@ -162,7 +161,10 @@ pub fn pagerduty_alert<T: Serialize>(
let group = chain_id.map(|x| format!("chain #{}", x)); let group = chain_id.map(|x| format!("chain #{}", x));
let source = source.unwrap_or_else(|| { let source = source.unwrap_or_else(|| {
gethostname().into_string().unwrap_or_else(|err| { hostname::get()
.unwrap()
.into_string()
.unwrap_or_else(|err| {
warn!("unable to handle hostname: {:#?}", err); warn!("unable to handle hostname: {:#?}", err);
"unknown".to_string() "unknown".to_string()
}) })
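
With gethostname dropped, hostname lookups go through the hostname crate alone; hostname::get() returns std::io::Result<OsString>. A small sketch of the fallback pattern used above, with error handling that is illustrative rather than a copy of the repo's code (which unwraps the io::Result):

use tracing::warn;

// resolve the machine hostname, falling back to "unknown" when it is
// unavailable or not valid UTF-8
fn source_hostname() -> String {
    match hostname::get() {
        Ok(name) => name.into_string().unwrap_or_else(|raw| {
            warn!("unable to handle hostname: {:#?}", raw);
            "unknown".to_string()
        }),
        Err(err) => {
            warn!("unable to read hostname: {:#?}", err);
            "unknown".to_string()
        }
    }
}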

@ -2,10 +2,10 @@ use axum::headers::HeaderName;
use axum::http::HeaderValue; use axum::http::HeaderValue;
use axum::response::{IntoResponse, Response}; use axum::response::{IntoResponse, Response};
use axum::{routing::get, Extension, Router}; use axum::{routing::get, Extension, Router};
use log::info;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::broadcast; use tokio::sync::broadcast;
use tracing::info;
use crate::app::Web3ProxyApp; use crate::app::Web3ProxyApp;
use crate::errors::Web3ProxyResult; use crate::errors::Web3ProxyResult;

@@ -1,10 +1,10 @@
use derive_more::From;
-use log::{debug, info, warn};
use migration::sea_orm::{self, ConnectionTrait, Database};
use migration::sea_query::table::ColumnDef;
use migration::{Alias, DbErr, Migrator, MigratorTrait, Table};
use std::time::Duration;
use tokio::time::sleep;
+use tracing::{debug, info, warn};
pub use migration::sea_orm::DatabaseConnection;

@@ -8,7 +8,6 @@ use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
use crate::frontend::authorization::Authorization;
use derive_more::From;
use ethers::prelude::{Block, TxHash, H256, U64};
-use log::{debug, error, trace};
use moka::future::Cache;
use serde::ser::SerializeStruct;
use serde::Serialize;
@@ -18,6 +17,7 @@ use std::time::Duration;
use std::{fmt::Display, sync::Arc};
use tokio::sync::broadcast;
use tokio::time::timeout;
+use tracing::{debug, error, trace};
// TODO: type for Hydrated Blocks with their full transactions?
pub type ArcBlock = Arc<Block<TxHash>>;

@@ -11,7 +11,6 @@ use hashbrown::{HashMap, HashSet};
use hdrhistogram::serialization::{Serializer, V2DeflateSerializer};
use hdrhistogram::Histogram;
use itertools::{Itertools, MinMaxResult};
-use log::{debug, log_enabled, trace, warn, Level};
use moka::future::Cache;
use serde::Serialize;
use std::cmp::{Ordering, Reverse};
@@ -21,6 +20,7 @@ use std::sync::{atomic, Arc};
use std::time::Duration;
use tokio::sync::broadcast;
use tokio::time::Instant;
+use tracing::{debug, enabled, trace, warn, Level};
#[derive(Clone, Serialize)]
struct ConsensusRpcData {
@@ -685,7 +685,7 @@ impl ConsensusFinder {
}
// dev logging of a histogram
-if log_enabled!(Level::Trace) {
+if enabled!(Level::TRACE) {
// convert to ms because the histogram needs ints
let max_median_latency_ms = (max_median_latency_sec * 1000.0).ceil() as u64;
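For context, a minimal sketch (not from the commit; the helper name is illustrative) of the log_enabled! to enabled! swap above. It assumes tracing 0.1.32+, where the enabled! macro exists:

use tracing::{enabled, trace, Level};

// Guard an expensive summary behind a TRACE check, mirroring the
// log_enabled!(Level::Trace) -> enabled!(Level::TRACE) change above.
fn trace_histogram_summary(build_summary: impl FnOnce() -> String) {
    if enabled!(Level::TRACE) {
        trace!("latency histogram: {}", build_summary());
    }
}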

@@ -19,7 +19,6 @@ use futures::stream::FuturesUnordered;
use futures::StreamExt;
use hashbrown::{HashMap, HashSet};
use itertools::Itertools;
-use log::{debug, error, info, trace, warn};
use migration::sea_orm::DatabaseConnection;
use moka::future::{Cache, CacheBuilder};
use parking_lot::RwLock;
@@ -34,6 +33,7 @@ use std::sync::Arc;
use tokio::select;
use tokio::sync::{broadcast, watch};
use tokio::time::{sleep, sleep_until, Duration, Instant};
+use tracing::{debug, error, info, trace, warn};
/// A collection of web3 connections. Sends requests either the current best server or all servers.
#[derive(From)]
@@ -1385,9 +1385,9 @@ mod tests {
use ethers::types::H256;
use ethers::types::{Block, U256};
use latency::PeakEwmaLatency;
-use log::{trace, LevelFilter};
use moka::future::CacheBuilder;
use parking_lot::RwLock;
+use tracing::trace;
#[cfg(test)]
fn new_peak_latency() -> PeakEwmaLatency {
@@ -1396,11 +1396,12 @@ mod tests {
#[tokio::test(start_paused = true)]
async fn test_sort_connections_by_sync_status() {
-let _ = env_logger::builder()
-.filter_level(LevelFilter::Error)
-.filter_module("web3_proxy", LevelFilter::Trace)
-.is_test(true)
-.try_init();
+// TODO: how should we do test logging setup with tracing?
+// let _ = env_logger::builder()
+// .filter_level(LevelFilter::Error)
+// .filter_module("web3_proxy", LevelFilter::Trace)
+// .is_test(true)
+// .try_init();
let block_0 = Block {
number: Some(0.into()),
@@ -1489,12 +1490,12 @@ mod tests {
#[tokio::test(start_paused = true)]
async fn test_server_selection_by_height() {
-// TODO: do this better. can test_env_logger and tokio test be stacked?
-let _ = env_logger::builder()
-.filter_level(LevelFilter::Error)
-.filter_module("web3_proxy", LevelFilter::Trace)
-.is_test(true)
-.try_init();
+// // TODO: how should we do test logging setup with tracing?
+// let _ = env_logger::builder()
+// .filter_level(LevelFilter::Error)
+// .filter_module("web3_proxy", LevelFilter::Trace)
+// .is_test(true)
+// .try_init();
let now = chrono::Utc::now().timestamp().into();
@@ -1780,12 +1781,12 @@ mod tests {
#[tokio::test(start_paused = true)]
async fn test_server_selection_by_archive() {
-// TODO: do this better. can test_env_logger and tokio test be stacked?
-let _ = env_logger::builder()
-.filter_level(LevelFilter::Error)
-.filter_module("web3_proxy", LevelFilter::Trace)
-.is_test(true)
-.try_init();
+// // TODO: how should we do test logging setup with tracing?
+// let _ = env_logger::builder()
+// .filter_level(LevelFilter::Error)
+// .filter_module("web3_proxy", LevelFilter::Trace)
+// .is_test(true)
+// .try_init();
let now = chrono::Utc::now().timestamp().into();
@@ -1954,11 +1955,12 @@ mod tests {
#[tokio::test]
async fn test_all_connections() {
-let _ = env_logger::builder()
-.filter_level(LevelFilter::Error)
-.filter_module("web3_proxy", LevelFilter::Trace)
-.is_test(true)
-.try_init();
+// // TODO: how should we do test logging setup with tracing?
+// let _ = env_logger::builder()
+// .filter_level(LevelFilter::Error)
+// .filter_module("web3_proxy", LevelFilter::Trace)
+// .is_test(true)
+// .try_init();
// TODO: use chrono, not SystemTime
let now: U256 = SystemTime::now()
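The commented-out env_logger blocks above leave the test-logging TODO open. As a hedged sketch only (not part of this commit; the helper name and filter string are illustrative), one common answer is tracing-subscriber's fmt builder, assuming the crate is added with its "env-filter" feature:

// Hypothetical test helper; call it at the top of each test.
fn init_test_tracing() {
    let _ = tracing_subscriber::fmt()
        // roughly the old filter_level(Error) + filter_module("web3_proxy", Trace)
        .with_env_filter("error,web3_proxy=trace")
        // route output through the libtest capture machinery, like is_test(true)
        .with_test_writer()
        // ignore the error if another test already installed a global subscriber
        .try_init();
}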

@@ -15,7 +15,6 @@ use ethers::types::{Address, Transaction, U256};
use futures::future::try_join_all;
use futures::StreamExt;
use latency::{EwmaLatency, PeakEwmaLatency, RollingQuantileLatency};
-use log::{debug, info, trace, warn, Level};
use migration::sea_orm::DatabaseConnection;
use nanorand::Rng;
use parking_lot::RwLock;
@@ -30,6 +29,7 @@ use std::sync::atomic::{self, AtomicU32, AtomicU64, AtomicUsize};
use std::{cmp::Ordering, sync::Arc};
use tokio::sync::watch;
use tokio::time::{interval, sleep, sleep_until, Duration, Instant, MissedTickBehavior};
+use tracing::{debug, info, trace, warn, Level};
use url::Url;
/// An active connection to a Web3 RPC server like geth or erigon.
@@ -323,7 +323,7 @@ impl Web3Rpc {
"eth_blockNumber",
&(),
// error here are expected, so keep the level low
-Some(Level::Debug.into()),
+Some(Level::DEBUG.into()),
Some(Duration::from_secs(5)),
)
.await
@@ -347,7 +347,7 @@ impl Web3Rpc {
maybe_archive_block,
)),
// error here are expected, so keep the level low
-Some(Level::Trace.into()),
+Some(Level::TRACE.into()),
Some(Duration::from_secs(5)),
)
.await;
@@ -437,7 +437,7 @@ impl Web3Rpc {
.internal_request(
"eth_chainId",
&(),
-Some(Level::Trace.into()),
+Some(Level::TRACE.into()),
Some(Duration::from_secs(5)),
)
.await?;
@@ -827,7 +827,7 @@ impl Web3Rpc {
"eth_getBlockByNumber",
&("latest", false),
&authorization,
-Some(Level::Warn.into()),
+Some(Level::WARN.into()),
Some(Duration::from_secs(5)),
)
.await;
@@ -863,7 +863,7 @@ impl Web3Rpc {
"eth_getBlockByNumber",
&("latest", false),
&authorization,
-Some(Level::Warn.into()),
+Some(Level::WARN.into()),
Some(Duration::from_secs(5)),
)
.await;

@@ -9,13 +9,13 @@ use entities::revert_log;
use entities::sea_orm_active_enums::Method;
use ethers::providers::ProviderError;
use ethers::types::{Address, Bytes};
-use log::{debug, error, trace, warn, Level};
use migration::sea_orm::{self, ActiveEnum, ActiveModelTrait};
use nanorand::Rng;
use serde_json::json;
use std::sync::atomic;
use std::sync::Arc;
use tokio::time::{Duration, Instant};
+use tracing::{debug, error, trace, warn, Level};
#[derive(Debug, From)]
pub enum OpenRequestResult {
@@ -69,10 +69,10 @@ struct EthCallFirstParams {
impl From<Level> for RequestErrorHandler {
fn from(level: Level) -> Self {
match level {
-Level::Trace => RequestErrorHandler::TraceLevel,
-Level::Debug => RequestErrorHandler::DebugLevel,
-Level::Error => RequestErrorHandler::ErrorLevel,
-Level::Warn => RequestErrorHandler::WarnLevel,
+Level::TRACE => RequestErrorHandler::TraceLevel,
+Level::DEBUG => RequestErrorHandler::DebugLevel,
+Level::ERROR => RequestErrorHandler::ErrorLevel,
+Level::WARN => RequestErrorHandler::WarnLevel,
_ => unimplemented!("unexpected tracing Level"),
}
}
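A brief illustration (not from the commit) of why every Level::Trace became Level::TRACE across this diff: log::Level has title-case enum variants, while tracing::Level exposes upper-case associated constants compared by value, which the match arms and the Some(Level::DEBUG.into()) call sites above follow. A minimal sketch, with an illustrative function name:

use tracing::Level;

// tracing's levels are constants compared with ==, not enum variants.
fn is_noisy(level: Level) -> bool {
    level == Level::TRACE || level == Level::DEBUG
}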

@@ -5,9 +5,9 @@ use super::request::OpenRequestResult;
use crate::errors::Web3ProxyResult;
use crate::frontend::authorization::Authorization;
use ethers::prelude::{ProviderError, Transaction, TxHash};
-use log::{debug, trace, Level};
use std::sync::Arc;
use tokio::sync::broadcast;
+use tracing::{debug, trace, Level};
// TODO: think more about TxState
#[derive(Clone)]
@@ -30,7 +30,7 @@ impl Web3Rpcs {
// TODO: if one rpc fails, try another?
// TODO: try_request_handle, or wait_for_request_handle? I think we want wait here
let tx: Transaction = match rpc
-.try_request_handle(authorization, Some(Level::Warn.into()))
+.try_request_handle(authorization, Some(Level::WARN.into()))
.await
{
Ok(OpenRequestResult::Handle(handle)) => {

@@ -14,7 +14,6 @@ use axum::{
};
use entities::{rpc_accounting, rpc_key};
use hashbrown::HashMap;
-use log::warn;
use migration::sea_orm::{
ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, QueryOrder, QuerySelect, Select,
};
@@ -22,6 +21,7 @@ use migration::{Condition, Expr, SimpleExpr};
use redis_rate_limiter::redis;
use redis_rate_limiter::redis::AsyncCommands;
use serde_json::json;
+use tracing::warn;
pub fn filter_query_window_seconds(
query_window_seconds: u64,

@@ -20,9 +20,9 @@ use fstrings::{f, format_args_f};
use hashbrown::HashMap;
use influxdb2::api::query::FluxRecord;
use influxdb2::models::Query;
-use log::{debug, error, trace, warn};
use migration::sea_orm::{ColumnTrait, EntityTrait, QueryFilter};
use serde_json::json;
+use tracing::{debug, error, trace, warn};
use ulid::Ulid;
pub async fn query_user_stats<'a>(

@@ -17,7 +17,6 @@ use chrono::{DateTime, Months, TimeZone, Utc};
use derive_more::From;
use entities::{balance, referee, referrer, rpc_accounting_v2, rpc_key};
use influxdb2::models::DataPoint;
-use log::trace;
use migration::sea_orm::prelude::Decimal;
use migration::sea_orm::{
self, ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter, TransactionTrait,
@@ -32,6 +31,7 @@ use std::num::NonZeroU64;
use std::str::FromStr;
use std::sync::atomic::{self, Ordering};
use std::sync::Arc;
+use tracing::trace;
pub use stat_buffer::{SpawnedStatBuffer, StatBuffer};

@@ -6,7 +6,6 @@ use derive_more::From;
use futures::stream;
use hashbrown::HashMap;
use influxdb2::api::write::TimestampPrecision;
-use log::{error, info, trace};
use migration::sea_orm::prelude::Decimal;
use migration::sea_orm::DatabaseConnection;
use parking_lot::RwLock;
@@ -14,6 +13,7 @@ use std::sync::Arc;
use std::time::Duration;
use tokio::sync::broadcast;
use tokio::time::interval;
+use tracing::{error, info, trace};
#[derive(Debug, Default)]
pub struct BufferedRpcQueryStats {