cut out tracing for now

This commit is contained in:
Bryan Stitt 2022-11-12 08:24:32 +00:00
parent 9ae2337d1d
commit 0e1cf5767c
35 changed files with 356 additions and 612 deletions

317
Cargo.lock generated
View File

@ -263,7 +263,7 @@ version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c" checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c"
dependencies = [ dependencies = [
"autocfg 1.1.0", "autocfg",
] ]
[[package]] [[package]]
@ -304,7 +304,7 @@ version = "0.7.12"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41aed1da83ecdc799503b7cb94da1b45a34d72b49caf40a61d9cf5b88ec07cfd" checksum = "41aed1da83ecdc799503b7cb94da1b45a34d72b49caf40a61d9cf5b88ec07cfd"
dependencies = [ dependencies = [
"autocfg 1.1.0", "autocfg",
"derive_utils", "derive_utils",
"proc-macro2", "proc-macro2",
"quote", "quote",
@ -335,15 +335,6 @@ dependencies = [
"syn", "syn",
] ]
[[package]]
name = "autocfg"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78"
dependencies = [
"autocfg 1.1.0",
]
[[package]] [[package]]
name = "autocfg" name = "autocfg"
version = "1.1.0" version = "1.1.0"
@ -730,9 +721,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]] [[package]]
name = "chrono" name = "chrono"
version = "0.4.22" version = "0.4.23"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1" checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f"
dependencies = [ dependencies = [
"iana-time-zone", "iana-time-zone",
"js-sys", "js-sys",
@ -808,15 +799,6 @@ dependencies = [
"os_str_bytes", "os_str_bytes",
] ]
[[package]]
name = "cloudabi"
version = "0.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
dependencies = [
"bitflags",
]
[[package]] [[package]]
name = "coins-bip32" name = "coins-bip32"
version = "0.7.0" version = "0.7.0"
@ -848,7 +830,7 @@ dependencies = [
"hex", "hex",
"hmac", "hmac",
"pbkdf2 0.11.0", "pbkdf2 0.11.0",
"rand 0.8.5", "rand",
"sha2 0.10.6", "sha2 0.10.6",
"thiserror", "thiserror",
] ]
@ -1033,7 +1015,7 @@ version = "0.9.10"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1" checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1"
dependencies = [ dependencies = [
"autocfg 1.1.0", "autocfg",
"cfg-if", "cfg-if",
"crossbeam-utils", "crossbeam-utils",
"memoffset", "memoffset",
@ -1084,7 +1066,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f2b443d17d49dad5ef0ede301c3179cc923b8822f3393b4d2c28c269dd4a122" checksum = "9f2b443d17d49dad5ef0ede301c3179cc923b8822f3393b4d2c28c269dd4a122"
dependencies = [ dependencies = [
"generic-array 0.14.6", "generic-array 0.14.6",
"rand_core 0.6.3", "rand_core",
"subtle", "subtle",
"zeroize", "zeroize",
] ]
@ -1158,10 +1140,10 @@ version = "0.2.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"hashbrown 0.13.1", "hashbrown 0.13.1",
"log",
"moka", "moka",
"redis-rate-limiter", "redis-rate-limiter",
"tokio", "tokio",
"tracing",
] ]
[[package]] [[package]]
@ -1333,7 +1315,7 @@ dependencies = [
"generic-array 0.14.6", "generic-array 0.14.6",
"group", "group",
"pkcs8 0.9.0", "pkcs8 0.9.0",
"rand_core 0.6.3", "rand_core",
"sec1", "sec1",
"subtle", "subtle",
"zeroize", "zeroize",
@ -1372,6 +1354,19 @@ dependencies = [
"uuid 1.2.1", "uuid 1.2.1",
] ]
[[package]]
name = "env_logger"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7"
dependencies = [
"atty",
"humantime",
"log",
"regex",
"termcolor",
]
[[package]] [[package]]
name = "error-chain" name = "error-chain"
version = "0.12.4" version = "0.12.4"
@ -1393,7 +1388,7 @@ dependencies = [
"hex", "hex",
"hmac", "hmac",
"pbkdf2 0.11.0", "pbkdf2 0.11.0",
"rand 0.8.5", "rand",
"scrypt", "scrypt",
"serde", "serde",
"serde_json", "serde_json",
@ -1486,7 +1481,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "002a0d58a7d921b496f5f19b5b9508d01d25fbe25078286b1fcb6f4e7562acf7" checksum = "002a0d58a7d921b496f5f19b5b9508d01d25fbe25078286b1fcb6f4e7562acf7"
dependencies = [ dependencies = [
"ethers-contract-abigen", "ethers-contract-abigen",
"ethers-contract-derive",
"ethers-core", "ethers-core",
"ethers-providers", "ethers-providers",
"futures-util", "futures-util",
@ -1522,21 +1516,6 @@ dependencies = [
"walkdir", "walkdir",
] ]
[[package]]
name = "ethers-contract-derive"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "445276414690c97d88638d22dd5f89ba919d7dcea36de4825896d52280c704c7"
dependencies = [
"ethers-contract-abigen",
"ethers-core",
"hex",
"proc-macro2",
"quote",
"serde_json",
"syn",
]
[[package]] [[package]]
name = "ethers-core" name = "ethers-core"
version = "1.0.0" version = "1.0.0"
@ -1556,7 +1535,7 @@ dependencies = [
"once_cell", "once_cell",
"open-fastrlp", "open-fastrlp",
"proc-macro2", "proc-macro2",
"rand 0.8.5", "rand",
"rlp", "rlp",
"rlp-derive", "rlp-derive",
"rust_decimal", "rust_decimal",
@ -1662,7 +1641,7 @@ dependencies = [
"eth-keystore", "eth-keystore",
"ethers-core", "ethers-core",
"hex", "hex",
"rand 0.8.5", "rand",
"sha2 0.10.6", "sha2 0.10.6",
"thiserror", "thiserror",
] ]
@ -1745,7 +1724,7 @@ version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df689201f395c6b90dfe87127685f8dbfc083a5e779e613575d8bd7314300c3e" checksum = "df689201f395c6b90dfe87127685f8dbfc083a5e779e613575d8bd7314300c3e"
dependencies = [ dependencies = [
"rand_core 0.6.3", "rand_core",
"subtle", "subtle",
] ]
@ -1768,7 +1747,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c"
dependencies = [ dependencies = [
"byteorder", "byteorder",
"rand 0.8.5", "rand",
"rustc-hex", "rustc-hex",
"static_assertions", "static_assertions",
] ]
@ -1852,12 +1831,6 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca45aac12b6c561b6289bc68957cb1db3dccf870e1951d590202de5e24f1dd35" checksum = "ca45aac12b6c561b6289bc68957cb1db3dccf870e1951d590202de5e24f1dd35"
[[package]]
name = "fuchsia-cprng"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
[[package]] [[package]]
name = "funty" name = "funty"
version = "2.0.0" version = "2.0.0"
@ -2057,7 +2030,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7391856def869c1c81063a03457c676fbcd419709c3dfb33d8d319de484b154d" checksum = "7391856def869c1c81063a03457c676fbcd419709c3dfb33d8d319de484b154d"
dependencies = [ dependencies = [
"ff", "ff",
"rand_core 0.6.3", "rand_core",
"subtle", "subtle",
] ]
@ -2272,6 +2245,12 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421"
[[package]]
name = "humantime"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]] [[package]]
name = "hyper" name = "hyper"
version = "0.14.19" version = "0.14.19"
@ -2382,7 +2361,7 @@ version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e"
dependencies = [ dependencies = [
"autocfg 1.1.0", "autocfg",
"hashbrown 0.12.3", "hashbrown 0.12.3",
"serde", "serde",
] ]
@ -2593,7 +2572,7 @@ version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f80bf5aacaf25cbfc8210d1cfb718f2bf3b11c4c54e5afe36c236853a8ec390" checksum = "9f80bf5aacaf25cbfc8210d1cfb718f2bf3b11c4c54e5afe36c236853a8ec390"
dependencies = [ dependencies = [
"autocfg 1.1.0", "autocfg",
"scopeguard", "scopeguard",
] ]
@ -2648,7 +2627,7 @@ version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
dependencies = [ dependencies = [
"autocfg 1.1.0", "autocfg",
] ]
[[package]] [[package]]
@ -2826,7 +2805,7 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f"
dependencies = [ dependencies = [
"autocfg 1.1.0", "autocfg",
"num-integer", "num-integer",
"num-traits", "num-traits",
] ]
@ -2843,7 +2822,7 @@ dependencies = [
"num-integer", "num-integer",
"num-iter", "num-iter",
"num-traits", "num-traits",
"rand 0.8.5", "rand",
"smallvec", "smallvec",
"zeroize", "zeroize",
] ]
@ -2863,7 +2842,7 @@ version = "0.1.45"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
dependencies = [ dependencies = [
"autocfg 1.1.0", "autocfg",
"num-traits", "num-traits",
] ]
@ -2873,7 +2852,7 @@ version = "0.1.43"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252"
dependencies = [ dependencies = [
"autocfg 1.1.0", "autocfg",
"num-integer", "num-integer",
"num-traits", "num-traits",
] ]
@ -2884,7 +2863,7 @@ version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0"
dependencies = [ dependencies = [
"autocfg 1.1.0", "autocfg",
"num-bigint", "num-bigint",
"num-integer", "num-integer",
"num-traits", "num-traits",
@ -2896,7 +2875,7 @@ version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
dependencies = [ dependencies = [
"autocfg 1.1.0", "autocfg",
"libm", "libm",
] ]
@ -3105,7 +3084,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d791538a6dcc1e7cb7fe6f6b58aca40e7f79403c45b2bc274008b5e647af1d8" checksum = "1d791538a6dcc1e7cb7fe6f6b58aca40e7f79403c45b2bc274008b5e647af1d8"
dependencies = [ dependencies = [
"base64ct", "base64ct",
"rand_core 0.6.3", "rand_core",
"subtle", "subtle",
] ]
@ -3116,7 +3095,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700"
dependencies = [ dependencies = [
"base64ct", "base64ct",
"rand_core 0.6.3", "rand_core",
"subtle", "subtle",
] ]
@ -3253,7 +3232,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6" checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6"
dependencies = [ dependencies = [
"phf_shared", "phf_shared",
"rand 0.8.5", "rand",
] ]
[[package]] [[package]]
@ -3486,25 +3465,6 @@ version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
[[package]]
name = "rand"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
dependencies = [
"autocfg 0.1.8",
"libc",
"rand_chacha 0.1.1",
"rand_core 0.4.2",
"rand_hc",
"rand_isaac",
"rand_jitter",
"rand_os",
"rand_pcg",
"rand_xorshift",
"winapi",
]
[[package]] [[package]]
name = "rand" name = "rand"
version = "0.8.5" version = "0.8.5"
@ -3512,18 +3472,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [ dependencies = [
"libc", "libc",
"rand_chacha 0.3.1", "rand_chacha",
"rand_core 0.6.3", "rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef"
dependencies = [
"autocfg 0.1.8",
"rand_core 0.3.1",
] ]
[[package]] [[package]]
@ -3533,24 +3483,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [ dependencies = [
"ppv-lite86", "ppv-lite86",
"rand_core 0.6.3", "rand_core",
] ]
[[package]]
name = "rand_core"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
dependencies = [
"rand_core 0.4.2",
]
[[package]]
name = "rand_core"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
[[package]] [[package]]
name = "rand_core" name = "rand_core"
version = "0.6.3" version = "0.6.3"
@ -3560,76 +3495,13 @@ dependencies = [
"getrandom", "getrandom",
] ]
[[package]]
name = "rand_hc"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4"
dependencies = [
"rand_core 0.3.1",
]
[[package]]
name = "rand_isaac"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08"
dependencies = [
"rand_core 0.3.1",
]
[[package]]
name = "rand_jitter"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b"
dependencies = [
"libc",
"rand_core 0.4.2",
"winapi",
]
[[package]]
name = "rand_os"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071"
dependencies = [
"cloudabi",
"fuchsia-cprng",
"libc",
"rand_core 0.4.2",
"rdrand",
"wasm-bindgen",
"winapi",
]
[[package]]
name = "rand_pcg"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44"
dependencies = [
"autocfg 0.1.8",
"rand_core 0.4.2",
]
[[package]]
name = "rand_xorshift"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c"
dependencies = [
"rand_core 0.3.1",
]
[[package]] [[package]]
name = "rand_xoshiro" name = "rand_xoshiro"
version = "0.6.0" version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa"
dependencies = [ dependencies = [
"rand_core 0.6.3", "rand_core",
] ]
[[package]] [[package]]
@ -3638,7 +3510,7 @@ version = "1.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d"
dependencies = [ dependencies = [
"autocfg 1.1.0", "autocfg",
"crossbeam-deque", "crossbeam-deque",
"either", "either",
"rayon-core", "rayon-core",
@ -3656,15 +3528,6 @@ dependencies = [
"num_cpus", "num_cpus",
] ]
[[package]]
name = "rdrand"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
dependencies = [
"rand_core 0.3.1",
]
[[package]] [[package]]
name = "redis" name = "redis"
version = "0.22.1" version = "0.22.1"
@ -3691,7 +3554,6 @@ dependencies = [
"anyhow", "anyhow",
"deadpool-redis", "deadpool-redis",
"tokio", "tokio",
"tracing",
] ]
[[package]] [[package]]
@ -3864,7 +3726,7 @@ dependencies = [
"num-traits", "num-traits",
"pkcs1", "pkcs1",
"pkcs8 0.8.0", "pkcs8 0.8.0",
"rand_core 0.6.3", "rand_core",
"smallvec", "smallvec",
"subtle", "subtle",
"zeroize", "zeroize",
@ -4223,6 +4085,7 @@ dependencies = [
"sentry-backtrace", "sentry-backtrace",
"sentry-contexts", "sentry-contexts",
"sentry-core", "sentry-core",
"sentry-log",
"sentry-panic", "sentry-panic",
"tokio", "tokio",
"ureq", "ureq",
@ -4273,12 +4136,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff58433a7ad557b586a09c42c4298d5f3ddb0c777e1a79d950e510d7b93fce0e" checksum = "ff58433a7ad557b586a09c42c4298d5f3ddb0c777e1a79d950e510d7b93fce0e"
dependencies = [ dependencies = [
"once_cell", "once_cell",
"rand 0.8.5", "rand",
"sentry-types", "sentry-types",
"serde", "serde",
"serde_json", "serde_json",
] ]
[[package]]
name = "sentry-log"
version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8da361d15c09707d3bd8a2742132b8af54a697994dd396942556aef01d2432de"
dependencies = [
"log",
"sentry-core",
]
[[package]] [[package]]
name = "sentry-panic" name = "sentry-panic"
version = "0.28.0" version = "0.28.0"
@ -4289,17 +4162,6 @@ dependencies = [
"sentry-core", "sentry-core",
] ]
[[package]]
name = "sentry-tracing"
version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc8d5bae1e1c06d96a966efc425bf1479a90464de99757d40601ce449f91fbed"
dependencies = [
"sentry-core",
"tracing-core",
"tracing-subscriber",
]
[[package]] [[package]]
name = "sentry-types" name = "sentry-types"
version = "0.28.0" version = "0.28.0"
@ -4479,7 +4341,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f054c6c1a6e95179d6f23ed974060dcefb2d9388bb7256900badad682c499de4" checksum = "f054c6c1a6e95179d6f23ed974060dcefb2d9388bb7256900badad682c499de4"
dependencies = [ dependencies = [
"digest 0.10.5", "digest 0.10.5",
"rand_core 0.6.3", "rand_core",
] ]
[[package]] [[package]]
@ -4498,7 +4360,7 @@ dependencies = [
"http", "http",
"iri-string", "iri-string",
"k256", "k256",
"rand 0.8.5", "rand",
"sha3", "sha3",
"thiserror", "thiserror",
"time 0.3.17", "time 0.3.17",
@ -4525,7 +4387,7 @@ version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef"
dependencies = [ dependencies = [
"autocfg 1.1.0", "autocfg",
] ]
[[package]] [[package]]
@ -4668,7 +4530,7 @@ dependencies = [
"once_cell", "once_cell",
"paste", "paste",
"percent-encoding", "percent-encoding",
"rand 0.8.5", "rand",
"rsa", "rsa",
"rust_decimal", "rust_decimal",
"rustls", "rustls",
@ -4806,7 +4668,7 @@ dependencies = [
"indicatif", "indicatif",
"itertools", "itertools",
"once_cell", "once_cell",
"rand 0.8.5", "rand",
"reqwest", "reqwest",
"semver", "semver",
"serde", "serde",
@ -4934,7 +4796,7 @@ dependencies = [
name = "thread-fast-rng" name = "thread-fast-rng"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"rand 0.8.5", "rand",
"rand_xoshiro", "rand_xoshiro",
] ]
@ -5025,7 +4887,7 @@ version = "1.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099"
dependencies = [ dependencies = [
"autocfg 1.1.0", "autocfg",
"bytes", "bytes",
"libc", "libc",
"memchr", "memchr",
@ -5146,7 +5008,6 @@ dependencies = [
"tower", "tower",
"tower-layer", "tower-layer",
"tower-service", "tower-service",
"tracing",
] ]
[[package]] [[package]]
@ -5155,18 +5016,6 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62"
[[package]]
name = "tower-request-id"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10b702a9ce17a8bd4c2f0e6acdcf75debaf14e4125cee76d4c5ac730f3ed9520"
dependencies = [
"http",
"tower-layer",
"tower-service",
"ulid 0.4.1",
]
[[package]] [[package]]
name = "tower-service" name = "tower-service"
version = "0.3.1" version = "0.3.1"
@ -5237,7 +5086,6 @@ dependencies = [
"matchers", "matchers",
"nu-ansi-term", "nu-ansi-term",
"once_cell", "once_cell",
"parking_lot 0.12.1",
"regex", "regex",
"sharded-slab", "sharded-slab",
"smallvec", "smallvec",
@ -5271,7 +5119,7 @@ dependencies = [
"http", "http",
"httparse", "httparse",
"log", "log",
"rand 0.8.5", "rand",
"rustls", "rustls",
"sha-1", "sha-1",
"thiserror", "thiserror",
@ -5304,24 +5152,13 @@ dependencies = [
"static_assertions", "static_assertions",
] ]
[[package]]
name = "ulid"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7e95a59b292ca0cf9b45be2e52294d1ca6cb24eb11b08ef4376f73f1a00c549"
dependencies = [
"chrono",
"lazy_static",
"rand 0.6.5",
]
[[package]] [[package]]
name = "ulid" name = "ulid"
version = "1.0.0" version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13a3aaa69b04e5b66cc27309710a569ea23593612387d67daaf102e73aa974fd" checksum = "13a3aaa69b04e5b66cc27309710a569ea23593612387d67daaf102e73aa974fd"
dependencies = [ dependencies = [
"rand 0.8.5", "rand",
"serde", "serde",
] ]
@ -5606,6 +5443,7 @@ dependencies = [
"derive_more", "derive_more",
"dotenv", "dotenv",
"entities", "entities",
"env_logger",
"ethers", "ethers",
"fdlimit", "fdlimit",
"flume", "flume",
@ -5616,6 +5454,7 @@ dependencies = [
"http", "http",
"ipnet", "ipnet",
"itertools", "itertools",
"log",
"metered", "metered",
"migration", "migration",
"moka", "moka",
@ -5631,7 +5470,6 @@ dependencies = [
"rustc-hash", "rustc-hash",
"sea-orm", "sea-orm",
"sentry", "sentry",
"sentry-tracing",
"serde", "serde",
"serde_json", "serde_json",
"serde_prometheus", "serde_prometheus",
@ -5643,10 +5481,7 @@ dependencies = [
"toml", "toml",
"tower", "tower",
"tower-http", "tower-http",
"tower-request-id", "ulid",
"tracing",
"tracing-subscriber",
"ulid 1.0.0",
"url", "url",
"uuid 1.2.1", "uuid 1.2.1",
] ]

View File

@ -98,8 +98,9 @@ Flame graphs make a developer's job of finding slow code painless:
4 4
$ echo -1 | sudo tee /proc/sys/kernel/perf_event_paranoid $ echo -1 | sudo tee /proc/sys/kernel/perf_event_paranoid
-1 -1
$ CARGO_PROFILE_RELEASE_DEBUG=true cargo flamegraph --bin web3_proxy $ CARGO_PROFILE_RELEASE_DEBUG=true cargo flamegraph --bin web3_proxy --no-inline
Be sure to use `--no-inline` or perf will be VERY slow
## GDB ## GDB

View File

@ -236,8 +236,11 @@ These are roughly in order of completion
- [-] add configurable size limits to all the Caches - [-] add configurable size limits to all the Caches
- instead of configuring each cache with MB sizes, have one value for total memory footprint and then percentages for each cache - instead of configuring each cache with MB sizes, have one value for total memory footprint and then percentages for each cache
- [x] improve sorting servers by weight. don't force to lower weights, still have a probability that smaller weights might be - [x] improve sorting servers by weight. don't force to lower weights, still have a probability that smaller weights might be
- [ ] flamegraphs show 52% of the time to be in tracing. figure out how to improve that
- [ ] add block timestamp to the /status page - [ ] add block timestamp to the /status page
- [ ] cache the status page for a second - [ ] cache the status page for a second
- [ ] probably need to turn more sentry log integrations (like anyhow) on!
- [ ] tests should use `test-env-log = "0.2.8"`
- [ ] actually block unauthenticated requests instead of emitting warning of "allowing without auth during development!" - [ ] actually block unauthenticated requests instead of emitting warning of "allowing without auth during development!"
@ -245,6 +248,7 @@ These are roughly in order of completition
These are not yet ordered. There might be duplicates. We might not actually need all of these. These are not yet ordered. There might be duplicates. We might not actually need all of these.
- [ ] flamegraphs show 25% of the time to be in moka-housekeeper. tune that
- [ ] remove the "metered" crate now that we save aggregate queries? - [ ] remove the "metered" crate now that we save aggregate queries?
- [ ] remove/change the "active_requests" counter? maybe only once we have dynamic soft limits? - [ ] remove/change the "active_requests" counter? maybe only once we have dynamic soft limits?
- [ ] refactor so configs can change while running - [ ] refactor so configs can change while running
@ -410,8 +414,6 @@ in another repo: event subscriber
- [ ] test /api/getGaugesmethod - [ ] test /api/getGaugesmethod
- usually times out after vercel's 60 second timeout - usually times out after vercel's 60 second timeout
- one time got: Error invalid Json response "" - one time got: Error invalid Json response ""
- [ ] i think all the async methods in ethers need tracing instrument. something like `cfgif(tracing, tracing::instrument)`
- if they do that, i think my request_id will show up on their logs
- [ ] page that prints a graphviz dotfile of the blockchain - [ ] page that prints a graphviz dotfile of the blockchain
- [ ] search for all the "TODO" and `todo!(...)` items in the code and move them here - [ ] search for all the "TODO" and `todo!(...)` items in the code and move them here
- [ ] add the backend server to the header? - [ ] add the backend server to the header?

View File

@ -9,6 +9,6 @@ redis-rate-limiter = { path = "../redis-rate-limiter" }
anyhow = "1.0.66" anyhow = "1.0.66"
hashbrown = "0.13.1" hashbrown = "0.13.1"
log = "0.4.17"
moka = { version = "0.9.6", default-features = false, features = ["future"] } moka = { version = "0.9.6", default-features = false, features = ["future"] }
tokio = "1.21.2" tokio = "1.21.2"
tracing = "0.1.37"

View File

@ -1,4 +1,5 @@
//#![warn(missing_docs)] //#![warn(missing_docs)]
use log::error;
use moka::future::Cache; use moka::future::Cache;
use redis_rate_limiter::{RedisRateLimitResult, RedisRateLimiter}; use redis_rate_limiter::{RedisRateLimitResult, RedisRateLimiter};
use std::cmp::Eq; use std::cmp::Eq;
@ -8,7 +9,6 @@ use std::sync::atomic::Ordering;
use std::sync::{atomic::AtomicU64, Arc}; use std::sync::{atomic::AtomicU64, Arc};
use tokio::sync::Mutex; use tokio::sync::Mutex;
use tokio::time::{Duration, Instant}; use tokio::time::{Duration, Instant};
use tracing::{error, info_span, Instrument};
/// A local cache that sits in front of a RedisRateLimiter /// A local cache that sits in front of a RedisRateLimiter
/// Generic across the key so it is simple to use with IPs or user keys /// Generic across the key so it is simple to use with IPs or user keys
@ -118,7 +118,7 @@ where
// if we get a redis error, just let the user through. // if we get a redis error, just let the user through.
// if users are sticky on a server, local caches will work well enough // if users are sticky on a server, local caches will work well enough
// though now that we do this, we need to reset rate limits every minute! cache must have ttl! // though now that we do this, we need to reset rate limits every minute! cache must have ttl!
error!(?err, "unable to rate limit! creating empty cache"); error!("unable to rate limit! creating empty cache. err={:?}", err);
0 0
} }
}; };
@ -177,9 +177,9 @@ where
Err(err) => { Err(err) => {
// don't let redis errors block our users! // don't let redis errors block our users!
error!( error!(
?key, "unable to query rate limits, but local cache is available. key={:?} err={:?}",
?err, key,
"unable to query rate limits, but local cache is available" err,
); );
// TODO: we need to start a timer that resets this count every minute // TODO: we need to start a timer that resets this count every minute
DeferredRateLimitResult::Allowed DeferredRateLimitResult::Allowed
@ -194,11 +194,8 @@ where
// close to period. don't risk it. wait on redis // close to period. don't risk it. wait on redis
Ok(rate_limit_f.await) Ok(rate_limit_f.await)
} else { } else {
// TODO: pass the frontend request id through
let span = info_span!("deferred rate limit");
// rate limit has enough headroom that it should be safe to do this in the background // rate limit has enough headroom that it should be safe to do this in the background
tokio::spawn(rate_limit_f.instrument(span)); tokio::spawn(rate_limit_f);
Ok(DeferredRateLimitResult::Allowed) Ok(DeferredRateLimitResult::Allowed)
} }

8
docs/faster perf.txt Normal file
View File

@ -0,0 +1,8 @@
sudo apt install bison flex
wget https://eighty-twenty.org/files/0001-tools-perf-Use-long-running-addr2line-per-dso.patch
git clone https://github.com/torvalds/linux.git
cd linux
git checkout v5.15
git apply ../0001-tools-perf-Use-long-running-addr2line-per-dso.patch
cd tools/perf
make prefix=$HOME/.local VERSION=5.15 install-bin

View File

@ -7,5 +7,4 @@ edition = "2021"
[dependencies] [dependencies]
anyhow = "1.0.66" anyhow = "1.0.66"
deadpool-redis = { version = "0.11.0", features = ["rt_tokio_1", "serde"] } deadpool-redis = { version = "0.11.0", features = ["rt_tokio_1", "serde"] }
tracing = "0.1.37"
tokio = "1.21.2" tokio = "1.21.2"

View File

@ -3,7 +3,6 @@ use anyhow::Context;
use std::ops::Add; use std::ops::Add;
use std::time::{SystemTime, UNIX_EPOCH}; use std::time::{SystemTime, UNIX_EPOCH};
use tokio::time::{Duration, Instant}; use tokio::time::{Duration, Instant};
use tracing::{debug, trace};
pub use deadpool_redis::redis; pub use deadpool_redis::redis;
pub use deadpool_redis::{ pub use deadpool_redis::{
@ -97,7 +96,6 @@ impl RedisRateLimiter {
// TODO: at high concurency, this gives "connection reset by peer" errors. at least they are off the hot path // TODO: at high concurency, this gives "connection reset by peer" errors. at least they are off the hot path
// TODO: only set expire if this is a new key // TODO: only set expire if this is a new key
trace!("redis incr+expire");
// TODO: automatic retry // TODO: automatic retry
let x: Vec<_> = redis::pipe() let x: Vec<_> = redis::pipe()
.atomic() .atomic()
@ -119,11 +117,8 @@ impl RedisRateLimiter {
// TODO: this might actually be early if we are way over the count // TODO: this might actually be early if we are way over the count
let retry_at = self.next_period(now); let retry_at = self.next_period(now);
debug!(%label, ?retry_at, "rate limited: {}/{}", new_count, max_per_period);
Ok(RedisRateLimitResult::RetryAt(retry_at, new_count)) Ok(RedisRateLimitResult::RetryAt(retry_at, new_count))
} else { } else {
trace!(%label, "NOT rate limited: {}/{}", new_count, max_per_period);
Ok(RedisRateLimitResult::Allowed(new_count)) Ok(RedisRateLimitResult::Allowed(new_count))
} }
} }

View File

@ -27,11 +27,12 @@ axum = { version = "0.5.17", features = ["headers", "serde_json", "tokio-tungste
axum-client-ip = "0.2.0" axum-client-ip = "0.2.0"
axum-macros = "0.2.3" axum-macros = "0.2.3"
# TODO: import chrono from sea-orm so we always have the same version # TODO: import chrono from sea-orm so we always have the same version
chrono = "0.4.22" chrono = "0.4.23"
counter = "0.5.7" counter = "0.5.7"
derive_more = "0.99.17" derive_more = "0.99.17"
dotenv = "0.15.0" dotenv = "0.15.0"
ethers = { version = "1.0.0", features = ["rustls", "ws"] } ethers = { version = "1.0.0", default-features = false, features = ["rustls", "ws"] }
env_logger = "0.9.3"
fdlimit = "0.2.1" fdlimit = "0.2.1"
flume = "0.10.14" flume = "0.10.14"
futures = { version = "0.3.25", features = ["thread-pool"] } futures = { version = "0.3.25", features = ["thread-pool"] }
@ -39,6 +40,7 @@ hashbrown = { version = "0.13.1", features = ["serde"] }
hdrhistogram = "7.5.2" hdrhistogram = "7.5.2"
http = "0.2.8" http = "0.2.8"
ipnet = "2.5.1" ipnet = "2.5.1"
log = "0.4.17"
metered = { version = "0.9.0", features = ["serialize"] } metered = { version = "0.9.0", features = ["serialize"] }
moka = { version = "0.9.6", default-features = false, features = ["future"] } moka = { version = "0.9.6", default-features = false, features = ["future"] }
notify = "5.0.0" notify = "5.0.0"
@ -55,24 +57,18 @@ handlebars = "4.3.5"
rustc-hash = "1.1.0" rustc-hash = "1.1.0"
siwe = "0.5.0" siwe = "0.5.0"
sea-orm = { version = "0.10.2", features = ["macros"] } sea-orm = { version = "0.10.2", features = ["macros"] }
sentry = { version = "0.28.0", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls"] } sentry = { version = "0.28.0", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] }
sentry-tracing = "0.28.0"
serde = { version = "1.0.147", features = [] } serde = { version = "1.0.147", features = [] }
serde_json = { version = "1.0.87", default-features = false, features = ["alloc", "raw_value"] } serde_json = { version = "1.0.87", default-features = false, features = ["alloc", "raw_value"] }
serde_prometheus = "0.1.6" serde_prometheus = "0.1.6"
# TODO: make sure this time version matches siwe. PR to put this in their prelude # TODO: make sure this time version matches siwe. PR to put this in their prelude
time = "0.3.17" time = "0.3.17"
tokio = { version = "1.21.2", features = ["full", "tracing"] } tokio = { version = "1.21.2", features = ["full"] }
# TODO: make sure this uuid version matches sea-orm. PR to put this in their prelude # TODO: make sure this uuid version matches sea-orm. PR to put this in their prelude
tokio-stream = { version = "0.1.11", features = ["sync"] } tokio-stream = { version = "0.1.11", features = ["sync"] }
toml = "0.5.9" toml = "0.5.9"
tower = "0.4.13" tower = "0.4.13"
# TODO: i don't think we need this. we can use it from tower-http instead. though this seems to use ulid and not uuid? tower-http = { version = "0.3.4", features = ["cors", "sensitive-headers"] }
tower-request-id = "0.2.0"
tower-http = { version = "0.3.4", features = ["cors", "sensitive-headers", "trace"] }
tracing = "0.1.37"
# TODO: tracing-subscriber has serde and serde_json features that we might want to use
tracing-subscriber = { version = "0.3.16", features = ["env-filter", "parking_lot"] }
ulid = { version = "1.0.0", features = ["serde"] } ulid = { version = "1.0.0", features = ["serde"] }
url = "2.3.1" url = "2.3.1"
uuid = "1.2.1" uuid = "1.2.1"

View File

@ -5,11 +5,6 @@ use std::time::Duration;
#[tokio::main] #[tokio::main]
async fn main() -> anyhow::Result<()> { async fn main() -> anyhow::Result<()> {
// install global collector configured based on RUST_LOG env var. // install global collector configured based on RUST_LOG env var.
tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.compact()
.init();
fdlimit::raise_fd_limit(); fdlimit::raise_fd_limit();
// erigon // erigon

View File

@ -4,12 +4,6 @@ use std::{str::FromStr, time::Duration};
#[tokio::main] #[tokio::main]
async fn main() -> anyhow::Result<()> { async fn main() -> anyhow::Result<()> {
// install global collector configured based on RUST_LOG env var.
tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.compact()
.init();
fdlimit::raise_fd_limit(); fdlimit::raise_fd_limit();
// erigon does not support most filters // erigon does not support most filters

View File

@ -25,6 +25,7 @@ use futures::stream::FuturesUnordered;
use futures::stream::StreamExt; use futures::stream::StreamExt;
use hashbrown::HashMap; use hashbrown::HashMap;
use ipnet::IpNet; use ipnet::IpNet;
use log::{error, info, warn};
use metered::{metered, ErrorCount, HitCount, ResponseTime, Throughput}; use metered::{metered, ErrorCount, HitCount, ResponseTime, Throughput};
use migration::{Migrator, MigratorTrait}; use migration::{Migrator, MigratorTrait};
use moka::future::Cache; use moka::future::Cache;
@ -43,7 +44,6 @@ use tokio::sync::{broadcast, watch, Semaphore};
use tokio::task::JoinHandle; use tokio::task::JoinHandle;
use tokio::time::timeout; use tokio::time::timeout;
use tokio_stream::wrappers::{BroadcastStream, WatchStream}; use tokio_stream::wrappers::{BroadcastStream, WatchStream};
use tracing::{error, info, instrument, trace, warn};
use ulid::Ulid; use ulid::Ulid;
// TODO: make this customizable? // TODO: make this customizable?
@ -124,7 +124,6 @@ pub struct Web3ProxyApp {
/// flatten a JoinError into an anyhow error /// flatten a JoinError into an anyhow error
/// Useful when joining multiple futures. /// Useful when joining multiple futures.
#[instrument(skip_all)]
pub async fn flatten_handle<T>(handle: AnyhowJoinHandle<T>) -> anyhow::Result<T> { pub async fn flatten_handle<T>(handle: AnyhowJoinHandle<T>) -> anyhow::Result<T> {
match handle.await { match handle.await {
Ok(Ok(result)) => Ok(result), Ok(Ok(result)) => Ok(result),
@ -134,7 +133,7 @@ pub async fn flatten_handle<T>(handle: AnyhowJoinHandle<T>) -> anyhow::Result<T>
} }
/// return the first error or okay if everything worked /// return the first error or okay if everything worked
#[instrument(skip_all)]
pub async fn flatten_handles<T>( pub async fn flatten_handles<T>(
mut handles: FuturesUnordered<AnyhowJoinHandle<T>>, mut handles: FuturesUnordered<AnyhowJoinHandle<T>>,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
@ -150,7 +149,6 @@ pub async fn flatten_handles<T>(
} }
/// Connect to the database and run migrations /// Connect to the database and run migrations
#[instrument(level = "trace")]
pub async fn get_migrated_db( pub async fn get_migrated_db(
db_url: String, db_url: String,
min_connections: u32, min_connections: u32,
@ -191,7 +189,6 @@ pub struct Web3ProxyAppSpawn {
#[metered(registry = Web3ProxyAppMetrics, registry_expr = self.app_metrics, visibility = pub)] #[metered(registry = Web3ProxyAppMetrics, registry_expr = self.app_metrics, visibility = pub)]
impl Web3ProxyApp { impl Web3ProxyApp {
/// The main entrypoint. /// The main entrypoint.
#[instrument(level = "trace")]
pub async fn spawn( pub async fn spawn(
top_config: TopConfig, top_config: TopConfig,
num_workers: usize, num_workers: usize,
@ -272,8 +269,8 @@ impl Web3ProxyApp {
// test the redis pool // test the redis pool
if let Err(err) = redis_pool.get().await { if let Err(err) = redis_pool.get().await {
error!( error!(
?err, "failed to connect to vredis. some features will be disabled. err={:?}",
"failed to connect to vredis. some features will be disabled" err
); );
}; };
@ -504,7 +501,6 @@ impl Web3ProxyApp {
Ok((app, cancellable_handles, important_background_handles).into()) Ok((app, cancellable_handles, important_background_handles).into())
} }
#[instrument(level = "trace")]
pub fn prometheus_metrics(&self) -> String { pub fn prometheus_metrics(&self) -> String {
let globals = HashMap::new(); let globals = HashMap::new();
// TODO: what globals? should this be the hostname or what? // TODO: what globals? should this be the hostname or what?
@ -526,7 +522,6 @@ impl Web3ProxyApp {
} }
#[measure([ErrorCount, HitCount, ResponseTime, Throughput])] #[measure([ErrorCount, HitCount, ResponseTime, Throughput])]
#[instrument(level = "trace")]
pub async fn eth_subscribe<'a>( pub async fn eth_subscribe<'a>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
authorization: Arc<Authorization>, authorization: Arc<Authorization>,
@ -550,7 +545,7 @@ impl Web3ProxyApp {
Some(x) if x == json!(["newHeads"]) => { Some(x) if x == json!(["newHeads"]) => {
let head_block_receiver = self.head_block_receiver.clone(); let head_block_receiver = self.head_block_receiver.clone();
trace!(?subscription_id, "new heads subscription"); // trace!("new heads subscription. id={:?}", subscription_id);
tokio::spawn(async move { tokio::spawn(async move {
let mut head_block_receiver = Abortable::new( let mut head_block_receiver = Abortable::new(
WatchStream::new(head_block_receiver), WatchStream::new(head_block_receiver),
@ -580,7 +575,7 @@ impl Web3ProxyApp {
}; };
} }
trace!(?subscription_id, "closed new heads subscription"); // trace!("closed new heads subscription. id={:?}", subscription_id);
}); });
} }
Some(x) if x == json!(["newPendingTransactions"]) => { Some(x) if x == json!(["newPendingTransactions"]) => {
@ -591,7 +586,7 @@ impl Web3ProxyApp {
subscription_registration, subscription_registration,
); );
trace!(?subscription_id, "pending transactions subscription"); // // trace!(?subscription_id, "pending transactions subscription");
tokio::spawn(async move { tokio::spawn(async move {
while let Some(Ok(new_tx_state)) = pending_tx_receiver.next().await { while let Some(Ok(new_tx_state)) = pending_tx_receiver.next().await {
let new_tx = match new_tx_state { let new_tx = match new_tx_state {
@ -619,7 +614,7 @@ impl Web3ProxyApp {
}; };
} }
trace!(?subscription_id, "closed new heads subscription"); // // trace!(?subscription_id, "closed new heads subscription");
}); });
} }
Some(x) if x == json!(["newPendingFullTransactions"]) => { Some(x) if x == json!(["newPendingFullTransactions"]) => {
@ -631,7 +626,7 @@ impl Web3ProxyApp {
subscription_registration, subscription_registration,
); );
trace!(?subscription_id, "pending transactions subscription"); // // trace!(?subscription_id, "pending transactions subscription");
// TODO: do something with this handle? // TODO: do something with this handle?
tokio::spawn(async move { tokio::spawn(async move {
@ -663,7 +658,7 @@ impl Web3ProxyApp {
}; };
} }
trace!(?subscription_id, "closed new heads subscription"); // // trace!(?subscription_id, "closed new heads subscription");
}); });
} }
Some(x) if x == json!(["newPendingRawTransactions"]) => { Some(x) if x == json!(["newPendingRawTransactions"]) => {
@ -675,7 +670,7 @@ impl Web3ProxyApp {
subscription_registration, subscription_registration,
); );
trace!(?subscription_id, "pending transactions subscription"); // // trace!(?subscription_id, "pending transactions subscription");
// TODO: do something with this handle? // TODO: do something with this handle?
tokio::spawn(async move { tokio::spawn(async move {
@ -707,7 +702,7 @@ impl Web3ProxyApp {
}; };
} }
trace!(?subscription_id, "closed new heads subscription"); // // trace!(?subscription_id, "closed new heads subscription");
}); });
} }
_ => return Err(anyhow::anyhow!("unimplemented")), _ => return Err(anyhow::anyhow!("unimplemented")),
@ -723,14 +718,13 @@ impl Web3ProxyApp {
} }
/// send the request or batch of requests to the approriate RPCs /// send the request or batch of requests to the approriate RPCs
#[instrument(level = "trace")]
pub async fn proxy_web3_rpc( pub async fn proxy_web3_rpc(
self: &Arc<Self>, self: &Arc<Self>,
authorization: Arc<Authorization>, authorization: Arc<Authorization>,
request: JsonRpcRequestEnum, request: JsonRpcRequestEnum,
) -> anyhow::Result<JsonRpcForwardedResponseEnum> { ) -> anyhow::Result<JsonRpcForwardedResponseEnum> {
// TODO: this should probably be trace level // TODO: this should probably be trace level
trace!(?request, "proxy_web3_rpc"); // // trace!(?request, "proxy_web3_rpc");
// even though we have timeouts on the requests to our backend providers, // even though we have timeouts on the requests to our backend providers,
// we need a timeout for the incoming request so that retries don't run forever // we need a timeout for the incoming request so that retries don't run forever
@ -755,14 +749,13 @@ impl Web3ProxyApp {
}; };
// TODO: this should probably be trace level // TODO: this should probably be trace level
trace!(?response, "Forwarding"); // // trace!(?response, "Forwarding");
Ok(response) Ok(response)
} }
/// cut up the request and send to potentually different servers /// cut up the request and send to potentually different servers
/// TODO: make sure this isn't a problem /// TODO: make sure this isn't a problem
#[instrument(level = "trace")]
async fn proxy_web3_rpc_requests( async fn proxy_web3_rpc_requests(
self: &Arc<Self>, self: &Arc<Self>,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
@ -792,12 +785,10 @@ impl Web3ProxyApp {
} }
/// TODO: i don't think we want or need this. just use app.db_conn, or maybe app.db_conn.clone() or app.db_conn.as_ref() /// TODO: i don't think we want or need this. just use app.db_conn, or maybe app.db_conn.clone() or app.db_conn.as_ref()
#[instrument(level = "trace")]
pub fn db_conn(&self) -> Option<DatabaseConnection> { pub fn db_conn(&self) -> Option<DatabaseConnection> {
self.db_conn.clone() self.db_conn.clone()
} }
#[instrument(level = "trace")]
pub async fn redis_conn(&self) -> anyhow::Result<redis_rate_limiter::RedisConnection> { pub async fn redis_conn(&self) -> anyhow::Result<redis_rate_limiter::RedisConnection> {
match self.vredis_pool.as_ref() { match self.vredis_pool.as_ref() {
None => Err(anyhow::anyhow!("no redis server configured")), None => Err(anyhow::anyhow!("no redis server configured")),
@ -810,13 +801,12 @@ impl Web3ProxyApp {
} }
#[measure([ErrorCount, HitCount, ResponseTime, Throughput])] #[measure([ErrorCount, HitCount, ResponseTime, Throughput])]
#[instrument(level = "trace")]
async fn proxy_web3_rpc_request( async fn proxy_web3_rpc_request(
self: &Arc<Self>, self: &Arc<Self>,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
mut request: JsonRpcRequest, mut request: JsonRpcRequest,
) -> anyhow::Result<JsonRpcForwardedResponse> { ) -> anyhow::Result<JsonRpcForwardedResponse> {
trace!("Received request: {:?}", request); // trace!("Received request: {:?}", request);
// TODO: allow customizing the period? // TODO: allow customizing the period?
let request_metadata = Arc::new(RequestMetadata::new(60, &request)?); let request_metadata = Arc::new(RequestMetadata::new(60, &request)?);

View File

@ -6,6 +6,7 @@ use derive_more::From;
use entities::rpc_accounting; use entities::rpc_accounting;
use hashbrown::HashMap; use hashbrown::HashMap;
use hdrhistogram::{Histogram, RecordError}; use hdrhistogram::{Histogram, RecordError};
use log::{error, info};
use sea_orm::{ActiveModelTrait, DatabaseConnection, DbErr}; use sea_orm::{ActiveModelTrait, DatabaseConnection, DbErr};
use std::num::NonZeroU64; use std::num::NonZeroU64;
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
@ -14,7 +15,6 @@ use std::time::{Duration, SystemTime};
use tokio::sync::broadcast; use tokio::sync::broadcast;
use tokio::task::JoinHandle; use tokio::task::JoinHandle;
use tokio::time::{interval_at, Instant}; use tokio::time::{interval_at, Instant};
use tracing::{error, info};
/// TODO: where should this be defined? /// TODO: where should this be defined?
/// TODO: can we use something inside sea_orm instead? /// TODO: can we use something inside sea_orm instead?
@ -351,7 +351,7 @@ impl StatEmitter {
if let Some(value) = response_aggregate_map.get_mut(&key) { if let Some(value) = response_aggregate_map.get_mut(&key) {
if let Err(err) = value.add(stat) { if let Err(err) = value.add(stat) {
error!(?err, "unable to aggregate stats!"); error!( "unable to aggregate stats! err={:?}", err);
}; };
} else { } else {
unimplemented!(); unimplemented!();
@ -364,7 +364,7 @@ impl StatEmitter {
// TODO: batch these saves // TODO: batch these saves
for (key, aggregate) in response_aggregate_map.drain() { for (key, aggregate) in response_aggregate_map.drain() {
if let Err(err) = aggregate.save(self.chain_id, &self.db_conn, key, period_timestamp).await { if let Err(err) = aggregate.save(self.chain_id, &self.db_conn, key, period_timestamp).await {
error!(?err, "Unable to save stat while shutting down!"); error!("Unable to save stat while shutting down! {:?}", err);
}; };
} }
// advance to the next period // advance to the next period
@ -377,7 +377,7 @@ impl StatEmitter {
info!("aggregate stat_loop shutting down"); info!("aggregate stat_loop shutting down");
// TODO: call aggregate_stat for all the // TODO: call aggregate_stat for all the
}, },
Err(err) => error!(?err, "shutdown receiver"), Err(err) => error!("shutdown receiver. err={:?}", err),
} }
break; break;
} }
@ -391,7 +391,7 @@ impl StatEmitter {
.save(self.chain_id, &self.db_conn, key, period_timestamp) .save(self.chain_id, &self.db_conn, key, period_timestamp)
.await .await
{ {
error!(?err, "Unable to save stat while shutting down!"); error!("Unable to save stat while shutting down! err={:?}", err);
}; };
} }

View File

@ -9,6 +9,7 @@
#![forbid(unsafe_code)] #![forbid(unsafe_code)]
use futures::StreamExt; use futures::StreamExt;
use log::{debug, info, warn, LevelFilter};
use parking_lot::deadlock; use parking_lot::deadlock;
use std::fs; use std::fs;
use std::sync::atomic::{self, AtomicUsize}; use std::sync::atomic::{self, AtomicUsize};
@ -16,9 +17,6 @@ use std::thread;
use tokio::runtime; use tokio::runtime;
use tokio::sync::broadcast; use tokio::sync::broadcast;
use tokio::time::Duration; use tokio::time::Duration;
use tracing::{debug, info, warn};
use tracing_subscriber::prelude::*;
use tracing_subscriber::EnvFilter;
use web3_proxy::app::{flatten_handle, flatten_handles, Web3ProxyApp}; use web3_proxy::app::{flatten_handle, flatten_handles, Web3ProxyApp};
use web3_proxy::config::{CliConfig, TopConfig}; use web3_proxy::config::{CliConfig, TopConfig};
use web3_proxy::{frontend, metrics_frontend}; use web3_proxy::{frontend, metrics_frontend};
@ -28,7 +26,8 @@ fn run(
cli_config: CliConfig, cli_config: CliConfig,
top_config: TopConfig, top_config: TopConfig,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
debug!(?cli_config, ?top_config); debug!("{:?}", cli_config);
debug!("{:?}", top_config);
let mut shutdown_receiver = shutdown_sender.subscribe(); let mut shutdown_receiver = shutdown_sender.subscribe();
@ -70,7 +69,7 @@ fn run(
let rt = rt_builder.build()?; let rt = rt_builder.build()?;
let num_workers = rt.metrics().num_workers(); let num_workers = rt.metrics().num_workers();
debug!(?num_workers); debug!("num_workers: {}", num_workers);
rt.block_on(async { rt.block_on(async {
let app_frontend_port = cli_config.port; let app_frontend_port = cli_config.port;
@ -134,7 +133,7 @@ fn run(
// one of the handles stopped. send a value so the others know to shut down // one of the handles stopped. send a value so the others know to shut down
if let Err(err) = shutdown_sender.send(()) { if let Err(err) = shutdown_sender.send(()) {
warn!(?err, "shutdown sender"); warn!("shutdown sender err={:?}", err);
}; };
// wait on all the important background tasks (like saving stats to the database) to complete // wait on all the important background tasks (like saving stats to the database) to complete
@ -175,6 +174,12 @@ fn main() -> anyhow::Result<()> {
// TODO: this doesn't seem to do anything // TODO: this doesn't seem to do anything
proctitle::set_title(format!("web3_proxy-{}", top_config.app.chain_id)); proctitle::set_title(format!("web3_proxy-{}", top_config.app.chain_id));
let mut log_builder = env_logger::builder();
log_builder
.filter_level(LevelFilter::Off)
.parse_env("RUST_LOG");
// connect to sentry for error reporting // connect to sentry for error reporting
// if no sentry, only log to stdout // if no sentry, only log to stdout
let _sentry_guard = if let Some(sentry_url) = top_config.app.sentry_url.clone() { let _sentry_guard = if let Some(sentry_url) = top_config.app.sentry_url.clone() {
@ -188,24 +193,16 @@ fn main() -> anyhow::Result<()> {
}, },
)); ));
// TODO: how do we put the EnvFilter on this? let logger = sentry::integrations::log::SentryLogger::with_dest(log_builder.build());
tracing_subscriber::registry()
.with( log::set_boxed_logger(Box::new(logger)).unwrap();
tracing_subscriber::fmt::layer()
.compact()
.with_filter(EnvFilter::from_default_env()),
)
.with(sentry_tracing::layer())
.init();
Some(guard) Some(guard)
} else { } else {
// install global collector configured based on RUST_LOG env var. // install global collector configured based on RUST_LOG env var.
// TODO: attach sentry here let logger = log_builder.build();
tracing_subscriber::fmt()
.compact() log::set_boxed_logger(Box::new(logger)).unwrap();
.with_env_filter(EnvFilter::from_default_env())
.init();
None None
}; };
@ -243,13 +240,8 @@ mod tests {
// TODO: how should we handle logs in this? // TODO: how should we handle logs in this?
// TODO: option for super verbose logs // TODO: option for super verbose logs
std::env::set_var("RUST_LOG", "info,web3_proxy=debug"); std::env::set_var("RUST_LOG", "info,web3_proxy=debug");
// install global collector configured based on RUST_LOG env var.
// TODO: sentry is needed here! let _ = env_logger::builder().is_test(true).try_init();
tracing_subscriber::fmt()
.with_env_filter(EnvFilter::from_default_env())
.compact()
.with_test_writer()
.init();
let anvil = Anvil::new().spawn(); let anvil = Anvil::new().spawn();

View File

@ -1,6 +1,6 @@
use argh::FromArgs; use argh::FromArgs;
use log::{error, info, warn};
use std::fs; use std::fs;
use tracing::{error, info, warn};
use web3_proxy::config::TopConfig; use web3_proxy::config::TopConfig;
#[derive(FromArgs, PartialEq, Eq, Debug)] #[derive(FromArgs, PartialEq, Eq, Debug)]

View File

@ -2,8 +2,8 @@ use anyhow::Context;
use argh::FromArgs; use argh::FromArgs;
use entities::{rpc_key, user}; use entities::{rpc_key, user};
use ethers::prelude::Address; use ethers::prelude::Address;
use log::info;
use sea_orm::{ActiveModelTrait, TransactionTrait}; use sea_orm::{ActiveModelTrait, TransactionTrait};
use tracing::info;
use ulid::Ulid; use ulid::Ulid;
use uuid::Uuid; use uuid::Uuid;
use web3_proxy::frontend::authorization::RpcSecretKey; use web3_proxy::frontend::authorization::RpcSecretKey;

View File

@ -37,11 +37,7 @@ async fn main() -> anyhow::Result<()> {
std::env::set_var("RUST_LOG", "info,web3_proxy=debug,web3_proxy_cli=debug"); std::env::set_var("RUST_LOG", "info,web3_proxy=debug,web3_proxy_cli=debug");
} }
// install global collector configured based on RUST_LOG env var. env_logger::init();
tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.compact()
.init();
// this probably won't matter for us in docker, but better safe than sorry // this probably won't matter for us in docker, but better safe than sorry
fdlimit::raise_fd_limit(); fdlimit::raise_fd_limit();

View File

@ -4,8 +4,8 @@ use ethers::{
prelude::{BlockNumber, U64}, prelude::{BlockNumber, U64},
types::H256, types::H256,
}; };
use log::{warn};
use std::sync::Arc; use std::sync::Arc;
use tracing::{instrument, warn};
use crate::{frontend::authorization::Authorization, rpcs::connections::Web3Connections}; use crate::{frontend::authorization::Authorization, rpcs::connections::Web3Connections};
@ -39,7 +39,7 @@ pub fn block_num_to_u64(block_num: BlockNumber, latest_block: U64) -> U64 {
} }
/// modify params to always have a block number and not "latest" /// modify params to always have a block number and not "latest"
#[instrument(level = "trace")]
pub async fn clean_block_number( pub async fn clean_block_number(
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
params: &mut serde_json::Value, params: &mut serde_json::Value,
@ -98,7 +98,7 @@ pub async fn clean_block_number(
} }
// TODO: change this to also return the hash needed? // TODO: change this to also return the hash needed?
#[instrument(level = "trace")]
pub async fn block_needed( pub async fn block_needed(
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
method: &str, method: &str,
@ -210,7 +210,7 @@ pub async fn block_needed(
Ok(block) => Ok(Some(block)), Ok(block) => Ok(Some(block)),
Err(err) => { Err(err) => {
// TODO: seems unlikely that we will get here // TODO: seems unlikely that we will get here
warn!(?err, "could not get block from params"); warn!("could not get block from params. err={:?}", err);
Ok(None) Ok(None)
} }
} }

View File

@ -10,7 +10,6 @@ use sea_orm::DatabaseConnection;
use serde::Deserialize; use serde::Deserialize;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::broadcast; use tokio::sync::broadcast;
use tracing::instrument;
pub type BlockAndRpc = (Option<ArcBlock>, Arc<Web3Connection>); pub type BlockAndRpc = (Option<ArcBlock>, Arc<Web3Connection>);
pub type TxHashAndRpc = (TxHash, Arc<Web3Connection>); pub type TxHashAndRpc = (TxHash, Arc<Web3Connection>);
@ -201,7 +200,6 @@ impl Web3ConnectionConfig {
/// Create a Web3Connection from config /// Create a Web3Connection from config
/// TODO: move this into Web3Connection (just need to make things pub(crate)) /// TODO: move this into Web3Connection (just need to make things pub(crate))
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
#[instrument(level = "trace", skip(redis_pool))]
pub async fn spawn( pub async fn spawn(
self, self,
name: String, name: String,

View File

@ -13,6 +13,7 @@ use entities::{rpc_key, user, user_tier};
use hashbrown::HashMap; use hashbrown::HashMap;
use http::HeaderValue; use http::HeaderValue;
use ipnet::IpNet; use ipnet::IpNet;
use log::error;
use redis_rate_limiter::redis::AsyncCommands; use redis_rate_limiter::redis::AsyncCommands;
use redis_rate_limiter::RedisRateLimitResult; use redis_rate_limiter::RedisRateLimitResult;
use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter}; use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};
@ -21,7 +22,6 @@ use std::sync::atomic::{AtomicBool, AtomicU64};
use std::{net::IpAddr, str::FromStr, sync::Arc}; use std::{net::IpAddr, str::FromStr, sync::Arc};
use tokio::sync::{OwnedSemaphorePermit, Semaphore}; use tokio::sync::{OwnedSemaphorePermit, Semaphore};
use tokio::time::Instant; use tokio::time::Instant;
use tracing::{error, instrument, trace};
use ulid::Ulid; use ulid::Ulid;
use uuid::Uuid; use uuid::Uuid;
@ -348,7 +348,6 @@ pub async fn key_is_authorized(
impl Web3ProxyApp { impl Web3ProxyApp {
/// Limit the number of concurrent requests from the given ip address. /// Limit the number of concurrent requests from the given ip address.
#[instrument(level = "trace")]
pub async fn ip_semaphore(&self, ip: IpAddr) -> anyhow::Result<Option<OwnedSemaphorePermit>> { pub async fn ip_semaphore(&self, ip: IpAddr) -> anyhow::Result<Option<OwnedSemaphorePermit>> {
if let Some(max_concurrent_requests) = self.config.public_max_concurrent_requests { if let Some(max_concurrent_requests) = self.config.public_max_concurrent_requests {
let semaphore = self let semaphore = self
@ -374,7 +373,6 @@ impl Web3ProxyApp {
} }
/// Limit the number of concurrent requests from the given rpc key. /// Limit the number of concurrent requests from the given rpc key.
#[instrument(level = "trace")]
pub async fn rpc_key_semaphore( pub async fn rpc_key_semaphore(
&self, &self,
authorization_checks: &AuthorizationChecks, authorization_checks: &AuthorizationChecks,
@ -386,7 +384,7 @@ impl Web3ProxyApp {
.rpc_key_semaphores .rpc_key_semaphores
.get_with(rpc_key_id, async move { .get_with(rpc_key_id, async move {
let s = Semaphore::new(max_concurrent_requests as usize); let s = Semaphore::new(max_concurrent_requests as usize);
trace!("new semaphore for rpc_key_id {}", rpc_key_id); // // trace!("new semaphore for rpc_key_id {}", rpc_key_id);
Arc::new(s) Arc::new(s)
}) })
.await; .await;
@ -407,7 +405,6 @@ impl Web3ProxyApp {
/// Verify that the given bearer token and address are allowed to take the specified action. /// Verify that the given bearer token and address are allowed to take the specified action.
/// This includes concurrent request limiting. /// This includes concurrent request limiting.
#[instrument(level = "trace")]
pub async fn bearer_is_authorized( pub async fn bearer_is_authorized(
&self, &self,
bearer: Bearer, bearer: Bearer,
@ -447,7 +444,6 @@ impl Web3ProxyApp {
Ok((user, semaphore_permit)) Ok((user, semaphore_permit))
} }
#[instrument(level = "trace")]
pub async fn rate_limit_login(&self, ip: IpAddr) -> anyhow::Result<RateLimitResult> { pub async fn rate_limit_login(&self, ip: IpAddr) -> anyhow::Result<RateLimitResult> {
// TODO: dry this up with rate_limit_by_rpc_key? // TODO: dry this up with rate_limit_by_rpc_key?
@ -474,19 +470,19 @@ impl Web3ProxyApp {
// TODO: set headers so they know when they can retry // TODO: set headers so they know when they can retry
// TODO: debug or trace? // TODO: debug or trace?
// this is too verbose, but a stat might be good // this is too verbose, but a stat might be good
trace!(?ip, "login rate limit exceeded until {:?}", retry_at); // // trace!(?ip, "login rate limit exceeded until {:?}", retry_at);
Ok(RateLimitResult::RateLimited(authorization, Some(retry_at))) Ok(RateLimitResult::RateLimited(authorization, Some(retry_at)))
} }
Ok(RedisRateLimitResult::RetryNever) => { Ok(RedisRateLimitResult::RetryNever) => {
// TODO: i don't think we'll get here. maybe if we ban an IP forever? seems unlikely // TODO: i don't think we'll get here. maybe if we ban an IP forever? seems unlikely
trace!(?ip, "login rate limit is 0"); // // trace!(?ip, "login rate limit is 0");
Ok(RateLimitResult::RateLimited(authorization, None)) Ok(RateLimitResult::RateLimited(authorization, None))
} }
Err(err) => { Err(err) => {
// internal error, not rate limit being hit // internal error, not rate limit being hit
// TODO: i really want axum to do this for us in a single place. // TODO: i really want axum to do this for us in a single place.
error!(?err, "login rate limiter is unhappy. allowing ip"); error!("login rate limiter is unhappy. allowing ip. err={:?}", err);
Ok(RateLimitResult::Allowed(authorization, None)) Ok(RateLimitResult::Allowed(authorization, None))
} }
@ -498,7 +494,6 @@ impl Web3ProxyApp {
} }
/// origin is included because it can override the default rate limits /// origin is included because it can override the default rate limits
#[instrument(level = "trace")]
pub async fn rate_limit_by_ip( pub async fn rate_limit_by_ip(
&self, &self,
allowed_origin_requests_per_period: &HashMap<String, u64>, allowed_origin_requests_per_period: &HashMap<String, u64>,
@ -529,18 +524,18 @@ impl Web3ProxyApp {
} }
Ok(DeferredRateLimitResult::RetryAt(retry_at)) => { Ok(DeferredRateLimitResult::RetryAt(retry_at)) => {
// TODO: set headers so they know when they can retry // TODO: set headers so they know when they can retry
trace!(?ip, "rate limit exceeded until {:?}", retry_at); // // trace!(?ip, "rate limit exceeded until {:?}", retry_at);
Ok(RateLimitResult::RateLimited(authorization, Some(retry_at))) Ok(RateLimitResult::RateLimited(authorization, Some(retry_at)))
} }
Ok(DeferredRateLimitResult::RetryNever) => { Ok(DeferredRateLimitResult::RetryNever) => {
// TODO: i don't think we'll get here. maybe if we ban an IP forever? seems unlikely // TODO: i don't think we'll get here. maybe if we ban an IP forever? seems unlikely
trace!(?ip, "rate limit is 0"); // // trace!(?ip, "rate limit is 0");
Ok(RateLimitResult::RateLimited(authorization, None)) Ok(RateLimitResult::RateLimited(authorization, None))
} }
Err(err) => { Err(err) => {
// this an internal error of some kind, not the rate limit being hit // this an internal error of some kind, not the rate limit being hit
// TODO: i really want axum to do this for us in a single place. // TODO: i really want axum to do this for us in a single place.
error!(?err, "rate limiter is unhappy. allowing ip"); error!("rate limiter is unhappy. allowing ip. err={:?}", err);
// at least we can still check the semaphore // at least we can still check the semaphore
let semaphore = self.ip_semaphore(ip).await?; let semaphore = self.ip_semaphore(ip).await?;
@ -558,7 +553,6 @@ impl Web3ProxyApp {
} }
// check the local cache for user data, or query the database // check the local cache for user data, or query the database
#[instrument(level = "trace")]
pub(crate) async fn authorization_checks( pub(crate) async fn authorization_checks(
&self, &self,
rpc_secret_key: RpcSecretKey, rpc_secret_key: RpcSecretKey,
@ -566,7 +560,7 @@ impl Web3ProxyApp {
let authorization_checks: Result<_, Arc<anyhow::Error>> = self let authorization_checks: Result<_, Arc<anyhow::Error>> = self
.rpc_secret_key_cache .rpc_secret_key_cache
.try_get_with(rpc_secret_key.into(), async move { .try_get_with(rpc_secret_key.into(), async move {
trace!(?rpc_secret_key, "user cache miss"); // // trace!(?rpc_secret_key, "user cache miss");
let db_conn = self.db_conn().context("Getting database connection")?; let db_conn = self.db_conn().context("Getting database connection")?;
@ -671,7 +665,6 @@ impl Web3ProxyApp {
} }
/// Authorized the ip/origin/referer/useragent and rate limit and concurrency /// Authorized the ip/origin/referer/useragent and rate limit and concurrency
#[instrument(level = "trace")]
pub async fn rate_limit_by_rpc_key( pub async fn rate_limit_by_rpc_key(
&self, &self,
ip: IpAddr, ip: IpAddr,
@ -722,19 +715,19 @@ impl Web3ProxyApp {
// this is too verbose, but a stat might be good // this is too verbose, but a stat might be good
// TODO: keys are secrets! use the id instead // TODO: keys are secrets! use the id instead
// TODO: emit a stat // TODO: emit a stat
trace!(?rpc_key, "rate limit exceeded until {:?}", retry_at); // // trace!(?rpc_key, "rate limit exceeded until {:?}", retry_at);
Ok(RateLimitResult::RateLimited(authorization, Some(retry_at))) Ok(RateLimitResult::RateLimited(authorization, Some(retry_at)))
} }
Ok(DeferredRateLimitResult::RetryNever) => { Ok(DeferredRateLimitResult::RetryNever) => {
// TODO: keys are secret. don't log them! // TODO: keys are secret. don't log them!
trace!(?rpc_key, "rate limit is 0"); // // trace!(?rpc_key, "rate limit is 0");
// TODO: emit a stat // TODO: emit a stat
Ok(RateLimitResult::RateLimited(authorization, None)) Ok(RateLimitResult::RateLimited(authorization, None))
} }
Err(err) => { Err(err) => {
// internal error, not rate limit being hit // internal error, not rate limit being hit
// TODO: i really want axum to do this for us in a single place. // TODO: i really want axum to do this for us in a single place.
error!(?err, "rate limiter is unhappy. allowing ip"); error!("rate limiter is unhappy. allowing ip. err={:?}", err);
Ok(RateLimitResult::Allowed(authorization, semaphore)) Ok(RateLimitResult::Allowed(authorization, semaphore))
} }

View File

@ -11,12 +11,12 @@ use axum::{
use derive_more::From; use derive_more::From;
use http::header::InvalidHeaderValue; use http::header::InvalidHeaderValue;
use ipnet::AddrParseError; use ipnet::AddrParseError;
use log::warn;
use redis_rate_limiter::redis::RedisError; use redis_rate_limiter::redis::RedisError;
use reqwest::header::ToStrError; use reqwest::header::ToStrError;
use sea_orm::DbErr; use sea_orm::DbErr;
use std::error::Error; use std::error::Error;
use tokio::{task::JoinError, time::Instant}; use tokio::{task::JoinError, time::Instant};
use tracing::{instrument, trace, warn};
// TODO: take "IntoResponse" instead of Response? // TODO: take "IntoResponse" instead of Response?
pub type FrontendResult = Result<Response, FrontendErrorResponse>; pub type FrontendResult = Result<Response, FrontendErrorResponse>;
@ -43,12 +43,11 @@ pub enum FrontendErrorResponse {
} }
impl IntoResponse for FrontendErrorResponse { impl IntoResponse for FrontendErrorResponse {
#[instrument(level = "trace")]
fn into_response(self) -> Response { fn into_response(self) -> Response {
// TODO: include the request id in these so that users can give us something that will point to logs // TODO: include the request id in these so that users can give us something that will point to logs
let (status_code, response) = match self { let (status_code, response) = match self {
Self::Anyhow(err) => { Self::Anyhow(err) => {
warn!(?err, "anyhow"); warn!("anyhow. err={:?}", err);
( (
StatusCode::INTERNAL_SERVER_ERROR, StatusCode::INTERNAL_SERVER_ERROR,
JsonRpcForwardedResponse::from_string( JsonRpcForwardedResponse::from_string(
@ -60,7 +59,7 @@ impl IntoResponse for FrontendErrorResponse {
) )
} }
Self::Box(err) => { Self::Box(err) => {
warn!(?err, "boxed"); warn!("boxed err={:?}", err);
( (
StatusCode::INTERNAL_SERVER_ERROR, StatusCode::INTERNAL_SERVER_ERROR,
JsonRpcForwardedResponse::from_str( JsonRpcForwardedResponse::from_str(
@ -72,7 +71,7 @@ impl IntoResponse for FrontendErrorResponse {
) )
} }
Self::Database(err) => { Self::Database(err) => {
warn!(?err, "database"); warn!("database err={:?}", err);
( (
StatusCode::INTERNAL_SERVER_ERROR, StatusCode::INTERNAL_SERVER_ERROR,
JsonRpcForwardedResponse::from_str( JsonRpcForwardedResponse::from_str(
@ -83,7 +82,7 @@ impl IntoResponse for FrontendErrorResponse {
) )
} }
Self::HeadersError(err) => { Self::HeadersError(err) => {
warn!(?err, "HeadersError"); warn!("HeadersError {:?}", err);
( (
StatusCode::BAD_REQUEST, StatusCode::BAD_REQUEST,
JsonRpcForwardedResponse::from_str( JsonRpcForwardedResponse::from_str(
@ -94,7 +93,7 @@ impl IntoResponse for FrontendErrorResponse {
) )
} }
Self::IpAddrParse(err) => { Self::IpAddrParse(err) => {
warn!(?err, "IpAddrParse"); warn!("IpAddrParse err={:?}", err);
( (
StatusCode::BAD_REQUEST, StatusCode::BAD_REQUEST,
JsonRpcForwardedResponse::from_str( JsonRpcForwardedResponse::from_str(
@ -105,7 +104,7 @@ impl IntoResponse for FrontendErrorResponse {
) )
} }
Self::InvalidHeaderValue(err) => { Self::InvalidHeaderValue(err) => {
warn!(?err, "InvalidHeaderValue"); warn!("InvalidHeaderValue err={:?}", err);
( (
StatusCode::BAD_REQUEST, StatusCode::BAD_REQUEST,
JsonRpcForwardedResponse::from_str( JsonRpcForwardedResponse::from_str(
@ -116,7 +115,7 @@ impl IntoResponse for FrontendErrorResponse {
) )
} }
Self::JoinError(err) => { Self::JoinError(err) => {
warn!(?err, "JoinError. likely shutting down"); warn!("JoinError. likely shutting down. err={:?}", err);
( (
StatusCode::INTERNAL_SERVER_ERROR, StatusCode::INTERNAL_SERVER_ERROR,
JsonRpcForwardedResponse::from_str( JsonRpcForwardedResponse::from_str(
@ -171,7 +170,7 @@ impl IntoResponse for FrontendErrorResponse {
) )
} }
Self::Redis(err) => { Self::Redis(err) => {
warn!(?err, "redis"); warn!("redis err={:?}", err);
( (
StatusCode::INTERNAL_SERVER_ERROR, StatusCode::INTERNAL_SERVER_ERROR,
JsonRpcForwardedResponse::from_str( JsonRpcForwardedResponse::from_str(
@ -187,7 +186,7 @@ impl IntoResponse for FrontendErrorResponse {
} }
Self::StatusCode(status_code, err_msg, err) => { Self::StatusCode(status_code, err_msg, err) => {
// TODO: warn is way too loud. different status codes should get different error levels. 500s should warn. 400s should stat // TODO: warn is way too loud. different status codes should get different error levels. 500s should warn. 400s should stat
trace!(?status_code, ?err_msg, ?err); // trace!(?status_code, ?err_msg, ?err);
( (
status_code, status_code,
JsonRpcForwardedResponse::from_str( JsonRpcForwardedResponse::from_str(
@ -198,7 +197,7 @@ impl IntoResponse for FrontendErrorResponse {
) )
} }
Self::HeaderToString(err) => { Self::HeaderToString(err) => {
trace!(?err, "HeaderToString"); // // trace!(?err, "HeaderToString");
( (
StatusCode::BAD_REQUEST, StatusCode::BAD_REQUEST,
JsonRpcForwardedResponse::from_str( JsonRpcForwardedResponse::from_str(
@ -209,7 +208,7 @@ impl IntoResponse for FrontendErrorResponse {
) )
} }
Self::UlidDecodeError(err) => { Self::UlidDecodeError(err) => {
trace!(?err, "UlidDecodeError"); // // trace!(?err, "UlidDecodeError");
( (
StatusCode::BAD_REQUEST, StatusCode::BAD_REQUEST,
JsonRpcForwardedResponse::from_str( JsonRpcForwardedResponse::from_str(
@ -234,7 +233,6 @@ impl IntoResponse for FrontendErrorResponse {
} }
} }
#[instrument(level = "trace")]
pub async fn handler_404() -> Response { pub async fn handler_404() -> Response {
FrontendErrorResponse::NotFound.into_response() FrontendErrorResponse::NotFound.into_response()
} }

View File

@ -10,46 +10,21 @@ pub mod users;
use crate::app::Web3ProxyApp; use crate::app::Web3ProxyApp;
use axum::{ use axum::{
body::Body,
handler::Handler, handler::Handler,
routing::{get, post, put}, routing::{get, post, put},
Extension, Router, Extension, Router,
}; };
use http::header::AUTHORIZATION; use http::header::AUTHORIZATION;
use http::Request; use log::info;
use std::iter::once; use std::iter::once;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::sync::Arc; use std::sync::Arc;
use tower_http::cors::CorsLayer; use tower_http::cors::CorsLayer;
use tower_http::sensitive_headers::SetSensitiveRequestHeadersLayer; use tower_http::sensitive_headers::SetSensitiveRequestHeadersLayer;
use tower_http::trace::TraceLayer;
use tower_request_id::{RequestId, RequestIdLayer};
use tracing::{error_span, info, instrument};
/// Start the frontend server. /// Start the frontend server.
#[instrument(level = "trace")]
pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()> {
// create a tracing span for each request with a random request id and the method
// GET: websocket or static pages
// POST: http rpc or login
let request_tracing_layer =
TraceLayer::new_for_http().make_span_with(|request: &Request<Body>| {
// We get the request id from the extensions
let request_id = request
.extensions()
.get::<RequestId>()
.map(ToString::to_string)
.unwrap_or_else(|| "unknown".into());
// And then we put it along with other information into the `request` span
error_span!(
"http_request",
id = %request_id,
// TODO: do we want these?
method = %request.method(),
// uri = %request.uri(),
)
});
pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()> {
// build our axum Router // build our axum Router
let app = Router::new() let app = Router::new()
// routes should be ordered most to least common // routes should be ordered most to least common
@ -91,12 +66,8 @@ pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()
// the last layer is first for requests and last for responses // the last layer is first for requests and last for responses
// Mark the `Authorization` request header as sensitive so it doesn't show in logs // Mark the `Authorization` request header as sensitive so it doesn't show in logs
.layer(SetSensitiveRequestHeadersLayer::new(once(AUTHORIZATION))) .layer(SetSensitiveRequestHeadersLayer::new(once(AUTHORIZATION)))
// add the request id to our tracing logs
.layer(request_tracing_layer)
// handle cors // handle cors
.layer(CorsLayer::very_permissive()) .layer(CorsLayer::very_permissive())
// create a unique id for each request
.layer(RequestIdLayer)
// application state // application state
.layer(Extension(proxy_app.clone())) .layer(Extension(proxy_app.clone()))
// 404 for any unknown routes // 404 for any unknown routes

View File

@ -10,38 +10,27 @@ use axum::{response::IntoResponse, Extension, Json};
use axum_client_ip::ClientIp; use axum_client_ip::ClientIp;
use axum_macros::debug_handler; use axum_macros::debug_handler;
use std::sync::Arc; use std::sync::Arc;
use tracing::{error_span, instrument, Instrument};
/// POST /rpc -- Public entrypoint for HTTP JSON-RPC requests. Web3 wallets use this. /// POST /rpc -- Public entrypoint for HTTP JSON-RPC requests. Web3 wallets use this.
/// Defaults to rate limiting by IP address, but can also read the Authorization header for a bearer token. /// Defaults to rate limiting by IP address, but can also read the Authorization header for a bearer token.
/// If possible, please use a WebSocket instead. /// If possible, please use a WebSocket instead.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn proxy_web3_rpc( pub async fn proxy_web3_rpc(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ClientIp(ip): ClientIp, ClientIp(ip): ClientIp,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
Json(payload): Json<JsonRpcRequestEnum>, Json(payload): Json<JsonRpcRequestEnum>,
) -> FrontendResult { ) -> FrontendResult {
let request_span = error_span!("request", %ip);
// TODO: do we care about keeping the TypedHeader wrapper? // TODO: do we care about keeping the TypedHeader wrapper?
let origin = origin.map(|x| x.0); let origin = origin.map(|x| x.0);
let (authorization, _semaphore) = ip_is_authorized(&app, ip, origin) let (authorization, _semaphore) = ip_is_authorized(&app, ip, origin).await?;
.instrument(request_span)
.await?;
let request_span = error_span!("request", ?authorization);
let authorization = Arc::new(authorization); let authorization = Arc::new(authorization);
// TODO: spawn earlier? i think we want ip_is_authorized in this future // TODO: spawn earlier? i think we want ip_is_authorized in this future
let f = tokio::spawn(async move { let f = tokio::spawn(async move { app.proxy_web3_rpc(authorization, payload).await });
app.proxy_web3_rpc(authorization, payload)
.instrument(request_span)
.await
});
let response = f.await.expect("joinhandle should always work")?; let response = f.await.expect("joinhandle should always work")?;
@ -53,7 +42,7 @@ pub async fn proxy_web3_rpc(
/// Can optionally authorized based on origin, referer, or user agent. /// Can optionally authorized based on origin, referer, or user agent.
/// If possible, please use a WebSocket instead. /// If possible, please use a WebSocket instead.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn proxy_web3_rpc_with_key( pub async fn proxy_web3_rpc_with_key(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ClientIp(ip): ClientIp, ClientIp(ip): ClientIp,
@ -65,8 +54,6 @@ pub async fn proxy_web3_rpc_with_key(
) -> FrontendResult { ) -> FrontendResult {
let rpc_key = rpc_key.parse()?; let rpc_key = rpc_key.parse()?;
let request_span = error_span!("request", %ip, ?referer, ?user_agent);
// keep the semaphore until the end of the response // keep the semaphore until the end of the response
let (authorization, _semaphore) = key_is_authorized( let (authorization, _semaphore) = key_is_authorized(
&app, &app,
@ -76,18 +63,14 @@ pub async fn proxy_web3_rpc_with_key(
referer.map(|x| x.0), referer.map(|x| x.0),
user_agent.map(|x| x.0), user_agent.map(|x| x.0),
) )
.instrument(request_span.clone())
.await?; .await?;
let request_span = error_span!("request", ?authorization);
let authorization = Arc::new(authorization); let authorization = Arc::new(authorization);
// the request can take a while, so we spawn so that we can start serving another request // the request can take a while, so we spawn so that we can start serving another request
// TODO: spawn even earlier? // TODO: spawn even earlier?
let f = tokio::spawn(async move { let f = tokio::spawn(async move {
app.proxy_web3_rpc(authorization, payload) app.proxy_web3_rpc(authorization, payload)
.instrument(request_span)
.await .await
}); });

View File

@ -21,10 +21,10 @@ use futures::{
use handlebars::Handlebars; use handlebars::Handlebars;
use hashbrown::HashMap; use hashbrown::HashMap;
use http::StatusCode; use http::StatusCode;
use log::{error, info};
use serde_json::{json, value::RawValue}; use serde_json::{json, value::RawValue};
use std::sync::Arc; use std::sync::Arc;
use std::{str::from_utf8_mut, sync::atomic::AtomicUsize}; use std::{str::from_utf8_mut, sync::atomic::AtomicUsize};
use tracing::{error, error_span, info, instrument, trace, Instrument};
use crate::{ use crate::{
app::Web3ProxyApp, app::Web3ProxyApp,
@ -34,7 +34,7 @@ use crate::{
/// Public entrypoint for WebSocket JSON-RPC requests. /// Public entrypoint for WebSocket JSON-RPC requests.
/// Defaults to rate limiting by IP address, but can also read the Authorization header for a bearer token. /// Defaults to rate limiting by IP address, but can also read the Authorization header for a bearer token.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn websocket_handler( pub async fn websocket_handler(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ClientIp(ip): ClientIp, ClientIp(ip): ClientIp,
@ -42,23 +42,16 @@ pub async fn websocket_handler(
ws_upgrade: Option<WebSocketUpgrade>, ws_upgrade: Option<WebSocketUpgrade>,
) -> FrontendResult { ) -> FrontendResult {
// TODO: i don't like logging ips. move this to trace level? // TODO: i don't like logging ips. move this to trace level?
let request_span = error_span!("request", %ip, ?origin);
let origin = origin.map(|x| x.0); let origin = origin.map(|x| x.0);
let (authorization, _semaphore) = ip_is_authorized(&app, ip, origin) let (authorization, _semaphore) = ip_is_authorized(&app, ip, origin).await?;
.instrument(request_span)
.await?;
let request_span = error_span!("request", ?authorization);
let authorization = Arc::new(authorization); let authorization = Arc::new(authorization);
match ws_upgrade { match ws_upgrade {
Some(ws) => Ok(ws Some(ws) => Ok(ws
.on_upgrade(|socket| { .on_upgrade(|socket| proxy_web3_socket(app, authorization, socket))
proxy_web3_socket(app, authorization, socket).instrument(request_span)
})
.into_response()), .into_response()),
None => { None => {
if let Some(redirect) = &app.config.redirect_public_url { if let Some(redirect) = &app.config.redirect_public_url {
@ -79,7 +72,7 @@ pub async fn websocket_handler(
/// Rate limit and billing based on the api key in the url. /// Rate limit and billing based on the api key in the url.
/// Can optionally authorized based on origin, referer, or user agent. /// Can optionally authorized based on origin, referer, or user agent.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn websocket_handler_with_key( pub async fn websocket_handler_with_key(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ClientIp(ip): ClientIp, ClientIp(ip): ClientIp,
@ -91,8 +84,6 @@ pub async fn websocket_handler_with_key(
) -> FrontendResult { ) -> FrontendResult {
let rpc_key = rpc_key.parse()?; let rpc_key = rpc_key.parse()?;
let request_span = error_span!("request", %ip, ?referer, ?user_agent);
let (authorization, _semaphore) = key_is_authorized( let (authorization, _semaphore) = key_is_authorized(
&app, &app,
rpc_key, rpc_key,
@ -101,18 +92,14 @@ pub async fn websocket_handler_with_key(
referer.map(|x| x.0), referer.map(|x| x.0),
user_agent.map(|x| x.0), user_agent.map(|x| x.0),
) )
.instrument(request_span.clone())
.await?; .await?;
// TODO: type that wraps Address and have it censor? would protect us from accidently logging addresses or other user info
let request_span = error_span!("request", ?authorization);
let authorization = Arc::new(authorization); let authorization = Arc::new(authorization);
match ws_upgrade { match ws_upgrade {
Some(ws_upgrade) => Ok(ws_upgrade.on_upgrade(move |socket| { Some(ws_upgrade) => {
proxy_web3_socket(app, authorization, socket).instrument(request_span) Ok(ws_upgrade.on_upgrade(move |socket| proxy_web3_socket(app, authorization, socket)))
})), }
None => { None => {
// if no websocket upgrade, this is probably a user loading the url with their browser // if no websocket upgrade, this is probably a user loading the url with their browser
match ( match (
@ -156,7 +143,6 @@ pub async fn websocket_handler_with_key(
} }
} }
#[instrument(level = "trace")]
async fn proxy_web3_socket( async fn proxy_web3_socket(
app: Arc<Web3ProxyApp>, app: Arc<Web3ProxyApp>,
authorization: Arc<Authorization>, authorization: Arc<Authorization>,
@ -173,7 +159,7 @@ async fn proxy_web3_socket(
} }
/// websockets support a few more methods than http clients /// websockets support a few more methods than http clients
#[instrument(level = "trace")]
async fn handle_socket_payload( async fn handle_socket_payload(
app: Arc<Web3ProxyApp>, app: Arc<Web3ProxyApp>,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
@ -193,7 +179,6 @@ async fn handle_socket_payload(
{ {
"eth_subscribe" => { "eth_subscribe" => {
// TODO: what should go in this span? // TODO: what should go in this span?
let span = error_span!("eth_subscribe");
let response = app let response = app
.eth_subscribe( .eth_subscribe(
@ -202,7 +187,6 @@ async fn handle_socket_payload(
subscription_count, subscription_count,
response_sender.clone(), response_sender.clone(),
) )
.instrument(span)
.await; .await;
match response { match response {
@ -269,7 +253,6 @@ async fn handle_socket_payload(
Message::Text(response_str) Message::Text(response_str)
} }
#[instrument(level = "trace")]
async fn read_web3_socket( async fn read_web3_socket(
app: Arc<Web3ProxyApp>, app: Arc<Web3ProxyApp>,
authorization: Arc<Authorization>, authorization: Arc<Authorization>,
@ -295,7 +278,7 @@ async fn read_web3_socket(
} }
Message::Ping(x) => Message::Pong(x), Message::Ping(x) => Message::Pong(x),
Message::Pong(x) => { Message::Pong(x) => {
trace!("pong: {:?}", x); // // trace!("pong: {:?}", x);
continue; continue;
} }
Message::Close(_) => { Message::Close(_) => {
@ -328,7 +311,6 @@ async fn read_web3_socket(
} }
} }
#[instrument(level = "trace")]
async fn write_web3_socket( async fn write_web3_socket(
response_rx: flume::Receiver<Message>, response_rx: flume::Receiver<Message>,
mut ws_tx: SplitSink<WebSocket, Message>, mut ws_tx: SplitSink<WebSocket, Message>,
@ -343,7 +325,7 @@ async fn write_web3_socket(
// forward the response to through the websocket // forward the response to through the websocket
if let Err(err) = ws_tx.send(msg).await { if let Err(err) = ws_tx.send(msg).await {
// this isn't a problem. this is common and happens whenever a client disconnects // this isn't a problem. this is common and happens whenever a client disconnects
trace!(?err, "unable to write to websocket"); // trace!(?err, "unable to write to websocket");
break; break;
}; };
} }

View File

@ -9,11 +9,10 @@ use axum_macros::debug_handler;
use moka::future::ConcurrentCacheExt; use moka::future::ConcurrentCacheExt;
use serde_json::json; use serde_json::json;
use std::sync::Arc; use std::sync::Arc;
use tracing::instrument;
/// Health check page for load balancers to use. /// Health check page for load balancers to use.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn health(Extension(app): Extension<Arc<Web3ProxyApp>>) -> impl IntoResponse { pub async fn health(Extension(app): Extension<Arc<Web3ProxyApp>>) -> impl IntoResponse {
// TODO: also check that the head block is not too old // TODO: also check that the head block is not too old
if app.balanced_rpcs.synced() { if app.balanced_rpcs.synced() {
@ -27,7 +26,7 @@ pub async fn health(Extension(app): Extension<Arc<Web3ProxyApp>>) -> impl IntoRe
/// ///
/// TODO: when done debugging, remove this and only allow access on a different port /// TODO: when done debugging, remove this and only allow access on a different port
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn prometheus(Extension(app): Extension<Arc<Web3ProxyApp>>) -> impl IntoResponse { pub async fn prometheus(Extension(app): Extension<Arc<Web3ProxyApp>>) -> impl IntoResponse {
app.prometheus_metrics() app.prometheus_metrics()
} }
@ -36,7 +35,7 @@ pub async fn prometheus(Extension(app): Extension<Arc<Web3ProxyApp>>) -> impl In
/// ///
/// TODO: replace this with proper stats and monitoring /// TODO: replace this with proper stats and monitoring
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn status(Extension(app): Extension<Arc<Web3ProxyApp>>) -> impl IntoResponse { pub async fn status(Extension(app): Extension<Arc<Web3ProxyApp>>) -> impl IntoResponse {
app.pending_transactions.sync(); app.pending_transactions.sync();
app.rpc_secret_key_cache.sync(); app.rpc_secret_key_cache.sync();

View File

@ -24,6 +24,7 @@ use hashbrown::HashMap;
use http::{HeaderValue, StatusCode}; use http::{HeaderValue, StatusCode};
use ipnet::IpNet; use ipnet::IpNet;
use itertools::Itertools; use itertools::Itertools;
use log::warn;
use redis_rate_limiter::redis::AsyncCommands; use redis_rate_limiter::redis::AsyncCommands;
use sea_orm::{ use sea_orm::{
ActiveModelTrait, ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, QueryOrder, ActiveModelTrait, ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, QueryOrder,
@ -36,7 +37,6 @@ use std::ops::Add;
use std::str::FromStr; use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use time::{Duration, OffsetDateTime}; use time::{Duration, OffsetDateTime};
use tracing::{instrument, warn};
use ulid::Ulid; use ulid::Ulid;
/// `GET /user/login/:user_address` or `GET /user/login/:user_address/:message_eip` -- Start the "Sign In with Ethereum" (siwe) login flow. /// `GET /user/login/:user_address` or `GET /user/login/:user_address/:message_eip` -- Start the "Sign In with Ethereum" (siwe) login flow.
@ -57,7 +57,7 @@ use ulid::Ulid;
/// It is a better UX to just click "login with ethereum" and have the account created if it doesn't exist. /// It is a better UX to just click "login with ethereum" and have the account created if it doesn't exist.
/// We can prompt for an email and and payment after they log in. /// We can prompt for an email and and payment after they log in.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn user_login_get( pub async fn user_login_get(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ClientIp(ip): ClientIp, ClientIp(ip): ClientIp,
@ -154,7 +154,7 @@ pub struct PostLogin {
/// It is recommended to save the returned bearer token in a cookie. /// It is recommended to save the returned bearer token in a cookie.
/// The bearer token can be used to authenticate other requests, such as getting the user's stats or modifying the user's profile. /// The bearer token can be used to authenticate other requests, such as getting the user's stats or modifying the user's profile.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn user_login_post( pub async fn user_login_post(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ClientIp(ip): ClientIp, ClientIp(ip): ClientIp,
@ -333,7 +333,7 @@ pub async fn user_login_post(
/// `POST /user/logout` - Forget the bearer token in the `Authentication` header. /// `POST /user/logout` - Forget the bearer token in the `Authentication` header.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn user_logout_post( pub async fn user_logout_post(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>, TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
@ -355,7 +355,7 @@ pub async fn user_logout_post(
/// ///
/// TODO: this will change as we add better support for secondary users. /// TODO: this will change as we add better support for secondary users.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn user_get( pub async fn user_get(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
TypedHeader(Authorization(bearer_token)): TypedHeader<Authorization<Bearer>>, TypedHeader(Authorization(bearer_token)): TypedHeader<Authorization<Bearer>>,
@ -373,7 +373,7 @@ pub struct UserPost {
/// `POST /user` -- modify the account connected to the bearer token in the `Authentication` header. /// `POST /user` -- modify the account connected to the bearer token in the `Authentication` header.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn user_post( pub async fn user_post(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
TypedHeader(Authorization(bearer_token)): TypedHeader<Authorization<Bearer>>, TypedHeader(Authorization(bearer_token)): TypedHeader<Authorization<Bearer>>,
@ -420,7 +420,7 @@ pub async fn user_post(
/// TODO: one key per request? maybe /user/balance/:rpc_key? /// TODO: one key per request? maybe /user/balance/:rpc_key?
/// TODO: this will change as we add better support for secondary users. /// TODO: this will change as we add better support for secondary users.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn user_balance_get( pub async fn user_balance_get(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>, TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
@ -438,7 +438,7 @@ pub async fn user_balance_get(
/// TODO: one key per request? maybe /user/balance/:rpc_key? /// TODO: one key per request? maybe /user/balance/:rpc_key?
/// TODO: this will change as we add better support for secondary users. /// TODO: this will change as we add better support for secondary users.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn user_balance_post( pub async fn user_balance_post(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>, TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
@ -452,7 +452,7 @@ pub async fn user_balance_post(
/// ///
/// TODO: one key per request? maybe /user/keys/:rpc_key? /// TODO: one key per request? maybe /user/keys/:rpc_key?
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn rpc_keys_get( pub async fn rpc_keys_get(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>, TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
@ -481,7 +481,7 @@ pub async fn rpc_keys_get(
/// `DELETE /user/keys` -- Use a bearer token to delete an existing key. /// `DELETE /user/keys` -- Use a bearer token to delete an existing key.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn rpc_keys_delete( pub async fn rpc_keys_delete(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>, TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
@ -511,7 +511,7 @@ pub struct UserKeyManagement {
/// `POST /user/keys` or `PUT /user/keys` -- Use a bearer token to create or update an existing key. /// `POST /user/keys` or `PUT /user/keys` -- Use a bearer token to create or update an existing key.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn rpc_keys_management( pub async fn rpc_keys_management(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>, TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
@ -677,7 +677,7 @@ pub async fn rpc_keys_management(
/// `GET /user/revert_logs` -- Use a bearer token to get the user's revert logs. /// `GET /user/revert_logs` -- Use a bearer token to get the user's revert logs.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn user_revert_logs_get( pub async fn user_revert_logs_get(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>, TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
@ -733,7 +733,7 @@ pub async fn user_revert_logs_get(
/// `GET /user/stats/aggregate` -- Public endpoint for aggregate stats such as bandwidth used and methods requested. /// `GET /user/stats/aggregate` -- Public endpoint for aggregate stats such as bandwidth used and methods requested.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn user_stats_aggregate_get( pub async fn user_stats_aggregate_get(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
bearer: Option<TypedHeader<Authorization<Bearer>>>, bearer: Option<TypedHeader<Authorization<Bearer>>>,
@ -754,7 +754,7 @@ pub async fn user_stats_aggregate_get(
/// ///
/// TODO: this will change as we add better support for secondary users. /// TODO: this will change as we add better support for secondary users.
#[debug_handler] #[debug_handler]
#[instrument(level = "trace")]
pub async fn user_stats_detailed_get( pub async fn user_stats_detailed_get(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
bearer: Option<TypedHeader<Authorization<Bearer>>>, bearer: Option<TypedHeader<Authorization<Bearer>>>,

View File

@ -4,7 +4,6 @@ use serde::de::{self, Deserializer, MapAccess, SeqAccess, Visitor};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use serde_json::value::RawValue; use serde_json::value::RawValue;
use std::fmt; use std::fmt;
use tracing::instrument;
// this is used by serde // this is used by serde
#[allow(dead_code)] #[allow(dead_code)]
@ -194,12 +193,10 @@ impl JsonRpcForwardedResponse {
Self::from_string(message, code, id) Self::from_string(message, code, id)
} }
#[instrument(level = "trace")]
pub fn from_str(message: &str, code: Option<i64>, id: Option<Box<RawValue>>) -> Self { pub fn from_str(message: &str, code: Option<i64>, id: Option<Box<RawValue>>) -> Self {
Self::from_string(message.to_string(), code, id) Self::from_string(message.to_string(), code, id)
} }
#[instrument(level = "trace")]
pub fn from_string(message: String, code: Option<i64>, id: Option<Box<RawValue>>) -> Self { pub fn from_string(message: String, code: Option<i64>, id: Option<Box<RawValue>>) -> Self {
// TODO: this is too verbose. plenty of errors are valid, like users giving an invalid address. no need to log that // TODO: this is too verbose. plenty of errors are valid, like users giving an invalid address. no need to log that
// TODO: can we somehow get the initial request here? if we put that into a tracing span, will things slow down a ton? // TODO: can we somehow get the initial request here? if we put that into a tracing span, will things slow down a ton?
@ -217,7 +214,6 @@ impl JsonRpcForwardedResponse {
} }
} }
#[instrument(level = "trace")]
pub fn from_response(partial_response: Box<RawValue>, id: Box<RawValue>) -> Self { pub fn from_response(partial_response: Box<RawValue>, id: Box<RawValue>) -> Self {
JsonRpcForwardedResponse { JsonRpcForwardedResponse {
jsonrpc: "2.0".to_string(), jsonrpc: "2.0".to_string(),
@ -228,7 +224,6 @@ impl JsonRpcForwardedResponse {
} }
} }
#[instrument(level = "trace")]
pub fn from_value(partial_response: serde_json::Value, id: Box<RawValue>) -> Self { pub fn from_value(partial_response: serde_json::Value, id: Box<RawValue>) -> Self {
let partial_response = let partial_response =
serde_json::to_string(&partial_response).expect("this should always work"); serde_json::to_string(&partial_response).expect("this should always work");
@ -244,7 +239,6 @@ impl JsonRpcForwardedResponse {
} }
} }
#[instrument(level = "trace")]
pub fn from_ethers_error(e: ProviderError, id: Box<RawValue>) -> anyhow::Result<Self> { pub fn from_ethers_error(e: ProviderError, id: Box<RawValue>) -> anyhow::Result<Self> {
// TODO: move turning ClientError into json to a helper function? // TODO: move turning ClientError into json to a helper function?
let code; let code;
@ -304,7 +298,6 @@ impl JsonRpcForwardedResponse {
}) })
} }
#[instrument(level = "trace")]
pub fn try_from_response_result( pub fn try_from_response_result(
result: Result<Box<RawValue>, ProviderError>, result: Result<Box<RawValue>, ProviderError>,
id: Box<RawValue>, id: Box<RawValue>,

View File

@ -2,14 +2,14 @@ use axum::headers::HeaderName;
use axum::http::HeaderValue; use axum::http::HeaderValue;
use axum::response::{IntoResponse, Response}; use axum::response::{IntoResponse, Response};
use axum::{routing::get, Extension, Router}; use axum::{routing::get, Extension, Router};
use log::info;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::sync::Arc; use std::sync::Arc;
use tracing::{info, instrument};
use crate::app::Web3ProxyApp; use crate::app::Web3ProxyApp;
/// Run a prometheus metrics server on the given port. /// Run a prometheus metrics server on the given port.
#[instrument(level = "trace")]
pub async fn serve(app: Arc<Web3ProxyApp>, port: u16) -> anyhow::Result<()> { pub async fn serve(app: Arc<Web3ProxyApp>, port: u16) -> anyhow::Result<()> {
// build our application with a route // build our application with a route
// order most to least common // order most to least common
@ -42,7 +42,6 @@ pub async fn serve(app: Arc<Web3ProxyApp>, port: u16) -> anyhow::Result<()> {
.map_err(Into::into) .map_err(Into::into)
} }
#[instrument(level = "trace")]
async fn root(Extension(app): Extension<Arc<Web3ProxyApp>>) -> Response { async fn root(Extension(app): Extension<Arc<Web3ProxyApp>>) -> Response {
let serialized = app.prometheus_metrics(); let serialized = app.prometheus_metrics();

View File

@ -10,13 +10,13 @@ use anyhow::Context;
use derive_more::From; use derive_more::From;
use ethers::prelude::{Block, TxHash, H256, U64}; use ethers::prelude::{Block, TxHash, H256, U64};
use hashbrown::{HashMap, HashSet}; use hashbrown::{HashMap, HashSet};
use log::{debug, warn, Level};
use moka::future::Cache; use moka::future::Cache;
use serde::Serialize; use serde::Serialize;
use serde_json::json; use serde_json::json;
use std::{cmp::Ordering, fmt::Display, sync::Arc}; use std::{cmp::Ordering, fmt::Display, sync::Arc};
use tokio::sync::{broadcast, watch}; use tokio::sync::{broadcast, watch};
use tokio::time::Duration; use tokio::time::Duration;
use tracing::{debug, instrument, trace, warn, Level};
// TODO: type for Hydrated Blocks with their full transactions? // TODO: type for Hydrated Blocks with their full transactions?
pub type ArcBlock = Arc<Block<TxHash>>; pub type ArcBlock = Arc<Block<TxHash>>;
@ -38,7 +38,7 @@ impl Display for BlockId {
impl Web3Connections { impl Web3Connections {
/// add a block to our map and it's hash to our graphmap of the blockchain /// add a block to our map and it's hash to our graphmap of the blockchain
#[instrument]
pub async fn save_block(&self, block: &ArcBlock, heaviest_chain: bool) -> anyhow::Result<()> { pub async fn save_block(&self, block: &ArcBlock, heaviest_chain: bool) -> anyhow::Result<()> {
// TODO: i think we can rearrange this function to make it faster on the hot path // TODO: i think we can rearrange this function to make it faster on the hot path
let block_hash = block.hash.as_ref().context("no block hash")?; let block_hash = block.hash.as_ref().context("no block hash")?;
@ -62,11 +62,11 @@ impl Web3Connections {
} }
if blockchain.contains_node(*block_hash) { if blockchain.contains_node(*block_hash) {
trace!(%block_hash, %block_num, "block already saved"); // // trace!(%block_hash, %block_num, "block already saved");
return Ok(()); return Ok(());
} }
trace!(%block_hash, %block_num, "saving new block"); // // trace!(%block_hash, %block_num, "saving new block");
self.block_hashes self.block_hashes
.insert(*block_hash, block.to_owned()) .insert(*block_hash, block.to_owned())
@ -85,7 +85,6 @@ impl Web3Connections {
/// Get a block from caches with fallback. /// Get a block from caches with fallback.
/// Will query a specific node or the best available. /// Will query a specific node or the best available.
#[instrument(level = "trace")]
pub async fn block( pub async fn block(
&self, &self,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
@ -108,7 +107,7 @@ impl Web3Connections {
.request( .request(
"eth_getBlockByHash", "eth_getBlockByHash",
&json!(get_block_params), &json!(get_block_params),
Level::ERROR.into(), Level::Error.into(),
) )
.await? .await?
} }
@ -237,7 +236,7 @@ impl Web3Connections {
) )
.await .await
{ {
warn!(rpc=%rpc_name, ?err, "unable to process block from rpc"); warn!("unable to process block from rpc {}: {:?}", rpc_name, err);
} }
} }
@ -267,7 +266,7 @@ impl Web3Connections {
if rpc_head_num.is_zero() { if rpc_head_num.is_zero() {
// TODO: i don't think we can get to this anymore now that we use Options // TODO: i don't think we can get to this anymore now that we use Options
debug!(%rpc, "still syncing"); debug!("{} still syncing", rpc);
connection_heads.remove(&rpc.name); connection_heads.remove(&rpc.name);
@ -286,7 +285,7 @@ impl Web3Connections {
} }
None => { None => {
// TODO: warn is too verbose. this is expected if a node disconnects and has to reconnect // TODO: warn is too verbose. this is expected if a node disconnects and has to reconnect
trace!(%rpc, "Block without number or hash!"); // // trace!(%rpc, "Block without number or hash!");
connection_heads.remove(&rpc.name); connection_heads.remove(&rpc.name);
@ -311,7 +310,7 @@ impl Web3Connections {
// TODO: why does this happen?!?! seems to only happen with uncled blocks // TODO: why does this happen?!?! seems to only happen with uncled blocks
// TODO: maybe we should do get_with? // TODO: maybe we should do get_with?
// TODO: maybe we should just continue. this only seems to happen when an older block is received // TODO: maybe we should just continue. this only seems to happen when an older block is received
warn!(%connection_head_hash, %conn_name, %rpc, "Missing connection_head_block in block_hashes. Fetching now"); warn!("Missing connection_head_block in block_hashes. Fetching now. hash={}. other={}. rpc={}", connection_head_hash, conn_name, rpc);
// this option should always be populated // this option should always be populated
let conn_rpc = self.conns.get(conn_name); let conn_rpc = self.conns.get(conn_name);
@ -322,7 +321,7 @@ impl Web3Connections {
{ {
Ok(block) => block, Ok(block) => block,
Err(err) => { Err(err) => {
warn!(%connection_head_hash, %conn_name, %rpc, ?err, "Failed fetching connection_head_block for block_hashes"); warn!("Processing {}. Failed fetching connection_head_block for block_hashes. {} head hash={}. err={:?}", rpc, conn_name, connection_head_hash, err);
continue; continue;
} }
} }
@ -393,9 +392,9 @@ impl Web3Connections {
// not enough rpcs yet. check the parent // not enough rpcs yet. check the parent
if let Some(parent_block) = self.block_hashes.get(&maybe_head_block.parent_hash) if let Some(parent_block) = self.block_hashes.get(&maybe_head_block.parent_hash)
{ {
trace!( // // trace!(
child=%maybe_head_hash, parent=%parent_block.hash.unwrap(), "avoiding thundering herd", // child=%maybe_head_hash, parent=%parent_block.hash.unwrap(), "avoiding thundering herd",
); // );
maybe_head_block = parent_block; maybe_head_block = parent_block;
continue; continue;
@ -428,9 +427,12 @@ impl Web3Connections {
.swap(Arc::new(empty_synced_connections)); .swap(Arc::new(empty_synced_connections));
// TODO: log different things depending on old_synced_connections // TODO: log different things depending on old_synced_connections
warn!(%rpc, "no consensus head! {}/{}/{}", 0, num_connection_heads, total_conns); warn!(
"Processing {}. no consensus head! {}/{}/{}",
rpc, 0, num_connection_heads, total_conns
);
} else { } else {
trace!(?highest_rpcs); // // trace!(?highest_rpcs);
// TODO: if maybe_head_block.time() is old, ignore it // TODO: if maybe_head_block.time() is old, ignore it
@ -470,7 +472,14 @@ impl Web3Connections {
// TODO: if the rpc_head_block != consensus_head_block, log something? // TODO: if the rpc_head_block != consensus_head_block, log something?
match &old_synced_connections.head_block_id { match &old_synced_connections.head_block_id {
None => { None => {
debug!(block=%consensus_head_block_id, %rpc, "first {}/{}/{}", num_consensus_rpcs, num_connection_heads, total_conns); debug!(
"first {}/{}/{} block={}, rpc={}",
num_consensus_rpcs,
num_connection_heads,
total_conns,
consensus_head_block_id,
rpc
);
self.save_block(&consensus_head_block, true).await?; self.save_block(&consensus_head_block, true).await?;
@ -491,10 +500,27 @@ impl Web3Connections {
// multiple blocks with the same fork! // multiple blocks with the same fork!
if consensus_head_block_id.hash == old_block_id.hash { if consensus_head_block_id.hash == old_block_id.hash {
// no change in hash. no need to use head_block_sender // no change in hash. no need to use head_block_sender
debug!(con_head=%consensus_head_block_id, rpc_head=%rpc_head_str, %rpc, "con {}/{}/{}", num_consensus_rpcs, num_connection_heads, total_conns) debug!(
"con {}/{}/{}. con head={}. rpc={}. rpc head={}",
num_consensus_rpcs,
num_connection_heads,
total_conns,
consensus_head_block_id,
rpc,
rpc_head_str
)
} else { } else {
// hash changed // hash changed
debug!(con_head=%consensus_head_block_id, old=%old_block_id, rpc_head=%rpc_head_str, %rpc, "unc {}/{}/{}", num_consensus_rpcs, num_connection_heads, total_conns); debug!(
"unc {}/{}/{} con_head={}. old={}. rpc_head={}. rpc={}",
num_consensus_rpcs,
num_connection_heads,
total_conns,
consensus_head_block_id,
old_block_id,
rpc_head_str,
rpc
);
self.save_block(&consensus_head_block, true) self.save_block(&consensus_head_block, true)
.await .await
@ -508,7 +534,7 @@ impl Web3Connections {
Ordering::Less => { Ordering::Less => {
// this is unlikely but possible // this is unlikely but possible
// TODO: better log // TODO: better log
warn!(con_head=%consensus_head_block_id, old_head=%old_block_id, rpc_head=%rpc_head_str, %rpc, "chain rolled back {}/{}/{}", num_consensus_rpcs, num_connection_heads, total_conns); warn!("chain rolled back {}/{}/{}. con_head={} old_head={}. rpc_head={}. rpc={}", num_consensus_rpcs, num_connection_heads, total_conns, consensus_head_block_id, old_block_id, rpc_head_str, rpc);
// TODO: tell save_block to remove any higher block numbers from the cache. not needed because we have other checks on requested blocks being > head, but still seems slike a good idea // TODO: tell save_block to remove any higher block numbers from the cache. not needed because we have other checks on requested blocks being > head, but still seems slike a good idea
self.save_block(&consensus_head_block, true).await.context( self.save_block(&consensus_head_block, true).await.context(
@ -520,7 +546,15 @@ impl Web3Connections {
.context("head_block_sender sending consensus_head_block")?; .context("head_block_sender sending consensus_head_block")?;
} }
Ordering::Greater => { Ordering::Greater => {
debug!(con_head=%consensus_head_block_id, rpc_head=%rpc_head_str, %rpc, "new {}/{}/{}", num_consensus_rpcs, num_connection_heads, total_conns); debug!(
"new {}/{}/{} conn_head={}. rpc_head={}. rpc={}",
num_consensus_rpcs,
num_connection_heads,
total_conns,
consensus_head_block_id,
rpc_head_str,
rpc
);
self.save_block(&consensus_head_block, true).await?; self.save_block(&consensus_head_block, true).await?;

View File

@ -9,6 +9,7 @@ use anyhow::Context;
use ethers::prelude::{Bytes, Middleware, ProviderError, TxHash, H256, U64}; use ethers::prelude::{Bytes, Middleware, ProviderError, TxHash, H256, U64};
use futures::future::try_join_all; use futures::future::try_join_all;
use futures::StreamExt; use futures::StreamExt;
use log::{debug, error, info, warn, Level};
use parking_lot::RwLock; use parking_lot::RwLock;
use redis_rate_limiter::{RedisPool, RedisRateLimitResult, RedisRateLimiter}; use redis_rate_limiter::{RedisPool, RedisRateLimitResult, RedisRateLimiter};
use sea_orm::DatabaseConnection; use sea_orm::DatabaseConnection;
@ -25,7 +26,6 @@ use thread_fast_rng::thread_fast_rng;
use tokio::sync::broadcast; use tokio::sync::broadcast;
use tokio::sync::RwLock as AsyncRwLock; use tokio::sync::RwLock as AsyncRwLock;
use tokio::time::{interval, sleep, sleep_until, Duration, Instant, MissedTickBehavior}; use tokio::time::{interval, sleep, sleep_until, Duration, Instant, MissedTickBehavior};
use tracing::{debug, error, info, instrument, trace, warn, Level};
/// An active connection to a Web3 RPC server like geth or erigon. /// An active connection to a Web3 RPC server like geth or erigon.
pub struct Web3Connection { pub struct Web3Connection {
@ -58,7 +58,6 @@ pub struct Web3Connection {
impl Web3Connection { impl Web3Connection {
/// Connect to a web3 rpc /// Connect to a web3 rpc
// #[instrument(name = "spawn_Web3Connection", skip(hard_limit, http_client))]
// TODO: have this take a builder (which will have channels attached) // TODO: have this take a builder (which will have channels attached)
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub async fn spawn( pub async fn spawn(
@ -128,7 +127,7 @@ impl Web3Connection {
.request( .request(
"eth_chainId", "eth_chainId",
&json!(Option::None::<()>), &json!(Option::None::<()>),
Level::ERROR.into(), Level::Error.into(),
) )
.await; .await;
@ -203,7 +202,7 @@ impl Web3Connection {
// TODO: subscribe to a channel instead of polling. subscribe to http_interval_sender? // TODO: subscribe to a channel instead of polling. subscribe to http_interval_sender?
while head_block_id.is_none() { while head_block_id.is_none() {
warn!(rpc=%self, "no head block yet. retrying"); warn!("no head block yet. retrying rpc {}", self);
sleep(Duration::from_secs(13)).await; sleep(Duration::from_secs(13)).await;
@ -230,11 +229,11 @@ impl Web3Connection {
maybe_archive_block, maybe_archive_block,
)), )),
// error here are expected, so keep the level low // error here are expected, so keep the level low
tracing::Level::DEBUG.into(), Level::Debug.into(),
) )
.await; .await;
trace!(?archive_result, rpc=%self); // // trace!(?archive_result, rpc=%self);
if archive_result.is_ok() { if archive_result.is_ok() {
limit = Some(block_data_limit); limit = Some(block_data_limit);
@ -300,7 +299,7 @@ impl Web3Connection {
); );
let reconnect_in = Duration::from_millis(first_sleep_ms); let reconnect_in = Duration::from_millis(first_sleep_ms);
warn!(rpc=%self, ?reconnect_in, "Reconnect in"); warn!("Reconnect to {} in {}ms", self, reconnect_in.as_millis());
sleep(reconnect_in).await; sleep(reconnect_in).await;
@ -318,7 +317,12 @@ impl Web3Connection {
); );
let retry_in = Duration::from_millis(sleep_ms); let retry_in = Duration::from_millis(sleep_ms);
warn!(rpc=%self, ?retry_in, ?err, "Failed to reconnect!"); warn!(
"Failed reconnect to {}! Retry in {}ms. err={:?}",
self,
retry_in.as_millis(),
err,
);
sleep(retry_in).await; sleep(retry_in).await;
} }
@ -346,7 +350,7 @@ impl Web3Connection {
Web3Provider::Ws(_) => {} Web3Provider::Ws(_) => {}
} }
info!(rpc=%self, "reconnecting"); info!("Reconnecting to {}", self);
// disconnect the current provider // disconnect the current provider
*provider_option = None; *provider_option = None;
@ -365,7 +369,7 @@ impl Web3Connection {
.context("block_sender during connect")?; .context("block_sender during connect")?;
} }
} else { } else {
info!(rpc=%self, "connecting"); info!("connecting to {}", self);
} }
// TODO: if this fails, keep retrying! otherwise it crashes and doesn't try again! // TODO: if this fails, keep retrying! otherwise it crashes and doesn't try again!
@ -373,7 +377,7 @@ impl Web3Connection {
*provider_option = Some(Arc::new(new_provider)); *provider_option = Some(Arc::new(new_provider));
info!(rpc=%self, "successfully connected"); info!("successfully connected to {}", self);
Ok(()) Ok(())
} }
@ -446,7 +450,7 @@ impl Web3Connection {
.context("block_sender")?; .context("block_sender")?;
} }
Err(err) => { Err(err) => {
warn!(?err, "unable to get block from {}", self); warn!("unable to get block from {}. err={:?}", self, err);
{ {
let mut head_block_id = self.head_block_id.write(); let mut head_block_id = self.head_block_id.write();
@ -477,7 +481,7 @@ impl Web3Connection {
reconnect: bool, reconnect: bool,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
loop { loop {
debug!(rpc=%self, "subscribing"); debug!("subscribing to {}", self);
let http_interval_receiver = http_interval_sender.as_ref().map(|x| x.subscribe()); let http_interval_receiver = http_interval_sender.as_ref().map(|x| x.subscribe());
@ -510,9 +514,9 @@ impl Web3Connection {
loop { loop {
if let Some(provider) = conn.provider.read().await.as_ref() { if let Some(provider) = conn.provider.read().await.as_ref() {
if provider.ready() { if provider.ready() {
trace!(rpc=%conn, "provider is ready"); // // trace!(rpc=%conn, "provider is ready");
} else { } else {
warn!(rpc=%conn, "provider is NOT ready"); warn!("rpc {} is NOT ready", conn);
// returning error will trigger a reconnect // returning error will trigger a reconnect
// TODO: what if we just happened to have this check line up with another restart? // TODO: what if we just happened to have this check line up with another restart?
return Err(anyhow::anyhow!("provider is not ready")); return Err(anyhow::anyhow!("provider is not ready"));
@ -535,22 +539,18 @@ impl Web3Connection {
} }
Err(err) => { Err(err) => {
if reconnect { if reconnect {
warn!( warn!("{} subscription exited. err={:?}", self, err);
rpc=%self,
?err,
"subscription exited",
);
self.retrying_reconnect(block_sender.as_ref(), true).await?; self.retrying_reconnect(block_sender.as_ref(), true).await?;
} else { } else {
error!(rpc=%self, ?err, "subscription exited"); error!("{} subscription exited. err={:?}", self, err);
return Err(err); return Err(err);
} }
} }
} }
} }
info!(rpc=%self, "all subscriptions complete"); info!("all subscriptions on {} completed", self);
Ok(()) Ok(())
} }
@ -563,7 +563,7 @@ impl Web3Connection {
block_sender: flume::Sender<BlockAndRpc>, block_sender: flume::Sender<BlockAndRpc>,
block_map: BlockHashesCache, block_map: BlockHashesCache,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
info!(%self, "watching new heads"); info!("watching new heads on {}", self);
// TODO: is a RwLock of an Option<Arc> the right thing here? // TODO: is a RwLock of an Option<Arc> the right thing here?
if let Some(provider) = self.provider.read().await.clone() { if let Some(provider) = self.provider.read().await.clone() {
@ -587,7 +587,7 @@ impl Web3Connection {
.request( .request(
"eth_getBlockByNumber", "eth_getBlockByNumber",
&json!(("latest", false)), &json!(("latest", false)),
tracing::Level::ERROR.into(), Level::Error.into(),
) )
.await; .await;
@ -632,7 +632,7 @@ impl Web3Connection {
} }
} }
Err(err) => { Err(err) => {
warn!(?err, "Internal error on latest block from {}", self); warn!("Internal error on latest block from {}. {:?}", self, err);
self.send_head_block_result( self.send_head_block_result(
Ok(None), Ok(None),
@ -656,12 +656,12 @@ impl Web3Connection {
broadcast::error::RecvError::Lagged(lagged) => { broadcast::error::RecvError::Lagged(lagged) => {
// querying the block was delayed // querying the block was delayed
// this can happen if tokio is very busy or waiting for requests limits took too long // this can happen if tokio is very busy or waiting for requests limits took too long
warn!(?err, rpc=%self, "http interval lagging by {}!", lagged); warn!("http interval on {} lagging by {}!", self, lagged);
} }
} }
} }
trace!(rpc=%self, "ok http interval"); // // trace!(rpc=%self, "ok http interval");
} }
} }
Web3Provider::Ws(provider) => { Web3Provider::Ws(provider) => {
@ -682,7 +682,7 @@ impl Web3Connection {
.request( .request(
"eth_getBlockByNumber", "eth_getBlockByNumber",
&json!(("latest", false)), &json!(("latest", false)),
tracing::Level::ERROR.into(), Level::Error.into(),
) )
.await; .await;
@ -723,7 +723,7 @@ impl Web3Connection {
// TODO: is this always an error? // TODO: is this always an error?
// TODO: we probably don't want a warn and to return error // TODO: we probably don't want a warn and to return error
warn!(rpc=%self, "new_heads subscription ended"); warn!("new_heads subscription to {} ended", self);
return Err(anyhow::anyhow!("new_heads subscription ended")); return Err(anyhow::anyhow!("new_heads subscription ended"));
} }
} }
@ -737,7 +737,7 @@ impl Web3Connection {
authorization: Arc<Authorization>, authorization: Arc<Authorization>,
tx_id_sender: flume::Sender<(TxHash, Arc<Self>)>, tx_id_sender: flume::Sender<(TxHash, Arc<Self>)>,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
info!(%self, "watching pending transactions"); info!( "watching pending transactions on {}", self);
// TODO: is a RwLock of an Option<Arc> the right thing here? // TODO: is a RwLock of an Option<Arc> the right thing here?
if let Some(provider) = self.provider.read().await.clone() { if let Some(provider) = self.provider.read().await.clone() {
@ -790,7 +790,7 @@ impl Web3Connection {
// TODO: is this always an error? // TODO: is this always an error?
// TODO: we probably don't want a warn and to return error // TODO: we probably don't want a warn and to return error
warn!(rpc=%self, "pending_transactions subscription ended"); warn!( "pending_transactions subscription ended on {}", self);
return Err(anyhow::anyhow!("pending_transactions subscription ended")); return Err(anyhow::anyhow!("pending_transactions subscription ended"));
} }
} }
@ -800,7 +800,7 @@ impl Web3Connection {
} }
/// be careful with this; it might wait forever! /// be careful with this; it might wait forever!
#[instrument]
pub async fn wait_for_request_handle( pub async fn wait_for_request_handle(
self: &Arc<Self>, self: &Arc<Self>,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
@ -811,13 +811,13 @@ impl Web3Connection {
loop { loop {
let x = self.try_request_handle(authorization).await; let x = self.try_request_handle(authorization).await;
trace!(?x, "try_request_handle"); // // trace!(?x, "try_request_handle");
match x { match x {
Ok(OpenRequestResult::Handle(handle)) => return Ok(handle), Ok(OpenRequestResult::Handle(handle)) => return Ok(handle),
Ok(OpenRequestResult::RetryAt(retry_at)) => { Ok(OpenRequestResult::RetryAt(retry_at)) => {
// TODO: emit a stat? // TODO: emit a stat?
trace!(?retry_at); // // trace!(?retry_at);
if retry_at > max_wait { if retry_at > max_wait {
// break now since we will wait past our maximum wait time // break now since we will wait past our maximum wait time
@ -836,7 +836,6 @@ impl Web3Connection {
} }
} }
#[instrument]
pub async fn try_request_handle( pub async fn try_request_handle(
self: &Arc<Self>, self: &Arc<Self>,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
@ -853,14 +852,14 @@ impl Web3Connection {
// TODO: how should we know if we should set expire or not? // TODO: how should we know if we should set expire or not?
match ratelimiter.throttle().await? { match ratelimiter.throttle().await? {
RedisRateLimitResult::Allowed(_) => { RedisRateLimitResult::Allowed(_) => {
trace!("rate limit succeeded") // // trace!("rate limit succeeded")
} }
RedisRateLimitResult::RetryAt(retry_at, _) => { RedisRateLimitResult::RetryAt(retry_at, _) => {
// rate limit failed // rate limit failed
// save the smallest retry_after. if nothing succeeds, return an Err with retry_after in it // save the smallest retry_after. if nothing succeeds, return an Err with retry_after in it
// TODO: use tracing better // TODO: use tracing better
// TODO: i'm seeing "Exhausted rate limit on moralis: 0ns". How is it getting 0? // TODO: i'm seeing "Exhausted rate limit on moralis: 0ns". How is it getting 0?
warn!(?retry_at, rpc=%self, "Exhausted rate limit"); warn!( "Exhausted rate limit on {}. Retry at {:?}", self, retry_at);
return Ok(OpenRequestResult::RetryAt(retry_at)); return Ok(OpenRequestResult::RetryAt(retry_at));
} }

View File

@ -19,6 +19,7 @@ use futures::future::{join_all, try_join_all};
use futures::stream::FuturesUnordered; use futures::stream::FuturesUnordered;
use futures::StreamExt; use futures::StreamExt;
use hashbrown::HashMap; use hashbrown::HashMap;
use log::{error, info, warn, Level};
use moka::future::{Cache, ConcurrentCacheExt}; use moka::future::{Cache, ConcurrentCacheExt};
use petgraph::graphmap::DiGraphMap; use petgraph::graphmap::DiGraphMap;
use sea_orm::DatabaseConnection; use sea_orm::DatabaseConnection;
@ -34,7 +35,6 @@ use tokio::sync::RwLock as AsyncRwLock;
use tokio::sync::{broadcast, watch}; use tokio::sync::{broadcast, watch};
use tokio::task; use tokio::task;
use tokio::time::{interval, sleep, sleep_until, Duration, Instant, MissedTickBehavior}; use tokio::time::{interval, sleep, sleep_until, Duration, Instant, MissedTickBehavior};
use tracing::{error, info, instrument, trace, warn};
/// A collection of web3 connections. Sends requests either the current best server or all servers. /// A collection of web3 connections. Sends requests either the current best server or all servers.
#[derive(From)] #[derive(From)]
@ -95,7 +95,7 @@ impl Web3Connections {
// TODO: every time a head_block arrives (with a small delay for known slow servers), or on the interval. // TODO: every time a head_block arrives (with a small delay for known slow servers), or on the interval.
interval.tick().await; interval.tick().await;
trace!("http interval ready"); // // trace!("http interval ready");
// errors are okay. they mean that all receivers have been dropped // errors are okay. they mean that all receivers have been dropped
let _ = sender.send(()); let _ = sender.send(());
@ -171,7 +171,7 @@ impl Web3Connections {
Ok(Err(err)) => { Ok(Err(err)) => {
// if we got an error here, it is not retryable // if we got an error here, it is not retryable
// TODO: include context about which connection failed // TODO: include context about which connection failed
error!(?err, "Unable to create connection"); error!("Unable to create connection. err={:?}", err);
} }
Err(err) => { Err(err) => {
return Err(err.into()); return Err(err.into());
@ -337,7 +337,7 @@ impl Web3Connections {
.into_iter() .into_iter()
.map(|active_request_handle| async move { .map(|active_request_handle| async move {
let result: Result<Box<RawValue>, _> = active_request_handle let result: Result<Box<RawValue>, _> = active_request_handle
.request(method, &json!(&params), tracing::Level::ERROR.into()) .request(method, &json!(&params), Level::Error.into())
.await; .await;
result result
}) })
@ -472,7 +472,7 @@ impl Web3Connections {
// increment our connection counter // increment our connection counter
match rpc.try_request_handle(authorization).await { match rpc.try_request_handle(authorization).await {
Ok(OpenRequestResult::Handle(handle)) => { Ok(OpenRequestResult::Handle(handle)) => {
trace!("next server on {:?}: {:?}", self, rpc); // // trace!("next server on {:?}: {:?}", self, rpc);
return Ok(OpenRequestResult::Handle(handle)); return Ok(OpenRequestResult::Handle(handle));
} }
Ok(OpenRequestResult::RetryAt(retry_at)) => { Ok(OpenRequestResult::RetryAt(retry_at)) => {
@ -483,7 +483,7 @@ impl Web3Connections {
} }
Err(err) => { Err(err) => {
// TODO: log a warning? // TODO: log a warning?
warn!(?err, "No request handle for {}", rpc) warn!("No request handle for {}. err={:?}", rpc, err)
} }
} }
} }
@ -539,7 +539,10 @@ impl Web3Connections {
warn!("no request handle for {}", connection) warn!("no request handle for {}", connection)
} }
Err(err) => { Err(err) => {
warn!(?err, "error getting request handle for {}", connection) warn!(
"error getting request handle for {}. err={:?}",
connection, err
)
} }
} }
} }
@ -602,7 +605,7 @@ impl Web3Connections {
) { ) {
Ok(response) => { Ok(response) => {
if let Some(error) = &response.error { if let Some(error) = &response.error {
trace!(?response, "rpc error"); // // trace!(?response, "rpc error");
if let Some(request_metadata) = request_metadata { if let Some(request_metadata) = request_metadata {
request_metadata request_metadata
@ -629,17 +632,20 @@ impl Web3Connections {
} }
} }
} else { } else {
trace!(?response, "rpc success"); // // trace!(?response, "rpc success");
} }
return Ok(response); return Ok(response);
} }
Err(e) => { Err(err) => {
let rpc = skip_rpcs let rpc = skip_rpcs
.last() .last()
.expect("there must have been a provider if we got an error"); .expect("there must have been a provider if we got an error");
warn!(%rpc, ?e, "Backend server error! Retrying on another"); warn!(
"Backend server error on {}! Retrying on another. err={:?}",
rpc, err
);
// TODO: sleep how long? until synced_connections changes or rate limits are available // TODO: sleep how long? until synced_connections changes or rate limits are available
// sleep(Duration::from_millis(100)).await; // sleep(Duration::from_millis(100)).await;
@ -652,7 +658,7 @@ impl Web3Connections {
// TODO: move this to a helper function // TODO: move this to a helper function
// sleep (TODO: with a lock?) until our rate limits should be available // sleep (TODO: with a lock?) until our rate limits should be available
// TODO: if a server catches up sync while we are waiting, we could stop waiting // TODO: if a server catches up sync while we are waiting, we could stop waiting
warn!(?retry_at, "All rate limits exceeded. Sleeping"); warn!("All rate limits exceeded. Sleeping untill {:?}", retry_at);
// TODO: have a separate column for rate limited? // TODO: have a separate column for rate limited?
if let Some(request_metadata) = request_metadata { if let Some(request_metadata) = request_metadata {
@ -664,7 +670,7 @@ impl Web3Connections {
continue; continue;
} }
OpenRequestResult::RetryNever => { OpenRequestResult::RetryNever => {
warn!(?self, "No server handles!"); warn!("No server handles! {:?}", self);
if let Some(request_metadata) = request_metadata { if let Some(request_metadata) = request_metadata {
request_metadata.no_servers.fetch_add(1, Ordering::Release); request_metadata.no_servers.fetch_add(1, Ordering::Release);
@ -690,7 +696,7 @@ impl Web3Connections {
} }
/// be sure there is a timeout on this or it might loop forever /// be sure there is a timeout on this or it might loop forever
#[instrument]
pub async fn try_send_all_upstream_servers( pub async fn try_send_all_upstream_servers(
&self, &self,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
@ -729,7 +735,7 @@ impl Web3Connections {
return Ok(response); return Ok(response);
} }
Err(None) => { Err(None) => {
warn!(?self, "No servers in sync! Retrying"); warn!("No servers in sync on {:?}! Retrying", self);
if let Some(request_metadata) = &request_metadata { if let Some(request_metadata) = &request_metadata {
request_metadata.no_servers.fetch_add(1, Ordering::Release); request_metadata.no_servers.fetch_add(1, Ordering::Release);

View File

@ -1,7 +1,6 @@
use anyhow::Context; use anyhow::Context;
use derive_more::From; use derive_more::From;
use std::time::Duration; use std::time::Duration;
use tracing::{info_span, instrument, Instrument};
/// Use HTTP and WS providers. /// Use HTTP and WS providers.
// TODO: instead of an enum, I tried to use Box<dyn Provider>, but hit <https://github.com/gakonst/ethers-rs/issues/592> // TODO: instead of an enum, I tried to use Box<dyn Provider>, but hit <https://github.com/gakonst/ethers-rs/issues/592>
@ -20,7 +19,6 @@ impl Web3Provider {
} }
} }
#[instrument]
pub async fn from_str( pub async fn from_str(
url_str: &str, url_str: &str,
http_client: Option<reqwest::Client>, http_client: Option<reqwest::Client>,
@ -38,10 +36,7 @@ impl Web3Provider {
.interval(Duration::from_secs(13)) .interval(Duration::from_secs(13))
.into() .into()
} else if url_str.starts_with("ws") { } else if url_str.starts_with("ws") {
// TODO: i dont think this instrument does much of anything. what level should it be? let provider = ethers::providers::Ws::connect(url_str).await?;
let provider = ethers::providers::Ws::connect(url_str)
.instrument(info_span!("Web3Provider", %url_str))
.await?;
// TODO: dry this up (needs https://github.com/gakonst/ethers-rs/issues/592) // TODO: dry this up (needs https://github.com/gakonst/ethers-rs/issues/592)
// TODO: i don't think this interval matters // TODO: i don't think this interval matters

View File

@ -8,6 +8,7 @@ use entities::revert_log;
use entities::sea_orm_active_enums::Method; use entities::sea_orm_active_enums::Method;
use ethers::providers::{HttpClientError, ProviderError, WsClientError}; use ethers::providers::{HttpClientError, ProviderError, WsClientError};
use ethers::types::{Address, Bytes}; use ethers::types::{Address, Bytes};
use log::{debug, error, warn, Level};
use metered::metered; use metered::metered;
use metered::HitCount; use metered::HitCount;
use metered::ResponseTime; use metered::ResponseTime;
@ -20,8 +21,6 @@ use std::sync::atomic::{self, AtomicBool, Ordering};
use std::sync::Arc; use std::sync::Arc;
use thread_fast_rng::rand::Rng; use thread_fast_rng::rand::Rng;
use tokio::time::{sleep, Duration, Instant}; use tokio::time::{sleep, Duration, Instant};
use tracing::Level;
use tracing::{debug, error, trace, warn};
#[derive(Debug)] #[derive(Debug)]
pub enum OpenRequestResult { pub enum OpenRequestResult {
@ -67,9 +66,9 @@ struct EthCallFirstParams {
impl From<Level> for RequestErrorHandler { impl From<Level> for RequestErrorHandler {
fn from(level: Level) -> Self { fn from(level: Level) -> Self {
match level { match level {
Level::DEBUG => RequestErrorHandler::DebugLevel, Level::Debug => RequestErrorHandler::DebugLevel,
Level::ERROR => RequestErrorHandler::ErrorLevel, Level::Error => RequestErrorHandler::ErrorLevel,
Level::WARN => RequestErrorHandler::WarnLevel, Level::Warn => RequestErrorHandler::WarnLevel,
_ => unimplemented!("unexpected tracing Level"), _ => unimplemented!("unexpected tracing Level"),
} }
} }
@ -85,7 +84,7 @@ impl Authorization {
let rpc_key_id = match self.checks.rpc_key_id { let rpc_key_id = match self.checks.rpc_key_id {
Some(rpc_key_id) => rpc_key_id.into(), Some(rpc_key_id) => rpc_key_id.into(),
None => { None => {
trace!(?self, "cannot save revert without rpc_key_id"); // // trace!(?self, "cannot save revert without rpc_key_id");
return Ok(()); return Ok(());
} }
}; };
@ -119,7 +118,7 @@ impl Authorization {
// TODO: what log level? // TODO: what log level?
// TODO: better format // TODO: better format
trace!(?rl); // trace!(?rl);
// TODO: return something useful // TODO: return something useful
Ok(()) Ok(())
@ -181,14 +180,14 @@ impl OpenRequestHandle {
// TODO: requests from customers have request ids, but we should add // TODO: requests from customers have request ids, but we should add
// TODO: including params in this is way too verbose // TODO: including params in this is way too verbose
// the authorization field is already on a parent span // the authorization field is already on a parent span
trace!(rpc=%self.conn, %method, "request"); // trace!(rpc=%self.conn, %method, "request");
let mut provider = None; let mut provider = None;
while provider.is_none() { while provider.is_none() {
match self.conn.provider.read().await.clone() { match self.conn.provider.read().await.clone() {
None => { None => {
warn!(rpc=%self.conn, "no provider!"); warn!("no provider for {}!", self.conn);
// TODO: how should this work? a reconnect should be in progress. but maybe force one now? // TODO: how should this work? a reconnect should be in progress. but maybe force one now?
// TODO: sleep how long? subscribe to something instead? maybe use a watch handle? // TODO: sleep how long? subscribe to something instead? maybe use a watch handle?
// TODO: this is going to be way too verbose! // TODO: this is going to be way too verbose!
@ -211,29 +210,29 @@ impl OpenRequestHandle {
// TODO: do something special for eth_sendRawTransaction too // TODO: do something special for eth_sendRawTransaction too
let error_handler = if let RequestErrorHandler::SaveReverts = error_handler { let error_handler = if let RequestErrorHandler::SaveReverts = error_handler {
if !["eth_call", "eth_estimateGas"].contains(&method) { if !["eth_call", "eth_estimateGas"].contains(&method) {
trace!(%method, "skipping save on revert"); // trace!(%method, "skipping save on revert");
RequestErrorHandler::DebugLevel RequestErrorHandler::DebugLevel
} else if self.authorization.db_conn.is_some() { } else if self.authorization.db_conn.is_some() {
let log_revert_chance = self.authorization.checks.log_revert_chance; let log_revert_chance = self.authorization.checks.log_revert_chance;
if log_revert_chance == 0.0 { if log_revert_chance == 0.0 {
trace!(%method, "no chance. skipping save on revert"); // trace!(%method, "no chance. skipping save on revert");
RequestErrorHandler::DebugLevel RequestErrorHandler::DebugLevel
} else if log_revert_chance == 1.0 { } else if log_revert_chance == 1.0 {
trace!(%method, "gaurenteed chance. SAVING on revert"); // trace!(%method, "gaurenteed chance. SAVING on revert");
error_handler error_handler
} else if thread_fast_rng::thread_fast_rng().gen_range(0.0f64..=1.0) } else if thread_fast_rng::thread_fast_rng().gen_range(0.0f64..=1.0)
< log_revert_chance < log_revert_chance
{ {
trace!(%method, "missed chance. skipping save on revert"); // trace!(%method, "missed chance. skipping save on revert");
RequestErrorHandler::DebugLevel RequestErrorHandler::DebugLevel
} else { } else {
trace!("Saving on revert"); // trace!("Saving on revert");
// TODO: is always logging at debug level fine? // TODO: is always logging at debug level fine?
error_handler error_handler
} }
} else { } else {
trace!(%method, "no database. skipping save on revert"); // trace!(%method, "no database. skipping save on revert");
RequestErrorHandler::DebugLevel RequestErrorHandler::DebugLevel
} }
} else { } else {
@ -277,14 +276,14 @@ impl OpenRequestHandle {
RequestErrorHandler::DebugLevel => { RequestErrorHandler::DebugLevel => {
// TODO: think about this revert check more. sometimes we might want reverts logged so this needs a flag // TODO: think about this revert check more. sometimes we might want reverts logged so this needs a flag
if !is_revert { if !is_revert {
debug!(?err, %method, ?params, rpc=%self.conn, "bad response!"); debug!("bad response from {}! method={} params={:?} err={:?}", self.conn, method, params, err);
} }
} }
RequestErrorHandler::ErrorLevel => { RequestErrorHandler::ErrorLevel => {
error!(?err, %method, ?params, rpc=%self.conn, "bad response!"); error!("bad response from {}! method={} params={:?} err={:?}", self.conn, method, params, err);
} }
RequestErrorHandler::WarnLevel => { RequestErrorHandler::WarnLevel => {
warn!(?err, %method, ?params, rpc=%self.conn, "bad response!"); warn!("bad response from {}! method={} params={:?} err={:?}", self.conn, method, params, err);
} }
RequestErrorHandler::SaveReverts => { RequestErrorHandler::SaveReverts => {
// TODO: do not unwrap! (doesn't matter much since we check method as a string above) // TODO: do not unwrap! (doesn't matter much since we check method as a string above)
@ -304,8 +303,8 @@ impl OpenRequestHandle {
} else { } else {
// TODO: i think ethers already has trace logging (and does it much more fancy) // TODO: i think ethers already has trace logging (and does it much more fancy)
// TODO: opt-in response inspection to log reverts with their request. put into redis or what? // TODO: opt-in response inspection to log reverts with their request. put into redis or what?
// trace!(rpc=%self.conn, %method, ?response); // // trace!(rpc=%self.conn, %method, ?response);
trace!(%method, rpc=%self.conn, "response"); // trace!(%method, rpc=%self.conn, "response");
} }
response response

View File

@ -5,9 +5,9 @@ use super::connection::Web3Connection;
use super::connections::Web3Connections; use super::connections::Web3Connections;
use super::request::OpenRequestResult; use super::request::OpenRequestResult;
use ethers::prelude::{ProviderError, Transaction, TxHash}; use ethers::prelude::{ProviderError, Transaction, TxHash};
use log::{debug, Level};
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::broadcast; use tokio::sync::broadcast;
use tracing::{debug, trace, Level};
// TODO: think more about TxState // TODO: think more about TxState
#[derive(Clone)] #[derive(Clone)]
@ -34,7 +34,7 @@ impl Web3Connections {
.request( .request(
"eth_getTransactionByHash", "eth_getTransactionByHash",
&(pending_tx_id,), &(pending_tx_id,),
Level::ERROR.into(), Level::Error.into(),
) )
.await? .await?
} }
@ -43,12 +43,12 @@ impl Web3Connections {
return Ok(None); return Ok(None);
} }
Err(err) => { Err(err) => {
trace!( // trace!(
?pending_tx_id, // ?pending_tx_id,
?rpc, // ?rpc,
?err, // ?err,
"cancelled funneling transaction" // "cancelled funneling transaction"
); // );
return Ok(None); return Ok(None);
} }
}; };
@ -79,7 +79,7 @@ impl Web3Connections {
return Ok(()); return Ok(());
} }
trace!(?pending_tx_id, "checking pending_transactions on {}", rpc); // trace!(?pending_tx_id, "checking pending_transactions on {}", rpc);
if self.pending_transactions.contains_key(&pending_tx_id) { if self.pending_transactions.contains_key(&pending_tx_id) {
// this transaction has already been processed // this transaction has already been processed
return Ok(()); return Ok(());
@ -94,14 +94,14 @@ impl Web3Connections {
Ok(Some(tx_state)) => { Ok(Some(tx_state)) => {
let _ = pending_tx_sender.send(tx_state); let _ = pending_tx_sender.send(tx_state);
trace!(?pending_tx_id, "sent"); // trace!(?pending_tx_id, "sent");
// we sent the transaction. return now. don't break looping because that gives a warning // we sent the transaction. return now. don't break looping because that gives a warning
return Ok(()); return Ok(());
} }
Ok(None) => {} Ok(None) => {}
Err(err) => { Err(err) => {
trace!(?err, ?pending_tx_id, "failed fetching transaction"); // trace!(?err, ?pending_tx_id, "failed fetching transaction");
// unable to update the entry. sleep and try again soon // unable to update the entry. sleep and try again soon
// TODO: retry with exponential backoff with jitter starting from a much smaller time // TODO: retry with exponential backoff with jitter starting from a much smaller time
// sleep(Duration::from_millis(100)).await; // sleep(Duration::from_millis(100)).await;
@ -112,7 +112,7 @@ impl Web3Connections {
// "There is a Pending txn with a lower account nonce. This txn can only be executed after confirmation of the earlier Txn Hash#" // "There is a Pending txn with a lower account nonce. This txn can only be executed after confirmation of the earlier Txn Hash#"
// sometimes it's been pending for many hours // sometimes it's been pending for many hours
// sometimes it's maybe something else? // sometimes it's maybe something else?
debug!(?pending_tx_id, "not found on {}", rpc); debug!("txid {} not found on {}", pending_tx_id, rpc);
Ok(()) Ok(())
} }
} }

View File

@ -9,16 +9,15 @@ use chrono::NaiveDateTime;
use entities::{rpc_accounting, rpc_key}; use entities::{rpc_accounting, rpc_key};
use hashbrown::HashMap; use hashbrown::HashMap;
use http::StatusCode; use http::StatusCode;
use log::warn;
use migration::{Condition, Expr, SimpleExpr}; use migration::{Condition, Expr, SimpleExpr};
use redis_rate_limiter::{redis::AsyncCommands, RedisConnection}; use redis_rate_limiter::{redis::AsyncCommands, RedisConnection};
use sea_orm::{ use sea_orm::{
ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, QueryOrder, QuerySelect, Select, ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, QueryOrder, QuerySelect, Select,
}; };
use tracing::{instrument, warn};
/// get the attached address from redis for the given auth_token. /// get the attached address from redis for the given auth_token.
/// 0 means all users /// 0 means all users
#[instrument(level = "trace", skip(redis_conn))]
pub async fn get_user_id_from_params( pub async fn get_user_id_from_params(
mut redis_conn: RedisConnection, mut redis_conn: RedisConnection,
// this is a long type. should we strip it down? // this is a long type. should we strip it down?
@ -68,7 +67,7 @@ pub async fn get_user_id_from_params(
/// only allow rpc_key to be set if user_id is also set. /// only allow rpc_key to be set if user_id is also set.
/// this will keep people from reading someone else's keys. /// this will keep people from reading someone else's keys.
/// 0 means none. /// 0 means none.
#[instrument(level = "trace")]
pub fn get_rpc_key_id_from_params( pub fn get_rpc_key_id_from_params(
user_id: u64, user_id: u64,
params: &HashMap<String, String>, params: &HashMap<String, String>,
@ -87,7 +86,6 @@ pub fn get_rpc_key_id_from_params(
} }
} }
#[instrument(level = "trace")]
pub fn get_chain_id_from_params( pub fn get_chain_id_from_params(
app: &Web3ProxyApp, app: &Web3ProxyApp,
params: &HashMap<String, String>, params: &HashMap<String, String>,
@ -102,7 +100,6 @@ pub fn get_chain_id_from_params(
) )
} }
#[instrument(level = "trace")]
pub fn get_query_start_from_params( pub fn get_query_start_from_params(
params: &HashMap<String, String>, params: &HashMap<String, String>,
) -> anyhow::Result<chrono::NaiveDateTime> { ) -> anyhow::Result<chrono::NaiveDateTime> {
@ -126,7 +123,6 @@ pub fn get_query_start_from_params(
) )
} }
#[instrument(level = "trace")]
pub fn get_page_from_params(params: &HashMap<String, String>) -> anyhow::Result<u64> { pub fn get_page_from_params(params: &HashMap<String, String>) -> anyhow::Result<u64> {
params.get("page").map_or_else::<anyhow::Result<u64>, _, _>( params.get("page").map_or_else::<anyhow::Result<u64>, _, _>(
|| { || {
@ -143,7 +139,6 @@ pub fn get_page_from_params(params: &HashMap<String, String>) -> anyhow::Result<
) )
} }
#[instrument(level = "trace")]
pub fn get_query_window_seconds_from_params( pub fn get_query_window_seconds_from_params(
params: &HashMap<String, String>, params: &HashMap<String, String>,
) -> Result<u64, FrontendErrorResponse> { ) -> Result<u64, FrontendErrorResponse> {