Surprisingly large refactor to get ids everywhere (#222)
* cargo upgrade --incompatible and update
* first draft at suprisingly_large_refactor_to_get_ids_everywhere
* put app in a task_local
* ref cleanup
* use a OnceLock instead of a tokio local
* test more methods
* APP isn't set in all tests
* it compiles. tests fail. todos still open
* use the app only when necessary
* more tests. less panic
* less verbose debug impl
* short enum names
* move kafka and secrets to their own files
* main tests pass
* add debug chain block time
* helper for stats that ignores internal stats
* Update Jenkinsfile (#223)
* more tests

---------

Co-authored-by: Pewxz <124064710+pewxz@users.noreply.github.com>
parent abe516c21e
commit e917a11d6c
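Most of the bullets above are mechanical test and cleanup work; the two that shape the diff below are "put app in a task_local" and its replacement, "use a OnceLock instead of a tokio local": the `Web3ProxyApp` handle becomes a process-wide global that is set exactly once at startup. A minimal sketch of that pattern, using a simplified stand-in for the real app struct:

```rust
use std::sync::{Arc, OnceLock};

// simplified stand-in for the real Web3ProxyApp struct
struct Web3ProxyApp {
    chain_id: u64,
}

// set once during startup, readable from any thread or task afterwards
static APP: OnceLock<Arc<Web3ProxyApp>> = OnceLock::new();

fn main() {
    let app = Arc::new(Web3ProxyApp { chain_id: 1 });

    // set() fails if something already initialized the global (e.g. a second test)
    if APP.set(app.clone()).is_err() {
        eprintln!("global APP can only be set once!");
    }

    // later, anywhere in the process:
    let app = APP.get().expect("APP is set during startup");
    assert_eq!(app.chain_id, 1);
}
```

This matches the `APP.set(app.clone())` call in the `app` module hunks further down; the commit's "APP isn't set in all tests" bullet is the usual cost of such a global.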
Cargo.lock (230 changed lines, generated)
@@ -99,9 +99,9 @@ dependencies = [
 
 [[package]]
 name = "anstream"
-version = "0.5.0"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1f58811cfac344940f1a400b6e6231ce35171f614f26439e80f8c1465c5cc0c"
+checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44"
 dependencies = [
  "anstyle",
  "anstyle-parse",

@@ -113,15 +113,15 @@ dependencies = [
 
 [[package]]
 name = "anstyle"
-version = "1.0.3"
+version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b84bf0a05bbb2a83e5eb6fa36bb6e87baa08193c35ff52bbf6b38d8af2890e46"
+checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87"
 
 [[package]]
 name = "anstyle-parse"
-version = "0.2.1"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333"
+checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140"
 dependencies = [
  "utf8parse",
 ]

@@ -137,9 +137,9 @@ dependencies = [
 
 [[package]]
 name = "anstyle-wincon"
-version = "2.1.0"
+version = "3.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "58f54d10c6dfa51283a066ceab3ec1ab78d13fae00aa49243a45e4571fb79dfd"
+checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628"
 dependencies = [
  "anstyle",
  "windows-sys",

@@ -235,6 +235,17 @@ dependencies = [
  "event-listener",
 ]
 
+[[package]]
+name = "async-recursion"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.37",
+]
+
 [[package]]
 name = "async-stream"
 version = "0.3.5"
@@ -259,9 +270,9 @@ dependencies = [
 
 [[package]]
 name = "async-stripe"
-version = "0.23.0"
+version = "0.25.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b257177a9dd10350033af6d2602fb5164b4c7168c7b11f4ae8d287178df38996"
+checksum = "87dd8d77f5bfefa28601194c7233e7c3dc6a9833dae6c990804a2d90a95d6354"
 dependencies = [
  "chrono",
  "futures-util",

@@ -626,9 +637,9 @@ dependencies = [
 
 [[package]]
 name = "bytecount"
-version = "0.6.3"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c"
+checksum = "ad152d03a2c813c80bb94fedbf3a3f02b28f793e39e7c214c8a0bcc196343de7"
 
 [[package]]
 name = "byteorder"

@@ -777,9 +788,9 @@ dependencies = [
 
 [[package]]
 name = "clap"
-version = "4.4.5"
+version = "4.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "824956d0dca8334758a5b7f7e50518d66ea319330cbceedcf76905c2f6ab30e3"
+checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956"
 dependencies = [
  "clap_builder",
  "clap_derive 4.4.2",

@@ -787,9 +798,9 @@ dependencies = [
 
 [[package]]
 name = "clap_builder"
-version = "4.4.5"
+version = "4.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "122ec64120a49b4563ccaedcbea7818d069ed8e9aa6d829b82d8a4128936b2ab"
+checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45"
 dependencies = [
  "anstream",
  "anstyle",

@@ -966,10 +977,11 @@ dependencies = [
 
 [[package]]
 name = "console-api"
-version = "0.5.0"
+version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2895653b4d9f1538a83970077cb01dfc77a4810524e51a110944688e916b18e"
+checksum = "fd326812b3fd01da5bb1af7d340d0d555fd3d4b641e7f1dfcf5962a902952787"
 dependencies = [
+ "futures-core",
  "prost",
  "prost-types",
  "tonic",

@@ -978,14 +990,14 @@ dependencies = [
 
 [[package]]
 name = "console-subscriber"
-version = "0.1.10"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4cf42660ac07fcebed809cfe561dd8730bcd35b075215e6479c516bcd0d11cb"
+checksum = "7481d4c57092cd1c19dd541b92bdce883de840df30aa5d03fd48a3935c01842e"
 dependencies = [
  "console-api",
  "crossbeam-channel",
  "crossbeam-utils",
- "futures",
+ "futures-task",
  "hdrhistogram",
  "humantime",
  "parking_lot",
@ -1296,7 +1308,7 @@ name = "deferred-rate-limiter"
|
||||
version = "0.2.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"hashbrown 0.14.0",
|
||||
"hashbrown 0.14.1",
|
||||
"log",
|
||||
"moka",
|
||||
"redis-rate-limiter",
|
||||
@ -1561,9 +1573,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
|
||||
|
||||
[[package]]
|
||||
name = "errno"
|
||||
version = "0.3.3"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd"
|
||||
checksum = "add4f07d43996f76ef320709726a556a9d4f965d9410d8d0271132d2f8293480"
|
||||
dependencies = [
|
||||
"errno-dragonfly",
|
||||
"libc",
|
||||
@ -2010,13 +2022,12 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "flume"
|
||||
version = "0.10.14"
|
||||
version = "0.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577"
|
||||
checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181"
|
||||
dependencies = [
|
||||
"futures-core",
|
||||
"futures-sink",
|
||||
"pin-project",
|
||||
"spin 0.9.8",
|
||||
]
|
||||
|
||||
@ -2363,9 +2374,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "hashbrown"
|
||||
version = "0.14.0"
|
||||
version = "0.14.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"
|
||||
checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12"
|
||||
dependencies = [
|
||||
"ahash 0.8.3",
|
||||
"allocator-api2",
|
||||
@ -2387,7 +2398,7 @@ version = "0.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"
|
||||
dependencies = [
|
||||
"hashbrown 0.14.0",
|
||||
"hashbrown 0.14.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -2717,12 +2728,12 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "indexmap"
|
||||
version = "2.0.0"
|
||||
version = "2.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d"
|
||||
checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897"
|
||||
dependencies = [
|
||||
"equivalent",
|
||||
"hashbrown 0.14.0",
|
||||
"hashbrown 0.14.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -3010,9 +3021,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "linux-raw-sys"
|
||||
version = "0.4.7"
|
||||
version = "0.4.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128"
|
||||
checksum = "3852614a3bd9ca9804678ba6be5e3b8ce76dfc902cae004e3e0c44051b6e88db"
|
||||
|
||||
[[package]]
|
||||
name = "listenfd"
|
||||
@ -3047,7 +3058,7 @@ version = "0.11.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a4a83fb7698b3643a0e34f9ae6f2e8f0178c0fd42f8b59d493aa271ff3a5bf21"
|
||||
dependencies = [
|
||||
"hashbrown 0.14.0",
|
||||
"hashbrown 0.14.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -3092,9 +3103,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "2.6.3"
|
||||
version = "2.6.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c"
|
||||
checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167"
|
||||
|
||||
[[package]]
|
||||
name = "memoffset"
|
||||
@ -3711,7 +3722,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9"
|
||||
dependencies = [
|
||||
"fixedbitset",
|
||||
"indexmap 2.0.0",
|
||||
"indexmap 2.0.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -3950,9 +3961,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "prost"
|
||||
version = "0.11.9"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd"
|
||||
checksum = "f4fdd22f3b9c31b53c060df4a0613a1c7f062d4115a2b984dd15b1858f7e340d"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"prost-derive",
|
||||
@ -3960,22 +3971,22 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "prost-derive"
|
||||
version = "0.11.9"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4"
|
||||
checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"itertools 0.10.5",
|
||||
"itertools 0.11.0",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 1.0.109",
|
||||
"syn 2.0.37",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "prost-types"
|
||||
version = "0.11.9"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13"
|
||||
checksum = "e081b29f63d83a4bc75cfc9f3fe424f9156cf92d8a4f0c9407cce9a1b67327cf"
|
||||
dependencies = [
|
||||
"prost",
|
||||
]
|
||||
@@ -4258,13 +4269,13 @@ dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.9.5"
+version = "1.9.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47"
+checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff"
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-automata 0.3.8",
+ "regex-automata 0.3.9",
  "regex-syntax 0.7.5",
 ]
 

@@ -4279,9 +4290,9 @@ dependencies = [
 
 [[package]]
 name = "regex-automata"
-version = "0.3.8"
+version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795"
+checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9"
 dependencies = [
  "aho-corasick",
  "memchr",

@@ -4526,9 +4537,9 @@ dependencies = [
 
 [[package]]
 name = "rustix"
-version = "0.38.14"
+version = "0.38.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "747c788e9ce8e92b12cd485c49ddf90723550b654b32508f979b71a7b1ecda4f"
+checksum = "d2f9da0cbd88f9f09e7814e388301c8414c51c62aa6ce1e4b5c551d49d96e531"
 dependencies = [
  "bitflags 2.4.0",
  "errno",

@@ -4545,7 +4556,7 @@ checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8"
 dependencies = [
  "log",
  "ring",
- "rustls-webpki 0.101.6",
+ "rustls-webpki",
  "sct",
 ]
 

@@ -4570,16 +4581,6 @@ dependencies = [
  "base64 0.21.4",
 ]
 
-[[package]]
-name = "rustls-webpki"
-version = "0.100.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f6a5fc258f1c1276dfe3016516945546e2d5383911efc0fc4f1cdc5df3a4ae3"
-dependencies = [
- "ring",
- "untrusted",
-]
-
 [[package]]
 name = "rustls-webpki"
 version = "0.101.6"

@@ -4729,7 +4730,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6bef60732e6016c5643350c87f43a697e8c074e41e4e2a9d961c056cb1310915"
 dependencies = [
  "chrono",
- "clap 4.4.5",
+ "clap 4.4.6",
  "dotenvy",
  "glob",
  "regex",

@@ -4760,7 +4761,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7e53b6ddaf6dbb84e5dfc3fb78634ed0a4d6d64e7479500ab2585db239747031"
 dependencies = [
  "async-trait",
- "clap 4.4.5",
+ "clap 4.4.6",
  "dotenvy",
  "futures",
  "sea-orm",

@@ -5186,9 +5187,9 @@ dependencies = [
 
 [[package]]
 name = "sharded-slab"
-version = "0.1.4"
+version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31"
+checksum = "c1b21f559e07218024e7e9f90f96f601825397de0e25420135f7f952453fed0b"
 dependencies = [
  "lazy_static",
 ]

@@ -5418,9 +5419,9 @@ dependencies = [
 
 [[package]]
 name = "sqlx"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e58421b6bc416714d5115a2ca953718f6c621a51b68e4f4922aea5a4391a721"
+checksum = "0e50c216e3624ec8e7ecd14c6a6a6370aad6ee5d8cfc3ab30b5162eeeef2ed33"
 dependencies = [
  "sqlx-core",
  "sqlx-macros",

@@ -5431,9 +5432,9 @@ dependencies = [
 
 [[package]]
 name = "sqlx-core"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd4cef4251aabbae751a3710927945901ee1d97ee96d757f6880ebb9a79bfd53"
+checksum = "8d6753e460c998bbd4cd8c6f0ed9a64346fcca0723d6e75e52fdc351c5d2169d"
 dependencies = [
  "ahash 0.8.3",
  "atoi",

@@ -5453,7 +5454,7 @@ dependencies = [
  "futures-util",
  "hashlink",
  "hex",
- "indexmap 2.0.0",
+ "indexmap 2.0.2",
  "log",
  "memchr",
  "once_cell",

@@ -5479,9 +5480,9 @@ dependencies = [
 
 [[package]]
 name = "sqlx-macros"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "208e3165167afd7f3881b16c1ef3f2af69fa75980897aac8874a0696516d12c2"
+checksum = "9a793bb3ba331ec8359c1853bd39eed32cdd7baaf22c35ccf5c92a7e8d1189ec"
 dependencies = [
  "proc-macro2",
  "quote",

@@ -5492,9 +5493,9 @@ dependencies = [
 
 [[package]]
 name = "sqlx-macros-core"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a4a8336d278c62231d87f24e8a7a74898156e34c1c18942857be2acb29c7dfc"
+checksum = "0a4ee1e104e00dedb6aa5ffdd1343107b0a4702e862a84320ee7cc74782d96fc"
 dependencies = [
  "dotenvy",
  "either",

@@ -5518,9 +5519,9 @@ dependencies = [
 
 [[package]]
 name = "sqlx-mysql"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ca69bf415b93b60b80dc8fda3cb4ef52b2336614d8da2de5456cc942a110482"
+checksum = "864b869fdf56263f4c95c45483191ea0af340f9f3e3e7b4d57a61c7c87a970db"
 dependencies = [
  "atoi",
  "base64 0.21.4",

@@ -5565,9 +5566,9 @@ dependencies = [
 
 [[package]]
 name = "sqlx-postgres"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a0db2df1b8731c3651e204629dd55e52adbae0462fa1bdcbed56a2302c18181e"
+checksum = "eb7ae0e6a97fb3ba33b23ac2671a5ce6e3cabe003f451abd5a56e7951d975624"
 dependencies = [
  "atoi",
  "base64 0.21.4",

@@ -5610,9 +5611,9 @@ dependencies = [
 
 [[package]]
 name = "sqlx-sqlite"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be4c21bf34c7cae5b283efb3ac1bcc7670df7561124dc2f8bdc0b59be40f79a2"
+checksum = "d59dc83cf45d89c555a577694534fcd1b55c545a816c816ce51f20bbe56a4f3f"
 dependencies = [
  "atoi",
  "chrono",
@ -5815,18 +5816,18 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
|
||||
|
||||
[[package]]
|
||||
name = "thiserror"
|
||||
version = "1.0.48"
|
||||
version = "1.0.49"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7"
|
||||
checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4"
|
||||
dependencies = [
|
||||
"thiserror-impl",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thiserror-impl"
|
||||
version = "1.0.48"
|
||||
version = "1.0.49"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35"
|
||||
checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@ -5928,9 +5929,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tokio-console"
|
||||
version = "0.1.9"
|
||||
version = "0.1.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4d8d44c50f1b17838c6044119701900e4242dbc0e8a3792f6fbf512b489b3dbf"
|
||||
checksum = "d5ff40e8df801b383b8666967ec4aee8dc516f376d06d0e5a9f93f310763e6d2"
|
||||
dependencies = [
|
||||
"atty",
|
||||
"clap 3.2.25",
|
||||
@ -6051,14 +6052,14 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "toml"
|
||||
version = "0.8.0"
|
||||
version = "0.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c226a7bba6d859b63c92c4b4fe69c5b6b72d0cb897dbc8e6012298e6154cb56e"
|
||||
checksum = "1bc1433177506450fe920e46a4f9812d0c211f5dd556da10e731a0a3dfa151f0"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"serde_spanned",
|
||||
"toml_datetime",
|
||||
"toml_edit 0.20.0",
|
||||
"toml_edit 0.20.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -6076,7 +6077,7 @@ version = "0.19.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
|
||||
dependencies = [
|
||||
"indexmap 2.0.0",
|
||||
"indexmap 2.0.2",
|
||||
"serde",
|
||||
"serde_spanned",
|
||||
"toml_datetime",
|
||||
@ -6085,11 +6086,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "toml_edit"
|
||||
version = "0.20.0"
|
||||
version = "0.20.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ff63e60a958cefbb518ae1fd6566af80d9d4be430a33f3723dfc47d1d411d95"
|
||||
checksum = "ca676d9ba1a322c1b64eb8045a5ec5c0cfb0c9d08e15e9ff622589ad5221c8fe"
|
||||
dependencies = [
|
||||
"indexmap 2.0.0",
|
||||
"indexmap 2.0.2",
|
||||
"serde",
|
||||
"serde_spanned",
|
||||
"toml_datetime",
|
||||
@ -6098,16 +6099,15 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tonic"
|
||||
version = "0.9.2"
|
||||
version = "0.10.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a"
|
||||
checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
"axum",
|
||||
"base64 0.21.4",
|
||||
"bytes",
|
||||
"futures-core",
|
||||
"futures-util",
|
||||
"h2",
|
||||
"http",
|
||||
"http-body",
|
||||
@ -6397,17 +6397,17 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"
|
||||
|
||||
[[package]]
|
||||
name = "ureq"
|
||||
version = "2.7.1"
|
||||
version = "2.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0b11c96ac7ee530603dcdf68ed1557050f374ce55a5a07193ebf8cbc9f8927e9"
|
||||
checksum = "f5ccd538d4a604753ebc2f17cd9946e89b77bf87f6a8e2309667c6f2e87855e3"
|
||||
dependencies = [
|
||||
"base64 0.21.4",
|
||||
"log",
|
||||
"once_cell",
|
||||
"rustls",
|
||||
"rustls-webpki 0.100.3",
|
||||
"rustls-webpki",
|
||||
"url",
|
||||
"webpki-roots 0.23.1",
|
||||
"webpki-roots 0.25.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -6614,11 +6614,12 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "web3_proxy"
|
||||
version = "1.43.8"
|
||||
version = "1.43.10"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"arc-swap",
|
||||
"argh",
|
||||
"async-recursion",
|
||||
"async-stripe",
|
||||
"async-trait",
|
||||
"axum",
|
||||
@ -6643,7 +6644,7 @@ dependencies = [
|
||||
"futures-util",
|
||||
"glob",
|
||||
"handlebars",
|
||||
"hashbrown 0.14.0",
|
||||
"hashbrown 0.14.1",
|
||||
"hdrhistogram",
|
||||
"hostname",
|
||||
"http",
|
||||
@ -6684,7 +6685,7 @@ dependencies = [
|
||||
"tokio",
|
||||
"tokio-console",
|
||||
"tokio-stream",
|
||||
"toml 0.8.0",
|
||||
"toml 0.8.1",
|
||||
"tower",
|
||||
"tower-http",
|
||||
"tracing",
|
||||
@ -6696,7 +6697,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "web3_proxy_cli"
|
||||
version = "1.43.8"
|
||||
version = "1.43.10"
|
||||
dependencies = [
|
||||
"env_logger",
|
||||
"parking_lot",
|
||||
@ -6710,22 +6711,13 @@ dependencies = [
|
||||
"web3_proxy",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "webpki-roots"
|
||||
version = "0.23.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338"
|
||||
dependencies = [
|
||||
"rustls-webpki 0.100.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "webpki-roots"
|
||||
version = "0.24.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888"
|
||||
dependencies = [
|
||||
"rustls-webpki 0.101.6",
|
||||
"rustls-webpki",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
Jenkinsfile (31 changed lines, vendored)
@@ -18,22 +18,21 @@ pipeline {
         LATEST_BRANCH="main"
     }
     stages {
-        // stage('Check and Cancel Old Builds') {
-        //     steps {
-        //         script {
-        //             def currentBuildNumber = currentBuild.number
-
-        //             // Check all build from same project
-        //             for (build in currentBuild.rawBuild.getParent().getBuilds()) {
-        //                 // Check if an older build is still running and cancel it in favor of the new one
-        //                 if (build.number < currentBuildNumber && build.building) {
-        //                     echo "Cancelling build ${build.number}"
-        //                     build.doStop()
-        //                 }
-        //             }
-        //         }
-        //     }
-        // }
+        stage('Check and Cancel Old Builds') {
+            steps {
+                script {
+                    def jobName = env.JOB_NAME
+                    def buildNumber = env.BUILD_NUMBER.toInteger()
+
+                    // Get all running builds of the current job
+                    def job = Jenkins.instance.getItemByFullName(jobName)
+                    def runningBuilds = job.builds.findAll { it.isBuilding() && it.number < buildNumber }
+
+                    // Cancel running builds
+                    runningBuilds.each { it.doStop() }
+                }
+            }
+        }
         stage('build and push') {
             parallel {
                 stage('Build and push arm64_graviton2 image') {
README.md

@@ -44,13 +44,13 @@ cargo run --release -- proxyd
 Quickly run tests:
 
 ```
-RUST_LOG=web3_proxy=trace,info cargo nextest run
+RUST_BACKTRACE=1 RUST_LOG=web3_proxy=trace,info cargo nextest run
 ```
 
 Run more tests:
 
 ```
-RUST_LOG=web3_proxy=trace,info cargo nextest run --features tests-needing-docker
+RUST_BACKTRACE=1 RUST_LOG=web3_proxy=trace,info cargo nextest run --features tests-needing-docker
 ```
 
 ## Mysql
TODO.md (2 changed lines)
|
||||
- we were caching too aggressively
|
||||
- [x] BUG! if sending transactions gets "INTERNAL_ERROR: existing tx with same hash", create a success message
|
||||
- we just want to be sure that the server has our tx and in this case, it does.
|
||||
- ERROR http_request:request:try_send_all_upstream_servers: web3_proxy::rpcs::request: bad response! err=JsonRpcClientError(JsonRpcError(JsonRpcError { code: -32000, message: "INTERNAL_ERROR: existing tx with same hash", data: None })) method=eth_sendRawTransaction rpc=local_erigon_alpha_archive id=01GF4HV03Y4ZNKQV8DW5NDQ5CG method=POST authorized_request=User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 }) self=Web3Rpcs { conns: {"local_erigon_alpha_archive_ws": Web3Rpc { name: "local_erigon_alpha_archive_ws", blocks: "all", .. }, "local_geth_ws": Web3Rpc { name: "local_geth_ws", blocks: 64, .. }, "local_erigon_alpha_archive": Web3Rpc { name: "local_erigon_alpha_archive", blocks: "all", .. }}, .. } authorized_request=Some(User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 })) request=JsonRpcRequest { id: RawValue(39), method: "eth_sendRawTransaction", .. } request_metadata=Some(RequestMetadata { datetime: 2022-10-11T22:14:57.406829095Z, period_seconds: 60, request_bytes: 633, backend_requests: 0, no_servers: 0, error_response: false, response_bytes: 0, response_millis: 0 }) block_needed=None
|
||||
- ERROR http_request:request:try_send_all_upstream_servers: web3_proxy::rpcs::request: bad response! err=JsonRpcClientError(JsonRpcError(JsonRpcError { code: -32000, message: "INTERNAL_ERROR: existing tx with same hash", data: None })) method=eth_sendRawTransaction rpc=local_erigon_alpha_archive id=01GF4HV03Y4ZNKQV8DW5NDQ5CG method=POST authorized_request=User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 }) self=Web3Rpcs { conns: {"local_erigon_alpha_archive_ws": Web3Rpc { name: "local_erigon_alpha_archive_ws", blocks: "all", .. }, "local_geth_ws": Web3Rpc { name: "local_geth_ws", blocks: 64, .. }, "local_erigon_alpha_archive": Web3Rpc { name: "local_erigon_alpha_archive", blocks: "all", .. }}, .. } authorized_request=Some(User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 })) request=JsonRpcRequest { id: RawValue(39), method: "eth_sendRawTransaction", .. } web3_request=Some(RequestMetadata { datetime: 2022-10-11T22:14:57.406829095Z, period_seconds: 60, request_bytes: 633, backend_requests: 0, no_servers: 0, error_response: false, response_bytes: 0, response_millis: 0 }) block_needed=None
|
||||
- [x] serde collect unknown fields in config instead of crash
|
||||
- [x] upgrade user tier by address
|
||||
- [x] all_backend_connections skips syncing servers
|
||||
|
@ -8,7 +8,7 @@ edition = "2021"
|
||||
redis-rate-limiter = { path = "../redis-rate-limiter" }
|
||||
|
||||
anyhow = "1.0.75"
|
||||
hashbrown = "0.14.0"
|
||||
hashbrown = "0.14.1"
|
||||
log = "0.4.20"
|
||||
moka = { version = "0.12.0", features = ["future"] }
|
||||
tokio = "1.32.0"
|
||||
|
web3_proxy/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "web3_proxy"
-version = "1.43.8"
+version = "1.43.10"
 edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

@@ -38,14 +38,14 @@ siwe = { git = "https://github.com/llamanodes/siwe-rs", rev = "013be5204ff1c8577
 anyhow = { version = "1.0.75", features = ["backtrace"] }
 arc-swap = { version = "1.6.0" }
 argh = "0.1.12"
-async-stripe = { version = "0.23.0", default-features = false, features = ["billing", "checkout", "connect", "runtime-tokio-hyper-rustls", "webhook-events"] }
+async-stripe = { version = "0.25.1", default-features = false, features = ["billing", "checkout", "connect", "runtime-tokio-hyper-rustls", "webhook-events"] }
 async-trait = "0.1.73"
 axum = { version = "0.6.20", features = ["headers", "tracing", "ws"] }
 axum-client-ip = "0.4.2"
 axum-macros = "0.3.8"
 base64 = "0.21.4"
 chrono = { version = "0.4.31" }
-console-subscriber = { version = "0.1.10", features = ["env-filter", "parking_lot"], optional = true }
+console-subscriber = { version = "0.2.0", features = ["env-filter", "parking_lot"], optional = true }
 counter = "0.5.7"
 derivative = "2.2.0"
 derive_more = { version = "0.99.17", features = ["nightly"] }

@@ -56,7 +56,7 @@ fstrings = "0.2"
 futures = { version = "0.3.28" }
 glob = "0.3.1"
 handlebars = "4.4.0"
-hashbrown = { version = "0.14.0", features = ["serde", "nightly"] }
+hashbrown = { version = "0.14.1", features = ["serde", "nightly"] }
 hdrhistogram = "7.5.2"
 hostname = "0.3.1"
 http = "0.2.9"

@@ -74,7 +74,7 @@ ordered-float = {version = "4.1.0" }
 pagerduty-rs = { version = "0.1.6", default-features = false, features = ["async", "rustls", "sync"] }
 parking_lot = { version = "0.12.1", features = ["arc_lock", "nightly"] }
 rdkafka = { version = "0.34.0", features = ["tracing"] }
-regex = "1.9.5"
+regex = "1.9.6"
 reqwest = { version = "0.11.20", default-features = false, features = ["json", "tokio-rustls"] }
 rmp-serde = "1.1.2"
 rust_decimal = { version = "1.32.0" }

@@ -87,9 +87,9 @@ serde_prometheus = "0.2.4"
 strum = { version = "0.25.0", features = ["derive"] }
 time = { version = "0.3" }
 tokio = { version = "1.32.0", features = ["full", "tracing"] }
-tokio-console = { version = "0.1.9", optional = true }
+tokio-console = { version = "0.1.10", optional = true }
 tokio-stream = { version = "0.1.14", features = ["sync"] }
-toml = "0.8.0"
+toml = "0.8.1"
 tower = { version = "0.4.13", features = ["timeout", "tracing"] }
 tower-http = { version = "0.4.4", features = ["cors", "normalize-path", "sensitive-headers", "trace"] }
 tracing = "0.1"

@@ -101,6 +101,7 @@ uuid = { version = "1.4.1", default-features = false, features = ["fast-rng", "v
 test-log = { version = "0.2.12", default-features = false, features = ["trace"] }
 bytes = "1.5.0"
 futures-util = "0.3.28"
+async-recursion = "1.0.5"
 
 # # TODO: bring this back
 # check-if-email-exists = "0.9.0"
@@ -43,8 +43,8 @@ pub async fn query_admin_modify_usertier<'a>(
     let mut response_body = HashMap::new();
 
     // Establish connections
-    let db_conn = global_db_conn().await?;
-    let db_replica = global_db_replica_conn().await?;
+    let db_conn = global_db_conn()?;
+    let db_replica = global_db_replica_conn()?;
     let mut redis_conn = app.redis_conn().await?;
 
     // Will modify logic here
@@ -1,22 +1,17 @@
 mod ws;
 
 use crate::block_number::CacheMode;
 use crate::caches::{RegisteredUserRateLimitKey, RpcSecretKeyCache, UserBalanceCache};
 use crate::config::{AppConfig, TopConfig};
 use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
-use crate::frontend::authorization::{
-    Authorization, RequestMetadata, RequestOrMethod, ResponseOrBytes,
-};
+use crate::frontend::authorization::{Authorization, Web3Request};
 use crate::frontend::rpc_proxy_ws::ProxyMode;
-use crate::globals::{global_db_conn, DatabaseError, DB_CONN, DB_REPLICA};
+use crate::globals::{global_db_conn, DatabaseError, APP, DB_CONN, DB_REPLICA};
 use crate::jsonrpc::{
     self, JsonRpcErrorData, JsonRpcId, JsonRpcParams, JsonRpcRequest, JsonRpcRequestEnum,
     JsonRpcResultData, SingleResponse,
 };
 use crate::relational_db::{connect_db, migrate_db};
-use crate::response_cache::{
-    JsonRpcQueryCacheKey, JsonRpcResponseCache, JsonRpcResponseEnum, JsonRpcResponseWeigher,
-};
+use crate::response_cache::{JsonRpcResponseCache, JsonRpcResponseEnum, JsonRpcResponseWeigher};
 use crate::rpcs::blockchain::Web3ProxyBlock;
 use crate::rpcs::consensus::RankedRpcs;
 use crate::rpcs::many::Web3Rpcs;
@@ -77,9 +72,9 @@ pub struct Web3ProxyApp {
     /// Send requests to the best server available
     pub balanced_rpcs: Arc<Web3Rpcs>,
     /// Send 4337 Abstraction Bundler requests to one of these servers
-    pub bundler_4337_rpcs: Option<Arc<Web3Rpcs>>,
+    pub bundler_4337_rpcs: Arc<Web3Rpcs>,
     /// application config
-    /// TODO: this will need a large refactor to handle reloads while running. maybe use a watch::Receiver?
+    /// TODO: this will need a large refactor to handle reloads while running. maybe use a watch::Receiver and a task_local?
     pub config: AppConfig,
     pub http_client: Option<reqwest::Client>,
     /// track JSONRPC responses

@@ -114,8 +109,7 @@ pub struct Web3ProxyApp {
     /// we do this because each pending login is a row in the database
     pub login_rate_limiter: Option<RedisRateLimiter>,
     /// Send private requests (like eth_sendRawTransaction) to all these servers
-    /// TODO: include another type so that we can use private miner relays that do not use JSONRPC requests
-    pub private_rpcs: Option<Arc<Web3Rpcs>>,
+    pub protected_rpcs: Arc<Web3Rpcs>,
     pub prometheus_port: Arc<AtomicU16>,
     /// cache authenticated users so that we don't have to query the database on the hot path
     // TODO: should the key be our RpcSecretKey class instead of Ulid?
@@ -462,60 +456,42 @@ impl Web3ProxyApp {
         app_handles.push(balanced_handle);
 
         // prepare a Web3Rpcs to hold all our private connections
-        // only some chains have this, so this is optional
-        // TODO: remove this. it should only be done by apply_top_config
-        let private_rpcs = if top_config.private_rpcs.is_none() {
-            warn!("No private relays configured. Any transactions will be broadcast to the public mempool!");
-            None
-        } else {
-            // TODO: do something with the spawn handle
-            let (private_rpcs, private_handle, _) = Web3Rpcs::spawn(
-                chain_id,
-                // private rpcs don't get subscriptions, so no need for max_head_block_lag
-                None,
-                0,
-                0,
-                "protected rpcs".into(),
-                // subscribing to new heads here won't work well. if they are fast, they might be ahead of balanced_rpcs
-                // they also often have low rate limits
-                // however, they are well connected to miners/validators. so maybe using them as a safety check would be good
-                // TODO: but maybe we could include privates in the "backup" tier
-                None,
-                None,
-            )
-            .await
-            .web3_context("spawning private_rpcs")?;
+        // only some chains have this, so this might be empty
+        // TODO: set min_sum_soft_limit > 0 if any private rpcs are configured. this way we don't accidently leak to the public mempool if they are all offline
+        let (private_rpcs, private_handle, _) = Web3Rpcs::spawn(
+            chain_id,
+            // private rpcs don't get subscriptions, so no need for max_head_block_lag
+            None,
+            0,
+            0,
+            "protected rpcs".into(),
+            // subscribing to new heads here won't work well. if they are fast, they might be ahead of balanced_rpcs
+            // they also often have low rate limits
+            // however, they are well connected to miners/validators. so maybe using them as a safety check would be good
+            // TODO: but maybe we could include privates in the "backup" tier
+            None,
+            None,
+        )
+        .await
+        .web3_context("spawning private_rpcs")?;
 
-            app_handles.push(private_handle);
+        app_handles.push(private_handle);
 
-            Some(private_rpcs)
-        };
+        // prepare a Web3Rpcs to hold all our 4337 Abstraction Bundler connections (if any)
+        let (bundler_4337_rpcs, bundler_4337_rpcs_handle, _) = Web3Rpcs::spawn(
+            chain_id,
+            // bundler_4337_rpcs don't get subscriptions, so no need for max_head_block_lag
+            None,
+            0,
+            0,
+            "eip4337 rpcs".into(),
+            None,
+            None,
+        )
+        .await
+        .web3_context("spawning bundler_4337_rpcs")?;
 
-        // prepare a Web3Rpcs to hold all our 4337 Abstraction Bundler connections
-        // only some chains have this, so this is optional
-        // TODO: remove this. it should only be done by apply_top_config
-        let bundler_4337_rpcs = if top_config.bundler_4337_rpcs.is_none() {
-            warn!("No bundler_4337_rpcs configured");
-            None
-        } else {
-            // TODO: do something with the spawn handle
-            let (bundler_4337_rpcs, bundler_4337_rpcs_handle, _) = Web3Rpcs::spawn(
-                chain_id,
-                // bundler_4337_rpcs don't get subscriptions, so no need for max_head_block_lag
-                None,
-                0,
-                0,
-                "eip4337 rpcs".into(),
-                None,
-                None,
-            )
-            .await
-            .web3_context("spawning bundler_4337_rpcs")?;
-
-            app_handles.push(bundler_4337_rpcs_handle);
-
-            Some(bundler_4337_rpcs)
-        };
+        app_handles.push(bundler_4337_rpcs_handle);
 
         let hostname = hostname::get()
             .ok()
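The theme of this hunk (and of the struct fields above) is dropping `Option<Arc<Web3Rpcs>>` in favor of an always-present `Arc<Web3Rpcs>` that may simply contain zero servers, so call sites check `is_empty()` instead of unwrapping an `Option`. A small sketch of the before/after shape, with `Web3Rpcs` reduced to a name list:

```rust
use std::sync::Arc;

// drastically simplified stand-in for Web3Rpcs
struct Web3Rpcs {
    names: Vec<String>,
}

impl Web3Rpcs {
    fn is_empty(&self) -> bool {
        self.names.is_empty()
    }
}

// before: two layers to check, and toggling between None and Some at
// config-reload time needed its own (unimplemented) code path
fn send_before(protected: Option<&Arc<Web3Rpcs>>) -> &'static str {
    match protected {
        Some(rpcs) if !rpcs.is_empty() => "protected rpcs",
        _ => "public fallback",
    }
}

// after: one check; an unconfigured group is just an empty group
fn send_after(protected: &Arc<Web3Rpcs>) -> &'static str {
    if protected.is_empty() {
        "public fallback"
    } else {
        "protected rpcs"
    }
}

fn main() {
    let empty = Arc::new(Web3Rpcs { names: vec![] });
    assert_eq!(send_before(None), send_after(&empty));
}
```

This is why the `todo!("handle toggling private_rpcs")` branches disappear in the `apply_top_config` hunk below: an empty group can always accept a new server list.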
@@ -557,7 +533,7 @@ impl Web3ProxyApp {
             kafka_producer,
             login_rate_limiter,
             pending_txid_firehose: deduped_txid_firehose,
-            private_rpcs,
+            protected_rpcs: private_rpcs,
             prometheus_port: prometheus_port.clone(),
             rpc_secret_key_cache,
             start: Instant::now(),

@@ -568,13 +544,17 @@ impl Web3ProxyApp {
             watch_consensus_head_receiver,
         };
 
+        let app = Arc::new(app);
+
+        if let Err(app) = APP.set(app.clone()) {
+            error!(?app, "global APP can only be set once!");
+        };
+
         // TODO: do apply_top_config once we don't duplicate the db
         if let Err(err) = app.apply_top_config_db(&top_config).await {
             warn!(?err, "unable to fully apply config while starting!");
         };
 
-        let app = Arc::new(app);
-
         // watch for config changes
         // TODO: move this to its own function/struct
         {
@@ -655,42 +635,25 @@ impl Web3ProxyApp {
 
         let balanced = self
             .balanced_rpcs
-            .apply_server_configs(self, new_top_config.balanced_rpcs.clone())
+            .apply_server_configs(self, &new_top_config.balanced_rpcs)
             .await
             .web3_context("updating balanced rpcs");
 
-        let private = if let Some(private_rpc_configs) = new_top_config.private_rpcs.clone() {
-            if let Some(ref private_rpcs) = self.private_rpcs {
-                private_rpcs
-                    .apply_server_configs(self, private_rpc_configs)
-                    .await
-                    .web3_context("updating private_rpcs")
-            } else {
-                // TODO: maybe we should have private_rpcs just be empty instead of being None
-                todo!("handle toggling private_rpcs")
-            }
-        } else {
-            Ok(())
-        };
+        let protected = self
+            .protected_rpcs
+            .apply_server_configs(self, &new_top_config.private_rpcs)
+            .await
+            .web3_context("updating private_rpcs");
 
-        let bundler_4337 =
-            if let Some(bundler_4337_rpc_configs) = new_top_config.bundler_4337_rpcs.clone() {
-                if let Some(ref bundler_4337_rpcs) = self.bundler_4337_rpcs {
-                    bundler_4337_rpcs
-                        .apply_server_configs(self, bundler_4337_rpc_configs.clone())
-                        .await
-                        .web3_context("updating bundler_4337_rpcs")
-                } else {
-                    // TODO: maybe we should have bundler_4337_rpcs just be empty instead of being None
-                    todo!("handle toggling bundler_4337_rpcs")
-                }
-            } else {
-                Ok(())
-            };
+        let bundler_4337 = self
+            .bundler_4337_rpcs
+            .apply_server_configs(self, &new_top_config.bundler_4337_rpcs)
+            .await
+            .web3_context("updating bundler_4337_rpcs");
 
         // TODO: log all the errors if there are multiple
         balanced?;
-        private?;
+        protected?;
         bundler_4337?;
 
         Ok(())
@@ -714,7 +677,7 @@ impl Web3ProxyApp {
             .db_max_connections
             .unwrap_or(db_min_connections * 2);
 
-        let db_conn = if let Ok(old_db_conn) = global_db_conn().await {
+        let db_conn = if let Ok(old_db_conn) = global_db_conn() {
             // TODO: compare old settings with new settings. don't always re-use!
             Ok(old_db_conn)
         } else {

@@ -740,7 +703,7 @@ impl Web3ProxyApp {
             .db_replica_max_connections
             .unwrap_or(db_max_connections);
 
-        let db_replica = if let Ok(old_db_replica) = global_db_conn().await {
+        let db_replica = if let Ok(old_db_replica) = global_db_conn() {
             // TODO: compare old settings with new settings. don't always re-use!
             Ok(old_db_replica)
         } else {

@@ -779,8 +742,8 @@ impl Web3ProxyApp {
             db_conn.clone().map(Into::into)
         };
 
-        let mut locked_conn = DB_CONN.write().await;
-        let mut locked_replica = DB_REPLICA.write().await;
+        let mut locked_conn = DB_CONN.write();
+        let mut locked_replica = DB_REPLICA.write();
 
         *locked_conn = db_conn.clone();
         *locked_replica = db_replica.clone();

@@ -845,7 +808,7 @@ impl Web3ProxyApp {
         #[derive(Default, Serialize)]
         struct UserCount(i64);
 
-        let user_count: UserCount = if let Ok(db) = global_db_conn().await {
+        let user_count: UserCount = if let Ok(db) = global_db_conn() {
             match user::Entity::find().count(&db).await {
                 Ok(user_count) => UserCount(user_count as i64),
                 Err(err) => {
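The `.await`s dropped from `DB_CONN.write()` and `global_db_conn()` above suggest the globals moved from an async lock to a synchronous one, plausibly `parking_lot::RwLock` given that parking_lot is already a dependency in the Cargo.toml hunks; the actual type lives in the `globals` module, which this capture does not include. A sketch of the difference, with a hypothetical `DbConn` alias:

```rust
use parking_lot::RwLock;

// hypothetical stand-in for the real database connection handle
type DbConn = Option<String>;

// a synchronous lock can sit in a plain static and be taken without .await
static DB_CONN: RwLock<DbConn> = RwLock::new(None);

fn swap_db_conn(new_conn: DbConn) {
    // parking_lot's write() blocks the thread briefly instead of yielding to
    // the async executor; fine for short critical sections like swapping a handle
    let mut locked = DB_CONN.write();
    *locked = new_conn;
}

fn main() {
    swap_db_conn(Some("mysql://example".to_string()));
    assert!(DB_CONN.read().is_some());
}
```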
@@ -1079,15 +1042,14 @@ impl Web3ProxyApp {
         let head_block: Web3ProxyBlock = self
             .balanced_rpcs
             .head_block()
-            .ok_or(Web3ProxyError::NoServersSynced)?
-            .clone();
+            .ok_or(Web3ProxyError::NoServersSynced)?;
 
         // TODO: use streams and buffers so we don't overwhelm our server
         let responses = join_all(
             requests
                 .into_iter()
                 .map(|request| {
-                    self.proxy_request(request, authorization.clone(), Some(&head_block))
+                    self.proxy_request(request, authorization.clone(), Some(head_block.clone()))
                 })
                 .collect::<Vec<_>>(),
         )
@@ -1131,74 +1093,53 @@ impl Web3ProxyApp {
 
     /// try to send transactions to the best available rpcs with protected/private mempools
     /// if no protected rpcs are configured, then some public rpcs are used instead
-    async fn try_send_protected<P: JsonRpcParams>(
+    async fn try_send_protected(
         self: &Arc<Self>,
-        method: &str,
-        params: &P,
-        request_metadata: &Arc<RequestMetadata>,
+        web3_request: &Arc<Web3Request>,
     ) -> Web3ProxyResult<Arc<RawValue>> {
-        if let Some(protected_rpcs) = self.private_rpcs.as_ref() {
-            if !protected_rpcs.is_empty() {
-                let protected_response = protected_rpcs
-                    .try_send_all_synced_connections(
-                        method,
-                        params,
-                        request_metadata,
-                        None,
-                        None,
-                        Some(Duration::from_secs(10)),
-                        Some(Level::TRACE.into()),
-                        Some(3),
-                    )
-                    .await;
-
-                return protected_response;
-            }
-        }
-
-        let num_public_rpcs = match request_metadata.proxy_mode() {
-            // TODO: how many balanced rpcs should we send to? configurable? percentage of total?
-            ProxyMode::Best | ProxyMode::Debug => Some(4),
-            ProxyMode::Fastest(0) => None,
-            // TODO: how many balanced rpcs should we send to? configurable? percentage of total?
-            // TODO: what if we do 2 per tier? we want to blast the third party rpcs
-            // TODO: maybe having the third party rpcs in their own Web3Rpcs would be good for this
-            ProxyMode::Fastest(x) => Some(x * 4),
-            ProxyMode::Versus => None,
-        };
-
-        // no private rpcs to send to. send to a few public rpcs
-        // try_send_all_upstream_servers puts the request id into the response. no need to do that ourselves here.
-        self.balanced_rpcs
-            .try_send_all_synced_connections(
-                method,
-                params,
-                request_metadata,
-                None,
-                None,
-                Some(Duration::from_secs(10)),
-                Some(Level::TRACE.into()),
-                num_public_rpcs,
-            )
-            .await
+        if self.protected_rpcs.is_empty() {
+            let num_public_rpcs = match web3_request.proxy_mode() {
+                // TODO: how many balanced rpcs should we send to? configurable? percentage of total?
+                ProxyMode::Best | ProxyMode::Debug => Some(4),
+                ProxyMode::Fastest(0) => None,
+                // TODO: how many balanced rpcs should we send to? configurable? percentage of total?
+                // TODO: what if we do 2 per tier? we want to blast the third party rpcs
+                // TODO: maybe having the third party rpcs in their own Web3Rpcs would be good for this
+                ProxyMode::Fastest(x) => Some(x * 4),
+                ProxyMode::Versus => None,
+            };
+
+            // no private rpcs to send to. send to a few public rpcs
+            // try_send_all_upstream_servers puts the request id into the response. no need to do that ourselves here.
+            self.balanced_rpcs
+                .try_send_all_synced_connections(
+                    web3_request,
+                    Some(Duration::from_secs(10)),
+                    Some(Level::TRACE.into()),
+                    num_public_rpcs,
+                )
+                .await
+        } else {
+            self.protected_rpcs
+                .try_send_all_synced_connections(
+                    web3_request,
+                    Some(Duration::from_secs(10)),
+                    Some(Level::TRACE.into()),
+                    Some(3),
+                )
+                .await
+        }
     }
 
     /// proxy request with up to 3 tries.
     async fn proxy_request(
         self: &Arc<Self>,
-        mut request: JsonRpcRequest,
+        request: JsonRpcRequest,
         authorization: Arc<Authorization>,
-        head_block: Option<&Web3ProxyBlock>,
+        head_block: Option<Web3ProxyBlock>,
     ) -> (StatusCode, jsonrpc::SingleResponse, Vec<Arc<Web3Rpc>>) {
-        let request_metadata = RequestMetadata::new(
-            self,
-            authorization,
-            RequestOrMethod::Request(&request),
-            head_block,
-        )
-        .await;
-
-        let response_id = request.id;
+        let web3_request =
+            Web3Request::new_with_app(self, authorization, None, request.into(), head_block).await;
 
         // TODO: trace/kafka log request.params before we send them to _proxy_request_with_caching which might modify them
 
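The fan-out rule buried in that match is worth restating on its own: with no protected RPCs configured, `Best`/`Debug` sends a transaction to four public servers, `Fastest(0)` and `Versus` send with no explicit cap (`None` appears to mean "no cap"), and `Fastest(x)` sends to `4x`. A self-contained restatement, with `ProxyMode` reduced to the variants used here:

```rust
// simplified copy of the ProxyMode variants matched in try_send_protected
enum ProxyMode {
    Best,
    Debug,
    Fastest(usize),
    Versus,
}

/// how many public rpcs to blast when no protected mempool is available;
/// None = no cap
fn num_public_rpcs(mode: &ProxyMode) -> Option<usize> {
    match mode {
        ProxyMode::Best | ProxyMode::Debug => Some(4),
        ProxyMode::Fastest(0) => None,
        ProxyMode::Fastest(x) => Some(x * 4),
        ProxyMode::Versus => None,
    }
}

fn main() {
    assert_eq!(num_public_rpcs(&ProxyMode::Best), Some(4));
    assert_eq!(num_public_rpcs(&ProxyMode::Fastest(0)), None);
    assert_eq!(num_public_rpcs(&ProxyMode::Fastest(2)), Some(8));
}
```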
@@ -1213,47 +1154,33 @@ impl Web3ProxyApp {
 
             tries += 1;
 
-            let (code, response) = match self
-                ._proxy_request_with_caching(
-                    // TODO: avoid clone here
-                    response_id.clone(),
-                    &request.method,
-                    &mut request.params,
-                    head_block,
-                    &request_metadata,
-                )
-                .await
-            {
+            let (code, response) = match self._proxy_request_with_caching(&web3_request).await {
                 Ok(response_data) => {
-                    request_metadata
-                        .error_response
-                        .store(false, Ordering::Relaxed);
-                    request_metadata
+                    web3_request.error_response.store(false, Ordering::Relaxed);
+                    web3_request
                         .user_error_response
                         .store(false, Ordering::Relaxed);
 
                     (StatusCode::OK, response_data)
                 }
                 Err(err @ Web3ProxyError::NullJsonRpcResult) => {
-                    request_metadata
-                        .error_response
-                        .store(false, Ordering::Relaxed);
-                    request_metadata
+                    web3_request.error_response.store(false, Ordering::Relaxed);
+                    web3_request
                         .user_error_response
                         .store(false, Ordering::Relaxed);
 
-                    err.as_json_response_parts(response_id)
+                    err.as_json_response_parts(web3_request.id())
                 }
                 Err(Web3ProxyError::JsonRpcResponse(response_data)) => {
-                    request_metadata
-                        .error_response
-                        .store(false, Ordering::Relaxed);
-                    request_metadata
+                    web3_request.error_response.store(false, Ordering::Relaxed);
+                    web3_request
                         .user_error_response
                         .store(response_data.is_error(), Ordering::Relaxed);
 
-                    let response =
-                        jsonrpc::ParsedResponse::from_response_data(response_data, response_id);
+                    let response = jsonrpc::ParsedResponse::from_response_data(
+                        response_data,
+                        web3_request.id(),
+                    );
                     (StatusCode::OK, response.into())
                 }
                 Err(err) => {
@@ -1264,24 +1191,21 @@ impl Web3ProxyApp {
 
                     // max tries exceeded. return the error
 
-                    request_metadata
-                        .error_response
-                        .store(true, Ordering::Relaxed);
-                    request_metadata
+                    web3_request.error_response.store(true, Ordering::Relaxed);
+                    web3_request
                         .user_error_response
                         .store(false, Ordering::Relaxed);
 
-                    err.as_json_response_parts(response_id)
+                    err.as_json_response_parts(web3_request.id())
                 }
             };
 
             // TODO: this serializes twice :/
-            request_metadata.add_response(ResponseOrBytes::Response(&response));
+            web3_request.add_response(&response);
 
-            let rpcs = request_metadata.backend_rpcs_used();
+            let rpcs = web3_request.backend_rpcs_used();
 
             // there might be clones in the background, so this isn't a sure thing
-            let _ = request_metadata.try_send_arc_stat();
+            let _ = web3_request.try_send_arc_stat();
 
             return (code, response, rpcs);
         }
@@ -1291,15 +1215,11 @@ impl Web3ProxyApp {
     /// TODO: how can we make this generic?
     async fn _proxy_request_with_caching(
         self: &Arc<Self>,
-        id: Box<RawValue>,
-        method: &str,
-        params: &mut serde_json::Value,
-        head_block: Option<&Web3ProxyBlock>,
-        request_metadata: &Arc<RequestMetadata>,
+        web3_request: &Arc<Web3Request>,
     ) -> Web3ProxyResult<jsonrpc::SingleResponse> {
         // TODO: serve net_version without querying the backend
         // TODO: don't force RawValue
-        let response: jsonrpc::SingleResponse = match method {
+        let response: jsonrpc::SingleResponse = match web3_request.request.method() {
             // lots of commands are blocked
             method @ ("db_getHex"
             | "db_getString"
@@ -1382,59 +1302,39 @@ impl Web3ProxyApp {
             | "eth_uninstallFilter") => {
                 return Err(Web3ProxyError::MethodNotFound(method.to_owned().into()));
             }
-            method @ ("eth_sendUserOperation"
+            "eth_sendUserOperation"
             | "eth_estimateUserOperationGas"
             | "eth_getUserOperationByHash"
             | "eth_getUserOperationReceipt"
             | "eth_supportedEntryPoints"
-            | "web3_bundlerVersion") => match self.bundler_4337_rpcs.as_ref() {
-                Some(bundler_4337_rpcs) => {
-                    bundler_4337_rpcs
-                        .try_proxy_connection::<_, Arc<RawValue>>(
-                            method,
-                            params,
-                            request_metadata,
-                            Some(Duration::from_secs(30)),
-                            None,
-                            None,
-                        )
-                        .await?
-                }
-                None => {
-                    // TODO: stats even when we error!
-                    // TODO: dedicated error for no 4337 bundlers
-                    return Err(Web3ProxyError::NoServersSynced);
-                }
-            },
-            // TODO: id
-            "eth_accounts" => jsonrpc::ParsedResponse::from_value(serde_json::Value::Array(vec![]), id).into(),
+            | "web3_bundlerVersion" => self.bundler_4337_rpcs
+                .try_proxy_connection::<Arc<RawValue>>(
+                    web3_request,
+                )
+                .await?,
+            "eth_accounts" => jsonrpc::ParsedResponse::from_value(serde_json::Value::Array(vec![]), web3_request.id()).into(),
             "eth_blockNumber" => {
-                match head_block.cloned().or(self.balanced_rpcs.head_block()) {
-                    Some(head_block) => jsonrpc::ParsedResponse::from_value(json!(head_block.number()), id).into(),
+                match web3_request.head_block.clone().or(self.balanced_rpcs.head_block()) {
+                    Some(head_block) => jsonrpc::ParsedResponse::from_value(json!(head_block.number()), web3_request.id()).into(),
                     None => {
                         return Err(Web3ProxyError::NoServersSynced);
                     }
                 }
             }
-            "eth_chainId" => jsonrpc::ParsedResponse::from_value(json!(U64::from(self.config.chain_id)), id).into(),
+            "eth_chainId" => jsonrpc::ParsedResponse::from_value(json!(U64::from(self.config.chain_id)), web3_request.id()).into(),
             // TODO: eth_callBundle (https://docs.flashbots.net/flashbots-auction/searchers/advanced/rpc-endpoint#eth_callbundle)
             // TODO: eth_cancelPrivateTransaction (https://docs.flashbots.net/flashbots-auction/searchers/advanced/rpc-endpoint#eth_cancelprivatetransaction, but maybe just reject)
             // TODO: eth_sendPrivateTransaction (https://docs.flashbots.net/flashbots-auction/searchers/advanced/rpc-endpoint#eth_sendprivatetransaction)
             "eth_coinbase" => {
                 // no need for serving coinbase
-                jsonrpc::ParsedResponse::from_value(json!(Address::zero()), id).into()
+                jsonrpc::ParsedResponse::from_value(json!(Address::zero()), web3_request.id()).into()
             }
             "eth_estimateGas" => {
                 // TODO: timeout
                 let mut gas_estimate = self
                     .balanced_rpcs
-                    .try_proxy_connection::<_, U256>(
-                        method,
-                        params,
-                        request_metadata,
-                        Some(Duration::from_secs(30)),
-                        None,
-                        None,
+                    .try_proxy_connection::<U256>(
+                        web3_request,
                     )
                     .await?
                     .parsed()
@@ -1455,63 +1355,61 @@ impl Web3ProxyApp {
 
                 gas_estimate += gas_increase;
 
+                let request_id = web3_request.id();
+
                 // TODO: from_serializable?
-                jsonrpc::ParsedResponse::from_value(json!(gas_estimate), id).into()
+                jsonrpc::ParsedResponse::from_value(json!(gas_estimate), request_id).into()
             }
             "eth_getTransactionReceipt" | "eth_getTransactionByHash" => {
                 // try to get the transaction without specifying a min_block_height
                 // TODO: timeout
 
-                let parsed = match self
+                let result = self
                     .balanced_rpcs
-                    .try_proxy_connection::<_, Arc<RawValue>>(
-                        method,
-                        params,
-                        request_metadata,
-                        Some(Duration::from_secs(30)),
-                        None,
-                        None,
+                    .try_proxy_connection::<serde_json::Value>(
+                        web3_request,
                     )
-                    .await {
-                        Ok(response) => response.parsed().await.map_err(Into::into),
-                        Err(err) => Err(err),
-                    };
+                    .await?
+                    .parsed()
+                    .await?
+                    .into_result();
 
-                // if we got "null", it is probably because the tx is old. retry on nodes with old block data
-                let try_archive = if let Ok(Some(value)) = parsed.as_ref().map(|r| r.result()) {
-                    value.get() == "null" || value.get() == "" || value.get() == "\"\""
-                } else {
-                    true
+                // if we got "null" or "", it is probably because the tx is old. retry on nodes with old block data
+                // TODO: this feels fragile. how should we do this better/
+                let try_archive = match &result {
+                    Ok(serde_json::Value::Null) => true,
+                    Ok(serde_json::Value::Array(x)) => x.is_empty(),
+                    Ok(serde_json::Value::String(x)) => x.is_empty(),
+                    Err(..) => true,
+                    _ => false,
                 };
 
-                if try_archive && let Some(head_block_num) = head_block.map(|x| x.number()) {
+                if try_archive {
                     // TODO: only charge for archive if it gave a result
-                    request_metadata
+                    web3_request
                         .archive_request
                         .store(true, atomic::Ordering::Relaxed);
 
                     self
                         .balanced_rpcs
-                        .try_proxy_connection::<_, Arc<RawValue>>(
-                            method,
-                            params,
-                            request_metadata,
-                            Some(Duration::from_secs(30)),
-                            // TODO: should this be block 0 instead?
-                            Some(&U64::one()),
-                            // TODO: is this a good way to allow lagged archive nodes a try
-                            Some(&head_block_num.saturating_sub(5.into()).clamp(U64::one(), U64::MAX)),
+                        .try_proxy_connection::<Arc<RawValue>>(
+                            web3_request,
+                            // Some(Duration::from_secs(30)),
+                            // // TODO: should this be block 0 instead?
+                            // Some(&U64::one()),
+                            // // TODO: is this a good way to allow lagged archive nodes a try
+                            // Some(&head_block_num.saturating_sub(5.into()).clamp(U64::one(), U64::MAX)),
                         )
                         .await?
                 } else {
-                    parsed?.into()
+                    jsonrpc::ParsedResponse::from_value(result?, web3_request.id()).into()
                 }
 
                 // TODO: if parsed is an error, return a null instead
             }
             // TODO: eth_gasPrice that does awesome magic to predict the future
-            "eth_hashrate" => jsonrpc::ParsedResponse::from_value(json!(U64::zero()), id).into(),
-            "eth_mining" => jsonrpc::ParsedResponse::from_value(serde_json::Value::Bool(false), id).into(),
+            "eth_hashrate" => jsonrpc::ParsedResponse::from_value(json!(U64::zero()), web3_request.id()).into(),
+            "eth_mining" => jsonrpc::ParsedResponse::from_value(serde_json::Value::Bool(false), web3_request.id()).into(),
             // TODO: eth_sendBundle (flashbots/eden command)
             // broadcast transactions to all private rpcs at once
             "eth_sendRawTransaction" => {
@ -1521,9 +1419,7 @@ impl Web3ProxyApp {

let response = self
.try_send_protected(
method,
params,
request_metadata,
web3_request,
).await;

let mut response = response.try_into()?;
@ -1536,7 +1432,7 @@ impl Web3ProxyApp {
&& (error_data.message == "ALREADY_EXISTS: already known"
|| error_data.message == "INTERNAL_ERROR: existing tx with same hash")
{
let params = params
let params = web3_request.request.params()
.as_array()
.ok_or_else(|| {
Web3ProxyError::BadRequest(
@ -1611,39 +1507,39 @@ impl Web3ProxyApp {
}
}

jsonrpc::ParsedResponse::from_response_data(response, id).into()
jsonrpc::ParsedResponse::from_response_data(response, web3_request.id()).into()
}
"eth_syncing" => {
// no stats on this. its cheap
// TODO: return a real response if all backends are syncing or if no servers in sync
// TODO: const
jsonrpc::ParsedResponse::from_value(serde_json::Value::Bool(false), id).into()
jsonrpc::ParsedResponse::from_value(serde_json::Value::Bool(false), web3_request.id()).into()
}
"eth_subscribe" => jsonrpc::ParsedResponse::from_error(JsonRpcErrorData {
message: "notifications not supported. eth_subscribe is only available over a websocket".into(),
code: -32601,
data: None,
}, id).into(),
}, web3_request.id()).into(),
"eth_unsubscribe" => jsonrpc::ParsedResponse::from_error(JsonRpcErrorData {
message: "notifications not supported. eth_unsubscribe is only available over a websocket".into(),
code: -32601,
data: None,
}, id).into(),
}, web3_request.id()).into(),
"net_listening" => {
// TODO: only true if there are some backends on balanced_rpcs?
// TODO: const
jsonrpc::ParsedResponse::from_value(serde_json::Value::Bool(true), id).into()
jsonrpc::ParsedResponse::from_value(serde_json::Value::Bool(true), web3_request.id()).into()
}
"net_peerCount" =>
jsonrpc::ParsedResponse::from_value(json!(U64::from(self.balanced_rpcs.num_synced_rpcs())), id).into()
jsonrpc::ParsedResponse::from_value(json!(U64::from(self.balanced_rpcs.num_synced_rpcs())), web3_request.id()).into()
,
"web3_clientVersion" =>
jsonrpc::ParsedResponse::from_value(serde_json::Value::String(APP_USER_AGENT.to_string()), id).into()
jsonrpc::ParsedResponse::from_value(serde_json::Value::String(APP_USER_AGENT.to_string()), web3_request.id()).into()
,
"web3_sha3" => {
// returns Keccak-256 (not the standardized SHA3-256) of the given data.
// TODO: timeout
match &params {
match &web3_request.request.params() {
serde_json::Value::Array(params) => {
// TODO: make a struct and use serde conversion to clean this up
if params.len() != 1
@ -1655,7 +1551,7 @@ impl Web3ProxyApp {
message: "Invalid request".into(),
code: -32600,
data: None
}, id).into()
}, web3_request.id()).into()
} else {
// TODO: BadRequest instead of web3_context
let param = Bytes::from_str(
@ -1673,7 +1569,7 @@ impl Web3ProxyApp {

let hash = H256::from(keccak256(param));

jsonrpc::ParsedResponse::from_value(json!(hash), id).into()
jsonrpc::ParsedResponse::from_value(json!(hash), web3_request.id()).into()
}
}
_ => {
@ -1683,7 +1579,7 @@ impl Web3ProxyApp {
message: "invalid request".into(),
code: StatusCode::BAD_REQUEST.as_u16().into(),
data: None,
}, id).into()
}, web3_request.id()).into()
}
}
}
@ -1691,7 +1587,7 @@ impl Web3ProxyApp {
message: "The method test does not exist/is not available.".into(),
code: -32601,
data: None,
}, id).into(),
}, web3_request.id()).into(),
// anything else gets sent to backend rpcs and cached
method => {
if method.starts_with("admin_") {
@ -1705,111 +1601,29 @@ impl Web3ProxyApp {
)).into());
}

// TODO: if no servers synced, wait for them to be synced? probably better to error and let haproxy retry another server
let head_block: Web3ProxyBlock = head_block
.cloned()
.or_else(|| self.balanced_rpcs.head_block())
.ok_or(Web3ProxyError::NoServersSynced)?;

// we do this check before checking caches because it might modify the request params
// TODO: add a stat for archive vs full since they should probably cost different
// TODO: this cache key can be rather large. is that okay?
let cache_key: Option<JsonRpcQueryCacheKey> = match CacheMode::new(
method,
params,
&head_block,
&self.balanced_rpcs,
)
.await
{
CacheMode::CacheSuccessForever => Some(JsonRpcQueryCacheKey::new(
None,
None,
method,
params,
false,
)),
CacheMode::CacheNever => None,
CacheMode::Cache {
block,
cache_errors,
} => {
let block_depth = (head_block.number().saturating_sub(*block.num())).as_u64();

if block_depth > self.config.archive_depth {
trace!(%block_depth, archive_depth=%self.config.archive_depth);

request_metadata
.archive_request
.store(true, atomic::Ordering::Relaxed);
}

Some(JsonRpcQueryCacheKey::new(
Some(block),
None,
method,
params,
cache_errors,
))
}
CacheMode::CacheRange {
from_block,
to_block,
cache_errors,
} => {
let block_depth = (head_block.number().saturating_sub(*from_block.num())).as_u64();

if block_depth > self.config.archive_depth {
trace!(%block_depth, archive_depth=%self.config.archive_depth);

request_metadata
.archive_request
.store(true, atomic::Ordering::Relaxed);
}

Some(JsonRpcQueryCacheKey::new(
Some(from_block),
Some(to_block),
method,
params,
cache_errors,
))
}
};

// TODO: think more about this timeout. we should probably have a `request_expires_at` Duration on the request_metadata
// TODO: different user tiers should have different timeouts
// erigon's timeout is 300, so keep this a few seconds shorter
let max_wait = Some(Duration::from_secs(290));

if let Some(cache_key) = cache_key {
let from_block_num = cache_key.from_block_num().copied();
let to_block_num = cache_key.to_block_num().copied();
let cache_jsonrpc_errors = cache_key.cache_errors();
let cache_key_hash = cache_key.hash();
// TODO: why is this clone needed?
let web3_request = web3_request.clone();

if web3_request.cache_mode.is_some() {
// don't cache anything larger than 16 MiB
let max_response_cache_bytes = 16 * (1024 ^ 2); // self.config.max_response_cache_bytes;

let cache_key = web3_request.cache_key().expect("key must exist if cache_mode does");

// TODO: try to fetch out of s3

let x: SingleResponse = if let Some(data) = self.jsonrpc_response_cache.get(&cache_key_hash).await {
let x: SingleResponse = if let Some(data) = self.jsonrpc_response_cache.get(&cache_key).await {
// it was cached! easy!
// TODO: wait. this currently panics. why?
jsonrpc::ParsedResponse::from_response_data(data, id).into()
} else if self.jsonrpc_response_failed_cache_keys.contains_key(&cache_key_hash) {
jsonrpc::ParsedResponse::from_response_data(data, web3_request.id()).into()
} else if self.jsonrpc_response_failed_cache_keys.contains_key(&cache_key) {
// this is a cache_key that we know won't cache
// NOTICE! We do **NOT** use get which means the key's hotness is not updated. we don't use time-to-idle here so that's fine. but be careful if that changes
timeout(
Duration::from_secs(295),
web3_request.ttl(),
self.balanced_rpcs
.try_proxy_connection::<_, Arc<RawValue>>(
method,
params,
request_metadata,
max_wait,
None,
None,
.try_proxy_connection::<Arc<RawValue>>(
&web3_request,
)
).await??
} else {
@ -1818,53 +1632,44 @@ impl Web3ProxyApp {
// TODO: if we got the semaphore, do the try_get_with
// TODO: if the response is too big to cache mark the cache_key as not cacheable. maybe CacheMode can check that cache?

let s = self.jsonrpc_response_semaphores.get_with(cache_key_hash, async move {
let s = self.jsonrpc_response_semaphores.get_with(cache_key, async move {
Arc::new(Semaphore::new(1))
}).await;

// TODO: don't always do 1 second. use the median request latency instead
match timeout(Duration::from_secs(1), s.acquire_owned()).await {
let mut x = match timeout(Duration::from_secs(1), s.acquire_owned()).await {
Err(_) => {
// TODO: should we try to cache this? whatever has the semaphore //should// handle that for us
timeout(
Duration::from_secs(295),
web3_request.ttl(),
self.balanced_rpcs
.try_proxy_connection::<_, Arc<RawValue>>(
method,
params,
request_metadata,
max_wait,
None,
None,
.try_proxy_connection::<Arc<RawValue>>(
&web3_request,
)
).await??
}
Ok(_p) => {
// we got the permit! we are either first, or we were waiting a short time to get it in which case this response should be cached
// TODO: clone less?
// TODO: clone less? its spawned so i don't think we can
let f = {
let app = self.clone();
let method = method.to_string();
let params = params.clone();
let request_metadata = request_metadata.clone();
let web3_request = web3_request.clone();

async move {
app
.jsonrpc_response_cache
.try_get_with::<_, Web3ProxyError>(cache_key.hash(), async {
let response_data = timeout(Duration::from_secs(290), app.balanced_rpcs
.try_proxy_connection::<_, Arc<RawValue>>(
&method,
&params,
&request_metadata,
max_wait,
from_block_num.as_ref(),
to_block_num.as_ref(),
.try_get_with::<_, Web3ProxyError>(cache_key, async {
let duration = web3_request.ttl().saturating_sub(Duration::from_secs(1));

// TODO: dynamic timeout based on whats left on web3_request
let response_data = timeout(duration, app.balanced_rpcs
.try_proxy_connection::<Arc<RawValue>>(
&web3_request,
)).await;

match response_data {
Ok(response_data) => {
if !cache_jsonrpc_errors && let Err(err) = response_data {
if !web3_request.cache_jsonrpc_errors() && let Err(err) = response_data {
// if we are not supposed to cache jsonrpc errors,
// then we must not convert Provider errors into a JsonRpcResponseEnum
// return all the errors now. moka will not cache Err results
@ -1894,10 +1699,10 @@ impl Web3ProxyApp {

// this is spawned so that if the client disconnects, the app keeps polling the future with a lock inside the moka cache
// TODO: is this expect actually safe!? could there be a background process that still has the arc?
match tokio::spawn(f).await? {
Ok(response_data) => Ok(jsonrpc::ParsedResponse::from_response_data(response_data, id).into()),
let mut x = match tokio::spawn(f).await? {
Ok(response_data) => Ok(jsonrpc::ParsedResponse::from_response_data(response_data, Default::default()).into()),
Err(err) => {
self.jsonrpc_response_failed_cache_keys.insert(cache_key_hash, ()).await;
self.jsonrpc_response_failed_cache_keys.insert(cache_key, ()).await;

if let Web3ProxyError::StreamResponse(x) = err.as_ref() {
let x = x.lock().take().expect("stream processing should only happen once");
@ -1907,25 +1712,33 @@ impl Web3ProxyApp {
Err(err)
}
},
}?
}?;

// clear the id. there's no point including it in our cached response
x.set_id(Default::default());

x
}
}
};

x.set_id(web3_request.id());

x
};

x
} else {
timeout(
Duration::from_secs(295),
let mut x = timeout(
web3_request.ttl(),
self.balanced_rpcs
.try_proxy_connection::<_, Arc<RawValue>>(
method,
params,
request_metadata,
max_wait,
None,
None,
.try_proxy_connection::<Arc<RawValue>>(
&web3_request,
)
).await??
).await??;

x.set_id(web3_request.id());

x
}
}
};

@ -2,8 +2,8 @@

use super::Web3ProxyApp;
use crate::errors::{Web3ProxyError, Web3ProxyResult};
use crate::frontend::authorization::{Authorization, RequestMetadata, RequestOrMethod};
use crate::jsonrpc::{self, JsonRpcRequest};
use crate::frontend::authorization::{RequestOrMethod, Web3Request};
use crate::jsonrpc;
use crate::response_cache::JsonRpcResponseEnum;
use axum::extract::ws::{CloseFrame, Message};
use deferred_rate_limiter::DeferredRateLimitResult;
@ -24,14 +24,14 @@ use tracing::{error, trace};
impl Web3ProxyApp {
pub async fn eth_subscribe<'a>(
self: &'a Arc<Self>,
authorization: Arc<Authorization>,
jsonrpc_request: JsonRpcRequest,
web3_request: Arc<Web3Request>,
subscription_count: &'a AtomicU64,
// TODO: taking a sender for Message instead of the exact json we are planning to send feels wrong, but its easier for now
response_sender: mpsc::Sender<Message>,
) -> Web3ProxyResult<(AbortHandle, jsonrpc::ParsedResponse)> {
let subscribe_to = jsonrpc_request
.params
let subscribe_to = web3_request
.request
.params()
.get(0)
.and_then(|x| x.as_str())
.ok_or_else(|| {
@ -42,21 +42,13 @@ impl Web3ProxyApp {
// only premium users are allowed to subscribe to the other things
if !(self.config.free_subscriptions
|| subscribe_to == "newHeads"
|| authorization.active_premium().await)
|| web3_request.authorization.active_premium().await)
{
return Err(Web3ProxyError::AccessDenied(
"eth_subscribe for this event requires an active premium account".into(),
));
}

let request_metadata = RequestMetadata::new(
self,
authorization.clone(),
RequestOrMethod::Request(&jsonrpc_request),
None,
)
.await;

let (subscription_abort_handle, subscription_registration) = AbortHandle::new_pair();

// TODO: this only needs to be unique per connection. we don't need it globally unique
@ -64,9 +56,6 @@ impl Web3ProxyApp {
let subscription_id = subscription_count.fetch_add(1, atomic::Ordering::SeqCst);
let subscription_id = U64::from(subscription_id);

// save the id so we can use it in the response
let id = jsonrpc_request.id.clone();

// TODO: calling `json!` on every request is probably not fast. but it works for now
// TODO: i think we need a stricter EthSubscribeRequest type that JsonRpcRequest can turn into
// TODO: DRY This up. lots of duplication between newHeads and newPendingTransactions
@ -74,6 +63,7 @@ impl Web3ProxyApp {
"newHeads" => {
let head_block_receiver = self.watch_consensus_head_receiver.clone();
let app = self.clone();
let authorization = web3_request.authorization.clone();

tokio::spawn(async move {
trace!("newHeads subscription {:?}", subscription_id);
@ -90,16 +80,17 @@ impl Web3ProxyApp {
continue;
};

let subscription_request_metadata = RequestMetadata::new(
let subscription_web3_request = Web3Request::new_with_app(
&app,
authorization.clone(),
RequestOrMethod::Method("eth_subscribe(newHeads)", 0),
Some(&new_head),
None,
RequestOrMethod::Method("eth_subscribe(newHeads)".into(), 0),
Some(new_head),
)
.await;

if let Some(close_message) = app
.rate_limit_close_websocket(&subscription_request_metadata)
.rate_limit_close_websocket(&subscription_web3_request)
.await
{
let _ = response_sender.send(close_message).await;
@ -113,7 +104,7 @@ impl Web3ProxyApp {
"params": {
"subscription": subscription_id,
// TODO: option to include full transaction objects instead of just the hashes?
"result": new_head.block,
"result": subscription_web3_request.head_block,
},
});

@ -133,7 +124,7 @@ impl Web3ProxyApp {
break;
};

subscription_request_metadata.add_response(response_bytes);
subscription_web3_request.add_response(response_bytes);
}

trace!("closed newHeads subscription {:?}", subscription_id);
@ -143,6 +134,7 @@ impl Web3ProxyApp {
"newPendingTransactions" => {
let pending_txid_firehose = self.pending_txid_firehose.subscribe();
let app = self.clone();
let authorization = web3_request.authorization.clone();

tokio::spawn(async move {
let mut pending_txid_firehose = Abortable::new(
@ -152,17 +144,21 @@ impl Web3ProxyApp {

while let Some(Ok(new_txid)) = pending_txid_firehose.next().await {
// TODO: include the head_block here?
let subscription_request_metadata = RequestMetadata::new(
let subscription_web3_request = Web3Request::new_with_app(
&app,
authorization.clone(),
RequestOrMethod::Method("eth_subscribe(newPendingTransactions)", 0),
None,
RequestOrMethod::Method(
"eth_subscribe(newPendingTransactions)".into(),
0,
),
None,
)
.await;

// check if we should close the websocket connection
if let Some(close_message) = app
.rate_limit_close_websocket(&subscription_request_metadata)
.rate_limit_close_websocket(&subscription_web3_request)
.await
{
let _ = response_sender.send(close_message).await;
@ -185,7 +181,7 @@ impl Web3ProxyApp {
// we could use JsonRpcForwardedResponseEnum::num_bytes() here, but since we already have the string, this is easier
let response_bytes = response_str.len();

subscription_request_metadata.add_response(response_bytes);
subscription_web3_request.add_response(response_bytes);

// TODO: do clients support binary messages?
// TODO: can we check a content type header?
@ -216,23 +212,21 @@ impl Web3ProxyApp {

let response_data = JsonRpcResponseEnum::from(json!(subscription_id));

let response = jsonrpc::ParsedResponse::from_response_data(response_data, id);
let response =
jsonrpc::ParsedResponse::from_response_data(response_data, web3_request.id());

// TODO: better way of passing in ParsedResponse
let response = jsonrpc::SingleResponse::Parsed(response);
// TODO: this serializes twice
request_metadata.add_response(&response);
web3_request.add_response(&response);
let response = response.parsed().await.expect("Response already parsed");

// TODO: make a `SubscriptionHandle(AbortHandle, JoinHandle)` struct?
Ok((subscription_abort_handle, response))
}

async fn rate_limit_close_websocket(
&self,
request_metadata: &RequestMetadata,
) -> Option<Message> {
let authorization = &request_metadata.authorization;
async fn rate_limit_close_websocket(&self, web3_request: &Web3Request) -> Option<Message> {
let authorization = &web3_request.authorization;

if !authorization.active_premium().await {
if let Some(rate_limiter) = &self.frontend_public_rate_limiter {

@ -1,10 +1,14 @@
//! Helper functions for turning ether's BlockNumber into numbers and updating incoming queries to match.
use crate::rpcs::many::Web3Rpcs;
use std::time::Duration;

use crate::app::Web3ProxyApp;
use crate::jsonrpc::JsonRpcRequest;
use crate::{
errors::{Web3ProxyError, Web3ProxyResult},
rpcs::blockchain::Web3ProxyBlock,
};
use anyhow::Context;
use async_recursion::async_recursion;
use derive_more::From;
use ethers::{
prelude::{BlockNumber, U64},
@ -14,16 +18,16 @@ use serde_json::json;
use tracing::{error, trace, warn};

#[allow(non_snake_case)]
pub fn BlockNumber_to_U64(block_num: BlockNumber, latest_block: &U64) -> (U64, bool) {
pub fn BlockNumber_to_U64(block_num: BlockNumber, latest_block: U64) -> (U64, bool) {
match block_num {
BlockNumber::Earliest => (U64::zero(), false),
BlockNumber::Finalized => {
warn!("finalized block requested! not yet implemented!");
(*latest_block - 10, false)
(latest_block - 10, false)
}
BlockNumber::Latest => {
// change "latest" to a number
(*latest_block, true)
(latest_block, true)
}
BlockNumber::Number(x) => {
// we already have a number
@ -32,16 +36,16 @@ pub fn BlockNumber_to_U64(block_num: BlockNumber, latest_block: &U64) -> (U64, b
BlockNumber::Pending => {
// modified is false because we want the backend to see "pending"
// TODO: think more about how to handle Pending
(*latest_block, false)
(latest_block, false)
}
BlockNumber::Safe => {
warn!("safe block requested! not yet implemented!");
(*latest_block - 3, false)
(latest_block - 3, false)
}
}
}

#[derive(Clone, Debug, Eq, From, PartialEq)]
#[derive(Clone, Debug, Eq, From, Hash, PartialEq)]
pub struct BlockNumAndHash(U64, H256);

impl BlockNumAndHash {
@ -55,7 +59,7 @@ impl BlockNumAndHash {

impl From<&Web3ProxyBlock> for BlockNumAndHash {
fn from(value: &Web3ProxyBlock) -> Self {
let n = *value.number();
let n = value.number();
let h = *value.hash();

Self(n, h)
@ -64,11 +68,12 @@ impl From<&Web3ProxyBlock> for BlockNumAndHash {

/// modify params to always have a block hash and not "latest"
/// TODO: this should replace all block numbers with hashes, not just "latest"
pub async fn clean_block_number(
params: &mut serde_json::Value,
#[async_recursion]
pub async fn clean_block_number<'a>(
params: &'a mut serde_json::Value,
block_param_id: usize,
latest_block: &Web3ProxyBlock,
rpcs: &Web3Rpcs,
head_block: &'a Web3ProxyBlock,
app: Option<&'a Web3ProxyApp>,
) -> Web3ProxyResult<BlockNumAndHash> {
match params.as_array_mut() {
None => {
@ -79,7 +84,7 @@ pub async fn clean_block_number(
None => {
if params.len() == block_param_id {
// add the latest block number to the end of the params
params.push(json!(latest_block.number()));
params.push(json!(head_block.number()));
} else {
// don't modify the request. only cache with current block
// TODO: more useful log that include the
@ -87,7 +92,7 @@ pub async fn clean_block_number(
}

// don't modify params, just cache with the current block
Ok(latest_block.into())
Ok(head_block.into())
}
Some(x) => {
// dig into the json value to find a BlockNumber or similar block identifier
@ -99,12 +104,22 @@ pub async fn clean_block_number(
let block_hash: H256 =
serde_json::from_value(block_hash).context("decoding blockHash")?;

let block = rpcs
.block(&block_hash, None, None)
.await
.context("fetching block number from hash")?;
if block_hash == *head_block.hash() {
(head_block.into(), false)
} else if let Some(app) = app {
let block = app
.balanced_rpcs
.block(&block_hash, None, None)
.await
.context("fetching block number from hash")?;

(BlockNumAndHash::from(&block), false)
(BlockNumAndHash::from(&block), false)
} else {
return Err(anyhow::anyhow!(
"app missing. cannot find block number from hash"
)
.into());
}
} else {
return Err(anyhow::anyhow!("blockHash missing").into());
}
@ -112,59 +127,69 @@ pub async fn clean_block_number(
// it might be a string like "latest" or a block number or a block hash
// TODO: "BlockNumber" needs a better name
// TODO: move this to a helper function?
if let Ok(block_num) = serde_json::from_value::<U64>(x.clone()) {
let head_block_num = *latest_block.number();
let (block_num, changed) = if let Some(block_num) = x.as_u64() {
(U64::from(block_num), false)
} else if let Ok(block_num) = serde_json::from_value::<U64>(x.to_owned()) {
(block_num, false)
} else if let Ok(block_number) =
serde_json::from_value::<BlockNumber>(x.to_owned())
{
BlockNumber_to_U64(block_number, head_block.number())
} else if let Ok(block_hash) = serde_json::from_value::<H256>(x.clone()) {
if block_hash == *head_block.hash() {
(head_block.number(), false)
} else if let Some(app) = app {
// TODO: what should this max_wait be?
let block = app
.balanced_rpcs
.block(&block_hash, None, Some(Duration::from_secs(3)))
.await
.context("fetching block number from hash")?;

if block_num > head_block_num {
return Err(Web3ProxyError::UnknownBlockNumber {
known: head_block_num,
unknown: block_num,
});
(block.number(), false)
} else {
return Err(anyhow::anyhow!(
"app missing. cannot find block number from hash"
)
.into());
}
} else {
return Err(anyhow::anyhow!(
"param not a block identifier, block number, or block hash"
)
.into());
};

let block_hash = rpcs
let head_block_num = head_block.number();

if block_num > head_block_num {
// TODO: option to wait for the block
return Err(Web3ProxyError::UnknownBlockNumber {
known: head_block_num,
unknown: block_num,
});
}

if block_num == head_block_num {
(head_block.into(), changed)
} else if let Some(app) = app {
let block_hash = app
.balanced_rpcs
.block_hash(&block_num)
.await
.context("fetching block hash from number")?;

let block = rpcs
let block = app
.balanced_rpcs
.block(&block_hash, None, None)
.await
.context("fetching block from hash")?;

// TODO: do true here? will that work for **all** methods on **all** chains? if not we need something smarter
(BlockNumAndHash::from(&block), false)
} else if let Ok(block_number) =
serde_json::from_value::<BlockNumber>(x.clone())
{
let (block_num, change) =
BlockNumber_to_U64(block_number, latest_block.number());

if block_num == *latest_block.number() {
(latest_block.into(), change)
} else {
let block_hash = rpcs
.block_hash(&block_num)
.await
.context("fetching block hash from number")?;

let block = rpcs
.block(&block_hash, None, None)
.await
.context("fetching block from hash")?;

(BlockNumAndHash::from(&block), change)
}
} else if let Ok(block_hash) = serde_json::from_value::<H256>(x.clone()) {
let block = rpcs
.block(&block_hash, None, None)
.await
.context("fetching block number from hash")?;

(BlockNumAndHash::from(&block), false)
(BlockNumAndHash::from(&block), changed)
} else {
return Err(anyhow::anyhow!(
"param not a block identifier, block number, or block hash"
"app missing. cannot find block number from hash"
)
.into());
}
@ -184,21 +209,23 @@ pub async fn clean_block_number(
}

/// TODO: change this to also return the hash needed?
#[derive(Debug, Eq, PartialEq)]
/// this replaces any "latest" identifiers in the JsonRpcRequest with the current block number which feels like the data is structured wrong
#[derive(Debug, Default, Hash, Eq, PartialEq)]
pub enum CacheMode {
CacheSuccessForever,
CacheNever,
Cache {
SuccessForever,
Standard {
block: BlockNumAndHash,
/// cache jsonrpc errors (server errors are never cached)
cache_errors: bool,
},
CacheRange {
Range {
from_block: BlockNumAndHash,
to_block: BlockNumAndHash,
/// cache jsonrpc errors (server errors are never cached)
cache_errors: bool,
},
#[default]
Never,
}

fn get_block_param_id(method: &str) -> Option<usize> {
@ -227,61 +254,92 @@ fn get_block_param_id(method: &str) -> Option<usize> {
}

impl CacheMode {
pub async fn new(
method: &str,
params: &mut serde_json::Value,
head_block: &Web3ProxyBlock,
rpcs: &Web3Rpcs,
/// like `try_new`, but instead of erroring, it will default to caching with the head block
/// returns None if this request should not be cached
pub async fn new<'a>(
request: &'a mut JsonRpcRequest,
head_block: Option<&'a Web3ProxyBlock>,
app: Option<&'a Web3ProxyApp>,
) -> Self {
match Self::try_new(method, params, head_block, rpcs).await {
match Self::try_new(request, head_block, app).await {
Ok(x) => x,
Err(Web3ProxyError::NoBlocksKnown) => {
warn!(%method, ?params, "no servers available to get block from params. caching with head block");
CacheMode::Cache {
block: head_block.into(),
cache_errors: true,
warn!(
method = %request.method,
params = ?request.params,
"no servers available to get block from params. caching with head block"
);
if let Some(head_block) = head_block {
// TODO: strange to get NoBlocksKnown **and** have a head block. think about this more
CacheMode::Standard {
block: head_block.into(),
cache_errors: true,
}
} else {
CacheMode::Never
}
}
Err(err) => {
error!(%method, ?params, ?err, "could not get block from params. caching with head block");
CacheMode::Cache {
block: head_block.into(),
cache_errors: true,
error!(
method = %request.method,
params = ?request.params,
?err,
"could not get block from params. caching with head block"
);
if let Some(head_block) = head_block {
CacheMode::Standard {
block: head_block.into(),
cache_errors: true,
}
} else {
CacheMode::Never
}
}
}
}

pub async fn try_new(
method: &str,
params: &mut serde_json::Value,
head_block: &Web3ProxyBlock,
rpcs: &Web3Rpcs,
request: &mut JsonRpcRequest,
head_block: Option<&Web3ProxyBlock>,
app: Option<&Web3ProxyApp>,
) -> Web3ProxyResult<Self> {
let params = &mut request.params;

if matches!(params, serde_json::Value::Null) {
// no params given. cache with the head block
return Ok(Self::Cache {
block: head_block.into(),
cache_errors: true,
});
if let Some(head_block) = head_block {
return Ok(Self::Standard {
block: head_block.into(),
cache_errors: true,
});
} else {
return Ok(Self::Never);
}
}

if head_block.is_none() {
// since we don't have a head block, i don't trust anything enough to cache
return Ok(Self::Never);
}

let head_block = head_block.expect("head_block was just checked above");

if let Some(params) = params.as_array() {
if params.is_empty() {
// no params given. cache with the head block
return Ok(Self::Cache {
return Ok(Self::Standard {
block: head_block.into(),
cache_errors: true,
});
}
}

match method {
match request.method.as_str() {
"debug_traceTransaction" => {
// TODO: make sure re-orgs work properly!
Ok(CacheMode::CacheSuccessForever)
Ok(CacheMode::SuccessForever)
}
"eth_gasPrice" => Ok(CacheMode::Cache {
"eth_gasPrice" => Ok(CacheMode::Standard {
block: head_block.into(),
cache_errors: false,
}),
@ -289,24 +347,24 @@ impl CacheMode {
// TODO: double check that any node can serve this
// TODO: can a block change? like what if it gets orphaned?
// TODO: make sure re-orgs work properly!
Ok(CacheMode::CacheSuccessForever)
Ok(CacheMode::SuccessForever)
}
"eth_getBlockByNumber" => {
// TODO: double check that any node can serve this
// TODO: CacheSuccessForever if the block is old enough
// TODO: make sure re-orgs work properly!
Ok(CacheMode::Cache {
Ok(CacheMode::Standard {
block: head_block.into(),
cache_errors: true,
})
}
"eth_getBlockTransactionCountByHash" => {
// TODO: double check that any node can serve this
Ok(CacheMode::CacheSuccessForever)
Ok(CacheMode::SuccessForever)
}
"eth_getLogs" => {
/*
// TODO: think about this more. this seems like it partly belongs in clean_block_number
// TODO: think about this more
// TODO: jsonrpc has a specific code for this
let obj = params
.get_mut(0)
@ -367,7 +425,7 @@ impl CacheMode {
})
}
*/
Ok(CacheMode::Cache {
Ok(CacheMode::Standard {
block: head_block.into(),
cache_errors: true,
})
@ -375,7 +433,7 @@ impl CacheMode {
"eth_getTransactionByHash" => {
// TODO: not sure how best to look these up
// try full nodes first. retry will use archive
Ok(CacheMode::Cache {
Ok(CacheMode::Standard {
block: head_block.into(),
cache_errors: true,
})
@ -383,12 +441,12 @@ impl CacheMode {
"eth_getTransactionByBlockHashAndIndex" => {
// TODO: check a Cache of recent hashes
// try full nodes first. retry will use archive
Ok(CacheMode::CacheSuccessForever)
Ok(CacheMode::SuccessForever)
}
"eth_getTransactionReceipt" => {
// TODO: not sure how best to look these up
// try full nodes first. retry will use archive
Ok(CacheMode::Cache {
Ok(CacheMode::Standard {
block: head_block.into(),
cache_errors: true,
})
@ -397,29 +455,28 @@ impl CacheMode {
// TODO: check a Cache of recent hashes
// try full nodes first. retry will use archive
// TODO: what happens if this block is uncled later?
Ok(CacheMode::CacheSuccessForever)
Ok(CacheMode::SuccessForever)
}
"eth_getUncleCountByBlockHash" => {
// TODO: check a Cache of recent hashes
// try full nodes first. retry will use archive
// TODO: what happens if this block is uncled later?
Ok(CacheMode::CacheSuccessForever)
Ok(CacheMode::SuccessForever)
}
"eth_maxPriorityFeePerGas" => {
// TODO: this might be too aggressive. i think it can change before a block is mined
Ok(CacheMode::Cache {
Ok(CacheMode::Standard {
block: head_block.into(),
cache_errors: false,
})
}
"net_listening" => Ok(CacheMode::CacheSuccessForever),
"net_version" => Ok(CacheMode::CacheSuccessForever),
"net_listening" => Ok(CacheMode::SuccessForever),
"net_version" => Ok(CacheMode::SuccessForever),
method => match get_block_param_id(method) {
Some(block_param_id) => {
let block =
clean_block_number(params, block_param_id, head_block, rpcs).await?;
let block = clean_block_number(params, block_param_id, head_block, app).await?;

Ok(CacheMode::Cache {
Ok(CacheMode::Standard {
block,
cache_errors: true,
})
@ -428,12 +485,48 @@ impl CacheMode {
},
}
}

pub fn cache_jsonrpc_errors(&self) -> bool {
match self {
Self::Never => false,
Self::SuccessForever => true,
Self::Standard { cache_errors, .. } => *cache_errors,
Self::Range { cache_errors, .. } => *cache_errors,
}
}

pub fn from_block(&self) -> Option<&BlockNumAndHash> {
match self {
Self::SuccessForever => None,
Self::Never => None,
Self::Standard { block, .. } => Some(block),
Self::Range { from_block, .. } => Some(from_block),
}
}

#[inline]
pub fn is_some(&self) -> bool {
!matches!(self, Self::Never)
}

pub fn to_block(&self) -> Option<&BlockNumAndHash> {
match self {
Self::SuccessForever => None,
Self::Never => None,
Self::Standard { block, .. } => Some(block),
Self::Range { to_block, .. } => Some(to_block),
}
}
}

#[cfg(test)]
mod test {
use super::CacheMode;
use crate::rpcs::{blockchain::Web3ProxyBlock, many::Web3Rpcs};
use crate::{
errors::Web3ProxyError,
jsonrpc::{JsonRpcId, JsonRpcRequest},
rpcs::blockchain::Web3ProxyBlock,
};
use ethers::types::{Block, H256};
use serde_json::json;
use std::sync::Arc;
@ -441,7 +534,7 @@ mod test {
#[test_log::test(tokio::test)]
async fn test_fee_history() {
let method = "eth_feeHistory";
let mut params = json!([4, "latest", [25, 75]]);
let params = json!([4, "latest", [25, 75]]);

let head_block = Block {
number: Some(1.into()),
@ -451,32 +544,32 @@ mod test {

let head_block = Web3ProxyBlock::try_new(Arc::new(head_block)).unwrap();

let (empty, _handle, _ranked_rpc_reciver) =
Web3Rpcs::spawn(1, None, 1, 1, "test".into(), None, None)
.await
.unwrap();
let id = JsonRpcId::Number(9);

let x = CacheMode::try_new(method, &mut params, &head_block, &empty)
let mut request = JsonRpcRequest::new(id, method.to_string(), params).unwrap();

// TODO: instead of empty, check None?
let x = CacheMode::try_new(&mut request, Some(&head_block), None)
.await
.unwrap();

assert_eq!(
x,
CacheMode::Cache {
CacheMode::Standard {
block: (&head_block).into(),
cache_errors: true
}
);

// "latest" should have been changed to the block number
assert_eq!(params.get(1), Some(&json!(head_block.number())));
assert_eq!(request.params.get(1), Some(&json!(head_block.number())));
}

#[test_log::test(tokio::test)]
async fn test_eth_call_latest() {
let method = "eth_call";

let mut params = json!([{"data": "0xdeadbeef", "to": "0x0000000000000000000000000000000000000000"}, "latest"]);
let params = json!([{"data": "0xdeadbeef", "to": "0x0000000000000000000000000000000000000000"}, "latest"]);

let head_block = Block {
number: Some(18173997.into()),
@ -486,24 +579,61 @@ mod test {

let head_block = Web3ProxyBlock::try_new(Arc::new(head_block)).unwrap();

let (empty, _handle, _ranked_rpc_reciver) =
Web3Rpcs::spawn(1, None, 1, 1, "test".into(), None, None)
.await
.unwrap();
let id = JsonRpcId::Number(99);

let x = CacheMode::try_new(method, &mut params, &head_block, &empty)
let mut request = JsonRpcRequest::new(id, method.to_string(), params).unwrap();

let x = CacheMode::try_new(&mut request, Some(&head_block), None)
.await
.unwrap();

// "latest" should have been changed to the block number
assert_eq!(params.get(1), Some(&json!(head_block.number())));
assert_eq!(request.params.get(1), Some(&json!(head_block.number())));

assert_eq!(
x,
CacheMode::Cache {
CacheMode::Standard {
block: (&head_block).into(),
cache_errors: true
}
);
}

#[test_log::test(tokio::test)]
async fn test_eth_call_future() {
let method = "eth_call";

let head_block_num = 18173997u64;
let future_block_num = head_block_num + 1;

let params = json!([{"data": "0xdeadbeef", "to": "0x0000000000000000000000000000000000000000"}, future_block_num]);

let head_block: Block<H256> = Block {
number: Some(head_block_num.into()),
hash: Some(H256::random()),
..Default::default()
};

let head_block = Web3ProxyBlock::try_new(Arc::new(head_block)).unwrap();

let mut request = JsonRpcRequest::new(99.into(), method.to_string(), params).unwrap();

let x = CacheMode::try_new(&mut request, Some(&head_block), None)
.await
.unwrap_err();

// future blocks should get an error
match x {
Web3ProxyError::UnknownBlockNumber { known, unknown } => {
assert_eq!(known.as_u64(), head_block_num);
assert_eq!(unknown.as_u64(), future_block_num);
}
x => panic!("{:?}", x),
}

let x = CacheMode::new(&mut request, Some(&head_block), None).await;

// TODO: cache with the head block instead?
matches!(x, CacheMode::Never);
}
}

@ -1,6 +1,7 @@
use crate::balance::Balance;
use crate::errors::{Web3ProxyError, Web3ProxyResult};
use crate::frontend::authorization::{AuthorizationChecks, RpcSecretKey};
use crate::frontend::authorization::AuthorizationChecks;
use crate::secrets::RpcSecretKey;
use derive_more::From;
use entities::rpc_key;
use migration::sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};

@ -48,8 +48,10 @@ pub struct CliConfig {
pub struct TopConfig {
pub app: AppConfig,
pub balanced_rpcs: HashMap<String, Web3RpcConfig>,
pub private_rpcs: Option<HashMap<String, Web3RpcConfig>>,
pub bundler_4337_rpcs: Option<HashMap<String, Web3RpcConfig>>,
#[serde(default = "Default::default")]
pub private_rpcs: HashMap<String, Web3RpcConfig>,
#[serde(default = "Default::default")]
pub bundler_4337_rpcs: HashMap<String, Web3RpcConfig>,
/// unknown config options get put here
#[serde(flatten, default = "HashMap::default")]
pub extra: HashMap<String, serde_json::Value>,
@ -292,6 +294,8 @@ pub fn average_block_interval(chain_id: u64) -> Duration {
8453 => Duration::from_secs(2),
// arbitrum
42161 => Duration::from_millis(500),
// web3-proxy tests
999_001_999 => Duration::from_secs(10),
// anything else
_ => {
let default = 10;

@ -59,7 +59,7 @@ pub async fn admin_increase_balance(
let caller = app.bearer_is_authorized(bearer).await?;

// Establish connections
let db_conn = global_db_conn().await?;
let db_conn = global_db_conn()?;
let txn = db_conn.begin().await?;

// Check if the caller is an admin (if not, return early)
@ -197,8 +197,8 @@ pub async fn admin_imitate_login_get(
resources: vec![],
};

let db_conn = global_db_conn().await?;
let db_replica = global_db_replica_conn().await?;
let db_conn = global_db_conn()?;
let db_replica = global_db_replica_conn()?;

let admin = user::Entity::find()
.filter(user::Column::Address.eq(admin_address.as_bytes()))
@ -336,7 +336,7 @@ pub async fn admin_imitate_login_post(
})?;

// fetch the message we gave them from our database
let db_replica = global_db_replica_conn().await?;
let db_replica = global_db_replica_conn()?;

let user_pending_login = pending_login::Entity::find()
.filter(pending_login::Column::Nonce.eq(Uuid::from(login_nonce)))
@ -379,7 +379,7 @@ pub async fn admin_imitate_login_post(
.await?
.web3_context("admin address was not found!")?;

let db_conn = global_db_conn().await?;
let db_conn = global_db_conn()?;

// Add a message that the admin has logged in
// Note that the admin is trying to log in as this user

@ -3,20 +3,22 @@
|
||||
use super::rpc_proxy_ws::ProxyMode;
|
||||
use crate::app::{Web3ProxyApp, APP_USER_AGENT};
|
||||
use crate::balance::Balance;
|
||||
use crate::block_number::CacheMode;
|
||||
use crate::caches::RegisteredUserRateLimitKey;
|
||||
use crate::compute_units::default_usd_per_cu;
|
||||
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
|
||||
use crate::globals::global_db_replica_conn;
|
||||
use crate::jsonrpc::{self, JsonRpcParams, JsonRpcRequest};
|
||||
use crate::globals::{global_db_replica_conn, APP};
|
||||
use crate::jsonrpc::{self, JsonRpcId, JsonRpcParams, JsonRpcRequest};
|
||||
use crate::kafka::KafkaDebugLogger;
|
||||
use crate::response_cache::JsonRpcQueryCacheKey;
|
||||
use crate::rpcs::blockchain::Web3ProxyBlock;
|
||||
use crate::rpcs::one::Web3Rpc;
|
||||
use crate::secrets::RpcSecretKey;
|
||||
use crate::stats::{AppStat, BackendRequests};
|
||||
use crate::user_token::UserBearerToken;
|
||||
use anyhow::Context;
|
||||
use axum::headers::authorization::Bearer;
|
||||
use axum::headers::{Header, Origin, Referer, UserAgent};
|
||||
use chrono::Utc;
|
||||
use core::fmt;
|
||||
use deferred_rate_limiter::{DeferredRateLimitResult, DeferredRateLimiter};
|
||||
use derivative::Derivative;
|
||||
use derive_more::From;
|
||||
@ -29,89 +31,27 @@ use http::HeaderValue;
|
||||
use ipnet::IpNet;
|
||||
use migration::sea_orm::prelude::Decimal;
|
||||
use migration::sea_orm::{ColumnTrait, EntityTrait, QueryFilter};
|
||||
use rdkafka::message::{Header as KafkaHeader, OwnedHeaders as KafkaOwnedHeaders, OwnedMessage};
|
||||
use rdkafka::producer::{FutureProducer, FutureRecord};
|
||||
use rdkafka::util::Timeout as KafkaTimeout;
|
||||
use redis_rate_limiter::redis::AsyncCommands;
|
||||
use redis_rate_limiter::{RedisRateLimitResult, RedisRateLimiter};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde::Serialize;
|
||||
use serde_json::json;
|
||||
use serde_json::value::RawValue;
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Debug;
|
||||
use std::fmt::Display;
|
||||
use std::hash::{Hash, Hasher};
|
||||
use std::mem;
|
||||
use std::num::NonZeroU64;
|
||||
use std::sync::atomic::{self, AtomicBool, AtomicI64, AtomicU64, AtomicUsize};
|
||||
use std::sync::atomic::{self, AtomicBool, AtomicI64, AtomicU64};
|
||||
use std::time::Duration;
|
||||
use std::{net::IpAddr, str::FromStr, sync::Arc};
|
||||
use tokio::sync::RwLock as AsyncRwLock;
|
||||
use tokio::sync::{mpsc, OwnedSemaphorePermit, Semaphore};
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio::time::Instant;
|
||||
use tracing::{error, trace, warn};
|
||||
use ulid::Ulid;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// This lets us use UUID and ULID while we transition to only ULIDs
|
||||
/// TODO: custom deserialize that can also go from String to Ulid
|
||||
#[derive(Copy, Clone, Deserialize)]
|
||||
pub enum RpcSecretKey {
|
||||
Ulid(Ulid),
|
||||
Uuid(Uuid),
|
||||
}
|
||||
|
||||
impl RpcSecretKey {
|
||||
pub fn new() -> Self {
|
||||
Ulid::new().into()
|
||||
}
|
||||
|
||||
fn as_128(&self) -> u128 {
|
||||
match self {
|
||||
Self::Ulid(x) => x.0,
|
||||
Self::Uuid(x) => x.as_u128(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for RpcSecretKey {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.as_128() == other.as_128()
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for RpcSecretKey {}
|
||||
|
||||
impl Debug for RpcSecretKey {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Self::Ulid(x) => Debug::fmt(x, f),
|
||||
Self::Uuid(x) => {
|
||||
let x = Ulid::from(x.as_u128());
|
||||
|
||||
Debug::fmt(&x, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// always serialize as a ULID.
|
||||
impl Serialize for RpcSecretKey {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
match self {
|
||||
Self::Ulid(x) => x.serialize(serializer),
|
||||
Self::Uuid(x) => {
|
||||
let x: Ulid = x.to_owned().into();
|
||||
|
||||
x.serialize(serializer)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// TODO: should this have IpAddr and Origin or AuthorizationChecks?
|
||||
#[derive(Debug)]
|
||||
pub enum RateLimitResult {
|
||||
@ -125,7 +65,7 @@ pub enum RateLimitResult {
|
||||
UnknownKey,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
|
||||
pub enum AuthorizationType {
|
||||
Internal,
|
||||
Frontend,
|
||||
@ -180,15 +120,6 @@ pub struct Authorization {
|
||||
pub authorization_type: AuthorizationType,
|
||||
}
|
||||
|
||||
pub struct KafkaDebugLogger {
|
||||
topic: String,
|
||||
key: Vec<u8>,
|
||||
headers: KafkaOwnedHeaders,
|
||||
producer: FutureProducer,
|
||||
num_requests: AtomicUsize,
|
||||
num_responses: AtomicUsize,
|
||||
}
|
||||
|
||||
/// Ulids and Uuids matching the same bits hash the same
|
||||
impl Hash for RpcSecretKey {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
@ -198,167 +129,43 @@ impl Hash for RpcSecretKey {
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for KafkaDebugLogger {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("KafkaDebugLogger")
|
||||
.field("topic", &self.topic)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
type KafkaLogResult = Result<(i32, i64), (rdkafka::error::KafkaError, OwnedMessage)>;

impl KafkaDebugLogger {
    fn try_new(
        app: &Web3ProxyApp,
        authorization: Arc<Authorization>,
        head_block_num: Option<&U64>,
        kafka_topic: &str,
        request_ulid: Ulid,
    ) -> Option<Arc<Self>> {
        let kafka_producer = app.kafka_producer.clone()?;

        let kafka_topic = kafka_topic.to_string();

        let rpc_secret_key_id = authorization
            .checks
            .rpc_secret_key_id
            .map(|x| x.get())
            .unwrap_or_default();

        let kafka_key =
            rmp_serde::to_vec(&rpc_secret_key_id).expect("ids should always serialize with rmp");

        let chain_id = app.config.chain_id;

        let head_block_num = head_block_num
            .copied()
            .or_else(|| app.balanced_rpcs.head_block_num());

        // TODO: would be nice to have the block hash too

        // another item is added with the response, so initial_capacity is +1 what is needed here
        let kafka_headers = KafkaOwnedHeaders::new_with_capacity(6)
            .insert(KafkaHeader {
                key: "rpc_secret_key_id",
                value: authorization
                    .checks
                    .rpc_secret_key_id
                    .map(|x| x.to_string())
                    .as_ref(),
            })
            .insert(KafkaHeader {
                key: "ip",
                value: Some(&authorization.ip.to_string()),
            })
            .insert(KafkaHeader {
                key: "request_ulid",
                value: Some(&request_ulid.to_string()),
            })
            .insert(KafkaHeader {
                key: "head_block_num",
                value: head_block_num.map(|x| x.to_string()).as_ref(),
            })
            .insert(KafkaHeader {
                key: "chain_id",
                value: Some(&chain_id.to_le_bytes()),
            });

        // save the key and headers for when we log the response
        let x = Self {
            topic: kafka_topic,
            key: kafka_key,
            headers: kafka_headers,
            producer: kafka_producer,
            num_requests: 0.into(),
            num_responses: 0.into(),
        };

        let x = Arc::new(x);

        Some(x)
    }

    fn background_log(&self, payload: Vec<u8>) -> JoinHandle<KafkaLogResult> {
        let topic = self.topic.clone();
        let key = self.key.clone();
        let producer = self.producer.clone();
        let headers = self.headers.clone();

        let f = async move {
            let record = FutureRecord::to(&topic)
                .key(&key)
                .payload(&payload)
                .headers(headers);

            let produce_future =
                producer.send(record, KafkaTimeout::After(Duration::from_secs(5 * 60)));

            let kafka_response = produce_future.await;

            if let Err((err, msg)) = kafka_response.as_ref() {
                error!("produce kafka request: {} - {:?}", err, msg);
                // TODO: re-queue the msg? log somewhere else like a file on disk?
                // TODO: this is bad and should probably trigger an alarm
            };

            kafka_response
        };

        tokio::spawn(f)
    }

    /// for opt-in debug usage, log the request to kafka
    /// TODO: generic type for request
    pub fn log_debug_request(&self, request: &JsonRpcRequest) -> JoinHandle<KafkaLogResult> {
        // TODO: is rust message pack a good choice? try rkyv instead
        let payload =
            rmp_serde::to_vec(&request).expect("requests should always serialize with rmp");

        self.num_requests.fetch_add(1, atomic::Ordering::Relaxed);

        self.background_log(payload)
    }

    pub fn log_debug_response<R>(&self, response: &R) -> JoinHandle<KafkaLogResult>
    where
        R: serde::Serialize,
    {
        let payload =
            rmp_serde::to_vec(&response).expect("requests should always serialize with rmp");

        self.num_responses.fetch_add(1, atomic::Ordering::Relaxed);

        self.background_log(payload)
    }
#[derive(Debug, Default, From, Serialize)]
pub enum RequestOrMethod {
    Request(JsonRpcRequest),
    /// sometimes we don't have a full request. for example, when we are logging a websocket subscription
    Method(Cow<'static, str>, usize),
    #[default]
    None,
}

/// TODO: instead of a bunch of atomics, this should probably use a RwLock
#[derive(Debug, Derivative)]
#[derivative(Default)]
pub struct RequestMetadata {
pub struct Web3Request {
    /// TODO: set archive_request during the new instead of after
    /// TODO: this is more complex than "requires a block older than X height". different types of data can be pruned differently
    pub archive_request: AtomicBool,

    pub authorization: Arc<Authorization>,

    pub cache_mode: CacheMode,

    /// TODO: this should probably be in a global config. although maybe if we run multiple chains in one process this will be useful
    pub chain_id: u64,

    pub head_block: Option<Web3ProxyBlock>,

    /// TODO: this should be in a global config. not copied to every single request
    pub usd_per_cu: Decimal,

    pub request_ulid: Ulid,

    /// Size of the JSON request. Does not include headers or things like that.
    pub request_bytes: usize,

    /// The JSON-RPC request method.
    pub method: Cow<'static, str>,
    pub request: RequestOrMethod,

    /// Instant that the request was received (or at least close to it)
    /// We use Instant and not timestamps to avoid problems with leap seconds and similar issues
    #[derivative(Default(value = "Instant::now()"))]
    pub start_instant: Instant,
    #[derivative(Default(value = "Instant::now() + Duration::from_secs(295)"))]
    pub expire_instant: Instant,
    /// if this is empty, there was a cache_hit
    /// otherwise, it is populated with any rpc servers that were used by this request
    pub backend_requests: BackendRequests,
@ -394,50 +201,48 @@ impl Default for Authorization {
    }
}

impl RequestMetadata {
    pub fn proxy_mode(&self) -> ProxyMode {
        self.authorization.checks.proxy_mode
    }
}

#[derive(From)]
pub enum RequestOrMethod<'a> {
    /// jsonrpc method (or similar label) and the size that the request should count as (sometimes 0)
    Method(&'a str, usize),
    Request(&'a JsonRpcRequest),
}

impl<'a> RequestOrMethod<'a> {
    fn method(&self) -> Cow<'static, str> {
        let x = match self {
            Self::Request(x) => x.method.to_string(),
            Self::Method(x, _) => x.to_string(),
        };

        x.into()
impl RequestOrMethod {
    pub fn id(&self) -> Box<RawValue> {
        match self {
            Self::Request(x) => x.id.clone(),
            Self::Method(_, _) => Default::default(),
            Self::None => Default::default(),
        }
    }

    fn jsonrpc_request(&self) -> Option<&JsonRpcRequest> {
    pub fn method(&self) -> &str {
        match self {
            Self::Request(x) => x.method.as_str(),
            Self::Method(x, _) => x,
            Self::None => "unknown",
        }
    }

    /// TODO: should this panic on Self::None|Self::Method?
    pub fn params(&self) -> &serde_json::Value {
        match self {
            Self::Request(x) => &x.params,
            Self::Method(..) => &serde_json::Value::Null,
            Self::None => &serde_json::Value::Null,
        }
    }

    pub fn jsonrpc_request(&self) -> Option<&JsonRpcRequest> {
        match self {
            Self::Request(x) => Some(x),
            _ => None,
        }
    }

    fn num_bytes(&self) -> usize {
    pub fn num_bytes(&self) -> usize {
        match self {
            RequestOrMethod::Method(_, num_bytes) => *num_bytes,
            RequestOrMethod::Request(x) => x.num_bytes(),
            Self::Method(_, num_bytes) => *num_bytes,
            Self::Request(x) => x.num_bytes(),
            Self::None => 0,
        }
    }
}

impl<'a> From<&'a str> for RequestOrMethod<'a> {
    fn from(value: &'a str) -> Self {
        Self::Method(value, 0)
    }
}

// TODO: i think a trait is actually the right thing to use here
#[derive(From)]
pub enum ResponseOrBytes<'a> {
@ -470,110 +275,59 @@ impl ResponseOrBytes<'_> {
    }
}
impl RequestMetadata {
    pub async fn new<'a, R: Into<RequestOrMethod<'a>>>(
        app: &Web3ProxyApp,
impl Web3Request {
    #[allow(clippy::too_many_arguments)]
    async fn new_with_options(
        authorization: Arc<Authorization>,
        request: R,
        head_block: Option<&Web3ProxyBlock>,
        chain_id: u64,
        head_block: Option<Web3ProxyBlock>,
        kafka_debug_logger: Option<Arc<KafkaDebugLogger>>,
        max_wait: Option<Duration>,
        mut request: RequestOrMethod,
        stat_sender: Option<mpsc::UnboundedSender<AppStat>>,
        usd_per_cu: Decimal,
        app: Option<&Web3ProxyApp>,
    ) -> Arc<Self> {
        let request = request.into();
        let start_instant = Instant::now();

        let method = request.method();
        // TODO: get this default from config, or from user settings
        // 5 minutes with a buffer for other things being slow
        let expire_instant = start_instant + max_wait.unwrap_or_else(|| Duration::from_secs(295));

        let request_bytes = request.num_bytes();

        // TODO: modify the request here? I don't really like that very much. but it's a sure way to get archive_request set correctly

        // TODO: add the Ulid at the haproxy or amazon load balancer level? investigate OpenTelemetry
        let request_ulid = Ulid::new();

        let kafka_debug_logger = if matches!(authorization.checks.proxy_mode, ProxyMode::Debug) {
            KafkaDebugLogger::try_new(
                app,
                authorization.clone(),
                head_block.map(|x| x.number()),
                "web3_proxy:rpc",
                request_ulid,
            )
        } else {
            None
        };
        // let request: RequestOrMethod = request.into();

        // we VERY INTENTIONALLY log to kafka BEFORE calculating the cache key
        // this is because calculating the cache_key may modify the params!
        // for example, if the request specifies "latest" as the block number, we replace it with the actual latest block number
        if let Some(ref kafka_debug_logger) = kafka_debug_logger {
            if let Some(request) = request.jsonrpc_request() {
                // TODO: channels might be more ergonomic than spawned futures
                // spawned things run in parallel easier but generally need more Arcs
                kafka_debug_logger.log_debug_request(request);
            } else {
                // there probably isn't a new request attached to this metadata.
                // this happens with websocket subscriptions
            }
            // TODO: channels might be more ergonomic than spawned futures
            // spawned things run in parallel easier but generally need more Arcs
            kafka_debug_logger.log_debug_request(&request);
        }

        let chain_id = app.config.chain_id;

        let x = Self {
            archive_request: false.into(),
            authorization,
            backend_requests: Default::default(),
            chain_id,
            error_response: false.into(),
            kafka_debug_logger,
            method,
            no_servers: 0.into(),
            request_bytes,
            request_ulid,
            response_bytes: 0.into(),
            response_from_backup_rpc: false.into(),
            response_millis: 0.into(),
            response_timestamp: 0.into(),
            start_instant: Instant::now(),
            stat_sender: app.stat_sender.clone(),
            usd_per_cu: app.config.usd_per_cu.unwrap_or_default(),
            user_error_response: false.into(),
        // now that kafka has logged the user's original params, we can calculate the cache key
        let cache_mode = match &mut request {
            RequestOrMethod::Request(x) => CacheMode::new(x, head_block.as_ref(), app).await,
            _ => CacheMode::Never,
        };

        Arc::new(x)
    }

    pub fn new_internal<P: JsonRpcParams>(chain_id: u64, method: &str, params: &P) -> Arc<Self> {
        let authorization = Arc::new(Authorization::internal().unwrap());
        let request_ulid = Ulid::new();
        let method = method.to_string().into();

        // TODO: how can we get this?
        let stat_sender = None;

        // TODO: how can we do this efficiently? having to serialize sucks
        let request_bytes = json!({
            "jsonrpc": "2.0",
            "id": 1,
            "method": method,
            "params": params,
        })
        .to_string()
        .len();

        // TODO: we should be getting this from config instead!
        let usd_per_cu = default_usd_per_cu(chain_id);

        let x = Self {
            archive_request: false.into(),
            authorization,
            backend_requests: Default::default(),
            cache_mode,
            chain_id,
            error_response: false.into(),
            kafka_debug_logger: None,
            method,
            expire_instant,
            head_block: head_block.clone(),
            kafka_debug_logger,
            no_servers: 0.into(),
            request_bytes,
            request_ulid,
            request,
            response_bytes: 0.into(),
            response_from_backup_rpc: false.into(),
            response_millis: 0.into(),
            response_timestamp: 0.into(),
            start_instant: Instant::now(),
            start_instant,
            stat_sender,
            usd_per_cu,
            user_error_response: false.into(),
@ -582,10 +336,127 @@ impl RequestMetadata {
        Arc::new(x)
    }

    pub async fn new_with_app(
        app: &Web3ProxyApp,
        authorization: Arc<Authorization>,
        max_wait: Option<Duration>,
        request: RequestOrMethod,
        head_block: Option<Web3ProxyBlock>,
    ) -> Arc<Self> {
        // TODO: get this out of tracing instead (where we have a String from Amazon's LB)
        let request_ulid = Ulid::new();

        let kafka_debug_logger = if matches!(authorization.checks.proxy_mode, ProxyMode::Debug) {
            KafkaDebugLogger::try_new(
                app,
                authorization.clone(),
                head_block.as_ref().map(|x| x.number()),
                "web3_proxy:rpc",
                request_ulid,
            )
        } else {
            None
        };

        let chain_id = app.config.chain_id;

        let stat_sender = app.stat_sender.clone();

        let usd_per_cu = app.config.usd_per_cu.unwrap_or_default();

        Self::new_with_options(
            authorization,
            chain_id,
            head_block,
            kafka_debug_logger,
            max_wait,
            request,
            stat_sender,
            usd_per_cu,
            Some(app),
        )
        .await
    }

    pub async fn new_internal<P: JsonRpcParams>(
        method: String,
        params: &P,
        head_block: Option<Web3ProxyBlock>,
        max_wait: Option<Duration>,
    ) -> Arc<Self> {
        let authorization = Arc::new(Authorization::internal().unwrap());

        // TODO: we need a real id! increment a counter on the app
        let id = JsonRpcId::Number(1);

        // TODO: this seems inefficient
        let request = JsonRpcRequest::new(id, method, json!(params)).unwrap();

        if let Some(app) = APP.get() {
            Self::new_with_app(app, authorization, max_wait, request.into(), head_block).await
        } else {
            Self::new_with_options(
                authorization,
                0,
                head_block,
                None,
                max_wait,
                request.into(),
                None,
                Default::default(),
                None,
            )
            .await
        }
    }
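Worth pausing on the `APP.get()` branch above: this commit keeps the app in a global `std::sync::OnceLock` (see the globals diff further down), so internal requests pick up real chain/stat config when the app has been registered and fall back to defaults otherwise (for example, in unit tests). A minimal sketch of that pattern, with illustrative names rather than the proxy's real types:

use std::sync::{Arc, OnceLock};

struct App {
    chain_id: u64,
}

// set once at startup; readable from anywhere without passing &App around
static APP: OnceLock<Arc<App>> = OnceLock::new();

fn set_global_app(app: Arc<App>) {
    // set() only succeeds once; later calls return Err with the rejected value
    APP.set(app).ok();
}

fn current_chain_id() -> u64 {
    // fall back to a default when no app was registered
    APP.get().map(|app| app.chain_id).unwrap_or(0)
}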
    #[inline]
    pub fn backend_rpcs_used(&self) -> Vec<Arc<Web3Rpc>> {
        self.backend_requests.lock().clone()
    }

    pub fn cache_key(&self) -> Option<u64> {
        match &self.cache_mode {
            CacheMode::Never => None,
            x => {
                let x = JsonRpcQueryCacheKey::new(x, &self.request).hash();

                Some(x)
            }
        }
    }

    #[inline]
    pub fn cache_jsonrpc_errors(&self) -> bool {
        self.cache_mode.cache_jsonrpc_errors()
    }

    #[inline]
    pub fn id(&self) -> Box<RawValue> {
        self.request.id()
    }

    pub fn max_block_needed(&self) -> Option<U64> {
        self.cache_mode.to_block().map(|x| *x.num())
    }

    pub fn min_block_needed(&self) -> Option<U64> {
        if self.archive_request.load(atomic::Ordering::Relaxed) {
            Some(U64::zero())
        } else {
            self.cache_mode.from_block().map(|x| *x.num())
        }
    }

    pub fn ttl(&self) -> Duration {
        self.expire_instant
            .saturating_duration_since(Instant::now())
    }

    pub fn ttl_expired(&self) -> bool {
        self.expire_instant < Instant::now()
    }
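`ttl` and `ttl_expired` are plain deadline arithmetic on `Instant`; `saturating_duration_since` is what keeps `ttl` from panicking once the deadline has already passed. A self-contained sketch of the same shape:

use std::time::{Duration, Instant};

struct Deadline {
    expire_instant: Instant,
}

impl Deadline {
    fn ttl(&self) -> Duration {
        // returns Duration::ZERO instead of panicking after expiry
        self.expire_instant.saturating_duration_since(Instant::now())
    }

    fn ttl_expired(&self) -> bool {
        self.expire_instant < Instant::now()
    }
}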
    pub fn try_send_stat(mut self) -> Web3ProxyResult<()> {
        if let Some(stat_sender) = self.stat_sender.take() {
            trace!(?self, "sending stat");
@ -648,11 +519,16 @@ impl RequestMetadata {
        }
    }

    #[inline]
    pub fn proxy_mode(&self) -> ProxyMode {
        self.authorization.checks.proxy_mode
    }

    // TODO: helper function to duplicate? needs to clear request_bytes, and all the atomics tho...
}

// TODO: is this where the panic comes from?
impl Drop for RequestMetadata {
impl Drop for Web3Request {
    fn drop(&mut self) {
        if self.stat_sender.is_some() {
            // turn `&mut self` into `self`
@ -1076,7 +952,7 @@ impl Web3ProxyApp {
        let user_bearer_token = UserBearerToken::try_from(bearer)?;

        // get the attached address from the database for the given auth_token.
        let db_replica = global_db_replica_conn().await?;
        let db_replica = global_db_replica_conn()?;

        let user_bearer_uuid: Uuid = user_bearer_token.into();

@ -1193,7 +1069,7 @@ impl Web3ProxyApp {
        let x = self
            .rpc_secret_key_cache
            .try_get_with_by_ref(rpc_secret_key, async move {
                let db_replica = global_db_replica_conn().await?;
                let db_replica = global_db_replica_conn()?;

                // TODO: join the user table to this to return the User? we don't always need it
                // TODO: join on secondary users

@ -27,7 +27,7 @@ use strum::{EnumCount, EnumIter};
use tokio::sync::broadcast;
use tower_http::sensitive_headers::SetSensitiveRequestHeadersLayer;
use tower_http::{cors::CorsLayer, normalize_path::NormalizePathLayer, trace::TraceLayer};
use tracing::{error_span, info};
use tracing::{error_span, info, trace_span};
use ulid::Ulid;

/// simple keys for caching responses
@ -278,13 +278,22 @@ pub async fn serve(

                // And then we put it along with other information into the `request` span
                // TODO: what other info should we attach? how can we attach an error and a tracing span here?
                error_span!(
                // TODO: how can we do a tracing_span OR an error_span?
                let s = trace_span!(
                    "request",
                    id = %request_id,
                    // method = %request.method(),
                    // // don't log the path. it often includes the RPC key!
                    // path = %request.uri().path(),
                )
                    method = %request.method(),
                    path = %request.uri().path(),
                );

                if s.is_disabled() {
                    error_span!(
                        "request",
                        id = %request_id,
                    )
                } else {
                    s
                }
            }), // .on_failure(|| todo!("on failure that has the request and response body so we can debug more easily")),
        )
        // 404 for any unknown routes
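The serve() hunk above now builds the verbose trace_span first and only falls back to the old terse error_span when trace-level logging is filtered out, so method and path never reach error-level logs. A hedged sketch of that shape (`request_id` is a stand-in parameter):

use tracing::{error_span, trace_span, Span};

fn request_span(request_id: &str) -> Span {
    // verbose span: method/path are only recorded at trace level
    let s = trace_span!("request", id = %request_id);

    if s.is_disabled() {
        // trace is filtered out: keep at least the request id at error level
        error_span!("request", id = %request_id)
    } else {
        s
    }
}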
@ -2,9 +2,9 @@
//!
//! WebSockets are the preferred method of receiving requests, but not all clients have good support.

use super::authorization::{ip_is_authorized, key_is_authorized, Authorization, RequestMetadata};
use super::authorization::{ip_is_authorized, key_is_authorized, Authorization, Web3Request};
use crate::errors::{Web3ProxyError, Web3ProxyResponse};
use crate::jsonrpc::{self, JsonRpcId};
use crate::jsonrpc;
use crate::{
    app::Web3ProxyApp,
    errors::Web3ProxyResult,
@ -29,7 +29,6 @@ use handlebars::Handlebars;
use hashbrown::HashMap;
use http::{HeaderMap, StatusCode};
use serde_json::json;
use serde_json::value::RawValue;
use std::net::IpAddr;
use std::str::from_utf8_mut;
use std::sync::atomic::AtomicU64;
@ -317,26 +316,22 @@ async fn proxy_web3_socket(
}

async fn websocket_proxy_web3_rpc(
    app: Arc<Web3ProxyApp>,
    app: &Arc<Web3ProxyApp>,
    authorization: Arc<Authorization>,
    json_request: JsonRpcRequest,
    response_sender: &mpsc::Sender<Message>,
    subscription_count: &AtomicU64,
    subscriptions: &AsyncRwLock<HashMap<U64, AbortHandle>>,
) -> (Box<RawValue>, Web3ProxyResult<jsonrpc::Response>) {
    let response_id = json_request.id.clone();

    // TODO: move this to a separate function so we can use the try operator
    let response: Web3ProxyResult<jsonrpc::Response> = match &json_request.method[..] {
) -> Web3ProxyResult<jsonrpc::Response> {
    match &json_request.method[..] {
        "eth_subscribe" => {
            let web3_request =
                Web3Request::new_with_app(app, authorization, None, json_request.into(), None)
                    .await;

            // TODO: how can we subscribe with proxy_mode?
            match app
                .eth_subscribe(
                    authorization,
                    json_request,
                    subscription_count,
                    response_sender.clone(),
                )
                .eth_subscribe(web3_request, subscription_count, response_sender.clone())
                .await
            {
                Ok((handle, response)) => {
@ -357,25 +352,25 @@ async fn websocket_proxy_web3_rpc(
            }
        }
        "eth_unsubscribe" => {
            let request_metadata =
                RequestMetadata::new(&app, authorization, &json_request, None).await;
            let web3_request =
                Web3Request::new_with_app(app, authorization, None, json_request.into(), None)
                    .await;

            let maybe_id = json_request
                .params
            // sometimes we get a list, sometimes we get the id directly
            // check for the list first, then just use the whole thing
            let maybe_id = web3_request
                .request
                .params()
                .get(0)
                .cloned()
                .unwrap_or(json_request.params);
                .unwrap_or_else(|| web3_request.request.params())
                .clone();

            let subscription_id: U64 = match serde_json::from_value::<U64>(maybe_id) {
                Ok(x) => x,
                Err(err) => {
                    return (
                        response_id,
                        Err(Web3ProxyError::BadRequest(
                            format!("unexpected params given for eth_unsubscribe: {:?}", err)
                                .into(),
                        )),
                    )
                    return Err(Web3ProxyError::BadRequest(
                        format!("unexpected params given for eth_unsubscribe: {:?}", err).into(),
                    ));
                }
            };
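The maybe_id dance above tolerates clients that send the subscription id either wrapped in a one-element list or as a bare value. A small sketch of that tolerant parse using serde_json (the helper name is hypothetical):

use serde_json::{json, Value};

/// accept either `[id]` or a bare `id`
fn subscription_id(params: &Value) -> Option<u64> {
    // Value::get(0) returns None for non-arrays, so fall back to the whole value
    let candidate = params.get(0).cloned().unwrap_or_else(|| params.clone());

    serde_json::from_value(candidate).ok()
}

fn main() {
    assert_eq!(subscription_id(&json!([7])), Some(7));
    assert_eq!(subscription_id(&json!(7)), Some(7));
}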
@ -392,11 +387,11 @@ async fn websocket_proxy_web3_rpc(
            };

            let response =
                jsonrpc::ParsedResponse::from_value(json!(partial_response), response_id.clone());
                jsonrpc::ParsedResponse::from_value(json!(partial_response), web3_request.id());

            // TODO: better way of passing in ParsedResponse
            let response = jsonrpc::SingleResponse::Parsed(response);
            request_metadata.add_response(&response);
            web3_request.add_response(&response);
            let response = response.parsed().await.expect("Response already parsed");

            Ok(response.into())
@ -405,32 +400,27 @@ async fn websocket_proxy_web3_rpc(
            .proxy_web3_rpc(authorization, json_request.into())
            .await
            .map(|(_, response, _)| response),
    };

    (response_id, response)
    }
}

/// websockets support a few more methods than http clients
async fn handle_socket_payload(
    app: Arc<Web3ProxyApp>,
    app: &Arc<Web3ProxyApp>,
    authorization: &Arc<Authorization>,
    payload: &str,
    response_sender: &mpsc::Sender<Message>,
    subscription_count: &AtomicU64,
    subscriptions: Arc<AsyncRwLock<HashMap<U64, AbortHandle>>>,
) -> Web3ProxyResult<(Message, Option<OwnedSemaphorePermit>)> {
    let (authorization, semaphore) = authorization.check_again(&app).await?;
    let (authorization, semaphore) = authorization.check_again(app).await?;

    // TODO: handle batched requests
    let (response_id, response) = match serde_json::from_str::<JsonRpcRequest>(payload) {
        Ok(json_request) => {
            // // TODO: move tarpit code to an individual request, or change this to handle enums
            // json_request
            //     .tarpit_invalid(&app, &authorization, Duration::from_secs(2))
            //     .await?;
            let request_id = json_request.id.clone();

            // TODO: move this to a separate function so we can use the try operator
            websocket_proxy_web3_rpc(
            let x = websocket_proxy_web3_rpc(
                app,
                authorization.clone(),
                json_request,
@ -438,12 +428,11 @@ async fn handle_socket_payload(
                subscription_count,
                &subscriptions,
            )
            .await
        }
        Err(err) => {
            let id = JsonRpcId::None.to_raw_value();
            (id, Err(err.into()))
            .await;

            (request_id, x)
        }
        Err(err) => (Default::default(), Err(err.into())),
    };

    let response_str = match response {
@ -488,7 +477,7 @@ async fn read_web3_socket(
        let (response_msg, _semaphore) = match msg {
            Message::Text(payload) => {
                match handle_socket_payload(
                    app,
                    &app,
                    &authorization,
                    &payload,
                    &response_sender,
@ -522,7 +511,7 @@ async fn read_web3_socket(
                let payload = from_utf8_mut(&mut payload).unwrap();

                let (m, s) = match handle_socket_payload(
                    app,
                    &app,
                    &authorization,
                    payload,
                    &response_sender,
@ -587,3 +576,17 @@ async fn write_web3_socket(

    // TODO: decrement counter for open websockets
}

#[cfg(test)]
mod test {
    #[test]
    fn nulls_and_defaults() {
        let x = serde_json::Value::Null;
        let x = serde_json::to_string(&x).unwrap();

        let y: Box<serde_json::value::RawValue> = Default::default();
        let y = serde_json::to_string(&y).unwrap();

        assert_eq!(x, y);
    }
}

@ -208,7 +208,7 @@ async fn _status(app: Arc<Web3ProxyApp>) -> (StatusCode, &'static str, Bytes) {
        "hostname": app.hostname,
        "payment_factory_address": app.config.deposit_factory_contract,
        "pending_txid_firehose": app.pending_txid_firehose,
        "private_rpcs": app.private_rpcs,
        "private_rpcs": app.protected_rpcs,
        "uptime": app.start.elapsed().as_secs(),
        "version": APP_USER_AGENT,
    });
@ -8,7 +8,7 @@ use tokio::stream::Stream;

struct SizingBody<B> {
    inner: B,
    request_metadata: RequestMetadata,
    web3_request: RequestMetadata,
}

impl<B> SizingBody<B> {
@ -1,8 +1,9 @@
//! Handle registration, logins, and managing account data.
use crate::app::Web3ProxyApp;
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResponse};
use crate::frontend::authorization::{login_is_authorized, RpcSecretKey};
use crate::frontend::authorization::login_is_authorized;
use crate::globals::{global_db_conn, global_db_replica_conn};
use crate::secrets::RpcSecretKey;
use crate::user_token::UserBearerToken;
use axum::{
    extract::{Path, Query},
@ -125,7 +126,7 @@ pub async fn user_login_get(
        resources: vec![],
    };

    let db_conn = global_db_conn().await?;
    let db_conn = global_db_conn()?;

    // delete any expired logins
    if let Err(err) = login::Entity::delete_many()
@ -262,7 +263,7 @@ pub async fn user_login_post(
    let login_nonce = UserBearerToken::from_str(&their_msg.nonce)?;

    // fetch the message we gave them from our database
    let db_replica = global_db_replica_conn().await?;
    let db_replica = global_db_replica_conn()?;

    let user_pending_login = pending_login::Entity::find()
        .filter(pending_login::Column::Nonce.eq(Uuid::from(login_nonce)))
@ -294,7 +295,7 @@ pub async fn user_login_post(
        .one(db_replica.as_ref())
        .await?;

    let db_conn = global_db_conn().await?;
    let db_conn = global_db_conn()?;

    let (caller, user_rpc_keys, status_code) = match caller {
        None => {
@ -447,7 +448,7 @@ pub async fn user_logout_post(
) -> Web3ProxyResponse {
    let user_bearer = UserBearerToken::try_from(bearer)?;

    let db_conn = global_db_conn().await?;
    let db_conn = global_db_conn()?;

    if let Err(err) = login::Entity::delete_many()
        .filter(login::Column::BearerToken.eq(user_bearer.uuid()))

@ -46,7 +46,7 @@ pub async fn user_balance_get(
) -> Web3ProxyResponse {
    let user = app.bearer_is_authorized(bearer).await?;

    let db_replica = global_db_replica_conn().await?;
    let db_replica = global_db_replica_conn()?;

    let user_balance = match Balance::try_from_db(db_replica.as_ref(), user.id).await? {
        None => Balance::default(),
@ -66,7 +66,7 @@ pub async fn user_chain_deposits_get(
) -> Web3ProxyResponse {
    let user = app.bearer_is_authorized(bearer).await?;

    let db_replica = global_db_replica_conn().await?;
    let db_replica = global_db_replica_conn()?;

    // Filter by user ...
    let receipts = increase_on_chain_balance_receipt::Entity::find()
@ -105,7 +105,7 @@ pub async fn user_stripe_deposits_get(
) -> Web3ProxyResponse {
    let user = app.bearer_is_authorized(bearer).await?;

    let db_replica = global_db_replica_conn().await?;
    let db_replica = global_db_replica_conn()?;

    // Filter by user ...
    let receipts = stripe_increase_balance_receipt::Entity::find()
@ -148,7 +148,7 @@ pub async fn user_admin_deposits_get(
) -> Web3ProxyResponse {
    let user = app.bearer_is_authorized(bearer).await?;

    let db_replica = global_db_replica_conn().await?;
    let db_replica = global_db_replica_conn()?;

    // Filter by user ...
    let receipts = admin_increase_balance_receipt::Entity::find()
@ -207,7 +207,7 @@ pub async fn user_balance_post(
        Web3ProxyError::BadRequest(format!("unable to parse tx_hash: {}", err).into())
    })?;

    let db_conn = global_db_conn().await?;
    let db_conn = global_db_conn()?;

    // get the transaction receipt
    let transaction_receipt = app
@ -496,7 +496,7 @@ pub async fn handle_uncle_block(
    // user_id -> balance that we need to subtract
    let mut reversed_balances: HashMap<u64, Decimal> = HashMap::new();

    let db_conn = global_db_conn().await?;
    let db_conn = global_db_conn()?;

    // delete any deposit txids with uncle_hash
    for reversed_deposit in increase_on_chain_balance_receipt::Entity::find()

@ -69,9 +69,7 @@ pub async fn user_balance_stripe_post(
        return Ok("Received Webhook".into_response());
    }

    let db_conn = global_db_conn()
        .await
        .web3_context("query_user_stats needs a db")?;
    let db_conn = global_db_conn().web3_context("query_user_stats needs a db")?;

    if stripe_increase_balance_receipt::Entity::find()
        .filter(

@ -36,7 +36,7 @@ pub async fn user_referral_link_get(
    // First get the bearer token and check if the user is logged in
    let user = app.bearer_is_authorized(bearer).await?;

    let db_replica = global_db_replica_conn().await?;
    let db_replica = global_db_replica_conn()?;

    // Then get the referral token. If one doesn't exist, create one
    let user_referrer = referrer::Entity::find()
@ -48,7 +48,7 @@ pub async fn user_referral_link_get(
        Some(x) => (x.referral_code, StatusCode::OK),
        None => {
            // Connect to the database for writes
            let db_conn = global_db_conn().await?;
            let db_conn = global_db_conn()?;

            let referral_code = ReferralCode::default().to_string();

@ -81,7 +81,7 @@ pub async fn user_used_referral_stats(
    // First get the bearer token and check if the user is logged in
    let user = app.bearer_is_authorized(bearer).await?;

    let db_replica = global_db_replica_conn().await?;
    let db_replica = global_db_replica_conn()?;

    // Get all referral records associated with this user
    let referrals = referee::Entity::find()
@ -139,7 +139,7 @@ pub async fn user_shared_referral_stats(
    // First get the bearer token and check if the user is logged in
    let user = app.bearer_is_authorized(bearer).await?;

    let db_replica = global_db_replica_conn().await?;
    let db_replica = global_db_replica_conn()?;

    // Get all referral records associated with this user
    let query_result = referrer::Entity::find()
@ -1,8 +1,8 @@
//! Handle registration, logins, and managing account data.
use super::super::authorization::RpcSecretKey;
use crate::app::Web3ProxyApp;
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResponse};
use crate::globals::{global_db_conn, global_db_replica_conn};
use crate::secrets::RpcSecretKey;
use axum::headers::{Header, Origin, Referer, UserAgent};
use axum::{
    headers::{authorization::Bearer, Authorization},
@ -32,7 +32,7 @@ pub async fn rpc_keys_get(
) -> Web3ProxyResponse {
    let user = app.bearer_is_authorized(bearer).await?;

    let db_replica = global_db_replica_conn().await?;
    let db_replica = global_db_replica_conn()?;

    // This is basically completely copied from sea-orm. Not optimal, but it keeps the format identical to before (while adding the final key)
    // We could also pack the below stuff into its subfield, but then we would destroy the format. Both options are fine for now though
@ -161,7 +161,7 @@ pub async fn rpc_keys_management(

    let user = app.bearer_is_authorized(bearer).await?;

    let db_replica = global_db_replica_conn().await?;
    let db_replica = global_db_replica_conn()?;

    let mut uk = match payload.key_id {
        Some(existing_key_id) => {
@ -341,7 +341,7 @@ pub async fn rpc_keys_management(
    }

    let uk = if uk.is_changed() {
        let db_conn = global_db_conn().await?;
        let db_conn = global_db_conn()?;

        uk.save(&db_conn)
            .await

@ -48,7 +48,7 @@ pub async fn user_revert_logs_get(
    response.insert("chain_id", json!(chain_id));
    response.insert("query_start", json!(query_start.timestamp() as u64));

    let db_replica = global_db_replica_conn().await?;
    let db_replica = global_db_replica_conn()?;

    let uks = rpc_key::Entity::find()
        .filter(rpc_key::Column::UserId.eq(user.id))
@ -141,7 +141,7 @@ pub async fn user_mysql_stats_get(
    TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
) -> Web3ProxyResponse {
    let user = app.bearer_is_authorized(bearer).await?;
    let db_replica = global_db_replica_conn().await?;
    let db_replica = global_db_replica_conn()?;

    // Fetch everything from mysql, joined
    let stats = rpc_key::Entity::find()

@ -1,8 +1,8 @@
//! Handle subusers, viewing subusers, and viewing accessible rpc-keys
use crate::app::Web3ProxyApp;
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResponse};
use crate::frontend::authorization::RpcSecretKey;
use crate::globals::{global_db_conn, global_db_replica_conn};
use crate::secrets::RpcSecretKey;
use anyhow::Context;
use axum::{
    extract::Query,
@ -36,7 +36,7 @@ pub async fn get_keys_as_subuser(
    // First, authenticate
    let subuser = app.bearer_is_authorized(bearer).await?;

    let db_replica = global_db_replica_conn().await?;
    let db_replica = global_db_replica_conn()?;

    // TODO: JOIN over RPC_KEY, SUBUSER, PRIMARY_USER and return these items

@ -101,7 +101,7 @@ pub async fn get_subusers(
    // First, authenticate
    let user = app.bearer_is_authorized(bearer).await?;

    let db_replica = global_db_replica_conn().await?;
    let db_replica = global_db_replica_conn()?;

    let rpc_key: u64 = params
        .remove("key_id")
@ -173,7 +173,7 @@ pub async fn modify_subuser(
    // First, authenticate
    let user = app.bearer_is_authorized(bearer).await?;

    let db_replica = global_db_replica_conn().await?;
    let db_replica = global_db_replica_conn()?;

    trace!("Parameters are: {:?}", params);

@ -257,7 +257,7 @@ pub async fn modify_subuser(
    }

    // TODO: There is a good chunk of duplicate logic as login-post. Consider refactoring ...
    let db_conn = global_db_conn().await?;
    let db_conn = global_db_conn()?;

    let (subuser, _subuser_rpc_keys, _status_code) = match subuser {
        None => {
@ -1,17 +1,19 @@
use crate::{errors::Web3ProxyError, relational_db::DatabaseReplica};
use crate::{app::Web3ProxyApp, errors::Web3ProxyError, relational_db::DatabaseReplica};
use derivative::Derivative;
use migration::{
    sea_orm::{DatabaseConnection, DatabaseTransaction, TransactionTrait},
    DbErr,
};
use std::sync::{Arc, LazyLock};
use tokio::sync::RwLock as AsyncRwLock;
use parking_lot::RwLock;
use std::sync::{Arc, LazyLock, OnceLock};

pub static DB_CONN: LazyLock<AsyncRwLock<Result<DatabaseConnection, DatabaseError>>> =
    LazyLock::new(|| AsyncRwLock::new(Err(DatabaseError::NotConfigured)));
pub static APP: OnceLock<Arc<Web3ProxyApp>> = OnceLock::new();

pub static DB_REPLICA: LazyLock<AsyncRwLock<Result<DatabaseReplica, DatabaseError>>> =
    LazyLock::new(|| AsyncRwLock::new(Err(DatabaseError::NotConfigured)));
pub static DB_CONN: LazyLock<RwLock<Result<DatabaseConnection, DatabaseError>>> =
    LazyLock::new(|| RwLock::new(Err(DatabaseError::NotConfigured)));

pub static DB_REPLICA: LazyLock<RwLock<Result<DatabaseReplica, DatabaseError>>> =
    LazyLock::new(|| RwLock::new(Err(DatabaseError::NotConfigured)));

#[derive(Clone, Debug, Derivative)]
pub enum DatabaseError {
@ -32,14 +34,15 @@ impl From<DatabaseError> for Web3ProxyError {
    }
}

/// TODO: do we need this clone? should we just do DB_CONN.read() whenever we need a Connection?
#[inline]
pub async fn global_db_conn() -> Result<DatabaseConnection, DatabaseError> {
    DB_CONN.read().await.clone()
pub fn global_db_conn() -> Result<DatabaseConnection, DatabaseError> {
    DB_CONN.read().clone()
}

#[inline]
pub async fn global_db_transaction() -> Result<DatabaseTransaction, DatabaseError> {
    let x = global_db_conn().await?;
    let x = global_db_conn()?;

    let x = x
        .begin()
@ -49,7 +52,8 @@ pub async fn global_db_transaction() -> Result<DatabaseTransaction, DatabaseError>
    Ok(x)
}

/// TODO: do we need this clone?
#[inline]
pub async fn global_db_replica_conn() -> Result<DatabaseReplica, DatabaseError> {
    DB_REPLICA.read().await.clone()
pub fn global_db_replica_conn() -> Result<DatabaseReplica, DatabaseError> {
    DB_REPLICA.read().clone()
}
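The switch from tokio's async RwLock to parking_lot's synchronous one is what lets global_db_conn() drop its .await and its callers stay sync. A minimal sketch of the same shape, assuming the parking_lot crate and a placeholder connection type:

use std::sync::LazyLock;

use parking_lot::RwLock;

#[derive(Clone, Debug)]
enum DatabaseError {
    NotConfigured,
}

// stand-in for sea-orm's DatabaseConnection
type DbConn = String;

static DB_CONN: LazyLock<RwLock<Result<DbConn, DatabaseError>>> =
    LazyLock::new(|| RwLock::new(Err(DatabaseError::NotConfigured)));

fn global_db_conn() -> Result<DbConn, DatabaseError> {
    // synchronous read; cloning keeps the lock hold time short
    DB_CONN.read().clone()
}

Cloning out of the guard (rather than returning it) is the design choice that keeps lock contention low at the cost of a per-call clone, which the original TODO comments also question.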
@ -20,17 +20,17 @@ use tokio::time::sleep;

use crate::app::Web3ProxyApp;
use crate::errors::{Web3ProxyError, Web3ProxyResult};
use crate::frontend::authorization::{Authorization, RequestMetadata, RequestOrMethod};
use crate::frontend::authorization::{Authorization, RequestOrMethod, Web3Request};
use crate::response_cache::JsonRpcResponseEnum;

pub trait JsonRpcParams = fmt::Debug + serde::Serialize + Send + Sync + 'static;
pub trait JsonRpcResultData = serde::Serialize + serde::de::DeserializeOwned + fmt::Debug + Send;

// TODO: borrow values to avoid allocs if possible
/// TODO: borrow values to avoid allocs if possible
#[derive(Debug, Serialize)]
pub struct ParsedResponse<T = Arc<RawValue>> {
    jsonrpc: String,
    id: Option<Box<RawValue>>,
    pub jsonrpc: String,
    pub id: Box<RawValue>,
    #[serde(flatten)]
    pub payload: Payload<T>,
}
@ -40,7 +40,7 @@ impl ParsedResponse {
        let result = serde_json::value::to_raw_value(&value)
            .expect("this should not fail")
            .into();
        Self::from_result(result, Some(id))
        Self::from_result(result, id)
    }
}

@ -49,16 +49,16 @@ impl ParsedResponse<Arc<RawValue>> {
        match data {
            JsonRpcResponseEnum::NullResult => {
                let x: Box<RawValue> = Default::default();
                Self::from_result(Arc::from(x), Some(id))
                Self::from_result(Arc::from(x), id)
            }
            JsonRpcResponseEnum::RpcError { error_data, .. } => Self::from_error(error_data, id),
            JsonRpcResponseEnum::Result { value, .. } => Self::from_result(value, Some(id)),
            JsonRpcResponseEnum::Result { value, .. } => Self::from_result(value, id),
        }
    }
}

impl<T> ParsedResponse<T> {
    pub fn from_result(result: T, id: Option<Box<RawValue>>) -> Self {
    pub fn from_result(result: T, id: Box<RawValue>) -> Self {
        Self {
            jsonrpc: "2.0".to_string(),
            id,
@ -69,7 +69,7 @@ impl<T> ParsedResponse<T> {
    pub fn from_error(error: JsonRpcErrorData, id: Box<RawValue>) -> Self {
        Self {
            jsonrpc: "2.0".to_string(),
            id: Some(id),
            id,
            payload: Payload::Error { error },
        }
    }
@ -171,6 +171,8 @@ where
    }
}

        let id = id.unwrap_or_default();

        // jsonrpc version must be present in all responses
        let jsonrpc = jsonrpc
            .ok_or_else(|| de::Error::missing_field("jsonrpc"))?
@ -209,7 +211,7 @@ pub enum Payload<T> {
pub struct StreamResponse {
    buffer: Bytes,
    response: reqwest::Response,
    request_metadata: Arc<RequestMetadata>,
    web3_request: Arc<Web3Request>,
}

impl StreamResponse {
@ -233,7 +235,7 @@ impl IntoResponse for StreamResponse {
            .map_ok(move |x| {
                let len = x.len();

                self.request_metadata.add_response(len);
                self.web3_request.add_response(len);

                x
            });
@ -257,7 +259,7 @@ where
    pub async fn read_if_short(
        mut response: reqwest::Response,
        nbytes: u64,
        request_metadata: Arc<RequestMetadata>,
        web3_request: Arc<Web3Request>,
    ) -> Result<SingleResponse<T>, ProviderError> {
        match response.content_length() {
            // short
@ -266,7 +268,7 @@ where
            Some(_) => Ok(Self::Stream(StreamResponse {
                buffer: Bytes::new(),
                response,
                request_metadata,
                web3_request,
            })),
            None => {
                let mut buffer = BytesMut::new();
@ -282,7 +284,7 @@ where
                Ok(Self::Stream(StreamResponse {
                    buffer,
                    response,
                    request_metadata,
                    web3_request,
                }))
            }
        }
@ -312,6 +314,17 @@ where
        },
    }
}

    pub fn set_id(&mut self, id: Box<RawValue>) {
        match self {
            SingleResponse::Parsed(x) => {
                x.id = id;
            }
            SingleResponse::Stream(..) => {
                // stream responses will hopefully always have the right id already because we pass the original id all the way from the front to the back
            }
        }
    }
}

impl<T> From<ParsedResponse<T>> for SingleResponse<T> {
@ -381,6 +394,7 @@ where
pub struct JsonRpcRequest {
    pub jsonrpc: String,
    /// id could be a stricter type, but many rpcs do things against the spec
    /// TODO: this gets cloned into the response object often. would an Arc be better? That has its own overhead and these are short strings
    pub id: Box<RawValue>,
    pub method: String,
    #[serde_inline_default(serde_json::Value::Null)]
@ -392,6 +406,7 @@ pub enum JsonRpcId {
    None,
    Number(u64),
    String(String),
    Raw(Box<RawValue>),
}

impl JsonRpcId {
@ -403,6 +418,7 @@ impl JsonRpcId {
                serde_json::from_value(json!(x)).expect("number id should always work")
            }
            Self::String(x) => serde_json::from_str(&x).expect("string id should always work"),
            Self::Raw(x) => x,
        }
    }
}
@ -473,7 +489,7 @@ impl JsonRpcRequestEnum {
    /// returns the id of the first invalid result (if any). None is good
    pub async fn tarpit_invalid(
        &self,
        app: &Web3ProxyApp,
        app: &Arc<Web3ProxyApp>,
        authorization: &Arc<Authorization>,
        duration: Duration,
    ) -> Result<(), AxumResponse> {
@ -486,11 +502,16 @@ impl JsonRpcRequestEnum {
            .expect("JsonRpcRequestEnum should always serialize")
            .len();

        let request = RequestOrMethod::Method("invalid_method", size);

        // TODO: create a stat so we can penalize
        // TODO: what request size
        let metadata = RequestMetadata::new(app, authorization.clone(), request, None).await;
        let metadata = Web3Request::new_with_app(
            app,
            authorization.clone(),
            None,
            RequestOrMethod::Method("invalid_method".into(), size),
            None,
        )
        .await;

        metadata
            .user_error_response
@ -676,26 +697,22 @@ impl JsonRpcRequest {
}

impl JsonRpcForwardedResponse {
    pub fn from_anyhow_error(
        err: anyhow::Error,
        code: Option<i64>,
        id: Option<Box<RawValue>>,
    ) -> Self {
    pub fn from_anyhow_error(err: anyhow::Error, code: Option<i64>, id: Box<RawValue>) -> Self {
        let message = format!("{:?}", err);

        Self::from_string(message, code, id)
    }

    pub fn from_str(message: &str, code: Option<i64>, id: Option<Box<RawValue>>) -> Self {
    pub fn from_str(message: &str, code: Option<i64>, id: Box<RawValue>) -> Self {
        Self::from_string(message.to_string(), code, id)
    }

    pub fn from_string(message: String, code: Option<i64>, id: Option<Box<RawValue>>) -> Self {
    pub fn from_string(message: String, code: Option<i64>, id: Box<RawValue>) -> Self {
        // TODO: this is too verbose. plenty of errors are valid, like users giving an invalid address. no need to log that
        // TODO: can we somehow get the initial request here? if we put that into a tracing span, will things slow down a ton?
        JsonRpcForwardedResponse {
            jsonrpc: "2.0",
            id: id.unwrap_or_default(),
            id,
            result: None,
            error: Some(JsonRpcErrorData {
                code: code.unwrap_or(-32099),
@ -772,7 +789,7 @@ mod tests {
    fn serialize_response() {
        let obj = ParsedResponse {
            jsonrpc: "2.0".to_string(),
            id: None,
            id: Default::default(),
            payload: Payload::Success {
                result: serde_json::value::RawValue::from_string("100".to_string()).unwrap(),
            },
155 web3_proxy/src/kafka.rs Normal file
@ -0,0 +1,155 @@
use crate::app::Web3ProxyApp;
use crate::frontend::authorization::{Authorization, RequestOrMethod};
use core::fmt;
use ethers::types::U64;
use rdkafka::message::{Header as KafkaHeader, OwnedHeaders as KafkaOwnedHeaders, OwnedMessage};
use rdkafka::producer::{FutureProducer, FutureRecord};
use rdkafka::util::Timeout as KafkaTimeout;
use std::sync::atomic::{self, AtomicUsize};
use std::sync::Arc;
use std::time::Duration;
use tokio::task::JoinHandle;
use tracing::error;
use ulid::Ulid;

pub struct KafkaDebugLogger {
    topic: String,
    key: Vec<u8>,
    headers: KafkaOwnedHeaders,
    producer: FutureProducer,
    num_requests: AtomicUsize,
    num_responses: AtomicUsize,
}

impl fmt::Debug for KafkaDebugLogger {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("KafkaDebugLogger")
            .field("topic", &self.topic)
            .finish_non_exhaustive()
    }
}

type KafkaLogResult = Result<(i32, i64), (rdkafka::error::KafkaError, OwnedMessage)>;

impl KafkaDebugLogger {
    pub fn try_new(
        app: &Web3ProxyApp,
        authorization: Arc<Authorization>,
        head_block_num: Option<U64>,
        kafka_topic: &str,
        request_ulid: Ulid,
    ) -> Option<Arc<Self>> {
        let kafka_producer = app.kafka_producer.clone()?;

        let kafka_topic = kafka_topic.to_string();

        let rpc_secret_key_id = authorization
            .checks
            .rpc_secret_key_id
            .map(|x| x.get())
            .unwrap_or_default();

        let kafka_key =
            rmp_serde::to_vec(&rpc_secret_key_id).expect("ids should always serialize with rmp");

        let chain_id = app.config.chain_id;

        let head_block_num = head_block_num.or_else(|| app.balanced_rpcs.head_block_num());

        // TODO: would be nice to have the block hash too

        // another item is added with the response, so initial_capacity is +1 what is needed here
        let kafka_headers = KafkaOwnedHeaders::new_with_capacity(6)
            .insert(KafkaHeader {
                key: "rpc_secret_key_id",
                value: authorization
                    .checks
                    .rpc_secret_key_id
                    .map(|x| x.to_string())
                    .as_ref(),
            })
            .insert(KafkaHeader {
                key: "ip",
                value: Some(&authorization.ip.to_string()),
            })
            .insert(KafkaHeader {
                key: "request_ulid",
                value: Some(&request_ulid.to_string()),
            })
            .insert(KafkaHeader {
                key: "head_block_num",
                value: head_block_num.map(|x| x.to_string()).as_ref(),
            })
            .insert(KafkaHeader {
                key: "chain_id",
                value: Some(&chain_id.to_le_bytes()),
            });

        // save the key and headers for when we log the response
        let x = Self {
            topic: kafka_topic,
            key: kafka_key,
            headers: kafka_headers,
            producer: kafka_producer,
            num_requests: 0.into(),
            num_responses: 0.into(),
        };

        let x = Arc::new(x);

        Some(x)
    }

    fn background_log(&self, payload: Vec<u8>) -> JoinHandle<KafkaLogResult> {
        let topic = self.topic.clone();
        let key = self.key.clone();
        let producer = self.producer.clone();
        let headers = self.headers.clone();

        let f = async move {
            let record = FutureRecord::to(&topic)
                .key(&key)
                .payload(&payload)
                .headers(headers);

            let produce_future =
                producer.send(record, KafkaTimeout::After(Duration::from_secs(5 * 60)));

            let kafka_response = produce_future.await;

            if let Err((err, msg)) = kafka_response.as_ref() {
                error!("produce kafka request: {} - {:?}", err, msg);
                // TODO: re-queue the msg? log somewhere else like a file on disk?
                // TODO: this is bad and should probably trigger an alarm
            };

            kafka_response
        };

        tokio::spawn(f)
    }

    /// for opt-in debug usage, log the request to kafka
    /// TODO: generic type for request
    pub fn log_debug_request(&self, request: &RequestOrMethod) -> JoinHandle<KafkaLogResult> {
        // TODO: is rust message pack a good choice? try rkyv instead
        let payload =
            rmp_serde::to_vec(&request).expect("requests should always serialize with rmp");

        self.num_requests.fetch_add(1, atomic::Ordering::Relaxed);

        self.background_log(payload)
    }

    pub fn log_debug_response<R>(&self, response: &R) -> JoinHandle<KafkaLogResult>
    where
        R: serde::Serialize,
    {
        let payload =
            rmp_serde::to_vec(&response).expect("requests should always serialize with rmp");

        self.num_responses.fetch_add(1, atomic::Ordering::Relaxed);

        self.background_log(payload)
    }
}
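All kafka keys and payloads above go through rmp_serde (MessagePack); the TODO floats rkyv as a possible replacement. A quick round-trip showing the encode/decode pair the logger relies on, with a hypothetical payload struct:

use serde::{Deserialize, Serialize};

#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct DebugPayload {
    method: String,
    request_bytes: usize,
}

fn main() {
    let payload = DebugPayload {
        method: "eth_getBalance".to_string(),
        request_bytes: 120,
    };

    // same call the logger uses to build the kafka payload
    let bytes = rmp_serde::to_vec(&payload).expect("serialize with rmp");

    // and the matching decode for a consumer
    let roundtrip: DebugPayload = rmp_serde::from_slice(&bytes).expect("deserialize with rmp");

    assert_eq!(payload, roundtrip);
}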
@ -16,6 +16,7 @@ pub mod frontend;
pub mod globals;
pub mod http_params;
pub mod jsonrpc;
pub mod kafka;
pub mod pagerduty;
pub mod prelude;
pub mod premium;
@ -24,6 +25,7 @@ pub mod referral_code;
pub mod relational_db;
pub mod response_cache;
pub mod rpcs;
pub mod secrets;
pub mod stats;
pub mod test_utils;
pub mod user_token;
@ -1,6 +1,7 @@
use crate::{
    block_number::BlockNumAndHash,
    block_number::{BlockNumAndHash, CacheMode},
    errors::{Web3ProxyError, Web3ProxyResult},
    frontend::authorization::RequestOrMethod,
    jsonrpc::{self, JsonRpcErrorData},
};
use derive_more::From;
@ -18,15 +19,15 @@ use std::{
};

#[derive(Clone, Debug, Eq, From)]
pub struct JsonRpcQueryCacheKey {
    /// hashed params
pub struct JsonRpcQueryCacheKey<'a> {
    /// hashed params so that
    hash: u64,
    from_block: Option<BlockNumAndHash>,
    to_block: Option<BlockNumAndHash>,
    cache_errors: bool,
    from_block: Option<&'a BlockNumAndHash>,
    to_block: Option<&'a BlockNumAndHash>,
    cache_jsonrpc_errors: bool,
}

impl JsonRpcQueryCacheKey {
impl JsonRpcQueryCacheKey<'_> {
    pub fn hash(&self) -> u64 {
        self.hash
    }
@ -37,46 +38,42 @@ impl JsonRpcQueryCacheKey {
        self.to_block.as_ref().map(|x| x.num())
    }
    pub fn cache_errors(&self) -> bool {
        self.cache_errors
        self.cache_jsonrpc_errors
    }
}

impl PartialEq for JsonRpcQueryCacheKey {
impl PartialEq for JsonRpcQueryCacheKey<'_> {
    fn eq(&self, other: &Self) -> bool {
        self.hash.eq(&other.hash)
    }
}

impl Hash for JsonRpcQueryCacheKey {
impl Hash for JsonRpcQueryCacheKey<'_> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // TODO: i feel like this hashes twice. oh well
        self.hash.hash(state);
    }
}

impl JsonRpcQueryCacheKey {
    pub fn new(
        from_block: Option<BlockNumAndHash>,
        to_block: Option<BlockNumAndHash>,
        method: &str,
        params: &serde_json::Value,
        cache_errors: bool,
    ) -> Self {
        let from_block_hash = from_block.as_ref().map(|x| x.hash());
        let to_block_hash = to_block.as_ref().map(|x| x.hash());
impl<'a> JsonRpcQueryCacheKey<'a> {
    pub fn new(cache_mode: &'a CacheMode, request: &'a RequestOrMethod) -> Self {
        // TODO: do this without clone
        let from_block = cache_mode.from_block();
        let to_block = cache_mode.to_block();
        let cache_jsonrpc_errors = cache_mode.cache_jsonrpc_errors();

        let mut hasher = DefaultHashBuilder::default().build_hasher();

        from_block_hash.hash(&mut hasher);
        to_block_hash.hash(&mut hasher);
        from_block.hash(&mut hasher);
        to_block.hash(&mut hasher);

        method.hash(&mut hasher);
        request.method().hash(&mut hasher);

        // TODO: make sure preserve_order feature is OFF
        // TODO: is there a faster way to do this?
        params.to_string().hash(&mut hasher);
        request.params().to_string().hash(&mut hasher);

        cache_errors.hash(&mut hasher);
        cache_jsonrpc_errors.hash(&mut hasher);

        let hash = hasher.finish();

@ -84,7 +81,7 @@ impl JsonRpcQueryCacheKey {
            hash,
            from_block,
            to_block,
            cache_errors,
            cache_jsonrpc_errors,
        }
    }
}
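JsonRpcQueryCacheKey::new folds the block bounds, the method, the serialized params, and the error-caching flag into a single u64 up front, so later lookups and comparisons are just integer equality. A simplified sketch of that hashing with std's hasher (the real code uses hashbrown's DefaultHashBuilder, and hashes BlockNumAndHash values rather than plain numbers):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn cache_key(
    from_block: Option<u64>,
    to_block: Option<u64>,
    method: &str,
    params: &serde_json::Value,
    cache_errors: bool,
) -> u64 {
    let mut hasher = DefaultHasher::new();

    from_block.hash(&mut hasher);
    to_block.hash(&mut hasher);
    method.hash(&mut hasher);
    // hash the serialized params; this requires a stable key order (preserve_order off)
    params.to_string().hash(&mut hasher);
    cache_errors.hash(&mut hasher);

    hasher.finish()
}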
@ -4,7 +4,6 @@ use super::many::Web3Rpcs;
use super::one::Web3Rpc;
use crate::config::{average_block_interval, BlockAndRpc};
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
use derive_more::From;
use ethers::prelude::{Block, TxHash, H256, U64};
use moka::future::Cache;
use serde::ser::SerializeStruct;
@ -23,14 +22,9 @@ pub type ArcBlock = Arc<Block<TxHash>>;
pub type BlocksByHashCache = Cache<H256, Web3ProxyBlock>;
pub type BlocksByNumberCache = Cache<U64, H256>;

/// A block and its age.
#[derive(Clone, Debug, Default, From)]
pub struct Web3ProxyBlock {
    pub block: ArcBlock,
    /// number of seconds this block was behind the current time when received
    /// this is only set if the block is from a subscription
    pub received_age: Option<u64>,
}
/// A block and its age with a less verbose serialized format
#[derive(Clone, Debug, Default)]
pub struct Web3ProxyBlock(ArcBlock);

impl Serialize for Web3ProxyBlock {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@ -43,10 +37,10 @@ impl Serialize for Web3ProxyBlock {
        state.serialize_field("age", &self.age())?;

        let block = json!({
            "hash": self.block.hash,
            "parent_hash": self.block.parent_hash,
            "number": self.block.number,
            "timestamp": self.block.timestamp,
            "hash": self.0.hash,
            "parent_hash": self.0.parent_hash,
            "number": self.0.number,
            "timestamp": self.0.timestamp,
        });

        state.serialize_field("block", &block)?;
@ -57,7 +51,7 @@ impl Serialize for Web3ProxyBlock {

impl PartialEq for Web3ProxyBlock {
    fn eq(&self, other: &Self) -> bool {
        match (self.block.hash, other.block.hash) {
        match (self.0.hash, other.0.hash) {
            (None, None) => true,
            (Some(_), None) => false,
            (None, Some(_)) => false,
@ -70,34 +64,24 @@ impl Eq for Web3ProxyBlock {}

impl Hash for Web3ProxyBlock {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.block.hash.hash(state);
        self.0.hash.hash(state);
    }
}

impl Web3ProxyBlock {
    /// A new block has arrived over a subscription
    /// A new block has arrived over a subscription. skip it if its empty
    pub fn try_new(block: ArcBlock) -> Option<Self> {
        if block.number.is_none() || block.hash.is_none() {
            return None;
        }

        let mut x = Self {
            block,
            received_age: None,
        };

        // no need to recalculate lag every time
        // if the head block gets too old, a health check restarts this connection
        // TODO: emit a stat for received_age
        x.received_age = Some(x.age().as_secs());

        Some(x)
        Some(Self(block))
    }

    pub fn age(&self) -> Duration {
        let now = chrono::Utc::now().timestamp();

        let block_timestamp = self.block.timestamp.as_u32() as i64;
        let block_timestamp = self.0.timestamp.as_u32() as i64;

        let x = if block_timestamp < now {
            // this server is still syncing from too far away to serve requests
@ -112,44 +96,27 @@ impl Web3ProxyBlock {

    #[inline(always)]
    pub fn parent_hash(&self) -> &H256 {
        &self.block.parent_hash
        &self.0.parent_hash
    }

    #[inline(always)]
    pub fn hash(&self) -> &H256 {
        self.block
            .hash
            .as_ref()
            .expect("saved blocks must have a hash")
        self.0.hash.as_ref().expect("saved blocks must have a hash")
    }

    #[inline(always)]
    pub fn number(&self) -> &U64 {
        self.block
            .number
            .as_ref()
            .expect("saved blocks must have a number")
    pub fn number(&self) -> U64 {
        self.0.number.expect("saved blocks must have a number")
    }

    #[inline(always)]
    pub fn transactions(&self) -> &[TxHash] {
        &self.0.transactions
    }

    #[inline(always)]
    pub fn uncles(&self) -> &[H256] {
        &self.block.uncles
    }
}

impl TryFrom<ArcBlock> for Web3ProxyBlock {
    type Error = Web3ProxyError;

    fn try_from(x: ArcBlock) -> Result<Self, Self::Error> {
        if x.number.is_none() || x.hash.is_none() {
            return Err(Web3ProxyError::NoBlockNumberOrHash);
        }

        let b = Web3ProxyBlock {
            block: x,
            received_age: None,
        };

        Ok(b)
        &self.0.uncles
    }
}

@ -165,6 +132,14 @@ impl Display for Web3ProxyBlock {
    }
}

impl TryFrom<ArcBlock> for Web3ProxyBlock {
    type Error = Web3ProxyError;

    fn try_from(block: ArcBlock) -> Result<Self, Self::Error> {
        Self::try_new(block).ok_or(Web3ProxyError::NoBlocksKnown)
    }
}
impl Web3Rpcs {
|
||||
/// add a block to our mappings and track the heaviest chain
|
||||
pub async fn try_cache_block(
|
||||
@ -187,7 +162,7 @@ impl Web3Rpcs {
|
||||
|
||||
// TODO: if there is an existing entry with a different block_hash,
|
||||
// TODO: use entry api to handle changing existing entries
|
||||
self.blocks_by_number.insert(*block_num, block_hash).await;
|
||||
self.blocks_by_number.insert(block_num, block_hash).await;
|
||||
|
||||
for uncle in block.uncles() {
|
||||
self.blocks_by_hash.invalidate(uncle).await;
|
||||
@ -277,7 +252,7 @@ impl Web3Rpcs {
|
||||
// double check that it matches the blocks_by_number cache
|
||||
let cached_hash = self
|
||||
.blocks_by_number
|
||||
.get_with_by_ref(block.number(), async { *hash })
|
||||
.get_with(block.number(), async { *hash })
|
||||
.await;
|
||||
|
||||
if cached_hash == *hash {
|
||||
@ -327,7 +302,13 @@ impl Web3Rpcs {
|
||||
|
||||
match block {
|
||||
Some(block) => {
|
||||
let block = self.try_cache_block(block.try_into()?, false).await?;
|
||||
let block = self
|
||||
.try_cache_block(
|
||||
Web3ProxyBlock::try_new(block)
|
||||
.ok_or(Web3ProxyError::UnknownBlockHash(*hash))?,
|
||||
false,
|
||||
)
|
||||
.await?;
|
||||
Ok(block)
|
||||
}
|
||||
None => Err(Web3ProxyError::UnknownBlockHash(*hash)),
|
||||
@ -365,7 +346,7 @@ impl Web3Rpcs {
|
||||
|
||||
// be sure the requested block num exists
|
||||
// TODO: is this okay? what if we aren't synced?!
|
||||
let mut head_block_num = *consensus_head_receiver
|
||||
let mut head_block_num = consensus_head_receiver
|
||||
.borrow_and_update()
|
||||
.as_ref()
|
||||
.web3_context("no consensus head block")?
|
||||
@ -386,7 +367,7 @@ impl Web3Rpcs {
|
||||
consensus_head_receiver.changed().await?;
|
||||
|
||||
if let Some(head) = consensus_head_receiver.borrow_and_update().as_ref() {
|
||||
head_block_num = *head.number();
|
||||
head_block_num = head.number();
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -398,7 +379,11 @@ impl Web3Rpcs {
|
||||
.await?
|
||||
.ok_or(Web3ProxyError::NoBlocksKnown)?;
|
||||
|
||||
let block = Web3ProxyBlock::try_from(response)?;
|
||||
let block =
|
||||
Web3ProxyBlock::try_new(response).ok_or(Web3ProxyError::UnknownBlockNumber {
|
||||
known: head_block_num,
|
||||
unknown: *num,
|
||||
})?;
|
||||
|
||||
// the block was fetched using eth_getBlockByNumber, so it should have all fields and be on the heaviest chain
|
||||
let block = self.try_cache_block(block, true).await?;
|
||||
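
The hunk above collapses the two-field `Web3ProxyBlock { block, received_age }` into a single-field newtype and derives age from the block timestamp on demand, so there is no cached field to keep in sync. A minimal, self-contained sketch of that pattern (the `Block` struct here is a hypothetical stand-in for the ethers type):

```rust
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

// stand-in for ethers' Block<TxHash>; only the fields this sketch needs
struct Block {
    number: Option<u64>,
    hash: Option<[u8; 32]>,
    timestamp: u64, // unix seconds
}

// newtype wrapper: no cached `received_age`, so nothing can drift out of sync
struct ProxyBlock(Arc<Block>);

impl ProxyBlock {
    // skip blocks that are missing a number or hash instead of storing them
    fn try_new(block: Arc<Block>) -> Option<Self> {
        if block.number.is_none() || block.hash.is_none() {
            return None;
        }
        Some(Self(block))
    }

    // age is computed from the timestamp every time it is asked for
    fn age(&self) -> Duration {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
        Duration::from_secs(now.saturating_sub(self.0.timestamp))
    }

    // accessors return owned Copy values, matching the `number() -> U64` change
    fn number(&self) -> u64 {
        self.0.number.expect("saved blocks must have a number")
    }
}

fn main() {
    let block = Arc::new(Block { number: Some(1), hash: Some([0; 32]), timestamp: 0 });
    let b = ProxyBlock::try_new(block).expect("block has number and hash");
    println!("block {} is {:?} old", b.number(), b.age());
}
```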

@@ -26,7 +26,7 @@ struct ConsensusRpcData {

impl ConsensusRpcData {
fn new(rpc: &Web3Rpc, head: &Web3ProxyBlock) -> Self {
let head_block_num = *head.number();
let head_block_num = head.number();

let block_data_limit = rpc.block_data_limit();

@@ -39,8 +39,8 @@ impl ConsensusRpcData {
}

// TODO: take an enum for the type of data (hrtc)
fn data_available(&self, block_num: &U64) -> bool {
*block_num >= self.oldest_block_num && *block_num <= self.head_block_num
fn data_available(&self, block_num: U64) -> bool {
block_num >= self.oldest_block_num && block_num <= self.head_block_num
}
}

@@ -119,7 +119,7 @@ impl RankedRpcs {
let mut votes: Vec<_> = votes
.into_iter()
.filter_map(|(block, (rpcs, sum_soft_limit))| {
if *block.number() < max_lag_block
if block.number() < max_lag_block
|| sum_soft_limit < min_sum_soft_limit
|| rpcs.len() < min_synced_rpcs
{
@@ -133,7 +133,7 @@ impl RankedRpcs {
// sort the votes
votes.sort_by_key(|(block, sum_soft_limit, _)| {
(
Reverse(*block.number()),
Reverse(block.number()),
// TODO: block total difficulty (if we have it)
Reverse(*sum_soft_limit),
// TODO: median/peak latency here?
@@ -158,7 +158,7 @@ impl RankedRpcs {
continue;
}

if *x_head.number() < max_lag_block {
if x_head.number() < max_lag_block {
// server is too far behind
continue;
}
@@ -167,7 +167,7 @@ impl RankedRpcs {
}

ranked_rpcs
.sort_by_cached_key(|x| x.sort_for_load_balancing_on(Some(*best_block.number())));
.sort_by_cached_key(|x| x.sort_for_load_balancing_on(Some(best_block.number())));

// consensus found!
trace!(?ranked_rpcs);
@@ -201,16 +201,17 @@ impl RankedRpcs {
}

/// will tell you if waiting for a block will eventually succeed
/// TODO: return if backup will be needed to serve the request
/// TODO: serve now if a backup server has the data
/// TODO: error if backup will be needed to serve the request?
/// TODO: serve now if a backup server has the data?
/// TODO: also include method (or maybe an enum representing the different prune types)
pub fn should_wait_for_block(
&self,
needed_block_num: Option<&U64>,
min_block_num: Option<U64>,
max_block_num: Option<U64>,
skip_rpcs: &[Arc<Web3Rpc>],
) -> ShouldWaitForBlock {
for rpc in self.inner.iter() {
match self.rpc_will_work_eventually(rpc, needed_block_num, skip_rpcs) {
match self.rpc_will_work_eventually(rpc, min_block_num, max_block_num, skip_rpcs) {
ShouldWaitForBlock::NeverReady => continue,
x => return x,
}
@@ -220,7 +221,7 @@ impl RankedRpcs {
}

/// TODO: change this to take a min and a max
pub fn has_block_data(&self, rpc: &Web3Rpc, block_num: &U64) -> bool {
pub fn has_block_data(&self, rpc: &Web3Rpc, block_num: U64) -> bool {
self.rpc_data
.get(rpc)
.map(|x| x.data_available(block_num))
@@ -233,7 +234,8 @@ impl RankedRpcs {
pub fn rpc_will_work_eventually(
&self,
rpc: &Arc<Web3Rpc>,
needed_block_num: Option<&U64>,
min_block_num: Option<U64>,
max_block_num: Option<U64>,
skip_rpcs: &[Arc<Web3Rpc>],
) -> ShouldWaitForBlock {
if skip_rpcs.contains(rpc) {
@@ -241,9 +243,20 @@ impl RankedRpcs {
return ShouldWaitForBlock::NeverReady;
}

if let Some(needed_block_num) = needed_block_num {
if let Some(min_block_num) = min_block_num {
if !self.has_block_data(rpc, min_block_num) {
trace!(
"{} is missing min_block_num ({}). will not work eventually",
rpc,
min_block_num,
);
return ShouldWaitForBlock::NeverReady;
}
}

if let Some(needed_block_num) = max_block_num {
if let Some(rpc_data) = self.rpc_data.get(rpc) {
match rpc_data.head_block_num.cmp(needed_block_num) {
match rpc_data.head_block_num.cmp(&needed_block_num) {
Ordering::Less => {
trace!("{} is behind. let it catch up", rpc);
// TODO: what if this is a pruned rpc that is behind by a lot, and the block is old, too?
@@ -277,8 +290,8 @@ impl RankedRpcs {
pub fn rpc_will_work_now(
&self,
skip: &[Arc<Web3Rpc>],
min_block_needed: Option<&U64>,
max_block_needed: Option<&U64>,
min_block_needed: Option<U64>,
max_block_needed: Option<U64>,
rpc: &Arc<Web3Rpc>,
) -> bool {
if skip.contains(rpc) {
@@ -344,7 +357,7 @@ impl Web3Rpcs {
/// note: you probably want to use `head_block` instead
/// TODO: return a ref?
pub fn head_block_num(&self) -> Option<U64> {
self.head_block().map(|x| *x.number())
self.head_block().map(|x| x.number())
}

pub fn synced(&self) -> bool {
@@ -489,7 +502,7 @@ impl ConsensusFinder {
Some(old_consensus_connections) => {
let old_head_block = &old_consensus_connections.head_block;

match consensus_head_block.number().cmp(old_head_block.number()) {
match consensus_head_block.number().cmp(&old_head_block.number()) {
Ordering::Equal => {
// multiple blocks with the same fork!
if consensus_head_block.hash() == old_head_block.hash() {
@@ -805,7 +818,7 @@ impl ConsensusFinder {

trace!("max_lag_block_number: {}", max_lag_block_number);

let lowest_block_number = lowest_block.number().max(&max_lag_block_number);
let lowest_block_number = lowest_block.number().max(max_lag_block_number);

// TODO: should lowest block number be set such that the rpc won't ever go backwards?
trace!("safe lowest_block_number: {}", lowest_block_number);
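
A recurring change in this file is swapping `&U64` parameters for `U64`. Since `U64` is a small `Copy` type, passing it by value drops a layer of indirection and all the `*`/`&` noise at call sites. A sketch of the same before/after using plain `u64` (which behaves the same way for this purpose):

```rust
// before: callers need &num, and the body needs to deref everywhere
fn data_available_by_ref(oldest: &u64, head: &u64, block_num: &u64) -> bool {
    *block_num >= *oldest && *block_num <= *head
}

// after: Copy types travel by value; call sites and bodies both get simpler
fn data_available(oldest: u64, head: u64, block_num: u64) -> bool {
    block_num >= oldest && block_num <= head
}

fn main() {
    let (oldest, head) = (100, 200);
    assert!(data_available_by_ref(&oldest, &head, &150));
    assert!(data_available(oldest, head, 150));
    assert!(!data_available(oldest, head, 201));
}
```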

File diff suppressed because it is too large
@@ -5,7 +5,7 @@ use super::request::{OpenRequestHandle, OpenRequestResult};
use crate::app::{flatten_handle, Web3ProxyJoinHandle};
use crate::config::{BlockAndRpc, Web3RpcConfig};
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
use crate::frontend::authorization::RequestMetadata;
use crate::frontend::authorization::Web3Request;
use crate::jsonrpc::{self, JsonRpcParams, JsonRpcResultData};
use crate::rpcs::request::RequestErrorHandler;
use anyhow::{anyhow, Context};
@@ -34,6 +34,7 @@ use url::Url;
#[derive(Default)]
pub struct Web3Rpc {
pub name: String,
pub chain_id: u64,
pub block_interval: Duration,
pub display_name: Option<String>,
pub db_conn: Option<DatabaseConnection>,
@@ -60,7 +61,7 @@ pub struct Web3Rpc {
/// TODO: have an enum for this so that "no limit" prints pretty?
pub(super) block_data_limit: AtomicU64,
/// head_block is only inside an Option so that the "Default" derive works. it will always be set.
pub(super) head_block: Option<watch::Sender<Option<Web3ProxyBlock>>>,
pub(super) head_block_sender: Option<watch::Sender<Option<Web3ProxyBlock>>>,
/// Track head block latency.
pub(super) head_delay: AsyncRwLock<EwmaLatency>,
/// Track peak request latency
@@ -193,7 +194,7 @@ impl Web3Rpc {
display_name: config.display_name,
hard_limit,
hard_limit_until: Some(hard_limit_until),
head_block: Some(head_block),
head_block_sender: Some(head_block),
http_url,
http_client,
max_head_block_age,
@@ -238,9 +239,9 @@ impl Web3Rpc {
/// TODO: move this to consensus.rs
fn sort_on(&self, max_block: Option<U64>) -> (bool, Reverse<U64>, u32) {
let mut head_block = self
.head_block
.head_block_sender
.as_ref()
.and_then(|x| x.borrow().as_ref().map(|x| *x.number()))
.and_then(|x| x.borrow().as_ref().map(|x| x.number()))
.unwrap_or_default();

if let Some(max_block) = max_block {
@@ -389,39 +390,43 @@ impl Web3Rpc {
}

/// TODO: get rid of this now that consensus rpcs does it
pub fn has_block_data(&self, needed_block_num: &U64) -> bool {
let head_block_num = match self.head_block.as_ref().unwrap().borrow().as_ref() {
None => return false,
Some(x) => *x.number(),
};
pub fn has_block_data(&self, needed_block_num: U64) -> bool {
if let Some(head_block_sender) = self.head_block_sender.as_ref() {
// TODO: this needs a max of our overall head block number
let head_block_num = match head_block_sender.borrow().as_ref() {
None => return false,
Some(x) => x.number(),
};

// this rpc doesn't have that block yet. still syncing
if needed_block_num > &head_block_num {
trace!(
"{} has head {} but needs {}",
self,
head_block_num,
needed_block_num,
);
return false;
// this rpc doesn't have that block yet. still syncing
if needed_block_num > head_block_num {
trace!(
"{} has head {} but needs {}",
self,
head_block_num,
needed_block_num,
);
return false;
}

// if this is a pruning node, we might not actually have the block
let block_data_limit: U64 = self.block_data_limit();

let oldest_block_num = head_block_num.saturating_sub(block_data_limit);

if needed_block_num < oldest_block_num {
trace!(
"{} needs {} but the oldest available is {}",
self,
needed_block_num,
oldest_block_num
);
return false;
}
true
} else {
false
}

// if this is a pruning node, we might not actually have the block
let block_data_limit: U64 = self.block_data_limit();

let oldest_block_num = head_block_num.saturating_sub(block_data_limit);

if needed_block_num < &oldest_block_num {
trace!(
"{} needs {} but the oldest available is {}",
self,
needed_block_num,
oldest_block_num
);
return false;
}

true
}

/// query the web3 provider to confirm it is on the expected chain with the expected data available
@@ -468,7 +473,7 @@ impl Web3Rpc {
block_and_rpc_sender: &mpsc::UnboundedSender<BlockAndRpc>,
block_map: &BlocksByHashCache,
) -> Web3ProxyResult<()> {
let head_block_sender = self.head_block.as_ref().unwrap();
let head_block_sender = self.head_block_sender.as_ref().unwrap();

let new_head_block = match new_head_block {
Ok(x) => {
@@ -544,7 +549,7 @@ impl Web3Rpc {
self: &Arc<Self>,
error_handler: Option<RequestErrorHandler>,
) -> Web3ProxyResult<()> {
let head_block = self.head_block.as_ref().unwrap().borrow().clone();
let head_block = self.head_block_sender.as_ref().unwrap().borrow().clone();

if let Some(head_block) = head_block {
// TODO: if head block is very old and not expected to be syncing, emit warning
@@ -552,11 +557,9 @@ impl Web3Rpc {
return Err(anyhow::anyhow!("head_block is too old!").into());
}

let head_block = head_block.block;
let block_number = head_block.number();

let block_number = head_block.number.context("no block number")?;

let to = if let Some(txid) = head_block.transactions.last().cloned() {
let to = if let Some(txid) = head_block.transactions().last().cloned() {
let tx = self
.internal_request::<_, Option<Transaction>>(
"eth_getTransactionByHash",
@@ -944,7 +947,7 @@ impl Web3Rpc {
i.tick().await;
}
} else {
unimplemented!("no ws or http provider!")
return Err(anyhow!("no ws or http provider!").into());
}

// clear the head block. this might not be needed, but it won't hurt
@@ -961,7 +964,7 @@ impl Web3Rpc {

pub async fn wait_for_request_handle(
self: &Arc<Self>,
request_metadata: &Arc<RequestMetadata>,
web3_request: &Arc<Web3Request>,
max_wait: Option<Duration>,
error_handler: Option<RequestErrorHandler>,
) -> Web3ProxyResult<OpenRequestHandle> {
@@ -970,10 +973,7 @@ impl Web3Rpc {
let max_wait_until = max_wait.map(|x| Instant::now() + x);

loop {
match self
.try_request_handle(request_metadata, error_handler)
.await
{
match self.try_request_handle(web3_request, error_handler).await {
Ok(OpenRequestResult::Handle(handle)) => return Ok(handle),
Ok(OpenRequestResult::RetryAt(retry_at)) => {
// TODO: emit a stat?
@@ -1015,7 +1015,7 @@ impl Web3Rpc {

pub async fn try_request_handle(
self: &Arc<Self>,
request_metadata: &Arc<RequestMetadata>,
web3_request: &Arc<Web3Request>,
error_handler: Option<RequestErrorHandler>,
) -> Web3ProxyResult<OpenRequestResult> {
// TODO: if websocket is reconnecting, return an error?
@@ -1066,7 +1066,7 @@ impl Web3Rpc {
};

let handle =
OpenRequestHandle::new(request_metadata.clone(), self.clone(), error_handler).await;
OpenRequestHandle::new(web3_request.clone(), self.clone(), error_handler).await;

Ok(handle.into())
}
@@ -1078,25 +1078,23 @@ impl Web3Rpc {
error_handler: Option<RequestErrorHandler>,
max_wait: Option<Duration>,
) -> Web3ProxyResult<R> {
let authorization = Default::default();
let web3_request = Web3Request::new_internal(method.into(), params, None, max_wait).await;

self.authorized_request(method, params, &authorization, error_handler, max_wait)
self.authorized_request(&web3_request, error_handler, max_wait)
.await
}

pub async fn authorized_request<P: JsonRpcParams, R: JsonRpcResultData>(
pub async fn authorized_request<R: JsonRpcResultData>(
self: &Arc<Self>,
method: &str,
params: &P,
request_metadata: &Arc<RequestMetadata>,
web3_request: &Arc<Web3Request>,
error_handler: Option<RequestErrorHandler>,
max_wait: Option<Duration>,
) -> Web3ProxyResult<R> {
let handle = self
.wait_for_request_handle(request_metadata, max_wait, error_handler)
.wait_for_request_handle(web3_request, max_wait, error_handler)
.await?;

let response = handle.request::<P, R>(method, params).await?;
let response = handle.request().await?;
let parsed = response.parsed().await?;
match parsed.payload {
jsonrpc::Payload::Success { result } => Ok(result),
@@ -1174,7 +1172,7 @@ impl Serialize for Web3Rpc {

// TODO: maybe this is too much data. serialize less?
{
let head_block = self.head_block.as_ref().unwrap();
let head_block = self.head_block_sender.as_ref().unwrap();
let head_block = head_block.borrow();
let head_block = head_block.as_ref();
state.serialize_field("head_block", &head_block)?;
@@ -1244,9 +1242,9 @@ impl fmt::Debug for Web3Rpc {

f.field("weighted_ms", &self.weighted_peak_latency().as_millis());

if let Some(head_block_watch) = self.head_block.as_ref() {
if let Some(head_block_watch) = self.head_block_sender.as_ref() {
if let Some(head_block) = head_block_watch.borrow().as_ref() {
f.field("head_num", head_block.number());
f.field("head_num", &head_block.number());
f.field("head_hash", head_block.hash());
} else {
f.field("head_num", &None::<()>);
@@ -1293,15 +1291,15 @@ mod tests {
automatic_block_limit: false,
backup: false,
block_data_limit: block_data_limit.into(),
head_block: Some(tx),
head_block_sender: Some(tx),
..Default::default()
};

assert!(x.has_block_data(&0.into()));
assert!(x.has_block_data(&1.into()));
assert!(x.has_block_data(0.into()));
assert!(x.has_block_data(1.into()));
assert!(x.has_block_data(head_block.number()));
assert!(!x.has_block_data(&(head_block.number() + 1)));
assert!(!x.has_block_data(&(head_block.number() + 1000)));
assert!(!x.has_block_data(head_block.number() + 1));
assert!(!x.has_block_data(head_block.number() + 1000));
}

#[test]
@@ -1327,17 +1325,17 @@ mod tests {
automatic_block_limit: false,
backup: false,
block_data_limit: block_data_limit.into(),
head_block: Some(tx),
head_block_sender: Some(tx),
..Default::default()
};

assert!(!x.has_block_data(&0.into()));
assert!(!x.has_block_data(&1.into()));
assert!(!x.has_block_data(&(head_block.number() - block_data_limit - 1)));
assert!(x.has_block_data(&(head_block.number() - block_data_limit)));
assert!(!x.has_block_data(0.into()));
assert!(!x.has_block_data(1.into()));
assert!(!x.has_block_data(head_block.number() - block_data_limit - 1));
assert!(x.has_block_data(head_block.number() - block_data_limit));
assert!(x.has_block_data(head_block.number()));
assert!(!x.has_block_data(&(head_block.number() + 1)));
assert!(!x.has_block_data(&(head_block.number() + 1000)));
assert!(!x.has_block_data(head_block.number() + 1));
assert!(!x.has_block_data(head_block.number() + 1000));
}

/*
@@ -1380,11 +1378,11 @@ mod tests {
head_block: AsyncRwLock::new(Some(head_block.clone())),
};

assert!(!x.has_block_data(&0.into()));
assert!(!x.has_block_data(&1.into()));
assert!(!x.has_block_data(&head_block.number()));
assert!(!x.has_block_data(&(head_block.number() + 1)));
assert!(!x.has_block_data(&(head_block.number() + 1000)));
assert!(!x.has_block_data(0.into()));
assert!(!x.has_block_data(1.into()));
assert!(!x.has_block_data(head_block.number()));
assert!(!x.has_block_data(head_block.number() + 1));
assert!(!x.has_block_data(head_block.number() + 1000));
}
*/
}

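Renaming `head_block` to `head_block_sender` makes explicit that the field is the write side of a `tokio::sync::watch` channel: receivers always observe only the latest head block. A minimal runnable sketch of that pattern, with a `u64` head number standing in for the full `Web3ProxyBlock`:

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    // None until the first head block arrives, mirroring Option<Web3ProxyBlock>
    let (head_block_sender, mut head_block_receiver) = watch::channel::<Option<u64>>(None);

    tokio::spawn(async move {
        for num in 1..=3u64 {
            // send_replace overwrites the previous head; receivers only see the latest
            head_block_sender.send_replace(Some(num));
        }
    });

    // wait for changes and read the freshest value
    while head_block_receiver.changed().await.is_ok() {
        if let Some(head) = *head_block_receiver.borrow_and_update() {
            println!("head block: {head}");
            if head == 3 {
                break;
            }
        }
    }
}
```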
@@ -1,8 +1,8 @@
use super::one::Web3Rpc;
use crate::errors::{Web3ProxyErrorContext, Web3ProxyResult};
use crate::frontend::authorization::{Authorization, AuthorizationType, RequestMetadata};
use crate::frontend::authorization::{Authorization, AuthorizationType, Web3Request};
use crate::globals::{global_db_conn, DB_CONN};
use crate::jsonrpc::{self, JsonRpcParams, JsonRpcResultData};
use crate::jsonrpc::{self, JsonRpcResultData};
use chrono::Utc;
use derive_more::From;
use entities::revert_log;
@@ -28,9 +28,8 @@ pub enum OpenRequestResult {

/// Make RPC requests through this handle and drop it when you are done.
/// Opening this handle checks rate limits. Developers, try to keep opening a handle and using it as close together as possible
#[derive(Debug)]
pub struct OpenRequestHandle {
request_metadata: Arc<RequestMetadata>,
web3_request: Arc<Web3Request>,
error_handler: RequestErrorHandler,
rpc: Arc<Web3Rpc>,
}
@@ -63,6 +62,15 @@ struct EthCallFirstParams {
data: Option<Bytes>,
}

impl std::fmt::Debug for OpenRequestHandle {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("OpenRequestHandle")
.field("method", &self.web3_request.request.method())
.field("rpc", &self.rpc.name)
.finish_non_exhaustive()
}
}

impl From<Level> for RequestErrorHandler {
fn from(level: Level) -> Self {
match level {
@@ -90,7 +98,7 @@ impl Authorization {
}
};

let db_conn = global_db_conn().await?;
let db_conn = global_db_conn()?;

// TODO: should the database set the timestamp?
// we intentionally use "now" and not the time the request started
@@ -133,7 +141,7 @@ impl Drop for OpenRequestHandle {

impl OpenRequestHandle {
pub async fn new(
request_metadata: Arc<RequestMetadata>,
web3_request: Arc<Web3Request>,
rpc: Arc<Web3Rpc>,
error_handler: Option<RequestErrorHandler>,
) -> Self {
@@ -146,7 +154,7 @@ impl OpenRequestHandle {
let error_handler = error_handler.unwrap_or_default();

Self {
request_metadata,
web3_request,
error_handler,
rpc,
}
@@ -165,17 +173,18 @@ impl OpenRequestHandle {
/// By having the request method here, we ensure that the rate limiter was called and connection counts were properly incremented
/// depending on how things are locked, you might need to pass the provider in
/// we take self to ensure this function only runs once
pub async fn request<P: JsonRpcParams, R: JsonRpcResultData + serde::Serialize>(
pub async fn request<R: JsonRpcResultData + serde::Serialize>(
self,
method: &str,
params: &P,
) -> Result<jsonrpc::SingleResponse<R>, ProviderError> {
// TODO: use tracing spans
// TODO: including params in this log is way too verbose
// trace!(rpc=%self.rpc, %method, "request");
trace!("requesting from {}", self.rpc);

let authorization = &self.request_metadata.authorization;
let method = self.web3_request.request.method();
let params = self.web3_request.request.params();

let authorization = &self.web3_request.authorization;

match &authorization.authorization_type {
AuthorizationType::Frontend => {
@@ -204,8 +213,7 @@ impl OpenRequestHandle {
{
let params: serde_json::Value = serde_json::to_value(params)?;
let request = jsonrpc::JsonRpcRequest::new(
// TODO: proper id
jsonrpc::JsonRpcId::Number(1),
self.web3_request.id().into(),
method.to_string(),
params,
)
@@ -216,7 +224,7 @@ impl OpenRequestHandle {
jsonrpc::SingleResponse::read_if_short(
response,
1024,
self.request_metadata.clone(),
self.web3_request.clone(),
)
.await
}
@@ -226,7 +234,9 @@ impl OpenRequestHandle {
p.request(method, params)
.await
// TODO: Id here
.map(|result| jsonrpc::ParsedResponse::from_result(result, None).into())
.map(|result| {
jsonrpc::ParsedResponse::from_result(result, Default::default()).into()
})
} else {
return Err(ProviderError::CustomError(
"no provider configured!".to_string(),
@@ -255,9 +265,9 @@ impl OpenRequestHandle {
if !["eth_call", "eth_estimateGas"].contains(&method) {
// trace!(%method, "skipping save on revert");
RequestErrorHandler::TraceLevel
} else if DB_CONN.read().await.is_ok() {
} else if DB_CONN.read().is_ok() {
let log_revert_chance =
self.request_metadata.authorization.checks.log_revert_chance;
self.web3_request.authorization.checks.log_revert_chance;

if log_revert_chance == 0 {
// trace!(%method, "no chance. skipping save on revert");
@@ -435,6 +445,8 @@ impl OpenRequestHandle {
tokio::spawn(async move {
self.rpc.peak_latency.as_ref().unwrap().report(latency);
self.rpc.median_latency.as_ref().unwrap().record(latency);

// TODO: app median latency
});

response
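
`OpenRequestHandle::request` loses its `method`/`params` arguments because the `Web3Request` stored on the handle already carries them; taking `self` by value still guarantees each handle is used at most once. A compact sketch of that design (hypothetical names, plain strings standing in for the JSON-RPC types):

```rust
use std::sync::Arc;

// the request object owns method and params, so they cannot drift apart
struct Web3Request {
    method: String,
    params: String,
}

struct OpenRequestHandle {
    web3_request: Arc<Web3Request>,
    rpc_name: &'static str,
}

impl OpenRequestHandle {
    // consumes self: the rate-limit slot this handle represents is spent exactly once
    fn request(self) -> String {
        format!(
            "sending {}({}) to {}",
            self.web3_request.method, self.web3_request.params, self.rpc_name
        )
    }
}

fn main() {
    let req = Arc::new(Web3Request {
        method: "eth_getBalance".into(),
        params: "[\"0x0\", \"latest\"]".into(),
    });
    let handle = OpenRequestHandle { web3_request: req, rpc_name: "example-rpc" };
    println!("{}", handle.request());
    // handle.request(); // would not compile: the handle was consumed above
}
```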

63 web3_proxy/src/secrets.rs Normal file
@@ -0,0 +1,63 @@
use serde::{Deserialize, Serialize};
use std::fmt;
use ulid::Ulid;
use uuid::Uuid;

/// This lets us use UUID and ULID while we transition to only ULIDs
/// TODO: custom deserialize that can also go from String to Ulid
#[derive(Copy, Clone, Deserialize)]
pub enum RpcSecretKey {
Ulid(Ulid),
Uuid(Uuid),
}

impl RpcSecretKey {
pub fn new() -> Self {
Ulid::new().into()
}

pub fn as_128(&self) -> u128 {
match self {
Self::Ulid(x) => x.0,
Self::Uuid(x) => x.as_u128(),
}
}
}

impl PartialEq for RpcSecretKey {
fn eq(&self, other: &Self) -> bool {
self.as_128() == other.as_128()
}
}

impl Eq for RpcSecretKey {}

impl fmt::Debug for RpcSecretKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Ulid(x) => fmt::Debug::fmt(x, f),
Self::Uuid(x) => {
let x = Ulid::from(x.as_u128());

fmt::Debug::fmt(&x, f)
}
}
}
}

/// always serialize as a ULID.
impl Serialize for RpcSecretKey {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
Self::Ulid(x) => x.serialize(serializer),
Self::Uuid(x) => {
let x: Ulid = x.to_owned().into();

x.serialize(serializer)
}
}
}
}

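`RpcSecretKey` can compare and serialize its `Uuid` variant as a ULID because both types are 128-bit values; `as_128` collapses them to a common representation. A small runnable sketch of that equivalence, assuming the `ulid` and `uuid` crates:

```rust
use ulid::Ulid;
use uuid::Uuid;

fn main() {
    let ulid = Ulid::new();

    // the same 128 bits viewed as a UUID
    let uuid = Uuid::from_u128(ulid.0);

    // this is exactly the equality RpcSecretKey::eq relies on
    assert_eq!(ulid.0, uuid.as_u128());

    // and the Debug/Serialize impls go the other way: UUID bits shown as a ULID
    let round_trip = Ulid::from(uuid.as_u128());
    assert_eq!(ulid, round_trip);

    println!("{} == {}", ulid, uuid);
}
```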
@@ -62,8 +62,8 @@ pub async fn query_user_stats<'a>(
params: &'a HashMap<String, String>,
stat_response_type: StatType,
) -> Web3ProxyResponse {
let db_conn = global_db_conn().await?;
let db_replica = global_db_replica_conn().await?;
let db_conn = global_db_conn()?;
let db_replica = global_db_replica_conn()?;
let mut redis_conn = app.redis_conn().await?;

// get the user id first. if it is 0, we should use a cache on the app

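Throughout the diff, `global_db_conn()` and `global_db_replica_conn()` drop their `.await`: per the commit message, globals moved from a tokio task-local to a `OnceLock`, and reading a `OnceLock` is synchronous. A minimal sketch of that accessor pattern (the `DbConn` placeholder is hypothetical; the real code also wraps the global in a lock):

```rust
use std::sync::OnceLock;

// placeholder for the real database connection type
#[derive(Debug, Clone)]
struct DbConn {
    url: String,
}

static DB_CONN: OnceLock<DbConn> = OnceLock::new();

// no async needed: the global either is set or it isn't
fn global_db_conn() -> Result<DbConn, &'static str> {
    DB_CONN.get().cloned().ok_or("no database configured")
}

fn main() {
    assert!(global_db_conn().is_err());

    // set once during startup; a second set() would return Err
    DB_CONN.set(DbConn { url: "mysql://localhost/web3".into() }).unwrap();

    let conn = global_db_conn().unwrap();
    println!("using {}", conn.url);
}
```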
@@ -48,7 +48,7 @@ pub async fn query_user_influx_stats<'a>(
));
}

let db_replica = global_db_replica_conn().await?;
let db_replica = global_db_replica_conn()?;

// Read the (optional) user-id from the request, this is the logic for subusers
// If there is no bearer token, this is not allowed

@@ -9,11 +9,11 @@ use self::stat_buffer::BufferedRpcQueryStats;
use crate::caches::{RpcSecretKeyCache, UserBalanceCache};
use crate::compute_units::ComputeUnit;
use crate::errors::{Web3ProxyError, Web3ProxyResult};
use crate::frontend::authorization::{Authorization, RequestMetadata};
use crate::frontend::authorization::{Authorization, AuthorizationType, Web3Request};
use crate::rpcs::one::Web3Rpc;
use anyhow::{anyhow, Context};
use chrono::{DateTime, Months, TimeZone, Utc};
use derive_more::From;
use derive_more::{AddAssign, From};
use entities::{referee, referrer, rpc_accounting_v2};
use influxdb2::models::DataPoint;
use migration::sea_orm::prelude::Decimal;
@@ -25,7 +25,6 @@ use migration::{Expr, LockType, OnConflict};
use num_traits::ToPrimitive;
use parking_lot::Mutex;
use std::borrow::Cow;
use std::mem;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use tracing::{error, instrument, trace, warn};
@@ -40,13 +39,19 @@ pub enum StatType {

pub type BackendRequests = Mutex<Vec<Arc<Web3Rpc>>>;

#[derive(Copy, Clone, Debug)]
#[derive(AddAssign, Copy, Clone, Debug, Default)]
pub struct FlushedStats {
/// the number of rows saved to the relational database.
/// rows can contain multiple requests
pub relational: usize,
pub relational_frontend_requests: u64,
pub relational_internal_requests: u64,
/// the number of data points saved to the timeseries database.
/// data points can contain multiple requests
pub timeseries: usize,
/// the number of global frontend requests saved to influx
/// the number of global frontend requests saved to the time series database
pub timeseries_frontend_requests: u64,
pub timeseries_internal_requests: u64,
}

/// TODO: better name? RpcQueryStatBuilder?
@@ -69,12 +74,11 @@ pub struct RpcQueryStats {
pub compute_unit_cost: Decimal,
/// If the request is invalid or received a jsonrpc error response (excluding reverts)
pub user_error_response: bool,
/// If premium was active at the start of the request
pub paid_credits_used: bool,
}

#[derive(Clone, Debug, From, Hash, PartialEq, Eq)]
pub struct RpcQueryKey {
pub authorization_type: AuthorizationType,
/// unix epoch time in seconds.
/// for the time series db, this is (close to) the time that the response was sent.
/// for the account database, this is rounded to the week.
@@ -90,6 +94,7 @@ pub struct RpcQueryKey {
/// 0 if the public url was used.
rpc_secret_key_id: u64,
/// 0 if the public url was used.
/// TODO: u64::MAX if the internal? or have a migration make a user for us? or keep 0 and we track that another way?
rpc_key_user_id: u64,
}

@@ -130,6 +135,7 @@ impl RpcQueryStats {
// Depending on method, add some arithmetic around calculating credits_used
// I think balance should not go here, this looks more like a key thingy
RpcQueryKey {
authorization_type: self.authorization.authorization_type,
response_timestamp,
archive_needed: self.archive_request,
error_response: self.error_response,
@@ -151,6 +157,7 @@ impl RpcQueryStats {
let rpc_key_user_id = 0;

RpcQueryKey {
authorization_type: self.authorization.authorization_type,
response_timestamp: self.response_timestamp,
archive_needed: self.archive_request,
error_response: self.error_response,
@@ -177,6 +184,7 @@ impl RpcQueryStats {
let method = self.method.clone();

let key = RpcQueryKey {
authorization_type: self.authorization.authorization_type,
response_timestamp: self.response_timestamp,
archive_needed: self.archive_request,
error_response: self.error_response,
@@ -194,7 +202,7 @@ impl RpcQueryStats {
/// For now there is just one, but I think there might be others later
#[derive(Debug, From)]
pub enum AppStat {
RpcQuery(RequestMetadata),
RpcQuery(Web3Request),
}

// TODO: move to stat_buffer.rs?
@@ -538,7 +546,7 @@ impl BufferedRpcQueryStats {
/// We want this to run when there is **one and only one** copy of this RequestMetadata left
/// There are often multiple copies if a request is being sent to multiple servers in parallel
impl RpcQueryStats {
fn try_from_metadata(mut metadata: RequestMetadata) -> Web3ProxyResult<Self> {
fn try_from_metadata(metadata: Web3Request) -> Web3ProxyResult<Self> {
// TODO: do this without a clone
let authorization = metadata.authorization.clone();

@@ -547,7 +555,7 @@ impl RpcQueryStats {
// TODO: do this without cloning. we can take their vec
let backend_rpcs_used = metadata.backend_rpcs_used();

let request_bytes = metadata.request_bytes as u64;
let request_bytes = metadata.request.num_bytes() as u64;
let response_bytes = metadata.response_bytes.load(Ordering::Relaxed);

let mut error_response = metadata.error_response.load(Ordering::Relaxed);
@@ -579,7 +587,7 @@ impl RpcQueryStats {
x => x,
};

let cu = ComputeUnit::new(&metadata.method, metadata.chain_id, response_bytes);
let cu = ComputeUnit::new(metadata.request.method(), metadata.chain_id, response_bytes);

let cache_hit = backend_rpcs_used.is_empty();

@@ -590,9 +598,7 @@ impl RpcQueryStats {
&metadata.usd_per_cu,
);

let method = mem::take(&mut metadata.method);

let paid_credits_used = authorization.checks.paid_credits_used;
let method = metadata.request.method().to_string().into();

let x = Self {
archive_request,
@@ -602,7 +608,6 @@ impl RpcQueryStats {
compute_unit_cost,
error_response,
method,
paid_credits_used,
request_bytes,
response_bytes,
response_millis,

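`FlushedStats` gains `derive_more::AddAssign` and `Default` so per-flush counts can be accumulated with `+=` (the tests further down use exactly that). A reduced sketch, assuming the `derive_more` crate with its `add_assign` feature:

```rust
use derive_more::AddAssign;

#[derive(AddAssign, Copy, Clone, Debug, Default)]
struct FlushedStats {
    relational: usize,
    timeseries: usize,
}

fn main() {
    let mut total = FlushedStats::default();

    // each flush returns its own counts...
    for flush in [
        FlushedStats { relational: 2, timeseries: 5 },
        FlushedStats { relational: 1, timeseries: 3 },
    ] {
        // ...and the derived AddAssign adds them field by field
        total += flush;
    }

    assert_eq!(total.relational, 3);
    assert_eq!(total.timeseries, 8);
    println!("{total:?}");
}
```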
@@ -2,7 +2,7 @@ use super::{AppStat, FlushedStats, RpcQueryKey};
use crate::app::Web3ProxyJoinHandle;
use crate::caches::{RpcSecretKeyCache, UserBalanceCache};
use crate::errors::Web3ProxyResult;
use crate::frontend::authorization::RequestMetadata;
use crate::frontend::authorization::{AuthorizationType, Web3Request};
use crate::globals::global_db_conn;
use crate::stats::RpcQueryStats;
use derive_more::From;
@@ -136,9 +136,12 @@ impl StatBuffer {
let mut db_save_interval =
interval(Duration::from_secs(self.db_save_interval_seconds as u64));

// TODO: this should be a FlushedStats that we add to
let mut total_frontend_requests = 0;
let mut tsdb_frontend_requests = 0;
let mut tsdb_internal_requests = 0;
let mut db_frontend_requests = 0;
let mut db_internal_requests = 0;

loop {
select! {
@@ -154,17 +157,19 @@ impl StatBuffer {
_ = db_save_interval.tick() => {
// TODO: tokio spawn this! (but with a semaphore on db_save_interval)
trace!("DB save interval tick");
let (count, new_frontend_requests) = self.save_relational_stats().await;
let (count, new_frontend_requests, new_internal_requests) = self.save_relational_stats().await;
if count > 0 {
db_frontend_requests += new_frontend_requests;
db_internal_requests += new_internal_requests;
debug!("Saved {} stats for {} requests to the relational db", count, new_frontend_requests);
}
}
_ = tsdb_save_interval.tick() => {
trace!("TSDB save interval tick");
let (count, new_frontend_requests) = self.save_tsdb_stats().await;
let (count, new_frontend_requests, new_internal_requests) = self.save_tsdb_stats().await;
if count > 0 {
tsdb_frontend_requests += new_frontend_requests;
tsdb_internal_requests += new_internal_requests;
debug!("Saved {} stats for {} requests to the tsdb @ {}/{}", count, new_frontend_requests, self.tsdb_window, self.num_tsdb_windows);
}
}
@@ -174,7 +179,10 @@ impl StatBuffer {
let flushed_stats = self._flush(&mut stat_receiver).await?;

tsdb_frontend_requests += flushed_stats.timeseries_frontend_requests;
tsdb_internal_requests += flushed_stats.timeseries_internal_requests;

db_frontend_requests += flushed_stats.relational_frontend_requests;
db_internal_requests += flushed_stats.relational_internal_requests;

if let Err(err) = x.send(flushed_stats) {
error!(?flushed_stats, ?err, "unable to notify about flushed stats");
@@ -218,34 +226,32 @@ impl StatBuffer {
let flushed_stats = self._flush(&mut stat_receiver).await?;

tsdb_frontend_requests += flushed_stats.timeseries_frontend_requests;
db_frontend_requests += flushed_stats.relational_frontend_requests;
tsdb_internal_requests += flushed_stats.timeseries_internal_requests;

// TODO: if these totals don't match, something is wrong!
info!(%total_frontend_requests, %tsdb_frontend_requests, %db_frontend_requests, "accounting and stat save loop complete");
db_frontend_requests += flushed_stats.relational_frontend_requests;
db_internal_requests += flushed_stats.relational_internal_requests;

// TODO: if these totals don't match, something is wrong! log something or maybe even return an error
info!(%total_frontend_requests, %tsdb_frontend_requests, %tsdb_internal_requests, %db_frontend_requests, %db_internal_requests, "accounting and stat save loop complete");

Ok(())
}

async fn _buffer_app_stat(&mut self, stat: AppStat) -> Web3ProxyResult<u64> {
match stat {
AppStat::RpcQuery(request_metadata) => {
self._buffer_request_metadata(request_metadata).await
}
AppStat::RpcQuery(web3_request) => self._buffer_web3_request(web3_request).await,
}
}

async fn _buffer_request_metadata(
&mut self,
request_metadata: RequestMetadata,
) -> Web3ProxyResult<u64> {
async fn _buffer_web3_request(&mut self, web3_request: Web3Request) -> Web3ProxyResult<u64> {
// we convert on this side of the channel so that we don't slow down the request
let stat = RpcQueryStats::try_from_metadata(request_metadata)?;
let stat = RpcQueryStats::try_from_metadata(web3_request)?;

// update the latest balance
// do this BEFORE emitting any stats
let mut approximate_balance_remaining = 0.into();
let mut active_premium = false;
if let Ok(db_conn) = global_db_conn().await {
if let Ok(db_conn) = global_db_conn() {
let user_id = stat.authorization.checks.user_id;

// update the user's balance
@@ -359,15 +365,19 @@ impl StatBuffer {

// flush the buffers
// TODO: include frontend counts here
let (tsdb_count, tsdb_frontend_requests) = self.save_tsdb_stats().await;
let (relational_count, relational_frontend_requests) = self.save_relational_stats().await;
let (timeseries_count, timeseries_frontend_requests, timeseries_internal_requests) =
self.save_tsdb_stats().await;
let (relational_count, relational_frontend_requests, relational_internal_requests) =
self.save_relational_stats().await;

// notify
let flushed_stats = FlushedStats {
timeseries: tsdb_count,
timeseries_frontend_requests: tsdb_frontend_requests,
timeseries: timeseries_count,
timeseries_frontend_requests,
timeseries_internal_requests,
relational: relational_count,
relational_frontend_requests,
relational_internal_requests,
};

trace!(?flushed_stats);
@@ -375,14 +385,16 @@ impl StatBuffer {
Ok(flushed_stats)
}

async fn save_relational_stats(&mut self) -> (usize, u64) {
async fn save_relational_stats(&mut self) -> (usize, u64, u64) {
let mut count = 0;
let mut frontend_requests = 0;
let mut internal_requests = 0;

if let Ok(db_conn) = global_db_conn().await {
if let Ok(db_conn) = global_db_conn() {
count = self.accounting_db_buffer.len();
for (key, stat) in self.accounting_db_buffer.drain() {
let new_frontend_requests = stat.frontend_requests;
let is_internal = matches!(key.authorization_type, AuthorizationType::Internal);

// TODO: batch saves
// TODO: i don't like passing key (which came from the stat) to the function on the stat. but it works for now
@@ -397,20 +409,24 @@ impl StatBuffer {
.await
{
// TODO: save the stat and retry later!
error!(?err, %count, %new_frontend_requests, "unable to save accounting entry!");
error!(?err, %count, %new_frontend_requests, %is_internal, "unable to save accounting entry!");
} else if is_internal {
internal_requests += new_frontend_requests;
} else {
frontend_requests += new_frontend_requests;
};
}
}

(count, frontend_requests)
(count, frontend_requests, internal_requests)
}

// TODO: bucket should be an enum so that we don't risk typos
async fn save_tsdb_stats(&mut self) -> (usize, u64) {
// TODO: return type should be a struct so we don't mix up the values
async fn save_tsdb_stats(&mut self) -> (usize, u64, u64) {
let mut count = 0;
let mut frontend_requests = 0;
let mut internal_requests = 0;

if let Some(influxdb_client) = self.influxdb_client.as_ref() {
// every time we save, we increment the tsdb_window. this is used to ensure that stats don't overwrite others because the keys match
@@ -434,6 +450,7 @@ impl StatBuffer {
for (key, stat) in self.global_timeseries_buffer.drain() {
// TODO: i don't like passing key (which came from the stat) to the function on the stat. but it works for now
let new_frontend_requests = stat.frontend_requests;
let is_internal = matches!(key.authorization_type, AuthorizationType::Internal);

match stat
.build_timeseries_point("global_proxy", self.chain_id, key, uniq)
@@ -441,11 +458,16 @@ impl StatBuffer {
{
Ok(point) => {
points.push(point);
frontend_requests += new_frontend_requests;

if is_internal {
internal_requests += new_frontend_requests;
} else {
frontend_requests += new_frontend_requests;
};
}
Err(err) => {
// TODO: what can cause this?
error!(?err, "unable to build global stat!");
error!(?err, %new_frontend_requests, %is_internal, "unable to build global stat!");
}
};
}
@@ -497,6 +519,6 @@ impl StatBuffer {
}
}

(count, frontend_requests)
(count, frontend_requests, internal_requests)
}
}

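Both save paths now split their counts by whether the key's `authorization_type` is internal, using a `matches!` check before tallying. The branching in isolation, with hypothetical reduced types:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum AuthorizationType {
    Internal,
    Frontend,
}

struct Stat {
    authorization_type: AuthorizationType,
    requests: u64,
}

// returns (saved, frontend_requests, internal_requests), like save_relational_stats
fn tally(stats: &[Stat]) -> (usize, u64, u64) {
    let mut frontend = 0;
    let mut internal = 0;
    for stat in stats {
        let is_internal = matches!(stat.authorization_type, AuthorizationType::Internal);
        if is_internal {
            internal += stat.requests;
        } else {
            frontend += stat.requests;
        }
    }
    (stats.len(), frontend, internal)
}

fn main() {
    let stats = [
        Stat { authorization_type: AuthorizationType::Frontend, requests: 10 },
        Stat { authorization_type: AuthorizationType::Internal, requests: 4 },
    ];
    assert_eq!(tally(&stats), (2, 10, 4));
}
```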
@@ -1,6 +1,6 @@
[package]
name = "web3_proxy_cli"
version = "1.43.8"
version = "1.43.10"
edition = "2021"
default-run = "web3_proxy_cli"

@@ -1,4 +1,3 @@
use web3_proxy::frontend::authorization::RpcSecretKey;
use web3_proxy::prelude::anyhow::{self, Context};
use web3_proxy::prelude::argh::{self, FromArgs};
use web3_proxy::prelude::entities::{rpc_key, user, user_tier};
@@ -9,6 +8,7 @@ use web3_proxy::prelude::migration::sea_orm::{
use web3_proxy::prelude::serde_json::json;
use web3_proxy::prelude::tracing::{debug, info};
use web3_proxy::prelude::uuid::Uuid;
use web3_proxy::secrets::RpcSecretKey;

/// change a user's tier.
#[derive(FromArgs, PartialEq, Eq, Debug)]

@@ -1,4 +1,3 @@
use web3_proxy::frontend::authorization::RpcSecretKey;
use web3_proxy::prelude::anyhow::{self, Context};
use web3_proxy::prelude::argh::{self, FromArgs};
use web3_proxy::prelude::entities::{rpc_key, user};
@@ -9,6 +8,7 @@ use web3_proxy::prelude::migration::sea_orm::{
use web3_proxy::prelude::tracing::info;
use web3_proxy::prelude::ulid::Ulid;
use web3_proxy::prelude::uuid::Uuid;
use web3_proxy::secrets::RpcSecretKey;

#[derive(FromArgs, PartialEq, Debug, Eq)]
/// Create a new user and api key

@@ -1,5 +1,4 @@
use tracing::info;
use web3_proxy::frontend::authorization::RpcSecretKey;
use web3_proxy::prelude::anyhow::{self, Context};
use web3_proxy::prelude::argh::{self, FromArgs};
use web3_proxy::prelude::entities::{rpc_key, user};
@@ -7,6 +6,7 @@ use web3_proxy::prelude::ethers::prelude::Address;
use web3_proxy::prelude::migration::sea_orm::{self, ActiveModelTrait, TransactionTrait};
use web3_proxy::prelude::ulid::Ulid;
use web3_proxy::prelude::uuid::Uuid;
use web3_proxy::secrets::RpcSecretKey;

#[derive(FromArgs, PartialEq, Debug, Eq)]
/// Create a new user and api key

@@ -1,9 +1,10 @@
use std::num::NonZeroU64;
use std::sync::Arc;
use std::time::Duration;
use tracing::{error, info};
use web3_proxy::app::BILLING_PERIOD_SECONDS;
use web3_proxy::config::TopConfig;
use web3_proxy::frontend::authorization::{Authorization, RequestMetadata, RpcSecretKey};
use web3_proxy::frontend::authorization::{Authorization, RequestOrMethod, Web3Request};
use web3_proxy::prelude::anyhow::{self, Context};
use web3_proxy::prelude::argh::{self, FromArgs};
use web3_proxy::prelude::chrono;
@@ -20,8 +21,8 @@ use web3_proxy::prelude::moka::future::Cache;
use web3_proxy::prelude::parking_lot::Mutex;
use web3_proxy::prelude::tokio::sync::{broadcast, mpsc};
use web3_proxy::prelude::tokio::time::Instant;
use web3_proxy::prelude::ulid::Ulid;
use web3_proxy::rpcs::one::Web3Rpc;
use web3_proxy::secrets::RpcSecretKey;
use web3_proxy::stats::StatBuffer;

#[derive(FromArgs, PartialEq, Eq, Debug)]
@@ -180,42 +181,43 @@ impl MigrateStatsToV2SubCommand {
.map(|_| Arc::new(Web3Rpc::default()))
.collect();

let request_ulid = Ulid::new();

let chain_id = x.chain_id;

// Create RequestMetadata
let request_metadata = RequestMetadata {
let method = x
.method
.clone()
.unwrap_or_else(|| "unknown".to_string())
.into();

let request = RequestOrMethod::Method(method, int_request_bytes as usize);

// Create Web3Request
let web3_request = Web3Request {
archive_request: x.archive_request.into(),
authorization: authorization.clone(),
backend_requests: Mutex::new(backend_rpcs),
chain_id,
error_response: x.error_response.into(),
head_block: None,
// debug data is in kafka, not mysql or influx
kafka_debug_logger: None,
method: x
.method
.clone()
.unwrap_or_else(|| "unknown".to_string())
.into(),
request,
// This is not relevant in the new version
no_servers: 0.into(),
// Get the mean of all the request bytes
request_bytes: int_request_bytes as usize,
response_bytes: int_response_bytes.into(),
// We did not initially record this data
response_from_backup_rpc: false.into(),
response_timestamp: x.period_datetime.timestamp().into(),
response_millis: int_response_millis.into(),
// This is overwritten later on
start_instant: Instant::now(),
stat_sender: Some(stat_sender.clone()),
request_ulid,
user_error_response: false.into(),
usd_per_cu: top_config.app.usd_per_cu.unwrap_or_default(),
cache_mode: Default::default(),
start_instant: Instant::now(),
expire_instant: Instant::now() + Duration::from_secs(1),
};

request_metadata.try_send_stat()?;
web3_request.try_send_stat()?;
}
}

@@ -107,28 +107,32 @@ impl ProxydSubCommand {

thread::spawn(move || loop {
match fs::read_to_string(&top_config_path) {
Ok(new_top_config) => match toml::from_str::<TopConfig>(&new_top_config) {
Ok(mut new_top_config) => {
new_top_config.clean();
Ok(new_top_config) => {
match toml::from_str::<TopConfig>(&new_top_config) {
Ok(mut new_top_config) => {
new_top_config.clean();

if new_top_config != current_config {
trace!("current_config: {:#?}", current_config);
trace!("new_top_config: {:#?}", new_top_config);
if new_top_config != current_config {
trace!("current_config: {:#?}", current_config);
trace!("new_top_config: {:#?}", new_top_config);

// TODO: print the differences
// TODO: first run seems to always see differences. why?
info!("config @ {:?} changed", top_config_path);
match config_sender.send(new_top_config.clone()) {
Ok(()) => current_config = new_top_config,
Err(err) => error!(?err, "unable to apply new config"),
// TODO: print the differences
// TODO: first run seems to always see differences. why?
info!("config @ {:?} changed", top_config_path);
match config_sender.send(new_top_config.clone()) {
Ok(()) => current_config = new_top_config,
Err(err) => {
error!(?err, "unable to apply new config")
}
}
}
}
Err(err) => {
// TODO: panic?
error!("Unable to parse config! {:#?}", err);
}
}
Err(err) => {
// TODO: panic?
error!("Unable to parse config! {:#?}", err);
}
},
}
Err(err) => {
// TODO: panic?
error!("Unable to read config! {:#?}", err);
@@ -299,7 +303,7 @@ impl ProxydSubCommand {
}

// TODO: make sure this happens even if we exit with an error
if let Ok(db_conn) = global_db_conn().await {
if let Ok(db_conn) = global_db_conn() {
/*
From the sqlx docs:

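The reformatted watcher loop above re-reads the TOML config, parses it, and only pushes it through the channel when it differs from the current one. The same read-compare-send loop in isolation, using std only and a raw string where the real code compares parsed `TopConfig` values:

```rust
use std::fs;
use std::path::Path;
use std::sync::mpsc;
use std::thread;
use std::time::Duration;

fn watch_config(path: &Path, config_sender: mpsc::Sender<String>) {
    let mut current_config = String::new();
    loop {
        match fs::read_to_string(path) {
            // a real implementation would toml::from_str here before comparing
            Ok(new_config) => {
                if new_config != current_config {
                    println!("config @ {:?} changed", path);
                    match config_sender.send(new_config.clone()) {
                        Ok(()) => current_config = new_config,
                        Err(err) => {
                            eprintln!("unable to apply new config: {err}");
                            return;
                        }
                    }
                }
            }
            Err(err) => eprintln!("unable to read config! {err}"),
        }
        thread::sleep(Duration::from_secs(1));
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let path = std::env::temp_dir().join("example_config.toml");
    fs::write(&path, "chain_id = 1\n").unwrap();

    let watcher_path = path.clone();
    thread::spawn(move || watch_config(&watcher_path, tx));

    // the first read always counts as a change, like the TODO in the diff notes
    println!("got: {}", rx.recv().unwrap().trim());
}
```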
@@ -11,9 +11,8 @@ use web3_proxy::prelude::rdkafka::{
};
use web3_proxy::prelude::rmp_serde;
use web3_proxy::prelude::uuid::Uuid;
use web3_proxy::{
config::TopConfig, frontend::authorization::RpcSecretKey, relational_db::connect_db,
};
use web3_proxy::secrets::RpcSecretKey;
use web3_proxy::{config::TopConfig, relational_db::connect_db};

/// Second subcommand.
#[derive(FromArgs, PartialEq, Debug, Eq)]

@@ -1,5 +1,4 @@
use tracing::{debug, info};
use web3_proxy::frontend::authorization::RpcSecretKey;
use web3_proxy::prelude::anyhow::{self, Context};
use web3_proxy::prelude::argh::{self, FromArgs};
use web3_proxy::prelude::entities::{rpc_key, user};
@@ -9,6 +8,7 @@ use web3_proxy::prelude::sea_orm::{
QueryFilter,
};
use web3_proxy::prelude::uuid::Uuid;
use web3_proxy::secrets::RpcSecretKey;

/// change a key's owner.
#[derive(FromArgs, PartialEq, Eq, Debug)]

@@ -106,8 +106,8 @@ impl TestApp {
},
)]),
// influxdb_client: influx.map(|x| x.client),
private_rpcs: None,
bundler_4337_rpcs: None,
private_rpcs: Default::default(),
bundler_4337_rpcs: Default::default(),
extra: Default::default(),
};

@@ -126,6 +126,7 @@ impl TestApp {
let flush_stat_buffer_sender = flush_stat_buffer_sender.clone();
let shutdown_sender = shutdown_sender.clone();

// TODO: thread isn't enough! this needs its own process for the globals to be isolated!
thread::spawn(move || {
let runtime = Builder::new_multi_thread()
.enable_all()
@@ -180,6 +181,32 @@ impl TestApp {
Ok(x)
}

pub async fn flush_stats_and_wait(&self) -> anyhow::Result<FlushedStats> {
let mut x = FlushedStats::default();

loop {
// give stats time to get into the channel
// TODO: do this better
sleep(Duration::from_secs(5)).await;

// Flush all stats here
// TODO: the test should maybe pause time so that stats definitely flush from our queries.
let flush_count = self.flush_stats().await?;

x += flush_count;

if flush_count.relational_frontend_requests + flush_count.timeseries_frontend_requests
== 0
{
break;
}

info!(?flush_count);
}

Ok(x)
}

pub fn stop(&self) -> Result<usize, SendError<()>> {
self.shutdown_sender.send(())
}

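`flush_stats_and_wait` keeps flushing until a pass reports zero new frontend requests, accumulating everything seen along the way. The same drain-until-quiet loop in isolation, with a hypothetical `flush` closure standing in for `self.flush_stats()`:

```rust
fn main() {
    // pretend each flush returns how many buffered requests it saved
    let mut pending = vec![0u64, 2, 3];
    let mut flush = move || pending.pop().unwrap_or(0);

    let mut total = 0;
    loop {
        let flushed = flush();
        total += flushed;

        // a pass that saves nothing means the buffers are drained
        if flushed == 0 {
            break;
        }
        println!("flushed {flushed} requests, looping again");
    }

    assert_eq!(total, 5);
    println!("drained after saving {total} requests");
}
```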
@@ -7,6 +7,7 @@ use web3_proxy::prelude::reqwest;
use web3_proxy::prelude::rust_decimal::{Decimal, RoundingStrategy};
use web3_proxy::prelude::tokio::time::sleep;
use web3_proxy::rpcs::blockchain::ArcBlock;
use web3_proxy::stats::FlushedStats;
use web3_proxy::test_utils::TestInflux;
use web3_proxy::test_utils::{TestAnvil, TestMysql};
use web3_proxy_cli::test_utils::create_provider_with_rpc_key::create_provider_for_user;
@@ -124,30 +125,12 @@ async fn test_multiple_proxies_stats_add_up() {

// Flush all stats here
// TODO: the test should maybe pause time so that stats definitely flush from our queries.
let _flush_0_count_0 = x_0.flush_stats().await.unwrap();
let _flush_1_count_0 = x_1.flush_stats().await.unwrap();
let mut flushed = FlushedStats::default();

// // the counts might actually be zero because we flushed from timers
// // TODO: tests should probably have the option to set flush interval to infinity for more control.
// info!(?flush_0_count_0);
// assert_eq!(flush_0_count_0.relational, 1);
// assert_eq!(flush_0_count_0.timeseries, 2);
// info!(?flush_1_count_0);
// assert_eq!(flush_1_count_0.relational, 1);
// assert_eq!(flush_1_count_0.timeseries, 2);
flushed += x_0.flush_stats_and_wait().await.unwrap();
flushed += x_1.flush_stats_and_wait().await.unwrap();

// give time for more stats to arrive
sleep(Duration::from_secs(5)).await;

// no more stats should arrive
let flush_0_count_1 = x_0.flush_stats().await.unwrap();
let flush_1_count_1 = x_1.flush_stats().await.unwrap();
info!(?flush_0_count_1);
assert_eq!(flush_0_count_1.relational, 0);
assert_eq!(flush_0_count_1.timeseries, 0);
info!(?flush_1_count_1);
assert_eq!(flush_1_count_1.relational, 0);
assert_eq!(flush_1_count_1.timeseries, 0);
info!(?flushed);

// get stats now
// todo!("Need to validate all the stat accounting now");

@@ -6,8 +6,9 @@ use tokio::{
 };
 use tracing::info;
 use web3_proxy::prelude::ethers::{
-    prelude::{Block, Transaction, TxHash, U256, U64},
+    prelude::{Block, Transaction, TxHash, H256, U256, U64},
     providers::{Http, JsonRpcClient, Quorum, QuorumProvider, WeightedProvider},
     types::{transaction::eip2718::TypedTransaction, Address, Bytes, Eip1559TransactionRequest},
 };
 use web3_proxy::prelude::http::StatusCode;
 use web3_proxy::prelude::reqwest;
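`H256` joins the imports for the transaction hashes used below. The test drives ethers' `QuorumProvider`, which fans a request out to weighted providers and only returns once the configured quorum agrees. A sketch of how such a provider can be assembled, assuming ethers' builder API (the URLs are placeholders):

    use std::str::FromStr;

    use web3_proxy::prelude::ethers::providers::{Http, Quorum, QuorumProvider, WeightedProvider};

    // build a majority-quorum provider over several proxy endpoints
    fn quorum_over(urls: &[&str]) -> QuorumProvider<Http> {
        let mut builder = QuorumProvider::builder().quorum(Quorum::Majority);
        for url in urls {
            let http = Http::from_str(url).expect("invalid rpc url");
            builder = builder.add_provider(WeightedProvider::new(http));
        }
        builder.build()
    }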
@@ -23,7 +24,7 @@ async fn it_migrates_the_db() {
     let x = TestApp::spawn(&a, Some(&db), None, None).await;
 
     // we call flush stats more to be sure it works than because we expect it to save any stats
-    x.flush_stats().await.unwrap();
+    x.flush_stats_and_wait().await.unwrap();
 
     // drop x first to avoid spurious warnings about anvil/influx/mysql shutting down before the app
     drop(x);
@@ -104,7 +105,7 @@ async fn it_starts_and_stops() {
     assert_eq!(anvil_result, proxy_result.unwrap());
 
     // this won't do anything since stats aren't tracked when there isn't a db
-    let flushed = x.flush_stats().await.unwrap();
+    let flushed = x.flush_stats_and_wait().await.unwrap();
     assert_eq!(flushed.relational, 0);
     assert_eq!(flushed.timeseries, 0);
 
@@ -116,9 +117,9 @@ async fn it_starts_and_stops() {
 /// TODO: have another test that makes sure error codes match
 #[test_log::test(tokio::test)]
 async fn it_matches_anvil() {
-    let a = TestAnvil::spawn(31337).await;
+    let chain_id = 31337;
 
-    // TODO: send some test transactions
+    let a = TestAnvil::spawn(chain_id).await;
 
     a.provider.request::<_, U64>("evm_mine", ()).await.unwrap();
 
@@ -167,12 +168,91 @@ async fn it_matches_anvil() {
     let balance: U256 = quorum_provider
         .request(
             "eth_getBalance",
-            (block_with_tx.unwrap().author.unwrap(), "latest"),
+            (block_with_tx.as_ref().unwrap().author.unwrap(), "latest"),
         )
         .await
         .unwrap();
     info!(%balance);
 
+    let singleton_deploy_from: Address = "0xBb6e024b9cFFACB947A71991E386681B1Cd1477D"
+        .parse()
+        .unwrap();
+
+    let wallet = a.wallet(0);
+
+    let x = quorum_provider
+        .request::<_, Option<Transaction>>(
+            "eth_getTransactionByHash",
+            ["0x803351deb6d745e91545a6a3e1c0ea3e9a6a02a1a4193b70edfcd2f40f71a01c"],
+        )
+        .await
+        .unwrap();
+    assert!(x.is_none());
+
+    let gas_price: U256 = quorum_provider.request("eth_gasPrice", ()).await.unwrap();
+
+    let tx = TypedTransaction::Eip1559(Eip1559TransactionRequest {
+        chain_id: Some(chain_id),
+        to: Some(singleton_deploy_from.into()),
+        gas: Some(21000.into()),
+        value: Some("24700000000000000".parse().unwrap()),
+        max_fee_per_gas: Some(gas_price * U256::from(2)),
+        ..Default::default()
+    });
+
+    let sig = wallet.sign_transaction_sync(&tx).unwrap();
+
+    let raw_tx = tx.rlp_signed(&sig);
+
+    // fund singleton deployer
+    // TODO: send through the quorum provider. it should detect that its already confirmed
+    let fund_tx_hash: H256 = a
+        .provider
+        .request("eth_sendRawTransaction", [raw_tx])
+        .await
+        .unwrap();
+    info!(%fund_tx_hash);
+
+    // deploy singleton deployer
+    // TODO: send through the quorum provider. it should detect that its already confirmed
+    let deploy_tx: H256 = a.provider.request("eth_sendRawTransaction", ["0xf9016c8085174876e8008303c4d88080b90154608060405234801561001057600080fd5b50610134806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c80634af63f0214602d575b600080fd5b60cf60048036036040811015604157600080fd5b810190602081018135640100000000811115605b57600080fd5b820183602082011115606c57600080fd5b80359060200191846001830284011164010000000083111715608d57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550509135925060eb915050565b604080516001600160a01b039092168252519081900360200190f35b6000818351602085016000f5939250505056fea26469706673582212206b44f8a82cb6b156bfcc3dc6aadd6df4eefd204bc928a4397fd15dacf6d5320564736f6c634300060200331b83247000822470"]).await.unwrap();
+    assert_eq!(
+        deploy_tx,
+        "0x803351deb6d745e91545a6a3e1c0ea3e9a6a02a1a4193b70edfcd2f40f71a01c"
+            .parse()
+            .unwrap()
+    );
+
+    let code: Bytes = quorum_provider
+        .request(
+            "eth_getCode",
+            ("0xce0042B868300000d44A59004Da54A005ffdcf9f", "latest"),
+        )
+        .await
+        .unwrap();
+    info!(%code);
+
+    let deploy_tx = quorum_provider
+        .request::<_, Option<Transaction>>(
+            "eth_getTransactionByHash",
+            ["0x803351deb6d745e91545a6a3e1c0ea3e9a6a02a1a4193b70edfcd2f40f71a01c"],
+        )
+        .await
+        .unwrap()
+        .unwrap();
+    info!(?deploy_tx);
+
+    let head_block_num: U64 = quorum_provider
+        .request("eth_blockNumber", ())
+        .await
+        .unwrap();
+
+    let future_block: Option<ArcBlock> = quorum_provider
+        .request("eth_getBlockByNumber", (head_block_num + U64::one(), false))
+        .await
+        .unwrap();
+    assert!(future_block.is_none());
+
     // todo!("lots more requests");
 
     // todo!("compare batch requests");
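The magic values in this hunk are ERC-2470's keyless singleton-factory deployment: `0xBb6e024b9cFFACB947A71991E386681B1Cd1477D` is the one-shot deployer, the 24700000000000000 wei transfer is exactly its gas budget (247,000 gas at 100 gwei, both readable in the raw transaction above), and the factory must land at `0xce0042B868300000d44A59004Da54A005ffdcf9f` because a CREATE from that sender at nonce 0 is fully deterministic. A sketch of checking that offline, assuming ethers' `get_contract_address` helper is reachable through the prelude:

    use web3_proxy::prelude::ethers::{types::Address, utils::get_contract_address};

    fn main() {
        // the ERC-2470 keyless deployer; its nonce-0 CREATE is deterministic
        let deployer: Address = "0xBb6e024b9cFFACB947A71991E386681B1Cd1477D"
            .parse()
            .unwrap();

        // contract address = keccak256(rlp([sender, nonce]))[12..]
        let factory = get_contract_address(deployer, 0u64);
        let expected: Address = "0xce0042B868300000d44A59004Da54A005ffdcf9f"
            .parse()
            .unwrap();
        assert_eq!(factory, expected);

        // funding = gas_limit * gas_price = 247_000 * 100 gwei
        assert_eq!(247_000u128 * 100_000_000_000u128, 24_700_000_000_000_000u128);
    }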
@@ -5,7 +5,6 @@ use tracing::{info, warn};
 use web3_proxy::prelude::futures::future::try_join_all;
 use web3_proxy::prelude::reqwest;
 use web3_proxy::prelude::rust_decimal::{Decimal, RoundingStrategy};
-use web3_proxy::prelude::tokio::time::sleep;
 use web3_proxy::rpcs::blockchain::ArcBlock;
 use web3_proxy_cli::test_utils::create_provider_with_rpc_key::create_provider_for_user;
 use web3_proxy_cli::test_utils::rpc_key::user_get_first_rpc_key;
@@ -95,25 +94,8 @@ async fn test_single_proxy_stats_add_up() {
 
     try_join_all(handles).await.unwrap();
 
-    // give stats time to get into the channel
-    // TODO: do this better
-    sleep(Duration::from_secs(5)).await;
-
-    // Flush all stats here
-    // TODO: the test should maybe pause time so that stats definitely flush from our queries.
-    let flush_0_count_0 = x.flush_stats().await.unwrap();
-
-    warn!("Counts 0 are: {:?}", flush_0_count_0);
-    // we don't actually assert on these because its likely the intervals flushed most of the stats
-    // assert_eq!(flush_0_count_0.relational, 1);
-    // assert_eq!(flush_0_count_0.timeseries, 2);
-
-    // Wait a bit. TODO: instead of waiting. make flush stats more robust
-    sleep(Duration::from_secs(5)).await;
-    let flush_0_count_1 = x.flush_stats().await.unwrap();
-    warn!("Counts 0 are: {:?}", flush_0_count_1);
-    assert_eq!(flush_0_count_1.relational, 0);
-    assert_eq!(flush_0_count_1.timeseries, 0);
+    let flushed = x.flush_stats_and_wait().await.unwrap();
+    info!(?flushed);
 
     // get stats now
     // todo!("Need to validate all the stat accounting now");
@@ -4,7 +4,6 @@ use web3_proxy::balance::Balance;
 use web3_proxy::prelude::ethers::prelude::U64;
 use web3_proxy::prelude::migration::sea_orm::prelude::Decimal;
 use web3_proxy::prelude::reqwest;
-use web3_proxy::prelude::tokio::time::sleep;
 use web3_proxy_cli::test_utils::{
     admin_increases_balance::admin_increase_balance,
     create_admin::create_user_as_admin,
@@ -91,16 +90,8 @@ async fn test_sum_credits_used() {
     let cached_query_cost: Decimal = query_cost * cache_multipler;
 
     // flush stats
-    let _ = x.flush_stats().await.unwrap();
-    // due to intervals, we can't be sure this is true. it should be <=
-    // assert_eq!(flushed.relational, 2, "relational");
-    // assert_eq!(flushed.timeseries, 1, "timeseries");
-
-    sleep(Duration::from_secs(1)).await;
-
-    let flushed = x.flush_stats().await.unwrap();
-    assert_eq!(flushed.relational, 0, "relational");
-    assert_eq!(flushed.timeseries, 0, "timeseries");
+    let flushed = x.flush_stats_and_wait().await.unwrap();
+    info!(?flushed);
 
     // TODO: sleep and then flush and make sure no more arrive
 
@@ -131,9 +122,10 @@ async fn test_sum_credits_used() {
         .unwrap();
 
     // flush stats
-    let flushed = x.flush_stats().await.unwrap();
-    assert_eq!(flushed.relational, 1);
-    assert_eq!(flushed.timeseries, 2);
+    let flushed = x.flush_stats_and_wait().await.unwrap();
+    info!(?flushed);
+    // assert_eq!(flushed.relational, 1);
+    // assert_eq!(flushed.timeseries, 2);
 
     // check balance
     let balance: Balance = user_get_balance(&x, &r, &user_login_response).await;
@@ -168,9 +160,10 @@ async fn test_sum_credits_used() {
     }
 
     // flush stats
-    let flushed = x.flush_stats().await.unwrap();
-    assert_eq!(flushed.relational, 1);
-    assert_eq!(flushed.timeseries, 2);
+    let flushed = x.flush_stats_and_wait().await.unwrap();
+    info!(?flushed);
+    // assert_eq!(flushed.relational, 1);
+    // assert_eq!(flushed.timeseries, 2);
 
     // check balance
     info!("checking the final balance");
@@ -184,7 +184,8 @@ async fn test_user_balance_decreases() {
     }
 
     // Flush all stats here
-    let _ = x.flush_stats().await.unwrap();
+    let flushed = x.flush_stats_and_wait().await.unwrap();
+    info!(?flushed);
     // assert_eq!(flush_count.timeseries, 0);
     // assert!(flush_count.relational > 0);
 
@@ -222,7 +223,8 @@ async fn test_user_balance_decreases() {
     }
 
     // Flush all stats here
-    let _ = x.flush_stats().await.unwrap();
+    let flushed = x.flush_stats_and_wait().await.unwrap();
+    info!(?flushed);
     // assert_eq!(flush_count.timeseries, 0);
     // assert!(flush_count.relational == 1);
 
@@ -357,7 +359,8 @@ async fn test_referral_bonus_non_concurrent() {
     }
 
     // Flush all stats here
-    let _ = x.flush_stats().await.unwrap();
+    let flushed = x.flush_stats_and_wait().await.unwrap();
+    info!(?flushed);
     // we can't assert because the intervals might flush for us
     // assert_eq!(flush_count.timeseries, 0);
     // assert!(flush_count.relational > 0);
|
||||
}
|
||||
|
||||
// Flush all stats here
|
||||
let _ = x.flush_stats().await.unwrap();
|
||||
let flushed = x.flush_stats_and_wait().await.unwrap();
|
||||
info!(?flushed);
|
||||
// assert_eq!(flush_count.timeseries, 0);
|
||||
// assert!(flush_count.relational > 0);
|
||||
|
||||
@@ -705,7 +709,8 @@ async fn test_referral_bonus_concurrent_referrer_and_user() {
     }
 
     // Flush all stats here
-    let _ = x.flush_stats().await.unwrap();
+    let flushed = x.flush_stats_and_wait().await.unwrap();
+    info!(?flushed);
     // assert_eq!(flush_count.timeseries, 0);
     // assert!(flush_count.relational > 0);
 