Suprisingly large refactor to get ids everywhere (#222)

* cargo upgrade --incompatible and update

* first draft at suprisingly_large_refactor_to_get_ids_everywhere

* put app in a task_local

* ref cleanup

* use a OnceLock instead of a tokio local

* test more methods

* APP isn't set in all tests

* it compiles. tests fail. todos still open

* use the app only when necessary

* more tests. less panic

* less verbose debug impl

* short enum names

* move kafka and secrets to their own files

* main tests pass

* add debug chain block time

* helper for stats that ignores internal stats

* Update Jenkinsfile (#223)

* more tests

---------

Co-authored-by: Pewxz <124064710+pewxz@users.noreply.github.com>
This commit is contained in:
Bryan Stitt 2023-10-03 13:46:27 -07:00 committed by GitHub
parent abe516c21e
commit e917a11d6c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
54 changed files with 2115 additions and 1867 deletions

230
Cargo.lock generated
View File

@ -99,9 +99,9 @@ dependencies = [
[[package]] [[package]]
name = "anstream" name = "anstream"
version = "0.5.0" version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1f58811cfac344940f1a400b6e6231ce35171f614f26439e80f8c1465c5cc0c" checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44"
dependencies = [ dependencies = [
"anstyle", "anstyle",
"anstyle-parse", "anstyle-parse",
@ -113,15 +113,15 @@ dependencies = [
[[package]] [[package]]
name = "anstyle" name = "anstyle"
version = "1.0.3" version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b84bf0a05bbb2a83e5eb6fa36bb6e87baa08193c35ff52bbf6b38d8af2890e46" checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87"
[[package]] [[package]]
name = "anstyle-parse" name = "anstyle-parse"
version = "0.2.1" version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140"
dependencies = [ dependencies = [
"utf8parse", "utf8parse",
] ]
@ -137,9 +137,9 @@ dependencies = [
[[package]] [[package]]
name = "anstyle-wincon" name = "anstyle-wincon"
version = "2.1.0" version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58f54d10c6dfa51283a066ceab3ec1ab78d13fae00aa49243a45e4571fb79dfd" checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628"
dependencies = [ dependencies = [
"anstyle", "anstyle",
"windows-sys", "windows-sys",
@ -235,6 +235,17 @@ dependencies = [
"event-listener", "event-listener",
] ]
[[package]]
name = "async-recursion"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.37",
]
[[package]] [[package]]
name = "async-stream" name = "async-stream"
version = "0.3.5" version = "0.3.5"
@ -259,9 +270,9 @@ dependencies = [
[[package]] [[package]]
name = "async-stripe" name = "async-stripe"
version = "0.23.0" version = "0.25.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b257177a9dd10350033af6d2602fb5164b4c7168c7b11f4ae8d287178df38996" checksum = "87dd8d77f5bfefa28601194c7233e7c3dc6a9833dae6c990804a2d90a95d6354"
dependencies = [ dependencies = [
"chrono", "chrono",
"futures-util", "futures-util",
@ -626,9 +637,9 @@ dependencies = [
[[package]] [[package]]
name = "bytecount" name = "bytecount"
version = "0.6.3" version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c" checksum = "ad152d03a2c813c80bb94fedbf3a3f02b28f793e39e7c214c8a0bcc196343de7"
[[package]] [[package]]
name = "byteorder" name = "byteorder"
@ -777,9 +788,9 @@ dependencies = [
[[package]] [[package]]
name = "clap" name = "clap"
version = "4.4.5" version = "4.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "824956d0dca8334758a5b7f7e50518d66ea319330cbceedcf76905c2f6ab30e3" checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956"
dependencies = [ dependencies = [
"clap_builder", "clap_builder",
"clap_derive 4.4.2", "clap_derive 4.4.2",
@ -787,9 +798,9 @@ dependencies = [
[[package]] [[package]]
name = "clap_builder" name = "clap_builder"
version = "4.4.5" version = "4.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "122ec64120a49b4563ccaedcbea7818d069ed8e9aa6d829b82d8a4128936b2ab" checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45"
dependencies = [ dependencies = [
"anstream", "anstream",
"anstyle", "anstyle",
@ -966,10 +977,11 @@ dependencies = [
[[package]] [[package]]
name = "console-api" name = "console-api"
version = "0.5.0" version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2895653b4d9f1538a83970077cb01dfc77a4810524e51a110944688e916b18e" checksum = "fd326812b3fd01da5bb1af7d340d0d555fd3d4b641e7f1dfcf5962a902952787"
dependencies = [ dependencies = [
"futures-core",
"prost", "prost",
"prost-types", "prost-types",
"tonic", "tonic",
@ -978,14 +990,14 @@ dependencies = [
[[package]] [[package]]
name = "console-subscriber" name = "console-subscriber"
version = "0.1.10" version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4cf42660ac07fcebed809cfe561dd8730bcd35b075215e6479c516bcd0d11cb" checksum = "7481d4c57092cd1c19dd541b92bdce883de840df30aa5d03fd48a3935c01842e"
dependencies = [ dependencies = [
"console-api", "console-api",
"crossbeam-channel", "crossbeam-channel",
"crossbeam-utils", "crossbeam-utils",
"futures", "futures-task",
"hdrhistogram", "hdrhistogram",
"humantime", "humantime",
"parking_lot", "parking_lot",
@ -1296,7 +1308,7 @@ name = "deferred-rate-limiter"
version = "0.2.0" version = "0.2.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"hashbrown 0.14.0", "hashbrown 0.14.1",
"log", "log",
"moka", "moka",
"redis-rate-limiter", "redis-rate-limiter",
@ -1561,9 +1573,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]] [[package]]
name = "errno" name = "errno"
version = "0.3.3" version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" checksum = "add4f07d43996f76ef320709726a556a9d4f965d9410d8d0271132d2f8293480"
dependencies = [ dependencies = [
"errno-dragonfly", "errno-dragonfly",
"libc", "libc",
@ -2010,13 +2022,12 @@ dependencies = [
[[package]] [[package]]
name = "flume" name = "flume"
version = "0.10.14" version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577" checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181"
dependencies = [ dependencies = [
"futures-core", "futures-core",
"futures-sink", "futures-sink",
"pin-project",
"spin 0.9.8", "spin 0.9.8",
] ]
@ -2363,9 +2374,9 @@ dependencies = [
[[package]] [[package]]
name = "hashbrown" name = "hashbrown"
version = "0.14.0" version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12"
dependencies = [ dependencies = [
"ahash 0.8.3", "ahash 0.8.3",
"allocator-api2", "allocator-api2",
@ -2387,7 +2398,7 @@ version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"
dependencies = [ dependencies = [
"hashbrown 0.14.0", "hashbrown 0.14.1",
] ]
[[package]] [[package]]
@ -2717,12 +2728,12 @@ dependencies = [
[[package]] [[package]]
name = "indexmap" name = "indexmap"
version = "2.0.0" version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897"
dependencies = [ dependencies = [
"equivalent", "equivalent",
"hashbrown 0.14.0", "hashbrown 0.14.1",
] ]
[[package]] [[package]]
@ -3010,9 +3021,9 @@ dependencies = [
[[package]] [[package]]
name = "linux-raw-sys" name = "linux-raw-sys"
version = "0.4.7" version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" checksum = "3852614a3bd9ca9804678ba6be5e3b8ce76dfc902cae004e3e0c44051b6e88db"
[[package]] [[package]]
name = "listenfd" name = "listenfd"
@ -3047,7 +3058,7 @@ version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4a83fb7698b3643a0e34f9ae6f2e8f0178c0fd42f8b59d493aa271ff3a5bf21" checksum = "a4a83fb7698b3643a0e34f9ae6f2e8f0178c0fd42f8b59d493aa271ff3a5bf21"
dependencies = [ dependencies = [
"hashbrown 0.14.0", "hashbrown 0.14.1",
] ]
[[package]] [[package]]
@ -3092,9 +3103,9 @@ dependencies = [
[[package]] [[package]]
name = "memchr" name = "memchr"
version = "2.6.3" version = "2.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167"
[[package]] [[package]]
name = "memoffset" name = "memoffset"
@ -3711,7 +3722,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9"
dependencies = [ dependencies = [
"fixedbitset", "fixedbitset",
"indexmap 2.0.0", "indexmap 2.0.2",
] ]
[[package]] [[package]]
@ -3950,9 +3961,9 @@ dependencies = [
[[package]] [[package]]
name = "prost" name = "prost"
version = "0.11.9" version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" checksum = "f4fdd22f3b9c31b53c060df4a0613a1c7f062d4115a2b984dd15b1858f7e340d"
dependencies = [ dependencies = [
"bytes", "bytes",
"prost-derive", "prost-derive",
@ -3960,22 +3971,22 @@ dependencies = [
[[package]] [[package]]
name = "prost-derive" name = "prost-derive"
version = "0.11.9" version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"itertools 0.10.5", "itertools 0.11.0",
"proc-macro2", "proc-macro2",
"quote", "quote",
"syn 1.0.109", "syn 2.0.37",
] ]
[[package]] [[package]]
name = "prost-types" name = "prost-types"
version = "0.11.9" version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" checksum = "e081b29f63d83a4bc75cfc9f3fe424f9156cf92d8a4f0c9407cce9a1b67327cf"
dependencies = [ dependencies = [
"prost", "prost",
] ]
@ -4258,13 +4269,13 @@ dependencies = [
[[package]] [[package]]
name = "regex" name = "regex"
version = "1.9.5" version = "1.9.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff"
dependencies = [ dependencies = [
"aho-corasick", "aho-corasick",
"memchr", "memchr",
"regex-automata 0.3.8", "regex-automata 0.3.9",
"regex-syntax 0.7.5", "regex-syntax 0.7.5",
] ]
@ -4279,9 +4290,9 @@ dependencies = [
[[package]] [[package]]
name = "regex-automata" name = "regex-automata"
version = "0.3.8" version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9"
dependencies = [ dependencies = [
"aho-corasick", "aho-corasick",
"memchr", "memchr",
@ -4526,9 +4537,9 @@ dependencies = [
[[package]] [[package]]
name = "rustix" name = "rustix"
version = "0.38.14" version = "0.38.15"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "747c788e9ce8e92b12cd485c49ddf90723550b654b32508f979b71a7b1ecda4f" checksum = "d2f9da0cbd88f9f09e7814e388301c8414c51c62aa6ce1e4b5c551d49d96e531"
dependencies = [ dependencies = [
"bitflags 2.4.0", "bitflags 2.4.0",
"errno", "errno",
@ -4545,7 +4556,7 @@ checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8"
dependencies = [ dependencies = [
"log", "log",
"ring", "ring",
"rustls-webpki 0.101.6", "rustls-webpki",
"sct", "sct",
] ]
@ -4570,16 +4581,6 @@ dependencies = [
"base64 0.21.4", "base64 0.21.4",
] ]
[[package]]
name = "rustls-webpki"
version = "0.100.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f6a5fc258f1c1276dfe3016516945546e2d5383911efc0fc4f1cdc5df3a4ae3"
dependencies = [
"ring",
"untrusted",
]
[[package]] [[package]]
name = "rustls-webpki" name = "rustls-webpki"
version = "0.101.6" version = "0.101.6"
@ -4729,7 +4730,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6bef60732e6016c5643350c87f43a697e8c074e41e4e2a9d961c056cb1310915" checksum = "6bef60732e6016c5643350c87f43a697e8c074e41e4e2a9d961c056cb1310915"
dependencies = [ dependencies = [
"chrono", "chrono",
"clap 4.4.5", "clap 4.4.6",
"dotenvy", "dotenvy",
"glob", "glob",
"regex", "regex",
@ -4760,7 +4761,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e53b6ddaf6dbb84e5dfc3fb78634ed0a4d6d64e7479500ab2585db239747031" checksum = "7e53b6ddaf6dbb84e5dfc3fb78634ed0a4d6d64e7479500ab2585db239747031"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"clap 4.4.5", "clap 4.4.6",
"dotenvy", "dotenvy",
"futures", "futures",
"sea-orm", "sea-orm",
@ -5186,9 +5187,9 @@ dependencies = [
[[package]] [[package]]
name = "sharded-slab" name = "sharded-slab"
version = "0.1.4" version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" checksum = "c1b21f559e07218024e7e9f90f96f601825397de0e25420135f7f952453fed0b"
dependencies = [ dependencies = [
"lazy_static", "lazy_static",
] ]
@ -5418,9 +5419,9 @@ dependencies = [
[[package]] [[package]]
name = "sqlx" name = "sqlx"
version = "0.7.1" version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e58421b6bc416714d5115a2ca953718f6c621a51b68e4f4922aea5a4391a721" checksum = "0e50c216e3624ec8e7ecd14c6a6a6370aad6ee5d8cfc3ab30b5162eeeef2ed33"
dependencies = [ dependencies = [
"sqlx-core", "sqlx-core",
"sqlx-macros", "sqlx-macros",
@ -5431,9 +5432,9 @@ dependencies = [
[[package]] [[package]]
name = "sqlx-core" name = "sqlx-core"
version = "0.7.1" version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd4cef4251aabbae751a3710927945901ee1d97ee96d757f6880ebb9a79bfd53" checksum = "8d6753e460c998bbd4cd8c6f0ed9a64346fcca0723d6e75e52fdc351c5d2169d"
dependencies = [ dependencies = [
"ahash 0.8.3", "ahash 0.8.3",
"atoi", "atoi",
@ -5453,7 +5454,7 @@ dependencies = [
"futures-util", "futures-util",
"hashlink", "hashlink",
"hex", "hex",
"indexmap 2.0.0", "indexmap 2.0.2",
"log", "log",
"memchr", "memchr",
"once_cell", "once_cell",
@ -5479,9 +5480,9 @@ dependencies = [
[[package]] [[package]]
name = "sqlx-macros" name = "sqlx-macros"
version = "0.7.1" version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "208e3165167afd7f3881b16c1ef3f2af69fa75980897aac8874a0696516d12c2" checksum = "9a793bb3ba331ec8359c1853bd39eed32cdd7baaf22c35ccf5c92a7e8d1189ec"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@ -5492,9 +5493,9 @@ dependencies = [
[[package]] [[package]]
name = "sqlx-macros-core" name = "sqlx-macros-core"
version = "0.7.1" version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a4a8336d278c62231d87f24e8a7a74898156e34c1c18942857be2acb29c7dfc" checksum = "0a4ee1e104e00dedb6aa5ffdd1343107b0a4702e862a84320ee7cc74782d96fc"
dependencies = [ dependencies = [
"dotenvy", "dotenvy",
"either", "either",
@ -5518,9 +5519,9 @@ dependencies = [
[[package]] [[package]]
name = "sqlx-mysql" name = "sqlx-mysql"
version = "0.7.1" version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ca69bf415b93b60b80dc8fda3cb4ef52b2336614d8da2de5456cc942a110482" checksum = "864b869fdf56263f4c95c45483191ea0af340f9f3e3e7b4d57a61c7c87a970db"
dependencies = [ dependencies = [
"atoi", "atoi",
"base64 0.21.4", "base64 0.21.4",
@ -5565,9 +5566,9 @@ dependencies = [
[[package]] [[package]]
name = "sqlx-postgres" name = "sqlx-postgres"
version = "0.7.1" version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0db2df1b8731c3651e204629dd55e52adbae0462fa1bdcbed56a2302c18181e" checksum = "eb7ae0e6a97fb3ba33b23ac2671a5ce6e3cabe003f451abd5a56e7951d975624"
dependencies = [ dependencies = [
"atoi", "atoi",
"base64 0.21.4", "base64 0.21.4",
@ -5610,9 +5611,9 @@ dependencies = [
[[package]] [[package]]
name = "sqlx-sqlite" name = "sqlx-sqlite"
version = "0.7.1" version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4c21bf34c7cae5b283efb3ac1bcc7670df7561124dc2f8bdc0b59be40f79a2" checksum = "d59dc83cf45d89c555a577694534fcd1b55c545a816c816ce51f20bbe56a4f3f"
dependencies = [ dependencies = [
"atoi", "atoi",
"chrono", "chrono",
@ -5815,18 +5816,18 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
[[package]] [[package]]
name = "thiserror" name = "thiserror"
version = "1.0.48" version = "1.0.49"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4"
dependencies = [ dependencies = [
"thiserror-impl", "thiserror-impl",
] ]
[[package]] [[package]]
name = "thiserror-impl" name = "thiserror-impl"
version = "1.0.48" version = "1.0.49"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@ -5928,9 +5929,9 @@ dependencies = [
[[package]] [[package]]
name = "tokio-console" name = "tokio-console"
version = "0.1.9" version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d8d44c50f1b17838c6044119701900e4242dbc0e8a3792f6fbf512b489b3dbf" checksum = "d5ff40e8df801b383b8666967ec4aee8dc516f376d06d0e5a9f93f310763e6d2"
dependencies = [ dependencies = [
"atty", "atty",
"clap 3.2.25", "clap 3.2.25",
@ -6051,14 +6052,14 @@ dependencies = [
[[package]] [[package]]
name = "toml" name = "toml"
version = "0.8.0" version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c226a7bba6d859b63c92c4b4fe69c5b6b72d0cb897dbc8e6012298e6154cb56e" checksum = "1bc1433177506450fe920e46a4f9812d0c211f5dd556da10e731a0a3dfa151f0"
dependencies = [ dependencies = [
"serde", "serde",
"serde_spanned", "serde_spanned",
"toml_datetime", "toml_datetime",
"toml_edit 0.20.0", "toml_edit 0.20.1",
] ]
[[package]] [[package]]
@ -6076,7 +6077,7 @@ version = "0.19.15"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
dependencies = [ dependencies = [
"indexmap 2.0.0", "indexmap 2.0.2",
"serde", "serde",
"serde_spanned", "serde_spanned",
"toml_datetime", "toml_datetime",
@ -6085,11 +6086,11 @@ dependencies = [
[[package]] [[package]]
name = "toml_edit" name = "toml_edit"
version = "0.20.0" version = "0.20.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ff63e60a958cefbb518ae1fd6566af80d9d4be430a33f3723dfc47d1d411d95" checksum = "ca676d9ba1a322c1b64eb8045a5ec5c0cfb0c9d08e15e9ff622589ad5221c8fe"
dependencies = [ dependencies = [
"indexmap 2.0.0", "indexmap 2.0.2",
"serde", "serde",
"serde_spanned", "serde_spanned",
"toml_datetime", "toml_datetime",
@ -6098,16 +6099,15 @@ dependencies = [
[[package]] [[package]]
name = "tonic" name = "tonic"
version = "0.9.2" version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e"
dependencies = [ dependencies = [
"async-stream",
"async-trait", "async-trait",
"axum", "axum",
"base64 0.21.4", "base64 0.21.4",
"bytes", "bytes",
"futures-core",
"futures-util",
"h2", "h2",
"http", "http",
"http-body", "http-body",
@ -6397,17 +6397,17 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"
[[package]] [[package]]
name = "ureq" name = "ureq"
version = "2.7.1" version = "2.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b11c96ac7ee530603dcdf68ed1557050f374ce55a5a07193ebf8cbc9f8927e9" checksum = "f5ccd538d4a604753ebc2f17cd9946e89b77bf87f6a8e2309667c6f2e87855e3"
dependencies = [ dependencies = [
"base64 0.21.4", "base64 0.21.4",
"log", "log",
"once_cell", "once_cell",
"rustls", "rustls",
"rustls-webpki 0.100.3", "rustls-webpki",
"url", "url",
"webpki-roots 0.23.1", "webpki-roots 0.25.2",
] ]
[[package]] [[package]]
@ -6614,11 +6614,12 @@ dependencies = [
[[package]] [[package]]
name = "web3_proxy" name = "web3_proxy"
version = "1.43.8" version = "1.43.10"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"arc-swap", "arc-swap",
"argh", "argh",
"async-recursion",
"async-stripe", "async-stripe",
"async-trait", "async-trait",
"axum", "axum",
@ -6643,7 +6644,7 @@ dependencies = [
"futures-util", "futures-util",
"glob", "glob",
"handlebars", "handlebars",
"hashbrown 0.14.0", "hashbrown 0.14.1",
"hdrhistogram", "hdrhistogram",
"hostname", "hostname",
"http", "http",
@ -6684,7 +6685,7 @@ dependencies = [
"tokio", "tokio",
"tokio-console", "tokio-console",
"tokio-stream", "tokio-stream",
"toml 0.8.0", "toml 0.8.1",
"tower", "tower",
"tower-http", "tower-http",
"tracing", "tracing",
@ -6696,7 +6697,7 @@ dependencies = [
[[package]] [[package]]
name = "web3_proxy_cli" name = "web3_proxy_cli"
version = "1.43.8" version = "1.43.10"
dependencies = [ dependencies = [
"env_logger", "env_logger",
"parking_lot", "parking_lot",
@ -6710,22 +6711,13 @@ dependencies = [
"web3_proxy", "web3_proxy",
] ]
[[package]]
name = "webpki-roots"
version = "0.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338"
dependencies = [
"rustls-webpki 0.100.3",
]
[[package]] [[package]]
name = "webpki-roots" name = "webpki-roots"
version = "0.24.0" version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888" checksum = "b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888"
dependencies = [ dependencies = [
"rustls-webpki 0.101.6", "rustls-webpki",
] ]
[[package]] [[package]]

31
Jenkinsfile vendored
View File

@ -18,22 +18,21 @@ pipeline {
LATEST_BRANCH="main" LATEST_BRANCH="main"
} }
stages { stages {
// stage('Check and Cancel Old Builds') { stage('Check and Cancel Old Builds') {
// steps { steps {
// script { script {
// def currentBuildNumber = currentBuild.number def jobName = env.JOB_NAME
def buildNumber = env.BUILD_NUMBER.toInteger()
// // Check all build from same project
// for (build in currentBuild.rawBuild.getParent().getBuilds()) { // Get all running builds of the current job
// // Check if an older build is still running and cancel it in favor of the new one def job = Jenkins.instance.getItemByFullName(jobName)
// if (build.number < currentBuildNumber && build.building) { def runningBuilds = job.builds.findAll { it.isBuilding() && it.number < buildNumber }
// echo "Cancelling build ${build.number}"
// build.doStop() // Cancel running builds
// } runningBuilds.each { it.doStop() }
// } }
// } }
// } }
// }
stage('build and push') { stage('build and push') {
parallel { parallel {
stage('Build and push arm64_graviton2 image') { stage('Build and push arm64_graviton2 image') {

View File

@ -44,13 +44,13 @@ cargo run --release -- proxyd
Quickly run tests: Quickly run tests:
``` ```
RUST_LOG=web3_proxy=trace,info cargo nextest run RUST_BACKTRACE=1 RUST_LOG=web3_proxy=trace,info cargo nextest run
``` ```
Run more tests: Run more tests:
``` ```
RUST_LOG=web3_proxy=trace,info cargo nextest run --features tests-needing-docker RUST_BACKTRACE=1 RUST_LOG=web3_proxy=trace,info cargo nextest run --features tests-needing-docker
``` ```
## Mysql ## Mysql

View File

@ -289,7 +289,7 @@ These are not yet ordered. There might be duplicates. We might not actually need
- we were caching too aggressively - we were caching too aggressively
- [x] BUG! if sending transactions gets "INTERNAL_ERROR: existing tx with same hash", create a success message - [x] BUG! if sending transactions gets "INTERNAL_ERROR: existing tx with same hash", create a success message
- we just want to be sure that the server has our tx and in this case, it does. - we just want to be sure that the server has our tx and in this case, it does.
- ERROR http_request:request:try_send_all_upstream_servers: web3_proxy::rpcs::request: bad response! err=JsonRpcClientError(JsonRpcError(JsonRpcError { code: -32000, message: "INTERNAL_ERROR: existing tx with same hash", data: None })) method=eth_sendRawTransaction rpc=local_erigon_alpha_archive id=01GF4HV03Y4ZNKQV8DW5NDQ5CG method=POST authorized_request=User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 }) self=Web3Rpcs { conns: {"local_erigon_alpha_archive_ws": Web3Rpc { name: "local_erigon_alpha_archive_ws", blocks: "all", .. }, "local_geth_ws": Web3Rpc { name: "local_geth_ws", blocks: 64, .. }, "local_erigon_alpha_archive": Web3Rpc { name: "local_erigon_alpha_archive", blocks: "all", .. }}, .. } authorized_request=Some(User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 })) request=JsonRpcRequest { id: RawValue(39), method: "eth_sendRawTransaction", .. } request_metadata=Some(RequestMetadata { datetime: 2022-10-11T22:14:57.406829095Z, period_seconds: 60, request_bytes: 633, backend_requests: 0, no_servers: 0, error_response: false, response_bytes: 0, response_millis: 0 }) block_needed=None - ERROR http_request:request:try_send_all_upstream_servers: web3_proxy::rpcs::request: bad response! err=JsonRpcClientError(JsonRpcError(JsonRpcError { code: -32000, message: "INTERNAL_ERROR: existing tx with same hash", data: None })) method=eth_sendRawTransaction rpc=local_erigon_alpha_archive id=01GF4HV03Y4ZNKQV8DW5NDQ5CG method=POST authorized_request=User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 }) self=Web3Rpcs { conns: {"local_erigon_alpha_archive_ws": Web3Rpc { name: "local_erigon_alpha_archive_ws", blocks: "all", .. }, "local_geth_ws": Web3Rpc { name: "local_geth_ws", blocks: 64, .. 
}, "local_erigon_alpha_archive": Web3Rpc { name: "local_erigon_alpha_archive", blocks: "all", .. }}, .. } authorized_request=Some(User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 })) request=JsonRpcRequest { id: RawValue(39), method: "eth_sendRawTransaction", .. } web3_request=Some(RequestMetadata { datetime: 2022-10-11T22:14:57.406829095Z, period_seconds: 60, request_bytes: 633, backend_requests: 0, no_servers: 0, error_response: false, response_bytes: 0, response_millis: 0 }) block_needed=None
- [x] serde collect unknown fields in config instead of crash - [x] serde collect unknown fields in config instead of crash
- [x] upgrade user tier by address - [x] upgrade user tier by address
- [x] all_backend_connections skips syncing servers - [x] all_backend_connections skips syncing servers

View File

@ -8,7 +8,7 @@ edition = "2021"
redis-rate-limiter = { path = "../redis-rate-limiter" } redis-rate-limiter = { path = "../redis-rate-limiter" }
anyhow = "1.0.75" anyhow = "1.0.75"
hashbrown = "0.14.0" hashbrown = "0.14.1"
log = "0.4.20" log = "0.4.20"
moka = { version = "0.12.0", features = ["future"] } moka = { version = "0.12.0", features = ["future"] }
tokio = "1.32.0" tokio = "1.32.0"

View File

@ -1,6 +1,6 @@
[package] [package]
name = "web3_proxy" name = "web3_proxy"
version = "1.43.8" version = "1.43.10"
edition = "2021" edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@ -38,14 +38,14 @@ siwe = { git = "https://github.com/llamanodes/siwe-rs", rev = "013be5204ff1c8577
anyhow = { version = "1.0.75", features = ["backtrace"] } anyhow = { version = "1.0.75", features = ["backtrace"] }
arc-swap = { version = "1.6.0" } arc-swap = { version = "1.6.0" }
argh = "0.1.12" argh = "0.1.12"
async-stripe = { version = "0.23.0", default-features = false, features = ["billing", "checkout", "connect", "runtime-tokio-hyper-rustls", "webhook-events"] } async-stripe = { version = "0.25.1", default-features = false, features = ["billing", "checkout", "connect", "runtime-tokio-hyper-rustls", "webhook-events"] }
async-trait = "0.1.73" async-trait = "0.1.73"
axum = { version = "0.6.20", features = ["headers", "tracing", "ws"] } axum = { version = "0.6.20", features = ["headers", "tracing", "ws"] }
axum-client-ip = "0.4.2" axum-client-ip = "0.4.2"
axum-macros = "0.3.8" axum-macros = "0.3.8"
base64 = "0.21.4" base64 = "0.21.4"
chrono = { version = "0.4.31" } chrono = { version = "0.4.31" }
console-subscriber = { version = "0.1.10", features = ["env-filter", "parking_lot"], optional = true } console-subscriber = { version = "0.2.0", features = ["env-filter", "parking_lot"], optional = true }
counter = "0.5.7" counter = "0.5.7"
derivative = "2.2.0" derivative = "2.2.0"
derive_more = { version = "0.99.17", features = ["nightly"] } derive_more = { version = "0.99.17", features = ["nightly"] }
@ -56,7 +56,7 @@ fstrings = "0.2"
futures = { version = "0.3.28" } futures = { version = "0.3.28" }
glob = "0.3.1" glob = "0.3.1"
handlebars = "4.4.0" handlebars = "4.4.0"
hashbrown = { version = "0.14.0", features = ["serde", "nightly"] } hashbrown = { version = "0.14.1", features = ["serde", "nightly"] }
hdrhistogram = "7.5.2" hdrhistogram = "7.5.2"
hostname = "0.3.1" hostname = "0.3.1"
http = "0.2.9" http = "0.2.9"
@ -74,7 +74,7 @@ ordered-float = {version = "4.1.0" }
pagerduty-rs = { version = "0.1.6", default-features = false, features = ["async", "rustls", "sync"] } pagerduty-rs = { version = "0.1.6", default-features = false, features = ["async", "rustls", "sync"] }
parking_lot = { version = "0.12.1", features = ["arc_lock", "nightly"] } parking_lot = { version = "0.12.1", features = ["arc_lock", "nightly"] }
rdkafka = { version = "0.34.0", features = ["tracing"] } rdkafka = { version = "0.34.0", features = ["tracing"] }
regex = "1.9.5" regex = "1.9.6"
reqwest = { version = "0.11.20", default-features = false, features = ["json", "tokio-rustls"] } reqwest = { version = "0.11.20", default-features = false, features = ["json", "tokio-rustls"] }
rmp-serde = "1.1.2" rmp-serde = "1.1.2"
rust_decimal = { version = "1.32.0" } rust_decimal = { version = "1.32.0" }
@ -87,9 +87,9 @@ serde_prometheus = "0.2.4"
strum = { version = "0.25.0", features = ["derive"] } strum = { version = "0.25.0", features = ["derive"] }
time = { version = "0.3" } time = { version = "0.3" }
tokio = { version = "1.32.0", features = ["full", "tracing"] } tokio = { version = "1.32.0", features = ["full", "tracing"] }
tokio-console = { version = "0.1.9", optional = true } tokio-console = { version = "0.1.10", optional = true }
tokio-stream = { version = "0.1.14", features = ["sync"] } tokio-stream = { version = "0.1.14", features = ["sync"] }
toml = "0.8.0" toml = "0.8.1"
tower = { version = "0.4.13", features = ["timeout", "tracing"] } tower = { version = "0.4.13", features = ["timeout", "tracing"] }
tower-http = { version = "0.4.4", features = ["cors", "normalize-path", "sensitive-headers", "trace"] } tower-http = { version = "0.4.4", features = ["cors", "normalize-path", "sensitive-headers", "trace"] }
tracing = "0.1" tracing = "0.1"
@ -101,6 +101,7 @@ uuid = { version = "1.4.1", default-features = false, features = ["fast-rng", "v
test-log = { version = "0.2.12", default-features = false, features = ["trace"] } test-log = { version = "0.2.12", default-features = false, features = ["trace"] }
bytes = "1.5.0" bytes = "1.5.0"
futures-util = "0.3.28" futures-util = "0.3.28"
async-recursion = "1.0.5"
# # TODO: bring this back # # TODO: bring this back
# check-if-email-exists = "0.9.0" # check-if-email-exists = "0.9.0"

View File

@ -43,8 +43,8 @@ pub async fn query_admin_modify_usertier<'a>(
let mut response_body = HashMap::new(); let mut response_body = HashMap::new();
// Establish connections // Establish connections
let db_conn = global_db_conn().await?; let db_conn = global_db_conn()?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
let mut redis_conn = app.redis_conn().await?; let mut redis_conn = app.redis_conn().await?;
// Will modify logic here // Will modify logic here

View File

@ -1,22 +1,17 @@
mod ws; mod ws;
use crate::block_number::CacheMode;
use crate::caches::{RegisteredUserRateLimitKey, RpcSecretKeyCache, UserBalanceCache}; use crate::caches::{RegisteredUserRateLimitKey, RpcSecretKeyCache, UserBalanceCache};
use crate::config::{AppConfig, TopConfig}; use crate::config::{AppConfig, TopConfig};
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult}; use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
use crate::frontend::authorization::{ use crate::frontend::authorization::{Authorization, Web3Request};
Authorization, RequestMetadata, RequestOrMethod, ResponseOrBytes,
};
use crate::frontend::rpc_proxy_ws::ProxyMode; use crate::frontend::rpc_proxy_ws::ProxyMode;
use crate::globals::{global_db_conn, DatabaseError, DB_CONN, DB_REPLICA}; use crate::globals::{global_db_conn, DatabaseError, APP, DB_CONN, DB_REPLICA};
use crate::jsonrpc::{ use crate::jsonrpc::{
self, JsonRpcErrorData, JsonRpcId, JsonRpcParams, JsonRpcRequest, JsonRpcRequestEnum, self, JsonRpcErrorData, JsonRpcId, JsonRpcParams, JsonRpcRequest, JsonRpcRequestEnum,
JsonRpcResultData, SingleResponse, JsonRpcResultData, SingleResponse,
}; };
use crate::relational_db::{connect_db, migrate_db}; use crate::relational_db::{connect_db, migrate_db};
use crate::response_cache::{ use crate::response_cache::{JsonRpcResponseCache, JsonRpcResponseEnum, JsonRpcResponseWeigher};
JsonRpcQueryCacheKey, JsonRpcResponseCache, JsonRpcResponseEnum, JsonRpcResponseWeigher,
};
use crate::rpcs::blockchain::Web3ProxyBlock; use crate::rpcs::blockchain::Web3ProxyBlock;
use crate::rpcs::consensus::RankedRpcs; use crate::rpcs::consensus::RankedRpcs;
use crate::rpcs::many::Web3Rpcs; use crate::rpcs::many::Web3Rpcs;
@ -77,9 +72,9 @@ pub struct Web3ProxyApp {
/// Send requests to the best server available /// Send requests to the best server available
pub balanced_rpcs: Arc<Web3Rpcs>, pub balanced_rpcs: Arc<Web3Rpcs>,
/// Send 4337 Abstraction Bundler requests to one of these servers /// Send 4337 Abstraction Bundler requests to one of these servers
pub bundler_4337_rpcs: Option<Arc<Web3Rpcs>>, pub bundler_4337_rpcs: Arc<Web3Rpcs>,
/// application config /// application config
/// TODO: this will need a large refactor to handle reloads while running. maybe use a watch::Receiver? /// TODO: this will need a large refactor to handle reloads while running. maybe use a watch::Receiver and a task_local?
pub config: AppConfig, pub config: AppConfig,
pub http_client: Option<reqwest::Client>, pub http_client: Option<reqwest::Client>,
/// track JSONRPC responses /// track JSONRPC responses
@ -114,8 +109,7 @@ pub struct Web3ProxyApp {
/// we do this because each pending login is a row in the database /// we do this because each pending login is a row in the database
pub login_rate_limiter: Option<RedisRateLimiter>, pub login_rate_limiter: Option<RedisRateLimiter>,
/// Send private requests (like eth_sendRawTransaction) to all these servers /// Send private requests (like eth_sendRawTransaction) to all these servers
/// TODO: include another type so that we can use private miner relays that do not use JSONRPC requests pub protected_rpcs: Arc<Web3Rpcs>,
pub private_rpcs: Option<Arc<Web3Rpcs>>,
pub prometheus_port: Arc<AtomicU16>, pub prometheus_port: Arc<AtomicU16>,
/// cache authenticated users so that we don't have to query the database on the hot path /// cache authenticated users so that we don't have to query the database on the hot path
// TODO: should the key be our RpcSecretKey class instead of Ulid? // TODO: should the key be our RpcSecretKey class instead of Ulid?
@ -462,60 +456,42 @@ impl Web3ProxyApp {
app_handles.push(balanced_handle); app_handles.push(balanced_handle);
// prepare a Web3Rpcs to hold all our private connections // prepare a Web3Rpcs to hold all our private connections
// only some chains have this, so this is optional // only some chains have this, so this might be empty
// TODO: remove this. it should only be done by apply_top_config // TODO: set min_sum_soft_limit > 0 if any private rpcs are configured. this way we don't accidently leak to the public mempool if they are all offline
let private_rpcs = if top_config.private_rpcs.is_none() { let (private_rpcs, private_handle, _) = Web3Rpcs::spawn(
warn!("No private relays configured. Any transactions will be broadcast to the public mempool!"); chain_id,
None // private rpcs don't get subscriptions, so no need for max_head_block_lag
} else { None,
// TODO: do something with the spawn handle 0,
let (private_rpcs, private_handle, _) = Web3Rpcs::spawn( 0,
chain_id, "protected rpcs".into(),
// private rpcs don't get subscriptions, so no need for max_head_block_lag // subscribing to new heads here won't work well. if they are fast, they might be ahead of balanced_rpcs
None, // they also often have low rate limits
0, // however, they are well connected to miners/validators. so maybe using them as a safety check would be good
0, // TODO: but maybe we could include privates in the "backup" tier
"protected rpcs".into(), None,
// subscribing to new heads here won't work well. if they are fast, they might be ahead of balanced_rpcs None,
// they also often have low rate limits )
// however, they are well connected to miners/validators. so maybe using them as a safety check would be good .await
// TODO: but maybe we could include privates in the "backup" tier .web3_context("spawning private_rpcs")?;
None,
None,
)
.await
.web3_context("spawning private_rpcs")?;
app_handles.push(private_handle); app_handles.push(private_handle);
Some(private_rpcs) // prepare a Web3Rpcs to hold all our 4337 Abstraction Bundler connections (if any)
}; let (bundler_4337_rpcs, bundler_4337_rpcs_handle, _) = Web3Rpcs::spawn(
chain_id,
// bundler_4337_rpcs don't get subscriptions, so no need for max_head_block_lag
None,
0,
0,
"eip4337 rpcs".into(),
None,
None,
)
.await
.web3_context("spawning bundler_4337_rpcs")?;
// prepare a Web3Rpcs to hold all our 4337 Abstraction Bundler connections app_handles.push(bundler_4337_rpcs_handle);
// only some chains have this, so this is optional
// TODO: remove this. it should only be done by apply_top_config
let bundler_4337_rpcs = if top_config.bundler_4337_rpcs.is_none() {
warn!("No bundler_4337_rpcs configured");
None
} else {
// TODO: do something with the spawn handle
let (bundler_4337_rpcs, bundler_4337_rpcs_handle, _) = Web3Rpcs::spawn(
chain_id,
// bundler_4337_rpcs don't get subscriptions, so no need for max_head_block_lag
None,
0,
0,
"eip4337 rpcs".into(),
None,
None,
)
.await
.web3_context("spawning bundler_4337_rpcs")?;
app_handles.push(bundler_4337_rpcs_handle);
Some(bundler_4337_rpcs)
};
let hostname = hostname::get() let hostname = hostname::get()
.ok() .ok()
@ -557,7 +533,7 @@ impl Web3ProxyApp {
kafka_producer, kafka_producer,
login_rate_limiter, login_rate_limiter,
pending_txid_firehose: deduped_txid_firehose, pending_txid_firehose: deduped_txid_firehose,
private_rpcs, protected_rpcs: private_rpcs,
prometheus_port: prometheus_port.clone(), prometheus_port: prometheus_port.clone(),
rpc_secret_key_cache, rpc_secret_key_cache,
start: Instant::now(), start: Instant::now(),
@ -568,13 +544,17 @@ impl Web3ProxyApp {
watch_consensus_head_receiver, watch_consensus_head_receiver,
}; };
let app = Arc::new(app);
if let Err(app) = APP.set(app.clone()) {
error!(?app, "global APP can only be set once!");
};
// TODO: do apply_top_config once we don't duplicate the db // TODO: do apply_top_config once we don't duplicate the db
if let Err(err) = app.apply_top_config_db(&top_config).await { if let Err(err) = app.apply_top_config_db(&top_config).await {
warn!(?err, "unable to fully apply config while starting!"); warn!(?err, "unable to fully apply config while starting!");
}; };
let app = Arc::new(app);
// watch for config changes // watch for config changes
// TODO: move this to its own function/struct // TODO: move this to its own function/struct
{ {
@ -655,42 +635,25 @@ impl Web3ProxyApp {
let balanced = self let balanced = self
.balanced_rpcs .balanced_rpcs
.apply_server_configs(self, new_top_config.balanced_rpcs.clone()) .apply_server_configs(self, &new_top_config.balanced_rpcs)
.await .await
.web3_context("updating balanced rpcs"); .web3_context("updating balanced rpcs");
let private = if let Some(private_rpc_configs) = new_top_config.private_rpcs.clone() { let protected = self
if let Some(ref private_rpcs) = self.private_rpcs { .protected_rpcs
private_rpcs .apply_server_configs(self, &new_top_config.private_rpcs)
.apply_server_configs(self, private_rpc_configs) .await
.await .web3_context("updating private_rpcs");
.web3_context("updating private_rpcs")
} else {
// TODO: maybe we should have private_rpcs just be empty instead of being None
todo!("handle toggling private_rpcs")
}
} else {
Ok(())
};
let bundler_4337 = let bundler_4337 = self
if let Some(bundler_4337_rpc_configs) = new_top_config.bundler_4337_rpcs.clone() { .bundler_4337_rpcs
if let Some(ref bundler_4337_rpcs) = self.bundler_4337_rpcs { .apply_server_configs(self, &new_top_config.bundler_4337_rpcs)
bundler_4337_rpcs .await
.apply_server_configs(self, bundler_4337_rpc_configs.clone()) .web3_context("updating bundler_4337_rpcs");
.await
.web3_context("updating bundler_4337_rpcs")
} else {
// TODO: maybe we should have bundler_4337_rpcs just be empty instead of being None
todo!("handle toggling bundler_4337_rpcs")
}
} else {
Ok(())
};
// TODO: log all the errors if there are multiple // TODO: log all the errors if there are multiple
balanced?; balanced?;
private?; protected?;
bundler_4337?; bundler_4337?;
Ok(()) Ok(())
@ -714,7 +677,7 @@ impl Web3ProxyApp {
.db_max_connections .db_max_connections
.unwrap_or(db_min_connections * 2); .unwrap_or(db_min_connections * 2);
let db_conn = if let Ok(old_db_conn) = global_db_conn().await { let db_conn = if let Ok(old_db_conn) = global_db_conn() {
// TODO: compare old settings with new settings. don't always re-use! // TODO: compare old settings with new settings. don't always re-use!
Ok(old_db_conn) Ok(old_db_conn)
} else { } else {
@ -740,7 +703,7 @@ impl Web3ProxyApp {
.db_replica_max_connections .db_replica_max_connections
.unwrap_or(db_max_connections); .unwrap_or(db_max_connections);
let db_replica = if let Ok(old_db_replica) = global_db_conn().await { let db_replica = if let Ok(old_db_replica) = global_db_conn() {
// TODO: compare old settings with new settings. don't always re-use! // TODO: compare old settings with new settings. don't always re-use!
Ok(old_db_replica) Ok(old_db_replica)
} else { } else {
@ -779,8 +742,8 @@ impl Web3ProxyApp {
db_conn.clone().map(Into::into) db_conn.clone().map(Into::into)
}; };
let mut locked_conn = DB_CONN.write().await; let mut locked_conn = DB_CONN.write();
let mut locked_replica = DB_REPLICA.write().await; let mut locked_replica = DB_REPLICA.write();
*locked_conn = db_conn.clone(); *locked_conn = db_conn.clone();
*locked_replica = db_replica.clone(); *locked_replica = db_replica.clone();
@ -845,7 +808,7 @@ impl Web3ProxyApp {
#[derive(Default, Serialize)] #[derive(Default, Serialize)]
struct UserCount(i64); struct UserCount(i64);
let user_count: UserCount = if let Ok(db) = global_db_conn().await { let user_count: UserCount = if let Ok(db) = global_db_conn() {
match user::Entity::find().count(&db).await { match user::Entity::find().count(&db).await {
Ok(user_count) => UserCount(user_count as i64), Ok(user_count) => UserCount(user_count as i64),
Err(err) => { Err(err) => {
@ -1079,15 +1042,14 @@ impl Web3ProxyApp {
let head_block: Web3ProxyBlock = self let head_block: Web3ProxyBlock = self
.balanced_rpcs .balanced_rpcs
.head_block() .head_block()
.ok_or(Web3ProxyError::NoServersSynced)? .ok_or(Web3ProxyError::NoServersSynced)?;
.clone();
// TODO: use streams and buffers so we don't overwhelm our server // TODO: use streams and buffers so we don't overwhelm our server
let responses = join_all( let responses = join_all(
requests requests
.into_iter() .into_iter()
.map(|request| { .map(|request| {
self.proxy_request(request, authorization.clone(), Some(&head_block)) self.proxy_request(request, authorization.clone(), Some(head_block.clone()))
}) })
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
) )
@ -1131,74 +1093,53 @@ impl Web3ProxyApp {
/// try to send transactions to the best available rpcs with protected/private mempools /// try to send transactions to the best available rpcs with protected/private mempools
/// if no protected rpcs are configured, then some public rpcs are used instead /// if no protected rpcs are configured, then some public rpcs are used instead
async fn try_send_protected<P: JsonRpcParams>( async fn try_send_protected(
self: &Arc<Self>, self: &Arc<Self>,
method: &str, web3_request: &Arc<Web3Request>,
params: &P,
request_metadata: &Arc<RequestMetadata>,
) -> Web3ProxyResult<Arc<RawValue>> { ) -> Web3ProxyResult<Arc<RawValue>> {
if let Some(protected_rpcs) = self.private_rpcs.as_ref() { if self.protected_rpcs.is_empty() {
if !protected_rpcs.is_empty() { let num_public_rpcs = match web3_request.proxy_mode() {
let protected_response = protected_rpcs // TODO: how many balanced rpcs should we send to? configurable? percentage of total?
.try_send_all_synced_connections( ProxyMode::Best | ProxyMode::Debug => Some(4),
method, ProxyMode::Fastest(0) => None,
params, // TODO: how many balanced rpcs should we send to? configurable? percentage of total?
request_metadata, // TODO: what if we do 2 per tier? we want to blast the third party rpcs
None, // TODO: maybe having the third party rpcs in their own Web3Rpcs would be good for this
None, ProxyMode::Fastest(x) => Some(x * 4),
Some(Duration::from_secs(10)), ProxyMode::Versus => None,
Some(Level::TRACE.into()), };
Some(3),
)
.await;
return protected_response; // no private rpcs to send to. send to a few public rpcs
} // try_send_all_upstream_servers puts the request id into the response. no need to do that ourselves here.
self.balanced_rpcs
.try_send_all_synced_connections(
web3_request,
Some(Duration::from_secs(10)),
Some(Level::TRACE.into()),
num_public_rpcs,
)
.await
} else {
self.protected_rpcs
.try_send_all_synced_connections(
web3_request,
Some(Duration::from_secs(10)),
Some(Level::TRACE.into()),
Some(3),
)
.await
} }
let num_public_rpcs = match request_metadata.proxy_mode() {
// TODO: how many balanced rpcs should we send to? configurable? percentage of total?
ProxyMode::Best | ProxyMode::Debug => Some(4),
ProxyMode::Fastest(0) => None,
// TODO: how many balanced rpcs should we send to? configurable? percentage of total?
// TODO: what if we do 2 per tier? we want to blast the third party rpcs
// TODO: maybe having the third party rpcs in their own Web3Rpcs would be good for this
ProxyMode::Fastest(x) => Some(x * 4),
ProxyMode::Versus => None,
};
// no private rpcs to send to. send to a few public rpcs
// try_send_all_upstream_servers puts the request id into the response. no need to do that ourselves here.
self.balanced_rpcs
.try_send_all_synced_connections(
method,
params,
request_metadata,
None,
None,
Some(Duration::from_secs(10)),
Some(Level::TRACE.into()),
num_public_rpcs,
)
.await
} }
/// proxy request with up to 3 tries. /// proxy request with up to 3 tries.
async fn proxy_request( async fn proxy_request(
self: &Arc<Self>, self: &Arc<Self>,
mut request: JsonRpcRequest, request: JsonRpcRequest,
authorization: Arc<Authorization>, authorization: Arc<Authorization>,
head_block: Option<&Web3ProxyBlock>, head_block: Option<Web3ProxyBlock>,
) -> (StatusCode, jsonrpc::SingleResponse, Vec<Arc<Web3Rpc>>) { ) -> (StatusCode, jsonrpc::SingleResponse, Vec<Arc<Web3Rpc>>) {
let request_metadata = RequestMetadata::new( let web3_request =
self, Web3Request::new_with_app(self, authorization, None, request.into(), head_block).await;
authorization,
RequestOrMethod::Request(&request),
head_block,
)
.await;
let response_id = request.id;
// TODO: trace/kafka log request.params before we send them to _proxy_request_with_caching which might modify them // TODO: trace/kafka log request.params before we send them to _proxy_request_with_caching which might modify them
@ -1213,47 +1154,33 @@ impl Web3ProxyApp {
tries += 1; tries += 1;
let (code, response) = match self let (code, response) = match self._proxy_request_with_caching(&web3_request).await {
._proxy_request_with_caching(
// TODO: avoid clone here
response_id.clone(),
&request.method,
&mut request.params,
head_block,
&request_metadata,
)
.await
{
Ok(response_data) => { Ok(response_data) => {
request_metadata web3_request.error_response.store(false, Ordering::Relaxed);
.error_response web3_request
.store(false, Ordering::Relaxed);
request_metadata
.user_error_response .user_error_response
.store(false, Ordering::Relaxed); .store(false, Ordering::Relaxed);
(StatusCode::OK, response_data) (StatusCode::OK, response_data)
} }
Err(err @ Web3ProxyError::NullJsonRpcResult) => { Err(err @ Web3ProxyError::NullJsonRpcResult) => {
request_metadata web3_request.error_response.store(false, Ordering::Relaxed);
.error_response web3_request
.store(false, Ordering::Relaxed);
request_metadata
.user_error_response .user_error_response
.store(false, Ordering::Relaxed); .store(false, Ordering::Relaxed);
err.as_json_response_parts(response_id) err.as_json_response_parts(web3_request.id())
} }
Err(Web3ProxyError::JsonRpcResponse(response_data)) => { Err(Web3ProxyError::JsonRpcResponse(response_data)) => {
request_metadata web3_request.error_response.store(false, Ordering::Relaxed);
.error_response web3_request
.store(false, Ordering::Relaxed);
request_metadata
.user_error_response .user_error_response
.store(response_data.is_error(), Ordering::Relaxed); .store(response_data.is_error(), Ordering::Relaxed);
let response = let response = jsonrpc::ParsedResponse::from_response_data(
jsonrpc::ParsedResponse::from_response_data(response_data, response_id); response_data,
web3_request.id(),
);
(StatusCode::OK, response.into()) (StatusCode::OK, response.into())
} }
Err(err) => { Err(err) => {
@ -1264,24 +1191,21 @@ impl Web3ProxyApp {
// max tries exceeded. return the error // max tries exceeded. return the error
request_metadata web3_request.error_response.store(true, Ordering::Relaxed);
.error_response web3_request
.store(true, Ordering::Relaxed);
request_metadata
.user_error_response .user_error_response
.store(false, Ordering::Relaxed); .store(false, Ordering::Relaxed);
err.as_json_response_parts(response_id) err.as_json_response_parts(web3_request.id())
} }
}; };
// TODO: this serializes twice :/ web3_request.add_response(&response);
request_metadata.add_response(ResponseOrBytes::Response(&response));
let rpcs = request_metadata.backend_rpcs_used(); let rpcs = web3_request.backend_rpcs_used();
// there might be clones in the background, so this isn't a sure thing // there might be clones in the background, so this isn't a sure thing
let _ = request_metadata.try_send_arc_stat(); let _ = web3_request.try_send_arc_stat();
return (code, response, rpcs); return (code, response, rpcs);
} }
@ -1291,15 +1215,11 @@ impl Web3ProxyApp {
/// TODO: how can we make this generic? /// TODO: how can we make this generic?
async fn _proxy_request_with_caching( async fn _proxy_request_with_caching(
self: &Arc<Self>, self: &Arc<Self>,
id: Box<RawValue>, web3_request: &Arc<Web3Request>,
method: &str,
params: &mut serde_json::Value,
head_block: Option<&Web3ProxyBlock>,
request_metadata: &Arc<RequestMetadata>,
) -> Web3ProxyResult<jsonrpc::SingleResponse> { ) -> Web3ProxyResult<jsonrpc::SingleResponse> {
// TODO: serve net_version without querying the backend // TODO: serve net_version without querying the backend
// TODO: don't force RawValue // TODO: don't force RawValue
let response: jsonrpc::SingleResponse = match method { let response: jsonrpc::SingleResponse = match web3_request.request.method() {
// lots of commands are blocked // lots of commands are blocked
method @ ("db_getHex" method @ ("db_getHex"
| "db_getString" | "db_getString"
@ -1382,59 +1302,39 @@ impl Web3ProxyApp {
| "eth_uninstallFilter") => { | "eth_uninstallFilter") => {
return Err(Web3ProxyError::MethodNotFound(method.to_owned().into())); return Err(Web3ProxyError::MethodNotFound(method.to_owned().into()));
} }
method @ ("eth_sendUserOperation" "eth_sendUserOperation"
| "eth_estimateUserOperationGas" | "eth_estimateUserOperationGas"
| "eth_getUserOperationByHash" | "eth_getUserOperationByHash"
| "eth_getUserOperationReceipt" | "eth_getUserOperationReceipt"
| "eth_supportedEntryPoints" | "eth_supportedEntryPoints"
| "web3_bundlerVersion") => match self.bundler_4337_rpcs.as_ref() { | "web3_bundlerVersion" => self.bundler_4337_rpcs
Some(bundler_4337_rpcs) => { .try_proxy_connection::<Arc<RawValue>>(
bundler_4337_rpcs web3_request,
.try_proxy_connection::<_, Arc<RawValue>>(
method,
params,
request_metadata,
Some(Duration::from_secs(30)),
None,
None,
) )
.await? .await?,
} "eth_accounts" => jsonrpc::ParsedResponse::from_value(serde_json::Value::Array(vec![]), web3_request.id()).into(),
None => {
// TODO: stats even when we error!
// TODO: dedicated error for no 4337 bundlers
return Err(Web3ProxyError::NoServersSynced);
}
},
// TODO: id
"eth_accounts" => jsonrpc::ParsedResponse::from_value(serde_json::Value::Array(vec![]), id).into(),
"eth_blockNumber" => { "eth_blockNumber" => {
match head_block.cloned().or(self.balanced_rpcs.head_block()) { match web3_request.head_block.clone().or(self.balanced_rpcs.head_block()) {
Some(head_block) => jsonrpc::ParsedResponse::from_value(json!(head_block.number()), id).into(), Some(head_block) => jsonrpc::ParsedResponse::from_value(json!(head_block.number()), web3_request.id()).into(),
None => { None => {
return Err(Web3ProxyError::NoServersSynced); return Err(Web3ProxyError::NoServersSynced);
} }
} }
} }
"eth_chainId" => jsonrpc::ParsedResponse::from_value(json!(U64::from(self.config.chain_id)), id).into(), "eth_chainId" => jsonrpc::ParsedResponse::from_value(json!(U64::from(self.config.chain_id)), web3_request.id()).into(),
// TODO: eth_callBundle (https://docs.flashbots.net/flashbots-auction/searchers/advanced/rpc-endpoint#eth_callbundle) // TODO: eth_callBundle (https://docs.flashbots.net/flashbots-auction/searchers/advanced/rpc-endpoint#eth_callbundle)
// TODO: eth_cancelPrivateTransaction (https://docs.flashbots.net/flashbots-auction/searchers/advanced/rpc-endpoint#eth_cancelprivatetransaction, but maybe just reject) // TODO: eth_cancelPrivateTransaction (https://docs.flashbots.net/flashbots-auction/searchers/advanced/rpc-endpoint#eth_cancelprivatetransaction, but maybe just reject)
// TODO: eth_sendPrivateTransaction (https://docs.flashbots.net/flashbots-auction/searchers/advanced/rpc-endpoint#eth_sendprivatetransaction) // TODO: eth_sendPrivateTransaction (https://docs.flashbots.net/flashbots-auction/searchers/advanced/rpc-endpoint#eth_sendprivatetransaction)
"eth_coinbase" => { "eth_coinbase" => {
// no need for serving coinbase // no need for serving coinbase
jsonrpc::ParsedResponse::from_value(json!(Address::zero()), id).into() jsonrpc::ParsedResponse::from_value(json!(Address::zero()), web3_request.id()).into()
} }
"eth_estimateGas" => { "eth_estimateGas" => {
// TODO: timeout // TODO: timeout
let mut gas_estimate = self let mut gas_estimate = self
.balanced_rpcs .balanced_rpcs
.try_proxy_connection::<_, U256>( .try_proxy_connection::<U256>(
method, web3_request,
params,
request_metadata,
Some(Duration::from_secs(30)),
None,
None,
) )
.await? .await?
.parsed() .parsed()
@ -1455,63 +1355,61 @@ impl Web3ProxyApp {
gas_estimate += gas_increase; gas_estimate += gas_increase;
let request_id = web3_request.id();
// TODO: from_serializable? // TODO: from_serializable?
jsonrpc::ParsedResponse::from_value(json!(gas_estimate), id).into() jsonrpc::ParsedResponse::from_value(json!(gas_estimate), request_id).into()
} }
"eth_getTransactionReceipt" | "eth_getTransactionByHash" => { "eth_getTransactionReceipt" | "eth_getTransactionByHash" => {
// try to get the transaction without specifying a min_block_height // try to get the transaction without specifying a min_block_height
// TODO: timeout // TODO: timeout
let parsed = match self let result = self
.balanced_rpcs .balanced_rpcs
.try_proxy_connection::<_, Arc<RawValue>>( .try_proxy_connection::<serde_json::Value>(
method, web3_request,
params,
request_metadata,
Some(Duration::from_secs(30)),
None,
None,
) )
.await { .await?
Ok(response) => response.parsed().await.map_err(Into::into), .parsed()
Err(err) => Err(err), .await?
}; .into_result();
// if we got "null", it is probably because the tx is old. retry on nodes with old block data // if we got "null" or "", it is probably because the tx is old. retry on nodes with old block data
let try_archive = if let Ok(Some(value)) = parsed.as_ref().map(|r| r.result()) { // TODO: this feels fragile. how should we do this better/
value.get() == "null" || value.get() == "" || value.get() == "\"\"" let try_archive = match &result {
} else { Ok(serde_json::Value::Null) => true,
true Ok(serde_json::Value::Array(x)) => x.is_empty(),
Ok(serde_json::Value::String(x)) => x.is_empty(),
Err(..) => true,
_ => false,
}; };
if try_archive && let Some(head_block_num) = head_block.map(|x| x.number()) { if try_archive {
// TODO: only charge for archive if it gave a result // TODO: only charge for archive if it gave a result
request_metadata web3_request
.archive_request .archive_request
.store(true, atomic::Ordering::Relaxed); .store(true, atomic::Ordering::Relaxed);
self self
.balanced_rpcs .balanced_rpcs
.try_proxy_connection::<_, Arc<RawValue>>( .try_proxy_connection::<Arc<RawValue>>(
method, web3_request,
params, // Some(Duration::from_secs(30)),
request_metadata, // // TODO: should this be block 0 instead?
Some(Duration::from_secs(30)), // Some(&U64::one()),
// TODO: should this be block 0 instead? // // TODO: is this a good way to allow lagged archive nodes a try
Some(&U64::one()), // Some(&head_block_num.saturating_sub(5.into()).clamp(U64::one(), U64::MAX)),
// TODO: is this a good way to allow lagged archive nodes a try
Some(&head_block_num.saturating_sub(5.into()).clamp(U64::one(), U64::MAX)),
) )
.await? .await?
} else { } else {
parsed?.into() jsonrpc::ParsedResponse::from_value(result?, web3_request.id()).into()
} }
// TODO: if parsed is an error, return a null instead // TODO: if parsed is an error, return a null instead
} }
// TODO: eth_gasPrice that does awesome magic to predict the future // TODO: eth_gasPrice that does awesome magic to predict the future
"eth_hashrate" => jsonrpc::ParsedResponse::from_value(json!(U64::zero()), id).into(), "eth_hashrate" => jsonrpc::ParsedResponse::from_value(json!(U64::zero()), web3_request.id()).into(),
"eth_mining" => jsonrpc::ParsedResponse::from_value(serde_json::Value::Bool(false), id).into(), "eth_mining" => jsonrpc::ParsedResponse::from_value(serde_json::Value::Bool(false), web3_request.id()).into(),
// TODO: eth_sendBundle (flashbots/eden command) // TODO: eth_sendBundle (flashbots/eden command)
// broadcast transactions to all private rpcs at once // broadcast transactions to all private rpcs at once
"eth_sendRawTransaction" => { "eth_sendRawTransaction" => {
@ -1521,9 +1419,7 @@ impl Web3ProxyApp {
let response = self let response = self
.try_send_protected( .try_send_protected(
method, web3_request,
params,
request_metadata,
).await; ).await;
let mut response = response.try_into()?; let mut response = response.try_into()?;
@ -1536,7 +1432,7 @@ impl Web3ProxyApp {
&& (error_data.message == "ALREADY_EXISTS: already known" && (error_data.message == "ALREADY_EXISTS: already known"
|| error_data.message == "INTERNAL_ERROR: existing tx with same hash") || error_data.message == "INTERNAL_ERROR: existing tx with same hash")
{ {
let params = params let params = web3_request.request.params()
.as_array() .as_array()
.ok_or_else(|| { .ok_or_else(|| {
Web3ProxyError::BadRequest( Web3ProxyError::BadRequest(
@ -1611,39 +1507,39 @@ impl Web3ProxyApp {
} }
} }
jsonrpc::ParsedResponse::from_response_data(response, id).into() jsonrpc::ParsedResponse::from_response_data(response, web3_request.id()).into()
} }
"eth_syncing" => { "eth_syncing" => {
// no stats on this. its cheap // no stats on this. its cheap
// TODO: return a real response if all backends are syncing or if no servers in sync // TODO: return a real response if all backends are syncing or if no servers in sync
// TODO: const // TODO: const
jsonrpc::ParsedResponse::from_value(serde_json::Value::Bool(false), id).into() jsonrpc::ParsedResponse::from_value(serde_json::Value::Bool(false), web3_request.id()).into()
} }
"eth_subscribe" => jsonrpc::ParsedResponse::from_error(JsonRpcErrorData { "eth_subscribe" => jsonrpc::ParsedResponse::from_error(JsonRpcErrorData {
message: "notifications not supported. eth_subscribe is only available over a websocket".into(), message: "notifications not supported. eth_subscribe is only available over a websocket".into(),
code: -32601, code: -32601,
data: None, data: None,
}, id).into(), }, web3_request.id()).into(),
"eth_unsubscribe" => jsonrpc::ParsedResponse::from_error(JsonRpcErrorData { "eth_unsubscribe" => jsonrpc::ParsedResponse::from_error(JsonRpcErrorData {
message: "notifications not supported. eth_unsubscribe is only available over a websocket".into(), message: "notifications not supported. eth_unsubscribe is only available over a websocket".into(),
code: -32601, code: -32601,
data: None, data: None,
}, id).into(), }, web3_request.id()).into(),
"net_listening" => { "net_listening" => {
// TODO: only true if there are some backends on balanced_rpcs? // TODO: only true if there are some backends on balanced_rpcs?
// TODO: const // TODO: const
jsonrpc::ParsedResponse::from_value(serde_json::Value::Bool(true), id).into() jsonrpc::ParsedResponse::from_value(serde_json::Value::Bool(true), web3_request.id()).into()
} }
"net_peerCount" => "net_peerCount" =>
jsonrpc::ParsedResponse::from_value(json!(U64::from(self.balanced_rpcs.num_synced_rpcs())), id).into() jsonrpc::ParsedResponse::from_value(json!(U64::from(self.balanced_rpcs.num_synced_rpcs())), web3_request.id()).into()
, ,
"web3_clientVersion" => "web3_clientVersion" =>
jsonrpc::ParsedResponse::from_value(serde_json::Value::String(APP_USER_AGENT.to_string()), id).into() jsonrpc::ParsedResponse::from_value(serde_json::Value::String(APP_USER_AGENT.to_string()), web3_request.id()).into()
, ,
"web3_sha3" => { "web3_sha3" => {
// returns Keccak-256 (not the standardized SHA3-256) of the given data. // returns Keccak-256 (not the standardized SHA3-256) of the given data.
// TODO: timeout // TODO: timeout
match &params { match &web3_request.request.params() {
serde_json::Value::Array(params) => { serde_json::Value::Array(params) => {
// TODO: make a struct and use serde conversion to clean this up // TODO: make a struct and use serde conversion to clean this up
if params.len() != 1 if params.len() != 1
@ -1655,7 +1551,7 @@ impl Web3ProxyApp {
message: "Invalid request".into(), message: "Invalid request".into(),
code: -32600, code: -32600,
data: None data: None
}, id).into() }, web3_request.id()).into()
} else { } else {
// TODO: BadRequest instead of web3_context // TODO: BadRequest instead of web3_context
let param = Bytes::from_str( let param = Bytes::from_str(
@ -1673,7 +1569,7 @@ impl Web3ProxyApp {
let hash = H256::from(keccak256(param)); let hash = H256::from(keccak256(param));
jsonrpc::ParsedResponse::from_value(json!(hash), id).into() jsonrpc::ParsedResponse::from_value(json!(hash), web3_request.id()).into()
} }
} }
_ => { _ => {
@ -1683,7 +1579,7 @@ impl Web3ProxyApp {
message: "invalid request".into(), message: "invalid request".into(),
code: StatusCode::BAD_REQUEST.as_u16().into(), code: StatusCode::BAD_REQUEST.as_u16().into(),
data: None, data: None,
}, id).into() }, web3_request.id()).into()
} }
} }
} }
@ -1691,7 +1587,7 @@ impl Web3ProxyApp {
message: "The method test does not exist/is not available.".into(), message: "The method test does not exist/is not available.".into(),
code: -32601, code: -32601,
data: None, data: None,
}, id).into(), }, web3_request.id()).into(),
// anything else gets sent to backend rpcs and cached // anything else gets sent to backend rpcs and cached
method => { method => {
if method.starts_with("admin_") { if method.starts_with("admin_") {
@ -1705,111 +1601,29 @@ impl Web3ProxyApp {
)).into()); )).into());
} }
// TODO: if no servers synced, wait for them to be synced? probably better to error and let haproxy retry another server // TODO: why is this clone needed?
let head_block: Web3ProxyBlock = head_block let web3_request = web3_request.clone();
.cloned()
.or_else(|| self.balanced_rpcs.head_block())
.ok_or(Web3ProxyError::NoServersSynced)?;
// we do this check before checking caches because it might modify the request params
// TODO: add a stat for archive vs full since they should probably cost different
// TODO: this cache key can be rather large. is that okay?
let cache_key: Option<JsonRpcQueryCacheKey> = match CacheMode::new(
method,
params,
&head_block,
&self.balanced_rpcs,
)
.await
{
CacheMode::CacheSuccessForever => Some(JsonRpcQueryCacheKey::new(
None,
None,
method,
params,
false,
)),
CacheMode::CacheNever => None,
CacheMode::Cache {
block,
cache_errors,
} => {
let block_depth = (head_block.number().saturating_sub(*block.num())).as_u64();
if block_depth > self.config.archive_depth {
trace!(%block_depth, archive_depth=%self.config.archive_depth);
request_metadata
.archive_request
.store(true, atomic::Ordering::Relaxed);
}
Some(JsonRpcQueryCacheKey::new(
Some(block),
None,
method,
params,
cache_errors,
))
}
CacheMode::CacheRange {
from_block,
to_block,
cache_errors,
} => {
let block_depth = (head_block.number().saturating_sub(*from_block.num())).as_u64();
if block_depth > self.config.archive_depth {
trace!(%block_depth, archive_depth=%self.config.archive_depth);
request_metadata
.archive_request
.store(true, atomic::Ordering::Relaxed);
}
Some(JsonRpcQueryCacheKey::new(
Some(from_block),
Some(to_block),
method,
params,
cache_errors,
))
}
};
// TODO: think more about this timeout. we should probably have a `request_expires_at` Duration on the request_metadata
// TODO: different user tiers should have different timeouts
// erigon's timeout is 300, so keep this a few seconds shorter
let max_wait = Some(Duration::from_secs(290));
if let Some(cache_key) = cache_key {
let from_block_num = cache_key.from_block_num().copied();
let to_block_num = cache_key.to_block_num().copied();
let cache_jsonrpc_errors = cache_key.cache_errors();
let cache_key_hash = cache_key.hash();
if web3_request.cache_mode.is_some() {
// don't cache anything larger than 16 MiB // don't cache anything larger than 16 MiB
let max_response_cache_bytes = 16 * (1024 ^ 2); // self.config.max_response_cache_bytes; let max_response_cache_bytes = 16 * (1024 ^ 2); // self.config.max_response_cache_bytes;
let cache_key = web3_request.cache_key().expect("key must exist if cache_mode does");
// TODO: try to fetch out of s3 // TODO: try to fetch out of s3
let x: SingleResponse = if let Some(data) = self.jsonrpc_response_cache.get(&cache_key_hash).await { let x: SingleResponse = if let Some(data) = self.jsonrpc_response_cache.get(&cache_key).await {
// it was cached! easy! // it was cached! easy!
// TODO: wait. this currently panics. why? // TODO: wait. this currently panics. why?
jsonrpc::ParsedResponse::from_response_data(data, id).into() jsonrpc::ParsedResponse::from_response_data(data, web3_request.id()).into()
} else if self.jsonrpc_response_failed_cache_keys.contains_key(&cache_key_hash) { } else if self.jsonrpc_response_failed_cache_keys.contains_key(&cache_key) {
// this is a cache_key that we know won't cache // this is a cache_key that we know won't cache
// NOTICE! We do **NOT** use get which means the key's hotness is not updated. we don't use time-to-idler here so thats fine. but be careful if that changes // NOTICE! We do **NOT** use get which means the key's hotness is not updated. we don't use time-to-idler here so thats fine. but be careful if that changes
timeout( timeout(
Duration::from_secs(295), web3_request.ttl(),
self.balanced_rpcs self.balanced_rpcs
.try_proxy_connection::<_, Arc<RawValue>>( .try_proxy_connection::<Arc<RawValue>>(
method, &web3_request,
params,
request_metadata,
max_wait,
None,
None,
) )
).await?? ).await??
} else { } else {
@ -1818,53 +1632,44 @@ impl Web3ProxyApp {
// TODO: if we got the semaphore, do the try_get_with // TODO: if we got the semaphore, do the try_get_with
// TODO: if the response is too big to cache mark the cache_key as not cacheable. maybe CacheMode can check that cache? // TODO: if the response is too big to cache mark the cache_key as not cacheable. maybe CacheMode can check that cache?
let s = self.jsonrpc_response_semaphores.get_with(cache_key_hash, async move { let s = self.jsonrpc_response_semaphores.get_with(cache_key, async move {
Arc::new(Semaphore::new(1)) Arc::new(Semaphore::new(1))
}).await; }).await;
// TODO: don't always do 1 second. use the median request latency instead // TODO: don't always do 1 second. use the median request latency instead
match timeout(Duration::from_secs(1), s.acquire_owned()).await { let mut x = match timeout(Duration::from_secs(1), s.acquire_owned()).await {
Err(_) => { Err(_) => {
// TODO: should we try to cache this? whatever has the semaphore //should// handle that for us // TODO: should we try to cache this? whatever has the semaphore //should// handle that for us
timeout( timeout(
Duration::from_secs(295), web3_request.ttl(),
self.balanced_rpcs self.balanced_rpcs
.try_proxy_connection::<_, Arc<RawValue>>( .try_proxy_connection::<Arc<RawValue>>(
method, &web3_request,
params,
request_metadata,
max_wait,
None,
None,
) )
).await?? ).await??
} }
Ok(_p) => { Ok(_p) => {
// we got the permit! we are either first, or we were waiting a short time to get it in which case this response should be cached // we got the permit! we are either first, or we were waiting a short time to get it in which case this response should be cached
// TODO: clone less? // TODO: clone less? its spawned so i don't think we can
let f = { let f = {
let app = self.clone(); let app = self.clone();
let method = method.to_string(); let web3_request = web3_request.clone();
let params = params.clone();
let request_metadata = request_metadata.clone();
async move { async move {
app app
.jsonrpc_response_cache .jsonrpc_response_cache
.try_get_with::<_, Web3ProxyError>(cache_key.hash(), async { .try_get_with::<_, Web3ProxyError>(cache_key, async {
let response_data = timeout(Duration::from_secs(290), app.balanced_rpcs let duration = web3_request.ttl().saturating_sub(Duration::from_secs(1));
.try_proxy_connection::<_, Arc<RawValue>>(
&method, // TODO: dynamic timeout based on whats left on web3_request
&params, let response_data = timeout(duration, app.balanced_rpcs
&request_metadata, .try_proxy_connection::<Arc<RawValue>>(
max_wait, &web3_request,
from_block_num.as_ref(),
to_block_num.as_ref(),
)).await; )).await;
match response_data { match response_data {
Ok(response_data) => { Ok(response_data) => {
if !cache_jsonrpc_errors && let Err(err) = response_data { if !web3_request.cache_jsonrpc_errors() && let Err(err) = response_data {
// if we are not supposed to cache jsonrpc errors, // if we are not supposed to cache jsonrpc errors,
// then we must not convert Provider errors into a JsonRpcResponseEnum // then we must not convert Provider errors into a JsonRpcResponseEnum
// return all the errors now. moka will not cache Err results // return all the errors now. moka will not cache Err results
@ -1894,10 +1699,10 @@ impl Web3ProxyApp {
// this is spawned so that if the client disconnects, the app keeps polling the future with a lock inside the moka cache // this is spawned so that if the client disconnects, the app keeps polling the future with a lock inside the moka cache
// TODO: is this expect actually safe!? could there be a background process that still has the arc? // TODO: is this expect actually safe!? could there be a background process that still has the arc?
match tokio::spawn(f).await? { let mut x = match tokio::spawn(f).await? {
Ok(response_data) => Ok(jsonrpc::ParsedResponse::from_response_data(response_data, id).into()), Ok(response_data) => Ok(jsonrpc::ParsedResponse::from_response_data(response_data, Default::default()).into()),
Err(err) => { Err(err) => {
self.jsonrpc_response_failed_cache_keys.insert(cache_key_hash, ()).await; self.jsonrpc_response_failed_cache_keys.insert(cache_key, ()).await;
if let Web3ProxyError::StreamResponse(x) = err.as_ref() { if let Web3ProxyError::StreamResponse(x) = err.as_ref() {
let x = x.lock().take().expect("stream processing should only happen once"); let x = x.lock().take().expect("stream processing should only happen once");
@ -1907,25 +1712,33 @@ impl Web3ProxyApp {
Err(err) Err(err)
} }
}, },
}? }?;
// clear the id. theres no point including it in our cached response
x.set_id(Default::default());
x
} }
} };
x.set_id(web3_request.id());
x
}; };
x x
} else { } else {
timeout( let mut x = timeout(
Duration::from_secs(295), web3_request.ttl(),
self.balanced_rpcs self.balanced_rpcs
.try_proxy_connection::<_, Arc<RawValue>>( .try_proxy_connection::<Arc<RawValue>>(
method, &web3_request,
params,
request_metadata,
max_wait,
None,
None,
) )
).await?? ).await??;
x.set_id(web3_request.id());
x
} }
} }
}; };

View File

@ -2,8 +2,8 @@
use super::Web3ProxyApp; use super::Web3ProxyApp;
use crate::errors::{Web3ProxyError, Web3ProxyResult}; use crate::errors::{Web3ProxyError, Web3ProxyResult};
use crate::frontend::authorization::{Authorization, RequestMetadata, RequestOrMethod}; use crate::frontend::authorization::{RequestOrMethod, Web3Request};
use crate::jsonrpc::{self, JsonRpcRequest}; use crate::jsonrpc;
use crate::response_cache::JsonRpcResponseEnum; use crate::response_cache::JsonRpcResponseEnum;
use axum::extract::ws::{CloseFrame, Message}; use axum::extract::ws::{CloseFrame, Message};
use deferred_rate_limiter::DeferredRateLimitResult; use deferred_rate_limiter::DeferredRateLimitResult;
@ -24,14 +24,14 @@ use tracing::{error, trace};
impl Web3ProxyApp { impl Web3ProxyApp {
pub async fn eth_subscribe<'a>( pub async fn eth_subscribe<'a>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
authorization: Arc<Authorization>, web3_request: Arc<Web3Request>,
jsonrpc_request: JsonRpcRequest,
subscription_count: &'a AtomicU64, subscription_count: &'a AtomicU64,
// TODO: taking a sender for Message instead of the exact json we are planning to send feels wrong, but its easier for now // TODO: taking a sender for Message instead of the exact json we are planning to send feels wrong, but its easier for now
response_sender: mpsc::Sender<Message>, response_sender: mpsc::Sender<Message>,
) -> Web3ProxyResult<(AbortHandle, jsonrpc::ParsedResponse)> { ) -> Web3ProxyResult<(AbortHandle, jsonrpc::ParsedResponse)> {
let subscribe_to = jsonrpc_request let subscribe_to = web3_request
.params .request
.params()
.get(0) .get(0)
.and_then(|x| x.as_str()) .and_then(|x| x.as_str())
.ok_or_else(|| { .ok_or_else(|| {
@ -42,21 +42,13 @@ impl Web3ProxyApp {
// only premium users are allowed to subscribe to the other things // only premium users are allowed to subscribe to the other things
if !(self.config.free_subscriptions if !(self.config.free_subscriptions
|| subscribe_to == "newHeads" || subscribe_to == "newHeads"
|| authorization.active_premium().await) || web3_request.authorization.active_premium().await)
{ {
return Err(Web3ProxyError::AccessDenied( return Err(Web3ProxyError::AccessDenied(
"eth_subscribe for this event requires an active premium account".into(), "eth_subscribe for this event requires an active premium account".into(),
)); ));
} }
let request_metadata = RequestMetadata::new(
self,
authorization.clone(),
RequestOrMethod::Request(&jsonrpc_request),
None,
)
.await;
let (subscription_abort_handle, subscription_registration) = AbortHandle::new_pair(); let (subscription_abort_handle, subscription_registration) = AbortHandle::new_pair();
// TODO: this only needs to be unique per connection. we don't need it globably unique // TODO: this only needs to be unique per connection. we don't need it globably unique
@ -64,9 +56,6 @@ impl Web3ProxyApp {
let subscription_id = subscription_count.fetch_add(1, atomic::Ordering::SeqCst); let subscription_id = subscription_count.fetch_add(1, atomic::Ordering::SeqCst);
let subscription_id = U64::from(subscription_id); let subscription_id = U64::from(subscription_id);
// save the id so we can use it in the response
let id = jsonrpc_request.id.clone();
// TODO: calling `json!` on every request is probably not fast. but it works for now // TODO: calling `json!` on every request is probably not fast. but it works for now
// TODO: i think we need a stricter EthSubscribeRequest type that JsonRpcRequest can turn into // TODO: i think we need a stricter EthSubscribeRequest type that JsonRpcRequest can turn into
// TODO: DRY This up. lots of duplication between newHeads and newPendingTransactions // TODO: DRY This up. lots of duplication between newHeads and newPendingTransactions
@ -74,6 +63,7 @@ impl Web3ProxyApp {
"newHeads" => { "newHeads" => {
let head_block_receiver = self.watch_consensus_head_receiver.clone(); let head_block_receiver = self.watch_consensus_head_receiver.clone();
let app = self.clone(); let app = self.clone();
let authorization = web3_request.authorization.clone();
tokio::spawn(async move { tokio::spawn(async move {
trace!("newHeads subscription {:?}", subscription_id); trace!("newHeads subscription {:?}", subscription_id);
@ -90,16 +80,17 @@ impl Web3ProxyApp {
continue; continue;
}; };
let subscription_request_metadata = RequestMetadata::new( let subscription_web3_request = Web3Request::new_with_app(
&app, &app,
authorization.clone(), authorization.clone(),
RequestOrMethod::Method("eth_subscribe(newHeads)", 0), None,
Some(&new_head), RequestOrMethod::Method("eth_subscribe(newHeads)".into(), 0),
Some(new_head),
) )
.await; .await;
if let Some(close_message) = app if let Some(close_message) = app
.rate_limit_close_websocket(&subscription_request_metadata) .rate_limit_close_websocket(&subscription_web3_request)
.await .await
{ {
let _ = response_sender.send(close_message).await; let _ = response_sender.send(close_message).await;
@ -113,7 +104,7 @@ impl Web3ProxyApp {
"params": { "params": {
"subscription": subscription_id, "subscription": subscription_id,
// TODO: option to include full transaction objects instead of just the hashes? // TODO: option to include full transaction objects instead of just the hashes?
"result": new_head.block, "result": subscription_web3_request.head_block,
}, },
}); });
@ -133,7 +124,7 @@ impl Web3ProxyApp {
break; break;
}; };
subscription_request_metadata.add_response(response_bytes); subscription_web3_request.add_response(response_bytes);
} }
trace!("closed newHeads subscription {:?}", subscription_id); trace!("closed newHeads subscription {:?}", subscription_id);
@ -143,6 +134,7 @@ impl Web3ProxyApp {
"newPendingTransactions" => { "newPendingTransactions" => {
let pending_txid_firehose = self.pending_txid_firehose.subscribe(); let pending_txid_firehose = self.pending_txid_firehose.subscribe();
let app = self.clone(); let app = self.clone();
let authorization = web3_request.authorization.clone();
tokio::spawn(async move { tokio::spawn(async move {
let mut pending_txid_firehose = Abortable::new( let mut pending_txid_firehose = Abortable::new(
@ -152,17 +144,21 @@ impl Web3ProxyApp {
while let Some(Ok(new_txid)) = pending_txid_firehose.next().await { while let Some(Ok(new_txid)) = pending_txid_firehose.next().await {
// TODO: include the head_block here? // TODO: include the head_block here?
let subscription_request_metadata = RequestMetadata::new( let subscription_web3_request = Web3Request::new_with_app(
&app, &app,
authorization.clone(), authorization.clone(),
RequestOrMethod::Method("eth_subscribe(newPendingTransactions)", 0), None,
RequestOrMethod::Method(
"eth_subscribe(newPendingTransactions)".into(),
0,
),
None, None,
) )
.await; .await;
// check if we should close the websocket connection // check if we should close the websocket connection
if let Some(close_message) = app if let Some(close_message) = app
.rate_limit_close_websocket(&subscription_request_metadata) .rate_limit_close_websocket(&subscription_web3_request)
.await .await
{ {
let _ = response_sender.send(close_message).await; let _ = response_sender.send(close_message).await;
@ -185,7 +181,7 @@ impl Web3ProxyApp {
// we could use JsonRpcForwardedResponseEnum::num_bytes() here, but since we already have the string, this is easier // we could use JsonRpcForwardedResponseEnum::num_bytes() here, but since we already have the string, this is easier
let response_bytes = response_str.len(); let response_bytes = response_str.len();
subscription_request_metadata.add_response(response_bytes); subscription_web3_request.add_response(response_bytes);
// TODO: do clients support binary messages? // TODO: do clients support binary messages?
// TODO: can we check a content type header? // TODO: can we check a content type header?
@ -216,23 +212,21 @@ impl Web3ProxyApp {
let response_data = JsonRpcResponseEnum::from(json!(subscription_id)); let response_data = JsonRpcResponseEnum::from(json!(subscription_id));
let response = jsonrpc::ParsedResponse::from_response_data(response_data, id); let response =
jsonrpc::ParsedResponse::from_response_data(response_data, web3_request.id());
// TODO: better way of passing in ParsedResponse // TODO: better way of passing in ParsedResponse
let response = jsonrpc::SingleResponse::Parsed(response); let response = jsonrpc::SingleResponse::Parsed(response);
// TODO: this serializes twice // TODO: this serializes twice
request_metadata.add_response(&response); web3_request.add_response(&response);
let response = response.parsed().await.expect("Response already parsed"); let response = response.parsed().await.expect("Response already parsed");
// TODO: make a `SubscriptonHandle(AbortHandle, JoinHandle)` struct? // TODO: make a `SubscriptonHandle(AbortHandle, JoinHandle)` struct?
Ok((subscription_abort_handle, response)) Ok((subscription_abort_handle, response))
} }
async fn rate_limit_close_websocket( async fn rate_limit_close_websocket(&self, web3_request: &Web3Request) -> Option<Message> {
&self, let authorization = &web3_request.authorization;
request_metadata: &RequestMetadata,
) -> Option<Message> {
let authorization = &request_metadata.authorization;
if !authorization.active_premium().await { if !authorization.active_premium().await {
if let Some(rate_limiter) = &self.frontend_public_rate_limiter { if let Some(rate_limiter) = &self.frontend_public_rate_limiter {

View File

@ -1,10 +1,14 @@
//! Helper functions for turning ether's BlockNumber into numbers and updating incoming queries to match. //! Helper functions for turning ether's BlockNumber into numbers and updating incoming queries to match.
use crate::rpcs::many::Web3Rpcs; use std::time::Duration;
use crate::app::Web3ProxyApp;
use crate::jsonrpc::JsonRpcRequest;
use crate::{ use crate::{
errors::{Web3ProxyError, Web3ProxyResult}, errors::{Web3ProxyError, Web3ProxyResult},
rpcs::blockchain::Web3ProxyBlock, rpcs::blockchain::Web3ProxyBlock,
}; };
use anyhow::Context; use anyhow::Context;
use async_recursion::async_recursion;
use derive_more::From; use derive_more::From;
use ethers::{ use ethers::{
prelude::{BlockNumber, U64}, prelude::{BlockNumber, U64},
@ -14,16 +18,16 @@ use serde_json::json;
use tracing::{error, trace, warn}; use tracing::{error, trace, warn};
#[allow(non_snake_case)] #[allow(non_snake_case)]
pub fn BlockNumber_to_U64(block_num: BlockNumber, latest_block: &U64) -> (U64, bool) { pub fn BlockNumber_to_U64(block_num: BlockNumber, latest_block: U64) -> (U64, bool) {
match block_num { match block_num {
BlockNumber::Earliest => (U64::zero(), false), BlockNumber::Earliest => (U64::zero(), false),
BlockNumber::Finalized => { BlockNumber::Finalized => {
warn!("finalized block requested! not yet implemented!"); warn!("finalized block requested! not yet implemented!");
(*latest_block - 10, false) (latest_block - 10, false)
} }
BlockNumber::Latest => { BlockNumber::Latest => {
// change "latest" to a number // change "latest" to a number
(*latest_block, true) (latest_block, true)
} }
BlockNumber::Number(x) => { BlockNumber::Number(x) => {
// we already have a number // we already have a number
@ -32,16 +36,16 @@ pub fn BlockNumber_to_U64(block_num: BlockNumber, latest_block: &U64) -> (U64, b
BlockNumber::Pending => { BlockNumber::Pending => {
// modified is false because we want the backend to see "pending" // modified is false because we want the backend to see "pending"
// TODO: think more about how to handle Pending // TODO: think more about how to handle Pending
(*latest_block, false) (latest_block, false)
} }
BlockNumber::Safe => { BlockNumber::Safe => {
warn!("safe block requested! not yet implemented!"); warn!("safe block requested! not yet implemented!");
(*latest_block - 3, false) (latest_block - 3, false)
} }
} }
} }
#[derive(Clone, Debug, Eq, From, PartialEq)] #[derive(Clone, Debug, Eq, From, Hash, PartialEq)]
pub struct BlockNumAndHash(U64, H256); pub struct BlockNumAndHash(U64, H256);
impl BlockNumAndHash { impl BlockNumAndHash {
@ -55,7 +59,7 @@ impl BlockNumAndHash {
impl From<&Web3ProxyBlock> for BlockNumAndHash { impl From<&Web3ProxyBlock> for BlockNumAndHash {
fn from(value: &Web3ProxyBlock) -> Self { fn from(value: &Web3ProxyBlock) -> Self {
let n = *value.number(); let n = value.number();
let h = *value.hash(); let h = *value.hash();
Self(n, h) Self(n, h)
@ -64,11 +68,12 @@ impl From<&Web3ProxyBlock> for BlockNumAndHash {
/// modify params to always have a block hash and not "latest" /// modify params to always have a block hash and not "latest"
/// TODO: this should replace all block numbers with hashes, not just "latest" /// TODO: this should replace all block numbers with hashes, not just "latest"
pub async fn clean_block_number( #[async_recursion]
params: &mut serde_json::Value, pub async fn clean_block_number<'a>(
params: &'a mut serde_json::Value,
block_param_id: usize, block_param_id: usize,
latest_block: &Web3ProxyBlock, head_block: &'a Web3ProxyBlock,
rpcs: &Web3Rpcs, app: Option<&'a Web3ProxyApp>,
) -> Web3ProxyResult<BlockNumAndHash> { ) -> Web3ProxyResult<BlockNumAndHash> {
match params.as_array_mut() { match params.as_array_mut() {
None => { None => {
@ -79,7 +84,7 @@ pub async fn clean_block_number(
None => { None => {
if params.len() == block_param_id { if params.len() == block_param_id {
// add the latest block number to the end of the params // add the latest block number to the end of the params
params.push(json!(latest_block.number())); params.push(json!(head_block.number()));
} else { } else {
// don't modify the request. only cache with current block // don't modify the request. only cache with current block
// TODO: more useful log that include the // TODO: more useful log that include the
@ -87,7 +92,7 @@ pub async fn clean_block_number(
} }
// don't modify params, just cache with the current block // don't modify params, just cache with the current block
Ok(latest_block.into()) Ok(head_block.into())
} }
Some(x) => { Some(x) => {
// dig into the json value to find a BlockNumber or similar block identifier // dig into the json value to find a BlockNumber or similar block identifier
@ -99,12 +104,22 @@ pub async fn clean_block_number(
let block_hash: H256 = let block_hash: H256 =
serde_json::from_value(block_hash).context("decoding blockHash")?; serde_json::from_value(block_hash).context("decoding blockHash")?;
let block = rpcs if block_hash == *head_block.hash() {
.block(&block_hash, None, None) (head_block.into(), false)
.await } else if let Some(app) = app {
.context("fetching block number from hash")?; let block = app
.balanced_rpcs
.block(&block_hash, None, None)
.await
.context("fetching block number from hash")?;
(BlockNumAndHash::from(&block), false) (BlockNumAndHash::from(&block), false)
} else {
return Err(anyhow::anyhow!(
"app missing. cannot find block number from hash"
)
.into());
}
} else { } else {
return Err(anyhow::anyhow!("blockHash missing").into()); return Err(anyhow::anyhow!("blockHash missing").into());
} }
@ -112,59 +127,69 @@ pub async fn clean_block_number(
// it might be a string like "latest" or a block number or a block hash // it might be a string like "latest" or a block number or a block hash
// TODO: "BlockNumber" needs a better name // TODO: "BlockNumber" needs a better name
// TODO: move this to a helper function? // TODO: move this to a helper function?
if let Ok(block_num) = serde_json::from_value::<U64>(x.clone()) { let (block_num, changed) = if let Some(block_num) = x.as_u64() {
let head_block_num = *latest_block.number(); (U64::from(block_num), false)
} else if let Ok(block_num) = serde_json::from_value::<U64>(x.to_owned()) {
(block_num, false)
} else if let Ok(block_number) =
serde_json::from_value::<BlockNumber>(x.to_owned())
{
BlockNumber_to_U64(block_number, head_block.number())
} else if let Ok(block_hash) = serde_json::from_value::<H256>(x.clone()) {
if block_hash == *head_block.hash() {
(head_block.number(), false)
} else if let Some(app) = app {
// TODO: what should this max_wait be?
let block = app
.balanced_rpcs
.block(&block_hash, None, Some(Duration::from_secs(3)))
.await
.context("fetching block number from hash")?;
if block_num > head_block_num { (block.number(), false)
return Err(Web3ProxyError::UnknownBlockNumber { } else {
known: head_block_num, return Err(anyhow::anyhow!(
unknown: block_num, "app missing. cannot find block number from hash"
}); )
.into());
} }
} else {
return Err(anyhow::anyhow!(
"param not a block identifier, block number, or block hash"
)
.into());
};
let block_hash = rpcs let head_block_num = head_block.number();
if block_num > head_block_num {
// TODO: option to wait for the block
return Err(Web3ProxyError::UnknownBlockNumber {
known: head_block_num,
unknown: block_num,
});
}
if block_num == head_block_num {
(head_block.into(), changed)
} else if let Some(app) = app {
let block_hash = app
.balanced_rpcs
.block_hash(&block_num) .block_hash(&block_num)
.await .await
.context("fetching block hash from number")?; .context("fetching block hash from number")?;
let block = rpcs let block = app
.balanced_rpcs
.block(&block_hash, None, None) .block(&block_hash, None, None)
.await .await
.context("fetching block from hash")?; .context("fetching block from hash")?;
// TODO: do true here? will that work for **all** methods on **all** chains? if not we need something smarter // TODO: do true here? will that work for **all** methods on **all** chains? if not we need something smarter
(BlockNumAndHash::from(&block), false) (BlockNumAndHash::from(&block), changed)
} else if let Ok(block_number) =
serde_json::from_value::<BlockNumber>(x.clone())
{
let (block_num, change) =
BlockNumber_to_U64(block_number, latest_block.number());
if block_num == *latest_block.number() {
(latest_block.into(), change)
} else {
let block_hash = rpcs
.block_hash(&block_num)
.await
.context("fetching block hash from number")?;
let block = rpcs
.block(&block_hash, None, None)
.await
.context("fetching block from hash")?;
(BlockNumAndHash::from(&block), change)
}
} else if let Ok(block_hash) = serde_json::from_value::<H256>(x.clone()) {
let block = rpcs
.block(&block_hash, None, None)
.await
.context("fetching block number from hash")?;
(BlockNumAndHash::from(&block), false)
} else { } else {
return Err(anyhow::anyhow!( return Err(anyhow::anyhow!(
"param not a block identifier, block number, or block hash" "app missing. cannot find block number from hash"
) )
.into()); .into());
} }
@ -184,21 +209,23 @@ pub async fn clean_block_number(
} }
/// TODO: change this to also return the hash needed? /// TODO: change this to also return the hash needed?
#[derive(Debug, Eq, PartialEq)] /// this replaces any "latest" identifiers in the JsonRpcRequest with the current block number which feels like the data is structured wrong
#[derive(Debug, Default, Hash, Eq, PartialEq)]
pub enum CacheMode { pub enum CacheMode {
CacheSuccessForever, SuccessForever,
CacheNever, Standard {
Cache {
block: BlockNumAndHash, block: BlockNumAndHash,
/// cache jsonrpc errors (server errors are never cached) /// cache jsonrpc errors (server errors are never cached)
cache_errors: bool, cache_errors: bool,
}, },
CacheRange { Range {
from_block: BlockNumAndHash, from_block: BlockNumAndHash,
to_block: BlockNumAndHash, to_block: BlockNumAndHash,
/// cache jsonrpc errors (server errors are never cached) /// cache jsonrpc errors (server errors are never cached)
cache_errors: bool, cache_errors: bool,
}, },
#[default]
Never,
} }
fn get_block_param_id(method: &str) -> Option<usize> { fn get_block_param_id(method: &str) -> Option<usize> {
@ -227,61 +254,92 @@ fn get_block_param_id(method: &str) -> Option<usize> {
} }
impl CacheMode { impl CacheMode {
pub async fn new( /// like `try_new`, but instead of erroring, it will default to caching with the head block
method: &str, /// returns None if this request should not be cached
params: &mut serde_json::Value, pub async fn new<'a>(
head_block: &Web3ProxyBlock, request: &'a mut JsonRpcRequest,
rpcs: &Web3Rpcs, head_block: Option<&'a Web3ProxyBlock>,
app: Option<&'a Web3ProxyApp>,
) -> Self { ) -> Self {
match Self::try_new(method, params, head_block, rpcs).await { match Self::try_new(request, head_block, app).await {
Ok(x) => x, Ok(x) => x,
Err(Web3ProxyError::NoBlocksKnown) => { Err(Web3ProxyError::NoBlocksKnown) => {
warn!(%method, ?params, "no servers available to get block from params. caching with head block"); warn!(
CacheMode::Cache { method = %request.method,
block: head_block.into(), params = ?request.params,
cache_errors: true, "no servers available to get block from params. caching with head block"
);
if let Some(head_block) = head_block {
// TODO: strange to get NoBlocksKnown **and** have a head block. think about this more
CacheMode::Standard {
block: head_block.into(),
cache_errors: true,
}
} else {
CacheMode::Never
} }
} }
Err(err) => { Err(err) => {
error!(%method, ?params, ?err, "could not get block from params. caching with head block"); error!(
CacheMode::Cache { method = %request.method,
block: head_block.into(), params = ?request.params,
cache_errors: true, ?err,
"could not get block from params. caching with head block"
);
if let Some(head_block) = head_block {
CacheMode::Standard {
block: head_block.into(),
cache_errors: true,
}
} else {
CacheMode::Never
} }
} }
} }
} }
pub async fn try_new( pub async fn try_new(
method: &str, request: &mut JsonRpcRequest,
params: &mut serde_json::Value, head_block: Option<&Web3ProxyBlock>,
head_block: &Web3ProxyBlock, app: Option<&Web3ProxyApp>,
rpcs: &Web3Rpcs,
) -> Web3ProxyResult<Self> { ) -> Web3ProxyResult<Self> {
let params = &mut request.params;
if matches!(params, serde_json::Value::Null) { if matches!(params, serde_json::Value::Null) {
// no params given. cache with the head block // no params given. cache with the head block
return Ok(Self::Cache { if let Some(head_block) = head_block {
block: head_block.into(), return Ok(Self::Standard {
cache_errors: true, block: head_block.into(),
}); cache_errors: true,
});
} else {
return Ok(Self::Never);
}
} }
if head_block.is_none() {
// since we don't have a head block, i don't trust our anything enough to cache
return Ok(Self::Never);
}
let head_block = head_block.expect("head_block was just checked above");
if let Some(params) = params.as_array() { if let Some(params) = params.as_array() {
if params.is_empty() { if params.is_empty() {
// no params given. cache with the head block // no params given. cache with the head block
return Ok(Self::Cache { return Ok(Self::Standard {
block: head_block.into(), block: head_block.into(),
cache_errors: true, cache_errors: true,
}); });
} }
} }
match method { match request.method.as_str() {
"debug_traceTransaction" => { "debug_traceTransaction" => {
// TODO: make sure re-orgs work properly! // TODO: make sure re-orgs work properly!
Ok(CacheMode::CacheSuccessForever) Ok(CacheMode::SuccessForever)
} }
"eth_gasPrice" => Ok(CacheMode::Cache { "eth_gasPrice" => Ok(CacheMode::Standard {
block: head_block.into(), block: head_block.into(),
cache_errors: false, cache_errors: false,
}), }),
@ -289,24 +347,24 @@ impl CacheMode {
// TODO: double check that any node can serve this // TODO: double check that any node can serve this
// TODO: can a block change? like what if it gets orphaned? // TODO: can a block change? like what if it gets orphaned?
// TODO: make sure re-orgs work properly! // TODO: make sure re-orgs work properly!
Ok(CacheMode::CacheSuccessForever) Ok(CacheMode::SuccessForever)
} }
"eth_getBlockByNumber" => { "eth_getBlockByNumber" => {
// TODO: double check that any node can serve this // TODO: double check that any node can serve this
// TODO: CacheSuccessForever if the block is old enough // TODO: CacheSuccessForever if the block is old enough
// TODO: make sure re-orgs work properly! // TODO: make sure re-orgs work properly!
Ok(CacheMode::Cache { Ok(CacheMode::Standard {
block: head_block.into(), block: head_block.into(),
cache_errors: true, cache_errors: true,
}) })
} }
"eth_getBlockTransactionCountByHash" => { "eth_getBlockTransactionCountByHash" => {
// TODO: double check that any node can serve this // TODO: double check that any node can serve this
Ok(CacheMode::CacheSuccessForever) Ok(CacheMode::SuccessForever)
} }
"eth_getLogs" => { "eth_getLogs" => {
/* /*
// TODO: think about this more. this seems like it partly belongs in clean_block_number // TODO: think about this more
// TODO: jsonrpc has a specific code for this // TODO: jsonrpc has a specific code for this
let obj = params let obj = params
.get_mut(0) .get_mut(0)
@ -367,7 +425,7 @@ impl CacheMode {
}) })
} }
*/ */
Ok(CacheMode::Cache { Ok(CacheMode::Standard {
block: head_block.into(), block: head_block.into(),
cache_errors: true, cache_errors: true,
}) })
@ -375,7 +433,7 @@ impl CacheMode {
"eth_getTransactionByHash" => { "eth_getTransactionByHash" => {
// TODO: not sure how best to look these up // TODO: not sure how best to look these up
// try full nodes first. retry will use archive // try full nodes first. retry will use archive
Ok(CacheMode::Cache { Ok(CacheMode::Standard {
block: head_block.into(), block: head_block.into(),
cache_errors: true, cache_errors: true,
}) })
@ -383,12 +441,12 @@ impl CacheMode {
"eth_getTransactionByBlockHashAndIndex" => { "eth_getTransactionByBlockHashAndIndex" => {
// TODO: check a Cache of recent hashes // TODO: check a Cache of recent hashes
// try full nodes first. retry will use archive // try full nodes first. retry will use archive
Ok(CacheMode::CacheSuccessForever) Ok(CacheMode::SuccessForever)
} }
"eth_getTransactionReceipt" => { "eth_getTransactionReceipt" => {
// TODO: not sure how best to look these up // TODO: not sure how best to look these up
// try full nodes first. retry will use archive // try full nodes first. retry will use archive
Ok(CacheMode::Cache { Ok(CacheMode::Standard {
block: head_block.into(), block: head_block.into(),
cache_errors: true, cache_errors: true,
}) })
@ -397,29 +455,28 @@ impl CacheMode {
// TODO: check a Cache of recent hashes // TODO: check a Cache of recent hashes
// try full nodes first. retry will use archive // try full nodes first. retry will use archive
// TODO: what happens if this block is uncled later? // TODO: what happens if this block is uncled later?
Ok(CacheMode::CacheSuccessForever) Ok(CacheMode::SuccessForever)
} }
"eth_getUncleCountByBlockHash" => { "eth_getUncleCountByBlockHash" => {
// TODO: check a Cache of recent hashes // TODO: check a Cache of recent hashes
// try full nodes first. retry will use archive // try full nodes first. retry will use archive
// TODO: what happens if this block is uncled later? // TODO: what happens if this block is uncled later?
Ok(CacheMode::CacheSuccessForever) Ok(CacheMode::SuccessForever)
} }
"eth_maxPriorityFeePerGas" => { "eth_maxPriorityFeePerGas" => {
// TODO: this might be too aggressive. i think it can change before a block is mined // TODO: this might be too aggressive. i think it can change before a block is mined
Ok(CacheMode::Cache { Ok(CacheMode::Standard {
block: head_block.into(), block: head_block.into(),
cache_errors: false, cache_errors: false,
}) })
} }
"net_listening" => Ok(CacheMode::CacheSuccessForever), "net_listening" => Ok(CacheMode::SuccessForever),
"net_version" => Ok(CacheMode::CacheSuccessForever), "net_version" => Ok(CacheMode::SuccessForever),
method => match get_block_param_id(method) { method => match get_block_param_id(method) {
Some(block_param_id) => { Some(block_param_id) => {
let block = let block = clean_block_number(params, block_param_id, head_block, app).await?;
clean_block_number(params, block_param_id, head_block, rpcs).await?;
Ok(CacheMode::Cache { Ok(CacheMode::Standard {
block, block,
cache_errors: true, cache_errors: true,
}) })
@ -428,12 +485,48 @@ impl CacheMode {
}, },
} }
} }
pub fn cache_jsonrpc_errors(&self) -> bool {
match self {
Self::Never => false,
Self::SuccessForever => true,
Self::Standard { cache_errors, .. } => *cache_errors,
Self::Range { cache_errors, .. } => *cache_errors,
}
}
pub fn from_block(&self) -> Option<&BlockNumAndHash> {
match self {
Self::SuccessForever => None,
Self::Never => None,
Self::Standard { block, .. } => Some(block),
Self::Range { from_block, .. } => Some(from_block),
}
}
#[inline]
pub fn is_some(&self) -> bool {
!matches!(self, Self::Never)
}
pub fn to_block(&self) -> Option<&BlockNumAndHash> {
match self {
Self::SuccessForever => None,
Self::Never => None,
Self::Standard { block, .. } => Some(block),
Self::Range { to_block, .. } => Some(to_block),
}
}
} }
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::CacheMode; use super::CacheMode;
use crate::rpcs::{blockchain::Web3ProxyBlock, many::Web3Rpcs}; use crate::{
errors::Web3ProxyError,
jsonrpc::{JsonRpcId, JsonRpcRequest},
rpcs::blockchain::Web3ProxyBlock,
};
use ethers::types::{Block, H256}; use ethers::types::{Block, H256};
use serde_json::json; use serde_json::json;
use std::sync::Arc; use std::sync::Arc;
@ -441,7 +534,7 @@ mod test {
#[test_log::test(tokio::test)] #[test_log::test(tokio::test)]
async fn test_fee_history() { async fn test_fee_history() {
let method = "eth_feeHistory"; let method = "eth_feeHistory";
let mut params = json!([4, "latest", [25, 75]]); let params = json!([4, "latest", [25, 75]]);
let head_block = Block { let head_block = Block {
number: Some(1.into()), number: Some(1.into()),
@ -451,32 +544,32 @@ mod test {
let head_block = Web3ProxyBlock::try_new(Arc::new(head_block)).unwrap(); let head_block = Web3ProxyBlock::try_new(Arc::new(head_block)).unwrap();
let (empty, _handle, _ranked_rpc_reciver) = let id = JsonRpcId::Number(9);
Web3Rpcs::spawn(1, None, 1, 1, "test".into(), None, None)
.await
.unwrap();
let x = CacheMode::try_new(method, &mut params, &head_block, &empty) let mut request = JsonRpcRequest::new(id, method.to_string(), params).unwrap();
// TODO: instead of empty, check None?
let x = CacheMode::try_new(&mut request, Some(&head_block), None)
.await .await
.unwrap(); .unwrap();
assert_eq!( assert_eq!(
x, x,
CacheMode::Cache { CacheMode::Standard {
block: (&head_block).into(), block: (&head_block).into(),
cache_errors: true cache_errors: true
} }
); );
// "latest" should have been changed to the block number // "latest" should have been changed to the block number
assert_eq!(params.get(1), Some(&json!(head_block.number()))); assert_eq!(request.params.get(1), Some(&json!(head_block.number())));
} }
#[test_log::test(tokio::test)] #[test_log::test(tokio::test)]
async fn test_eth_call_latest() { async fn test_eth_call_latest() {
let method = "eth_call"; let method = "eth_call";
let mut params = json!([{"data": "0xdeadbeef", "to": "0x0000000000000000000000000000000000000000"}, "latest"]); let params = json!([{"data": "0xdeadbeef", "to": "0x0000000000000000000000000000000000000000"}, "latest"]);
let head_block = Block { let head_block = Block {
number: Some(18173997.into()), number: Some(18173997.into()),
@ -486,24 +579,61 @@ mod test {
let head_block = Web3ProxyBlock::try_new(Arc::new(head_block)).unwrap(); let head_block = Web3ProxyBlock::try_new(Arc::new(head_block)).unwrap();
let (empty, _handle, _ranked_rpc_reciver) = let id = JsonRpcId::Number(99);
Web3Rpcs::spawn(1, None, 1, 1, "test".into(), None, None)
.await
.unwrap();
let x = CacheMode::try_new(method, &mut params, &head_block, &empty) let mut request = JsonRpcRequest::new(id, method.to_string(), params).unwrap();
let x = CacheMode::try_new(&mut request, Some(&head_block), None)
.await .await
.unwrap(); .unwrap();
// "latest" should have been changed to the block number // "latest" should have been changed to the block number
assert_eq!(params.get(1), Some(&json!(head_block.number()))); assert_eq!(request.params.get(1), Some(&json!(head_block.number())));
assert_eq!( assert_eq!(
x, x,
CacheMode::Cache { CacheMode::Standard {
block: (&head_block).into(), block: (&head_block).into(),
cache_errors: true cache_errors: true
} }
); );
} }
#[test_log::test(tokio::test)]
async fn test_eth_call_future() {
let method = "eth_call";
let head_block_num = 18173997u64;
let future_block_num = head_block_num + 1;
let params = json!([{"data": "0xdeadbeef", "to": "0x0000000000000000000000000000000000000000"}, future_block_num]);
let head_block: Block<H256> = Block {
number: Some(head_block_num.into()),
hash: Some(H256::random()),
..Default::default()
};
let head_block = Web3ProxyBlock::try_new(Arc::new(head_block)).unwrap();
let mut request = JsonRpcRequest::new(99.into(), method.to_string(), params).unwrap();
let x = CacheMode::try_new(&mut request, Some(&head_block), None)
.await
.unwrap_err();
// future blocks should get an error
match x {
Web3ProxyError::UnknownBlockNumber { known, unknown } => {
assert_eq!(known.as_u64(), head_block_num);
assert_eq!(unknown.as_u64(), future_block_num);
}
x => panic!("{:?}", x),
}
let x = CacheMode::new(&mut request, Some(&head_block), None).await;
// TODO: cache with the head block instead?
matches!(x, CacheMode::Never);
}
} }

View File

@ -1,6 +1,7 @@
use crate::balance::Balance; use crate::balance::Balance;
use crate::errors::{Web3ProxyError, Web3ProxyResult}; use crate::errors::{Web3ProxyError, Web3ProxyResult};
use crate::frontend::authorization::{AuthorizationChecks, RpcSecretKey}; use crate::frontend::authorization::AuthorizationChecks;
use crate::secrets::RpcSecretKey;
use derive_more::From; use derive_more::From;
use entities::rpc_key; use entities::rpc_key;
use migration::sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter}; use migration::sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};

View File

@ -48,8 +48,10 @@ pub struct CliConfig {
pub struct TopConfig { pub struct TopConfig {
pub app: AppConfig, pub app: AppConfig,
pub balanced_rpcs: HashMap<String, Web3RpcConfig>, pub balanced_rpcs: HashMap<String, Web3RpcConfig>,
pub private_rpcs: Option<HashMap<String, Web3RpcConfig>>, #[serde(default = "Default::default")]
pub bundler_4337_rpcs: Option<HashMap<String, Web3RpcConfig>>, pub private_rpcs: HashMap<String, Web3RpcConfig>,
#[serde(default = "Default::default")]
pub bundler_4337_rpcs: HashMap<String, Web3RpcConfig>,
/// unknown config options get put here /// unknown config options get put here
#[serde(flatten, default = "HashMap::default")] #[serde(flatten, default = "HashMap::default")]
pub extra: HashMap<String, serde_json::Value>, pub extra: HashMap<String, serde_json::Value>,
@ -292,6 +294,8 @@ pub fn average_block_interval(chain_id: u64) -> Duration {
8453 => Duration::from_secs(2), 8453 => Duration::from_secs(2),
// arbitrum // arbitrum
42161 => Duration::from_millis(500), 42161 => Duration::from_millis(500),
// web3-proxy tests
999_001_999 => Duration::from_secs(10),
// anything else // anything else
_ => { _ => {
let default = 10; let default = 10;

View File

@ -59,7 +59,7 @@ pub async fn admin_increase_balance(
let caller = app.bearer_is_authorized(bearer).await?; let caller = app.bearer_is_authorized(bearer).await?;
// Establish connections // Establish connections
let db_conn = global_db_conn().await?; let db_conn = global_db_conn()?;
let txn = db_conn.begin().await?; let txn = db_conn.begin().await?;
// Check if the caller is an admin (if not, return early) // Check if the caller is an admin (if not, return early)
@ -197,8 +197,8 @@ pub async fn admin_imitate_login_get(
resources: vec![], resources: vec![],
}; };
let db_conn = global_db_conn().await?; let db_conn = global_db_conn()?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
let admin = user::Entity::find() let admin = user::Entity::find()
.filter(user::Column::Address.eq(admin_address.as_bytes())) .filter(user::Column::Address.eq(admin_address.as_bytes()))
@ -336,7 +336,7 @@ pub async fn admin_imitate_login_post(
})?; })?;
// fetch the message we gave them from our database // fetch the message we gave them from our database
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
let user_pending_login = pending_login::Entity::find() let user_pending_login = pending_login::Entity::find()
.filter(pending_login::Column::Nonce.eq(Uuid::from(login_nonce))) .filter(pending_login::Column::Nonce.eq(Uuid::from(login_nonce)))
@ -379,7 +379,7 @@ pub async fn admin_imitate_login_post(
.await? .await?
.web3_context("admin address was not found!")?; .web3_context("admin address was not found!")?;
let db_conn = global_db_conn().await?; let db_conn = global_db_conn()?;
// Add a message that the admin has logged in // Add a message that the admin has logged in
// Note that the admin is trying to log in as this user // Note that the admin is trying to log in as this user

View File

@ -3,20 +3,22 @@
use super::rpc_proxy_ws::ProxyMode; use super::rpc_proxy_ws::ProxyMode;
use crate::app::{Web3ProxyApp, APP_USER_AGENT}; use crate::app::{Web3ProxyApp, APP_USER_AGENT};
use crate::balance::Balance; use crate::balance::Balance;
use crate::block_number::CacheMode;
use crate::caches::RegisteredUserRateLimitKey; use crate::caches::RegisteredUserRateLimitKey;
use crate::compute_units::default_usd_per_cu;
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult}; use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
use crate::globals::global_db_replica_conn; use crate::globals::{global_db_replica_conn, APP};
use crate::jsonrpc::{self, JsonRpcParams, JsonRpcRequest}; use crate::jsonrpc::{self, JsonRpcId, JsonRpcParams, JsonRpcRequest};
use crate::kafka::KafkaDebugLogger;
use crate::response_cache::JsonRpcQueryCacheKey;
use crate::rpcs::blockchain::Web3ProxyBlock; use crate::rpcs::blockchain::Web3ProxyBlock;
use crate::rpcs::one::Web3Rpc; use crate::rpcs::one::Web3Rpc;
use crate::secrets::RpcSecretKey;
use crate::stats::{AppStat, BackendRequests}; use crate::stats::{AppStat, BackendRequests};
use crate::user_token::UserBearerToken; use crate::user_token::UserBearerToken;
use anyhow::Context; use anyhow::Context;
use axum::headers::authorization::Bearer; use axum::headers::authorization::Bearer;
use axum::headers::{Header, Origin, Referer, UserAgent}; use axum::headers::{Header, Origin, Referer, UserAgent};
use chrono::Utc; use chrono::Utc;
use core::fmt;
use deferred_rate_limiter::{DeferredRateLimitResult, DeferredRateLimiter}; use deferred_rate_limiter::{DeferredRateLimitResult, DeferredRateLimiter};
use derivative::Derivative; use derivative::Derivative;
use derive_more::From; use derive_more::From;
@ -29,89 +31,27 @@ use http::HeaderValue;
use ipnet::IpNet; use ipnet::IpNet;
use migration::sea_orm::prelude::Decimal; use migration::sea_orm::prelude::Decimal;
use migration::sea_orm::{ColumnTrait, EntityTrait, QueryFilter}; use migration::sea_orm::{ColumnTrait, EntityTrait, QueryFilter};
use rdkafka::message::{Header as KafkaHeader, OwnedHeaders as KafkaOwnedHeaders, OwnedMessage};
use rdkafka::producer::{FutureProducer, FutureRecord};
use rdkafka::util::Timeout as KafkaTimeout;
use redis_rate_limiter::redis::AsyncCommands; use redis_rate_limiter::redis::AsyncCommands;
use redis_rate_limiter::{RedisRateLimitResult, RedisRateLimiter}; use redis_rate_limiter::{RedisRateLimitResult, RedisRateLimiter};
use serde::{Deserialize, Serialize}; use serde::Serialize;
use serde_json::json; use serde_json::json;
use serde_json::value::RawValue;
use std::borrow::Cow; use std::borrow::Cow;
use std::fmt::Debug; use std::fmt::Debug;
use std::fmt::Display; use std::fmt::Display;
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
use std::mem; use std::mem;
use std::num::NonZeroU64; use std::num::NonZeroU64;
use std::sync::atomic::{self, AtomicBool, AtomicI64, AtomicU64, AtomicUsize}; use std::sync::atomic::{self, AtomicBool, AtomicI64, AtomicU64};
use std::time::Duration; use std::time::Duration;
use std::{net::IpAddr, str::FromStr, sync::Arc}; use std::{net::IpAddr, str::FromStr, sync::Arc};
use tokio::sync::RwLock as AsyncRwLock; use tokio::sync::RwLock as AsyncRwLock;
use tokio::sync::{mpsc, OwnedSemaphorePermit, Semaphore}; use tokio::sync::{mpsc, OwnedSemaphorePermit, Semaphore};
use tokio::task::JoinHandle;
use tokio::time::Instant; use tokio::time::Instant;
use tracing::{error, trace, warn}; use tracing::{error, trace, warn};
use ulid::Ulid; use ulid::Ulid;
use uuid::Uuid; use uuid::Uuid;
/// This lets us use UUID and ULID while we transition to only ULIDs
/// TODO: custom deserialize that can also go from String to Ulid
#[derive(Copy, Clone, Deserialize)]
pub enum RpcSecretKey {
    // the preferred, newer key format
    Ulid(Ulid),
    // the legacy format; Debug/Serialize convert it to a ULID with the same bits
    Uuid(Uuid),
}
impl RpcSecretKey {
    /// Make a brand-new key from a freshly generated ULID.
    pub fn new() -> Self {
        Ulid::new().into()
    }

    /// The key's raw 128 bits, independent of which format wraps them.
    fn as_128(&self) -> u128 {
        match *self {
            Self::Ulid(ulid) => ulid.0,
            Self::Uuid(uuid) => uuid.as_u128(),
        }
    }
}
/// Two keys are equal when their raw 128 bits match, even if one side is a
/// ULID and the other a UUID.
impl PartialEq for RpcSecretKey {
    fn eq(&self, other: &Self) -> bool {
        let (lhs, rhs) = (self.as_128(), other.as_128());
        lhs == rhs
    }
}

impl Eq for RpcSecretKey {}
impl Debug for RpcSecretKey {
    /// Always render as a ULID; UUID-backed keys are converted bit-for-bit first.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let as_ulid = match self {
            Self::Ulid(ulid) => *ulid,
            Self::Uuid(uuid) => Ulid::from(uuid.as_u128()),
        };

        Debug::fmt(&as_ulid, f)
    }
}
/// always serialize as a ULID.
impl Serialize for RpcSecretKey {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
Self::Ulid(x) => x.serialize(serializer),
Self::Uuid(x) => {
let x: Ulid = x.to_owned().into();
x.serialize(serializer)
}
}
}
}
/// TODO: should this have IpAddr and Origin or AuthorizationChecks? /// TODO: should this have IpAddr and Origin or AuthorizationChecks?
#[derive(Debug)] #[derive(Debug)]
pub enum RateLimitResult { pub enum RateLimitResult {
@ -125,7 +65,7 @@ pub enum RateLimitResult {
UnknownKey, UnknownKey,
} }
#[derive(Clone, Debug)] #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub enum AuthorizationType { pub enum AuthorizationType {
Internal, Internal,
Frontend, Frontend,
@ -180,15 +120,6 @@ pub struct Authorization {
pub authorization_type: AuthorizationType, pub authorization_type: AuthorizationType,
} }
/// Mirrors a request and its response to a kafka topic for opt-in debugging.
pub struct KafkaDebugLogger {
    // topic that both the request and response messages are produced to
    topic: String,
    // kafka message key (rmp-serialized rpc_secret_key_id), shared by all messages
    key: Vec<u8>,
    // headers (ip, request_ulid, chain_id, ...) attached to every produced message
    headers: KafkaOwnedHeaders,
    producer: FutureProducer,
    // counts of request/response payloads handed to the producer
    num_requests: AtomicUsize,
    num_responses: AtomicUsize,
}
/// Ulids and Uuids matching the same bits hash the same /// Ulids and Uuids matching the same bits hash the same
impl Hash for RpcSecretKey { impl Hash for RpcSecretKey {
fn hash<H: Hasher>(&self, state: &mut H) { fn hash<H: Hasher>(&self, state: &mut H) {
@ -198,167 +129,43 @@ impl Hash for RpcSecretKey {
} }
} }
impl fmt::Debug for KafkaDebugLogger { #[derive(Debug, Default, From, Serialize)]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { pub enum RequestOrMethod {
f.debug_struct("KafkaDebugLogger") Request(JsonRpcRequest),
.field("topic", &self.topic) /// sometimes we don't have a full request. for example, when we are logging a websocket subscription
.finish_non_exhaustive() Method(Cow<'static, str>, usize),
} #[default]
} None,
type KafkaLogResult = Result<(i32, i64), (rdkafka::error::KafkaError, OwnedMessage)>;
impl KafkaDebugLogger {
    /// Build a logger for one request.
    ///
    /// Returns `None` if the app has no kafka producer configured.
    /// The message key and headers are prepared once here and then reused for
    /// both the request and the response messages.
    fn try_new(
        app: &Web3ProxyApp,
        authorization: Arc<Authorization>,
        head_block_num: Option<&U64>,
        kafka_topic: &str,
        request_ulid: Ulid,
    ) -> Option<Arc<Self>> {
        // kafka is optional; bail out early if it isn't configured
        let kafka_producer = app.kafka_producer.clone()?;

        let kafka_topic = kafka_topic.to_string();

        // requests without a secret key fall back to the id type's default
        let rpc_secret_key_id = authorization
            .checks
            .rpc_secret_key_id
            .map(|x| x.get())
            .unwrap_or_default();

        let kafka_key =
            rmp_serde::to_vec(&rpc_secret_key_id).expect("ids should always serialize with rmp");

        let chain_id = app.config.chain_id;

        // fall back to the balanced rpcs' view of the head block if the caller didn't pass one
        let head_block_num = head_block_num
            .copied()
            .or_else(|| app.balanced_rpcs.head_block_num());

        // TODO: would be nice to have the block hash too

        // another item is added with the response, so initial_capacity is +1 what is needed here
        let kafka_headers = KafkaOwnedHeaders::new_with_capacity(6)
            .insert(KafkaHeader {
                key: "rpc_secret_key_id",
                value: authorization
                    .checks
                    .rpc_secret_key_id
                    .map(|x| x.to_string())
                    .as_ref(),
            })
            .insert(KafkaHeader {
                key: "ip",
                value: Some(&authorization.ip.to_string()),
            })
            .insert(KafkaHeader {
                key: "request_ulid",
                value: Some(&request_ulid.to_string()),
            })
            .insert(KafkaHeader {
                key: "head_block_num",
                value: head_block_num.map(|x| x.to_string()).as_ref(),
            })
            .insert(KafkaHeader {
                key: "chain_id",
                value: Some(&chain_id.to_le_bytes()),
            });

        // save the key and headers for when we log the response
        let x = Self {
            topic: kafka_topic,
            key: kafka_key,
            headers: kafka_headers,
            producer: kafka_producer,
            num_requests: 0.into(),
            num_responses: 0.into(),
        };

        let x = Arc::new(x);

        Some(x)
    }
fn background_log(&self, payload: Vec<u8>) -> JoinHandle<KafkaLogResult> {
let topic = self.topic.clone();
let key = self.key.clone();
let producer = self.producer.clone();
let headers = self.headers.clone();
let f = async move {
let record = FutureRecord::to(&topic)
.key(&key)
.payload(&payload)
.headers(headers);
let produce_future =
producer.send(record, KafkaTimeout::After(Duration::from_secs(5 * 60)));
let kafka_response = produce_future.await;
if let Err((err, msg)) = kafka_response.as_ref() {
error!("produce kafka request: {} - {:?}", err, msg);
// TODO: re-queue the msg? log somewhere else like a file on disk?
// TODO: this is bad and should probably trigger an alarm
};
kafka_response
};
tokio::spawn(f)
}
/// for opt-in debug usage, log the request to kafka
/// TODO: generic type for request
pub fn log_debug_request(&self, request: &JsonRpcRequest) -> JoinHandle<KafkaLogResult> {
// TODO: is rust message pack a good choice? try rkyv instead
let payload =
rmp_serde::to_vec(&request).expect("requests should always serialize with rmp");
self.num_requests.fetch_add(1, atomic::Ordering::Relaxed);
self.background_log(payload)
}
pub fn log_debug_response<R>(&self, response: &R) -> JoinHandle<KafkaLogResult>
where
R: serde::Serialize,
{
let payload =
rmp_serde::to_vec(&response).expect("requests should always serialize with rmp");
self.num_responses.fetch_add(1, atomic::Ordering::Relaxed);
self.background_log(payload)
}
} }
/// TODO: instead of a bunch of atomics, this should probably use a RwLock /// TODO: instead of a bunch of atomics, this should probably use a RwLock
#[derive(Debug, Derivative)] #[derive(Debug, Derivative)]
#[derivative(Default)] #[derivative(Default)]
pub struct RequestMetadata { pub struct Web3Request {
/// TODO: set archive_request during the new instead of after /// TODO: set archive_request during the new instead of after
/// TODO: this is more complex than "requires a block older than X height". different types of data can be pruned differently /// TODO: this is more complex than "requires a block older than X height". different types of data can be pruned differently
pub archive_request: AtomicBool, pub archive_request: AtomicBool,
pub authorization: Arc<Authorization>, pub authorization: Arc<Authorization>,
pub cache_mode: CacheMode,
/// TODO: this should probably be in a global config. although maybe if we run multiple chains in one process this will be useful
pub chain_id: u64, pub chain_id: u64,
pub head_block: Option<Web3ProxyBlock>,
/// TODO: this should be in a global config. not copied to every single request
pub usd_per_cu: Decimal, pub usd_per_cu: Decimal,
pub request_ulid: Ulid, pub request: RequestOrMethod,
/// Size of the JSON request. Does not include headers or things like that.
pub request_bytes: usize,
/// The JSON-RPC request method.
pub method: Cow<'static, str>,
/// Instant that the request was received (or at least close to it) /// Instant that the request was received (or at least close to it)
/// We use Instant and not timestamps to avoid problems with leap seconds and similar issues /// We use Instant and not timestamps to avoid problems with leap seconds and similar issues
#[derivative(Default(value = "Instant::now()"))] #[derivative(Default(value = "Instant::now()"))]
pub start_instant: Instant, pub start_instant: Instant,
#[derivative(Default(value = "Instant::now() + Duration::from_secs(295)"))]
pub expire_instant: Instant,
/// if this is empty, there was a cache_hit /// if this is empty, there was a cache_hit
/// otherwise, it is populated with any rpc servers that were used by this request /// otherwise, it is populated with any rpc servers that were used by this request
pub backend_requests: BackendRequests, pub backend_requests: BackendRequests,
@ -394,50 +201,48 @@ impl Default for Authorization {
} }
} }
impl RequestMetadata { impl RequestOrMethod {
pub fn proxy_mode(&self) -> ProxyMode { pub fn id(&self) -> Box<RawValue> {
self.authorization.checks.proxy_mode match self {
} Self::Request(x) => x.id.clone(),
} Self::Method(_, _) => Default::default(),
Self::None => Default::default(),
#[derive(From)] }
pub enum RequestOrMethod<'a> {
/// jsonrpc method (or similar label) and the size that the request should count as (sometimes 0)
Method(&'a str, usize),
Request(&'a JsonRpcRequest),
}
impl<'a> RequestOrMethod<'a> {
fn method(&self) -> Cow<'static, str> {
let x = match self {
Self::Request(x) => x.method.to_string(),
Self::Method(x, _) => x.to_string(),
};
x.into()
} }
fn jsonrpc_request(&self) -> Option<&JsonRpcRequest> { pub fn method(&self) -> &str {
match self {
Self::Request(x) => x.method.as_str(),
Self::Method(x, _) => x,
Self::None => "unknown",
}
}
/// TODO: should this panic on Self::None|Self::Method?
pub fn params(&self) -> &serde_json::Value {
match self {
Self::Request(x) => &x.params,
Self::Method(..) => &serde_json::Value::Null,
Self::None => &serde_json::Value::Null,
}
}
pub fn jsonrpc_request(&self) -> Option<&JsonRpcRequest> {
match self { match self {
Self::Request(x) => Some(x), Self::Request(x) => Some(x),
_ => None, _ => None,
} }
} }
fn num_bytes(&self) -> usize { pub fn num_bytes(&self) -> usize {
match self { match self {
RequestOrMethod::Method(_, num_bytes) => *num_bytes, Self::Method(_, num_bytes) => *num_bytes,
RequestOrMethod::Request(x) => x.num_bytes(), Self::Request(x) => x.num_bytes(),
Self::None => 0,
} }
} }
} }
impl<'a> From<&'a str> for RequestOrMethod<'a> {
fn from(value: &'a str) -> Self {
Self::Method(value, 0)
}
}
// TODO: i think a trait is actually the right thing to use here // TODO: i think a trait is actually the right thing to use here
#[derive(From)] #[derive(From)]
pub enum ResponseOrBytes<'a> { pub enum ResponseOrBytes<'a> {
@ -470,110 +275,59 @@ impl ResponseOrBytes<'_> {
} }
} }
impl RequestMetadata { impl Web3Request {
pub async fn new<'a, R: Into<RequestOrMethod<'a>>>( #[allow(clippy::too_many_arguments)]
app: &Web3ProxyApp, async fn new_with_options(
authorization: Arc<Authorization>, authorization: Arc<Authorization>,
request: R, chain_id: u64,
head_block: Option<&Web3ProxyBlock>, head_block: Option<Web3ProxyBlock>,
kafka_debug_logger: Option<Arc<KafkaDebugLogger>>,
max_wait: Option<Duration>,
mut request: RequestOrMethod,
stat_sender: Option<mpsc::UnboundedSender<AppStat>>,
usd_per_cu: Decimal,
app: Option<&Web3ProxyApp>,
) -> Arc<Self> { ) -> Arc<Self> {
let request = request.into(); let start_instant = Instant::now();
let method = request.method(); // TODO: get this default from config, or from user settings
// 5 minutes with a buffer for other things being slow
let expire_instant = start_instant + max_wait.unwrap_or_else(|| Duration::from_secs(295));
let request_bytes = request.num_bytes(); // let request: RequestOrMethod = request.into();
// TODO: modify the request here? I don't really like that very much. but its a sure way to get archive_request set correctly
// TODO: add the Ulid at the haproxy or amazon load balancer level? investigate OpenTelemetry
let request_ulid = Ulid::new();
let kafka_debug_logger = if matches!(authorization.checks.proxy_mode, ProxyMode::Debug) {
KafkaDebugLogger::try_new(
app,
authorization.clone(),
head_block.map(|x| x.number()),
"web3_proxy:rpc",
request_ulid,
)
} else {
None
};
// we VERY INTENTIONALLY log to kafka BEFORE calculating the cache key
// this is because calculating the cache_key may modify the params!
// for example, if the request specifies "latest" as the block number, we replace it with the actual latest block number
if let Some(ref kafka_debug_logger) = kafka_debug_logger { if let Some(ref kafka_debug_logger) = kafka_debug_logger {
if let Some(request) = request.jsonrpc_request() { // TODO: channels might be more ergonomic than spawned futures
// TODO: channels might be more ergonomic than spawned futures // spawned things run in parallel easier but generally need more Arcs
// spawned things run in parallel easier but generally need more Arcs kafka_debug_logger.log_debug_request(&request);
kafka_debug_logger.log_debug_request(request);
} else {
// there probably isn't a new request attached to this metadata.
// this happens with websocket subscriptions
}
} }
let chain_id = app.config.chain_id; // now that kafka has logged the user's original params, we can calculate the cache key
let cache_mode = match &mut request {
let x = Self { RequestOrMethod::Request(x) => CacheMode::new(x, head_block.as_ref(), app).await,
archive_request: false.into(), _ => CacheMode::Never,
authorization,
backend_requests: Default::default(),
chain_id,
error_response: false.into(),
kafka_debug_logger,
method,
no_servers: 0.into(),
request_bytes,
request_ulid,
response_bytes: 0.into(),
response_from_backup_rpc: false.into(),
response_millis: 0.into(),
response_timestamp: 0.into(),
start_instant: Instant::now(),
stat_sender: app.stat_sender.clone(),
usd_per_cu: app.config.usd_per_cu.unwrap_or_default(),
user_error_response: false.into(),
}; };
Arc::new(x)
}
pub fn new_internal<P: JsonRpcParams>(chain_id: u64, method: &str, params: &P) -> Arc<Self> {
let authorization = Arc::new(Authorization::internal().unwrap());
let request_ulid = Ulid::new();
let method = method.to_string().into();
// TODO: how can we get this?
let stat_sender = None;
// TODO: how can we do this efficiently? having to serialize sucks
let request_bytes = json!({
"jsonrpc": "2.0",
"id": 1,
"method": method,
"params": params,
})
.to_string()
.len();
// TODO: we should be getting this from config instead!
let usd_per_cu = default_usd_per_cu(chain_id);
let x = Self { let x = Self {
archive_request: false.into(), archive_request: false.into(),
authorization, authorization,
backend_requests: Default::default(), backend_requests: Default::default(),
cache_mode,
chain_id, chain_id,
error_response: false.into(), error_response: false.into(),
kafka_debug_logger: None, expire_instant,
method, head_block: head_block.clone(),
kafka_debug_logger,
no_servers: 0.into(), no_servers: 0.into(),
request_bytes, request,
request_ulid,
response_bytes: 0.into(), response_bytes: 0.into(),
response_from_backup_rpc: false.into(), response_from_backup_rpc: false.into(),
response_millis: 0.into(), response_millis: 0.into(),
response_timestamp: 0.into(), response_timestamp: 0.into(),
start_instant: Instant::now(), start_instant,
stat_sender, stat_sender,
usd_per_cu, usd_per_cu,
user_error_response: false.into(), user_error_response: false.into(),
@ -582,10 +336,127 @@ impl RequestMetadata {
Arc::new(x) Arc::new(x)
} }
pub async fn new_with_app(
app: &Web3ProxyApp,
authorization: Arc<Authorization>,
max_wait: Option<Duration>,
request: RequestOrMethod,
head_block: Option<Web3ProxyBlock>,
) -> Arc<Self> {
// TODO: get this out of tracing instead (where we have a String from Amazon's LB)
let request_ulid = Ulid::new();
let kafka_debug_logger = if matches!(authorization.checks.proxy_mode, ProxyMode::Debug) {
KafkaDebugLogger::try_new(
app,
authorization.clone(),
head_block.as_ref().map(|x| x.number()),
"web3_proxy:rpc",
request_ulid,
)
} else {
None
};
let chain_id = app.config.chain_id;
let stat_sender = app.stat_sender.clone();
let usd_per_cu = app.config.usd_per_cu.unwrap_or_default();
Self::new_with_options(
authorization,
chain_id,
head_block,
kafka_debug_logger,
max_wait,
request,
stat_sender,
usd_per_cu,
Some(app),
)
.await
}
pub async fn new_internal<P: JsonRpcParams>(
method: String,
params: &P,
head_block: Option<Web3ProxyBlock>,
max_wait: Option<Duration>,
) -> Arc<Self> {
let authorization = Arc::new(Authorization::internal().unwrap());
// TODO: we need a real id! increment a counter on the app
let id = JsonRpcId::Number(1);
// TODO: this seems inefficient
let request = JsonRpcRequest::new(id, method, json!(params)).unwrap();
if let Some(app) = APP.get() {
Self::new_with_app(app, authorization, max_wait, request.into(), head_block).await
} else {
Self::new_with_options(
authorization,
0,
head_block,
None,
max_wait,
request.into(),
None,
Default::default(),
None,
)
.await
}
}
#[inline]
pub fn backend_rpcs_used(&self) -> Vec<Arc<Web3Rpc>> { pub fn backend_rpcs_used(&self) -> Vec<Arc<Web3Rpc>> {
self.backend_requests.lock().clone() self.backend_requests.lock().clone()
} }
pub fn cache_key(&self) -> Option<u64> {
match &self.cache_mode {
CacheMode::Never => None,
x => {
let x = JsonRpcQueryCacheKey::new(x, &self.request).hash();
Some(x)
}
}
}
#[inline]
pub fn cache_jsonrpc_errors(&self) -> bool {
self.cache_mode.cache_jsonrpc_errors()
}
#[inline]
pub fn id(&self) -> Box<RawValue> {
self.request.id()
}
pub fn max_block_needed(&self) -> Option<U64> {
self.cache_mode.to_block().map(|x| *x.num())
}
pub fn min_block_needed(&self) -> Option<U64> {
if self.archive_request.load(atomic::Ordering::Relaxed) {
Some(U64::zero())
} else {
self.cache_mode.from_block().map(|x| *x.num())
}
}
pub fn ttl(&self) -> Duration {
self.expire_instant
.saturating_duration_since(Instant::now())
}
pub fn ttl_expired(&self) -> bool {
self.expire_instant < Instant::now()
}
pub fn try_send_stat(mut self) -> Web3ProxyResult<()> { pub fn try_send_stat(mut self) -> Web3ProxyResult<()> {
if let Some(stat_sender) = self.stat_sender.take() { if let Some(stat_sender) = self.stat_sender.take() {
trace!(?self, "sending stat"); trace!(?self, "sending stat");
@ -648,11 +519,16 @@ impl RequestMetadata {
} }
} }
#[inline]
pub fn proxy_mode(&self) -> ProxyMode {
self.authorization.checks.proxy_mode
}
// TODO: helper function to duplicate? needs to clear request_bytes, and all the atomics tho... // TODO: helper function to duplicate? needs to clear request_bytes, and all the atomics tho...
} }
// TODO: is this where the panic comes from? // TODO: is this where the panic comes from?
impl Drop for RequestMetadata { impl Drop for Web3Request {
fn drop(&mut self) { fn drop(&mut self) {
if self.stat_sender.is_some() { if self.stat_sender.is_some() {
// turn `&mut self` into `self` // turn `&mut self` into `self`
@ -1076,7 +952,7 @@ impl Web3ProxyApp {
let user_bearer_token = UserBearerToken::try_from(bearer)?; let user_bearer_token = UserBearerToken::try_from(bearer)?;
// get the attached address from the database for the given auth_token. // get the attached address from the database for the given auth_token.
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
let user_bearer_uuid: Uuid = user_bearer_token.into(); let user_bearer_uuid: Uuid = user_bearer_token.into();
@ -1193,7 +1069,7 @@ impl Web3ProxyApp {
let x = self let x = self
.rpc_secret_key_cache .rpc_secret_key_cache
.try_get_with_by_ref(rpc_secret_key, async move { .try_get_with_by_ref(rpc_secret_key, async move {
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
// TODO: join the user table to this to return the User? we don't always need it // TODO: join the user table to this to return the User? we don't always need it
// TODO: join on secondary users // TODO: join on secondary users

View File

@ -27,7 +27,7 @@ use strum::{EnumCount, EnumIter};
use tokio::sync::broadcast; use tokio::sync::broadcast;
use tower_http::sensitive_headers::SetSensitiveRequestHeadersLayer; use tower_http::sensitive_headers::SetSensitiveRequestHeadersLayer;
use tower_http::{cors::CorsLayer, normalize_path::NormalizePathLayer, trace::TraceLayer}; use tower_http::{cors::CorsLayer, normalize_path::NormalizePathLayer, trace::TraceLayer};
use tracing::{error_span, info}; use tracing::{error_span, info, trace_span};
use ulid::Ulid; use ulid::Ulid;
/// simple keys for caching responses /// simple keys for caching responses
@ -278,13 +278,22 @@ pub async fn serve(
// And then we put it along with other information into the `request` span // And then we put it along with other information into the `request` span
// TODO: what other info should we attach? how can we attach an error and a tracing span here? // TODO: what other info should we attach? how can we attach an error and a tracing span here?
error_span!( // TODO: how can we do a tracing_span OR an error_span?
let s = trace_span!(
"request", "request",
id = %request_id, id = %request_id,
// method = %request.method(), method = %request.method(),
// // don't log the path. it often includes the RPC key! path = %request.uri().path(),
// path = %request.uri().path(), );
)
if s.is_disabled() {
error_span!(
"request",
id = %request_id,
)
} else {
s
}
}), // .on_failure(|| todo!("on failure that has the request and response body so we can debug more easily")), }), // .on_failure(|| todo!("on failure that has the request and response body so we can debug more easily")),
) )
// 404 for any unknown routes // 404 for any unknown routes

View File

@ -2,9 +2,9 @@
//! //!
//! WebSockets are the preferred method of receiving requests, but not all clients have good support. //! WebSockets are the preferred method of receiving requests, but not all clients have good support.
use super::authorization::{ip_is_authorized, key_is_authorized, Authorization, RequestMetadata}; use super::authorization::{ip_is_authorized, key_is_authorized, Authorization, Web3Request};
use crate::errors::{Web3ProxyError, Web3ProxyResponse}; use crate::errors::{Web3ProxyError, Web3ProxyResponse};
use crate::jsonrpc::{self, JsonRpcId}; use crate::jsonrpc;
use crate::{ use crate::{
app::Web3ProxyApp, app::Web3ProxyApp,
errors::Web3ProxyResult, errors::Web3ProxyResult,
@ -29,7 +29,6 @@ use handlebars::Handlebars;
use hashbrown::HashMap; use hashbrown::HashMap;
use http::{HeaderMap, StatusCode}; use http::{HeaderMap, StatusCode};
use serde_json::json; use serde_json::json;
use serde_json::value::RawValue;
use std::net::IpAddr; use std::net::IpAddr;
use std::str::from_utf8_mut; use std::str::from_utf8_mut;
use std::sync::atomic::AtomicU64; use std::sync::atomic::AtomicU64;
@ -317,26 +316,22 @@ async fn proxy_web3_socket(
} }
async fn websocket_proxy_web3_rpc( async fn websocket_proxy_web3_rpc(
app: Arc<Web3ProxyApp>, app: &Arc<Web3ProxyApp>,
authorization: Arc<Authorization>, authorization: Arc<Authorization>,
json_request: JsonRpcRequest, json_request: JsonRpcRequest,
response_sender: &mpsc::Sender<Message>, response_sender: &mpsc::Sender<Message>,
subscription_count: &AtomicU64, subscription_count: &AtomicU64,
subscriptions: &AsyncRwLock<HashMap<U64, AbortHandle>>, subscriptions: &AsyncRwLock<HashMap<U64, AbortHandle>>,
) -> (Box<RawValue>, Web3ProxyResult<jsonrpc::Response>) { ) -> Web3ProxyResult<jsonrpc::Response> {
let response_id = json_request.id.clone(); match &json_request.method[..] {
// TODO: move this to a seperate function so we can use the try operator
let response: Web3ProxyResult<jsonrpc::Response> = match &json_request.method[..] {
"eth_subscribe" => { "eth_subscribe" => {
let web3_request =
Web3Request::new_with_app(app, authorization, None, json_request.into(), None)
.await;
// TODO: how can we subscribe with proxy_mode? // TODO: how can we subscribe with proxy_mode?
match app match app
.eth_subscribe( .eth_subscribe(web3_request, subscription_count, response_sender.clone())
authorization,
json_request,
subscription_count,
response_sender.clone(),
)
.await .await
{ {
Ok((handle, response)) => { Ok((handle, response)) => {
@ -357,25 +352,25 @@ async fn websocket_proxy_web3_rpc(
} }
} }
"eth_unsubscribe" => { "eth_unsubscribe" => {
let request_metadata = let web3_request =
RequestMetadata::new(&app, authorization, &json_request, None).await; Web3Request::new_with_app(app, authorization, None, json_request.into(), None)
.await;
let maybe_id = json_request // sometimes we get a list, sometimes we get the id directly
.params // check for the list first, then just use the whole thing
let maybe_id = web3_request
.request
.params()
.get(0) .get(0)
.cloned() .unwrap_or_else(|| web3_request.request.params())
.unwrap_or(json_request.params); .clone();
let subscription_id: U64 = match serde_json::from_value::<U64>(maybe_id) { let subscription_id: U64 = match serde_json::from_value::<U64>(maybe_id) {
Ok(x) => x, Ok(x) => x,
Err(err) => { Err(err) => {
return ( return Err(Web3ProxyError::BadRequest(
response_id, format!("unexpected params given for eth_unsubscribe: {:?}", err).into(),
Err(Web3ProxyError::BadRequest( ));
format!("unexpected params given for eth_unsubscribe: {:?}", err)
.into(),
)),
)
} }
}; };
@ -392,11 +387,11 @@ async fn websocket_proxy_web3_rpc(
}; };
let response = let response =
jsonrpc::ParsedResponse::from_value(json!(partial_response), response_id.clone()); jsonrpc::ParsedResponse::from_value(json!(partial_response), web3_request.id());
// TODO: better way of passing in ParsedResponse // TODO: better way of passing in ParsedResponse
let response = jsonrpc::SingleResponse::Parsed(response); let response = jsonrpc::SingleResponse::Parsed(response);
request_metadata.add_response(&response); web3_request.add_response(&response);
let response = response.parsed().await.expect("Response already parsed"); let response = response.parsed().await.expect("Response already parsed");
Ok(response.into()) Ok(response.into())
@ -405,32 +400,27 @@ async fn websocket_proxy_web3_rpc(
.proxy_web3_rpc(authorization, json_request.into()) .proxy_web3_rpc(authorization, json_request.into())
.await .await
.map(|(_, response, _)| response), .map(|(_, response, _)| response),
}; }
(response_id, response)
} }
/// websockets support a few more methods than http clients /// websockets support a few more methods than http clients
async fn handle_socket_payload( async fn handle_socket_payload(
app: Arc<Web3ProxyApp>, app: &Arc<Web3ProxyApp>,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
payload: &str, payload: &str,
response_sender: &mpsc::Sender<Message>, response_sender: &mpsc::Sender<Message>,
subscription_count: &AtomicU64, subscription_count: &AtomicU64,
subscriptions: Arc<AsyncRwLock<HashMap<U64, AbortHandle>>>, subscriptions: Arc<AsyncRwLock<HashMap<U64, AbortHandle>>>,
) -> Web3ProxyResult<(Message, Option<OwnedSemaphorePermit>)> { ) -> Web3ProxyResult<(Message, Option<OwnedSemaphorePermit>)> {
let (authorization, semaphore) = authorization.check_again(&app).await?; let (authorization, semaphore) = authorization.check_again(app).await?;
// TODO: handle batched requests // TODO: handle batched requests
let (response_id, response) = match serde_json::from_str::<JsonRpcRequest>(payload) { let (response_id, response) = match serde_json::from_str::<JsonRpcRequest>(payload) {
Ok(json_request) => { Ok(json_request) => {
// // TODO: move tarpit code to an invidual request, or change this to handle enums let request_id = json_request.id.clone();
// json_request
// .tarpit_invalid(&app, &authorization, Duration::from_secs(2))
// .await?;
// TODO: move this to a seperate function so we can use the try operator // TODO: move this to a seperate function so we can use the try operator
websocket_proxy_web3_rpc( let x = websocket_proxy_web3_rpc(
app, app,
authorization.clone(), authorization.clone(),
json_request, json_request,
@ -438,12 +428,11 @@ async fn handle_socket_payload(
subscription_count, subscription_count,
&subscriptions, &subscriptions,
) )
.await .await;
}
Err(err) => { (request_id, x)
let id = JsonRpcId::None.to_raw_value();
(id, Err(err.into()))
} }
Err(err) => (Default::default(), Err(err.into())),
}; };
let response_str = match response { let response_str = match response {
@ -488,7 +477,7 @@ async fn read_web3_socket(
let (response_msg, _semaphore) = match msg { let (response_msg, _semaphore) = match msg {
Message::Text(payload) => { Message::Text(payload) => {
match handle_socket_payload( match handle_socket_payload(
app, &app,
&authorization, &authorization,
&payload, &payload,
&response_sender, &response_sender,
@ -522,7 +511,7 @@ async fn read_web3_socket(
let payload = from_utf8_mut(&mut payload).unwrap(); let payload = from_utf8_mut(&mut payload).unwrap();
let (m, s) = match handle_socket_payload( let (m, s) = match handle_socket_payload(
app, &app,
&authorization, &authorization,
payload, payload,
&response_sender, &response_sender,
@ -587,3 +576,17 @@ async fn write_web3_socket(
// TODO: decrement counter for open websockets // TODO: decrement counter for open websockets
} }
#[cfg(test)]
mod test {
#[test]
fn nulls_and_defaults() {
let x = serde_json::Value::Null;
let x = serde_json::to_string(&x).unwrap();
let y: Box<serde_json::value::RawValue> = Default::default();
let y = serde_json::to_string(&y).unwrap();
assert_eq!(x, y);
}
}

View File

@ -208,7 +208,7 @@ async fn _status(app: Arc<Web3ProxyApp>) -> (StatusCode, &'static str, Bytes) {
"hostname": app.hostname, "hostname": app.hostname,
"payment_factory_address": app.config.deposit_factory_contract, "payment_factory_address": app.config.deposit_factory_contract,
"pending_txid_firehose": app.pending_txid_firehose, "pending_txid_firehose": app.pending_txid_firehose,
"private_rpcs": app.private_rpcs, "private_rpcs": app.protected_rpcs,
"uptime": app.start.elapsed().as_secs(), "uptime": app.start.elapsed().as_secs(),
"version": APP_USER_AGENT, "version": APP_USER_AGENT,
}); });

View File

@ -8,7 +8,7 @@ use tokio::stream::Stream;
struct SizingBody<B> { struct SizingBody<B> {
inner: B, inner: B,
request_metadata: RequestMetadata, web3_request: RequestMetadata,
} }
impl<B> SizingBody<B> { impl<B> SizingBody<B> {

View File

@ -1,8 +1,9 @@
//! Handle registration, logins, and managing account data. //! Handle registration, logins, and managing account data.
use crate::app::Web3ProxyApp; use crate::app::Web3ProxyApp;
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResponse}; use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResponse};
use crate::frontend::authorization::{login_is_authorized, RpcSecretKey}; use crate::frontend::authorization::login_is_authorized;
use crate::globals::{global_db_conn, global_db_replica_conn}; use crate::globals::{global_db_conn, global_db_replica_conn};
use crate::secrets::RpcSecretKey;
use crate::user_token::UserBearerToken; use crate::user_token::UserBearerToken;
use axum::{ use axum::{
extract::{Path, Query}, extract::{Path, Query},
@ -125,7 +126,7 @@ pub async fn user_login_get(
resources: vec![], resources: vec![],
}; };
let db_conn = global_db_conn().await?; let db_conn = global_db_conn()?;
// delete any expired logins // delete any expired logins
if let Err(err) = login::Entity::delete_many() if let Err(err) = login::Entity::delete_many()
@ -262,7 +263,7 @@ pub async fn user_login_post(
let login_nonce = UserBearerToken::from_str(&their_msg.nonce)?; let login_nonce = UserBearerToken::from_str(&their_msg.nonce)?;
// fetch the message we gave them from our database // fetch the message we gave them from our database
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
let user_pending_login = pending_login::Entity::find() let user_pending_login = pending_login::Entity::find()
.filter(pending_login::Column::Nonce.eq(Uuid::from(login_nonce))) .filter(pending_login::Column::Nonce.eq(Uuid::from(login_nonce)))
@ -294,7 +295,7 @@ pub async fn user_login_post(
.one(db_replica.as_ref()) .one(db_replica.as_ref())
.await?; .await?;
let db_conn = global_db_conn().await?; let db_conn = global_db_conn()?;
let (caller, user_rpc_keys, status_code) = match caller { let (caller, user_rpc_keys, status_code) = match caller {
None => { None => {
@ -447,7 +448,7 @@ pub async fn user_logout_post(
) -> Web3ProxyResponse { ) -> Web3ProxyResponse {
let user_bearer = UserBearerToken::try_from(bearer)?; let user_bearer = UserBearerToken::try_from(bearer)?;
let db_conn = global_db_conn().await?; let db_conn = global_db_conn()?;
if let Err(err) = login::Entity::delete_many() if let Err(err) = login::Entity::delete_many()
.filter(login::Column::BearerToken.eq(user_bearer.uuid())) .filter(login::Column::BearerToken.eq(user_bearer.uuid()))

View File

@ -46,7 +46,7 @@ pub async fn user_balance_get(
) -> Web3ProxyResponse { ) -> Web3ProxyResponse {
let user = app.bearer_is_authorized(bearer).await?; let user = app.bearer_is_authorized(bearer).await?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
let user_balance = match Balance::try_from_db(db_replica.as_ref(), user.id).await? { let user_balance = match Balance::try_from_db(db_replica.as_ref(), user.id).await? {
None => Balance::default(), None => Balance::default(),
@ -66,7 +66,7 @@ pub async fn user_chain_deposits_get(
) -> Web3ProxyResponse { ) -> Web3ProxyResponse {
let user = app.bearer_is_authorized(bearer).await?; let user = app.bearer_is_authorized(bearer).await?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
// Filter by user ... // Filter by user ...
let receipts = increase_on_chain_balance_receipt::Entity::find() let receipts = increase_on_chain_balance_receipt::Entity::find()
@ -105,7 +105,7 @@ pub async fn user_stripe_deposits_get(
) -> Web3ProxyResponse { ) -> Web3ProxyResponse {
let user = app.bearer_is_authorized(bearer).await?; let user = app.bearer_is_authorized(bearer).await?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
// Filter by user ... // Filter by user ...
let receipts = stripe_increase_balance_receipt::Entity::find() let receipts = stripe_increase_balance_receipt::Entity::find()
@ -148,7 +148,7 @@ pub async fn user_admin_deposits_get(
) -> Web3ProxyResponse { ) -> Web3ProxyResponse {
let user = app.bearer_is_authorized(bearer).await?; let user = app.bearer_is_authorized(bearer).await?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
// Filter by user ... // Filter by user ...
let receipts = admin_increase_balance_receipt::Entity::find() let receipts = admin_increase_balance_receipt::Entity::find()
@ -207,7 +207,7 @@ pub async fn user_balance_post(
Web3ProxyError::BadRequest(format!("unable to parse tx_hash: {}", err).into()) Web3ProxyError::BadRequest(format!("unable to parse tx_hash: {}", err).into())
})?; })?;
let db_conn = global_db_conn().await?; let db_conn = global_db_conn()?;
// get the transaction receipt // get the transaction receipt
let transaction_receipt = app let transaction_receipt = app
@ -496,7 +496,7 @@ pub async fn handle_uncle_block(
// user_id -> balance that we need to subtract // user_id -> balance that we need to subtract
let mut reversed_balances: HashMap<u64, Decimal> = HashMap::new(); let mut reversed_balances: HashMap<u64, Decimal> = HashMap::new();
let db_conn = global_db_conn().await?; let db_conn = global_db_conn()?;
// delete any deposit txids with uncle_hash // delete any deposit txids with uncle_hash
for reversed_deposit in increase_on_chain_balance_receipt::Entity::find() for reversed_deposit in increase_on_chain_balance_receipt::Entity::find()

View File

@ -69,9 +69,7 @@ pub async fn user_balance_stripe_post(
return Ok("Received Webhook".into_response()); return Ok("Received Webhook".into_response());
} }
let db_conn = global_db_conn() let db_conn = global_db_conn().web3_context("query_user_stats needs a db")?;
.await
.web3_context("query_user_stats needs a db")?;
if stripe_increase_balance_receipt::Entity::find() if stripe_increase_balance_receipt::Entity::find()
.filter( .filter(

View File

@ -36,7 +36,7 @@ pub async fn user_referral_link_get(
// First get the bearer token and check if the user is logged in // First get the bearer token and check if the user is logged in
let user = app.bearer_is_authorized(bearer).await?; let user = app.bearer_is_authorized(bearer).await?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
// Then get the referral token. If one doesn't exist, create one // Then get the referral token. If one doesn't exist, create one
let user_referrer = referrer::Entity::find() let user_referrer = referrer::Entity::find()
@ -48,7 +48,7 @@ pub async fn user_referral_link_get(
Some(x) => (x.referral_code, StatusCode::OK), Some(x) => (x.referral_code, StatusCode::OK),
None => { None => {
// Connect to the database for writes // Connect to the database for writes
let db_conn = global_db_conn().await?; let db_conn = global_db_conn()?;
let referral_code = ReferralCode::default().to_string(); let referral_code = ReferralCode::default().to_string();
@ -81,7 +81,7 @@ pub async fn user_used_referral_stats(
// First get the bearer token and check if the user is logged in // First get the bearer token and check if the user is logged in
let user = app.bearer_is_authorized(bearer).await?; let user = app.bearer_is_authorized(bearer).await?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
// Get all referral records associated with this user // Get all referral records associated with this user
let referrals = referee::Entity::find() let referrals = referee::Entity::find()
@ -139,7 +139,7 @@ pub async fn user_shared_referral_stats(
// First get the bearer token and check if the user is logged in // First get the bearer token and check if the user is logged in
let user = app.bearer_is_authorized(bearer).await?; let user = app.bearer_is_authorized(bearer).await?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
// Get all referral records associated with this user // Get all referral records associated with this user
let query_result = referrer::Entity::find() let query_result = referrer::Entity::find()

View File

@ -1,8 +1,8 @@
//! Handle registration, logins, and managing account data. //! Handle registration, logins, and managing account data.
use super::super::authorization::RpcSecretKey;
use crate::app::Web3ProxyApp; use crate::app::Web3ProxyApp;
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResponse}; use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResponse};
use crate::globals::{global_db_conn, global_db_replica_conn}; use crate::globals::{global_db_conn, global_db_replica_conn};
use crate::secrets::RpcSecretKey;
use axum::headers::{Header, Origin, Referer, UserAgent}; use axum::headers::{Header, Origin, Referer, UserAgent};
use axum::{ use axum::{
headers::{authorization::Bearer, Authorization}, headers::{authorization::Bearer, Authorization},
@ -32,7 +32,7 @@ pub async fn rpc_keys_get(
) -> Web3ProxyResponse { ) -> Web3ProxyResponse {
let user = app.bearer_is_authorized(bearer).await?; let user = app.bearer_is_authorized(bearer).await?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
// This is basically completely copied from sea-orm. Not optimal, but it keeps the format identical to before (while adding the final key) // This is basically completely copied from sea-orm. Not optimal, but it keeps the format identical to before (while adding the final key)
// We could also pack the below stuff into it's subfield, but then we would destroy the format. Both options are fine for now though // We could also pack the below stuff into it's subfield, but then we would destroy the format. Both options are fine for now though
@ -161,7 +161,7 @@ pub async fn rpc_keys_management(
let user = app.bearer_is_authorized(bearer).await?; let user = app.bearer_is_authorized(bearer).await?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
let mut uk = match payload.key_id { let mut uk = match payload.key_id {
Some(existing_key_id) => { Some(existing_key_id) => {
@ -341,7 +341,7 @@ pub async fn rpc_keys_management(
} }
let uk = if uk.is_changed() { let uk = if uk.is_changed() {
let db_conn = global_db_conn().await?; let db_conn = global_db_conn()?;
uk.save(&db_conn) uk.save(&db_conn)
.await .await

View File

@ -48,7 +48,7 @@ pub async fn user_revert_logs_get(
response.insert("chain_id", json!(chain_id)); response.insert("chain_id", json!(chain_id));
response.insert("query_start", json!(query_start.timestamp() as u64)); response.insert("query_start", json!(query_start.timestamp() as u64));
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
let uks = rpc_key::Entity::find() let uks = rpc_key::Entity::find()
.filter(rpc_key::Column::UserId.eq(user.id)) .filter(rpc_key::Column::UserId.eq(user.id))
@ -141,7 +141,7 @@ pub async fn user_mysql_stats_get(
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>, TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
) -> Web3ProxyResponse { ) -> Web3ProxyResponse {
let user = app.bearer_is_authorized(bearer).await?; let user = app.bearer_is_authorized(bearer).await?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
// Fetch everything from mysql, joined // Fetch everything from mysql, joined
let stats = rpc_key::Entity::find() let stats = rpc_key::Entity::find()

View File

@ -1,8 +1,8 @@
//! Handle subusers, viewing subusers, and viewing accessible rpc-keys //! Handle subusers, viewing subusers, and viewing accessible rpc-keys
use crate::app::Web3ProxyApp; use crate::app::Web3ProxyApp;
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResponse}; use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResponse};
use crate::frontend::authorization::RpcSecretKey;
use crate::globals::{global_db_conn, global_db_replica_conn}; use crate::globals::{global_db_conn, global_db_replica_conn};
use crate::secrets::RpcSecretKey;
use anyhow::Context; use anyhow::Context;
use axum::{ use axum::{
extract::Query, extract::Query,
@ -36,7 +36,7 @@ pub async fn get_keys_as_subuser(
// First, authenticate // First, authenticate
let subuser = app.bearer_is_authorized(bearer).await?; let subuser = app.bearer_is_authorized(bearer).await?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
// TODO: JOIN over RPC_KEY, SUBUSER, PRIMARY_USER and return these items // TODO: JOIN over RPC_KEY, SUBUSER, PRIMARY_USER and return these items
@ -101,7 +101,7 @@ pub async fn get_subusers(
// First, authenticate // First, authenticate
let user = app.bearer_is_authorized(bearer).await?; let user = app.bearer_is_authorized(bearer).await?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
let rpc_key: u64 = params let rpc_key: u64 = params
.remove("key_id") .remove("key_id")
@ -173,7 +173,7 @@ pub async fn modify_subuser(
// First, authenticate // First, authenticate
let user = app.bearer_is_authorized(bearer).await?; let user = app.bearer_is_authorized(bearer).await?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
trace!("Parameters are: {:?}", params); trace!("Parameters are: {:?}", params);
@ -257,7 +257,7 @@ pub async fn modify_subuser(
} }
// TODO: There is a good chunk of duplicate logic as login-post. Consider refactoring ... // TODO: There is a good chunk of duplicate logic as login-post. Consider refactoring ...
let db_conn = global_db_conn().await?; let db_conn = global_db_conn()?;
let (subuser, _subuser_rpc_keys, _status_code) = match subuser { let (subuser, _subuser_rpc_keys, _status_code) = match subuser {
None => { None => {

View File

@ -1,17 +1,19 @@
use crate::{errors::Web3ProxyError, relational_db::DatabaseReplica}; use crate::{app::Web3ProxyApp, errors::Web3ProxyError, relational_db::DatabaseReplica};
use derivative::Derivative; use derivative::Derivative;
use migration::{ use migration::{
sea_orm::{DatabaseConnection, DatabaseTransaction, TransactionTrait}, sea_orm::{DatabaseConnection, DatabaseTransaction, TransactionTrait},
DbErr, DbErr,
}; };
use std::sync::{Arc, LazyLock}; use parking_lot::RwLock;
use tokio::sync::RwLock as AsyncRwLock; use std::sync::{Arc, LazyLock, OnceLock};
pub static DB_CONN: LazyLock<AsyncRwLock<Result<DatabaseConnection, DatabaseError>>> = pub static APP: OnceLock<Arc<Web3ProxyApp>> = OnceLock::new();
LazyLock::new(|| AsyncRwLock::new(Err(DatabaseError::NotConfigured)));
pub static DB_REPLICA: LazyLock<AsyncRwLock<Result<DatabaseReplica, DatabaseError>>> = pub static DB_CONN: LazyLock<RwLock<Result<DatabaseConnection, DatabaseError>>> =
LazyLock::new(|| AsyncRwLock::new(Err(DatabaseError::NotConfigured))); LazyLock::new(|| RwLock::new(Err(DatabaseError::NotConfigured)));
pub static DB_REPLICA: LazyLock<RwLock<Result<DatabaseReplica, DatabaseError>>> =
LazyLock::new(|| RwLock::new(Err(DatabaseError::NotConfigured)));
#[derive(Clone, Debug, Derivative)] #[derive(Clone, Debug, Derivative)]
pub enum DatabaseError { pub enum DatabaseError {
@ -32,14 +34,15 @@ impl From<DatabaseError> for Web3ProxyError {
} }
} }
/// TODO: do we need this clone? should we just do DB_CONN.read() whenever we need a Connection?
#[inline] #[inline]
pub async fn global_db_conn() -> Result<DatabaseConnection, DatabaseError> { pub fn global_db_conn() -> Result<DatabaseConnection, DatabaseError> {
DB_CONN.read().await.clone() DB_CONN.read().clone()
} }
#[inline] #[inline]
pub async fn global_db_transaction() -> Result<DatabaseTransaction, DatabaseError> { pub async fn global_db_transaction() -> Result<DatabaseTransaction, DatabaseError> {
let x = global_db_conn().await?; let x = global_db_conn()?;
let x = x let x = x
.begin() .begin()
@ -49,7 +52,8 @@ pub async fn global_db_transaction() -> Result<DatabaseTransaction, DatabaseErro
Ok(x) Ok(x)
} }
/// TODO: do we need this clone?
#[inline] #[inline]
pub async fn global_db_replica_conn() -> Result<DatabaseReplica, DatabaseError> { pub fn global_db_replica_conn() -> Result<DatabaseReplica, DatabaseError> {
DB_REPLICA.read().await.clone() DB_REPLICA.read().clone()
} }

View File

@ -20,17 +20,17 @@ use tokio::time::sleep;
use crate::app::Web3ProxyApp; use crate::app::Web3ProxyApp;
use crate::errors::{Web3ProxyError, Web3ProxyResult}; use crate::errors::{Web3ProxyError, Web3ProxyResult};
use crate::frontend::authorization::{Authorization, RequestMetadata, RequestOrMethod}; use crate::frontend::authorization::{Authorization, RequestOrMethod, Web3Request};
use crate::response_cache::JsonRpcResponseEnum; use crate::response_cache::JsonRpcResponseEnum;
pub trait JsonRpcParams = fmt::Debug + serde::Serialize + Send + Sync + 'static; pub trait JsonRpcParams = fmt::Debug + serde::Serialize + Send + Sync + 'static;
pub trait JsonRpcResultData = serde::Serialize + serde::de::DeserializeOwned + fmt::Debug + Send; pub trait JsonRpcResultData = serde::Serialize + serde::de::DeserializeOwned + fmt::Debug + Send;
// TODO: borrow values to avoid allocs if possible /// TODO: borrow values to avoid allocs if possible
#[derive(Debug, Serialize)] #[derive(Debug, Serialize)]
pub struct ParsedResponse<T = Arc<RawValue>> { pub struct ParsedResponse<T = Arc<RawValue>> {
jsonrpc: String, pub jsonrpc: String,
id: Option<Box<RawValue>>, pub id: Box<RawValue>,
#[serde(flatten)] #[serde(flatten)]
pub payload: Payload<T>, pub payload: Payload<T>,
} }
@ -40,7 +40,7 @@ impl ParsedResponse {
let result = serde_json::value::to_raw_value(&value) let result = serde_json::value::to_raw_value(&value)
.expect("this should not fail") .expect("this should not fail")
.into(); .into();
Self::from_result(result, Some(id)) Self::from_result(result, id)
} }
} }
@ -49,16 +49,16 @@ impl ParsedResponse<Arc<RawValue>> {
match data { match data {
JsonRpcResponseEnum::NullResult => { JsonRpcResponseEnum::NullResult => {
let x: Box<RawValue> = Default::default(); let x: Box<RawValue> = Default::default();
Self::from_result(Arc::from(x), Some(id)) Self::from_result(Arc::from(x), id)
} }
JsonRpcResponseEnum::RpcError { error_data, .. } => Self::from_error(error_data, id), JsonRpcResponseEnum::RpcError { error_data, .. } => Self::from_error(error_data, id),
JsonRpcResponseEnum::Result { value, .. } => Self::from_result(value, Some(id)), JsonRpcResponseEnum::Result { value, .. } => Self::from_result(value, id),
} }
} }
} }
impl<T> ParsedResponse<T> { impl<T> ParsedResponse<T> {
pub fn from_result(result: T, id: Option<Box<RawValue>>) -> Self { pub fn from_result(result: T, id: Box<RawValue>) -> Self {
Self { Self {
jsonrpc: "2.0".to_string(), jsonrpc: "2.0".to_string(),
id, id,
@ -69,7 +69,7 @@ impl<T> ParsedResponse<T> {
pub fn from_error(error: JsonRpcErrorData, id: Box<RawValue>) -> Self { pub fn from_error(error: JsonRpcErrorData, id: Box<RawValue>) -> Self {
Self { Self {
jsonrpc: "2.0".to_string(), jsonrpc: "2.0".to_string(),
id: Some(id), id,
payload: Payload::Error { error }, payload: Payload::Error { error },
} }
} }
@ -171,6 +171,8 @@ where
} }
} }
let id = id.unwrap_or_default();
// jsonrpc version must be present in all responses // jsonrpc version must be present in all responses
let jsonrpc = jsonrpc let jsonrpc = jsonrpc
.ok_or_else(|| de::Error::missing_field("jsonrpc"))? .ok_or_else(|| de::Error::missing_field("jsonrpc"))?
@ -209,7 +211,7 @@ pub enum Payload<T> {
pub struct StreamResponse { pub struct StreamResponse {
buffer: Bytes, buffer: Bytes,
response: reqwest::Response, response: reqwest::Response,
request_metadata: Arc<RequestMetadata>, web3_request: Arc<Web3Request>,
} }
impl StreamResponse { impl StreamResponse {
@ -233,7 +235,7 @@ impl IntoResponse for StreamResponse {
.map_ok(move |x| { .map_ok(move |x| {
let len = x.len(); let len = x.len();
self.request_metadata.add_response(len); self.web3_request.add_response(len);
x x
}); });
@ -257,7 +259,7 @@ where
pub async fn read_if_short( pub async fn read_if_short(
mut response: reqwest::Response, mut response: reqwest::Response,
nbytes: u64, nbytes: u64,
request_metadata: Arc<RequestMetadata>, web3_request: Arc<Web3Request>,
) -> Result<SingleResponse<T>, ProviderError> { ) -> Result<SingleResponse<T>, ProviderError> {
match response.content_length() { match response.content_length() {
// short // short
@ -266,7 +268,7 @@ where
Some(_) => Ok(Self::Stream(StreamResponse { Some(_) => Ok(Self::Stream(StreamResponse {
buffer: Bytes::new(), buffer: Bytes::new(),
response, response,
request_metadata, web3_request,
})), })),
None => { None => {
let mut buffer = BytesMut::new(); let mut buffer = BytesMut::new();
@ -282,7 +284,7 @@ where
Ok(Self::Stream(StreamResponse { Ok(Self::Stream(StreamResponse {
buffer, buffer,
response, response,
request_metadata, web3_request,
})) }))
} }
} }
@ -312,6 +314,17 @@ where
}, },
} }
} }
pub fn set_id(&mut self, id: Box<RawValue>) {
match self {
SingleResponse::Parsed(x) => {
x.id = id;
}
SingleResponse::Stream(..) => {
// stream responses will hopefully always have the right id already because we pass the orignal id all the way from the front to the back
}
}
}
} }
impl<T> From<ParsedResponse<T>> for SingleResponse<T> { impl<T> From<ParsedResponse<T>> for SingleResponse<T> {
@ -381,6 +394,7 @@ where
pub struct JsonRpcRequest { pub struct JsonRpcRequest {
pub jsonrpc: String, pub jsonrpc: String,
/// id could be a stricter type, but many rpcs do things against the spec /// id could be a stricter type, but many rpcs do things against the spec
/// TODO: this gets cloned into the response object often. would an Arc be better? That has its own overhead and these are short strings
pub id: Box<RawValue>, pub id: Box<RawValue>,
pub method: String, pub method: String,
#[serde_inline_default(serde_json::Value::Null)] #[serde_inline_default(serde_json::Value::Null)]
@ -392,6 +406,7 @@ pub enum JsonRpcId {
None, None,
Number(u64), Number(u64),
String(String), String(String),
Raw(Box<RawValue>),
} }
impl JsonRpcId { impl JsonRpcId {
@ -403,6 +418,7 @@ impl JsonRpcId {
serde_json::from_value(json!(x)).expect("number id should always work") serde_json::from_value(json!(x)).expect("number id should always work")
} }
Self::String(x) => serde_json::from_str(&x).expect("string id should always work"), Self::String(x) => serde_json::from_str(&x).expect("string id should always work"),
Self::Raw(x) => x,
} }
} }
} }
@ -473,7 +489,7 @@ impl JsonRpcRequestEnum {
/// returns the id of the first invalid result (if any). None is good /// returns the id of the first invalid result (if any). None is good
pub async fn tarpit_invalid( pub async fn tarpit_invalid(
&self, &self,
app: &Web3ProxyApp, app: &Arc<Web3ProxyApp>,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
duration: Duration, duration: Duration,
) -> Result<(), AxumResponse> { ) -> Result<(), AxumResponse> {
@ -486,11 +502,16 @@ impl JsonRpcRequestEnum {
.expect("JsonRpcRequestEnum should always serialize") .expect("JsonRpcRequestEnum should always serialize")
.len(); .len();
let request = RequestOrMethod::Method("invalid_method", size);
// TODO: create a stat so we can penalize // TODO: create a stat so we can penalize
// TODO: what request size // TODO: what request size
let metadata = RequestMetadata::new(app, authorization.clone(), request, None).await; let metadata = Web3Request::new_with_app(
app,
authorization.clone(),
None,
RequestOrMethod::Method("invalid_method".into(), size),
None,
)
.await;
metadata metadata
.user_error_response .user_error_response
@ -676,26 +697,22 @@ impl JsonRpcRequest {
} }
impl JsonRpcForwardedResponse { impl JsonRpcForwardedResponse {
pub fn from_anyhow_error( pub fn from_anyhow_error(err: anyhow::Error, code: Option<i64>, id: Box<RawValue>) -> Self {
err: anyhow::Error,
code: Option<i64>,
id: Option<Box<RawValue>>,
) -> Self {
let message = format!("{:?}", err); let message = format!("{:?}", err);
Self::from_string(message, code, id) Self::from_string(message, code, id)
} }
pub fn from_str(message: &str, code: Option<i64>, id: Option<Box<RawValue>>) -> Self { pub fn from_str(message: &str, code: Option<i64>, id: Box<RawValue>) -> Self {
Self::from_string(message.to_string(), code, id) Self::from_string(message.to_string(), code, id)
} }
pub fn from_string(message: String, code: Option<i64>, id: Option<Box<RawValue>>) -> Self { pub fn from_string(message: String, code: Option<i64>, id: Box<RawValue>) -> Self {
// TODO: this is too verbose. plenty of errors are valid, like users giving an invalid address. no need to log that // TODO: this is too verbose. plenty of errors are valid, like users giving an invalid address. no need to log that
// TODO: can we somehow get the initial request here? if we put that into a tracing span, will things slow down a ton? // TODO: can we somehow get the initial request here? if we put that into a tracing span, will things slow down a ton?
JsonRpcForwardedResponse { JsonRpcForwardedResponse {
jsonrpc: "2.0", jsonrpc: "2.0",
id: id.unwrap_or_default(), id,
result: None, result: None,
error: Some(JsonRpcErrorData { error: Some(JsonRpcErrorData {
code: code.unwrap_or(-32099), code: code.unwrap_or(-32099),
@ -772,7 +789,7 @@ mod tests {
fn serialize_response() { fn serialize_response() {
let obj = ParsedResponse { let obj = ParsedResponse {
jsonrpc: "2.0".to_string(), jsonrpc: "2.0".to_string(),
id: None, id: Default::default(),
payload: Payload::Success { payload: Payload::Success {
result: serde_json::value::RawValue::from_string("100".to_string()).unwrap(), result: serde_json::value::RawValue::from_string("100".to_string()).unwrap(),
}, },

155
web3_proxy/src/kafka.rs Normal file
View File

@ -0,0 +1,155 @@
use crate::app::Web3ProxyApp;
use crate::frontend::authorization::{Authorization, RequestOrMethod};
use core::fmt;
use ethers::types::U64;
use rdkafka::message::{Header as KafkaHeader, OwnedHeaders as KafkaOwnedHeaders, OwnedMessage};
use rdkafka::producer::{FutureProducer, FutureRecord};
use rdkafka::util::Timeout as KafkaTimeout;
use std::sync::atomic::{self, AtomicUsize};
use std::sync::Arc;
use std::time::Duration;
use tokio::task::JoinHandle;
use tracing::error;
use ulid::Ulid;
pub struct KafkaDebugLogger {
topic: String,
key: Vec<u8>,
headers: KafkaOwnedHeaders,
producer: FutureProducer,
num_requests: AtomicUsize,
num_responses: AtomicUsize,
}
impl fmt::Debug for KafkaDebugLogger {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("KafkaDebugLogger")
.field("topic", &self.topic)
.finish_non_exhaustive()
}
}
type KafkaLogResult = Result<(i32, i64), (rdkafka::error::KafkaError, OwnedMessage)>;
impl KafkaDebugLogger {
pub fn try_new(
app: &Web3ProxyApp,
authorization: Arc<Authorization>,
head_block_num: Option<U64>,
kafka_topic: &str,
request_ulid: Ulid,
) -> Option<Arc<Self>> {
let kafka_producer = app.kafka_producer.clone()?;
let kafka_topic = kafka_topic.to_string();
let rpc_secret_key_id = authorization
.checks
.rpc_secret_key_id
.map(|x| x.get())
.unwrap_or_default();
let kafka_key =
rmp_serde::to_vec(&rpc_secret_key_id).expect("ids should always serialize with rmp");
let chain_id = app.config.chain_id;
let head_block_num = head_block_num.or_else(|| app.balanced_rpcs.head_block_num());
// TODO: would be nice to have the block hash too
// another item is added with the response, so initial_capacity is +1 what is needed here
let kafka_headers = KafkaOwnedHeaders::new_with_capacity(6)
.insert(KafkaHeader {
key: "rpc_secret_key_id",
value: authorization
.checks
.rpc_secret_key_id
.map(|x| x.to_string())
.as_ref(),
})
.insert(KafkaHeader {
key: "ip",
value: Some(&authorization.ip.to_string()),
})
.insert(KafkaHeader {
key: "request_ulid",
value: Some(&request_ulid.to_string()),
})
.insert(KafkaHeader {
key: "head_block_num",
value: head_block_num.map(|x| x.to_string()).as_ref(),
})
.insert(KafkaHeader {
key: "chain_id",
value: Some(&chain_id.to_le_bytes()),
});
// save the key and headers for when we log the response
let x = Self {
topic: kafka_topic,
key: kafka_key,
headers: kafka_headers,
producer: kafka_producer,
num_requests: 0.into(),
num_responses: 0.into(),
};
let x = Arc::new(x);
Some(x)
}
fn background_log(&self, payload: Vec<u8>) -> JoinHandle<KafkaLogResult> {
let topic = self.topic.clone();
let key = self.key.clone();
let producer = self.producer.clone();
let headers = self.headers.clone();
let f = async move {
let record = FutureRecord::to(&topic)
.key(&key)
.payload(&payload)
.headers(headers);
let produce_future =
producer.send(record, KafkaTimeout::After(Duration::from_secs(5 * 60)));
let kafka_response = produce_future.await;
if let Err((err, msg)) = kafka_response.as_ref() {
error!("produce kafka request: {} - {:?}", err, msg);
// TODO: re-queue the msg? log somewhere else like a file on disk?
// TODO: this is bad and should probably trigger an alarm
};
kafka_response
};
tokio::spawn(f)
}
/// for opt-in debug usage, log the request to kafka
/// TODO: generic type for request
pub fn log_debug_request(&self, request: &RequestOrMethod) -> JoinHandle<KafkaLogResult> {
// TODO: is rust message pack a good choice? try rkyv instead
let payload =
rmp_serde::to_vec(&request).expect("requests should always serialize with rmp");
self.num_requests.fetch_add(1, atomic::Ordering::Relaxed);
self.background_log(payload)
}
pub fn log_debug_response<R>(&self, response: &R) -> JoinHandle<KafkaLogResult>
where
R: serde::Serialize,
{
let payload =
rmp_serde::to_vec(&response).expect("requests should always serialize with rmp");
self.num_responses.fetch_add(1, atomic::Ordering::Relaxed);
self.background_log(payload)
}
}

View File

@ -16,6 +16,7 @@ pub mod frontend;
pub mod globals; pub mod globals;
pub mod http_params; pub mod http_params;
pub mod jsonrpc; pub mod jsonrpc;
pub mod kafka;
pub mod pagerduty; pub mod pagerduty;
pub mod prelude; pub mod prelude;
pub mod premium; pub mod premium;
@ -24,6 +25,7 @@ pub mod referral_code;
pub mod relational_db; pub mod relational_db;
pub mod response_cache; pub mod response_cache;
pub mod rpcs; pub mod rpcs;
pub mod secrets;
pub mod stats; pub mod stats;
pub mod test_utils; pub mod test_utils;
pub mod user_token; pub mod user_token;

View File

@ -1,6 +1,7 @@
use crate::{ use crate::{
block_number::BlockNumAndHash, block_number::{BlockNumAndHash, CacheMode},
errors::{Web3ProxyError, Web3ProxyResult}, errors::{Web3ProxyError, Web3ProxyResult},
frontend::authorization::RequestOrMethod,
jsonrpc::{self, JsonRpcErrorData}, jsonrpc::{self, JsonRpcErrorData},
}; };
use derive_more::From; use derive_more::From;
@ -18,15 +19,15 @@ use std::{
}; };
#[derive(Clone, Debug, Eq, From)] #[derive(Clone, Debug, Eq, From)]
pub struct JsonRpcQueryCacheKey { pub struct JsonRpcQueryCacheKey<'a> {
/// hashed params /// hashed params so that
hash: u64, hash: u64,
from_block: Option<BlockNumAndHash>, from_block: Option<&'a BlockNumAndHash>,
to_block: Option<BlockNumAndHash>, to_block: Option<&'a BlockNumAndHash>,
cache_errors: bool, cache_jsonrpc_errors: bool,
} }
impl JsonRpcQueryCacheKey { impl JsonRpcQueryCacheKey<'_> {
pub fn hash(&self) -> u64 { pub fn hash(&self) -> u64 {
self.hash self.hash
} }
@ -37,46 +38,42 @@ impl JsonRpcQueryCacheKey {
self.to_block.as_ref().map(|x| x.num()) self.to_block.as_ref().map(|x| x.num())
} }
pub fn cache_errors(&self) -> bool { pub fn cache_errors(&self) -> bool {
self.cache_errors self.cache_jsonrpc_errors
} }
} }
impl PartialEq for JsonRpcQueryCacheKey { impl PartialEq for JsonRpcQueryCacheKey<'_> {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
self.hash.eq(&other.hash) self.hash.eq(&other.hash)
} }
} }
impl Hash for JsonRpcQueryCacheKey { impl Hash for JsonRpcQueryCacheKey<'_> {
fn hash<H: Hasher>(&self, state: &mut H) { fn hash<H: Hasher>(&self, state: &mut H) {
// TODO: i feel like this hashes twice. oh well // TODO: i feel like this hashes twice. oh well
self.hash.hash(state); self.hash.hash(state);
} }
} }
impl JsonRpcQueryCacheKey { impl<'a> JsonRpcQueryCacheKey<'a> {
pub fn new( pub fn new(cache_mode: &'a CacheMode, request: &'a RequestOrMethod) -> Self {
from_block: Option<BlockNumAndHash>, // TODO: do this without clone
to_block: Option<BlockNumAndHash>, let from_block = cache_mode.from_block();
method: &str, let to_block = cache_mode.to_block();
params: &serde_json::Value, let cache_jsonrpc_errors = cache_mode.cache_jsonrpc_errors();
cache_errors: bool,
) -> Self {
let from_block_hash = from_block.as_ref().map(|x| x.hash());
let to_block_hash = to_block.as_ref().map(|x| x.hash());
let mut hasher = DefaultHashBuilder::default().build_hasher(); let mut hasher = DefaultHashBuilder::default().build_hasher();
from_block_hash.hash(&mut hasher); from_block.hash(&mut hasher);
to_block_hash.hash(&mut hasher); to_block.hash(&mut hasher);
method.hash(&mut hasher); request.method().hash(&mut hasher);
// TODO: make sure preserve_order feature is OFF // TODO: make sure preserve_order feature is OFF
// TODO: is there a faster way to do this? // TODO: is there a faster way to do this?
params.to_string().hash(&mut hasher); request.params().to_string().hash(&mut hasher);
cache_errors.hash(&mut hasher); cache_jsonrpc_errors.hash(&mut hasher);
let hash = hasher.finish(); let hash = hasher.finish();
@ -84,7 +81,7 @@ impl JsonRpcQueryCacheKey {
hash, hash,
from_block, from_block,
to_block, to_block,
cache_errors, cache_jsonrpc_errors,
} }
} }
} }

View File

@ -4,7 +4,6 @@ use super::many::Web3Rpcs;
use super::one::Web3Rpc; use super::one::Web3Rpc;
use crate::config::{average_block_interval, BlockAndRpc}; use crate::config::{average_block_interval, BlockAndRpc};
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult}; use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
use derive_more::From;
use ethers::prelude::{Block, TxHash, H256, U64}; use ethers::prelude::{Block, TxHash, H256, U64};
use moka::future::Cache; use moka::future::Cache;
use serde::ser::SerializeStruct; use serde::ser::SerializeStruct;
@ -23,14 +22,9 @@ pub type ArcBlock = Arc<Block<TxHash>>;
pub type BlocksByHashCache = Cache<H256, Web3ProxyBlock>; pub type BlocksByHashCache = Cache<H256, Web3ProxyBlock>;
pub type BlocksByNumberCache = Cache<U64, H256>; pub type BlocksByNumberCache = Cache<U64, H256>;
/// A block and its age. /// A block and its age with a less verbose serialized format
#[derive(Clone, Debug, Default, From)] #[derive(Clone, Debug, Default)]
pub struct Web3ProxyBlock { pub struct Web3ProxyBlock(ArcBlock);
pub block: ArcBlock,
/// number of seconds this block was behind the current time when received
/// this is only set if the block is from a subscription
pub received_age: Option<u64>,
}
impl Serialize for Web3ProxyBlock { impl Serialize for Web3ProxyBlock {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@ -43,10 +37,10 @@ impl Serialize for Web3ProxyBlock {
state.serialize_field("age", &self.age())?; state.serialize_field("age", &self.age())?;
let block = json!({ let block = json!({
"hash": self.block.hash, "hash": self.0.hash,
"parent_hash": self.block.parent_hash, "parent_hash": self.0.parent_hash,
"number": self.block.number, "number": self.0.number,
"timestamp": self.block.timestamp, "timestamp": self.0.timestamp,
}); });
state.serialize_field("block", &block)?; state.serialize_field("block", &block)?;
@ -57,7 +51,7 @@ impl Serialize for Web3ProxyBlock {
impl PartialEq for Web3ProxyBlock { impl PartialEq for Web3ProxyBlock {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
match (self.block.hash, other.block.hash) { match (self.0.hash, other.0.hash) {
(None, None) => true, (None, None) => true,
(Some(_), None) => false, (Some(_), None) => false,
(None, Some(_)) => false, (None, Some(_)) => false,
@ -70,34 +64,24 @@ impl Eq for Web3ProxyBlock {}
impl Hash for Web3ProxyBlock { impl Hash for Web3ProxyBlock {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) { fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.block.hash.hash(state); self.0.hash.hash(state);
} }
} }
impl Web3ProxyBlock { impl Web3ProxyBlock {
/// A new block has arrived over a subscription /// A new block has arrived over a subscription. skip it if its empty
pub fn try_new(block: ArcBlock) -> Option<Self> { pub fn try_new(block: ArcBlock) -> Option<Self> {
if block.number.is_none() || block.hash.is_none() { if block.number.is_none() || block.hash.is_none() {
return None; return None;
} }
let mut x = Self { Some(Self(block))
block,
received_age: None,
};
// no need to recalulate lag every time
// if the head block gets too old, a health check restarts this connection
// TODO: emit a stat for received_age
x.received_age = Some(x.age().as_secs());
Some(x)
} }
pub fn age(&self) -> Duration { pub fn age(&self) -> Duration {
let now = chrono::Utc::now().timestamp(); let now = chrono::Utc::now().timestamp();
let block_timestamp = self.block.timestamp.as_u32() as i64; let block_timestamp = self.0.timestamp.as_u32() as i64;
let x = if block_timestamp < now { let x = if block_timestamp < now {
// this server is still syncing from too far away to serve requests // this server is still syncing from too far away to serve requests
@ -112,44 +96,27 @@ impl Web3ProxyBlock {
#[inline(always)] #[inline(always)]
pub fn parent_hash(&self) -> &H256 { pub fn parent_hash(&self) -> &H256 {
&self.block.parent_hash &self.0.parent_hash
} }
#[inline(always)] #[inline(always)]
pub fn hash(&self) -> &H256 { pub fn hash(&self) -> &H256 {
self.block self.0.hash.as_ref().expect("saved blocks must have a hash")
.hash
.as_ref()
.expect("saved blocks must have a hash")
} }
#[inline(always)] #[inline(always)]
pub fn number(&self) -> &U64 { pub fn number(&self) -> U64 {
self.block self.0.number.expect("saved blocks must have a number")
.number
.as_ref()
.expect("saved blocks must have a number")
} }
#[inline(always)]
pub fn transactions(&self) -> &[TxHash] {
&self.0.transactions
}
#[inline(always)]
pub fn uncles(&self) -> &[H256] { pub fn uncles(&self) -> &[H256] {
&self.block.uncles &self.0.uncles
}
}
impl TryFrom<ArcBlock> for Web3ProxyBlock {
type Error = Web3ProxyError;
fn try_from(x: ArcBlock) -> Result<Self, Self::Error> {
if x.number.is_none() || x.hash.is_none() {
return Err(Web3ProxyError::NoBlockNumberOrHash);
}
let b = Web3ProxyBlock {
block: x,
received_age: None,
};
Ok(b)
} }
} }
@ -165,6 +132,14 @@ impl Display for Web3ProxyBlock {
} }
} }
impl TryFrom<ArcBlock> for Web3ProxyBlock {
type Error = Web3ProxyError;
fn try_from(block: ArcBlock) -> Result<Self, Self::Error> {
Self::try_new(block).ok_or(Web3ProxyError::NoBlocksKnown)
}
}
impl Web3Rpcs { impl Web3Rpcs {
/// add a block to our mappings and track the heaviest chain /// add a block to our mappings and track the heaviest chain
pub async fn try_cache_block( pub async fn try_cache_block(
@ -187,7 +162,7 @@ impl Web3Rpcs {
// TODO: if there is an existing entry with a different block_hash, // TODO: if there is an existing entry with a different block_hash,
// TODO: use entry api to handle changing existing entries // TODO: use entry api to handle changing existing entries
self.blocks_by_number.insert(*block_num, block_hash).await; self.blocks_by_number.insert(block_num, block_hash).await;
for uncle in block.uncles() { for uncle in block.uncles() {
self.blocks_by_hash.invalidate(uncle).await; self.blocks_by_hash.invalidate(uncle).await;
@ -277,7 +252,7 @@ impl Web3Rpcs {
// double check that it matches the blocks_by_number cache // double check that it matches the blocks_by_number cache
let cached_hash = self let cached_hash = self
.blocks_by_number .blocks_by_number
.get_with_by_ref(block.number(), async { *hash }) .get_with(block.number(), async { *hash })
.await; .await;
if cached_hash == *hash { if cached_hash == *hash {
@ -327,7 +302,13 @@ impl Web3Rpcs {
match block { match block {
Some(block) => { Some(block) => {
let block = self.try_cache_block(block.try_into()?, false).await?; let block = self
.try_cache_block(
Web3ProxyBlock::try_new(block)
.ok_or(Web3ProxyError::UnknownBlockHash(*hash))?,
false,
)
.await?;
Ok(block) Ok(block)
} }
None => Err(Web3ProxyError::UnknownBlockHash(*hash)), None => Err(Web3ProxyError::UnknownBlockHash(*hash)),
@ -365,7 +346,7 @@ impl Web3Rpcs {
// be sure the requested block num exists // be sure the requested block num exists
// TODO: is this okay? what if we aren't synced?! // TODO: is this okay? what if we aren't synced?!
let mut head_block_num = *consensus_head_receiver let mut head_block_num = consensus_head_receiver
.borrow_and_update() .borrow_and_update()
.as_ref() .as_ref()
.web3_context("no consensus head block")? .web3_context("no consensus head block")?
@ -386,7 +367,7 @@ impl Web3Rpcs {
consensus_head_receiver.changed().await?; consensus_head_receiver.changed().await?;
if let Some(head) = consensus_head_receiver.borrow_and_update().as_ref() { if let Some(head) = consensus_head_receiver.borrow_and_update().as_ref() {
head_block_num = *head.number(); head_block_num = head.number();
} }
} }
} }
@ -398,7 +379,11 @@ impl Web3Rpcs {
.await? .await?
.ok_or(Web3ProxyError::NoBlocksKnown)?; .ok_or(Web3ProxyError::NoBlocksKnown)?;
let block = Web3ProxyBlock::try_from(response)?; let block =
Web3ProxyBlock::try_new(response).ok_or(Web3ProxyError::UnknownBlockNumber {
known: head_block_num,
unknown: *num,
})?;
// the block was fetched using eth_getBlockByNumber, so it should have all fields and be on the heaviest chain // the block was fetched using eth_getBlockByNumber, so it should have all fields and be on the heaviest chain
let block = self.try_cache_block(block, true).await?; let block = self.try_cache_block(block, true).await?;

View File

@ -26,7 +26,7 @@ struct ConsensusRpcData {
impl ConsensusRpcData { impl ConsensusRpcData {
fn new(rpc: &Web3Rpc, head: &Web3ProxyBlock) -> Self { fn new(rpc: &Web3Rpc, head: &Web3ProxyBlock) -> Self {
let head_block_num = *head.number(); let head_block_num = head.number();
let block_data_limit = rpc.block_data_limit(); let block_data_limit = rpc.block_data_limit();
@ -39,8 +39,8 @@ impl ConsensusRpcData {
} }
// TODO: take an enum for the type of data (hrtc) // TODO: take an enum for the type of data (hrtc)
fn data_available(&self, block_num: &U64) -> bool { fn data_available(&self, block_num: U64) -> bool {
*block_num >= self.oldest_block_num && *block_num <= self.head_block_num block_num >= self.oldest_block_num && block_num <= self.head_block_num
} }
} }
@ -119,7 +119,7 @@ impl RankedRpcs {
let mut votes: Vec<_> = votes let mut votes: Vec<_> = votes
.into_iter() .into_iter()
.filter_map(|(block, (rpcs, sum_soft_limit))| { .filter_map(|(block, (rpcs, sum_soft_limit))| {
if *block.number() < max_lag_block if block.number() < max_lag_block
|| sum_soft_limit < min_sum_soft_limit || sum_soft_limit < min_sum_soft_limit
|| rpcs.len() < min_synced_rpcs || rpcs.len() < min_synced_rpcs
{ {
@ -133,7 +133,7 @@ impl RankedRpcs {
// sort the votes // sort the votes
votes.sort_by_key(|(block, sum_soft_limit, _)| { votes.sort_by_key(|(block, sum_soft_limit, _)| {
( (
Reverse(*block.number()), Reverse(block.number()),
// TODO: block total difficulty (if we have it) // TODO: block total difficulty (if we have it)
Reverse(*sum_soft_limit), Reverse(*sum_soft_limit),
// TODO: median/peak latency here? // TODO: median/peak latency here?
@ -158,7 +158,7 @@ impl RankedRpcs {
continue; continue;
} }
if *x_head.number() < max_lag_block { if x_head.number() < max_lag_block {
// server is too far behind // server is too far behind
continue; continue;
} }
@ -167,7 +167,7 @@ impl RankedRpcs {
} }
ranked_rpcs ranked_rpcs
.sort_by_cached_key(|x| x.sort_for_load_balancing_on(Some(*best_block.number()))); .sort_by_cached_key(|x| x.sort_for_load_balancing_on(Some(best_block.number())));
// consensus found! // consensus found!
trace!(?ranked_rpcs); trace!(?ranked_rpcs);
@ -201,16 +201,17 @@ impl RankedRpcs {
} }
/// will tell you if waiting will eventually should wait for a block /// will tell you if waiting will eventually should wait for a block
/// TODO: return if backup will be needed to serve the request /// TODO: error if backup will be needed to serve the request?
/// TODO: serve now if a backup server has the data /// TODO: serve now if a backup server has the data?
/// TODO: also include method (or maybe an enum representing the different prune types) /// TODO: also include method (or maybe an enum representing the different prune types)
pub fn should_wait_for_block( pub fn should_wait_for_block(
&self, &self,
needed_block_num: Option<&U64>, min_block_num: Option<U64>,
max_block_num: Option<U64>,
skip_rpcs: &[Arc<Web3Rpc>], skip_rpcs: &[Arc<Web3Rpc>],
) -> ShouldWaitForBlock { ) -> ShouldWaitForBlock {
for rpc in self.inner.iter() { for rpc in self.inner.iter() {
match self.rpc_will_work_eventually(rpc, needed_block_num, skip_rpcs) { match self.rpc_will_work_eventually(rpc, min_block_num, max_block_num, skip_rpcs) {
ShouldWaitForBlock::NeverReady => continue, ShouldWaitForBlock::NeverReady => continue,
x => return x, x => return x,
} }
@ -220,7 +221,7 @@ impl RankedRpcs {
} }
/// TODO: change this to take a min and a max /// TODO: change this to take a min and a max
pub fn has_block_data(&self, rpc: &Web3Rpc, block_num: &U64) -> bool { pub fn has_block_data(&self, rpc: &Web3Rpc, block_num: U64) -> bool {
self.rpc_data self.rpc_data
.get(rpc) .get(rpc)
.map(|x| x.data_available(block_num)) .map(|x| x.data_available(block_num))
@ -233,7 +234,8 @@ impl RankedRpcs {
pub fn rpc_will_work_eventually( pub fn rpc_will_work_eventually(
&self, &self,
rpc: &Arc<Web3Rpc>, rpc: &Arc<Web3Rpc>,
needed_block_num: Option<&U64>, min_block_num: Option<U64>,
max_block_num: Option<U64>,
skip_rpcs: &[Arc<Web3Rpc>], skip_rpcs: &[Arc<Web3Rpc>],
) -> ShouldWaitForBlock { ) -> ShouldWaitForBlock {
if skip_rpcs.contains(rpc) { if skip_rpcs.contains(rpc) {
@ -241,9 +243,20 @@ impl RankedRpcs {
return ShouldWaitForBlock::NeverReady; return ShouldWaitForBlock::NeverReady;
} }
if let Some(needed_block_num) = needed_block_num { if let Some(min_block_num) = min_block_num {
if !self.has_block_data(rpc, min_block_num) {
trace!(
"{} is missing min_block_num ({}). will not work eventually",
rpc,
min_block_num,
);
return ShouldWaitForBlock::NeverReady;
}
}
if let Some(needed_block_num) = max_block_num {
if let Some(rpc_data) = self.rpc_data.get(rpc) { if let Some(rpc_data) = self.rpc_data.get(rpc) {
match rpc_data.head_block_num.cmp(needed_block_num) { match rpc_data.head_block_num.cmp(&needed_block_num) {
Ordering::Less => { Ordering::Less => {
trace!("{} is behind. let it catch up", rpc); trace!("{} is behind. let it catch up", rpc);
// TODO: what if this is a pruned rpc that is behind by a lot, and the block is old, too? // TODO: what if this is a pruned rpc that is behind by a lot, and the block is old, too?
@ -277,8 +290,8 @@ impl RankedRpcs {
pub fn rpc_will_work_now( pub fn rpc_will_work_now(
&self, &self,
skip: &[Arc<Web3Rpc>], skip: &[Arc<Web3Rpc>],
min_block_needed: Option<&U64>, min_block_needed: Option<U64>,
max_block_needed: Option<&U64>, max_block_needed: Option<U64>,
rpc: &Arc<Web3Rpc>, rpc: &Arc<Web3Rpc>,
) -> bool { ) -> bool {
if skip.contains(rpc) { if skip.contains(rpc) {
@ -344,7 +357,7 @@ impl Web3Rpcs {
/// note: you probably want to use `head_block` instead /// note: you probably want to use `head_block` instead
/// TODO: return a ref? /// TODO: return a ref?
pub fn head_block_num(&self) -> Option<U64> { pub fn head_block_num(&self) -> Option<U64> {
self.head_block().map(|x| *x.number()) self.head_block().map(|x| x.number())
} }
pub fn synced(&self) -> bool { pub fn synced(&self) -> bool {
@ -489,7 +502,7 @@ impl ConsensusFinder {
Some(old_consensus_connections) => { Some(old_consensus_connections) => {
let old_head_block = &old_consensus_connections.head_block; let old_head_block = &old_consensus_connections.head_block;
match consensus_head_block.number().cmp(old_head_block.number()) { match consensus_head_block.number().cmp(&old_head_block.number()) {
Ordering::Equal => { Ordering::Equal => {
// multiple blocks with the same fork! // multiple blocks with the same fork!
if consensus_head_block.hash() == old_head_block.hash() { if consensus_head_block.hash() == old_head_block.hash() {
@ -805,7 +818,7 @@ impl ConsensusFinder {
trace!("max_lag_block_number: {}", max_lag_block_number); trace!("max_lag_block_number: {}", max_lag_block_number);
let lowest_block_number = lowest_block.number().max(&max_lag_block_number); let lowest_block_number = lowest_block.number().max(max_lag_block_number);
// TODO: should lowest block number be set such that the rpc won't ever go backwards? // TODO: should lowest block number be set such that the rpc won't ever go backwards?
trace!("safe lowest_block_number: {}", lowest_block_number); trace!("safe lowest_block_number: {}", lowest_block_number);

File diff suppressed because it is too large Load Diff

View File

@ -5,7 +5,7 @@ use super::request::{OpenRequestHandle, OpenRequestResult};
use crate::app::{flatten_handle, Web3ProxyJoinHandle}; use crate::app::{flatten_handle, Web3ProxyJoinHandle};
use crate::config::{BlockAndRpc, Web3RpcConfig}; use crate::config::{BlockAndRpc, Web3RpcConfig};
use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult}; use crate::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
use crate::frontend::authorization::RequestMetadata; use crate::frontend::authorization::Web3Request;
use crate::jsonrpc::{self, JsonRpcParams, JsonRpcResultData}; use crate::jsonrpc::{self, JsonRpcParams, JsonRpcResultData};
use crate::rpcs::request::RequestErrorHandler; use crate::rpcs::request::RequestErrorHandler;
use anyhow::{anyhow, Context}; use anyhow::{anyhow, Context};
@ -34,6 +34,7 @@ use url::Url;
#[derive(Default)] #[derive(Default)]
pub struct Web3Rpc { pub struct Web3Rpc {
pub name: String, pub name: String,
pub chain_id: u64,
pub block_interval: Duration, pub block_interval: Duration,
pub display_name: Option<String>, pub display_name: Option<String>,
pub db_conn: Option<DatabaseConnection>, pub db_conn: Option<DatabaseConnection>,
@ -60,7 +61,7 @@ pub struct Web3Rpc {
/// TODO: have an enum for this so that "no limit" prints pretty? /// TODO: have an enum for this so that "no limit" prints pretty?
pub(super) block_data_limit: AtomicU64, pub(super) block_data_limit: AtomicU64,
/// head_block is only inside an Option so that the "Default" derive works. it will always be set. /// head_block is only inside an Option so that the "Default" derive works. it will always be set.
pub(super) head_block: Option<watch::Sender<Option<Web3ProxyBlock>>>, pub(super) head_block_sender: Option<watch::Sender<Option<Web3ProxyBlock>>>,
/// Track head block latency. /// Track head block latency.
pub(super) head_delay: AsyncRwLock<EwmaLatency>, pub(super) head_delay: AsyncRwLock<EwmaLatency>,
/// Track peak request latency /// Track peak request latency
@ -193,7 +194,7 @@ impl Web3Rpc {
display_name: config.display_name, display_name: config.display_name,
hard_limit, hard_limit,
hard_limit_until: Some(hard_limit_until), hard_limit_until: Some(hard_limit_until),
head_block: Some(head_block), head_block_sender: Some(head_block),
http_url, http_url,
http_client, http_client,
max_head_block_age, max_head_block_age,
@ -238,9 +239,9 @@ impl Web3Rpc {
/// TODO: move this to consensus.rs /// TODO: move this to consensus.rs
fn sort_on(&self, max_block: Option<U64>) -> (bool, Reverse<U64>, u32) { fn sort_on(&self, max_block: Option<U64>) -> (bool, Reverse<U64>, u32) {
let mut head_block = self let mut head_block = self
.head_block .head_block_sender
.as_ref() .as_ref()
.and_then(|x| x.borrow().as_ref().map(|x| *x.number())) .and_then(|x| x.borrow().as_ref().map(|x| x.number()))
.unwrap_or_default(); .unwrap_or_default();
if let Some(max_block) = max_block { if let Some(max_block) = max_block {
@ -389,39 +390,43 @@ impl Web3Rpc {
} }
/// TODO: get rid of this now that consensus rpcs does it /// TODO: get rid of this now that consensus rpcs does it
pub fn has_block_data(&self, needed_block_num: &U64) -> bool { pub fn has_block_data(&self, needed_block_num: U64) -> bool {
let head_block_num = match self.head_block.as_ref().unwrap().borrow().as_ref() { if let Some(head_block_sender) = self.head_block_sender.as_ref() {
None => return false, // TODO: this needs a max of our overall head block number
Some(x) => *x.number(), let head_block_num = match head_block_sender.borrow().as_ref() {
}; None => return false,
Some(x) => x.number(),
};
// this rpc doesn't have that block yet. still syncing // this rpc doesn't have that block yet. still syncing
if needed_block_num > &head_block_num { if needed_block_num > head_block_num {
trace!( trace!(
"{} has head {} but needs {}", "{} has head {} but needs {}",
self, self,
head_block_num, head_block_num,
needed_block_num, needed_block_num,
); );
return false; return false;
}
// if this is a pruning node, we might not actually have the block
let block_data_limit: U64 = self.block_data_limit();
let oldest_block_num = head_block_num.saturating_sub(block_data_limit);
if needed_block_num < oldest_block_num {
trace!(
"{} needs {} but the oldest available is {}",
self,
needed_block_num,
oldest_block_num
);
return false;
}
true
} else {
false
} }
// if this is a pruning node, we might not actually have the block
let block_data_limit: U64 = self.block_data_limit();
let oldest_block_num = head_block_num.saturating_sub(block_data_limit);
if needed_block_num < &oldest_block_num {
trace!(
"{} needs {} but the oldest available is {}",
self,
needed_block_num,
oldest_block_num
);
return false;
}
true
} }
/// query the web3 provider to confirm it is on the expected chain with the expected data available /// query the web3 provider to confirm it is on the expected chain with the expected data available
@ -468,7 +473,7 @@ impl Web3Rpc {
block_and_rpc_sender: &mpsc::UnboundedSender<BlockAndRpc>, block_and_rpc_sender: &mpsc::UnboundedSender<BlockAndRpc>,
block_map: &BlocksByHashCache, block_map: &BlocksByHashCache,
) -> Web3ProxyResult<()> { ) -> Web3ProxyResult<()> {
let head_block_sender = self.head_block.as_ref().unwrap(); let head_block_sender = self.head_block_sender.as_ref().unwrap();
let new_head_block = match new_head_block { let new_head_block = match new_head_block {
Ok(x) => { Ok(x) => {
@ -544,7 +549,7 @@ impl Web3Rpc {
self: &Arc<Self>, self: &Arc<Self>,
error_handler: Option<RequestErrorHandler>, error_handler: Option<RequestErrorHandler>,
) -> Web3ProxyResult<()> { ) -> Web3ProxyResult<()> {
let head_block = self.head_block.as_ref().unwrap().borrow().clone(); let head_block = self.head_block_sender.as_ref().unwrap().borrow().clone();
if let Some(head_block) = head_block { if let Some(head_block) = head_block {
// TODO: if head block is very old and not expected to be syncing, emit warning // TODO: if head block is very old and not expected to be syncing, emit warning
@ -552,11 +557,9 @@ impl Web3Rpc {
return Err(anyhow::anyhow!("head_block is too old!").into()); return Err(anyhow::anyhow!("head_block is too old!").into());
} }
let head_block = head_block.block; let block_number = head_block.number();
let block_number = head_block.number.context("no block number")?; let to = if let Some(txid) = head_block.transactions().last().cloned() {
let to = if let Some(txid) = head_block.transactions.last().cloned() {
let tx = self let tx = self
.internal_request::<_, Option<Transaction>>( .internal_request::<_, Option<Transaction>>(
"eth_getTransactionByHash", "eth_getTransactionByHash",
@ -944,7 +947,7 @@ impl Web3Rpc {
i.tick().await; i.tick().await;
} }
} else { } else {
unimplemented!("no ws or http provider!") return Err(anyhow!("no ws or http provider!").into());
} }
// clear the head block. this might not be needed, but it won't hurt // clear the head block. this might not be needed, but it won't hurt
@ -961,7 +964,7 @@ impl Web3Rpc {
pub async fn wait_for_request_handle( pub async fn wait_for_request_handle(
self: &Arc<Self>, self: &Arc<Self>,
request_metadata: &Arc<RequestMetadata>, web3_request: &Arc<Web3Request>,
max_wait: Option<Duration>, max_wait: Option<Duration>,
error_handler: Option<RequestErrorHandler>, error_handler: Option<RequestErrorHandler>,
) -> Web3ProxyResult<OpenRequestHandle> { ) -> Web3ProxyResult<OpenRequestHandle> {
@ -970,10 +973,7 @@ impl Web3Rpc {
let max_wait_until = max_wait.map(|x| Instant::now() + x); let max_wait_until = max_wait.map(|x| Instant::now() + x);
loop { loop {
match self match self.try_request_handle(web3_request, error_handler).await {
.try_request_handle(request_metadata, error_handler)
.await
{
Ok(OpenRequestResult::Handle(handle)) => return Ok(handle), Ok(OpenRequestResult::Handle(handle)) => return Ok(handle),
Ok(OpenRequestResult::RetryAt(retry_at)) => { Ok(OpenRequestResult::RetryAt(retry_at)) => {
// TODO: emit a stat? // TODO: emit a stat?
@ -1015,7 +1015,7 @@ impl Web3Rpc {
pub async fn try_request_handle( pub async fn try_request_handle(
self: &Arc<Self>, self: &Arc<Self>,
request_metadata: &Arc<RequestMetadata>, web3_request: &Arc<Web3Request>,
error_handler: Option<RequestErrorHandler>, error_handler: Option<RequestErrorHandler>,
) -> Web3ProxyResult<OpenRequestResult> { ) -> Web3ProxyResult<OpenRequestResult> {
// TODO: if websocket is reconnecting, return an error? // TODO: if websocket is reconnecting, return an error?
@ -1066,7 +1066,7 @@ impl Web3Rpc {
}; };
let handle = let handle =
OpenRequestHandle::new(request_metadata.clone(), self.clone(), error_handler).await; OpenRequestHandle::new(web3_request.clone(), self.clone(), error_handler).await;
Ok(handle.into()) Ok(handle.into())
} }
@ -1078,25 +1078,23 @@ impl Web3Rpc {
error_handler: Option<RequestErrorHandler>, error_handler: Option<RequestErrorHandler>,
max_wait: Option<Duration>, max_wait: Option<Duration>,
) -> Web3ProxyResult<R> { ) -> Web3ProxyResult<R> {
let authorization = Default::default(); let web3_request = Web3Request::new_internal(method.into(), params, None, max_wait).await;
self.authorized_request(method, params, &authorization, error_handler, max_wait) self.authorized_request(&web3_request, error_handler, max_wait)
.await .await
} }
pub async fn authorized_request<P: JsonRpcParams, R: JsonRpcResultData>( pub async fn authorized_request<R: JsonRpcResultData>(
self: &Arc<Self>, self: &Arc<Self>,
method: &str, web3_request: &Arc<Web3Request>,
params: &P,
request_metadata: &Arc<RequestMetadata>,
error_handler: Option<RequestErrorHandler>, error_handler: Option<RequestErrorHandler>,
max_wait: Option<Duration>, max_wait: Option<Duration>,
) -> Web3ProxyResult<R> { ) -> Web3ProxyResult<R> {
let handle = self let handle = self
.wait_for_request_handle(request_metadata, max_wait, error_handler) .wait_for_request_handle(web3_request, max_wait, error_handler)
.await?; .await?;
let response = handle.request::<P, R>(method, params).await?; let response = handle.request().await?;
let parsed = response.parsed().await?; let parsed = response.parsed().await?;
match parsed.payload { match parsed.payload {
jsonrpc::Payload::Success { result } => Ok(result), jsonrpc::Payload::Success { result } => Ok(result),
@ -1174,7 +1172,7 @@ impl Serialize for Web3Rpc {
// TODO: maybe this is too much data. serialize less? // TODO: maybe this is too much data. serialize less?
{ {
let head_block = self.head_block.as_ref().unwrap(); let head_block = self.head_block_sender.as_ref().unwrap();
let head_block = head_block.borrow(); let head_block = head_block.borrow();
let head_block = head_block.as_ref(); let head_block = head_block.as_ref();
state.serialize_field("head_block", &head_block)?; state.serialize_field("head_block", &head_block)?;
@ -1244,9 +1242,9 @@ impl fmt::Debug for Web3Rpc {
f.field("weighted_ms", &self.weighted_peak_latency().as_millis()); f.field("weighted_ms", &self.weighted_peak_latency().as_millis());
if let Some(head_block_watch) = self.head_block.as_ref() { if let Some(head_block_watch) = self.head_block_sender.as_ref() {
if let Some(head_block) = head_block_watch.borrow().as_ref() { if let Some(head_block) = head_block_watch.borrow().as_ref() {
f.field("head_num", head_block.number()); f.field("head_num", &head_block.number());
f.field("head_hash", head_block.hash()); f.field("head_hash", head_block.hash());
} else { } else {
f.field("head_num", &None::<()>); f.field("head_num", &None::<()>);
@ -1293,15 +1291,15 @@ mod tests {
automatic_block_limit: false, automatic_block_limit: false,
backup: false, backup: false,
block_data_limit: block_data_limit.into(), block_data_limit: block_data_limit.into(),
head_block: Some(tx), head_block_sender: Some(tx),
..Default::default() ..Default::default()
}; };
assert!(x.has_block_data(&0.into())); assert!(x.has_block_data(0.into()));
assert!(x.has_block_data(&1.into())); assert!(x.has_block_data(1.into()));
assert!(x.has_block_data(head_block.number())); assert!(x.has_block_data(head_block.number()));
assert!(!x.has_block_data(&(head_block.number() + 1))); assert!(!x.has_block_data(head_block.number() + 1));
assert!(!x.has_block_data(&(head_block.number() + 1000))); assert!(!x.has_block_data(head_block.number() + 1000));
} }
#[test] #[test]
@ -1327,17 +1325,17 @@ mod tests {
automatic_block_limit: false, automatic_block_limit: false,
backup: false, backup: false,
block_data_limit: block_data_limit.into(), block_data_limit: block_data_limit.into(),
head_block: Some(tx), head_block_sender: Some(tx),
..Default::default() ..Default::default()
}; };
assert!(!x.has_block_data(&0.into())); assert!(!x.has_block_data(0.into()));
assert!(!x.has_block_data(&1.into())); assert!(!x.has_block_data(1.into()));
assert!(!x.has_block_data(&(head_block.number() - block_data_limit - 1))); assert!(!x.has_block_data(head_block.number() - block_data_limit - 1));
assert!(x.has_block_data(&(head_block.number() - block_data_limit))); assert!(x.has_block_data(head_block.number() - block_data_limit));
assert!(x.has_block_data(head_block.number())); assert!(x.has_block_data(head_block.number()));
assert!(!x.has_block_data(&(head_block.number() + 1))); assert!(!x.has_block_data(head_block.number() + 1));
assert!(!x.has_block_data(&(head_block.number() + 1000))); assert!(!x.has_block_data(head_block.number() + 1000));
} }
/* /*
@ -1380,11 +1378,11 @@ mod tests {
head_block: AsyncRwLock::new(Some(head_block.clone())), head_block: AsyncRwLock::new(Some(head_block.clone())),
}; };
assert!(!x.has_block_data(&0.into())); assert!(!x.has_block_data(0.into()));
assert!(!x.has_block_data(&1.into())); assert!(!x.has_block_data(1.into()));
assert!(!x.has_block_data(&head_block.number())); assert!(!x.has_block_data(head_block.number());
assert!(!x.has_block_data(&(head_block.number() + 1))); assert!(!x.has_block_data(head_block.number() + 1));
assert!(!x.has_block_data(&(head_block.number() + 1000))); assert!(!x.has_block_data(head_block.number() + 1000));
} }
*/ */
} }

View File

@ -1,8 +1,8 @@
use super::one::Web3Rpc; use super::one::Web3Rpc;
use crate::errors::{Web3ProxyErrorContext, Web3ProxyResult}; use crate::errors::{Web3ProxyErrorContext, Web3ProxyResult};
use crate::frontend::authorization::{Authorization, AuthorizationType, RequestMetadata}; use crate::frontend::authorization::{Authorization, AuthorizationType, Web3Request};
use crate::globals::{global_db_conn, DB_CONN}; use crate::globals::{global_db_conn, DB_CONN};
use crate::jsonrpc::{self, JsonRpcParams, JsonRpcResultData}; use crate::jsonrpc::{self, JsonRpcResultData};
use chrono::Utc; use chrono::Utc;
use derive_more::From; use derive_more::From;
use entities::revert_log; use entities::revert_log;
@ -28,9 +28,8 @@ pub enum OpenRequestResult {
/// Make RPC requests through this handle and drop it when you are done. /// Make RPC requests through this handle and drop it when you are done.
/// Opening this handle checks rate limits. Developers, try to keep opening a handle and using it as close together as possible /// Opening this handle checks rate limits. Developers, try to keep opening a handle and using it as close together as possible
#[derive(Debug)]
pub struct OpenRequestHandle { pub struct OpenRequestHandle {
request_metadata: Arc<RequestMetadata>, web3_request: Arc<Web3Request>,
error_handler: RequestErrorHandler, error_handler: RequestErrorHandler,
rpc: Arc<Web3Rpc>, rpc: Arc<Web3Rpc>,
} }
@ -63,6 +62,15 @@ struct EthCallFirstParams {
data: Option<Bytes>, data: Option<Bytes>,
} }
impl std::fmt::Debug for OpenRequestHandle {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("OpenRequestHandle")
.field("method", &self.web3_request.request.method())
.field("rpc", &self.rpc.name)
.finish_non_exhaustive()
}
}
impl From<Level> for RequestErrorHandler { impl From<Level> for RequestErrorHandler {
fn from(level: Level) -> Self { fn from(level: Level) -> Self {
match level { match level {
@ -90,7 +98,7 @@ impl Authorization {
} }
}; };
let db_conn = global_db_conn().await?; let db_conn = global_db_conn()?;
// TODO: should the database set the timestamp? // TODO: should the database set the timestamp?
// we intentionally use "now" and not the time the request started // we intentionally use "now" and not the time the request started
@ -133,7 +141,7 @@ impl Drop for OpenRequestHandle {
impl OpenRequestHandle { impl OpenRequestHandle {
pub async fn new( pub async fn new(
request_metadata: Arc<RequestMetadata>, web3_request: Arc<Web3Request>,
rpc: Arc<Web3Rpc>, rpc: Arc<Web3Rpc>,
error_handler: Option<RequestErrorHandler>, error_handler: Option<RequestErrorHandler>,
) -> Self { ) -> Self {
@ -146,7 +154,7 @@ impl OpenRequestHandle {
let error_handler = error_handler.unwrap_or_default(); let error_handler = error_handler.unwrap_or_default();
Self { Self {
request_metadata, web3_request,
error_handler, error_handler,
rpc, rpc,
} }
@ -165,17 +173,18 @@ impl OpenRequestHandle {
/// By having the request method here, we ensure that the rate limiter was called and connection counts were properly incremented /// By having the request method here, we ensure that the rate limiter was called and connection counts were properly incremented
/// depending on how things are locked, you might need to pass the provider in /// depending on how things are locked, you might need to pass the provider in
/// we take self to ensure this function only runs once /// we take self to ensure this function only runs once
pub async fn request<P: JsonRpcParams, R: JsonRpcResultData + serde::Serialize>( pub async fn request<R: JsonRpcResultData + serde::Serialize>(
self, self,
method: &str,
params: &P,
) -> Result<jsonrpc::SingleResponse<R>, ProviderError> { ) -> Result<jsonrpc::SingleResponse<R>, ProviderError> {
// TODO: use tracing spans // TODO: use tracing spans
// TODO: including params in this log is way too verbose // TODO: including params in this log is way too verbose
// trace!(rpc=%self.rpc, %method, "request"); // trace!(rpc=%self.rpc, %method, "request");
trace!("requesting from {}", self.rpc); trace!("requesting from {}", self.rpc);
let authorization = &self.request_metadata.authorization; let method = self.web3_request.request.method();
let params = self.web3_request.request.params();
let authorization = &self.web3_request.authorization;
match &authorization.authorization_type { match &authorization.authorization_type {
AuthorizationType::Frontend => { AuthorizationType::Frontend => {
@ -204,8 +213,7 @@ impl OpenRequestHandle {
{ {
let params: serde_json::Value = serde_json::to_value(params)?; let params: serde_json::Value = serde_json::to_value(params)?;
let request = jsonrpc::JsonRpcRequest::new( let request = jsonrpc::JsonRpcRequest::new(
// TODO: proper id self.web3_request.id().into(),
jsonrpc::JsonRpcId::Number(1),
method.to_string(), method.to_string(),
params, params,
) )
@ -216,7 +224,7 @@ impl OpenRequestHandle {
jsonrpc::SingleResponse::read_if_short( jsonrpc::SingleResponse::read_if_short(
response, response,
1024, 1024,
self.request_metadata.clone(), self.web3_request.clone(),
) )
.await .await
} }
@ -226,7 +234,9 @@ impl OpenRequestHandle {
p.request(method, params) p.request(method, params)
.await .await
// TODO: Id here // TODO: Id here
.map(|result| jsonrpc::ParsedResponse::from_result(result, None).into()) .map(|result| {
jsonrpc::ParsedResponse::from_result(result, Default::default()).into()
})
} else { } else {
return Err(ProviderError::CustomError( return Err(ProviderError::CustomError(
"no provider configured!".to_string(), "no provider configured!".to_string(),
@ -255,9 +265,9 @@ impl OpenRequestHandle {
if !["eth_call", "eth_estimateGas"].contains(&method) { if !["eth_call", "eth_estimateGas"].contains(&method) {
// trace!(%method, "skipping save on revert"); // trace!(%method, "skipping save on revert");
RequestErrorHandler::TraceLevel RequestErrorHandler::TraceLevel
} else if DB_CONN.read().await.is_ok() { } else if DB_CONN.read().is_ok() {
let log_revert_chance = let log_revert_chance =
self.request_metadata.authorization.checks.log_revert_chance; self.web3_request.authorization.checks.log_revert_chance;
if log_revert_chance == 0 { if log_revert_chance == 0 {
// trace!(%method, "no chance. skipping save on revert"); // trace!(%method, "no chance. skipping save on revert");
@ -435,6 +445,8 @@ impl OpenRequestHandle {
tokio::spawn(async move { tokio::spawn(async move {
self.rpc.peak_latency.as_ref().unwrap().report(latency); self.rpc.peak_latency.as_ref().unwrap().report(latency);
self.rpc.median_latency.as_ref().unwrap().record(latency); self.rpc.median_latency.as_ref().unwrap().record(latency);
// TODO: app median latency
}); });
response response

63
web3_proxy/src/secrets.rs Normal file
View File

@ -0,0 +1,63 @@
use serde::{Deserialize, Serialize};
use std::fmt;
use ulid::Ulid;
use uuid::Uuid;
/// This lets us use UUID and ULID while we transition to only ULIDs
/// TODO: custom deserialize that can also go from String to Ulid
#[derive(Copy, Clone, Deserialize)]
pub enum RpcSecretKey {
Ulid(Ulid),
Uuid(Uuid),
}
impl RpcSecretKey {
pub fn new() -> Self {
Ulid::new().into()
}
pub fn as_128(&self) -> u128 {
match self {
Self::Ulid(x) => x.0,
Self::Uuid(x) => x.as_u128(),
}
}
}
impl PartialEq for RpcSecretKey {
fn eq(&self, other: &Self) -> bool {
self.as_128() == other.as_128()
}
}
impl Eq for RpcSecretKey {}
impl fmt::Debug for RpcSecretKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Ulid(x) => fmt::Debug::fmt(x, f),
Self::Uuid(x) => {
let x = Ulid::from(x.as_u128());
fmt::Debug::fmt(&x, f)
}
}
}
}
/// always serialize as a ULID.
impl Serialize for RpcSecretKey {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
Self::Ulid(x) => x.serialize(serializer),
Self::Uuid(x) => {
let x: Ulid = x.to_owned().into();
x.serialize(serializer)
}
}
}
}

View File

@ -62,8 +62,8 @@ pub async fn query_user_stats<'a>(
params: &'a HashMap<String, String>, params: &'a HashMap<String, String>,
stat_response_type: StatType, stat_response_type: StatType,
) -> Web3ProxyResponse { ) -> Web3ProxyResponse {
let db_conn = global_db_conn().await?; let db_conn = global_db_conn()?;
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
let mut redis_conn = app.redis_conn().await?; let mut redis_conn = app.redis_conn().await?;
// get the user id first. if it is 0, we should use a cache on the app // get the user id first. if it is 0, we should use a cache on the app

View File

@ -48,7 +48,7 @@ pub async fn query_user_influx_stats<'a>(
)); ));
} }
let db_replica = global_db_replica_conn().await?; let db_replica = global_db_replica_conn()?;
// Read the (optional) user-id from the request, this is the logic for subusers // Read the (optional) user-id from the request, this is the logic for subusers
// If there is no bearer token, this is not allowed // If there is no bearer token, this is not allowed

View File

@ -9,11 +9,11 @@ use self::stat_buffer::BufferedRpcQueryStats;
use crate::caches::{RpcSecretKeyCache, UserBalanceCache}; use crate::caches::{RpcSecretKeyCache, UserBalanceCache};
use crate::compute_units::ComputeUnit; use crate::compute_units::ComputeUnit;
use crate::errors::{Web3ProxyError, Web3ProxyResult}; use crate::errors::{Web3ProxyError, Web3ProxyResult};
use crate::frontend::authorization::{Authorization, RequestMetadata}; use crate::frontend::authorization::{Authorization, AuthorizationType, Web3Request};
use crate::rpcs::one::Web3Rpc; use crate::rpcs::one::Web3Rpc;
use anyhow::{anyhow, Context}; use anyhow::{anyhow, Context};
use chrono::{DateTime, Months, TimeZone, Utc}; use chrono::{DateTime, Months, TimeZone, Utc};
use derive_more::From; use derive_more::{AddAssign, From};
use entities::{referee, referrer, rpc_accounting_v2}; use entities::{referee, referrer, rpc_accounting_v2};
use influxdb2::models::DataPoint; use influxdb2::models::DataPoint;
use migration::sea_orm::prelude::Decimal; use migration::sea_orm::prelude::Decimal;
@ -25,7 +25,6 @@ use migration::{Expr, LockType, OnConflict};
use num_traits::ToPrimitive; use num_traits::ToPrimitive;
use parking_lot::Mutex; use parking_lot::Mutex;
use std::borrow::Cow; use std::borrow::Cow;
use std::mem;
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
use std::sync::Arc; use std::sync::Arc;
use tracing::{error, instrument, trace, warn}; use tracing::{error, instrument, trace, warn};
@ -40,13 +39,19 @@ pub enum StatType {
pub type BackendRequests = Mutex<Vec<Arc<Web3Rpc>>>; pub type BackendRequests = Mutex<Vec<Arc<Web3Rpc>>>;
#[derive(Copy, Clone, Debug)] #[derive(AddAssign, Copy, Clone, Debug, Default)]
pub struct FlushedStats { pub struct FlushedStats {
/// the number of rows saved to the relational database.
/// rows can contain multiple requests
pub relational: usize, pub relational: usize,
pub relational_frontend_requests: u64, pub relational_frontend_requests: u64,
pub relational_internal_requests: u64,
/// the number of data points saved to the timeseries database.
/// data points can contain multiple requests
pub timeseries: usize, pub timeseries: usize,
/// the number of global frontend requests saved to influx /// the number of global frontend requests saved to the time series database
pub timeseries_frontend_requests: u64, pub timeseries_frontend_requests: u64,
pub timeseries_internal_requests: u64,
} }
/// TODO: better name? RpcQueryStatBuilder? /// TODO: better name? RpcQueryStatBuilder?
@ -69,12 +74,11 @@ pub struct RpcQueryStats {
pub compute_unit_cost: Decimal, pub compute_unit_cost: Decimal,
/// If the request is invalid or received a jsonrpc error response (excluding reverts) /// If the request is invalid or received a jsonrpc error response (excluding reverts)
pub user_error_response: bool, pub user_error_response: bool,
/// If premium was active at the start of the request
pub paid_credits_used: bool,
} }
#[derive(Clone, Debug, From, Hash, PartialEq, Eq)] #[derive(Clone, Debug, From, Hash, PartialEq, Eq)]
pub struct RpcQueryKey { pub struct RpcQueryKey {
pub authorization_type: AuthorizationType,
/// unix epoch time in seconds. /// unix epoch time in seconds.
/// for the time series db, this is (close to) the time that the response was sent. /// for the time series db, this is (close to) the time that the response was sent.
/// for the account database, this is rounded to the week. /// for the account database, this is rounded to the week.
@ -90,6 +94,7 @@ pub struct RpcQueryKey {
/// 0 if the public url was used. /// 0 if the public url was used.
rpc_secret_key_id: u64, rpc_secret_key_id: u64,
/// 0 if the public url was used. /// 0 if the public url was used.
/// TODO: u64::MAX if the internal? or have a migration make a user for us? or keep 0 and we track that another way?
rpc_key_user_id: u64, rpc_key_user_id: u64,
} }
@ -130,6 +135,7 @@ impl RpcQueryStats {
// Depending on method, add some arithmetic around calculating credits_used // Depending on method, add some arithmetic around calculating credits_used
// I think balance should not go here, this looks more like a key thingy // I think balance should not go here, this looks more like a key thingy
RpcQueryKey { RpcQueryKey {
authorization_type: self.authorization.authorization_type,
response_timestamp, response_timestamp,
archive_needed: self.archive_request, archive_needed: self.archive_request,
error_response: self.error_response, error_response: self.error_response,
@ -151,6 +157,7 @@ impl RpcQueryStats {
let rpc_key_user_id = 0; let rpc_key_user_id = 0;
RpcQueryKey { RpcQueryKey {
authorization_type: self.authorization.authorization_type,
response_timestamp: self.response_timestamp, response_timestamp: self.response_timestamp,
archive_needed: self.archive_request, archive_needed: self.archive_request,
error_response: self.error_response, error_response: self.error_response,
@ -177,6 +184,7 @@ impl RpcQueryStats {
let method = self.method.clone(); let method = self.method.clone();
let key = RpcQueryKey { let key = RpcQueryKey {
authorization_type: self.authorization.authorization_type,
response_timestamp: self.response_timestamp, response_timestamp: self.response_timestamp,
archive_needed: self.archive_request, archive_needed: self.archive_request,
error_response: self.error_response, error_response: self.error_response,
@ -194,7 +202,7 @@ impl RpcQueryStats {
/// For now there is just one, but I think there might be others later /// For now there is just one, but I think there might be others later
#[derive(Debug, From)] #[derive(Debug, From)]
pub enum AppStat { pub enum AppStat {
RpcQuery(RequestMetadata), RpcQuery(Web3Request),
} }
// TODO: move to stat_buffer.rs? // TODO: move to stat_buffer.rs?
@ -538,7 +546,7 @@ impl BufferedRpcQueryStats {
/// We want this to run when there is **one and only one** copy of this RequestMetadata left /// We want this to run when there is **one and only one** copy of this RequestMetadata left
/// There are often multiple copies if a request is being sent to multiple servers in parallel /// There are often multiple copies if a request is being sent to multiple servers in parallel
impl RpcQueryStats { impl RpcQueryStats {
fn try_from_metadata(mut metadata: RequestMetadata) -> Web3ProxyResult<Self> { fn try_from_metadata(metadata: Web3Request) -> Web3ProxyResult<Self> {
// TODO: do this without a clone // TODO: do this without a clone
let authorization = metadata.authorization.clone(); let authorization = metadata.authorization.clone();
@ -547,7 +555,7 @@ impl RpcQueryStats {
// TODO: do this without cloning. we can take their vec // TODO: do this without cloning. we can take their vec
let backend_rpcs_used = metadata.backend_rpcs_used(); let backend_rpcs_used = metadata.backend_rpcs_used();
let request_bytes = metadata.request_bytes as u64; let request_bytes = metadata.request.num_bytes() as u64;
let response_bytes = metadata.response_bytes.load(Ordering::Relaxed); let response_bytes = metadata.response_bytes.load(Ordering::Relaxed);
let mut error_response = metadata.error_response.load(Ordering::Relaxed); let mut error_response = metadata.error_response.load(Ordering::Relaxed);
@ -579,7 +587,7 @@ impl RpcQueryStats {
x => x, x => x,
}; };
let cu = ComputeUnit::new(&metadata.method, metadata.chain_id, response_bytes); let cu = ComputeUnit::new(metadata.request.method(), metadata.chain_id, response_bytes);
let cache_hit = backend_rpcs_used.is_empty(); let cache_hit = backend_rpcs_used.is_empty();
@ -590,9 +598,7 @@ impl RpcQueryStats {
&metadata.usd_per_cu, &metadata.usd_per_cu,
); );
let method = mem::take(&mut metadata.method); let method = metadata.request.method().to_string().into();
let paid_credits_used = authorization.checks.paid_credits_used;
let x = Self { let x = Self {
archive_request, archive_request,
@ -602,7 +608,6 @@ impl RpcQueryStats {
compute_unit_cost, compute_unit_cost,
error_response, error_response,
method, method,
paid_credits_used,
request_bytes, request_bytes,
response_bytes, response_bytes,
response_millis, response_millis,

View File

@ -2,7 +2,7 @@ use super::{AppStat, FlushedStats, RpcQueryKey};
use crate::app::Web3ProxyJoinHandle; use crate::app::Web3ProxyJoinHandle;
use crate::caches::{RpcSecretKeyCache, UserBalanceCache}; use crate::caches::{RpcSecretKeyCache, UserBalanceCache};
use crate::errors::Web3ProxyResult; use crate::errors::Web3ProxyResult;
use crate::frontend::authorization::RequestMetadata; use crate::frontend::authorization::{AuthorizationType, Web3Request};
use crate::globals::global_db_conn; use crate::globals::global_db_conn;
use crate::stats::RpcQueryStats; use crate::stats::RpcQueryStats;
use derive_more::From; use derive_more::From;
@ -136,9 +136,12 @@ impl StatBuffer {
let mut db_save_interval = let mut db_save_interval =
interval(Duration::from_secs(self.db_save_interval_seconds as u64)); interval(Duration::from_secs(self.db_save_interval_seconds as u64));
// TODO: this should be a FlushedStats that we add to
let mut total_frontend_requests = 0; let mut total_frontend_requests = 0;
let mut tsdb_frontend_requests = 0; let mut tsdb_frontend_requests = 0;
let mut tsdb_internal_requests = 0;
let mut db_frontend_requests = 0; let mut db_frontend_requests = 0;
let mut db_internal_requests = 0;
loop { loop {
select! { select! {
@ -154,17 +157,19 @@ impl StatBuffer {
_ = db_save_interval.tick() => { _ = db_save_interval.tick() => {
// TODO: tokio spawn this! (but with a semaphore on db_save_interval) // TODO: tokio spawn this! (but with a semaphore on db_save_interval)
trace!("DB save internal tick"); trace!("DB save internal tick");
let (count, new_frontend_requests) = self.save_relational_stats().await; let (count, new_frontend_requests, new_internal_requests) = self.save_relational_stats().await;
if count > 0 { if count > 0 {
db_frontend_requests += new_frontend_requests; db_frontend_requests += new_frontend_requests;
db_internal_requests += new_internal_requests;
debug!("Saved {} stats for {} requests to the relational db", count, new_frontend_requests); debug!("Saved {} stats for {} requests to the relational db", count, new_frontend_requests);
} }
} }
_ = tsdb_save_interval.tick() => { _ = tsdb_save_interval.tick() => {
trace!("TSDB save internal tick"); trace!("TSDB save internal tick");
let (count, new_frontend_requests) = self.save_tsdb_stats().await; let (count, new_frontend_requests, new_internal_requests) = self.save_tsdb_stats().await;
if count > 0 { if count > 0 {
tsdb_frontend_requests += new_frontend_requests; tsdb_frontend_requests += new_frontend_requests;
tsdb_internal_requests += new_internal_requests;
debug!("Saved {} stats for {} requests to the tsdb @ {}/{}", count, new_frontend_requests, self.tsdb_window, self.num_tsdb_windows); debug!("Saved {} stats for {} requests to the tsdb @ {}/{}", count, new_frontend_requests, self.tsdb_window, self.num_tsdb_windows);
} }
} }
@ -174,7 +179,10 @@ impl StatBuffer {
let flushed_stats = self._flush(&mut stat_receiver).await?; let flushed_stats = self._flush(&mut stat_receiver).await?;
tsdb_frontend_requests += flushed_stats.timeseries_frontend_requests; tsdb_frontend_requests += flushed_stats.timeseries_frontend_requests;
tsdb_internal_requests += flushed_stats.timeseries_internal_requests;
db_frontend_requests += flushed_stats.relational_frontend_requests; db_frontend_requests += flushed_stats.relational_frontend_requests;
db_internal_requests += flushed_stats.relational_internal_requests;
if let Err(err) = x.send(flushed_stats) { if let Err(err) = x.send(flushed_stats) {
error!(?flushed_stats, ?err, "unable to notify about flushed stats"); error!(?flushed_stats, ?err, "unable to notify about flushed stats");
@ -218,34 +226,32 @@ impl StatBuffer {
let flushed_stats = self._flush(&mut stat_receiver).await?; let flushed_stats = self._flush(&mut stat_receiver).await?;
tsdb_frontend_requests += flushed_stats.timeseries_frontend_requests; tsdb_frontend_requests += flushed_stats.timeseries_frontend_requests;
db_frontend_requests += flushed_stats.relational_frontend_requests; tsdb_internal_requests += flushed_stats.timeseries_internal_requests;
// TODO: if these totals don't match, something is wrong! db_frontend_requests += flushed_stats.relational_frontend_requests;
info!(%total_frontend_requests, %tsdb_frontend_requests, %db_frontend_requests, "accounting and stat save loop complete"); db_internal_requests += flushed_stats.relational_internal_requests;
// TODO: if these totals don't match, something is wrong! log something or maybe even return an error
info!(%total_frontend_requests, %tsdb_frontend_requests, %tsdb_internal_requests, %db_frontend_requests, %db_internal_requests, "accounting and stat save loop complete");
Ok(()) Ok(())
} }
async fn _buffer_app_stat(&mut self, stat: AppStat) -> Web3ProxyResult<u64> { async fn _buffer_app_stat(&mut self, stat: AppStat) -> Web3ProxyResult<u64> {
match stat { match stat {
AppStat::RpcQuery(request_metadata) => { AppStat::RpcQuery(web3_request) => self._buffer_web3_request(web3_request).await,
self._buffer_request_metadata(request_metadata).await
}
} }
} }
async fn _buffer_request_metadata( async fn _buffer_web3_request(&mut self, web3_request: Web3Request) -> Web3ProxyResult<u64> {
&mut self,
request_metadata: RequestMetadata,
) -> Web3ProxyResult<u64> {
// we convert on this side of the channel so that we don't slow down the request // we convert on this side of the channel so that we don't slow down the request
let stat = RpcQueryStats::try_from_metadata(request_metadata)?; let stat = RpcQueryStats::try_from_metadata(web3_request)?;
// update the latest balance // update the latest balance
// do this BEFORE emitting any stats // do this BEFORE emitting any stats
let mut approximate_balance_remaining = 0.into(); let mut approximate_balance_remaining = 0.into();
let mut active_premium = false; let mut active_premium = false;
if let Ok(db_conn) = global_db_conn().await { if let Ok(db_conn) = global_db_conn() {
let user_id = stat.authorization.checks.user_id; let user_id = stat.authorization.checks.user_id;
// update the user's balance // update the user's balance
@ -359,15 +365,19 @@ impl StatBuffer {
// flush the buffers // flush the buffers
// TODO: include frontend counts here // TODO: include frontend counts here
let (tsdb_count, tsdb_frontend_requests) = self.save_tsdb_stats().await; let (timeseries_count, timeseries_frontend_requests, timeseries_internal_requests) =
let (relational_count, relational_frontend_requests) = self.save_relational_stats().await; self.save_tsdb_stats().await;
let (relational_count, relational_frontend_requests, relational_internal_requests) =
self.save_relational_stats().await;
// notify // notify
let flushed_stats = FlushedStats { let flushed_stats = FlushedStats {
timeseries: tsdb_count, timeseries: timeseries_count,
timeseries_frontend_requests: tsdb_frontend_requests, timeseries_frontend_requests,
timeseries_internal_requests,
relational: relational_count, relational: relational_count,
relational_frontend_requests, relational_frontend_requests,
relational_internal_requests,
}; };
trace!(?flushed_stats); trace!(?flushed_stats);
@ -375,14 +385,16 @@ impl StatBuffer {
Ok(flushed_stats) Ok(flushed_stats)
} }
async fn save_relational_stats(&mut self) -> (usize, u64) { async fn save_relational_stats(&mut self) -> (usize, u64, u64) {
let mut count = 0; let mut count = 0;
let mut frontend_requests = 0; let mut frontend_requests = 0;
let mut internal_requests = 0;
if let Ok(db_conn) = global_db_conn().await { if let Ok(db_conn) = global_db_conn() {
count = self.accounting_db_buffer.len(); count = self.accounting_db_buffer.len();
for (key, stat) in self.accounting_db_buffer.drain() { for (key, stat) in self.accounting_db_buffer.drain() {
let new_frontend_requests = stat.frontend_requests; let new_frontend_requests = stat.frontend_requests;
let is_internal = matches!(key.authorization_type, AuthorizationType::Internal);
// TODO: batch saves // TODO: batch saves
// TODO: i don't like passing key (which came from the stat) to the function on the stat. but it works for now // TODO: i don't like passing key (which came from the stat) to the function on the stat. but it works for now
@ -397,20 +409,24 @@ impl StatBuffer {
.await .await
{ {
// TODO: save the stat and retry later! // TODO: save the stat and retry later!
error!(?err, %count, %new_frontend_requests, "unable to save accounting entry!"); error!(?err, %count, %new_frontend_requests, %is_internal, "unable to save accounting entry!");
} else if is_internal {
internal_requests += new_frontend_requests;
} else { } else {
frontend_requests += new_frontend_requests; frontend_requests += new_frontend_requests;
}; };
} }
} }
(count, frontend_requests) (count, frontend_requests, internal_requests)
} }
// TODO: bucket should be an enum so that we don't risk typos // TODO: bucket should be an enum so that we don't risk typos
async fn save_tsdb_stats(&mut self) -> (usize, u64) { // TODO: return type should be a struct so we dont mix up the values
async fn save_tsdb_stats(&mut self) -> (usize, u64, u64) {
let mut count = 0; let mut count = 0;
let mut frontend_requests = 0; let mut frontend_requests = 0;
let mut internal_requests = 0;
if let Some(influxdb_client) = self.influxdb_client.as_ref() { if let Some(influxdb_client) = self.influxdb_client.as_ref() {
// every time we save, we increment the tsdb_window. this is used to ensure that stats don't overwrite others because the keys match // every time we save, we increment the tsdb_window. this is used to ensure that stats don't overwrite others because the keys match
@ -434,6 +450,7 @@ impl StatBuffer {
for (key, stat) in self.global_timeseries_buffer.drain() { for (key, stat) in self.global_timeseries_buffer.drain() {
// TODO: i don't like passing key (which came from the stat) to the function on the stat. but it works for now // TODO: i don't like passing key (which came from the stat) to the function on the stat. but it works for now
let new_frontend_requests = stat.frontend_requests; let new_frontend_requests = stat.frontend_requests;
let is_internal = matches!(key.authorization_type, AuthorizationType::Internal);
match stat match stat
.build_timeseries_point("global_proxy", self.chain_id, key, uniq) .build_timeseries_point("global_proxy", self.chain_id, key, uniq)
@ -441,11 +458,16 @@ impl StatBuffer {
{ {
Ok(point) => { Ok(point) => {
points.push(point); points.push(point);
frontend_requests += new_frontend_requests;
if is_internal {
internal_requests += new_frontend_requests;
} else {
frontend_requests += new_frontend_requests;
};
} }
Err(err) => { Err(err) => {
// TODO: what can cause this? // TODO: what can cause this?
error!(?err, "unable to build global stat!"); error!(?err, %new_frontend_requests, % is_internal, "unable to build global stat!");
} }
}; };
} }
@ -497,6 +519,6 @@ impl StatBuffer {
} }
} }
(count, frontend_requests) (count, frontend_requests, internal_requests)
} }
} }

View File

@ -1,6 +1,6 @@
[package] [package]
name = "web3_proxy_cli" name = "web3_proxy_cli"
version = "1.43.8" version = "1.43.10"
edition = "2021" edition = "2021"
default-run = "web3_proxy_cli" default-run = "web3_proxy_cli"

View File

@ -1,4 +1,3 @@
use web3_proxy::frontend::authorization::RpcSecretKey;
use web3_proxy::prelude::anyhow::{self, Context}; use web3_proxy::prelude::anyhow::{self, Context};
use web3_proxy::prelude::argh::{self, FromArgs}; use web3_proxy::prelude::argh::{self, FromArgs};
use web3_proxy::prelude::entities::{rpc_key, user, user_tier}; use web3_proxy::prelude::entities::{rpc_key, user, user_tier};
@ -9,6 +8,7 @@ use web3_proxy::prelude::migration::sea_orm::{
use web3_proxy::prelude::serde_json::json; use web3_proxy::prelude::serde_json::json;
use web3_proxy::prelude::tracing::{debug, info}; use web3_proxy::prelude::tracing::{debug, info};
use web3_proxy::prelude::uuid::Uuid; use web3_proxy::prelude::uuid::Uuid;
use web3_proxy::secrets::RpcSecretKey;
/// change a user's tier. /// change a user's tier.
#[derive(FromArgs, PartialEq, Eq, Debug)] #[derive(FromArgs, PartialEq, Eq, Debug)]

View File

@ -1,4 +1,3 @@
use web3_proxy::frontend::authorization::RpcSecretKey;
use web3_proxy::prelude::anyhow::{self, Context}; use web3_proxy::prelude::anyhow::{self, Context};
use web3_proxy::prelude::argh::{self, FromArgs}; use web3_proxy::prelude::argh::{self, FromArgs};
use web3_proxy::prelude::entities::{rpc_key, user}; use web3_proxy::prelude::entities::{rpc_key, user};
@ -9,6 +8,7 @@ use web3_proxy::prelude::migration::sea_orm::{
use web3_proxy::prelude::tracing::info; use web3_proxy::prelude::tracing::info;
use web3_proxy::prelude::ulid::Ulid; use web3_proxy::prelude::ulid::Ulid;
use web3_proxy::prelude::uuid::Uuid; use web3_proxy::prelude::uuid::Uuid;
use web3_proxy::secrets::RpcSecretKey;
#[derive(FromArgs, PartialEq, Debug, Eq)] #[derive(FromArgs, PartialEq, Debug, Eq)]
/// Create a new user and api key /// Create a new user and api key

View File

@ -1,5 +1,4 @@
use tracing::info; use tracing::info;
use web3_proxy::frontend::authorization::RpcSecretKey;
use web3_proxy::prelude::anyhow::{self, Context}; use web3_proxy::prelude::anyhow::{self, Context};
use web3_proxy::prelude::argh::{self, FromArgs}; use web3_proxy::prelude::argh::{self, FromArgs};
use web3_proxy::prelude::entities::{rpc_key, user}; use web3_proxy::prelude::entities::{rpc_key, user};
@ -7,6 +6,7 @@ use web3_proxy::prelude::ethers::prelude::Address;
use web3_proxy::prelude::migration::sea_orm::{self, ActiveModelTrait, TransactionTrait}; use web3_proxy::prelude::migration::sea_orm::{self, ActiveModelTrait, TransactionTrait};
use web3_proxy::prelude::ulid::Ulid; use web3_proxy::prelude::ulid::Ulid;
use web3_proxy::prelude::uuid::Uuid; use web3_proxy::prelude::uuid::Uuid;
use web3_proxy::secrets::RpcSecretKey;
#[derive(FromArgs, PartialEq, Debug, Eq)] #[derive(FromArgs, PartialEq, Debug, Eq)]
/// Create a new user and api key /// Create a new user and api key

View File

@ -1,9 +1,10 @@
use std::num::NonZeroU64; use std::num::NonZeroU64;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration;
use tracing::{error, info}; use tracing::{error, info};
use web3_proxy::app::BILLING_PERIOD_SECONDS; use web3_proxy::app::BILLING_PERIOD_SECONDS;
use web3_proxy::config::TopConfig; use web3_proxy::config::TopConfig;
use web3_proxy::frontend::authorization::{Authorization, RequestMetadata, RpcSecretKey}; use web3_proxy::frontend::authorization::{Authorization, RequestOrMethod, Web3Request};
use web3_proxy::prelude::anyhow::{self, Context}; use web3_proxy::prelude::anyhow::{self, Context};
use web3_proxy::prelude::argh::{self, FromArgs}; use web3_proxy::prelude::argh::{self, FromArgs};
use web3_proxy::prelude::chrono; use web3_proxy::prelude::chrono;
@ -20,8 +21,8 @@ use web3_proxy::prelude::moka::future::Cache;
use web3_proxy::prelude::parking_lot::Mutex; use web3_proxy::prelude::parking_lot::Mutex;
use web3_proxy::prelude::tokio::sync::{broadcast, mpsc}; use web3_proxy::prelude::tokio::sync::{broadcast, mpsc};
use web3_proxy::prelude::tokio::time::Instant; use web3_proxy::prelude::tokio::time::Instant;
use web3_proxy::prelude::ulid::Ulid;
use web3_proxy::rpcs::one::Web3Rpc; use web3_proxy::rpcs::one::Web3Rpc;
use web3_proxy::secrets::RpcSecretKey;
use web3_proxy::stats::StatBuffer; use web3_proxy::stats::StatBuffer;
#[derive(FromArgs, PartialEq, Eq, Debug)] #[derive(FromArgs, PartialEq, Eq, Debug)]
@ -180,42 +181,43 @@ impl MigrateStatsToV2SubCommand {
.map(|_| Arc::new(Web3Rpc::default())) .map(|_| Arc::new(Web3Rpc::default()))
.collect(); .collect();
let request_ulid = Ulid::new();
let chain_id = x.chain_id; let chain_id = x.chain_id;
// Create RequestMetadata let method = x
let request_metadata = RequestMetadata { .method
.clone()
.unwrap_or_else(|| "unknown".to_string())
.into();
let request = RequestOrMethod::Method(method, int_request_bytes as usize);
// Create Web3Request
let web3_request = Web3Request {
archive_request: x.archive_request.into(), archive_request: x.archive_request.into(),
authorization: authorization.clone(), authorization: authorization.clone(),
backend_requests: Mutex::new(backend_rpcs), backend_requests: Mutex::new(backend_rpcs),
chain_id, chain_id,
error_response: x.error_response.into(), error_response: x.error_response.into(),
head_block: None,
// debug data is in kafka, not mysql or influx // debug data is in kafka, not mysql or influx
kafka_debug_logger: None, kafka_debug_logger: None,
method: x request,
.method
.clone()
.unwrap_or_else(|| "unknown".to_string())
.into(),
// This is not relevant in the new version // This is not relevant in the new version
no_servers: 0.into(), no_servers: 0.into(),
// Get the mean of all the request bytes
request_bytes: int_request_bytes as usize,
response_bytes: int_response_bytes.into(), response_bytes: int_response_bytes.into(),
// We did not initially record this data // We did not initially record this data
response_from_backup_rpc: false.into(), response_from_backup_rpc: false.into(),
response_timestamp: x.period_datetime.timestamp().into(), response_timestamp: x.period_datetime.timestamp().into(),
response_millis: int_response_millis.into(), response_millis: int_response_millis.into(),
// This is overwritten later on
start_instant: Instant::now(),
stat_sender: Some(stat_sender.clone()), stat_sender: Some(stat_sender.clone()),
request_ulid,
user_error_response: false.into(), user_error_response: false.into(),
usd_per_cu: top_config.app.usd_per_cu.unwrap_or_default(), usd_per_cu: top_config.app.usd_per_cu.unwrap_or_default(),
cache_mode: Default::default(),
start_instant: Instant::now(),
expire_instant: Instant::now() + Duration::from_secs(1),
}; };
request_metadata.try_send_stat()?; web3_request.try_send_stat()?;
} }
} }

View File

@ -107,28 +107,32 @@ impl ProxydSubCommand {
thread::spawn(move || loop { thread::spawn(move || loop {
match fs::read_to_string(&top_config_path) { match fs::read_to_string(&top_config_path) {
Ok(new_top_config) => match toml::from_str::<TopConfig>(&new_top_config) { Ok(new_top_config) => {
Ok(mut new_top_config) => { match toml::from_str::<TopConfig>(&new_top_config) {
new_top_config.clean(); Ok(mut new_top_config) => {
new_top_config.clean();
if new_top_config != current_config { if new_top_config != current_config {
trace!("current_config: {:#?}", current_config); trace!("current_config: {:#?}", current_config);
trace!("new_top_config: {:#?}", new_top_config); trace!("new_top_config: {:#?}", new_top_config);
// TODO: print the differences // TODO: print the differences
// TODO: first run seems to always see differences. why? // TODO: first run seems to always see differences. why?
info!("config @ {:?} changed", top_config_path); info!("config @ {:?} changed", top_config_path);
match config_sender.send(new_top_config.clone()) { match config_sender.send(new_top_config.clone()) {
Ok(()) => current_config = new_top_config, Ok(()) => current_config = new_top_config,
Err(err) => error!(?err, "unable to apply new config"), Err(err) => {
error!(?err, "unable to apply new config")
}
}
} }
} }
Err(err) => {
// TODO: panic?
error!("Unable to parse config! {:#?}", err);
}
} }
Err(err) => { }
// TODO: panic?
error!("Unable to parse config! {:#?}", err);
}
},
Err(err) => { Err(err) => {
// TODO: panic? // TODO: panic?
error!("Unable to read config! {:#?}", err); error!("Unable to read config! {:#?}", err);
@ -299,7 +303,7 @@ impl ProxydSubCommand {
} }
// TODO: make sure this happens even if we exit with an error // TODO: make sure this happens even if we exit with an error
if let Ok(db_conn) = global_db_conn().await { if let Ok(db_conn) = global_db_conn() {
/* /*
From the sqlx docs: From the sqlx docs:

View File

@ -11,9 +11,8 @@ use web3_proxy::prelude::rdkafka::{
}; };
use web3_proxy::prelude::rmp_serde; use web3_proxy::prelude::rmp_serde;
use web3_proxy::prelude::uuid::Uuid; use web3_proxy::prelude::uuid::Uuid;
use web3_proxy::{ use web3_proxy::secrets::RpcSecretKey;
config::TopConfig, frontend::authorization::RpcSecretKey, relational_db::connect_db, use web3_proxy::{config::TopConfig, relational_db::connect_db};
};
/// Second subcommand. /// Second subcommand.
#[derive(FromArgs, PartialEq, Debug, Eq)] #[derive(FromArgs, PartialEq, Debug, Eq)]

View File

@ -1,5 +1,4 @@
use tracing::{debug, info}; use tracing::{debug, info};
use web3_proxy::frontend::authorization::RpcSecretKey;
use web3_proxy::prelude::anyhow::{self, Context}; use web3_proxy::prelude::anyhow::{self, Context};
use web3_proxy::prelude::argh::{self, FromArgs}; use web3_proxy::prelude::argh::{self, FromArgs};
use web3_proxy::prelude::entities::{rpc_key, user}; use web3_proxy::prelude::entities::{rpc_key, user};
@ -9,6 +8,7 @@ use web3_proxy::prelude::sea_orm::{
QueryFilter, QueryFilter,
}; };
use web3_proxy::prelude::uuid::Uuid; use web3_proxy::prelude::uuid::Uuid;
use web3_proxy::secrets::RpcSecretKey;
/// change a key's owner. /// change a key's owner.
#[derive(FromArgs, PartialEq, Eq, Debug)] #[derive(FromArgs, PartialEq, Eq, Debug)]

View File

@ -106,8 +106,8 @@ impl TestApp {
}, },
)]), )]),
// influxdb_client: influx.map(|x| x.client), // influxdb_client: influx.map(|x| x.client),
private_rpcs: None, private_rpcs: Default::default(),
bundler_4337_rpcs: None, bundler_4337_rpcs: Default::default(),
extra: Default::default(), extra: Default::default(),
}; };
@ -126,6 +126,7 @@ impl TestApp {
let flush_stat_buffer_sender = flush_stat_buffer_sender.clone(); let flush_stat_buffer_sender = flush_stat_buffer_sender.clone();
let shutdown_sender = shutdown_sender.clone(); let shutdown_sender = shutdown_sender.clone();
// TODO: thread isn't enough! this needs its own process for the globals to be isolated!
thread::spawn(move || { thread::spawn(move || {
let runtime = Builder::new_multi_thread() let runtime = Builder::new_multi_thread()
.enable_all() .enable_all()
@ -180,6 +181,32 @@ impl TestApp {
Ok(x) Ok(x)
} }
pub async fn flush_stats_and_wait(&self) -> anyhow::Result<FlushedStats> {
let mut x = FlushedStats::default();
loop {
// give stats time to get into the channel
// TODO: do this better
sleep(Duration::from_secs(5)).await;
// Flush all stats here
// TODO: the test should maybe pause time so that stats definitely flush from our queries.
let flush_count = self.flush_stats().await?;
x += flush_count;
if flush_count.relational_frontend_requests + flush_count.timeseries_frontend_requests
== 0
{
break;
}
info!(?flush_count);
}
Ok(x)
}
pub fn stop(&self) -> Result<usize, SendError<()>> { pub fn stop(&self) -> Result<usize, SendError<()>> {
self.shutdown_sender.send(()) self.shutdown_sender.send(())
} }

View File

@ -7,6 +7,7 @@ use web3_proxy::prelude::reqwest;
use web3_proxy::prelude::rust_decimal::{Decimal, RoundingStrategy}; use web3_proxy::prelude::rust_decimal::{Decimal, RoundingStrategy};
use web3_proxy::prelude::tokio::time::sleep; use web3_proxy::prelude::tokio::time::sleep;
use web3_proxy::rpcs::blockchain::ArcBlock; use web3_proxy::rpcs::blockchain::ArcBlock;
use web3_proxy::stats::FlushedStats;
use web3_proxy::test_utils::TestInflux; use web3_proxy::test_utils::TestInflux;
use web3_proxy::test_utils::{TestAnvil, TestMysql}; use web3_proxy::test_utils::{TestAnvil, TestMysql};
use web3_proxy_cli::test_utils::create_provider_with_rpc_key::create_provider_for_user; use web3_proxy_cli::test_utils::create_provider_with_rpc_key::create_provider_for_user;
@ -124,30 +125,12 @@ async fn test_multiple_proxies_stats_add_up() {
// Flush all stats here // Flush all stats here
// TODO: the test should maybe pause time so that stats definitely flush from our queries. // TODO: the test should maybe pause time so that stats definitely flush from our queries.
let _flush_0_count_0 = x_0.flush_stats().await.unwrap(); let mut flushed = FlushedStats::default();
let _flush_1_count_0 = x_1.flush_stats().await.unwrap();
// // the counts might actually be zero because we flushed from timers flushed += x_0.flush_stats_and_wait().await.unwrap();
// // TODO: tests should probably have the option to set flush interval to infinity for more control. flushed += x_1.flush_stats_and_wait().await.unwrap();
// info!(?flush_0_count_0);
// assert_eq!(flush_0_count_0.relational, 1);
// assert_eq!(flush_0_count_0.timeseries, 2);
// info!(?flush_1_count_0);
// assert_eq!(flush_1_count_0.relational, 1);
// assert_eq!(flush_1_count_0.timeseries, 2);
// give time for more stats to arrive info!(?flushed);
sleep(Duration::from_secs(5)).await;
// no more stats should arrive
let flush_0_count_1 = x_0.flush_stats().await.unwrap();
let flush_1_count_1 = x_1.flush_stats().await.unwrap();
info!(?flush_0_count_1);
assert_eq!(flush_0_count_1.relational, 0);
assert_eq!(flush_0_count_1.timeseries, 0);
info!(?flush_1_count_1);
assert_eq!(flush_1_count_1.relational, 0);
assert_eq!(flush_1_count_1.timeseries, 0);
// get stats now // get stats now
// todo!("Need to validate all the stat accounting now"); // todo!("Need to validate all the stat accounting now");

View File

@ -6,8 +6,9 @@ use tokio::{
}; };
use tracing::info; use tracing::info;
use web3_proxy::prelude::ethers::{ use web3_proxy::prelude::ethers::{
prelude::{Block, Transaction, TxHash, U256, U64}, prelude::{Block, Transaction, TxHash, H256, U256, U64},
providers::{Http, JsonRpcClient, Quorum, QuorumProvider, WeightedProvider}, providers::{Http, JsonRpcClient, Quorum, QuorumProvider, WeightedProvider},
types::{transaction::eip2718::TypedTransaction, Address, Bytes, Eip1559TransactionRequest},
}; };
use web3_proxy::prelude::http::StatusCode; use web3_proxy::prelude::http::StatusCode;
use web3_proxy::prelude::reqwest; use web3_proxy::prelude::reqwest;
@ -23,7 +24,7 @@ async fn it_migrates_the_db() {
let x = TestApp::spawn(&a, Some(&db), None, None).await; let x = TestApp::spawn(&a, Some(&db), None, None).await;
// we call flush stats more to be sure it works than because we expect it to save any stats // we call flush stats more to be sure it works than because we expect it to save any stats
x.flush_stats().await.unwrap(); x.flush_stats_and_wait().await.unwrap();
// drop x first to avoid spurious warnings about anvil/influx/mysql shutting down before the app // drop x first to avoid spurious warnings about anvil/influx/mysql shutting down before the app
drop(x); drop(x);
@ -104,7 +105,7 @@ async fn it_starts_and_stops() {
assert_eq!(anvil_result, proxy_result.unwrap()); assert_eq!(anvil_result, proxy_result.unwrap());
// this won't do anything since stats aren't tracked when there isn't a db // this won't do anything since stats aren't tracked when there isn't a db
let flushed = x.flush_stats().await.unwrap(); let flushed = x.flush_stats_and_wait().await.unwrap();
assert_eq!(flushed.relational, 0); assert_eq!(flushed.relational, 0);
assert_eq!(flushed.timeseries, 0); assert_eq!(flushed.timeseries, 0);
@ -116,9 +117,9 @@ async fn it_starts_and_stops() {
/// TODO: have another test that makes sure error codes match /// TODO: have another test that makes sure error codes match
#[test_log::test(tokio::test)] #[test_log::test(tokio::test)]
async fn it_matches_anvil() { async fn it_matches_anvil() {
let a = TestAnvil::spawn(31337).await; let chain_id = 31337;
// TODO: send some test transactions let a = TestAnvil::spawn(chain_id).await;
a.provider.request::<_, U64>("evm_mine", ()).await.unwrap(); a.provider.request::<_, U64>("evm_mine", ()).await.unwrap();
@ -167,12 +168,91 @@ async fn it_matches_anvil() {
let balance: U256 = quorum_provider let balance: U256 = quorum_provider
.request( .request(
"eth_getBalance", "eth_getBalance",
(block_with_tx.unwrap().author.unwrap(), "latest"), (block_with_tx.as_ref().unwrap().author.unwrap(), "latest"),
) )
.await .await
.unwrap(); .unwrap();
info!(%balance); info!(%balance);
let singleton_deploy_from: Address = "0xBb6e024b9cFFACB947A71991E386681B1Cd1477D"
.parse()
.unwrap();
let wallet = a.wallet(0);
let x = quorum_provider
.request::<_, Option<Transaction>>(
"eth_getTransactionByHash",
["0x803351deb6d745e91545a6a3e1c0ea3e9a6a02a1a4193b70edfcd2f40f71a01c"],
)
.await
.unwrap();
assert!(x.is_none());
let gas_price: U256 = quorum_provider.request("eth_gasPrice", ()).await.unwrap();
let tx = TypedTransaction::Eip1559(Eip1559TransactionRequest {
chain_id: Some(chain_id),
to: Some(singleton_deploy_from.into()),
gas: Some(21000.into()),
value: Some("24700000000000000".parse().unwrap()),
max_fee_per_gas: Some(gas_price * U256::from(2)),
..Default::default()
});
let sig = wallet.sign_transaction_sync(&tx).unwrap();
let raw_tx = tx.rlp_signed(&sig);
// fund singleton deployer
// TODO: send through the quorum provider. it should detect that its already confirmed
let fund_tx_hash: H256 = a
.provider
.request("eth_sendRawTransaction", [raw_tx])
.await
.unwrap();
info!(%fund_tx_hash);
// deploy singleton deployer
// TODO: send through the quorum provider. it should detect that its already confirmed
let deploy_tx: H256 = a.provider.request("eth_sendRawTransaction", ["0xf9016c8085174876e8008303c4d88080b90154608060405234801561001057600080fd5b50610134806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c80634af63f0214602d575b600080fd5b60cf60048036036040811015604157600080fd5b810190602081018135640100000000811115605b57600080fd5b820183602082011115606c57600080fd5b80359060200191846001830284011164010000000083111715608d57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550509135925060eb915050565b604080516001600160a01b039092168252519081900360200190f35b6000818351602085016000f5939250505056fea26469706673582212206b44f8a82cb6b156bfcc3dc6aadd6df4eefd204bc928a4397fd15dacf6d5320564736f6c634300060200331b83247000822470"]).await.unwrap();
assert_eq!(
deploy_tx,
"0x803351deb6d745e91545a6a3e1c0ea3e9a6a02a1a4193b70edfcd2f40f71a01c"
.parse()
.unwrap()
);
let code: Bytes = quorum_provider
.request(
"eth_getCode",
("0xce0042B868300000d44A59004Da54A005ffdcf9f", "latest"),
)
.await
.unwrap();
info!(%code);
let deploy_tx = quorum_provider
.request::<_, Option<Transaction>>(
"eth_getTransactionByHash",
["0x803351deb6d745e91545a6a3e1c0ea3e9a6a02a1a4193b70edfcd2f40f71a01c"],
)
.await
.unwrap()
.unwrap();
info!(?deploy_tx);
let head_block_num: U64 = quorum_provider
.request("eth_blockNumber", ())
.await
.unwrap();
let future_block: Option<ArcBlock> = quorum_provider
.request("eth_getBlockByNumber", (head_block_num + U64::one(), false))
.await
.unwrap();
assert!(future_block.is_none());
// todo!("lots more requests"); // todo!("lots more requests");
// todo!("compare batch requests"); // todo!("compare batch requests");

View File

@ -5,7 +5,6 @@ use tracing::{info, warn};
use web3_proxy::prelude::futures::future::try_join_all; use web3_proxy::prelude::futures::future::try_join_all;
use web3_proxy::prelude::reqwest; use web3_proxy::prelude::reqwest;
use web3_proxy::prelude::rust_decimal::{Decimal, RoundingStrategy}; use web3_proxy::prelude::rust_decimal::{Decimal, RoundingStrategy};
use web3_proxy::prelude::tokio::time::sleep;
use web3_proxy::rpcs::blockchain::ArcBlock; use web3_proxy::rpcs::blockchain::ArcBlock;
use web3_proxy_cli::test_utils::create_provider_with_rpc_key::create_provider_for_user; use web3_proxy_cli::test_utils::create_provider_with_rpc_key::create_provider_for_user;
use web3_proxy_cli::test_utils::rpc_key::user_get_first_rpc_key; use web3_proxy_cli::test_utils::rpc_key::user_get_first_rpc_key;
@ -95,25 +94,8 @@ async fn test_single_proxy_stats_add_up() {
try_join_all(handles).await.unwrap(); try_join_all(handles).await.unwrap();
// give stats time to get into the channel let flushed = x.flush_stats_and_wait().await.unwrap();
// TODO: do this better info!(?flushed);
sleep(Duration::from_secs(5)).await;
// Flush all stats here
// TODO: the test should maybe pause time so that stats definitely flush from our queries.
let flush_0_count_0 = x.flush_stats().await.unwrap();
warn!("Counts 0 are: {:?}", flush_0_count_0);
// we don't actually assert on these because its likely the intervals flushed most of the stats
// assert_eq!(flush_0_count_0.relational, 1);
// assert_eq!(flush_0_count_0.timeseries, 2);
// Wait a bit. TODO: instead of waiting. make flush stats more robust
sleep(Duration::from_secs(5)).await;
let flush_0_count_1 = x.flush_stats().await.unwrap();
warn!("Counts 0 are: {:?}", flush_0_count_1);
assert_eq!(flush_0_count_1.relational, 0);
assert_eq!(flush_0_count_1.timeseries, 0);
// get stats now // get stats now
// todo!("Need to validate all the stat accounting now"); // todo!("Need to validate all the stat accounting now");

View File

@ -4,7 +4,6 @@ use web3_proxy::balance::Balance;
use web3_proxy::prelude::ethers::prelude::U64; use web3_proxy::prelude::ethers::prelude::U64;
use web3_proxy::prelude::migration::sea_orm::prelude::Decimal; use web3_proxy::prelude::migration::sea_orm::prelude::Decimal;
use web3_proxy::prelude::reqwest; use web3_proxy::prelude::reqwest;
use web3_proxy::prelude::tokio::time::sleep;
use web3_proxy_cli::test_utils::{ use web3_proxy_cli::test_utils::{
admin_increases_balance::admin_increase_balance, admin_increases_balance::admin_increase_balance,
create_admin::create_user_as_admin, create_admin::create_user_as_admin,
@ -91,16 +90,8 @@ async fn test_sum_credits_used() {
let cached_query_cost: Decimal = query_cost * cache_multipler; let cached_query_cost: Decimal = query_cost * cache_multipler;
// flush stats // flush stats
let _ = x.flush_stats().await.unwrap(); let flushed = x.flush_stats_and_wait().await.unwrap();
// due to intervals, we can't be sure this is true. it should be <= info!(?flushed);
// assert_eq!(flushed.relational, 2, "relational");
// assert_eq!(flushed.timeseries, 1, "timeseries");
sleep(Duration::from_secs(1)).await;
let flushed = x.flush_stats().await.unwrap();
assert_eq!(flushed.relational, 0, "relational");
assert_eq!(flushed.timeseries, 0, "timeseries");
// TODO: sleep and then flush and make sure no more arrive // TODO: sleep and then flush and make sure no more arrive
@ -131,9 +122,10 @@ async fn test_sum_credits_used() {
.unwrap(); .unwrap();
// flush stats // flush stats
let flushed = x.flush_stats().await.unwrap(); let flushed = x.flush_stats_and_wait().await.unwrap();
assert_eq!(flushed.relational, 1); info!(?flushed);
assert_eq!(flushed.timeseries, 2); // assert_eq!(flushed.relational, 1);
// assert_eq!(flushed.timeseries, 2);
// check balance // check balance
let balance: Balance = user_get_balance(&x, &r, &user_login_response).await; let balance: Balance = user_get_balance(&x, &r, &user_login_response).await;
@ -168,9 +160,10 @@ async fn test_sum_credits_used() {
} }
// flush stats // flush stats
let flushed = x.flush_stats().await.unwrap(); let flushed = x.flush_stats_and_wait().await.unwrap();
assert_eq!(flushed.relational, 1); info!(?flushed);
assert_eq!(flushed.timeseries, 2); // assert_eq!(flushed.relational, 1);
// assert_eq!(flushed.timeseries, 2);
// check balance // check balance
info!("checking the final balance"); info!("checking the final balance");

View File

@ -184,7 +184,8 @@ async fn test_user_balance_decreases() {
} }
// Flush all stats here // Flush all stats here
let _ = x.flush_stats().await.unwrap(); let flushed = x.flush_stats_and_wait().await.unwrap();
info!(?flushed);
// assert_eq!(flush_count.timeseries, 0); // assert_eq!(flush_count.timeseries, 0);
// assert!(flush_count.relational > 0); // assert!(flush_count.relational > 0);
@ -222,7 +223,8 @@ async fn test_user_balance_decreases() {
} }
// Flush all stats here // Flush all stats here
let _ = x.flush_stats().await.unwrap(); let flushed = x.flush_stats_and_wait().await.unwrap();
info!(?flushed);
// assert_eq!(flush_count.timeseries, 0); // assert_eq!(flush_count.timeseries, 0);
// assert!(flush_count.relational == 1); // assert!(flush_count.relational == 1);
@ -357,7 +359,8 @@ async fn test_referral_bonus_non_concurrent() {
} }
// Flush all stats here // Flush all stats here
let _ = x.flush_stats().await.unwrap(); let flushed = x.flush_stats_and_wait().await.unwrap();
info!(?flushed);
// we can't assert because the intervals might flush for us // we can't assert because the intervals might flush for us
// assert_eq!(flush_count.timeseries, 0); // assert_eq!(flush_count.timeseries, 0);
// assert!(flush_count.relational > 0); // assert!(flush_count.relational > 0);
@ -521,7 +524,8 @@ async fn test_referral_bonus_concurrent_referrer_only() {
} }
// Flush all stats here // Flush all stats here
let _ = x.flush_stats().await.unwrap(); let flushed = x.flush_stats_and_wait().await.unwrap();
info!(?flushed);
// assert_eq!(flush_count.timeseries, 0); // assert_eq!(flush_count.timeseries, 0);
// assert!(flush_count.relational > 0); // assert!(flush_count.relational > 0);
@ -705,7 +709,8 @@ async fn test_referral_bonus_concurrent_referrer_and_user() {
} }
// Flush all stats here // Flush all stats here
let _ = x.flush_stats().await.unwrap(); let flushed = x.flush_stats_and_wait().await.unwrap();
info!(?flushed);
// assert_eq!(flush_count.timeseries, 0); // assert_eq!(flush_count.timeseries, 0);
// assert!(flush_count.relational > 0); // assert!(flush_count.relational > 0);