diff --git a/.dockerignore b/.dockerignore index b3902729..98024f48 100644 --- a/.dockerignore +++ b/.dockerignore @@ -6,5 +6,6 @@ perf.data.old /data/ /docker-compose* /Dockerfile +/Jenkinsfile /redis-cell-server/ /target diff --git a/Cargo.lock b/Cargo.lock index 80281cfa..feec357a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -98,9 +98,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61" +checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" dependencies = [ "backtrace", ] @@ -148,28 +148,6 @@ dependencies = [ "term", ] -[[package]] -name = "aspect" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3927b415bba088539aaaf872d19752c7d00101a25ead1d123fcd7633f9c224d" -dependencies = [ - "aspect-weave", -] - -[[package]] -name = "aspect-weave" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4f672ac5290272725e1453014af99a86d2c1712808d647f469bf9427519f41" -dependencies = [ - "indexmap", - "proc-macro2", - "quote", - "syn", - "synattra", -] - [[package]] name = "async-io" version = "1.12.0" @@ -223,9 +201,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.63" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eff18d764974428cf3a9328e23fc5c986f5fbed46e6cd4cdf42544df5d297ec1" +checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" dependencies = [ "proc-macro2", "quote", @@ -253,12 +231,12 @@ dependencies = [ ] [[package]] -name = "atomic" -version = "0.5.1" +name = "atomic-polyfill" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c" +checksum = "e3ff7eb3f316534d83a8a2c3d1674ace8a5a71198eba31e2e2b597833f699b28" dependencies = [ - "autocfg", + "critical-section", ] [[package]] @@ -272,40 +250,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "auto_enums" -version = "0.7.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0dfe45d75158751e195799f47ea02e81f570aa24bc5ef999cdd9e888c4b5c3" -dependencies = [ - "auto_enums_core", - "auto_enums_derive", -] - -[[package]] -name = "auto_enums_core" -version = "0.7.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da47c46001293a2c4b744d731958be22cff408a2ab76e2279328f9713b1267b4" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "auto_enums_derive" -version = "0.7.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41aed1da83ecdc799503b7cb94da1b45a34d72b49caf40a61d9cf5b88ec07cfd" -dependencies = [ - "autocfg", - "derive_utils", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "auto_impl" version = "0.5.0" @@ -375,12 +319,13 @@ dependencies = [ [[package]] name = "axum-client-ip" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfb5a3ddd6367075d50629546fb46710584016ae7704cd03b6d41cb5be82e5a" +checksum = "0d719fabd6813392bbc10e1fe67f2977fad52791a836e51236f7e02f2482e017" dependencies = [ "axum", "forwarded-header-value", + "serde", ] [[package]] @@ -498,6 +443,17 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2dabbe35f96fb9507f7330793dc490461b2962659ac5d427181e451a623751d1" +[[package]] +name = "bigdecimal" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6aaf33151a6429fe9211d1b276eafdf70cdff28b071e76c0b0e1503221ea3744" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + [[package]] name = "bincode" version = "1.3.3" @@ -649,6 +605,18 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" +[[package]] +name = "bstr" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" +dependencies = [ + "lazy_static", + "memchr", + "regex-automata", + "serde", +] + [[package]] name = "bumpalo" version = "3.12.0" @@ -777,9 +745,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" dependencies = [ "jobserver", ] @@ -960,7 +928,7 @@ version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3993e6445baa160675931ec041a5e03ca84b9c6e32a056150d3aa2bdda0a1f45" dependencies = [ - "encode_unicode", + "encode_unicode 0.3.6", "lazy_static", "libc", "regex", @@ -975,7 +943,7 @@ version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3d79fbe8970a77e3e34151cc13d3b3e248aa0faaecb9f6091fa07ebefe5ad60" dependencies = [ - "encode_unicode", + "encode_unicode 0.3.6", "lazy_static", "libc", "windows-sys", @@ -1038,21 +1006,6 @@ dependencies = [ "libc", ] -[[package]] -name = "crc" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53757d12b596c16c78b83458d732a5d1a17ab3f53f2f7412f6fb57cc8a140ab3" -dependencies = [ - "crc-catalog", -] - -[[package]] -name = "crc-catalog" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" - [[package]] name = "crc32fast" version = "1.3.2" @@ -1062,6 +1015,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "critical-section" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6548a0ad5d2549e111e1f6a11a6c2e2d00ce6a3dafe22948d67c2b443f775e52" + [[package]] name = "crossbeam-channel" version = "0.5.6" @@ -1153,6 +1112,28 @@ dependencies = [ "typenum", ] +[[package]] +name = "csv" +version = "1.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" +dependencies = [ + "bstr", + "csv-core", + "itoa 0.4.8", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +dependencies = [ + "memchr", +] + [[package]] name = "ctr" version = "0.9.2" @@ -1164,9 +1145,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.87" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b61a7545f753a88bcbe0a70de1fcc0221e10bfc752f576754fa91e663db1622e" +checksum = 
"322296e2f2e5af4270b54df9e85a02ff037e271af20ba3e7fe1575515dc840b8" dependencies = [ "cc", "cxxbridge-flags", @@ -1176,9 +1157,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.87" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f464457d494b5ed6905c63b0c4704842aba319084a0a3561cdc1359536b53200" +checksum = "017a1385b05d631e7875b1f151c9f012d37b53491e2a87f65bff5c262b2111d8" dependencies = [ "cc", "codespan-reporting", @@ -1191,15 +1172,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.87" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43c7119ce3a3701ed81aca8410b9acf6fc399d2629d057b87e2efa4e63a3aaea" +checksum = "c26bbb078acf09bc1ecda02d4223f03bdd28bd4874edcb0379138efc499ce971" [[package]] name = "cxxbridge-macro" -version = "1.0.87" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e07508b90551e610910fa648a1878991d367064997a596135b86df30daf07e" +checksum = "357f40d1f06a24b60ae1fe122542c1fb05d28d32acb2aed064e84bc2ad1e252e" dependencies = [ "proc-macro2", "quote", @@ -1247,7 +1228,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" dependencies = [ "serde", - "uuid 1.2.2", + "uuid 1.3.0", ] [[package]] @@ -1296,17 +1277,6 @@ dependencies = [ "syn", ] -[[package]] -name = "derive_utils" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532b4c15dccee12c7044f1fcad956e98410860b22231e44a3b827464797ca7bf" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "dialoguer" version = "0.8.0" @@ -1375,12 +1345,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "doc-comment" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" - [[package]] name = "dotenv" version = "0.15.0" @@ -1413,9 +1377,9 @@ dependencies = [ [[package]] name = "either" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "elliptic-curve" @@ -1452,6 +1416,12 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" +[[package]] +name = "encode_unicode" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" + [[package]] name = "encoding_rs" version = "0.8.31" @@ -1469,7 +1439,7 @@ dependencies = [ "sea-orm", "serde", "ulid", - "uuid 1.2.2", + "uuid 1.3.0", ] [[package]] @@ -1980,12 +1950,6 @@ dependencies = [ "libc", ] -[[package]] -name = "ftoa" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca45aac12b6c561b6289bc68957cb1db3dccf870e1951d590202de5e24f1dd35" - [[package]] name = "funty" version = "2.0.0" @@ -1994,9 +1958,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" dependencies = [ "futures-channel", "futures-core", @@ -2009,9 +1973,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" dependencies = [ "futures-core", "futures-sink", @@ -2019,15 +1983,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" +checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" [[package]] name = "futures-executor" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" dependencies = [ "futures-core", "futures-task", @@ -2048,9 +2012,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" [[package]] name = "futures-lite" @@ -2079,9 +2043,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" dependencies = [ "proc-macro2", "quote", @@ -2090,15 +2054,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" +checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" [[package]] name = "futures-task" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" +checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" [[package]] name = "futures-timer" @@ -2108,9 +2072,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" dependencies = [ "futures-channel", "futures-core", @@ -2231,6 +2195,15 @@ dependencies = [ "thiserror", ] +[[package]] +name = "hash32" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" +dependencies = [ + "byteorder", +] + [[package]] name = "hashbrown" version = "0.11.2" @@ -2316,6 +2289,19 @@ dependencies = [ "http", ] +[[package]] +name = 
"heapless" +version = "0.7.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db04bc24a18b9ea980628ecf00e6c0264f3c1426dac36c00cb49b6fbad8b0743" +dependencies = [ + "atomic-polyfill", + "hash32", + "rustc_version", + "spin 0.9.4", + "stable_deref_trait", +] + [[package]] name = "heck" version = "0.3.3" @@ -2556,7 +2542,6 @@ checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg", "hashbrown 0.12.3", - "serde", ] [[package]] @@ -2863,36 +2848,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "metered" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17491527d2ceff20d00d02166bdd18e23056e7ced22b9a8bb0efdfd293f0441a" -dependencies = [ - "aspect", - "atomic", - "cfg-if", - "hdrhistogram", - "metered-macro", - "parking_lot 0.12.1", - "serde", -] - -[[package]] -name = "metered-macro" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5ef9d33baa693e2d449d069f6ef6eb549762ed0c0179976c45bd98f3aa4a4e1" -dependencies = [ - "aspect-weave", - "heck 0.4.0", - "indexmap", - "proc-macro2", - "quote", - "syn", - "synattra", -] - [[package]] name = "migration" version = "0.13.0" @@ -2936,9 +2891,9 @@ dependencies = [ [[package]] name = "moka" -version = "0.9.6" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b49a05f67020456541f4f29cbaa812016a266a86ec76f96d3873d459c68fe5e" +checksum = "19b9268097a2cf211ac9955b1cc95e80fa84fff5c2d13ba292916445dc8a311f" dependencies = [ "async-io", "async-lock", @@ -2956,7 +2911,7 @@ dependencies = [ "tagptr", "thiserror", "triomphe", - "uuid 1.2.2", + "uuid 1.3.0", ] [[package]] @@ -3001,9 +2956,9 @@ checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" [[package]] name = "notify" -version = "5.0.0" +version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed2c66da08abae1c024c01d635253e402341b4060a12e99b31c7594063bf490a" +checksum = "58ea850aa68a06e48fdb069c0ec44d0d64c8dbffa49bf3b6f7f0a901fdea1ba9" dependencies = [ "bitflags", "crossbeam-channel", @@ -3014,17 +2969,7 @@ dependencies = [ "libc", "mio", "walkdir", - "winapi", -] - -[[package]] -name = "nu-ansi-term" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" -dependencies = [ - "overload", - "winapi", + "windows-sys", ] [[package]] @@ -3191,9 +3136,9 @@ dependencies = [ [[package]] name = "os_info" -version = "3.5.1" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4750134fb6a5d49afc80777394ad5d95b04bc12068c6abb92fae8f43817270f" +checksum = "5c424bc68d15e0778838ac013b5b3449544d8133633d8016319e7e05a820b8c0" dependencies = [ "log", "serde", @@ -3229,12 +3174,6 @@ dependencies = [ "syn", ] -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - [[package]] name = "pagerduty-rs" version = "0.1.6" @@ -3590,6 +3529,20 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" +[[package]] +name = "prettytable" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"46480520d1b77c9a3482d39939fcf96831537a250ec62d4fd8fbdf8e0302e781" +dependencies = [ + "csv", + "encode_unicode 1.0.0", + "is-terminal", + "lazy_static", + "term", + "unicode-width", +] + [[package]] name = "primitive-types" version = "0.12.1" @@ -3620,7 +3573,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "66618389e4ec1c7afe67d51a9bf34ff9236480f8d51e7489b7d5ab0303c13f34" dependencies = [ "once_cell", - "toml_edit", + "toml_edit 0.18.0", ] [[package]] @@ -4203,15 +4156,15 @@ dependencies = [ [[package]] name = "sea-orm" -version = "0.10.7" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88694d01b528a94f90ad87f8d2f546d060d070eee180315c67d158cb69476034" +checksum = "e7a0e3ec90718d849c73b167df7a476672b64c7ee5f3c582179069e63b2451e1" dependencies = [ "async-stream", "async-trait", + "bigdecimal", "chrono", "futures", - "futures-util", "log", "ouroboros", "rust_decimal", @@ -4226,14 +4179,14 @@ dependencies = [ "time 0.3.17", "tracing", "url", - "uuid 1.2.2", + "uuid 1.3.0", ] [[package]] name = "sea-orm-cli" -version = "0.10.7" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ebe1f820fe8949cf6a57272ba9ebd0be766e47c9b85c04b3cabea40ab9459b3" +checksum = "992bc003ed84e736daa19d1b562bd80fa2de09d7bca70cb1745adec3f3b54064" dependencies = [ "chrono", "clap", @@ -4247,9 +4200,9 @@ dependencies = [ [[package]] name = "sea-orm-macros" -version = "0.10.7" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7216195de9c6b2474fd0efab486173dccd0eff21f28cc54aa4c0205d52fb3af0" +checksum = "5d89f7d4d2533c178e08a9e1990619c391e9ca7b402851d02a605938b15e03d9" dependencies = [ "bae", "heck 0.3.3", @@ -4260,13 +4213,14 @@ dependencies = [ [[package]] name = "sea-orm-migration" -version = "0.10.7" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ed3cdfa669e4c385922f902b9a58e0c2128782a4d0fe79c6c34f3b927565e5b" +checksum = "355b1e2763e73d36de6f4539b04fc5d01b232e5c97785e0d08c4becbc2accad3" dependencies = [ "async-trait", "clap", "dotenvy", + "futures", "sea-orm", "sea-orm-cli", "sea-schema", @@ -4276,40 +4230,42 @@ dependencies = [ [[package]] name = "sea-query" -version = "0.27.2" +version = "0.28.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4f0fc4d8e44e1d51c739a68d336252a18bc59553778075d5e32649be6ec92ed" +checksum = "d2fbe015dbdaa7d8829d71c1e14fb6289e928ac256b93dfda543c85cd89d6f03" dependencies = [ + "bigdecimal", "chrono", "rust_decimal", "sea-query-derive", "serde_json", "time 0.3.17", - "uuid 1.2.2", + "uuid 1.3.0", ] [[package]] name = "sea-query-binder" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c2585b89c985cfacfe0ec9fc9e7bb055b776c1a2581c4e3c6185af2b8bf8865" +checksum = "03548c63aec07afd4fd190923e0160d2f2fc92def27470b54154cf232da6203b" dependencies = [ + "bigdecimal", "chrono", "rust_decimal", "sea-query", "serde_json", "sqlx", "time 0.3.17", - "uuid 1.2.2", + "uuid 1.3.0", ] [[package]] name = "sea-query-derive" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34cdc022b4f606353fe5dc85b09713a04e433323b70163e81513b141c6ae6eb5" +checksum = "63f62030c60f3a691f5fe251713b4e220b306e50a71e1d6f9cce1f24bb781978" dependencies = [ - "heck 0.3.3", + "heck 0.4.0", "proc-macro2", "quote", "syn", @@ -4318,9 +4274,9 @@ 
dependencies = [ [[package]] name = "sea-schema" -version = "0.10.3" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d5fda574d980e9352b6c7abd6fc75697436fe0078cac2b548559b52643ad3b" +checksum = "eeb2940bb5a10bc6cd05b450ce6cd3993e27fddd7eface2becb97fc5af3a040e" dependencies = [ "futures", "sea-query", @@ -4398,9 +4354,9 @@ checksum = "930c0acf610d3fdb5e2ab6213019aaa04e227ebe9547b0649ba599b16d788bd7" [[package]] name = "sentry" -version = "0.29.2" +version = "0.29.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6097dc270a9c4555c5d6222ed243eaa97ff38e29299ed7c5cb36099033c604e" +checksum = "c6f8ce69326daef9d845c3fd17149bd3dbd7caf5dc65dbbad9f5441a40ee407f" dependencies = [ "httpdate", "reqwest", @@ -4418,9 +4374,9 @@ dependencies = [ [[package]] name = "sentry-anyhow" -version = "0.29.2" +version = "0.29.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a52d909ea1f5107fe29aa86581da01b88bde811fbde875773237c1596fbab6" +checksum = "a80510663e6b711de2eed521a95dc38435a0e5858397d1acec79185e4a44215b" dependencies = [ "anyhow", "sentry-backtrace", @@ -4429,9 +4385,9 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.29.2" +version = "0.29.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d92d1e4d591534ae4f872d6142f3b500f4ffc179a6aed8a3e86c7cc96d10a6a" +checksum = "3ed6c0254d4cce319800609aa0d41b486ee57326494802045ff27434fc9a2030" dependencies = [ "backtrace", "once_cell", @@ -4441,9 +4397,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.29.2" +version = "0.29.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3afa877b1898ff67dd9878cf4bec4e53cef7d3be9f14b1fc9e4fcdf36f8e4259" +checksum = "d3277dc5d2812562026f2095c7841f3d61bbe6789159b7da54f41d540787f818" dependencies = [ "hostname", "libc", @@ -4455,9 +4411,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.29.2" +version = "0.29.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc43eb7e4e3a444151a0fe8a0e9ce60eabd905dae33d66e257fa26f1b509c1bd" +checksum = "b5acbd3da4255938cf0384b6b140e6c07ff65919c26e4d7a989d8d90ee88fa91" dependencies = [ "once_cell", "rand", @@ -4468,9 +4424,9 @@ dependencies = [ [[package]] name = "sentry-log" -version = "0.29.2" +version = "0.29.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "598aefe14750bcec956adebc8992dd432f4e22c12cd524633963113864aa39b4" +checksum = "a4b922394014861334c24388a55825e4c715afb8ec7c1db900175aa9951f8241" dependencies = [ "log", "sentry-core", @@ -4478,9 +4434,9 @@ dependencies = [ [[package]] name = "sentry-panic" -version = "0.29.2" +version = "0.29.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccab4fab11e3e63c45f4524bee2e75cde39cdf164cb0b0cbe6ccd1948ceddf66" +checksum = "beebc7aedbd3aa470cd19caad208a5efe6c48902595c0d111a193d8ce4f7bd15" dependencies = [ "sentry-backtrace", "sentry-core", @@ -4488,9 +4444,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.29.2" +version = "0.29.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63708ec450b6bdcb657af760c447416d69c38ce421f34e5e2e9ce8118410bc7" +checksum = "10d8587b12c0b8211bb3066979ee57af6e8657e23cf439dc6c8581fd86de24e8" dependencies = [ "debugid", "getrandom", @@ -4500,7 +4456,7 @@ dependencies = [ "thiserror", "time 0.3.17", "url", - "uuid 1.2.2", + "uuid 1.3.0", ] 
[[package]] @@ -4535,9 +4491,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.91" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" +checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" dependencies = [ "itoa 1.0.5", "ryu", @@ -4554,25 +4510,32 @@ dependencies = [ ] [[package]] -name = "serde_prometheus" -version = "0.1.6" +name = "serde_plain" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25fcd6131bac47a32328d1ba1ee15a27f8d91ab2e5920dba71dbe93d2648f6b1" +checksum = "d6018081315db179d0ce57b1fe4b62a12a0028c9cf9bbef868c9cf477b3c34ae" dependencies = [ - "ftoa", - "indexmap", - "itoa 0.4.8", - "lazy_static", - "regex", "serde", - "snafu", +] + +[[package]] +name = "serde_prometheus" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb6048d9e4ebc41f7d1a42c79b04c5b460633be307620a0e34a8f81970ea47" +dependencies = [ + "heapless", + "nom", + "serde", + "serde_plain", + "thiserror", ] [[package]] name = "serde_spanned" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c68e921cef53841b8925c2abadd27c9b891d9613bdc43d6b823062866df38e8" +checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" dependencies = [ "serde", ] @@ -4737,27 +4700,6 @@ version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" -[[package]] -name = "snafu" -version = "0.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eab12d3c261b2308b0d80c26fffb58d17eba81a4be97890101f416b478c79ca7" -dependencies = [ - "doc-comment", - "snafu-derive", -] - -[[package]] -name = "snafu-derive" -version = "0.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1508efa03c362e23817f96cde18abed596a25219a8b2c66e8db33c03543d315b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "socket2" version = "0.4.7" @@ -4845,11 +4787,11 @@ checksum = "dcbc16ddba161afc99e14d1713a453747a2b07fc097d2009f4c300ec99286105" dependencies = [ "ahash 0.7.6", "atoi", + "bigdecimal", "bitflags", "byteorder", "bytes", "chrono", - "crc", "crossbeam-queue", "digest 0.10.6", "dotenvy", @@ -4888,7 +4830,7 @@ dependencies = [ "time 0.3.17", "tokio-stream", "url", - "uuid 1.2.2", + "uuid 1.3.0", "webpki-roots", ] @@ -4905,7 +4847,6 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "sha2 0.10.6", "sqlx-core", "sqlx-rt", "syn", @@ -4923,6 +4864,12 @@ dependencies = [ "tokio-rustls", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "static_assertions" version = "1.1.0" @@ -5028,17 +4975,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "synattra" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "378cd5695f9ef5a26668bb70e81a464e7de6144bac3f77f42d5fa596c690be63" -dependencies = [ - "auto_enums", - "proc-macro2", - "syn", -] - [[package]] name = "sync_wrapper" version = "0.1.1" @@ -5219,9 +5155,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" 
-version = "1.24.2" +version = "1.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a12a59981d9e3c38d216785b0c37399f6e415e8d0712047620f189371b0bb" +checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" dependencies = [ "autocfg", "bytes", @@ -5325,14 +5261,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.6.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb9d890e4dc9298b70f740f615f2e05b9db37dce531f6b24fb77ac993f9f217" +checksum = "f7afcae9e3f0fe2c370fd4657108972cbb2fa9db1b9f84849cefd80741b01cb6" dependencies = [ "serde", "serde_spanned", - "toml_datetime", - "toml_edit", + "toml_datetime 0.6.1", + "toml_edit 0.19.3", ] [[package]] @@ -5340,6 +5276,12 @@ name = "toml_datetime" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5" + +[[package]] +name = "toml_datetime" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" dependencies = [ "serde", ] @@ -5349,12 +5291,23 @@ name = "toml_edit" version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "729bfd096e40da9c001f778f5cdecbd2957929a24e10e5883d9392220a751581" +dependencies = [ + "indexmap", + "nom8", + "toml_datetime 0.5.1", +] + +[[package]] +name = "toml_edit" +version = "0.19.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6a7712b49e1775fb9a7b998de6635b299237f48b404dde71704f2e0e7f37e5" dependencies = [ "indexmap", "nom8", "serde", "serde_spanned", - "toml_datetime", + "toml_datetime 0.6.1", ] [[package]] @@ -5435,7 +5388,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ "once_cell", - "valuable", ] [[package]] @@ -5448,17 +5400,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tracing-log" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" -dependencies = [ - "lazy_static", - "log", - "tracing-core", -] - [[package]] name = "tracing-subscriber" version = "0.3.16" @@ -5466,15 +5407,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" dependencies = [ "matchers", - "nu-ansi-term", "once_cell", "regex", "sharded-slab", - "smallvec", "thread_local", "tracing", "tracing-core", - "tracing-log", ] [[package]] @@ -5604,9 +5542,9 @@ dependencies = [ [[package]] name = "unicode-segmentation" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" [[package]] name = "unicode-width" @@ -5677,20 +5615,14 @@ dependencies = [ [[package]] name = "uuid" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" +checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" dependencies = [ "getrandom", "serde", ] -[[package]] -name = "valuable" -version = "0.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" - [[package]] name = "version_check" version = "0.9.4" @@ -5856,7 +5788,6 @@ dependencies = [ "ipnet", "itertools", "log", - "metered", "migration", "moka", "notify", @@ -5864,6 +5795,7 @@ dependencies = [ "num-traits", "pagerduty-rs", "parking_lot 0.12.1", + "prettytable", "proctitle", "redis-rate-limiter", "regex", @@ -5878,12 +5810,12 @@ dependencies = [ "time 0.3.17", "tokio", "tokio-stream", - "toml 0.6.0", + "toml 0.7.2", "tower", "tower-http", "ulid", "url", - "uuid 1.2.2", + "uuid 1.3.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 1d5c2238..2c2e0f74 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,5 +11,9 @@ members = [ [profile.release] # `debug = true` so that sentry can give us line numbers debug = true + +[profile.faster_release] +inherits = "release" + # spend longer compiling for a slightly faster binary codegen-units = 1 diff --git a/Dockerfile b/Dockerfile index eb74e040..1642327d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,21 +1,52 @@ -FROM rust:1-bullseye as builder +# +# cargo-nextest +# We only pay the installation cost once, +# it will be cached from the second build onwards +# +FROM rust:1-bullseye AS builder +WORKDIR /app +ENV CARGO_TERM_COLOR always + +# a next-generation test runner for Rust projects. +# TODO: more mount type cache? +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + cargo install cargo-nextest + +# foundry is needed to run tests ENV PATH /root/.foundry/bin:$PATH RUN curl -L https://foundry.paradigm.xyz | bash && foundryup -WORKDIR /usr/src/web3_proxy +# copy the application COPY . . + +# test the application with cargo-nextest RUN --mount=type=cache,target=/usr/local/cargo/registry \ - --mount=type=cache,target=/usr/src/web3_proxy/target \ - cargo test &&\ - cargo install --locked --no-default-features --root /opt/bin --path ./web3_proxy + --mount=type=cache,target=/app/target \ + cargo nextest run -FROM debian:bullseye-slim +# build the application +# using a "release" profile (which install does) is **very** important +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/app/target \ + cargo install --locked --no-default-features --profile faster_release --root /opt/bin --path ./web3_proxy -COPY --from=builder /opt/bin/* /usr/local/bin/ +# +# We do not need the Rust toolchain to run the binary! +# +FROM debian:bullseye-slim AS runtime + +# Create llama user to avoid running container with root +RUN mkdir /llama \ + && adduser --home /llama --shell /sbin/nologin --gecos '' --no-create-home --disabled-password --uid 1001 llama \ + && chown -R llama /llama + +USER llama ENTRYPOINT ["web3_proxy_cli"] CMD [ "--config", "/web3-proxy.toml", "proxyd" ] # TODO: lower log level when done with prototyping ENV RUST_LOG "warn,web3_proxy=debug,web3_proxy_cli=debug" + +COPY --from=builder /opt/bin/* /usr/local/bin/ diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 00000000..47cb0c2b --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,174 @@ +def buildAndPush() { + // env.BRANCH_NAME is set to the git branch name by default + // env.REGISTRY is the repository url for this pipeline + // env.GIT_SHORT is the git short hash of the currently checked out repo + // env.LATEST_BRANCH is the branch name that gets tagged latest + // env.ARCH is the system architecture. 
some apps can be generic (amd64, arm64), + // but apps that compile for specific hardware (like web3-proxy) will need more specific tags (amd64_epyc2, arm64_graviton2, intel_xeon3, etc.) + + // TODO: check that this system actually matches the given arch + sh '''#!/bin/bash + set -eux -o pipefail + + [ -n "$GIT_SHORT" ] + [ -n "$GIT_SHORT" ] + [ -n "$REGISTRY" ] + [ -n "$ARCH" ] + + # deterministic mtime on .git keeps Dockerfiles that do 'ADD . .' or similar + # without this, the build process always thinks the directory has changes + git restore-mtime + touch -t "$(git show -s --date=format:'%Y%m%d%H%M.%S' --format=%cd HEAD)" .git + + function buildAndPush { + image=$1 + buildcache=$2 + + buildctl build \ + --frontend=dockerfile.v0 \ + --local context=. \ + --local dockerfile=. \ + --output "type=image,name=${image},push=true" \ + --export-cache type=s3,region=us-east-2,bucket=llamarpc-buildctl-cache,name=${buildcache} \ + --import-cache type=s3,region=us-east-2,bucket=llamarpc-buildctl-cache,name=${buildcache} \ + ; + } + + BUILDCACHE="${REGISTRY}:buildcache_${ARCH}" + + # build and push a docker image tagged with the short git commit + buildAndPush "${REGISTRY}:git_${GIT_SHORT}_${ARCH}" "${BUILDCACHE}" + + # push an image tagged with the branch + # since buildAndPush just ran above, this should be very quick + # TODO: maybe replace slashes in the name with dashes or underscores + buildAndPush "${REGISTRY}:branch_${BRANCH_NAME}_${ARCH}" "${BUILDCACHE}" + + if [ "${BRANCH_NAME}" = "${LATEST_BRANCH}" ]; then + buildAndPush "${REGISTRY}:latest_${ARCH}" "${BUILDCACHE}" + fi + ''' +} + +pipeline { + agent any + options { + ansiColor('xterm') + } + environment { + // AWS_ECR_URL needs to be set in jenkin's config. + // AWS_ECR_URL could really be any docker registry. we just use ECR so that we don't have to manage it + REGISTRY="${AWS_ECR_URL}/web3-proxy" + + // branch that should get tagged with "latest_$arch" (stable, main, master, etc.) + LATEST_BRANCH="main" + + // non-buildkit builds are officially deprecated + // buildkit is much faster and handles caching much better than the default build process. 
+ DOCKER_BUILDKIT=1 + + GIT_SHORT="${GIT_COMMIT.substring(0,8)}" + } + stages { + stage('build and push') { + parallel { + stage('build and push amd64_epyc2 image') { + agent { + label 'amd64_epyc2' + } + environment { + ARCH="amd64_epyc2" + } + steps { + script { + buildAndPush() + } + } + } + stage('build and push amd64_epyc3 image') { + agent { + label 'amd64_epyc3' + } + environment { + ARCH="amd64_epyc3" + } + steps { + script { + buildAndPush() + } + } + } + stage('Build and push arm64_graviton1 image') { + agent { + label 'arm64_graviton1' + } + environment { + ARCH="arm64_graviton1" + } + steps { + script { + buildAndPush() + } + } + } + stage('Build and push arm64_graviton2 image') { + agent { + label 'arm64_graviton2' + } + environment { + ARCH="arm64_graviton2" + } + steps { + script { + buildAndPush() + } + } + } + stage('Build and push intel_xeon3 image') { + agent { + label 'intel_xeon3' + } + environment { + ARCH="intel_xeon3" + } + steps { + script { + buildAndPush() + } + } + } + } + + } + stage('create (experimental) manifest') { + agent any + steps { + script { + sh '''#!/bin/bash + set -eux -o pipefail + + [ -n "$BRANCH_NAME" ] + [ -n "$GIT_SHORT" ] + [ -n "$LATEST_BRANCH" ] + [ -n "$REGISTRY" ] + + function manifest { + repo=$1 + + docker manifest create "${repo}" --amend "${repo}_arm64_graviton2" --amend "${repo}_amd64_epyc2" --amend "${repo}_intel_xeon3" + + docker manifest push --purge "${repo}" + } + + manifest "${REGISTRY}:git_${GIT_SHORT}" + manifest "${REGISTRY}:branch_${BRANCH_NAME}" + + if [ "${BRANCH_NAME}" = "${LATEST_BRANCH}" ]; then + manifest "${REGISTRY}:latest" + fi + ''' + } + } + } + } +} diff --git a/README.md b/README.md index 9a0ade50..e3af4e51 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ Options: Start the server with the defaults (listen on `http://localhost:8544` and use `./config/development.toml` which uses the database and cache running under docker and proxies to a bunch of public nodes: ``` -cargo run --release -- daemon +cargo run --release -- proxyd ``` ## Common commands diff --git a/TODO.md b/TODO.md index f115d336..207567c4 100644 --- a/TODO.md +++ b/TODO.md @@ -243,8 +243,8 @@ These are roughly in order of completition - [x] cache the status page for a second - [x] request accounting for websockets - [x] database merge scripts -- [x] test that sets up a Web3Connection and asks "has_block" for old and new blocks -- [x] test that sets up Web3Connections with 2 nodes. one behind by several blocks. and see what the "next" server shows as +- [x] test that sets up a Web3Rpc and asks "has_block" for old and new blocks +- [x] test that sets up Web3Rpcs with 2 nodes. one behind by several blocks. and see what the "next" server shows as - [x] ethspam on bsc and polygon gives 1/4 errors. fix whatever is causing this - bugfix! we were using the whole connection list instead of just the synced connection list when picking servers. oops! - [x] actually block unauthenticated requests instead of emitting warning of "allowing without auth during development!" @@ -289,7 +289,7 @@ These are not yet ordered. There might be duplicates. We might not actually need - we were caching too aggressively - [x] BUG! if sending transactions gets "INTERNAL_ERROR: existing tx with same hash", create a success message - we just want to be sure that the server has our tx and in this case, it does. - - ERROR http_request:request:try_send_all_upstream_servers: web3_proxy::rpcs::request: bad response! 
err=JsonRpcClientError(JsonRpcError(JsonRpcError { code: -32000, message: "INTERNAL_ERROR: existing tx with same hash", data: None })) method=eth_sendRawTransaction rpc=local_erigon_alpha_archive id=01GF4HV03Y4ZNKQV8DW5NDQ5CG method=POST authorized_request=User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 }) self=Web3Connections { conns: {"local_erigon_alpha_archive_ws": Web3Connection { name: "local_erigon_alpha_archive_ws", blocks: "all", .. }, "local_geth_ws": Web3Connection { name: "local_geth_ws", blocks: 64, .. }, "local_erigon_alpha_archive": Web3Connection { name: "local_erigon_alpha_archive", blocks: "all", .. }}, .. } authorized_request=Some(User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 })) request=JsonRpcRequest { id: RawValue(39), method: "eth_sendRawTransaction", .. } request_metadata=Some(RequestMetadata { datetime: 2022-10-11T22:14:57.406829095Z, period_seconds: 60, request_bytes: 633, backend_requests: 0, no_servers: 0, error_response: false, response_bytes: 0, response_millis: 0 }) block_needed=None + - ERROR http_request:request:try_send_all_upstream_servers: web3_proxy::rpcs::request: bad response! err=JsonRpcClientError(JsonRpcError(JsonRpcError { code: -32000, message: "INTERNAL_ERROR: existing tx with same hash", data: None })) method=eth_sendRawTransaction rpc=local_erigon_alpha_archive id=01GF4HV03Y4ZNKQV8DW5NDQ5CG method=POST authorized_request=User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 }) self=Web3Rpcs { conns: {"local_erigon_alpha_archive_ws": Web3Rpc { name: "local_erigon_alpha_archive_ws", blocks: "all", .. }, "local_geth_ws": Web3Rpc { name: "local_geth_ws", blocks: 64, .. }, "local_erigon_alpha_archive": Web3Rpc { name: "local_erigon_alpha_archive", blocks: "all", .. }}, .. } authorized_request=Some(User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 })) request=JsonRpcRequest { id: RawValue(39), method: "eth_sendRawTransaction", .. } request_metadata=Some(RequestMetadata { datetime: 2022-10-11T22:14:57.406829095Z, period_seconds: 60, request_bytes: 633, backend_requests: 0, no_servers: 0, error_response: false, response_bytes: 0, response_millis: 0 }) block_needed=None - [x] serde collect unknown fields in config instead of crash - [x] upgrade user tier by address - [x] all_backend_connections skips syncing servers @@ -324,6 +324,12 @@ These are not yet ordered. There might be duplicates. We might not actually need - [x] improve waiting for sync when rate limited - [x] improve pager duty errors for smarter deduping - [x] add create_key cli command +- [x] short lived cache on /health +- [x] cache /status for longer +- [x] sort connections during eth_sendRawTransaction +- [x] block all admin_ rpc commands +- [x] remove the "metered" crate now that we save aggregate queries? +- [x] add archive depth to app config - [-] proxy mode for benchmarking all backends - [-] proxy mode for sending to multiple backends - [-] let users choose a % of reverts to log (or maybe x/second). someone like curve logging all reverts will be a BIG database very quickly @@ -375,7 +381,6 @@ These are not yet ordered. There might be duplicates. 
We might not actually need - [ ] cli commands to search users by key - [ ] cli flag to set prometheus port - [ ] flamegraphs show 25% of the time to be in moka-housekeeper. tune that -- [ ] remove the "metered" crate now that we save aggregate queries? - [ ] remove/change the "active_requests" counter? maybe only once we have dynamic soft limits? - [ ] refactor so configs can change while running - this will probably be a rather large change, but is necessary when we have autoscaling @@ -551,10 +556,10 @@ in another repo: event subscriber - [ ] weird flapping fork could have more useful logs. like, howd we get to 1/1/4 and fork. geth changed its mind 3 times? - should we change our code to follow the same consensus rules as geth? our first seen still seems like a reasonable choice - other chains might change all sorts of things about their fork choice rules - 2022-07-22T23:52:18.593956Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0xa906…5bc1 rpc=Web3Connection { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517 - 2022-07-22T23:52:18.983441Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0x70e8…48e0 rpc=Web3Connection { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517 - 2022-07-22T23:52:19.350720Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 1/2/4 rpcs have 0x70e8…48e0 rpc=Web3Connection { url: "ws://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517 - 2022-07-22T23:52:26.041140Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 2/4/4 rpcs have 0x70e8…48e0 rpc=Web3Connection { url: "http://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517 + 2022-07-22T23:52:18.593956Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0xa906…5bc1 rpc=Web3Rpc { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517 + 2022-07-22T23:52:18.983441Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0x70e8…48e0 rpc=Web3Rpc { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517 + 2022-07-22T23:52:19.350720Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 1/2/4 rpcs have 0x70e8…48e0 rpc=Web3Rpc { url: "ws://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517 + 2022-07-22T23:52:26.041140Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 2/4/4 rpcs have 0x70e8…48e0 rpc=Web3Rpc { url: "http://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517 - [ ] threshold should check actual available request limits (if any) instead of just the soft limit - [ ] foreign key on_update and on_delete - [ ] database creation timestamps diff --git a/config/minimal.toml b/config/minimal.toml new file mode 100644 index 00000000..2225c9d1 --- /dev/null +++ b/config/minimal.toml @@ -0,0 +1,32 @@ +[app] +chain_id = 1 + +# no database +# no influxdb +# no redis +# no sentry +# no public limits means anon gets full access + +# no thundering herd protection +min_sum_soft_limit = 1 +min_synced_rpcs = 1 + +# 1GB of cache +response_cache_max_bytes = 1_000_000_000 + +[balanced_rpcs] + + [balanced_rpcs.llama_public_wss] + # TODO: what should we do if all rpcs are disabled? warn and wait for a config change? 
+ disabled = false + display_name = "LlamaNodes WSS" + url = "wss://eth.llamarpc.com/" + soft_limit = 1_000 + tier = 0 + + [balanced_rpcs.llama_public_https] + disabled = false + display_name = "LlamaNodes HTTPS" + url = "https://eth.llamarpc.com/" + soft_limit = 1_000 + tier = 0 diff --git a/deferred-rate-limiter/Cargo.toml b/deferred-rate-limiter/Cargo.toml index 14602245..9b7c4ad8 100644 --- a/deferred-rate-limiter/Cargo.toml +++ b/deferred-rate-limiter/Cargo.toml @@ -7,8 +7,8 @@ edition = "2021" [dependencies] redis-rate-limiter = { path = "../redis-rate-limiter" } -anyhow = "1.0.68" +anyhow = "1.0.69" hashbrown = "0.13.2" log = "0.4.17" -moka = { version = "0.9.6", default-features = false, features = ["future"] } -tokio = "1.24.2" +moka = { version = "0.9.7", default-features = false, features = ["future"] } +tokio = "1.25.0" diff --git a/entities/Cargo.toml b/entities/Cargo.toml index 606a2f39..b1bc8edb 100644 --- a/entities/Cargo.toml +++ b/entities/Cargo.toml @@ -10,8 +10,8 @@ path = "src/mod.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -sea-orm = "0.10.7" +sea-orm = "0.11.0" serde = "1.0.152" -uuid = "1.2.2" +uuid = "1.3.0" ethers = "1.0.2" ulid = "1.0.0" diff --git a/migration/Cargo.toml b/migration/Cargo.toml index 61d25f6d..1b612ed3 100644 --- a/migration/Cargo.toml +++ b/migration/Cargo.toml @@ -9,10 +9,10 @@ name = "migration" path = "src/lib.rs" [dependencies] -tokio = { version = "1.24.2", features = ["full", "tracing"] } +tokio = { version = "1.25.0", features = ["full", "tracing"] } [dependencies.sea-orm-migration] -version = "0.10.7" +version = "0.11.0" features = [ # Enable at least one `ASYNC_RUNTIME` and `DATABASE_DRIVER` feature if you want to run migration via CLI. # View the list of supported features at https://www.sea-ql.org/SeaORM/docs/install-and-config/database-and-async-runtime. diff --git a/redis-rate-limiter/Cargo.toml b/redis-rate-limiter/Cargo.toml index c4af3503..9ba37ad3 100644 --- a/redis-rate-limiter/Cargo.toml +++ b/redis-rate-limiter/Cargo.toml @@ -5,6 +5,6 @@ authors = ["Bryan Stitt "] edition = "2021" [dependencies] -anyhow = "1.0.68" +anyhow = "1.0.69" deadpool-redis = { version = "0.11.1", features = ["rt_tokio_1", "serde"] } -tokio = "1.24.2" +tokio = "1.25.0" diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml index 433715e3..d60d162d 100644 --- a/web3_proxy/Cargo.toml +++ b/web3_proxy/Cargo.toml @@ -25,10 +25,10 @@ thread-fast-rng = { path = "../thread-fast-rng" } # TODO: import chrono from sea-orm so we always have the same version # TODO: make sure this time version matches siwe. 
PR to put this in their prelude -anyhow = { version = "1.0.68", features = ["backtrace"] } +anyhow = { version = "1.0.69", features = ["backtrace"] } argh = "0.1.10" axum = { version = "0.6.4", features = ["headers", "ws"] } -axum-client-ip = "0.3.1" +axum-client-ip = "0.4.0" axum-macros = "0.3.2" chrono = "0.4.23" counter = "0.5.7" @@ -38,7 +38,7 @@ env_logger = "0.10.0" ethers = { version = "1.0.2", default-features = false, features = ["rustls", "ws"] } fdlimit = "0.2.1" flume = "0.10.14" -futures = { version = "0.3.25", features = ["thread-pool"] } +futures = { version = "0.3.26", features = ["thread-pool"] } gethostname = "0.4.1" glob = "0.3.1" handlebars = "4.3.6" @@ -48,28 +48,28 @@ http = "0.2.8" ipnet = "2.7.1" itertools = "0.10.5" log = "0.4.17" -metered = { version = "0.9.0", features = ["serialize"] } -moka = { version = "0.9.6", default-features = false, features = ["future"] } -notify = "5.0.0" +moka = { version = "0.9.7", default-features = false, features = ["future"] } +notify = "5.1.0" num = "0.4.0" num-traits = "0.2.15" pagerduty-rs = { version = "0.1.6", default-features = false, features = ["async", "rustls", "sync"] } parking_lot = { version = "0.12.1", features = ["arc_lock"] } +prettytable = "*" proctitle = "0.1.1" regex = "1.7.1" reqwest = { version = "0.11.14", default-features = false, features = ["json", "tokio-rustls"] } rustc-hash = "1.1.0" -sentry = { version = "0.29.2", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] } +sentry = { version = "0.29.3", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] } serde = { version = "1.0.152", features = [] } -serde_json = { version = "1.0.91", default-features = false, features = ["alloc", "raw_value"] } -serde_prometheus = "0.1.6" +serde_json = { version = "1.0.93", default-features = false, features = ["alloc", "raw_value"] } +serde_prometheus = "0.2.0" siwe = "0.5.0" time = "0.3.17" -tokio = { version = "1.24.2", features = ["full"] } +tokio = { version = "1.25.0", features = ["full"] } tokio-stream = { version = "0.1.11", features = ["sync"] } -toml = "0.6.0" +toml = "0.7.2" tower = "0.4.13" tower-http = { version = "0.3.5", features = ["cors", "sensitive-headers"] } ulid = { version = "1.0.0", features = ["serde"] } url = "2.3.1" -uuid = "1.2.2" +uuid = "1.3.0" diff --git a/web3_proxy/examples/metrics.rs b/web3_proxy/examples/metrics.rs deleted file mode 100644 index 59667b18..00000000 --- a/web3_proxy/examples/metrics.rs +++ /dev/null @@ -1,32 +0,0 @@ -use metered::{metered, HitCount, Throughput}; -use serde::Serialize; -use thread_fast_rng::{rand::Rng, thread_fast_rng}; - -#[derive(Default, Debug, Serialize)] -pub struct Biz { - metrics: BizMetrics, -} - -#[metered(registry = BizMetrics)] -impl Biz { - #[measure([HitCount, Throughput])] - pub fn biz(&self) { - let delay = std::time::Duration::from_millis(thread_fast_rng().gen::() % 200); - std::thread::sleep(delay); - } -} - -fn main() { - let buz = Biz::default(); - - for _ in 0..100 { - buz.biz(); - } - - let mut globals = std::collections::HashMap::new(); - globals.insert("service", "web3_proxy_prometheus_example"); - - let serialized = serde_prometheus::to_string(&buz.metrics, Some("example"), globals).unwrap(); - - println!("{}", serialized); -} diff --git a/web3_proxy/src/app/mod.rs b/web3_proxy/src/app/mod.rs index f41c1210..75610ca2 100644 --- a/web3_proxy/src/app/mod.rs +++ b/web3_proxy/src/app/mod.rs @@ 
-11,9 +11,8 @@ use crate::jsonrpc::{ JsonRpcForwardedResponse, JsonRpcForwardedResponseEnum, JsonRpcRequest, JsonRpcRequestEnum, }; use crate::rpcs::blockchain::{ArcBlock, SavedBlock}; -use crate::rpcs::connection::Web3Connection; -use crate::rpcs::connections::Web3Connections; -use crate::rpcs::request::OpenRequestHandleMetrics; +use crate::rpcs::many::Web3Rpcs; +use crate::rpcs::one::Web3Rpc; use crate::rpcs::transactions::TxStatus; use crate::user_token::UserBearerToken; use anyhow::Context; @@ -32,7 +31,6 @@ use futures::stream::{FuturesUnordered, StreamExt}; use hashbrown::{HashMap, HashSet}; use ipnet::IpNet; use log::{debug, error, info, trace, warn, Level}; -use metered::{metered, ErrorCount, HitCount, ResponseTime, Throughput}; use migration::sea_orm::{ self, ConnectionTrait, Database, DatabaseConnection, EntityTrait, PaginatorTrait, }; @@ -71,7 +69,9 @@ pub static REQUEST_PERIOD: u64 = 60; #[derive(From)] struct ResponseCacheKey { // if none, this is cached until evicted - block: Option, + from_block: Option, + // to_block is only set when ranges of blocks are requested (like with eth_getLogs) + to_block: Option, method: String, // TODO: better type for this params: Option, @@ -96,7 +96,22 @@ impl PartialEq for ResponseCacheKey { return false; } - match (self.block.as_ref(), other.block.as_ref()) { + match (self.from_block.as_ref(), other.from_block.as_ref()) { + (None, None) => {} + (None, Some(_)) => { + return false; + } + (Some(_), None) => { + return false; + } + (Some(s), Some(o)) => { + if s != o { + return false; + } + } + } + + match (self.to_block.as_ref(), other.to_block.as_ref()) { (None, None) => {} (None, Some(_)) => { return false; @@ -123,7 +138,8 @@ impl Eq for ResponseCacheKey {} impl Hash for ResponseCacheKey { fn hash(&self, state: &mut H) { - self.block.as_ref().map(|x| x.hash()).hash(state); + self.from_block.as_ref().map(|x| x.hash()).hash(state); + self.to_block.as_ref().map(|x| x.hash()).hash(state); self.method.hash(state); self.params.as_ref().map(|x| x.to_string()).hash(state); self.cache_errors.hash(state) @@ -182,9 +198,9 @@ impl DatabaseReplica { // TODO: i'm sure this is more arcs than necessary, but spawning futures makes references hard pub struct Web3ProxyApp { /// Send requests to the best server available - pub balanced_rpcs: Arc, + pub balanced_rpcs: Arc, /// Send private requests (like eth_sendRawTransaction) to all these servers - pub private_rpcs: Option>, + pub private_rpcs: Option>, response_cache: ResponseCache, // don't drop this or the sender will stop working // TODO: broadcast channel instead? 
@@ -193,9 +209,6 @@ pub struct Web3ProxyApp { pub config: AppConfig, pub db_conn: Option, pub db_replica: Option, - /// prometheus metrics - app_metrics: Arc, - open_request_handle_metrics: Arc, /// store pending transactions that we've seen so that we don't send duplicates to subscribers pub pending_transactions: Cache, pub frontend_ip_rate_limiter: Option>, @@ -288,7 +301,7 @@ pub async fn migrate_db( ); loop { - if Migrator::get_pending_migrations(&db_conn).await?.is_empty() { + if Migrator::get_pending_migrations(db_conn).await?.is_empty() { info!("no migrations to apply"); return Ok(()); } @@ -314,10 +327,10 @@ pub async fn migrate_db( break; } - let migration_result = Migrator::up(&db_conn, None).await; + let migration_result = Migrator::up(db_conn, None).await; // drop the distributed lock - drop_migration_lock(&db_conn).await?; + drop_migration_lock(db_conn).await?; // return if migrations erred migration_result @@ -347,7 +360,6 @@ pub struct Web3ProxyAppSpawn { pub background_handles: FuturesUnordered>, } -#[metered(registry = Web3ProxyAppMetrics, registry_expr = self.app_metrics, visibility = pub)] impl Web3ProxyApp { /// The main entrypoint. pub async fn spawn( @@ -377,10 +389,6 @@ impl Web3ProxyApp { ); } - // setup metrics - let app_metrics = Default::default(); - let open_request_handle_metrics: Arc = Default::default(); - let mut db_conn = None::; let mut db_replica = None::; @@ -564,7 +572,7 @@ impl Web3ProxyApp { .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default()); // connect to the load balanced rpcs - let (balanced_rpcs, balanced_handle) = Web3Connections::spawn( + let (balanced_rpcs, balanced_handle) = Web3Rpcs::spawn( top_config.app.chain_id, db_conn.clone(), balanced_rpcs, @@ -576,7 +584,6 @@ impl Web3ProxyApp { top_config.app.min_synced_rpcs, Some(pending_tx_sender.clone()), pending_transactions.clone(), - open_request_handle_metrics.clone(), ) .await .context("spawning balanced rpcs")?; @@ -591,7 +598,7 @@ impl Web3ProxyApp { warn!("No private relays configured. Any transactions will be broadcast to the public mempool!"); None } else { - let (private_rpcs, private_handle) = Web3Connections::spawn( + let (private_rpcs, private_handle) = Web3Rpcs::spawn( top_config.app.chain_id, db_conn.clone(), private_rpcs, @@ -607,7 +614,6 @@ impl Web3ProxyApp { // TODO: subscribe to pending transactions on the private rpcs? they seem to have low rate limits None, pending_transactions.clone(), - open_request_handle_metrics.clone(), ) .await .context("spawning private_rpcs")?; @@ -663,14 +669,12 @@ impl Web3ProxyApp { )); } - // keep 1GB of blocks in the cache - // responses can be very different in sizes, so this definitely needs a weigher - // TODO: max_capacity from config + // responses can be very different in sizes, so this is a cache with a max capacity and a weigher // TODO: don't allow any response to be bigger than X% of the cache let response_cache = Cache::builder() - .max_capacity(1024 * 1024 * 1024) + .max_capacity(top_config.app.response_cache_max_bytes) .weigher(|k: &ResponseCacheKey, v| { - // TODO: is this good? + // TODO: is this good enough? 
if let Ok(v) = serde_json::to_string(v) { let weight = k.weight() + v.len(); @@ -718,8 +722,6 @@ impl Web3ProxyApp { db_conn, db_replica, vredis_pool, - app_metrics, - open_request_handle_metrics, rpc_secret_key_cache, bearer_token_semaphores, ip_semaphores, @@ -893,9 +895,7 @@ impl Web3ProxyApp { // "user_cache_size": app.rpc_secret_key_cache.weighted_size(), #[derive(Serialize)] - struct CombinedMetrics<'a> { - app: &'a Web3ProxyAppMetrics, - backend_rpc: &'a OpenRequestHandleMetrics, + struct CombinedMetrics { recent_ip_counts: RecentCounts, recent_user_id_counts: RecentCounts, recent_tx_counts: RecentCounts, @@ -903,14 +903,13 @@ impl Web3ProxyApp { } let metrics = CombinedMetrics { - app: &self.app_metrics, - backend_rpc: &self.open_request_handle_metrics, recent_ip_counts, recent_user_id_counts, recent_tx_counts, user_count, }; + // TODO: i don't like this library. it doesn't include HELP or TYPE lines and so our prometheus server fails to parse it serde_prometheus::to_string(&metrics, Some("web3_proxy"), globals) .expect("prometheus metrics should always serialize") } @@ -921,8 +920,7 @@ impl Web3ProxyApp { authorization: Arc, request: JsonRpcRequestEnum, proxy_mode: ProxyMode, - ) -> Result<(JsonRpcForwardedResponseEnum, Vec>), FrontendErrorResponse> - { + ) -> Result<(JsonRpcForwardedResponseEnum, Vec>), FrontendErrorResponse> { // trace!(?request, "proxy_web3_rpc"); // even though we have timeouts on the requests to our backend providers, @@ -961,7 +959,7 @@ impl Web3ProxyApp { authorization: &Arc, requests: Vec, proxy_mode: ProxyMode, - ) -> anyhow::Result<(Vec, Vec>)> { + ) -> Result<(Vec, Vec>), FrontendErrorResponse> { // TODO: we should probably change ethers-rs to support this directly. they pushed this off to v2 though let num_requests = requests.len(); @@ -978,7 +976,7 @@ impl Web3ProxyApp { // TODO: i'm sure this could be done better with iterators // TODO: stream the response? let mut collected: Vec = Vec::with_capacity(num_requests); - let mut collected_rpcs: HashSet> = HashSet::new(); + let mut collected_rpcs: HashSet> = HashSet::new(); for response in responses { // TODO: any way to attach the tried rpcs to the error? it is likely helpful let (response, rpcs) = response?; @@ -1013,13 +1011,13 @@ impl Web3ProxyApp { } } - #[measure([ErrorCount, HitCount, ResponseTime, Throughput])] + // #[measure([ErrorCount, HitCount, ResponseTime, Throughput])] async fn proxy_cached_request( self: &Arc, authorization: &Arc, mut request: JsonRpcRequest, proxy_mode: ProxyMode, - ) -> anyhow::Result<(JsonRpcForwardedResponse, Vec>)> { + ) -> Result<(JsonRpcForwardedResponse, Vec>), FrontendErrorResponse> { // trace!("Received request: {:?}", request); let request_metadata = Arc::new(RequestMetadata::new(REQUEST_PERIOD, request.num_bytes())?); @@ -1033,13 +1031,7 @@ impl Web3ProxyApp { // TODO: don't clone? let partial_response: serde_json::Value = match request_method.as_ref() { // lots of commands are blocked - method @ ("admin_addPeer" - | "admin_datadir" - | "admin_startRPC" - | "admin_startWS" - | "admin_stopRPC" - | "admin_stopWS" - | "db_getHex" + method @ ("db_getHex" | "db_getString" | "db_putHex" | "db_putString" @@ -1114,6 +1106,7 @@ impl Web3ProxyApp { | "eth_newBlockFilter" | "eth_newFilter" | "eth_newPendingTransactionFilter" + | "eth_pollSubscriptions" | "eth_uninstallFilter") => { // TODO: unsupported command stat // TODO: what error code? @@ -1138,9 +1131,10 @@ impl Web3ProxyApp { } None => { // TODO: what does geth do if this happens? 
- return Err(anyhow::anyhow!( - "no servers synced. unknown eth_blockNumber" - )); + // TODO: i think we want a 502 so that haproxy retries on another server + return Err( + anyhow::anyhow!("no servers synced. unknown eth_blockNumber").into(), + ); } } } @@ -1211,7 +1205,7 @@ impl Web3ProxyApp { ProxyMode::Fastest(0) => None, // TODO: how many balanced rpcs should we send to? configurable? percentage of total? // TODO: what if we do 2 per tier? we want to blast the third party rpcs - // TODO: maybe having the third party rpcs in their own Web3Connections would be good for this + // TODO: maybe having the third party rpcs in their own Web3Rpcs would be good for this ProxyMode::Fastest(x) => Some(x * 4), ProxyMode::Versus => None, }; @@ -1221,6 +1215,7 @@ impl Web3ProxyApp { // if we are sending the transaction privately, no matter the proxy_mode, we send to ALL private rpcs (private_rpcs, None) } else { + // TODO: send to balanced_rpcs AND private_rpcs (&self.balanced_rpcs, default_num) } } else { @@ -1236,6 +1231,7 @@ impl Web3ProxyApp { None, Level::Trace, num, + true, ) .await?; @@ -1376,12 +1372,17 @@ impl Web3ProxyApp { )); } - // TODO: don't return with ? here. send a jsonrpc invalid request let param = Bytes::from_str( params[0] .as_str() .context("parsing params 0 into str then bytes")?, - )?; + ) + .map_err(|x| { + trace!("bad request: {:?}", x); + FrontendErrorResponse::BadRequest( + "param 0 could not be read as H256".to_string(), + ) + })?; let hash = H256::from(keccak256(param)); @@ -1413,6 +1414,11 @@ impl Web3ProxyApp { } // anything else gets sent to backend rpcs and cached method => { + if method.starts_with("admin_") { + // TODO: emit a stat? will probably just be noise + return Err(FrontendErrorResponse::AccessDenied); + } + // emit stats // TODO: if no servers synced, wait for them to be synced? probably better to error and let haproxy retry another server @@ -1434,7 +1440,8 @@ impl Web3ProxyApp { .await? { BlockNeeded::CacheSuccessForever => Some(ResponseCacheKey { - block: None, + from_block: None, + to_block: None, method: method.to_string(), params: request.params.clone(), cache_errors: false, @@ -1444,12 +1451,12 @@ impl Web3ProxyApp { block_num, cache_errors, } => { - let (request_block_hash, archive_needed) = self + let (request_block_hash, block_depth) = self .balanced_rpcs .block_hash(authorization, &block_num) .await?; - if archive_needed { + if block_depth < self.config.archive_depth { request_metadata .archive_request .store(true, atomic::Ordering::Relaxed); @@ -1461,7 +1468,48 @@ impl Web3ProxyApp { .await?; Some(ResponseCacheKey { - block: Some(SavedBlock::new(request_block)), + from_block: Some(SavedBlock::new(request_block)), + to_block: None, + method: method.to_string(), + // TODO: hash here? 
+ params: request.params.clone(), + cache_errors, + }) + } + BlockNeeded::CacheRange { + from_block_num, + to_block_num, + cache_errors, + } => { + let (from_block_hash, block_depth) = self + .balanced_rpcs + .block_hash(authorization, &from_block_num) + .await?; + + if block_depth < self.config.archive_depth { + request_metadata + .archive_request + .store(true, atomic::Ordering::Relaxed); + } + + let from_block = self + .balanced_rpcs + .block(authorization, &from_block_hash, None) + .await?; + + let (to_block_hash, _) = self + .balanced_rpcs + .block_hash(authorization, &to_block_num) + .await?; + + let to_block = self + .balanced_rpcs + .block(authorization, &to_block_hash, None) + .await?; + + Some(ResponseCacheKey { + from_block: Some(SavedBlock::new(from_block)), + to_block: Some(SavedBlock::new(to_block)), method: method.to_string(), // TODO: hash here? params: request.params.clone(), @@ -1476,14 +1524,11 @@ impl Web3ProxyApp { let authorization = authorization.clone(); if let Some(cache_key) = cache_key { - let request_block_number = cache_key.block.as_ref().map(|x| x.number()); + let from_block_num = cache_key.from_block.as_ref().map(|x| x.number()); self.response_cache .try_get_with(cache_key, async move { - // TODO: retry some failures automatically! - // TODO: try private_rpcs if all the balanced_rpcs fail! // TODO: put the hash here instead of the block number? its in the request already. - let mut response = self .balanced_rpcs .try_proxy_connection( @@ -1491,7 +1536,7 @@ impl Web3ProxyApp { &authorization, request, Some(&request_metadata), - request_block_number.as_ref(), + from_block_num.as_ref(), ) .await?; @@ -1499,6 +1544,8 @@ impl Web3ProxyApp { response.id = Default::default(); // TODO: only cache the inner response + // TODO: how are we going to stream this? + // TODO: check response size. 
if its very large, return it in a custom Error type that bypasses caching Ok::<_, anyhow::Error>(response) }) .await diff --git a/web3_proxy/src/bin/web3_proxy_cli/daemon.rs b/web3_proxy/src/bin/web3_proxy_cli/daemon.rs index 9019592a..62d742e5 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/daemon.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/daemon.rs @@ -155,7 +155,7 @@ mod tests { use std::env; use web3_proxy::{ - config::{AppConfig, Web3ConnectionConfig}, + config::{AppConfig, Web3RpcConfig}, rpcs::blockchain::ArcBlock, }; @@ -196,7 +196,7 @@ mod tests { min_sum_soft_limit: 1, min_synced_rpcs: 1, public_requests_per_period: Some(1_000_000), - response_cache_max_bytes: 10_usize.pow(7), + response_cache_max_bytes: 10_u64.pow(7), redirect_public_url: Some("example.com/".to_string()), redirect_rpc_key_url: Some("example.com/{{rpc_key_id}}".to_string()), ..Default::default() @@ -204,7 +204,7 @@ mod tests { balanced_rpcs: HashMap::from([ ( "anvil".to_string(), - Web3ConnectionConfig { + Web3RpcConfig { disabled: false, display_name: None, url: anvil.endpoint(), @@ -219,7 +219,7 @@ mod tests { ), ( "anvil_ws".to_string(), - Web3ConnectionConfig { + Web3RpcConfig { disabled: false, display_name: None, url: anvil.ws_endpoint(), diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs index 73710f7a..19b608cf 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/main.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs @@ -11,6 +11,7 @@ mod daemon; mod drop_migration_lock; mod list_user_tier; mod pagerduty; +mod popularity_contest; mod rpc_accounting; mod sentryd; mod transfer_key; @@ -80,6 +81,7 @@ enum SubCommand { CreateUser(create_user::CreateUserSubCommand), DropMigrationLock(drop_migration_lock::DropMigrationLockSubCommand), Pagerduty(pagerduty::PagerdutySubCommand), + PopularityContest(popularity_contest::PopularityContestSubCommand), Proxyd(daemon::ProxydSubCommand), RpcAccounting(rpc_accounting::RpcAccountingSubCommand), Sentryd(sentryd::SentrydSubCommand), @@ -372,6 +374,7 @@ fn main() -> anyhow::Result<()> { x.main(pagerduty_async, top_config).await } + SubCommand::PopularityContest(x) => x.main().await, SubCommand::Sentryd(x) => { if cli_config.sentry_url.is_none() { warn!("sentry_url is not set! Logs will only show in this console"); diff --git a/web3_proxy/src/bin/web3_proxy_cli/popularity_contest.rs b/web3_proxy/src/bin/web3_proxy_cli/popularity_contest.rs new file mode 100644 index 00000000..dccd2012 --- /dev/null +++ b/web3_proxy/src/bin/web3_proxy_cli/popularity_contest.rs @@ -0,0 +1,135 @@ +use std::collections::BTreeMap; + +// show what nodes are used most often +use argh::FromArgs; +use log::trace; +use prettytable::{row, Table}; + +#[derive(FromArgs, PartialEq, Debug)] +/// Second subcommand. +#[argh(subcommand, name = "popularity_contest")] +pub struct PopularityContestSubCommand { + #[argh(positional)] + /// the web3-proxy url + /// TODO: query multiple and add them together + rpc: String, +} + +#[derive(Debug)] +struct BackendRpcData<'a> { + name: &'a str, + // tier: u64, + // backup: bool, + // block_data_limit: u64, + requests: u64, +} + +impl PopularityContestSubCommand { + pub async fn main(self) -> anyhow::Result<()> { + let x: serde_json::Value = reqwest::get(format!("{}/status", self.rpc)) + .await? 
+ .json() + .await?; + + let conns = x + .as_object() + .unwrap() + .get("balanced_rpcs") + .unwrap() + .as_object() + .unwrap() + .get("conns") + .unwrap() + .as_array() + .unwrap(); + + let mut by_tier = BTreeMap::>::new(); + let mut tier_requests = BTreeMap::::new(); + let mut total_requests = 0; + + for conn in conns { + let conn = conn.as_object().unwrap(); + + let name = conn + .get("display_name") + .unwrap_or_else(|| conn.get("name").unwrap()) + .as_str() + .unwrap(); + + if name.ends_with("http") { + continue; + } + + let tier = conn.get("tier").unwrap().as_u64().unwrap(); + + // let backup = conn.get("backup").unwrap().as_bool().unwrap(); + + // let block_data_limit = conn + // .get("block_data_limit") + // .unwrap() + // .as_u64() + // .unwrap_or(u64::MAX); + + let requests = conn.get("total_requests").unwrap().as_u64().unwrap(); + + let rpc_data = BackendRpcData { + name, + // tier, + // backup, + // block_data_limit, + requests, + }; + + total_requests += rpc_data.requests; + + *tier_requests.entry(tier).or_default() += rpc_data.requests; + + by_tier.entry(tier).or_default().push(rpc_data); + } + + trace!("tier_requests: {:#?}", tier_requests); + trace!("by_tier: {:#?}", by_tier); + + let mut table = Table::new(); + + table.add_row(row![ + "name", + "tier", + "rpc_requests", + "tier_request_pct", + "total_pct" + ]); + + let total_requests = total_requests as f32; + + for (tier, rpcs) in by_tier.iter() { + let t = (*tier_requests.get(tier).unwrap()) as f32; + + for rpc in rpcs.iter() { + let tier_request_pct = if t == 0.0 { + 0.0 + } else { + (rpc.requests as f32) / t * 100.0 + }; + + let total_request_pct = if total_requests == 0.0 { + 0.0 + } else { + (rpc.requests as f32) / total_requests * 100.0 + }; + + table.add_row(row![ + rpc.name, + tier, + rpc.requests, + tier_request_pct, + total_request_pct + ]); + } + } + + table.printstd(); + + Ok(()) + } +} diff --git a/web3_proxy/src/block_number.rs b/web3_proxy/src/block_number.rs index 4e52788f..33ef7f54 100644 --- a/web3_proxy/src/block_number.rs +++ b/web3_proxy/src/block_number.rs @@ -4,38 +4,36 @@ use ethers::{ prelude::{BlockNumber, U64}, types::H256, }; -use log::{trace, warn}; +use log::warn; use serde_json::json; use std::sync::Arc; -use crate::{frontend::authorization::Authorization, rpcs::connections::Web3Connections}; +use crate::{frontend::authorization::Authorization, rpcs::many::Web3Rpcs}; #[allow(non_snake_case)] -pub fn block_num_to_U64(block_num: BlockNumber, latest_block: U64) -> U64 { +pub fn block_num_to_U64(block_num: BlockNumber, latest_block: U64) -> (U64, bool) { match block_num { - BlockNumber::Earliest => { - // modified is false because we want the backend to see "pending" - U64::zero() - } + BlockNumber::Earliest => (U64::zero(), false), BlockNumber::Finalized => { warn!("finalized block requested! not yet implemented!"); - latest_block - 10 + (latest_block - 10, false) } BlockNumber::Latest => { // change "latest" to a number - latest_block + (latest_block, true) } BlockNumber::Number(x) => { // we already have a number - x + (x, false) } BlockNumber::Pending => { + // modified is false because we want the backend to see "pending" // TODO: think more about how to handle Pending - latest_block + (latest_block, false) } BlockNumber::Safe => { warn!("finalized block requested! 
not yet implemented!"); - latest_block - 3 + (latest_block - 3, false) } } } @@ -47,7 +45,7 @@ pub async fn clean_block_number( params: &mut serde_json::Value, block_param_id: usize, latest_block: U64, - rpcs: &Web3Connections, + rpcs: &Web3Rpcs, ) -> anyhow::Result { match params.as_array_mut() { None => { @@ -58,7 +56,7 @@ pub async fn clean_block_number( None => { if params.len() == block_param_id { // add the latest block number to the end of the params - params.push(serde_json::to_value(latest_block)?); + params.push(json!(latest_block)); } else { // don't modify the request. only cache with current block // TODO: more useful log that include the @@ -69,37 +67,41 @@ pub async fn clean_block_number( Ok(latest_block) } Some(x) => { - let start = x.clone(); - // convert the json value to a BlockNumber - let block_num = if let Some(obj) = x.as_object_mut() { + let (block_num, change) = if let Some(obj) = x.as_object_mut() { // it might be a Map like `{"blockHash": String("0xa5626dc20d3a0a209b1de85521717a3e859698de8ce98bca1b16822b7501f74b")}` if let Some(block_hash) = obj.remove("blockHash") { let block_hash: H256 = serde_json::from_value(block_hash).context("decoding blockHash")?; - let block = rpcs.block(authorization, &block_hash, None).await?; + let block = rpcs + .block(authorization, &block_hash, None) + .await + .context("fetching block number from hash")?; - block - .number - .expect("blocks here should always have numbers") + // TODO: set change to true? i think not we should probably use hashes for everything. + ( + block + .number + .expect("blocks here should always have numbers"), + false, + ) } else { return Err(anyhow::anyhow!("blockHash missing")); } } else { // it might be a string like "latest" or a block number // TODO: "BlockNumber" needs a better name - let block_number = serde_json::from_value::(x.take())?; + // TODO: use take instead of clone + let block_number = serde_json::from_value::(x.clone()) + .context("checking params for BlockNumber")?; block_num_to_U64(block_number, latest_block) }; // if we changed "latest" to a number, update the params to match - *x = serde_json::to_value(block_num)?; - - // TODO: only do this if trace logging is enabled - if x.as_u64() != start.as_u64() { - trace!("changed {} to {}", start, x); + if change { + *x = json!(block_num); } Ok(block_num) @@ -112,7 +114,15 @@ pub async fn clean_block_number( pub enum BlockNeeded { CacheSuccessForever, CacheNever, - Cache { block_num: U64, cache_errors: bool }, + Cache { + block_num: U64, + cache_errors: bool, + }, + CacheRange { + from_block_num: U64, + to_block_num: U64, + cache_errors: bool, + }, } pub async fn block_needed( @@ -120,21 +130,22 @@ pub async fn block_needed( method: &str, params: Option<&mut serde_json::Value>, head_block_num: U64, - rpcs: &Web3Connections, + rpcs: &Web3Rpcs, ) -> anyhow::Result { - // if no params, no block is needed let params = if let Some(params) = params { + // grab the params so we can inspect and potentially modify them params } else { + // if no params, no block is needed // TODO: check all the methods with no params, some might not be cacheable - // caching for one block should always be okay + // caching with the head block /should/ always be okay return Ok(BlockNeeded::Cache { block_num: head_block_num, cache_errors: true, }); }; - // get the index for the BlockNumber or return None to say no block is needed. + // get the index for the BlockNumber // The BlockNumber is usually the last element. // TODO: double check these. 
i think some of the getBlock stuff will never need archive let block_param_id = match method { @@ -168,39 +179,44 @@ pub async fn block_needed( .as_object_mut() .ok_or_else(|| anyhow::anyhow!("invalid format"))?; - if let Some(x) = obj.get_mut("fromBlock") { - let block_num: BlockNumber = serde_json::from_value(x.take())?; - - let block_num = block_num_to_U64(block_num, head_block_num); - - *x = json!(block_num); - - // TODO: maybe don't return. instead check toBlock too? - // TODO: if there is a very wide fromBlock and toBlock, we need to check that our rpcs have both! - return Ok(BlockNeeded::Cache { - block_num, - cache_errors: false, - }); - } - - if let Some(x) = obj.get_mut("toBlock") { - let block_num: BlockNumber = serde_json::from_value(x.take())?; - - let block_num = block_num_to_U64(block_num, head_block_num); - - *x = json!(block_num); - - return Ok(BlockNeeded::Cache { - block_num, - cache_errors: false, - }); - } - if obj.contains_key("blockHash") { 1 } else { - return Ok(BlockNeeded::Cache { - block_num: head_block_num, + let from_block_num = if let Some(x) = obj.get_mut("fromBlock") { + // TODO: use .take instead of clone + let block_num: BlockNumber = serde_json::from_value(x.clone())?; + + let (block_num, change) = block_num_to_U64(block_num, head_block_num); + + if change { + *x = json!(block_num); + } + + block_num + } else { + let (block_num, _) = block_num_to_U64(BlockNumber::Earliest, head_block_num); + + block_num + }; + + let to_block_num = if let Some(x) = obj.get_mut("toBlock") { + // TODO: use .take instead of clone + let block_num: BlockNumber = serde_json::from_value(x.clone())?; + + let (block_num, change) = block_num_to_U64(block_num, head_block_num); + + if change { + *x = json!(block_num); + } + + block_num + } else { + head_block_num + }; + + return Ok(BlockNeeded::CacheRange { + from_block_num: from_block_num, + to_block_num: to_block_num, cache_errors: true, }); } diff --git a/web3_proxy/src/config.rs b/web3_proxy/src/config.rs index 9e40db5a..942632e7 100644 --- a/web3_proxy/src/config.rs +++ b/web3_proxy/src/config.rs @@ -1,6 +1,5 @@ use crate::rpcs::blockchain::BlockHashesCache; -use crate::rpcs::connection::Web3Connection; -use crate::rpcs::request::OpenRequestHandleMetrics; +use crate::rpcs::one::Web3Rpc; use crate::{app::AnyhowJoinHandle, rpcs::blockchain::ArcBlock}; use argh::FromArgs; use ethers::prelude::TxHash; @@ -12,8 +11,8 @@ use serde::Deserialize; use std::sync::Arc; use tokio::sync::broadcast; -pub type BlockAndRpc = (Option, Arc); -pub type TxHashAndRpc = (TxHash, Arc); +pub type BlockAndRpc = (Option, Arc); +pub type TxHashAndRpc = (TxHash, Arc); #[derive(Debug, FromArgs)] /// Web3_proxy is a fast caching and load balancing proxy for web3 (Ethereum or similar) JsonRPC servers. 
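// --- editor's aside (illustrative sketch of the CacheRange handling added to
// block_number.rs above; the function names here are hypothetical) ---
// For eth_getLogs, a missing fromBlock defaults to earliest, a missing toBlock
// defaults to the current head, and the request params are only rewritten when a
// tag like "latest" was resolved to a concrete number.
use ethers::prelude::{BlockNumber, U64};
use serde_json::json;

// resolve one endpoint; returns the number and whether the params should be rewritten
fn resolve(tag: BlockNumber, head: U64) -> (U64, bool) {
    match tag {
        BlockNumber::Earliest => (U64::zero(), false),
        BlockNumber::Latest => (head, true),
        BlockNumber::Number(x) => (x, false),
        // Pending/Safe/Finalized handling is simplified here
        _ => (head, false),
    }
}

fn sketch_range(filter: &mut serde_json::Value, head: U64) -> (U64, U64) {
    let obj = filter.as_object_mut().expect("eth_getLogs takes an object");

    let from = if let Some(x) = obj.get_mut("fromBlock") {
        let tag: BlockNumber = serde_json::from_value(x.clone()).unwrap_or(BlockNumber::Earliest);
        let (num, changed) = resolve(tag, head);
        if changed {
            *x = json!(num);
        }
        num
    } else {
        // no fromBlock given: treat it as "earliest"
        U64::zero()
    };

    let to = if let Some(x) = obj.get_mut("toBlock") {
        let tag: BlockNumber = serde_json::from_value(x.clone()).unwrap_or(BlockNumber::Latest);
        let (num, changed) = resolve(tag, head);
        if changed {
            *x = json!(num);
        }
        num
    } else {
        // no toBlock given: cache against the head block
        head
    };

    (from, to)
}
// --- end editor's aside ---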
@@ -42,15 +41,15 @@ pub struct CliConfig { #[derive(Clone, Debug, Deserialize)] pub struct TopConfig { pub app: AppConfig, - pub balanced_rpcs: HashMap, + pub balanced_rpcs: HashMap, // TODO: instead of an option, give it a default - pub private_rpcs: Option>, + pub private_rpcs: Option>, /// unknown config options get put here #[serde(flatten, default = "HashMap::default")] pub extra: HashMap, } -/// shared configuration between Web3Connections +/// shared configuration between Web3Rpcs // TODO: no String, only &str #[derive(Clone, Debug, Default, Deserialize)] pub struct AppConfig { @@ -59,6 +58,10 @@ pub struct AppConfig { #[serde(default = "default_allowed_origin_requests_per_period")] pub allowed_origin_requests_per_period: HashMap, + /// erigon defaults to pruning beyond 90,000 blocks + #[serde(default = "default_archive_depth")] + pub archive_depth: u64, + /// EVM chain id. 1 for ETH /// TODO: better type for chain_id? max of `u64::MAX / 2 - 36` pub chain_id: u64, @@ -135,7 +138,7 @@ pub struct AppConfig { /// RPC responses are cached locally #[serde(default = "default_response_cache_max_bytes")] - pub response_cache_max_bytes: usize, + pub response_cache_max_bytes: u64, /// the stats page url for an anonymous user. pub redirect_public_url: Option, @@ -159,6 +162,10 @@ pub struct AppConfig { pub extra: HashMap, } +fn default_archive_depth() -> u64 { + 90_000 +} + fn default_allowed_origin_requests_per_period() -> HashMap { HashMap::new() } @@ -183,15 +190,15 @@ fn default_login_rate_limit_per_period() -> u64 { 10 } -fn default_response_cache_max_bytes() -> usize { +fn default_response_cache_max_bytes() -> u64 { // TODO: default to some percentage of the system? // 100 megabytes - 10_usize.pow(8) + 10u64.pow(8) } /// Configuration for a backend web3 RPC server #[derive(Clone, Debug, Deserialize)] -pub struct Web3ConnectionConfig { +pub struct Web3RpcConfig { /// simple way to disable a connection without deleting the row #[serde(default)] pub disabled: bool, @@ -223,9 +230,9 @@ fn default_tier() -> u64 { 0 } -impl Web3ConnectionConfig { - /// Create a Web3Connection from config - /// TODO: move this into Web3Connection? (just need to make things pub(crate)) +impl Web3RpcConfig { + /// Create a Web3Rpc from config + /// TODO: move this into Web3Rpc? 
(just need to make things pub(crate)) #[allow(clippy::too_many_arguments)] pub async fn spawn( self, @@ -238,13 +245,9 @@ impl Web3ConnectionConfig { block_map: BlockHashesCache, block_sender: Option>, tx_id_sender: Option>, - open_request_handle_metrics: Arc, - ) -> anyhow::Result<(Arc, AnyhowJoinHandle<()>)> { + ) -> anyhow::Result<(Arc, AnyhowJoinHandle<()>)> { if !self.extra.is_empty() { - warn!( - "unknown Web3ConnectionConfig fields!: {:?}", - self.extra.keys() - ); + warn!("unknown Web3RpcConfig fields!: {:?}", self.extra.keys()); } let hard_limit = match (self.hard_limit, redis_pool) { @@ -266,7 +269,7 @@ impl Web3ConnectionConfig { let backup = self.backup.unwrap_or(false); - Web3Connection::spawn( + Web3Rpc::spawn( name, self.display_name, chain_id, @@ -283,7 +286,6 @@ impl Web3ConnectionConfig { tx_id_sender, true, self.tier, - open_request_handle_metrics, ) .await } diff --git a/web3_proxy/src/frontend/authorization.rs b/web3_proxy/src/frontend/authorization.rs index fc9308ef..342addfc 100644 --- a/web3_proxy/src/frontend/authorization.rs +++ b/web3_proxy/src/frontend/authorization.rs @@ -2,7 +2,7 @@ use super::errors::FrontendErrorResponse; use crate::app::{AuthorizationChecks, Web3ProxyApp, APP_USER_AGENT}; -use crate::rpcs::connection::Web3Connection; +use crate::rpcs::one::Web3Rpc; use crate::user_token::UserBearerToken; use anyhow::Context; use axum::headers::authorization::Bearer; @@ -80,7 +80,7 @@ pub struct RequestMetadata { // TODO: "archive" isn't really a boolean. pub archive_request: AtomicBool, /// if this is empty, there was a cache_hit - pub backend_requests: Mutex>>, + pub backend_requests: Mutex>>, pub no_servers: AtomicU64, pub error_response: AtomicBool, pub response_bytes: AtomicU64, diff --git a/web3_proxy/src/frontend/errors.rs b/web3_proxy/src/frontend/errors.rs index 22f048ee..162bf255 100644 --- a/web3_proxy/src/frontend/errors.rs +++ b/web3_proxy/src/frontend/errors.rs @@ -11,7 +11,7 @@ use axum::{ use derive_more::From; use http::header::InvalidHeaderValue; use ipnet::AddrParseError; -use log::{trace, warn}; +use log::{debug, trace, warn}; use migration::sea_orm::DbErr; use redis_rate_limiter::redis::RedisError; use reqwest::header::ToStrError; @@ -25,6 +25,7 @@ pub type FrontendResult = Result; pub enum FrontendErrorResponse { AccessDenied, Anyhow(anyhow::Error), + BadRequest(String), SemaphoreAcquireError(AcquireError), Database(DbErr), HeadersError(headers::Error), @@ -71,18 +72,17 @@ impl FrontendErrorResponse { ), ) } - // Self::(err) => { - // warn!("boxed err={:?}", err); - // ( - // StatusCode::INTERNAL_SERVER_ERROR, - // JsonRpcForwardedResponse::from_str( - // // TODO: make this better. maybe include the error type? - // "boxed error!", - // Some(StatusCode::INTERNAL_SERVER_ERROR.as_u16().into()), - // None, - // ), - // ) - // } + Self::BadRequest(err) => { + debug!("BAD_REQUEST: {}", err); + ( + StatusCode::BAD_REQUEST, + JsonRpcForwardedResponse::from_str( + &format!("bad request: {}", err), + Some(StatusCode::BAD_REQUEST.as_u16().into()), + None, + ), + ) + } Self::Database(err) => { warn!("database err={:?}", err); ( diff --git a/web3_proxy/src/frontend/mod.rs b/web3_proxy/src/frontend/mod.rs index dae34033..3ed6d163 100644 --- a/web3_proxy/src/frontend/mod.rs +++ b/web3_proxy/src/frontend/mod.rs @@ -1,4 +1,6 @@ //! `frontend` contains HTTP and websocket endpoints for use by users and admins. +//! +//! 
Important reading about axum extractors: https://docs.rs/axum/latest/axum/extract/index.html#the-order-of-extractors pub mod admin; pub mod authorization; @@ -31,6 +33,7 @@ pub enum FrontendResponseCaches { // TODO: what should this cache's value be? pub type FrontendResponseCache = Cache, hashbrown::hash_map::DefaultHashBuilder>; +pub type FrontendHealthCache = Cache<(), bool, hashbrown::hash_map::DefaultHashBuilder>; /// Start the frontend server. pub async fn serve(port: u16, proxy_app: Arc) -> anyhow::Result<()> { @@ -38,7 +41,11 @@ pub async fn serve(port: u16, proxy_app: Arc) -> anyhow::Result<() // TODO: a moka cache is probably way overkill for this. // no need for max items. only expire because of time to live let response_cache: FrontendResponseCache = Cache::builder() - .time_to_live(Duration::from_secs(1)) + .time_to_live(Duration::from_secs(2)) + .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default()); + + let health_cache: FrontendHealthCache = Cache::builder() + .time_to_live(Duration::from_millis(100)) .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default()); // TODO: read config for if fastest/versus should be available publicly. default off @@ -182,6 +189,7 @@ pub async fn serve(port: u16, proxy_app: Arc) -> anyhow::Result<() .layer(Extension(proxy_app.clone())) // frontend caches .layer(Extension(response_cache)) + .layer(Extension(health_cache)) // 404 for any unknown routes .fallback(errors::handler_404); @@ -199,7 +207,6 @@ pub async fn serve(port: u16, proxy_app: Arc) -> anyhow::Result<() - axum::extract::ConnectInfo (if not behind proxy) */ let service = app.into_make_service_with_connect_info::(); - // let service = app.into_make_service(); // `axum::Server` is a re-export of `hyper::Server` axum::Server::bind(&addr) diff --git a/web3_proxy/src/frontend/rpc_proxy_http.rs b/web3_proxy/src/frontend/rpc_proxy_http.rs index 067546db..7f3e87e4 100644 --- a/web3_proxy/src/frontend/rpc_proxy_http.rs +++ b/web3_proxy/src/frontend/rpc_proxy_http.rs @@ -8,7 +8,7 @@ use axum::extract::Path; use axum::headers::{Origin, Referer, UserAgent}; use axum::TypedHeader; use axum::{response::IntoResponse, Extension, Json}; -use axum_client_ip::ClientIp; +use axum_client_ip::InsecureClientIp; use axum_macros::debug_handler; use itertools::Itertools; use std::sync::Arc; @@ -19,7 +19,7 @@ use std::sync::Arc; #[debug_handler] pub async fn proxy_web3_rpc( Extension(app): Extension>, - ip: ClientIp, + ip: InsecureClientIp, origin: Option>, Json(payload): Json, ) -> FrontendResult { @@ -29,7 +29,7 @@ pub async fn proxy_web3_rpc( #[debug_handler] pub async fn fastest_proxy_web3_rpc( Extension(app): Extension>, - ip: ClientIp, + ip: InsecureClientIp, origin: Option>, Json(payload): Json, ) -> FrontendResult { @@ -41,7 +41,7 @@ pub async fn fastest_proxy_web3_rpc( #[debug_handler] pub async fn versus_proxy_web3_rpc( Extension(app): Extension>, - ip: ClientIp, + ip: InsecureClientIp, origin: Option>, Json(payload): Json, ) -> FrontendResult { @@ -50,7 +50,7 @@ pub async fn versus_proxy_web3_rpc( async fn _proxy_web3_rpc( app: Arc, - ClientIp(ip): ClientIp, + InsecureClientIp(ip): InsecureClientIp, origin: Option>, payload: JsonRpcRequestEnum, proxy_mode: ProxyMode, @@ -91,7 +91,7 @@ async fn _proxy_web3_rpc( #[debug_handler] pub async fn proxy_web3_rpc_with_key( Extension(app): Extension>, - ip: ClientIp, + ip: InsecureClientIp, origin: Option>, referer: Option>, user_agent: Option>, @@ -114,7 +114,7 @@ pub async fn proxy_web3_rpc_with_key( #[debug_handler] pub async 
fn fastest_proxy_web3_rpc_with_key( Extension(app): Extension>, - ip: ClientIp, + ip: InsecureClientIp, origin: Option>, referer: Option>, user_agent: Option>, @@ -137,7 +137,7 @@ pub async fn fastest_proxy_web3_rpc_with_key( #[debug_handler] pub async fn versus_proxy_web3_rpc_with_key( Extension(app): Extension>, - ip: ClientIp, + ip: InsecureClientIp, origin: Option>, referer: Option>, user_agent: Option>, @@ -160,7 +160,7 @@ pub async fn versus_proxy_web3_rpc_with_key( #[allow(clippy::too_many_arguments)] async fn _proxy_web3_rpc_with_key( app: Arc, - ClientIp(ip): ClientIp, + InsecureClientIp(ip): InsecureClientIp, origin: Option>, referer: Option>, user_agent: Option>, diff --git a/web3_proxy/src/frontend/rpc_proxy_ws.rs b/web3_proxy/src/frontend/rpc_proxy_ws.rs index f031aaf6..4de01bce 100644 --- a/web3_proxy/src/frontend/rpc_proxy_ws.rs +++ b/web3_proxy/src/frontend/rpc_proxy_ws.rs @@ -17,7 +17,7 @@ use axum::{ response::{IntoResponse, Redirect}, Extension, TypedHeader, }; -use axum_client_ip::ClientIp; +use axum_client_ip::InsecureClientIp; use axum_macros::debug_handler; use futures::SinkExt; use futures::{ @@ -49,7 +49,7 @@ pub enum ProxyMode { #[debug_handler] pub async fn websocket_handler( Extension(app): Extension>, - ip: ClientIp, + ip: InsecureClientIp, origin: Option>, ws_upgrade: Option, ) -> FrontendResult { @@ -61,7 +61,7 @@ pub async fn websocket_handler( #[debug_handler] pub async fn fastest_websocket_handler( Extension(app): Extension>, - ip: ClientIp, + ip: InsecureClientIp, origin: Option>, ws_upgrade: Option, ) -> FrontendResult { @@ -75,7 +75,7 @@ pub async fn fastest_websocket_handler( #[debug_handler] pub async fn versus_websocket_handler( Extension(app): Extension>, - ip: ClientIp, + ip: InsecureClientIp, origin: Option>, ws_upgrade: Option, ) -> FrontendResult { @@ -86,7 +86,7 @@ pub async fn versus_websocket_handler( async fn _websocket_handler( proxy_mode: ProxyMode, app: Arc, - ClientIp(ip): ClientIp, + InsecureClientIp(ip): InsecureClientIp, origin: Option>, ws_upgrade: Option, ) -> FrontendResult { @@ -121,7 +121,7 @@ async fn _websocket_handler( #[debug_handler] pub async fn websocket_handler_with_key( Extension(app): Extension>, - ip: ClientIp, + ip: InsecureClientIp, Path(rpc_key): Path, origin: Option>, referer: Option>, @@ -144,7 +144,7 @@ pub async fn websocket_handler_with_key( #[debug_handler] pub async fn fastest_websocket_handler_with_key( Extension(app): Extension>, - ip: ClientIp, + ip: InsecureClientIp, Path(rpc_key): Path, origin: Option>, referer: Option>, @@ -168,7 +168,7 @@ pub async fn fastest_websocket_handler_with_key( #[debug_handler] pub async fn versus_websocket_handler_with_key( Extension(app): Extension>, - ip: ClientIp, + ip: InsecureClientIp, Path(rpc_key): Path, origin: Option>, referer: Option>, @@ -192,7 +192,7 @@ pub async fn versus_websocket_handler_with_key( async fn _websocket_handler_with_key( proxy_mode: ProxyMode, app: Arc, - ClientIp(ip): ClientIp, + InsecureClientIp(ip): InsecureClientIp, rpc_key: String, origin: Option>, referer: Option>, diff --git a/web3_proxy/src/frontend/status.rs b/web3_proxy/src/frontend/status.rs index df7f8bc9..1199dc25 100644 --- a/web3_proxy/src/frontend/status.rs +++ b/web3_proxy/src/frontend/status.rs @@ -3,7 +3,7 @@ //! For ease of development, users can currently access these endponts. //! They will eventually move to another port. 
-use super::{FrontendResponseCache, FrontendResponseCaches}; +use super::{FrontendHealthCache, FrontendResponseCache, FrontendResponseCaches}; use crate::app::{Web3ProxyApp, APP_USER_AGENT}; use axum::{http::StatusCode, response::IntoResponse, Extension, Json}; use axum_macros::debug_handler; @@ -12,9 +12,15 @@ use std::sync::Arc; /// Health check page for load balancers to use. #[debug_handler] -pub async fn health(Extension(app): Extension>) -> impl IntoResponse { - // TODO: add a check that we aren't shutting down - if app.balanced_rpcs.synced() { +pub async fn health( + Extension(app): Extension>, + Extension(health_cache): Extension, +) -> impl IntoResponse { + let synced = health_cache + .get_with((), async { app.balanced_rpcs.synced() }) + .await; + + if synced { (StatusCode::OK, "OK") } else { (StatusCode::SERVICE_UNAVAILABLE, ":(") diff --git a/web3_proxy/src/frontend/users.rs b/web3_proxy/src/frontend/users.rs index 9bed230a..3c21e8d0 100644 --- a/web3_proxy/src/frontend/users.rs +++ b/web3_proxy/src/frontend/users.rs @@ -17,7 +17,7 @@ use axum::{ response::IntoResponse, Extension, Json, TypedHeader, }; -use axum_client_ip::ClientIp; +use axum_client_ip::InsecureClientIp; use axum_macros::debug_handler; use chrono::{TimeZone, Utc}; use entities::sea_orm_active_enums::{LogLevel, Role}; @@ -65,7 +65,7 @@ use crate::{PostLogin, PostLoginQuery}; #[debug_handler] pub async fn user_login_get( Extension(app): Extension>, - ClientIp(ip): ClientIp, + InsecureClientIp(ip): InsecureClientIp, // TODO: what does axum's error handling look like if the path fails to parse? Path(mut params): Path>, ) -> FrontendResult { @@ -165,7 +165,7 @@ pub async fn user_login_get( #[debug_handler] pub async fn user_login_post( Extension(app): Extension>, - ClientIp(ip): ClientIp, + InsecureClientIp(ip): InsecureClientIp, Query(query): Query, Json(payload): Json, ) -> FrontendResult { diff --git a/web3_proxy/src/lib.rs b/web3_proxy/src/lib.rs index 5c8e74ea..aab98d57 100644 --- a/web3_proxy/src/lib.rs +++ b/web3_proxy/src/lib.rs @@ -5,7 +5,6 @@ pub mod block_number; pub mod config; pub mod frontend; pub mod jsonrpc; -pub mod metered; pub mod metrics_frontend; pub mod pagerduty; pub mod rpcs; diff --git a/web3_proxy/src/metered/jsonrpc_error_count.rs b/web3_proxy/src/metered/jsonrpc_error_count.rs index 424e8b7c..eb8ed33f 100644 --- a/web3_proxy/src/metered/jsonrpc_error_count.rs +++ b/web3_proxy/src/metered/jsonrpc_error_count.rs @@ -1,12 +1,6 @@ //! A module providing the `JsonRpcErrorCount` metric. use ethers::providers::ProviderError; -use metered::metric::{Advice, Enter, OnResult}; -use metered::{ - atomic::AtomicInt, - clear::Clear, - metric::{Counter, Metric}, -}; use serde::Serialize; use std::ops::Deref; diff --git a/web3_proxy/src/metered/provider_error_count.rs b/web3_proxy/src/metered/provider_error_count.rs index 5670e3ba..9025c463 100644 --- a/web3_proxy/src/metered/provider_error_count.rs +++ b/web3_proxy/src/metered/provider_error_count.rs @@ -1,12 +1,6 @@ //! A module providing the `JsonRpcErrorCount` metric. 
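// --- editor's aside (illustrative sketch of the FrontendHealthCache pattern above;
// helper names are hypothetical, moka API as used elsewhere in this diff) ---
// The health endpoint debounces load-balancer probes: a unit-keyed cache with a
// 100ms time-to-live means balanced_rpcs.synced() runs at most a few times per
// second no matter how often haproxy polls /health.
use moka::future::Cache;
use std::time::Duration;

async fn sketch_health(is_synced: impl Fn() -> bool) -> &'static str {
    // in the real app this cache is built once at startup and shared via an axum Extension
    let health_cache: Cache<(), bool> = Cache::builder()
        .time_to_live(Duration::from_millis(100))
        .build();

    let synced = health_cache.get_with((), async { is_synced() }).await;

    if synced {
        "OK"
    } else {
        ":("
    }
}
// --- end editor's aside ---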
use ethers::providers::ProviderError; -use metered::metric::{Advice, Enter, OnResult}; -use metered::{ - atomic::AtomicInt, - clear::Clear, - metric::{Counter, Metric}, -}; use serde::Serialize; use std::ops::Deref; diff --git a/web3_proxy/src/metrics_frontend.rs b/web3_proxy/src/metrics_frontend.rs index 2eb2170a..cc2da646 100644 --- a/web3_proxy/src/metrics_frontend.rs +++ b/web3_proxy/src/metrics_frontend.rs @@ -23,13 +23,14 @@ pub async fn serve(app: Arc, port: u16) -> anyhow::Result<()> { // TODO: into_make_service is enough if we always run behind a proxy. make into_make_service_with_connect_info optional? /* - It sequentially looks for an IP in: + InsecureClientIp sequentially looks for an IP in: - x-forwarded-for header (de-facto standard) - x-real-ip header - forwarded header (new standard) - axum::extract::ConnectInfo (if not behind proxy) - So we probably won't need into_make_service_with_connect_info, but it shouldn't hurt + Since we run behind haproxy, x-forwarded-for will be set. + We probably won't need into_make_service_with_connect_info, but it shouldn't hurt. */ let service = app.into_make_service_with_connect_info::(); // let service = app.into_make_service(); diff --git a/web3_proxy/src/rpcs/blockchain.rs b/web3_proxy/src/rpcs/blockchain.rs index da1c2188..ce79d76a 100644 --- a/web3_proxy/src/rpcs/blockchain.rs +++ b/web3_proxy/src/rpcs/blockchain.rs @@ -1,10 +1,10 @@ -///! Keep track of the blockchain as seen by a Web3Connections. -use super::connection::Web3Connection; -use super::connections::Web3Connections; +use super::many::Web3Rpcs; +///! Keep track of the blockchain as seen by a Web3Rpcs. +use super::one::Web3Rpc; use super::transactions::TxStatus; use crate::frontend::authorization::Authorization; use crate::{ - config::BlockAndRpc, jsonrpc::JsonRpcRequest, rpcs::synced_connections::ConsensusConnections, + config::BlockAndRpc, jsonrpc::JsonRpcRequest, rpcs::synced_connections::ConsensusWeb3Rpcs, }; use anyhow::Context; use derive_more::From; @@ -92,7 +92,7 @@ impl Display for SavedBlock { } } -impl Web3Connections { +impl Web3Rpcs { /// add a block to our mappings and track the heaviest chain pub async fn save_block( &self, @@ -135,7 +135,7 @@ impl Web3Connections { &self, authorization: &Arc, hash: &H256, - rpc: Option<&Arc>, + rpc: Option<&Arc>, ) -> anyhow::Result { // first, try to get the hash from our cache // the cache is set last, so if its here, its everywhere @@ -190,12 +190,12 @@ impl Web3Connections { &self, authorization: &Arc, num: &U64, - ) -> anyhow::Result<(H256, bool)> { - let (block, is_archive_block) = self.cannonical_block(authorization, num).await?; + ) -> anyhow::Result<(H256, u64)> { + let (block, block_depth) = self.cannonical_block(authorization, num).await?; let hash = block.hash.expect("Saved blocks should always have hashes"); - Ok((hash, is_archive_block)) + Ok((hash, block_depth)) } /// Get the heaviest chain's block from cache or backend rpc @@ -204,7 +204,7 @@ impl Web3Connections { &self, authorization: &Arc, num: &U64, - ) -> anyhow::Result<(ArcBlock, bool)> { + ) -> anyhow::Result<(ArcBlock, u64)> { // we only have blocks by hash now // maybe save them during save_block in a blocks_by_number Cache> // if theres multiple, use petgraph to find the one on the main chain (and remove the others if they have enough confirmations) @@ -233,8 +233,11 @@ impl Web3Connections { let head_block_num = head_block_num.expect("we should only get here if we have a head block"); - // TODO: geth does 64, erigon does 90k. 
sometimes we run a mix - let archive_needed = num < &(head_block_num - U64::from(64)); + let block_depth = if num >= &head_block_num { + 0 + } else { + (head_block_num - num).as_u64() + }; // try to get the hash from our cache // deref to not keep the lock open @@ -243,7 +246,7 @@ impl Web3Connections { // TODO: pass authorization through here? let block = self.block(authorization, &block_hash, None).await?; - return Ok((block, archive_needed)); + return Ok((block, block_depth)); } // block number not in cache. we need to ask an rpc for it @@ -269,7 +272,7 @@ impl Web3Connections { // the block was fetched using eth_getBlockByNumber, so it should have all fields and be on the heaviest chain let block = self.save_block(block, true).await?; - Ok((block, archive_needed)) + Ok((block, block_depth)) } pub(super) async fn process_incoming_blocks( @@ -285,30 +288,33 @@ impl Web3Connections { // TODO: this will grow unbounded. prune old heads on this at the same time we prune the graph? let mut connection_heads = ConsensusFinder::default(); - while let Ok((new_block, rpc)) = block_receiver.recv_async().await { - let new_block = new_block.map(Into::into); + loop { + match block_receiver.recv_async().await { + Ok((new_block, rpc)) => { + let new_block = new_block.map(Into::into); - let rpc_name = rpc.name.clone(); + let rpc_name = rpc.name.clone(); - if let Err(err) = self - .process_block_from_rpc( - authorization, - &mut connection_heads, - new_block, - rpc, - &head_block_sender, - &pending_tx_sender, - ) - .await - { - warn!("unable to process block from rpc {}: {:?}", rpc_name, err); + if let Err(err) = self + .process_block_from_rpc( + authorization, + &mut connection_heads, + new_block, + rpc, + &head_block_sender, + &pending_tx_sender, + ) + .await + { + warn!("unable to process block from rpc {}: {:?}", rpc_name, err); + } + } + Err(err) => { + warn!("block_receiver exited! {:#?}", err); + return Err(err.into()); + } } } - - // TODO: if there was an error, should we return it instead of an Ok? - warn!("block_receiver exited!"); - - Ok(()) } /// `connection_heads` is a mapping of rpc_names to head block hashes. @@ -319,7 +325,7 @@ impl Web3Connections { authorization: &Arc, consensus_finder: &mut ConsensusFinder, rpc_head_block: Option, - rpc: Arc, + rpc: Arc, head_block_sender: &watch::Sender, pending_tx_sender: &Option>, ) -> anyhow::Result<()> { @@ -388,6 +394,7 @@ impl Web3Connections { // multiple blocks with the same fork! if consensus_saved_block.hash() == old_head_block.hash() { // no change in hash. no need to use head_block_sender + // TODO: trace level if rpc is backup debug!( "con {}{}/{}/{}/{} con={} rpc={}@{}", includes_backups_str, @@ -546,11 +553,11 @@ impl ConnectionsGroup { Self::new(true) } - fn remove(&mut self, rpc: &Web3Connection) -> Option { + fn remove(&mut self, rpc: &Web3Rpc) -> Option { self.rpc_name_to_hash.remove(rpc.name.as_str()) } - fn insert(&mut self, rpc: &Web3Connection, block_hash: H256) -> Option { + fn insert(&mut self, rpc: &Web3Rpc, block_hash: H256) -> Option { self.rpc_name_to_hash.insert(rpc.name.clone(), block_hash) } @@ -560,7 +567,7 @@ impl ConnectionsGroup { rpc_name: &str, hash: &H256, authorization: &Arc, - web3_connections: &Web3Connections, + web3_rpcs: &Web3Rpcs, ) -> anyhow::Result { // // TODO: why does this happen?!?! seems to only happen with uncled blocks // // TODO: maybe we should do try_get_with? @@ -571,16 +578,17 @@ impl ConnectionsGroup { // ); // this option should almost always be populated. 
if the connection reconnects at a bad time it might not be available though - let rpc = web3_connections.conns.get(rpc_name); + // TODO: if this is None, I think we should error. + let rpc = web3_rpcs.conns.get(rpc_name); - web3_connections.block(authorization, hash, rpc).await + web3_rpcs.block(authorization, hash, rpc).await } // TODO: do this during insert/remove? pub(self) async fn highest_block( &self, authorization: &Arc, - web3_connections: &Web3Connections, + web3_rpcs: &Web3Rpcs, ) -> Option { let mut checked_heads = HashSet::with_capacity(self.rpc_name_to_hash.len()); let mut highest_block = None::; @@ -592,7 +600,7 @@ impl ConnectionsGroup { } let rpc_block = match self - .get_block_from_rpc(rpc_name, rpc_head_hash, authorization, web3_connections) + .get_block_from_rpc(rpc_name, rpc_head_hash, authorization, web3_rpcs) .await { Ok(x) => x, @@ -627,9 +635,9 @@ impl ConnectionsGroup { pub(self) async fn consensus_head_connections( &self, authorization: &Arc, - web3_connections: &Web3Connections, - ) -> anyhow::Result { - let mut maybe_head_block = match self.highest_block(authorization, web3_connections).await { + web3_rpcs: &Web3Rpcs, + ) -> anyhow::Result { + let mut maybe_head_block = match self.highest_block(authorization, web3_rpcs).await { None => return Err(anyhow::anyhow!("No blocks known")), Some(x) => x, }; @@ -663,27 +671,25 @@ impl ConnectionsGroup { continue; } - if let Some(rpc) = web3_connections.conns.get(rpc_name.as_str()) { + if let Some(rpc) = web3_rpcs.conns.get(rpc_name.as_str()) { highest_rpcs.insert(rpc_name); highest_rpcs_sum_soft_limit += rpc.soft_limit; } else { // i don't think this is an error. i think its just if a reconnect is currently happening warn!("connection missing: {}", rpc_name); + debug!("web3_rpcs.conns: {:#?}", web3_rpcs.conns); } } - if highest_rpcs_sum_soft_limit >= web3_connections.min_sum_soft_limit - && highest_rpcs.len() >= web3_connections.min_head_rpcs + if highest_rpcs_sum_soft_limit >= web3_rpcs.min_sum_soft_limit + && highest_rpcs.len() >= web3_rpcs.min_head_rpcs { // we have enough servers with enough requests break; } // not enough rpcs yet. check the parent block - if let Some(parent_block) = web3_connections - .block_hashes - .get(&maybe_head_block.parent_hash) - { + if let Some(parent_block) = web3_rpcs.block_hashes.get(&maybe_head_block.parent_hash) { // trace!( // child=%maybe_head_hash, parent=%parent_block.hash.unwrap(), "avoiding thundering herd", // ); @@ -691,25 +697,25 @@ impl ConnectionsGroup { maybe_head_block = parent_block; continue; } else { - if num_known < web3_connections.min_head_rpcs { + if num_known < web3_rpcs.min_head_rpcs { return Err(anyhow::anyhow!( "not enough rpcs connected: {}/{}/{}", highest_rpcs.len(), num_known, - web3_connections.min_head_rpcs, + web3_rpcs.min_head_rpcs, )); } else { let soft_limit_percent = (highest_rpcs_sum_soft_limit as f32 - / web3_connections.min_sum_soft_limit as f32) + / web3_rpcs.min_sum_soft_limit as f32) * 100.0; return Err(anyhow::anyhow!( "ran out of parents to check. rpcs {}/{}/{}. soft limit: {:.2}% ({}/{})", highest_rpcs.len(), num_known, - web3_connections.min_head_rpcs, + web3_rpcs.min_head_rpcs, highest_rpcs_sum_soft_limit, - web3_connections.min_sum_soft_limit, + web3_rpcs.min_sum_soft_limit, soft_limit_percent, )); } @@ -719,29 +725,28 @@ impl ConnectionsGroup { // TODO: if consensus_head_rpcs.is_empty, try another method of finding the head block. will need to change the return Err above into breaks. 
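// --- editor's aside (illustrative sketch of the quorum check used by the consensus
// search above; names are hypothetical and the real code walks parent hashes through
// the block cache while accumulating rpcs) ---
// A head candidate is accepted only once enough rpcs (min_head_rpcs) carrying enough
// combined capacity (min_sum_soft_limit) agree on it or on a descendant block.
struct SketchRpc {
    soft_limit: u32,
}

fn sketch_quorum(
    agreeing: &[SketchRpc],
    min_head_rpcs: usize,
    min_sum_soft_limit: u32,
) -> Result<(), String> {
    let sum_soft_limit: u32 = agreeing.iter().map(|x| x.soft_limit).sum();

    if agreeing.len() >= min_head_rpcs && sum_soft_limit >= min_sum_soft_limit {
        Ok(())
    } else {
        let soft_limit_percent = (sum_soft_limit as f32 / min_sum_soft_limit as f32) * 100.0;
        Err(format!(
            "not enough resources. rpcs {}/{}. soft limit: {:.2}% ({}/{})",
            agreeing.len(),
            min_head_rpcs,
            soft_limit_percent,
            sum_soft_limit,
            min_sum_soft_limit,
        ))
    }
}
// --- end editor's aside ---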
// we've done all the searching for the heaviest block that we can - if highest_rpcs.len() < web3_connections.min_head_rpcs - || highest_rpcs_sum_soft_limit < web3_connections.min_sum_soft_limit + if highest_rpcs.len() < web3_rpcs.min_head_rpcs + || highest_rpcs_sum_soft_limit < web3_rpcs.min_sum_soft_limit { // if we get here, not enough servers are synced. return an error - let soft_limit_percent = (highest_rpcs_sum_soft_limit as f32 - / web3_connections.min_sum_soft_limit as f32) - * 100.0; + let soft_limit_percent = + (highest_rpcs_sum_soft_limit as f32 / web3_rpcs.min_sum_soft_limit as f32) * 100.0; return Err(anyhow::anyhow!( "Not enough resources. rpcs {}/{}/{}. soft limit: {:.2}% ({}/{})", highest_rpcs.len(), num_known, - web3_connections.min_head_rpcs, + web3_rpcs.min_head_rpcs, highest_rpcs_sum_soft_limit, - web3_connections.min_sum_soft_limit, + web3_rpcs.min_sum_soft_limit, soft_limit_percent, )); } // success! this block has enough soft limit and nodes on it (or on later blocks) - let conns: Vec> = highest_rpcs + let conns: Vec> = highest_rpcs .into_iter() - .filter_map(|conn_name| web3_connections.conns.get(conn_name).cloned()) + .filter_map(|conn_name| web3_rpcs.conns.get(conn_name).cloned()) .collect(); // TODO: DEBUG only check @@ -754,7 +759,7 @@ impl ConnectionsGroup { let consensus_head_block: SavedBlock = maybe_head_block.into(); - Ok(ConsensusConnections { + Ok(ConsensusWeb3Rpcs { head_block: Some(consensus_head_block), conns, num_checked_conns: self.rpc_name_to_hash.len(), @@ -781,7 +786,7 @@ impl Default for ConsensusFinder { } impl ConsensusFinder { - fn remove(&mut self, rpc: &Web3Connection) -> Option { + fn remove(&mut self, rpc: &Web3Rpc) -> Option { // TODO: should we have multiple backup tiers? (remote datacenters vs third party) if !rpc.backup { self.main.remove(rpc); @@ -789,7 +794,7 @@ impl ConsensusFinder { self.all.remove(rpc) } - fn insert(&mut self, rpc: &Web3Connection, new_hash: H256) -> Option { + fn insert(&mut self, rpc: &Web3Rpc, new_hash: H256) -> Option { // TODO: should we have multiple backup tiers? (remote datacenters vs third party) if !rpc.backup { self.main.insert(rpc, new_hash); @@ -801,9 +806,9 @@ impl ConsensusFinder { async fn update_rpc( &mut self, rpc_head_block: Option, - rpc: Arc, + rpc: Arc, // we need this so we can save the block to caches. i don't like it though. maybe we should use a lazy_static Cache wrapper that has a "save_block" method?. 
i generally dislike globals but i also dislike all the types having to pass eachother around - web3_connections: &Web3Connections, + web3_connections: &Web3Rpcs, ) -> anyhow::Result { // add the rpc's block to connection_heads, or remove the rpc from connection_heads let changed = match rpc_head_block { @@ -848,15 +853,15 @@ impl ConsensusFinder { async fn best_consensus_connections( &mut self, authorization: &Arc, - web3_connections: &Web3Connections, - ) -> ConsensusConnections { + web3_connections: &Web3Rpcs, + ) -> ConsensusWeb3Rpcs { let highest_block_num = match self .all .highest_block(authorization, web3_connections) .await { None => { - return ConsensusConnections::default(); + return ConsensusWeb3Rpcs::default(); } Some(x) => x.number.expect("blocks here should always have a number"), }; @@ -897,7 +902,7 @@ impl ConsensusFinder { if self.all.rpc_name_to_hash.len() < web3_connections.min_head_rpcs { debug!("No consensus head yet: {}", err); } - return ConsensusConnections::default(); + return ConsensusWeb3Rpcs::default(); } Ok(x) => x, }; @@ -920,7 +925,7 @@ impl ConsensusFinder { } else { // TODO: i don't think we need this error. and i doublt we'll ever even get here error!("NO CONSENSUS HEAD!"); - ConsensusConnections::default() + ConsensusWeb3Rpcs::default() } } } diff --git a/web3_proxy/src/rpcs/connections.rs b/web3_proxy/src/rpcs/many.rs similarity index 89% rename from web3_proxy/src/rpcs/connections.rs rename to web3_proxy/src/rpcs/many.rs index 9ecf3fd9..a46e66f6 100644 --- a/web3_proxy/src/rpcs/connections.rs +++ b/web3_proxy/src/rpcs/many.rs @@ -1,12 +1,10 @@ -///! Load balanced communication with a group of web3 providers +///! Load balanced communication with a group of web3 rpc providers use super::blockchain::{ArcBlock, BlockHashesCache}; -use super::connection::Web3Connection; -use super::request::{ - OpenRequestHandle, OpenRequestHandleMetrics, OpenRequestResult, RequestRevertHandler, -}; -use super::synced_connections::ConsensusConnections; +use super::one::Web3Rpc; +use super::request::{OpenRequestHandle, OpenRequestResult, RequestRevertHandler}; +use super::synced_connections::ConsensusWeb3Rpcs; use crate::app::{flatten_handle, AnyhowJoinHandle}; -use crate::config::{BlockAndRpc, TxHashAndRpc, Web3ConnectionConfig}; +use crate::config::{BlockAndRpc, TxHashAndRpc, Web3RpcConfig}; use crate::frontend::authorization::{Authorization, RequestMetadata}; use crate::frontend::rpc_proxy_ws::ProxyMode; use crate::jsonrpc::{JsonRpcForwardedResponse, JsonRpcRequest}; @@ -14,7 +12,7 @@ use crate::rpcs::transactions::TxStatus; use counter::Counter; use derive_more::From; use ethers::prelude::{ProviderError, TxHash, H256, U64}; -use futures::future::{join_all, try_join_all}; +use futures::future::try_join_all; use futures::stream::FuturesUnordered; use futures::StreamExt; use hashbrown::{HashMap, HashSet}; @@ -36,11 +34,11 @@ use tokio::time::{interval, sleep, sleep_until, Duration, Instant, MissedTickBeh /// A collection of web3 connections. Sends requests either the current best server or all servers. #[derive(From)] -pub struct Web3Connections { +pub struct Web3Rpcs { /// any requests will be forwarded to one (or more) of these connections - pub(crate) conns: HashMap>, + pub(crate) conns: HashMap>, /// all providers with the same consensus head block. 
won't update if there is no `self.watch_consensus_head_sender` - pub(super) watch_consensus_connections_sender: watch::Sender>, + pub(super) watch_consensus_connections_sender: watch::Sender>, /// this head receiver makes it easy to wait until there is a new block pub(super) watch_consensus_head_receiver: Option>, pub(super) pending_transactions: @@ -54,13 +52,13 @@ pub struct Web3Connections { pub(super) min_sum_soft_limit: u32, } -impl Web3Connections { +impl Web3Rpcs { /// Spawn durable connections to multiple Web3 providers. #[allow(clippy::too_many_arguments)] pub async fn spawn( chain_id: u64, db_conn: Option, - server_configs: HashMap, + server_configs: HashMap, http_client: Option, redis_pool: Option, block_map: BlockHashesCache, @@ -69,7 +67,6 @@ impl Web3Connections { min_head_rpcs: usize, pending_tx_sender: Option>, pending_transactions: Cache, - open_request_handle_metrics: Arc, ) -> anyhow::Result<(Arc, AnyhowJoinHandle<()>)> { let (pending_tx_id_sender, pending_tx_id_receiver) = flume::unbounded(); let (block_sender, block_receiver) = flume::unbounded::(); @@ -92,12 +89,10 @@ impl Web3Connections { }; let http_interval_sender = if http_client.is_some() { - let (sender, receiver) = broadcast::channel(1); - - drop(receiver); + let (sender, _) = broadcast::channel(1); // TODO: what interval? follow a websocket also? maybe by watching synced connections with a timeout. will need debounce - let mut interval = interval(Duration::from_millis(expected_block_time_ms)); + let mut interval = interval(Duration::from_millis(expected_block_time_ms / 2)); interval.set_missed_tick_behavior(MissedTickBehavior::Delay); let sender = Arc::new(sender); @@ -107,13 +102,14 @@ impl Web3Connections { async move { loop { - // TODO: every time a head_block arrives (with a small delay for known slow servers), or on the interval. interval.tick().await; - // // trace!("http interval ready"); + // trace!("http interval ready"); - // errors are okay. they mean that all receivers have been dropped - let _ = sender.send(()); + if let Err(_) = sender.send(()) { + // errors are okay. they mean that all receivers have been dropped, or the rpcs just haven't started yet + trace!("no http receivers"); + }; } } }; @@ -128,11 +124,11 @@ impl Web3Connections { // turn configs into connections (in parallel) // TODO: move this into a helper function. then we can use it when configs change (will need a remove function too) - // TODO: futures unordered? - let spawn_handles: Vec<_> = server_configs + let mut spawn_handles: FuturesUnordered<_> = server_configs .into_iter() .filter_map(|(server_name, server_config)| { if server_config.disabled { + info!("{} is disabled", server_name); return None; } @@ -149,7 +145,8 @@ impl Web3Connections { let pending_tx_id_sender = Some(pending_tx_id_sender.clone()); let block_map = block_map.clone(); - let open_request_handle_metrics = open_request_handle_metrics.clone(); + + debug!("spawning {}", server_name); let handle = tokio::spawn(async move { server_config @@ -163,7 +160,6 @@ impl Web3Connections { block_map, block_sender, pending_tx_id_sender, - open_request_handle_metrics, ) .await }); @@ -177,19 +173,20 @@ impl Web3Connections { let mut handles = vec![]; // TODO: futures unordered? - for x in join_all(spawn_handles).await { - // TODO: how should we handle errors here? 
one rpc being down shouldn't cause the program to exit + while let Some(x) = spawn_handles.next().await { match x { Ok(Ok((connection, handle))) => { + // web3 connection worked connections.insert(connection.name.clone(), connection); handles.push(handle); } Ok(Err(err)) => { - // if we got an error here, it is not retryable + // if we got an error here, the app can continue on // TODO: include context about which connection failed error!("Unable to create connection. err={:?}", err); } Err(err) => { + // something actually bad happened. exit with an error return Err(err.into()); } } @@ -229,7 +226,6 @@ impl Web3Connections { let connections = connections.clone(); tokio::spawn(async move { - // TODO: try_join_all with the other handles here connections .subscribe( authorization, @@ -245,13 +241,13 @@ impl Web3Connections { Ok((connections, handle)) } - pub fn get(&self, conn_name: &str) -> Option<&Arc> { + pub fn get(&self, conn_name: &str) -> Option<&Arc> { self.conns.get(conn_name) } /// subscribe to blocks and transactions from all the backend rpcs. - /// blocks are processed by all the `Web3Connection`s and then sent to the `block_receiver` - /// transaction ids from all the `Web3Connection`s are deduplicated and forwarded to `pending_tx_sender` + /// blocks are processed by all the `Web3Rpc`s and then sent to the `block_receiver` + /// transaction ids from all the `Web3Rpc`s are deduplicated and forwarded to `pending_tx_sender` async fn subscribe( self: Arc, authorization: Arc, @@ -327,7 +323,6 @@ impl Web3Connections { } info!("subscriptions over: {:?}", self); - Ok(()) } @@ -415,7 +410,7 @@ impl Web3Connections { &self, authorization: &Arc, request_metadata: Option<&Arc>, - skip: &[Arc], + skip: &[Arc], min_block_needed: Option<&U64>, ) -> anyhow::Result { if let Ok(without_backups) = self @@ -450,13 +445,10 @@ impl Web3Connections { allow_backups: bool, authorization: &Arc, request_metadata: Option<&Arc>, - skip: &[Arc], + skip: &[Arc], min_block_needed: Option<&U64>, ) -> anyhow::Result { - let usable_rpcs_by_head_num_and_weight: BTreeMap< - (Option, u64), - Vec>, - > = { + let usable_rpcs_by_head_num_and_weight: BTreeMap<(Option, u64), Vec>> = { let synced_connections = self.watch_consensus_connections_sender.borrow().clone(); let head_block_num = if let Some(head_block) = synced_connections.head_block.as_ref() { @@ -647,12 +639,15 @@ impl Web3Connections { authorization: &Arc, block_needed: Option<&U64>, max_count: Option, + always_include_backups: bool, ) -> Result, Option> { - if let Ok(without_backups) = self - ._all_connections(false, authorization, block_needed, max_count) - .await - { - return Ok(without_backups); + if !always_include_backups { + if let Ok(without_backups) = self + ._all_connections(false, authorization, block_needed, max_count) + .await + { + return Ok(without_backups); + } } self._all_connections(true, authorization, block_needed, max_count) @@ -678,17 +673,21 @@ impl Web3Connections { let mut tried = HashSet::new(); - let conns_to_try = itertools::chain( - // TODO: sort by tier - self.watch_consensus_connections_sender - .borrow() - .conns - .clone(), - // TODO: sort by tier - self.conns.values().cloned(), - ); + let mut synced_conns = self + .watch_consensus_connections_sender + .borrow() + .conns + .clone(); - for connection in conns_to_try { + // synced connections are all on the same block. 
sort them by tier with higher soft limits first + synced_conns.sort_by_cached_key(|x| (x.tier, u32::MAX - x.soft_limit)); + + // if there aren't enough synced connections, include more connections + let mut all_conns: Vec<_> = self.conns.values().cloned().collect(); + + sort_connections_by_sync_status(&mut all_conns); + + for connection in itertools::chain(synced_conns, all_conns) { if max_count == 0 { break; } @@ -760,13 +759,8 @@ impl Web3Connections { loop { let num_skipped = skip_rpcs.len(); - if num_skipped > 0 { - // trace!("skip_rpcs: {:?}", skip_rpcs); - - // TODO: is self.conns still right now that we split main and backup servers? - if num_skipped == self.conns.len() { - break; - } + if num_skipped == self.conns.len() { + break; } match self @@ -1017,10 +1011,16 @@ impl Web3Connections { block_needed: Option<&U64>, error_level: Level, max_count: Option, + always_include_backups: bool, ) -> anyhow::Result { loop { match self - .all_connections(authorization, block_needed, max_count) + .all_connections( + authorization, + block_needed, + max_count, + always_include_backups, + ) .await { Ok(active_request_handles) => { @@ -1117,23 +1117,23 @@ impl Web3Connections { } } -impl fmt::Debug for Web3Connections { +impl fmt::Debug for Web3Rpcs { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // TODO: the default formatter takes forever to write. this is too quiet though - f.debug_struct("Web3Connections") + f.debug_struct("Web3Rpcs") .field("conns", &self.conns) .finish_non_exhaustive() } } -impl Serialize for Web3Connections { +impl Serialize for Web3Rpcs { fn serialize(&self, serializer: S) -> Result where S: Serializer, { - let mut state = serializer.serialize_struct("Web3Connections", 6)?; + let mut state = serializer.serialize_struct("Web3Rpcs", 6)?; - let conns: Vec<&Web3Connection> = self.conns.values().map(|x| x.as_ref()).collect(); + let conns: Vec<&Web3Rpc> = self.conns.values().map(|x| x.as_ref()).collect(); state.serialize_field("conns", &conns)?; { @@ -1152,13 +1152,29 @@ impl Serialize for Web3Connections { } } +/// sort by block number (descending) and tier (ascending) +fn sort_connections_by_sync_status(rpcs: &mut Vec>) { + rpcs.sort_by_cached_key(|x| { + let reversed_head_block = u64::MAX + - x.head_block + .read() + .as_ref() + .map(|x| x.number().as_u64()) + .unwrap_or(0); + + let tier = x.tier; + + (reversed_head_block, tier) + }); +} + mod tests { // TODO: why is this allow needed? does tokio::test get in the way somehow? 
#![allow(unused_imports)] use super::*; use crate::rpcs::{ blockchain::{ConsensusFinder, SavedBlock}, - connection::ProviderState, + one::ProviderState, provider::Web3Provider, }; use ethers::types::{Block, U256}; @@ -1167,6 +1183,80 @@ mod tests { use std::time::{SystemTime, UNIX_EPOCH}; use tokio::sync::RwLock as AsyncRwLock; + #[tokio::test] + async fn test_sort_connections_by_sync_status() { + let block_0 = Block { + number: Some(0.into()), + hash: Some(H256::random()), + ..Default::default() + }; + let block_1 = Block { + number: Some(1.into()), + hash: Some(H256::random()), + parent_hash: block_0.hash.unwrap(), + ..Default::default() + }; + let block_2 = Block { + number: Some(2.into()), + hash: Some(H256::random()), + parent_hash: block_1.hash.unwrap(), + ..Default::default() + }; + + let blocks: Vec<_> = [block_0, block_1, block_2] + .into_iter() + .map(|x| SavedBlock::new(Arc::new(x))) + .collect(); + + let mut rpcs = [ + Web3Rpc { + name: "a".to_string(), + tier: 0, + head_block: RwLock::new(None), + ..Default::default() + }, + Web3Rpc { + name: "b".to_string(), + tier: 0, + head_block: RwLock::new(blocks.get(1).cloned()), + ..Default::default() + }, + Web3Rpc { + name: "c".to_string(), + tier: 0, + head_block: RwLock::new(blocks.get(2).cloned()), + ..Default::default() + }, + Web3Rpc { + name: "d".to_string(), + tier: 1, + head_block: RwLock::new(None), + ..Default::default() + }, + Web3Rpc { + name: "e".to_string(), + tier: 1, + head_block: RwLock::new(blocks.get(1).cloned()), + ..Default::default() + }, + Web3Rpc { + name: "f".to_string(), + tier: 1, + head_block: RwLock::new(blocks.get(2).cloned()), + ..Default::default() + }, + ] + .into_iter() + .map(Arc::new) + .collect(); + + sort_connections_by_sync_status(&mut rpcs); + + let names_in_sort_order: Vec<_> = rpcs.iter().map(|x| x.name.as_str()).collect(); + + assert_eq!(names_in_sort_order, ["c", "f", "b", "e", "a", "d"]); + } + #[tokio::test] async fn test_server_selection_by_height() { // TODO: do this better. can test_env_logger and tokio test be stacked? 
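// Illustrative sketch (not part of the diff): the sort key that
// `sort_connections_by_sync_status` builds above, shown with plain tuples so
// the ordering in `test_sort_connections_by_sync_status` is easy to verify.
// The names and fixture values here are hypothetical stand-ins.
fn sort_key(head_block: Option<u64>, tier: u64) -> (u64, u64) {
    // subtracting from u64::MAX turns an ascending sort into "highest block first";
    // a missing head block counts as block 0, so it sorts last within its tier
    let reversed_head_block = u64::MAX - head_block.unwrap_or(0);
    (reversed_head_block, tier)
}

fn main() {
    // (name, head block number, tier), mirroring the six test rpcs above
    let mut rpcs = vec![
        ("a", None, 0u64),
        ("b", Some(1u64), 0),
        ("c", Some(2), 0),
        ("d", None, 1),
        ("e", Some(1), 1),
        ("f", Some(2), 1),
    ];

    rpcs.sort_by_cached_key(|(_, head, tier)| sort_key(*head, *tier));

    let names: Vec<_> = rpcs.iter().map(|(name, _, _)| *name).collect();

    // highest block first, then lowest tier first
    assert_eq!(names, ["c", "f", "b", "e", "a", "d"]);
    println!("{:?}", names);
}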
@@ -1206,50 +1296,32 @@ mod tests { let block_data_limit = u64::MAX; - let head_rpc = Web3Connection { + let head_rpc = Web3Rpc { name: "synced".to_string(), - db_conn: None, - display_name: None, - url: "ws://example.com/synced".to_string(), - http_client: None, - active_requests: 0.into(), - frontend_requests: 0.into(), - internal_requests: 0.into(), provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new( Web3Provider::Mock, ))), - hard_limit: None, - hard_limit_until: None, soft_limit: 1_000, - automatic_block_limit: true, + automatic_block_limit: false, backup: false, block_data_limit: block_data_limit.into(), tier: 0, head_block: RwLock::new(Some(head_block.clone())), - open_request_handle_metrics: Arc::new(Default::default()), + ..Default::default() }; - let lagged_rpc = Web3Connection { + let lagged_rpc = Web3Rpc { name: "lagged".to_string(), - db_conn: None, - display_name: None, - url: "ws://example.com/lagged".to_string(), - http_client: None, - active_requests: 0.into(), - frontend_requests: 0.into(), - internal_requests: 0.into(), provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new( Web3Provider::Mock, ))), - hard_limit: None, - hard_limit_until: None, soft_limit: 1_000, automatic_block_limit: false, backup: false, block_data_limit: block_data_limit.into(), tier: 0, head_block: RwLock::new(Some(lagged_block.clone())), - open_request_handle_metrics: Arc::new(Default::default()), + ..Default::default() }; assert!(head_rpc.has_block_data(&lagged_block.number())); @@ -1268,8 +1340,8 @@ mod tests { let (watch_consensus_connections_sender, _) = watch::channel(Default::default()); - // TODO: make a Web3Connections::new - let conns = Web3Connections { + // TODO: make a Web3Rpcs::new + let conns = Web3Rpcs { conns, watch_consensus_head_receiver: None, watch_consensus_connections_sender, @@ -1319,10 +1391,10 @@ mod tests { // no head block because the rpcs haven't communicated through their channels assert!(conns.head_block_hash().is_none()); - // all_backend_connections gives everything regardless of sync status + // all_backend_connections gives all non-backup servers regardless of sync status assert_eq!( conns - .all_connections(&authorization, None, None) + .all_connections(&authorization, None, None, false) .await .unwrap() .len(), @@ -1439,50 +1511,32 @@ mod tests { let head_block: SavedBlock = Arc::new(head_block).into(); - let pruned_rpc = Web3Connection { + let pruned_rpc = Web3Rpc { name: "pruned".to_string(), - db_conn: None, - display_name: None, - url: "ws://example.com/pruned".to_string(), - http_client: None, - active_requests: 0.into(), - frontend_requests: 0.into(), - internal_requests: 0.into(), provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new( Web3Provider::Mock, ))), - hard_limit: None, - hard_limit_until: None, soft_limit: 3_000, automatic_block_limit: false, backup: false, block_data_limit: 64.into(), tier: 1, head_block: RwLock::new(Some(head_block.clone())), - open_request_handle_metrics: Arc::new(Default::default()), + ..Default::default() }; - let archive_rpc = Web3Connection { + let archive_rpc = Web3Rpc { name: "archive".to_string(), - db_conn: None, - display_name: None, - url: "ws://example.com/archive".to_string(), - http_client: None, - active_requests: 0.into(), - frontend_requests: 0.into(), - internal_requests: 0.into(), provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new( Web3Provider::Mock, ))), - hard_limit: None, - hard_limit_until: None, soft_limit: 1_000, automatic_block_limit: false, 
            backup: false,
            block_data_limit: u64::MAX.into(),
            tier: 2,
            head_block: RwLock::new(Some(head_block.clone())),
-           open_request_handle_metrics: Arc::new(Default::default()),
+           ..Default::default()
        };

        assert!(pruned_rpc.has_block_data(&head_block.number()));
@@ -1500,8 +1554,8 @@ mod tests {

        let (watch_consensus_connections_sender, _) = watch::channel(Default::default());

-       // TODO: make a Web3Connections::new
-       let conns = Web3Connections {
+       // TODO: make a Web3Rpcs::new
+       let conns = Web3Rpcs {
            conns,
            watch_consensus_head_receiver: None,
            watch_consensus_connections_sender,
diff --git a/web3_proxy/src/rpcs/mod.rs b/web3_proxy/src/rpcs/mod.rs
index 9a05f896..44ea5afe 100644
--- a/web3_proxy/src/rpcs/mod.rs
+++ b/web3_proxy/src/rpcs/mod.rs
@@ -1,7 +1,7 @@
// TODO: all pub, or export useful things here instead?
pub mod blockchain;
-pub mod connection;
-pub mod connections;
+pub mod many;
+pub mod one;
pub mod provider;
pub mod request;
pub mod synced_connections;
diff --git a/web3_proxy/src/rpcs/connection.rs b/web3_proxy/src/rpcs/one.rs
similarity index 92%
rename from web3_proxy/src/rpcs/connection.rs
rename to web3_proxy/src/rpcs/one.rs
index 99fc3cd1..05bc0e54 100644
--- a/web3_proxy/src/rpcs/connection.rs
+++ b/web3_proxy/src/rpcs/one.rs
@@ -1,7 +1,7 @@
///! Rate-limited communication with a web3 provider.
use super::blockchain::{ArcBlock, BlockHashesCache, SavedBlock};
use super::provider::Web3Provider;
-use super::request::{OpenRequestHandle, OpenRequestHandleMetrics, OpenRequestResult};
+use super::request::{OpenRequestHandle, OpenRequestResult};
use crate::app::{flatten_handle, AnyhowJoinHandle};
use crate::config::BlockAndRpc;
use crate::frontend::authorization::Authorization;
@@ -10,6 +10,7 @@ use ethers::prelude::{Bytes, Middleware, ProviderError, TxHash, H256, U64};
use ethers::types::U256;
use futures::future::try_join_all;
use futures::StreamExt;
+use hdrhistogram::Histogram;
use log::{debug, error, info, trace, warn, Level};
use migration::sea_orm::DatabaseConnection;
use parking_lot::RwLock;
@@ -25,7 +26,7 @@ use std::{cmp::Ordering, sync::Arc};
use thread_fast_rng::rand::Rng;
use thread_fast_rng::thread_fast_rng;
use tokio::sync::{broadcast, oneshot, watch, RwLock as AsyncRwLock};
-use tokio::time::{interval, sleep, sleep_until, timeout, Duration, Instant, MissedTickBehavior};
+use tokio::time::{sleep, sleep_until, timeout, Duration, Instant};

// TODO: maybe provider state should have the block data limit in it. but it is inside an async lock and we can't Serialize then
#[derive(Clone, Debug)]
@@ -35,6 +36,12 @@ pub enum ProviderState {
    Connected(Arc),
}

+impl Default for ProviderState {
+    fn default() -> Self {
+        Self::None
+    }
+}
+
impl ProviderState {
    pub async fn provider(&self, allow_not_ready: bool) -> Option<&Arc> {
        match self {
@@ -58,8 +65,31 @@ impl ProviderState {
    }
}

+pub struct Web3RpcLatencies {
+    /// Track how far behind the fastest node we are
+    new_head: Histogram,
+    /// exponentially weighted moving average of how far behind the fastest node we are
+    new_head_ewma: u32,
+    /// Track how long an rpc call takes on average
+    request: Histogram,
+    /// exponentially weighted moving average of how long an rpc call takes
+    request_ewma: u32,
+}
+
+impl Default for Web3RpcLatencies {
+    fn default() -> Self {
+        Self {
+            new_head: Histogram::new(3).unwrap(),
+            new_head_ewma: 0,
+            request: Histogram::new(3).unwrap(),
+            request_ewma: 0,
+        }
+    }
+}
+
/// An active connection to a Web3 RPC server like geth or erigon.
-pub struct Web3Connection { +#[derive(Default)] +pub struct Web3Rpc { pub name: String, pub display_name: Option, pub db_conn: Option, @@ -91,12 +121,13 @@ pub struct Web3Connection { pub(super) block_data_limit: AtomicU64, /// Lower tiers are higher priority when sending requests pub(super) tier: u64, - /// TODO: should this be an AsyncRwLock? + /// TODO: change this to a watch channel so that http providers can subscribe and take action on change pub(super) head_block: RwLock>, - pub(super) open_request_handle_metrics: Arc, + /// Track how fast this RPC is + pub(super) latency: Web3RpcLatencies, } -impl Web3Connection { +impl Web3Rpc { /// Connect to a web3 rpc // TODO: have this take a builder (which will have channels attached). or maybe just take the config and give the config public fields #[allow(clippy::too_many_arguments)] @@ -120,8 +151,7 @@ impl Web3Connection { tx_id_sender: Option)>>, reconnect: bool, tier: u64, - open_request_handle_metrics: Arc, - ) -> anyhow::Result<(Arc, AnyhowJoinHandle<()>)> { + ) -> anyhow::Result<(Arc, AnyhowJoinHandle<()>)> { let hard_limit = hard_limit.map(|(hard_rate_limit, redis_pool)| { // TODO: is cache size 1 okay? i think we need RedisRateLimiter::new( @@ -154,19 +184,14 @@ impl Web3Connection { display_name, http_client, url: url_str, - active_requests: 0.into(), - frontend_requests: 0.into(), - internal_requests: 0.into(), - provider_state: AsyncRwLock::new(ProviderState::None), hard_limit, hard_limit_until, soft_limit, automatic_block_limit, backup, block_data_limit, - head_block: RwLock::new(Default::default()), tier, - open_request_handle_metrics, + ..Default::default() }; let new_connection = Arc::new(new_connection); @@ -506,7 +531,7 @@ impl Web3Connection { // we previously sent a None. return early return Ok(()); } - warn!("{} is not synced!", self); + warn!("clearing head block on {}!", self); *head_block = None; } @@ -885,34 +910,14 @@ impl Web3Connection { .clone() { trace!("watching pending transactions on {}", self); + // TODO: does this keep the lock open for too long? match provider.as_ref() { Web3Provider::Mock => unimplemented!(), Web3Provider::Http(provider) => { // there is a "watch_pending_transactions" function, but a lot of public nodes do not support the necessary rpc endpoints - // TODO: what should this interval be? probably automatically set to some fraction of block time - // TODO: maybe it would be better to have one interval for all of the http providers, but this works for now - // TODO: if there are some websocket providers, maybe have a longer interval and a channel that tells the https to update when a websocket gets a new head? if they are slow this wouldn't work well though - let mut interval = interval(Duration::from_secs(60)); - interval.set_missed_tick_behavior(MissedTickBehavior::Delay); - - loop { - // TODO: actually do something here - /* - match self.try_request_handle().await { - Ok(active_request_handle) => { - // TODO: check the filter - todo!("actually send a request"); - } - Err(e) => { - warn!("Failed getting latest block from {}: {:?}", self, e); - } - } - */ - - // wait for the interval - // TODO: if error or rate limit, increase interval? - interval.tick().await; - } + // TODO: maybe subscribe to self.head_block? + // TODO: this keeps a read lock guard open on provider_state forever. is that okay for an http client? 
+ futures::future::pending::<()>().await; } Web3Provider::Ws(provider) => { // TODO: maybe the subscribe_pending_txs function should be on the active_request_handle @@ -1084,46 +1089,48 @@ impl fmt::Debug for Web3Provider { } } -impl Hash for Web3Connection { +impl Hash for Web3Rpc { fn hash(&self, state: &mut H) { // TODO: is this enough? self.name.hash(state); } } -impl Eq for Web3Connection {} +impl Eq for Web3Rpc {} -impl Ord for Web3Connection { +impl Ord for Web3Rpc { fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.name.cmp(&other.name) } } -impl PartialOrd for Web3Connection { +impl PartialOrd for Web3Rpc { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl PartialEq for Web3Connection { +impl PartialEq for Web3Rpc { fn eq(&self, other: &Self) -> bool { self.name == other.name } } -impl Serialize for Web3Connection { +impl Serialize for Web3Rpc { fn serialize(&self, serializer: S) -> Result where S: Serializer, { // 3 is the number of fields in the struct. - let mut state = serializer.serialize_struct("Web3Connection", 8)?; + let mut state = serializer.serialize_struct("Web3Rpc", 9)?; // the url is excluded because it likely includes private information. just show the name that we use in keys state.serialize_field("name", &self.name)?; // a longer name for display to users state.serialize_field("display_name", &self.display_name)?; + state.serialize_field("backup", &self.backup)?; + match self.block_data_limit.load(atomic::Ordering::Relaxed) { u64::MAX => { state.serialize_field("block_data_limit", &None::<()>)?; @@ -1157,9 +1164,9 @@ impl Serialize for Web3Connection { } } -impl fmt::Debug for Web3Connection { +impl fmt::Debug for Web3Rpc { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut f = f.debug_struct("Web3Connection"); + let mut f = f.debug_struct("Web3Rpc"); f.field("name", &self.name); @@ -1174,7 +1181,7 @@ impl fmt::Debug for Web3Connection { } } -impl fmt::Display for Web3Connection { +impl fmt::Display for Web3Rpc { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // TODO: filter basic auth and api keys write!(f, "{}", &self.name) @@ -1207,27 +1214,16 @@ mod tests { let head_block = SavedBlock::new(random_block); let block_data_limit = u64::MAX; - let metrics = OpenRequestHandleMetrics::default(); - - let x = Web3Connection { + let x = Web3Rpc { name: "name".to_string(), - db_conn: None, - display_name: None, url: "ws://example.com".to_string(), - http_client: None, - active_requests: 0.into(), - frontend_requests: 0.into(), - internal_requests: 0.into(), - provider_state: AsyncRwLock::new(ProviderState::None), - hard_limit: None, - hard_limit_until: None, soft_limit: 1_000, automatic_block_limit: false, backup: false, block_data_limit: block_data_limit.into(), tier: 0, head_block: RwLock::new(Some(head_block.clone())), - open_request_handle_metrics: Arc::new(metrics), + ..Default::default() }; assert!(x.has_block_data(&0.into())); @@ -1255,28 +1251,16 @@ mod tests { let block_data_limit = 64; - let metrics = OpenRequestHandleMetrics::default(); - // TODO: this is getting long. 
have a `impl Default` - let x = Web3Connection { + let x = Web3Rpc { name: "name".to_string(), - db_conn: None, - display_name: None, - url: "ws://example.com".to_string(), - http_client: None, - active_requests: 0.into(), - frontend_requests: 0.into(), - internal_requests: 0.into(), - provider_state: AsyncRwLock::new(ProviderState::None), - hard_limit: None, - hard_limit_until: None, soft_limit: 1_000, automatic_block_limit: false, backup: false, block_data_limit: block_data_limit.into(), tier: 0, head_block: RwLock::new(Some(head_block.clone())), - open_request_handle_metrics: Arc::new(metrics), + ..Default::default() }; assert!(!x.has_block_data(&0.into())); @@ -1313,7 +1297,7 @@ mod tests { let metrics = OpenRequestHandleMetrics::default(); - let x = Web3Connection { + let x = Web3Rpc { name: "name".to_string(), db_conn: None, display_name: None, @@ -1330,7 +1314,6 @@ mod tests { block_data_limit: block_data_limit.into(), tier: 0, head_block: RwLock::new(Some(head_block.clone())), - open_request_handle_metrics: Arc::new(metrics), }; assert!(!x.has_block_data(&0.into())); diff --git a/web3_proxy/src/rpcs/request.rs b/web3_proxy/src/rpcs/request.rs index d7f2aaf9..da204992 100644 --- a/web3_proxy/src/rpcs/request.rs +++ b/web3_proxy/src/rpcs/request.rs @@ -1,7 +1,6 @@ -use super::connection::Web3Connection; +use super::one::Web3Rpc; use super::provider::Web3Provider; use crate::frontend::authorization::{Authorization, AuthorizationType}; -use crate::metered::{JsonRpcErrorCount, ProviderErrorCount}; use anyhow::Context; use chrono::Utc; use entities::revert_log; @@ -9,14 +8,10 @@ use entities::sea_orm_active_enums::Method; use ethers::providers::{HttpClientError, ProviderError, WsClientError}; use ethers::types::{Address, Bytes}; use log::{debug, error, trace, warn, Level}; -use metered::metered; -use metered::HitCount; -use metered::ResponseTime; -use metered::Throughput; use migration::sea_orm::{self, ActiveEnum, ActiveModelTrait}; use serde_json::json; use std::fmt; -use std::sync::atomic::{self, AtomicBool, Ordering}; +use std::sync::atomic; use std::sync::Arc; use thread_fast_rng::rand::Rng; use tokio::time::{sleep, Duration, Instant}; @@ -35,11 +30,8 @@ pub enum OpenRequestResult { #[derive(Debug)] pub struct OpenRequestHandle { authorization: Arc, - conn: Arc, - // TODO: this is the same metrics on the conn. use a reference? - metrics: Arc, + conn: Arc, provider: Arc, - used: AtomicBool, } /// Depending on the context, RPC errors can require different handling. @@ -129,14 +121,11 @@ impl Authorization { } } -#[metered(registry = OpenRequestHandleMetrics, visibility = pub)] impl OpenRequestHandle { - pub async fn new(authorization: Arc, conn: Arc) -> Self { + pub async fn new(authorization: Arc, conn: Arc) -> Self { // TODO: take request_id as an argument? // TODO: attach a unique id to this? customer requests have one, but not internal queries // TODO: what ordering?! - // TODO: should we be using metered, or not? i think not because we want stats for each handle - // TODO: these should maybe be sent to an influxdb instance? 
conn.active_requests.fetch_add(1, atomic::Ordering::Relaxed); let mut provider = None; @@ -184,15 +173,10 @@ impl OpenRequestHandle { } } - let metrics = conn.open_request_handle_metrics.clone(); - let used = false.into(); - Self { authorization, conn, - metrics, provider, - used, } } @@ -201,17 +185,14 @@ impl OpenRequestHandle { } #[inline] - pub fn clone_connection(&self) -> Arc { + pub fn clone_connection(&self) -> Arc { self.conn.clone() } /// Send a web3 request /// By having the request method here, we ensure that the rate limiter was called and connection counts were properly incremented - /// TODO: we no longer take self because metered doesn't like that - /// TODO: ErrorCount includes too many types of errors, such as transaction reverts - #[measure([JsonRpcErrorCount, HitCount, ProviderErrorCount, ResponseTime, Throughput])] pub async fn request( - &self, + self, method: &str, params: &P, revert_handler: RequestRevertHandler, @@ -221,20 +202,11 @@ impl OpenRequestHandle { P: Clone + fmt::Debug + serde::Serialize + Send + Sync + 'static, R: serde::Serialize + serde::de::DeserializeOwned + fmt::Debug, { - // ensure this function only runs once - if self.used.swap(true, Ordering::Release) { - unimplemented!("a request handle should only be used once"); - } - // TODO: use tracing spans - // TODO: requests from customers have request ids, but we should add - // TODO: including params in this is way too verbose - // the authorization field is already on a parent span + // TODO: including params in this log is way too verbose // trace!(rpc=%self.conn, %method, "request"); - // trace!("got provider for {:?}", self); - - // TODO: really sucks that we have to clone here + // TODO: replace ethers-rs providers with our own that supports streaming the responses let response = match &*self.provider { Web3Provider::Mock => unimplemented!(), Web3Provider::Http(provider) => provider.request(method, params).await, diff --git a/web3_proxy/src/rpcs/synced_connections.rs b/web3_proxy/src/rpcs/synced_connections.rs index 224381df..e285c307 100644 --- a/web3_proxy/src/rpcs/synced_connections.rs +++ b/web3_proxy/src/rpcs/synced_connections.rs @@ -1,25 +1,25 @@ use super::blockchain::{ArcBlock, SavedBlock}; -use super::connection::Web3Connection; -use super::connections::Web3Connections; +use super::many::Web3Rpcs; +use super::one::Web3Rpc; use ethers::prelude::{H256, U64}; use serde::Serialize; use std::fmt; use std::sync::Arc; -/// A collection of Web3Connections that are on the same block. +/// A collection of Web3Rpcs that are on the same block. /// Serialize is so we can print it on our debug endpoint #[derive(Clone, Default, Serialize)] -pub struct ConsensusConnections { +pub struct ConsensusWeb3Rpcs { // TODO: store ArcBlock instead? pub(super) head_block: Option, // TODO: this should be able to serialize, but it isn't #[serde(skip_serializing)] - pub(super) conns: Vec>, + pub(super) conns: Vec>, pub(super) num_checked_conns: usize, pub(super) includes_backups: bool, } -impl ConsensusConnections { +impl ConsensusWeb3Rpcs { pub fn num_conns(&self) -> usize { self.conns.len() } @@ -31,7 +31,7 @@ impl ConsensusConnections { // TODO: sum_hard_limit? } -impl fmt::Debug for ConsensusConnections { +impl fmt::Debug for ConsensusWeb3Rpcs { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // TODO: the default formatter takes forever to write. this is too quiet though // TODO: print the actual conns? 
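// Illustrative sketch (not part of the diff): the hunk above changes
// OpenRequestHandle::request from `&self` to `self` and drops the old
// `used: AtomicBool` guard. Consuming `self` lets the compiler enforce the
// removed runtime check ("a request handle should only be used once").
// The `Handle` type and method body here are hypothetical stand-ins.
struct Handle;

impl Handle {
    // taking `self` by value moves the handle into the call, so a second
    // call on the same handle is a compile-time error instead of a panic
    async fn request(self, method: &str) -> String {
        format!("sent {}", method)
    }
}

#[tokio::main]
async fn main() {
    let handle = Handle;
    let response = handle.request("eth_blockNumber").await;
    println!("{}", response);

    // let _ = handle.request("eth_chainId").await; // does not compile: use of moved value
}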
@@ -42,7 +42,7 @@ impl fmt::Debug for ConsensusConnections { } } -impl Web3Connections { +impl Web3Rpcs { pub fn head_block(&self) -> Option { self.watch_consensus_head_receiver .as_ref() diff --git a/web3_proxy/src/rpcs/transactions.rs b/web3_proxy/src/rpcs/transactions.rs index cc5a4011..dc5710d1 100644 --- a/web3_proxy/src/rpcs/transactions.rs +++ b/web3_proxy/src/rpcs/transactions.rs @@ -1,8 +1,8 @@ use crate::frontend::authorization::Authorization; +use super::many::Web3Rpcs; ///! Load balanced communication with a group of web3 providers -use super::connection::Web3Connection; -use super::connections::Web3Connections; +use super::one::Web3Rpc; use super::request::OpenRequestResult; use ethers::prelude::{ProviderError, Transaction, TxHash}; use log::{debug, trace, Level}; @@ -17,11 +17,11 @@ pub enum TxStatus { Orphaned(Transaction), } -impl Web3Connections { +impl Web3Rpcs { async fn query_transaction_status( &self, authorization: &Arc, - rpc: Arc, + rpc: Arc, pending_tx_id: TxHash, ) -> Result, ProviderError> { // TODO: there is a race here on geth. sometimes the rpc isn't yet ready to serve the transaction (even though they told us about it!) @@ -66,7 +66,7 @@ impl Web3Connections { pub(super) async fn process_incoming_tx_id( self: Arc, authorization: Arc, - rpc: Arc, + rpc: Arc, pending_tx_id: TxHash, pending_tx_sender: broadcast::Sender, ) -> anyhow::Result<()> {
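// Illustrative sketch (not part of the diff): how a tokio broadcast channel,
// like the `pending_tx_sender` passed to `process_incoming_tx_id` above, fans
// a transaction status out to every subscriber. The `TxStatus` variants here
// are simplified stand-ins; the real enum wraps ethers `Transaction` values.
use tokio::sync::broadcast;

#[derive(Clone, Debug)]
enum TxStatus {
    Pending(String),
    Orphaned(String),
}

#[tokio::main]
async fn main() {
    // every receiver gets its own copy of each status sent after it subscribes
    let (pending_tx_sender, mut rx_a) = broadcast::channel(16);
    let mut rx_b = pending_tx_sender.subscribe();

    pending_tx_sender
        .send(TxStatus::Pending("0xabc".into()))
        .unwrap();
    pending_tx_sender
        .send(TxStatus::Orphaned("0xabc".into()))
        .unwrap();

    for rx in [&mut rx_a, &mut rx_b] {
        while let Ok(status) = rx.try_recv() {
            println!("{:?}", status);
        }
    }
}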