diff --git a/Cargo.lock b/Cargo.lock
index 9f4bf45c..80281cfa 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -14,9 +14,9 @@ dependencies = [
 
 [[package]]
 name = "addr2line"
-version = "0.17.0"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b"
+checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97"
 dependencies = [
  "gimli",
 ]
@@ -41,9 +41,9 @@ dependencies = [
 
 [[package]]
 name = "aes"
-version = "0.8.1"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfe0133578c0986e1fe3dfcd4af1cc5b2dd6c3dbf534d69916ce16a2701d40ba"
+checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241"
 dependencies = [
  "cfg-if",
  "cipher 0.4.3",
@@ -63,9 +63,9 @@ dependencies = [
 
 [[package]]
 name = "ahash"
-version = "0.8.2"
+version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf6ccdb167abbf410dcb915cabd428929d7f6a04980b54a11f26a39f1c7f7107"
+checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f"
 dependencies = [
  "cfg-if",
  "once_cell",
@@ -74,9 +74,9 @@ dependencies = [
 
 [[package]]
 name = "aho-corasick"
-version = "0.7.18"
+version = "0.7.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
+checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac"
 dependencies = [
  "memchr",
 ]
@@ -89,9 +89,9 @@ checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd"
 
 [[package]]
 name = "android_system_properties"
-version = "0.1.4"
+version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7ed72e1635e121ca3e79420540282af22da58be50de153d36f81ddc6b83aa9e"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
 dependencies = [
  "libc",
 ]
@@ -105,12 +105,6 @@ dependencies = [
  "backtrace",
 ]
 
-[[package]]
-name = "arc-swap"
-version = "1.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6"
-
 [[package]]
 name = "argh"
 version = "0.1.10"
@@ -178,30 +172,32 @@ dependencies = [
 
 [[package]]
 name = "async-io"
-version = "1.7.0"
+version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5e18f61464ae81cde0a23e713ae8fd299580c54d697a35820cfd0625b8b0e07"
+checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794"
 dependencies = [
+ "async-lock",
+ "autocfg",
  "concurrent-queue",
  "futures-lite",
  "libc",
  "log",
- "once_cell",
  "parking",
  "polling",
  "slab",
  "socket2",
  "waker-fn",
- "winapi",
+ "windows-sys",
 ]
 
 [[package]]
 name = "async-lock"
-version = "2.5.0"
+version = "2.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e97a171d191782fba31bb902b14ad94e24a68145032b7eedf871ab0bc0d077b6"
+checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685"
 dependencies = [
  "event-listener",
+ "futures-lite",
 ]
 
 [[package]]
@@ -227,9 +223,9 @@ dependencies = [
 
 [[package]]
 name = "async-trait"
-version = "0.1.57"
+version = "0.1.63"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f"
+checksum = "eff18d764974428cf3a9328e23fc5c986f5fbed46e6cd4cdf42544df5d297ec1"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -342,9 +338,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
 
 [[package]]
 name = "axum"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1304eab461cf02bd70b083ed8273388f9724c549b316ba3d1e213ce0e9e7fb7e"
+checksum = "e5694b64066a2459918d8074c2ce0d5a88f409431994c2356617c8ae0c4721fc"
 dependencies = [
  "async-trait",
  "axum-core",
@@ -356,7 +352,7 @@ dependencies = [
  "http",
  "http-body",
  "hyper",
- "itoa 1.0.2",
+ "itoa 1.0.5",
  "matchit",
  "memchr",
  "mime",
@@ -389,9 +385,9 @@ dependencies = [
 
 [[package]]
 name = "axum-core"
-version = "0.3.1"
+version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f487e40dc9daee24d8a1779df88522f159a54a980f99cfbe43db0be0bd3444a8"
+checksum = "1cae3e661676ffbacb30f1a824089a8c9150e71017f7e1e38f2aa32009188d34"
 dependencies = [
  "async-trait",
  "bytes",
@@ -406,9 +402,9 @@ dependencies = [
 
 [[package]]
 name = "axum-macros"
-version = "0.3.1"
+version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc7d7c3e69f305217e317a28172aab29f275667f2e1c15b87451e134fe27c7b1"
+checksum = "9dbcf61bed07d554bd5c225cd07bc41b793eab63e79c6f0ceac7e1aed2f1c670"
 dependencies = [
  "heck 0.4.0",
  "proc-macro2",
@@ -418,9 +414,9 @@ dependencies = [
 
 [[package]]
 name = "backtrace"
-version = "0.3.65"
+version = "0.3.67"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61"
+checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca"
 dependencies = [
  "addr2line",
  "cc",
@@ -474,9 +470,9 @@ checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff"
 
 [[package]]
 name = "base64"
-version = "0.13.0"
+version = "0.13.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
+checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
 
 [[package]]
 name = "base64"
@@ -485,10 +481,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5"
 
 [[package]]
-name = "base64ct"
-version = "1.0.1"
+name = "base64"
+version = "0.21.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a32fd6af2b5827bce66c29053ba0e7c42b9dcab01835835058558c10851a46b"
+checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a"
+
+[[package]]
+name = "base64ct"
+version = "1.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf"
 
 [[package]]
 name = "bech32"
@@ -507,9 +509,9 @@ dependencies = [
 
 [[package]]
 name = "bit-set"
-version = "0.5.2"
+version = "0.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e11e16035ea35e4e5997b393eacbf6f63983188f7a2ad25bfb13465f5ad59de"
+checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1"
 dependencies = [
  "bit-vec",
 ]
@@ -538,9 +540,9 @@ dependencies = [
 
 [[package]]
 name = "bitvec"
-version = "1.0.0"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1489fcb93a5bb47da0462ca93ad252ad6af2145cce58d10d46a83931ba9f016b"
+checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
 dependencies = [
  "funty",
  "radium 0.7.0",
@@ -550,11 +552,11 @@ dependencies = [
 
 [[package]]
 name = "blake2"
-version = "0.10.4"
+version = "0.10.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388"
+checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe"
 dependencies = [
- "digest 0.10.5",
+ "digest 0.10.6",
 ]
 
 [[package]]
@@ -580,9 +582,9 @@ dependencies = [
 
 [[package]]
 name = "block-buffer"
-version = "0.10.2"
+version = "0.10.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324"
+checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
 dependencies = [
  "generic-array 0.14.6",
 ]
@@ -596,6 +598,51 @@ dependencies = [
  "byte-tools",
 ]
 
+[[package]]
+name = "borsh"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa"
+dependencies = [
+ "borsh-derive",
+ "hashbrown 0.11.2",
+]
+
+[[package]]
+name = "borsh-derive"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775"
+dependencies = [
+ "borsh-derive-internal",
+ "borsh-schema-derive-internal",
+ "proc-macro-crate 0.1.5",
+ "proc-macro2",
+ "syn",
+]
+
+[[package]]
+name = "borsh-derive-internal"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "borsh-schema-derive-internal"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "bs58"
 version = "0.4.0"
@@ -604,15 +651,15 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3"
 
 [[package]]
 name = "bumpalo"
-version = "3.9.1"
+version = "3.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899"
+checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535"
 
 [[package]]
 name = "byte-slice-cast"
-version = "1.2.1"
+version = "1.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87c5fdd0166095e1d463fc6cc01aa8ce547ad77a4e84d42eb6762b084e28067e"
+checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c"
 
 [[package]]
 name = "byte-tools"
@@ -620,6 +667,27 @@ version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7"
 
+[[package]]
+name = "bytecheck"
+version = "0.6.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d11cac2c12b5adc6570dad2ee1b87eff4955dac476fe12d81e5fdd352e52406f"
+dependencies = [
+ "bytecheck_derive",
+ "ptr_meta",
+]
+
+[[package]]
+name = "bytecheck_derive"
+version = "0.6.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "bytecount"
 version = "0.6.3"
@@ -634,18 +702,18 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
 
 [[package]]
 name = "bytes"
-version = "1.2.1"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db"
+checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c"
 dependencies = [
  "serde",
 ]
 
 [[package]]
 name = "bzip2"
-version = "0.4.3"
+version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6afcd980b5f3a45017c57e57a2fcccbb351cc43a356ce117ef760ef8052b89b0"
+checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8"
 dependencies = [
  "bzip2-sys",
  "libc",
@@ -662,17 +730,11 @@ dependencies = [
  "pkg-config",
 ]
 
-[[package]]
-name = "cache-padded"
-version = "1.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c"
-
 [[package]]
 name = "camino"
-version = "1.0.9"
+version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "869119e97797867fd90f5e22af7d0bd274bd4635ebb9eb68c04f3f513ae6c412"
+checksum = "c77df041dc383319cc661b428b6961a005db4d6808d5e12536931b1ca9556055"
 dependencies = [
  "serde",
 ]
@@ -701,22 +763,23 @@ dependencies = [
 
 [[package]]
 name = "cargo_metadata"
-version = "0.15.0"
+version = "0.15.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3abb7553d5b9b8421c6de7cb02606ff15e0c6eea7d8eadd75ef013fd636bec36"
+checksum = "982a0cf6a99c350d7246035613882e376d58cebe571785abc5da4f648d53ac0a"
 dependencies = [
  "camino",
  "cargo-platform",
  "semver",
  "serde",
  "serde_json",
+ "thiserror",
 ]
 
 [[package]]
 name = "cc"
-version = "1.0.73"
+version = "1.0.78"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11"
+checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d"
 dependencies = [
  "jobserver",
 ]
@@ -738,17 +801,11 @@ dependencies = [
  "num-integer",
  "num-traits",
  "serde",
- "time 0.1.43",
+ "time 0.1.45",
  "wasm-bindgen",
  "winapi",
 ]
 
-[[package]]
-name = "chunked_transfer"
-version = "1.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fff857943da45f546682664a79488be82e69e43c1a7a2307679ab9afb3a66d2e"
-
 [[package]]
 name = "cipher"
 version = "0.3.0"
@@ -770,9 +827,9 @@ dependencies = [
 
 [[package]]
 name = "clap"
-version = "3.2.15"
+version = "3.2.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "44bbe24bbd31a185bc2c4f7c2abe80bea13a20d57ee4e55be70ac512bdc76417"
+checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5"
 dependencies = [
  "atty",
  "bitflags",
@@ -787,9 +844,9 @@ dependencies = [
 
 [[package]]
 name = "clap_derive"
-version = "3.2.15"
+version = "3.2.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ba52acd3b0a5c33aeada5cdaa3267cdc7c594a98731d4268cdc1532f4264cb4"
+checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65"
 dependencies = [
  "heck 0.4.0",
  "proc-macro-error",
@@ -807,6 +864,16 @@ dependencies = [
  "os_str_bytes",
 ]
 
+[[package]]
+name = "codespan-reporting"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e"
+dependencies = [
+ "termcolor",
+ "unicode-width",
+]
+
 [[package]]
 name = "coins-bip32"
 version = "0.7.0"
@@ -816,7 +883,7 @@ dependencies = [
  "bincode",
  "bs58",
  "coins-core",
- "digest 0.10.5",
+ "digest 0.10.6",
  "getrandom",
  "hmac",
  "k256",
@@ -837,7 +904,7 @@ dependencies = [
  "getrandom",
  "hex",
  "hmac",
- "pbkdf2 0.11.0",
+ "pbkdf2",
  "rand",
  "sha2 0.10.6",
  "thiserror",
@@ -853,7 +920,7 @@ dependencies = [
  "base64 0.12.3",
  "bech32",
  "blake2",
- "digest 0.10.5",
+ "digest 0.10.6",
  "generic-array 0.14.6",
  "hex",
  "ripemd",
@@ -866,9 +933,9 @@ dependencies = [
 
 [[package]]
 name = "combine"
-version = "4.6.4"
+version = "4.6.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a604e93b79d1808327a6fca85a6f2d69de66461e7620f5a4cbf5fb4d1d7c948"
+checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4"
 dependencies = [
  "bytes",
  "futures-core",
@@ -880,11 +947,11 @@ dependencies = [
 
 [[package]]
 name = "concurrent-queue"
-version = "1.2.2"
+version = "2.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3"
+checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e"
 dependencies = [
- "cache-padded",
+ "crossbeam-utils",
 ]
 
 [[package]]
@@ -904,15 +971,14 @@ dependencies = [
 
 [[package]]
 name = "console"
-version = "0.15.0"
+version = "0.15.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a28b32d32ca44b70c3e4acd7db1babf555fa026e385fb95f18028f88848b3c31"
+checksum = "c3d79fbe8970a77e3e34151cc13d3b3e248aa0faaecb9f6091fa07ebefe5ad60"
 dependencies = [
  "encode_unicode",
+ "lazy_static",
  "libc",
- "once_cell",
- "terminal_size",
- "winapi",
+ "windows-sys",
 ]
 
 [[package]]
@@ -923,9 +989,9 @@ checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3"
 
 [[package]]
 name = "const-oid"
-version = "0.9.0"
+version = "0.9.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "722e23542a15cea1f65d4a1419c4cfd7a26706c70871a13a04238ca3f40f1661"
+checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b"
 
 [[package]]
 name = "constant_time_eq"
@@ -965,9 +1031,9 @@ dependencies = [
 
 [[package]]
 name = "cpufeatures"
-version = "0.2.2"
+version = "0.2.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b"
+checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320"
 dependencies = [
  "libc",
 ]
@@ -983,9 +1049,9 @@ dependencies = [
 
 [[package]]
 name = "crc-catalog"
-version = "2.1.0"
+version = "2.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d0165d2900ae6778e36e80bbc4da3b5eefccee9ba939761f9c2882a5d9af3ff"
+checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484"
 
 [[package]]
 name = "crc32fast"
@@ -1008,9 +1074,9 @@ dependencies = [
 
 [[package]]
 name = "crossbeam-deque"
-version = "0.8.1"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e"
+checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
 dependencies = [
  "cfg-if",
  "crossbeam-epoch",
@@ -1019,23 +1085,22 @@ dependencies = [
 
 [[package]]
 name = "crossbeam-epoch"
-version = "0.9.10"
+version = "0.9.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1"
+checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a"
 dependencies = [
  "autocfg",
  "cfg-if",
  "crossbeam-utils",
  "memoffset",
- "once_cell",
  "scopeguard",
 ]
 
 [[package]]
 name = "crossbeam-queue"
-version = "0.3.6"
+version = "0.3.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cd42583b04998a5363558e5f9291ee5a5ff6b49944332103f251e7479a82aa7"
+checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add"
 dependencies = [
  "cfg-if",
  "crossbeam-utils",
@@ -1043,12 +1108,11 @@ dependencies = [
 
 [[package]]
 name = "crossbeam-utils"
-version = "0.8.8"
+version = "0.8.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38"
+checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f"
 dependencies = [
  "cfg-if",
- "lazy_static",
 ]
 
 [[package]]
@@ -1069,9 +1133,9 @@ dependencies = [
 
 [[package]]
 name = "crypto-bigint"
-version = "0.4.8"
+version = "0.4.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f2b443d17d49dad5ef0ede301c3179cc923b8822f3393b4d2c28c269dd4a122"
+checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef"
 dependencies = [
  "generic-array 0.14.6",
  "rand_core",
@@ -1081,9 +1145,9 @@ dependencies = [
 
 [[package]]
 name = "crypto-common"
-version = "0.1.3"
+version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8"
+checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
 dependencies = [
  "generic-array 0.14.6",
  "typenum",
@@ -1098,6 +1162,50 @@ dependencies = [
  "cipher 0.4.3",
 ]
 
+[[package]]
+name = "cxx"
+version = "1.0.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b61a7545f753a88bcbe0a70de1fcc0221e10bfc752f576754fa91e663db1622e"
+dependencies = [
+ "cc",
+ "cxxbridge-flags",
+ "cxxbridge-macro",
+ "link-cplusplus",
+]
+
+[[package]]
+name = "cxx-build"
+version = "1.0.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f464457d494b5ed6905c63b0c4704842aba319084a0a3561cdc1359536b53200"
+dependencies = [
+ "cc",
+ "codespan-reporting",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "scratch",
+ "syn",
+]
+
+[[package]]
+name = "cxxbridge-flags"
+version = "1.0.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43c7119ce3a3701ed81aca8410b9acf6fc399d2629d057b87e2efa4e63a3aaea"
+
+[[package]]
+name = "cxxbridge-macro"
+version = "1.0.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "65e07508b90551e610910fa648a1878991d367064997a596135b86df30daf07e"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "deadpool"
 version = "0.9.5"
@@ -1167,11 +1275,11 @@ dependencies = [
 
 [[package]]
 name = "der"
-version = "0.6.0"
+version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13dd2ae565c0a381dde7fade45fce95984c568bdcb4700a4fdbe3175e0380b2f"
+checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de"
 dependencies = [
- "const-oid 0.9.0",
+ "const-oid 0.9.1",
  "zeroize",
 ]
 
@@ -1213,9 +1321,9 @@ dependencies = [
 
 [[package]]
 name = "diff"
-version = "0.1.12"
+version = "0.1.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499"
+checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
 
 [[package]]
 name = "digest"
@@ -1237,11 +1345,11 @@ dependencies = [
 
 [[package]]
 name = "digest"
-version = "0.10.5"
+version = "0.10.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
+checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f"
 dependencies = [
- "block-buffer 0.10.2",
+ "block-buffer 0.10.3",
  "crypto-common",
  "subtle",
 ]
@@ -1293,11 +1401,11 @@ checksum = "0bd4b30a6560bbd9b4620f4de34c3f14f60848e58a9b7216801afcb4c7b31c3c"
 
 [[package]]
 name = "ecdsa"
-version = "0.14.3"
+version = "0.14.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3bd46e0c364655e5baf2f5e99b603e7a09905da9966d7928d7470af393b28670"
+checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c"
 dependencies = [
- "der 0.6.0",
+ "der 0.6.1",
  "elliptic-curve",
  "rfc6979",
  "signature",
@@ -1305,9 +1413,9 @@ dependencies = [
 
 [[package]]
 name = "either"
-version = "1.6.1"
+version = "1.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
+checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
 
 [[package]]
 name = "elliptic-curve"
@@ -1316,9 +1424,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3"
 dependencies = [
  "base16ct",
- "crypto-bigint 0.4.8",
- "der 0.6.0",
- "digest 0.10.5",
+ "crypto-bigint 0.4.9",
+ "der 0.6.1",
+ "digest 0.10.6",
  "ff",
  "generic-array 0.14.6",
  "group",
@@ -1355,7 +1463,7 @@ dependencies = [
 
 [[package]]
 name = "entities"
-version = "0.12.0"
+version = "0.13.0"
 dependencies = [
  "ethers",
  "sea-orm",
@@ -1413,12 +1521,12 @@ version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab"
 dependencies = [
- "aes 0.8.1",
+ "aes 0.8.2",
  "ctr",
- "digest 0.10.5",
+ "digest 0.10.6",
  "hex",
  "hmac",
- "pbkdf2 0.11.0",
+ "pbkdf2",
  "rand",
  "scrypt",
  "serde",
@@ -1431,9 +1539,9 @@ dependencies = [
 
 [[package]]
 name = "ethabi"
-version = "17.2.0"
+version = "18.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e4966fba78396ff92db3b817ee71143eccd98acf0f876b8d600e585a670c5d1b"
+checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898"
 dependencies = [
  "ethereum-types",
  "hex",
@@ -1448,9 +1556,9 @@ dependencies = [
 
 [[package]]
 name = "ethbloom"
-version = "0.12.1"
+version = "0.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11da94e443c60508eb62cf256243a64da87304c2802ac2528847f79d750007ef"
+checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60"
 dependencies = [
  "crunchy",
  "fixed-hash",
@@ -1463,9 +1571,9 @@ dependencies = [
 
 [[package]]
 name = "ethereum-types"
-version = "0.13.1"
+version = "0.14.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b2827b94c556145446fcce834ca86b7abf0c39a805883fe20e72c5bfdb5a0dc6"
+checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee"
 dependencies = [
  "ethbloom",
  "fixed-hash",
@@ -1495,9 +1603,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-addressbook"
-version = "1.0.0"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b4b8c9da375d178d59a50f9a5d31ede4475a0f60cd5184c3db00f172b25f7e11"
+checksum = "fe4be54dd2260945d784e06ccdeb5ad573e8f1541838cee13a1ab885485eaa0b"
 dependencies = [
  "ethers-core",
  "once_cell",
@@ -1507,9 +1615,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-contract"
-version = "1.0.0"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "002a0d58a7d921b496f5f19b5b9508d01d25fbe25078286b1fcb6f4e7562acf7"
+checksum = "e9c3c3e119a89f0a9a1e539e7faecea815f74ddcf7c90d0b00d1f524db2fdc9c"
 dependencies = [
  "ethers-contract-abigen",
  "ethers-contract-derive",
@@ -1526,9 +1634,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-contract-abigen"
-version = "1.0.0"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "486f389525de61c1c4807fddc804a151ca3c5a5b6f2dc759689424777b7ba617"
+checksum = "3d4e5ad46aede34901f71afdb7bb555710ed9613d88d644245c657dc371aa228"
 dependencies = [
  "Inflector",
  "cfg-if",
@@ -1544,6 +1652,7 @@ dependencies = [
  "serde",
  "serde_json",
  "syn",
+ "toml 0.5.11",
  "url",
  "walkdir",
 ]
@@ -1565,13 +1674,13 @@ dependencies = [
 
 [[package]]
 name = "ethers-core"
-version = "1.0.0"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06338c311c6a0a7ed04877d0fb0f0d627ed390aaa3429b4e041b8d17348a506d"
+checksum = "ade3e9c97727343984e1ceada4fdab11142d2ee3472d2c67027d56b1251d4f15"
 dependencies = [
  "arrayvec",
  "bytes",
- "cargo_metadata 0.15.0",
+ "cargo_metadata 0.15.2",
  "chrono",
  "convert_case 0.6.0",
  "elliptic-curve",
@@ -1585,7 +1694,6 @@ dependencies = [
  "rand",
  "rlp",
  "rlp-derive",
- "rust_decimal",
  "serde",
  "serde_json",
  "strum",
@@ -1597,9 +1705,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-etherscan"
-version = "1.0.0"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c3acd2c48d240ae13a4ed3ac88dc15b31bc1ba9513a072e080d4a32fda1637b"
+checksum = "a9713f525348e5dde025d09b0a4217429f8074e8ff22c886263cc191e87d8216"
 dependencies = [
  "ethers-core",
  "getrandom",
@@ -1614,9 +1722,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-middleware"
-version = "1.0.0"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f51bc2555522673e8a890b79615e04dd9ef40f0ab0a73e745024fdda15710d69"
+checksum = "e71df7391b0a9a51208ffb5c7f2d068900e99d6b3128d3a4849d138f194778b7"
 dependencies = [
  "async-trait",
  "auto_impl 0.5.0",
@@ -1640,13 +1748,13 @@ dependencies = [
 
 [[package]]
 name = "ethers-providers"
-version = "1.0.0"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6cc65f79e2168aac5ca4a659bb6639c78164a6a5b18c954cc7699b6ce5ac6275"
+checksum = "a1a9e0597aa6b2fdc810ff58bc95e4eeaa2c219b3e615ed025106ecb027407d8"
 dependencies = [
  "async-trait",
  "auto_impl 1.0.1",
- "base64 0.13.0",
+ "base64 0.13.1",
  "ethers-core",
  "futures-channel",
  "futures-core",
@@ -1677,9 +1785,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-signers"
-version = "1.0.0"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1f97da069cd77dd91a0a7f0c979f063a4bf9d2533b277ff5ccb19b7ac348376"
+checksum = "3f41ced186867f64773db2e55ffdd92959e094072a1d09a5e5e831d443204f98"
 dependencies = [
  "async-trait",
  "coins-bip32",
@@ -1695,9 +1803,9 @@ dependencies = [
 
 [[package]]
 name = "ethers-solc"
-version = "1.0.0"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ed856e3e0d07a0ffbc79157e3dd0ed10b45b6736eff6a878d40a1e57f224988"
+checksum = "cbe9c0a6d296c57191e5f8a613a3b5e816812c28f4a28d6178a17c21db903d77"
 dependencies = [
  "cfg-if",
  "dunce",
@@ -1727,9 +1835,9 @@ dependencies = [
 
 [[package]]
 name = "event-listener"
-version = "2.5.2"
+version = "2.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77f3309417938f28bf8228fcff79a4a37103981e3e186d2ccd19c74b38f4eb71"
+checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
 
 [[package]]
 name = "eyre"
@@ -1749,9 +1857,9 @@ checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed"
 
 [[package]]
 name = "fastrand"
-version = "1.7.0"
+version = "1.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf"
+checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499"
 dependencies = [
  "instant",
 ]
@@ -1767,9 +1875,9 @@ dependencies = [
 
 [[package]]
 name = "ff"
-version = "0.12.0"
+version = "0.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "df689201f395c6b90dfe87127685f8dbfc083a5e779e613575d8bd7314300c3e"
+checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160"
 dependencies = [
  "rand_core",
  "subtle",
@@ -1777,21 +1885,21 @@ dependencies = [
 
 [[package]]
 name = "filetime"
-version = "0.2.17"
+version = "0.2.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e94a7bbaa59354bc20dd75b67f23e2797b4490e9d6928203fb105c79e448c86c"
+checksum = "4e884668cd0c7480504233e951174ddc3b382f7c2666e3b7310b5c4e7b0c37f9"
 dependencies = [
  "cfg-if",
  "libc",
  "redox_syscall",
- "windows-sys 0.36.1",
+ "windows-sys",
 ]
 
 [[package]]
 name = "fixed-hash"
-version = "0.7.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c"
+checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534"
 dependencies = [
  "byteorder",
  "rand",
@@ -1801,15 +1909,15 @@ dependencies = [
 
 [[package]]
 name = "fixedbitset"
-version = "0.4.1"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "279fb028e20b3c4c320317955b77c5e0c9701f05a1d309905d6fc702cdc5053e"
+checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
 
 [[package]]
 name = "flate2"
-version = "1.0.24"
+version = "1.0.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6"
+checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841"
 dependencies = [
  "crc32fast",
  "miniz_oxide",
@@ -1825,7 +1933,7 @@ dependencies = [
  "futures-sink",
  "nanorand",
  "pin-project",
- "spin 0.9.3",
+ "spin 0.9.4",
 ]
 
 [[package]]
@@ -1929,9 +2037,9 @@ dependencies = [
 
 [[package]]
 name = "futures-intrusive"
-version = "0.4.0"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62007592ac46aa7c2b6416f7deb9a8a8f63a01e0f1d6e1787d5630170db2b63e"
+checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5"
 dependencies = [
  "futures-core",
  "lock_api",
@@ -1961,13 +2069,12 @@ dependencies = [
 
 [[package]]
 name = "futures-locks"
-version = "0.7.0"
+version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3eb42d4fb72227be5778429f9ef5240a38a358925a49f05b5cf702ce7c7e558a"
+checksum = "45ec6fe3675af967e67c5536c0b9d44e34e6c52f86bedc4ea49c5317b8e94d06"
 dependencies = [
  "futures-channel",
  "futures-task",
- "tokio",
 ]
 
 [[package]]
@@ -2046,23 +2153,33 @@ dependencies = [
 ]
 
 [[package]]
-name = "getrandom"
-version = "0.2.6"
+name = "gethostname"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad"
+checksum = "8a329e22866dd78b35d2c639a4a23d7b950aeae300dfd79f4fb19f74055c2404"
+dependencies = [
+ "libc",
+ "windows",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
 dependencies = [
  "cfg-if",
  "js-sys",
  "libc",
- "wasi 0.10.2+wasi-snapshot-preview1",
+ "wasi 0.11.0+wasi-snapshot-preview1",
  "wasm-bindgen",
 ]
 
 [[package]]
 name = "gimli"
-version = "0.26.1"
+version = "0.27.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4"
+checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec"
 
 [[package]]
 name = "glob"
@@ -2072,9 +2189,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
 
 [[package]]
 name = "group"
-version = "0.12.0"
+version = "0.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7391856def869c1c81063a03457c676fbcd419709c3dfb33d8d319de484b154d"
+checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7"
 dependencies = [
  "ff",
  "rand_core",
@@ -2083,9 +2200,9 @@ dependencies = [
 
 [[package]]
 name = "h2"
-version = "0.3.13"
+version = "0.3.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57"
+checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4"
 dependencies = [
  "bytes",
  "fnv",
@@ -2114,6 +2231,15 @@ dependencies = [
  "thiserror",
 ]
 
+[[package]]
+name = "hashbrown"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
+dependencies = [
+ "ahash 0.7.6",
+]
+
 [[package]]
 name = "hashbrown"
 version = "0.12.3"
@@ -2129,7 +2255,7 @@ version = "0.13.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e"
 dependencies = [
- "ahash 0.8.2",
+ "ahash 0.8.3",
  "serde",
 ]
 
@@ -2144,9 +2270,9 @@ dependencies = [
 
 [[package]]
 name = "hashlink"
-version = "0.8.0"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d452c155cb93fecdfb02a73dd57b5d8e442c2063bd7aac72f1bc5e4263a43086"
+checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa"
 dependencies = [
  "hashbrown 0.12.3",
 ]
@@ -2157,7 +2283,7 @@ version = "7.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7f19b9f54f7c7f55e31401bb647626ce0cf0f67b0004982ce815b3ee72a02aa8"
 dependencies = [
- "base64 0.13.0",
+ "base64 0.13.1",
  "byteorder",
  "crossbeam-channel",
  "flate2",
@@ -2167,18 +2293,18 @@ dependencies = [
 
 [[package]]
 name = "headers"
-version = "0.3.7"
+version = "0.3.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d"
+checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584"
 dependencies = [
- "base64 0.13.0",
+ "base64 0.13.1",
  "bitflags",
  "bytes",
  "headers-core",
  "http",
  "httpdate",
  "mime",
- "sha-1",
+ "sha1",
 ]
 
 [[package]]
@@ -2238,7 +2364,7 @@ version = "0.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
 dependencies = [
- "digest 0.10.5",
+ "digest 0.10.6",
 ]
 
 [[package]]
@@ -2269,7 +2395,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399"
 dependencies = [
  "bytes",
  "fnv",
- "itoa 1.0.2",
+ "itoa 1.0.5",
 ]
 
 [[package]]
@@ -2291,9 +2417,9 @@ checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29"
 
 [[package]]
 name = "httparse"
-version = "1.7.1"
+version = "1.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c"
+checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904"
 
 [[package]]
 name = "httpdate"
@@ -2309,9 +2435,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
 
 [[package]]
 name = "hyper"
-version = "0.14.19"
+version = "0.14.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f"
+checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c"
 dependencies = [
  "bytes",
  "futures-channel",
@@ -2322,7 +2448,7 @@ dependencies = [
  "http-body",
  "httparse",
  "httpdate",
- "itoa 1.0.2",
+ "itoa 1.0.5",
  "pin-project-lite",
  "socket2",
  "tokio",
@@ -2333,9 +2459,9 @@ dependencies = [
 
 [[package]]
 name = "hyper-rustls"
-version = "0.23.0"
+version = "0.23.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac"
+checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c"
 dependencies = [
  "http",
  "hyper",
@@ -2346,17 +2472,28 @@ dependencies = [
 
 [[package]]
 name = "iana-time-zone"
-version = "0.1.46"
+version = "0.1.53"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ad2bfd338099682614d3ee3fe0cd72e0b6a41ca6a87f6a74a3bd593c91650501"
+checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765"
 dependencies = [
  "android_system_properties",
  "core-foundation-sys",
+ "iana-time-zone-haiku",
  "js-sys",
  "wasm-bindgen",
  "winapi",
 ]
 
+[[package]]
+name = "iana-time-zone-haiku"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca"
+dependencies = [
+ "cxx",
+ "cxx-build",
+]
+
 [[package]]
 name = "idna"
 version = "0.3.0"
@@ -2387,9 +2524,9 @@ dependencies = [
 
 [[package]]
 name = "impl-serde"
-version = "0.3.2"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c"
+checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd"
 dependencies = [
  "serde",
 ]
@@ -2413,9 +2550,9 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683"
 
 [[package]]
 name = "indexmap"
-version = "1.9.1"
+version = "1.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e"
+checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399"
 dependencies = [
  "autocfg",
  "hashbrown 0.12.3",
@@ -2428,7 +2565,7 @@ version = "0.16.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2d207dc617c7a380ab07ff572a6e52fa202a2a8f355860ac9c38e23f8196be1b"
 dependencies = [
- "console 0.15.0",
+ "console 0.15.5",
  "lazy_static",
  "number_prefix",
  "regex",
@@ -2477,12 +2614,12 @@ dependencies = [
 
 [[package]]
 name = "io-lifetimes"
-version = "1.0.1"
+version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7d367024b3f3414d8e01f437f704f41a9f64ab36f9067fa73e526ad4c763c87"
+checksum = "e7d6c6f8c91b4b9ed43484ad1a938e393caf35960fce7f82a040497207bd8e9e"
 dependencies = [
  "libc",
- "windows-sys 0.42.0",
+ "windows-sys",
 ]
 
 [[package]]
@@ -2503,14 +2640,14 @@ dependencies = [
 
 [[package]]
 name = "is-terminal"
-version = "0.4.0"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aae5bc6e2eb41c9def29a3e0f1306382807764b9b53112030eff57435667352d"
+checksum = "28dfb6c8100ccc63462345b67d1bbc3679177c75ee4bf59bf29c8b1d110b8189"
 dependencies = [
  "hermit-abi 0.2.6",
  "io-lifetimes",
  "rustix",
- "windows-sys 0.42.0",
+ "windows-sys",
 ]
 
 [[package]]
@@ -2530,33 +2667,33 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
 
 [[package]]
 name = "itoa"
-version = "1.0.2"
+version = "1.0.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d"
+checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440"
 
 [[package]]
 name = "jobserver"
-version = "0.1.24"
+version = "0.1.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa"
+checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b"
 dependencies = [
  "libc",
 ]
 
 [[package]]
 name = "js-sys"
-version = "0.3.59"
+version = "0.3.60"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "258451ab10b34f8af53416d1fdab72c22e805f0c92a1136d59470ec0b11138b2"
+checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47"
 dependencies = [
  "wasm-bindgen",
 ]
 
 [[package]]
 name = "k256"
-version = "0.11.3"
+version = "0.11.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2c8a5a96d92d849c4499d99461da81c9cdc1467418a8ed2aaeb407e8d85940ed"
+checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b"
 dependencies = [
  "cfg-if",
  "ecdsa",
@@ -2567,15 +2704,18 @@ dependencies = [
 
 [[package]]
 name = "keccak"
-version = "0.1.2"
+version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838"
+checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768"
+dependencies = [
+ "cpufeatures",
+]
 
 [[package]]
 name = "kqueue"
-version = "1.0.6"
+version = "1.0.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4d6112e8f37b59803ac47a42d14f1f3a59bbf72fc6857ffc5be455e28a691f8e"
+checksum = "2c8fc60ba15bf51257aa9807a48a61013db043fcf3a78cb0d916e8e396dcad98"
 dependencies = [
  "kqueue-sys",
  "libc",
@@ -2634,27 +2774,36 @@ dependencies = [
 
 [[package]]
 name = "libc"
-version = "0.2.137"
+version = "0.2.139"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89"
+checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79"
 
 [[package]]
 name = "libm"
-version = "0.2.2"
+version = "0.2.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "33a33a362ce288760ec6a508b94caaec573ae7d3bbbd91b87aa0bad4456839db"
+checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb"
+
+[[package]]
+name = "link-cplusplus"
+version = "1.0.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5"
+dependencies = [
+ "cc",
+]
 
 [[package]]
 name = "linux-raw-sys"
-version = "0.1.3"
+version = "0.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f9f08d8963a6c613f4b1a78f4f4a4dbfadf8e6545b2d72861731e4858b8b47f"
+checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4"
 
 [[package]]
 name = "lock_api"
-version = "0.4.8"
+version = "0.4.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f80bf5aacaf25cbfc8210d1cfb718f2bf3b11c4c54e5afe36c236853a8ec390"
+checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
 dependencies = [
  "autocfg",
  "scopeguard",
@@ -2696,7 +2845,7 @@ version = "0.10.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca"
 dependencies = [
- "digest 0.10.5",
+ "digest 0.10.6",
 ]
 
 [[package]]
@@ -2707,9 +2856,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
 
 [[package]]
 name = "memoffset"
-version = "0.6.5"
+version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
+checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4"
 dependencies = [
  "autocfg",
 ]
@@ -2746,7 +2895,7 @@ dependencies = [
 
 [[package]]
 name = "migration"
-version = "0.12.0"
+version = "0.13.0"
 dependencies = [
  "sea-orm-migration",
  "tokio",
@@ -2766,23 +2915,23 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
 
 [[package]]
 name = "miniz_oxide"
-version = "0.5.1"
+version = "0.6.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2b29bd4bc3f33391105ebee3589c19197c4271e3e5a9ec9bfe8127eeff8f082"
+checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa"
 dependencies = [
  "adler",
 ]
 
 [[package]]
 name = "mio"
-version = "0.8.4"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf"
+checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de"
 dependencies = [
  "libc",
  "log",
  "wasi 0.11.0+wasi-snapshot-preview1",
- "windows-sys 0.36.1",
+ "windows-sys",
 ]
 
 [[package]]
@@ -2827,14 +2976,23 @@ checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54"
 
 [[package]]
 name = "nom"
-version = "7.1.1"
+version = "7.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36"
+checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
 dependencies = [
  "memchr",
  "minimal-lexical",
 ]
 
+[[package]]
+name = "nom8"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8"
+dependencies = [
+ "memchr",
+]
+
 [[package]]
 name = "nonempty"
 version = "0.7.0"
@@ -2896,9 +3054,9 @@ dependencies = [
 
 [[package]]
 name = "num-bigint-dig"
-version = "0.8.1"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "566d173b2f9406afbc5510a90925d5a2cd80cae4605631f1212303df265de011"
+checksum = "2399c9463abc5f909349d8aa9ba080e0b88b3ce2885389b60b993f39b1a56905"
 dependencies = [
  "byteorder",
  "lazy_static",
@@ -2913,9 +3071,9 @@ dependencies = [
 
 [[package]]
 name = "num-complex"
-version = "0.4.2"
+version = "0.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19"
+checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d"
 dependencies = [
  "num-traits",
 ]
@@ -2965,11 +3123,11 @@ dependencies = [
 
 [[package]]
 name = "num_cpus"
-version = "1.13.1"
+version = "1.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1"
+checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b"
 dependencies = [
- "hermit-abi 0.1.19",
+ "hermit-abi 0.2.6",
  "libc",
 ]
 
@@ -2981,18 +3139,18 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
 
 [[package]]
 name = "object"
-version = "0.28.4"
+version = "0.30.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424"
+checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439"
 dependencies = [
  "memchr",
 ]
 
 [[package]]
 name = "once_cell"
-version = "1.15.0"
+version = "1.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1"
+checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66"
 
 [[package]]
 name = "opaque-debug"
@@ -3008,9 +3166,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
 
 [[package]]
 name = "open-fastrlp"
-version = "0.1.2"
+version = "0.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "131de184f045153e72c537ef4f1d57babddf2a897ca19e67bdff697aebba7f3d"
+checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce"
 dependencies = [
  "arrayvec",
  "auto_impl 1.0.1",
@@ -3044,26 +3202,25 @@ dependencies = [
 
 [[package]]
 name = "os_str_bytes"
-version = "6.1.0"
+version = "6.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa"
+checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"
 
 [[package]]
 name = "ouroboros"
-version = "0.15.0"
+version = "0.15.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f31a3b678685b150cba82b702dcdc5e155893f63610cf388d30cd988d4ca2bf"
+checksum = "dfbb50b356159620db6ac971c6d5c9ab788c9cc38a6f49619fca2a27acb062ca"
 dependencies = [
  "aliasable",
  "ouroboros_macro",
- "stable_deref_trait",
 ]
 
 [[package]]
 name = "ouroboros_macro"
-version = "0.15.0"
+version = "0.15.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "084fd65d5dd8b3772edccb5ffd1e4b7eba43897ecd0f9401e330e8c542959408"
+checksum = "4a0d9d1a6191c4f391f87219d1ea42b23f09ee84d64763cd05ee6ea88d9f384d"
 dependencies = [
  "Inflector",
  "proc-macro-error",
@@ -3079,13 +3236,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
 
 [[package]]
-name = "parity-scale-codec"
-version = "3.1.2"
+name = "pagerduty-rs"
+version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8b44461635bbb1a0300f100a841e571e7d919c81c73075ef5d152ffdb521066"
+checksum = "bd10bab2b6df910bbe6c4987d76aa4221235103d9a9c000cfabcee6a6abc8f7a"
+dependencies = [
+ "reqwest",
+ "serde",
+ "time 0.3.17",
+ "url",
+]
+
+[[package]]
+name = "parity-scale-codec"
+version = "3.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7ab01d0f889e957861bc65888d5ccbe82c158d0270136ba46820d43837cdf72"
 dependencies = [
  "arrayvec",
- "bitvec 1.0.0",
+ "bitvec 1.0.1",
  "byte-slice-cast",
  "impl-trait-for-tuples",
  "parity-scale-codec-derive",
@@ -3094,11 +3263,11 @@ dependencies = [
 
 [[package]]
 name = "parity-scale-codec-derive"
-version = "3.1.2"
+version = "3.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c45ed1f39709f5a89338fab50e59816b2e8815f5bb58276e7ddf9afd495f73f8"
+checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b"
 dependencies = [
- "proc-macro-crate",
+ "proc-macro-crate 1.3.0",
  "proc-macro2",
  "quote",
  "syn",
@@ -3118,7 +3287,7 @@ checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
 dependencies = [
  "instant",
  "lock_api",
- "parking_lot_core 0.8.5",
+ "parking_lot_core 0.8.6",
 ]
 
 [[package]]
@@ -3128,14 +3297,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
 dependencies = [
  "lock_api",
- "parking_lot_core 0.9.3",
+ "parking_lot_core 0.9.6",
 ]
 
 [[package]]
 name = "parking_lot_core"
-version = "0.8.5"
+version = "0.8.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216"
+checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
 dependencies = [
  "cfg-if",
  "instant",
@@ -3147,9 +3316,9 @@ dependencies = [
 
 [[package]]
 name = "parking_lot_core"
-version = "0.9.3"
+version = "0.9.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929"
+checksum = "ba1ef8814b5c993410bb3adfad7a5ed269563e4a2f90c41f5d85be7fb47133bf"
 dependencies = [
  "backtrace",
  "cfg-if",
@@ -3158,18 +3327,7 @@ dependencies = [
  "redox_syscall",
  "smallvec",
  "thread-id",
- "windows-sys 0.36.1",
-]
-
-[[package]]
-name = "password-hash"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1d791538a6dcc1e7cb7fe6f6b58aca40e7f79403c45b2bc274008b5e647af1d8"
-dependencies = [
- "base64ct",
- "rand_core",
- "subtle",
+ "windows-sys",
 ]
 
 [[package]]
@@ -3185,9 +3343,9 @@ dependencies = [
 
 [[package]]
 name = "paste"
-version = "1.0.7"
+version = "1.0.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc"
+checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba"
 
 [[package]]
 name = "path-slash"
@@ -3194,27 +3352,15 @@
 version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1e91099d4268b0e11973f036e885d652fb0b21fedcf69738c627f94db6a44f42"
 
-[[package]]
-name = "pbkdf2"
-version = "0.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7"
-dependencies = [
- "digest 0.10.5",
- "hmac",
- "password-hash 0.3.2",
- "sha2 0.10.6",
-]
-
 [[package]]
 name = "pbkdf2"
 version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917"
 dependencies = [
- "digest 0.10.5",
+ "digest 0.10.6",
  "hmac",
- "password-hash 0.4.2",
+ "password-hash",
  "sha2 0.10.6",
 ]
 
@@ -3236,9 +3382,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
 
 [[package]]
 name = "pest"
-version = "2.2.1"
+version = "2.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69486e2b8c2d2aeb9762db7b4e00b0331156393555cff467f4163ff06821eef8"
+checksum = "4ab62d2fa33726dbe6321cc97ef96d8cde531e3eeaf858a058de53a8a6d40d8f"
 dependencies = [
  "thiserror",
  "ucd-trie",
@@ -3246,9 +3392,9 @@ dependencies = [
 
 [[package]]
 name = "pest_derive"
-version = "2.2.1"
+version = "2.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b13570633aff33c6d22ce47dd566b10a3b9122c2fe9d8e7501895905be532b91"
+checksum = "8bf026e2d0581559db66d837fe5242320f525d85c76283c61f4d51a1238d65ea"
 dependencies = [
  "pest",
  "pest_generator",
@@ -3256,9 +3402,9 @@ dependencies = [
 
 [[package]]
 name = "pest_generator"
-version = "2.2.1"
+version = "2.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3c567e5702efdc79fb18859ea74c3eb36e14c43da7b8c1f098a4ed6514ec7a0"
+checksum = "2b27bd18aa01d91c8ed2b61ea23406a676b42d82609c6e2581fba42f0c15f17f"
 dependencies = [
  "pest",
  "pest_meta",
@@ -3269,13 +3415,13 @@ dependencies = [
 
 [[package]]
 name = "pest_meta"
-version = "2.2.1"
+version = "2.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5eb32be5ee3bbdafa8c7a18b0a8a8d962b66cfa2ceee4037f49267a50ee821fe"
+checksum = "9f02b677c1859756359fc9983c2e56a0237f18624a3789528804406b7e915e5d"
 dependencies = [
  "once_cell",
  "pest",
- "sha-1",
+ "sha2 0.10.6",
 ]
 
 [[package]]
@@ -3350,18 +3496,18 @@ checksum = "db8bcd96cb740d03149cbad5518db9fd87126a10ab519c011893b1754134c468"
 
 [[package]]
 name = "pin-project"
-version = "1.0.11"
+version = "1.0.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260"
+checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc"
 dependencies = [
  "pin-project-internal",
 ]
 
 [[package]]
 name = "pin-project-internal"
-version = "1.0.11"
+version = "1.0.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74"
+checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -3408,34 +3554,35 @@ version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba"
 dependencies = [
- "der 0.6.0",
+ "der 0.6.1",
  "spki 0.6.0",
 ]
 
 [[package]]
 name = "pkg-config"
-version = "0.3.25"
+version = "0.3.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae"
+checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160"
 
 [[package]]
 name = "polling"
-version = "2.2.0"
+version = "2.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "685404d509889fade3e86fe3a5803bca2ec09b0c0778d5ada6ec8bf7a8de5259"
+checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6"
 dependencies = [
+ "autocfg",
  "cfg-if",
  "libc",
  "log",
  "wepoll-ffi",
- "winapi",
+ "windows-sys",
 ]
 
 [[package]]
 name = "ppv-lite86"
-version = "0.2.16"
+version = "0.2.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872"
+checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
 
 [[package]]
 name = "precomputed-hash"
@@ -3445,9 +3592,9 @@ checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"
 
 [[package]]
 name = "primitive-types"
-version = "0.11.1"
+version = "0.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a"
+checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66"
 dependencies = [
  "fixed-hash",
  "impl-codec",
@@ -3459,12 +3606,21 @@ dependencies = [
 
 [[package]]
 name = "proc-macro-crate"
-version = "1.1.3"
+version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a"
+checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785"
 dependencies = [
- "thiserror",
- "toml",
+ "toml 0.5.11",
+]
+
+[[package]]
+name = "proc-macro-crate"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "66618389e4ec1c7afe67d51a9bf34ff9236480f8d51e7489b7d5ab0303c13f34"
+dependencies = [
+ "once_cell",
+ "toml_edit",
 ]
 
 [[package]]
@@ -3493,15 +3649,15 @@ dependencies = [
 
 [[package]]
 name = "proc-macro-hack"
-version = "0.5.19"
+version = "0.5.20+deprecated"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
+checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.47"
+version = "1.0.50"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725"
+checksum = "6ef7d57beacfaf2d8aee5937dab7b7f28de3cb8b1828479bb5de2a7106f2bae2"
 dependencies = [
  "unicode-ident",
 ]
@@ -3517,6 +3673,26 @@ dependencies = [
  "winapi",
 ]
 
+[[package]]
+name = "ptr_meta"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1"
+dependencies = [
+ "ptr_meta_derive",
+]
+
+[[package]]
+name = "ptr_meta_derive"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "pulldown-cmark"
 version = "0.9.2"
@@ -3530,9 +3706,9 @@ dependencies = [
 
 [[package]]
 name = "quote"
-version = "1.0.18"
+version = "1.0.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1"
+checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b"
 dependencies = [
  "proc-macro2",
 ]
@@ -3572,9 +3748,9 @@ dependencies = [
 
 [[package]]
 name = "rand_core"
-version = "0.6.3"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
 dependencies = [
  "getrandom",
 ]
@@ -3590,21 +3766,19 @@ dependencies = [
 
 [[package]]
 name = "rayon"
-version = "1.5.3"
+version = "1.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d"
+checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7"
 dependencies = [
- "autocfg",
- "crossbeam-deque",
  "either",
  "rayon-core",
 ]
 
 [[package]]
 name = "rayon-core"
-version = "1.9.3"
+version = "1.10.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f"
+checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b"
 dependencies = [
  "crossbeam-channel",
  "crossbeam-deque",
@@ -3614,15 +3788,15 @@ dependencies = [
 
 [[package]]
 name = "redis"
-version = "0.22.1"
+version = "0.22.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "513b3649f1a111c17954296e4a3b9eecb108b766c803e2b99f179ebe27005985"
+checksum = "aa8455fa3621f6b41c514946de66ea0531f57ca017b2e6c7cc368035ea5b46df"
 dependencies = [
  "async-trait",
  "bytes",
  "combine",
  "futures-util",
- "itoa 1.0.2",
+ "itoa 1.0.5",
  "percent-encoding",
  "pin-project-lite",
  "ryu",
@@ -3642,9 +3816,9 @@ dependencies = [
 
 [[package]]
 name = "redox_syscall"
-version = "0.2.13"
+version = "0.2.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42"
+checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
 dependencies = [
  "bitflags",
 ]
@@ -3682,9 +3856,9 @@ dependencies = [
 
 [[package]]
 name = "regex-syntax"
-version = "0.6.27"
+version = "0.6.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
+checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848"
 
 [[package]]
 name = "remove_dir_all"
@@ -3696,12 +3870,21 @@ dependencies = [
 ]
 
 [[package]]
-name = "reqwest"
-version = "0.11.13"
+name = "rend"
+version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c"
+checksum = "79af64b4b6362ffba04eef3a4e10829718a4896dac19daa741851c86781edf95"
 dependencies = [
- "base64 0.13.0",
+ "bytecheck",
+]
+
+[[package]]
+name = "reqwest"
+version = "0.11.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9"
+dependencies = [
+ "base64 0.21.0",
  "bytes",
  "encoding_rs",
  "futures-core",
@@ -3742,11 +3925,11 @@ checksum = "4389f1d5789befaf6029ebd9f7dac4af7f7e3d61b69d4f30e2ac02b57e7712b0"
 
 [[package]]
 name = "rfc6979"
-version = "0.3.0"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "88c86280f057430a52f4861551b092a01b419b8eacefc7c995eacb9dc132fe32"
+checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb"
 dependencies = [
- "crypto-bigint 0.4.8",
+ "crypto-bigint 0.4.9",
  "hmac",
  "zeroize",
 ]
@@ -3768,18 +3951,43 @@ dependencies = [
 
 [[package]]
 name = "ripemd"
-version = "0.1.1"
+version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1facec54cb5e0dc08553501fa740091086d0259ad0067e0d4103448e4cb22ed3"
+checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f"
 dependencies = [
- "digest 0.10.5",
+ "digest 0.10.6",
+]
+
+[[package]]
+name = "rkyv"
+version = "0.7.39"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15"
+dependencies = [
+ "bytecheck",
+ "hashbrown 0.12.3",
+ "ptr_meta",
+ "rend",
+ "rkyv_derive",
+ "seahash",
+]
+
+[[package]]
+name = "rkyv_derive"
+version = "0.7.39"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6eaedadc88b53e36dd32d940ed21ae4d850d5916f2581526921f553a72ac34c4"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
 ]
 
 [[package]]
 name = "rlp"
-version = "0.5.1"
+version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "999508abb0ae792aabed2460c45b89106d97fe4adac593bdaef433c2605847b5"
+checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec"
 dependencies = [
  "bytes",
  "rustc-hex",
@@ -3803,7 +4011,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b"
 dependencies = [
  "byteorder",
- "digest 0.10.5",
+ "digest 0.10.6",
  "num-bigint-dig",
  "num-integer",
  "num-iter",
@@ -3818,13 +4026,20 @@ dependencies = [
 
 [[package]]
 name = "rust_decimal"
-version = "1.26.1"
+version = "1.28.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee9164faf726e4f3ece4978b25ca877ddc6802fa77f38cdccb32c7f805ecd70c"
+checksum = "7fe32e8c89834541077a5c5bbe5691aa69324361e27e6aeb3552a737db4a70c8"
 dependencies = [
  "arrayvec",
+ "borsh",
+ "bytecheck",
+ "byteorder",
+ "bytes",
  "num-traits",
+ "rand",
+ "rkyv",
  "serde",
+ "serde_json",
 ]
 
 [[package]]
@@ -3856,23 +4071,23 @@ dependencies = [
 
 [[package]]
 name = "rustix"
-version = "0.36.3"
+version = "0.36.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b1fbb4dfc4eb1d390c02df47760bb19a84bb80b301ecc947ab5406394d8223e"
+checksum = "d4fdebc4b395b7fbb9ab11e462e20ed9051e7b16e42d24042c776eca0ac81b03"
 dependencies = [
  "bitflags",
  "errno",
  "io-lifetimes",
  "libc",
  "linux-raw-sys",
- "windows-sys 0.42.0",
+ "windows-sys",
 ]
 
 [[package]]
 name = "rustls"
-version = "0.20.6"
+version = "0.20.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033"
+checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f"
 dependencies = [
  "log",
  "ring",
@@ -3882,24 +4097,24 @@ dependencies = [
 
 [[package]]
 name = "rustls-pemfile"
-version = "1.0.0"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9"
+checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b"
 dependencies = [
- "base64 0.13.0",
+ "base64 0.21.0",
 ]
 
 [[package]]
 name = "rustversion"
-version = "1.0.9"
+version = "1.0.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8"
+checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70"
 
 [[package]]
 name = "ryu"
-version = "1.0.10"
+version = "1.0.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695"
+checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde"
 
 [[package]]
 name = "salsa20"
@@ -3921,9 +4136,9 @@ dependencies = [
 
 [[package]]
 name = "scale-info"
-version = "2.2.0"
+version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum =
"333af15b02563b8182cd863f925bd31ef8fa86a0e095d30c091956057d436153" +checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" dependencies = [ "cfg-if", "derive_more", @@ -3933,11 +4148,11 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.2.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53f56acbd0743d29ffa08f911ab5397def774ad01bab3786804cf6ee057fb5e1" +checksum = "303959cf613a6f6efd19ed4b4ad5bf79966a13352716299ad532cfb115f4205c" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.3.0", "proc-macro2", "quote", "syn", @@ -3958,6 +4173,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "scratch" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" + [[package]] name = "scrypt" version = "0.10.0" @@ -3965,7 +4186,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f9e24d2b632954ded8ab2ef9fea0a0c769ea56ea98bddbafbad22caeeadf45d" dependencies = [ "hmac", - "pbkdf2 0.11.0", + "pbkdf2", "salsa20", "sha2 0.10.6", ] @@ -3982,9 +4203,9 @@ dependencies = [ [[package]] name = "sea-orm" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc2db217f2061ab2bbb1bd22323a533ace0617f97690919f3ed3894e1b3ba170" +checksum = "88694d01b528a94f90ad87f8d2f546d060d070eee180315c67d158cb69476034" dependencies = [ "async-stream", "async-trait", @@ -4010,9 +4231,9 @@ dependencies = [ [[package]] name = "sea-orm-cli" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebcce92f0f804acd10b4378a3c8b0e5fb28f3a9ae9337006bd651baa3a95632c" +checksum = "0ebe1f820fe8949cf6a57272ba9ebd0be766e47c9b85c04b3cabea40ab9459b3" dependencies = [ "chrono", "clap", @@ -4026,9 +4247,9 @@ dependencies = [ [[package]] name = "sea-orm-macros" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38066057ef1fa17ddc6ce1458cf269862b8f1df919497d110ea127b549a90fbd" +checksum = "7216195de9c6b2474fd0efab486173dccd0eff21f28cc54aa4c0205d52fb3af0" dependencies = [ "bae", "heck 0.3.3", @@ -4039,9 +4260,9 @@ dependencies = [ [[package]] name = "sea-orm-migration" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ada716f9825e4190a0a8ebaecbf7171ce0ed6f218ea2e70086bdc72ccfc1d03c" +checksum = "0ed3cdfa669e4c385922f902b9a58e0c2128782a4d0fe79c6c34f3b927565e5b" dependencies = [ "async-trait", "clap", @@ -4055,9 +4276,9 @@ dependencies = [ [[package]] name = "sea-query" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3497a83851c4be4d1fdc8cbc7215105b828a2a944abb64dd2e0ba233f2ce187f" +checksum = "a4f0fc4d8e44e1d51c739a68d336252a18bc59553778075d5e32649be6ec92ed" dependencies = [ "chrono", "rust_decimal", @@ -4069,9 +4290,9 @@ dependencies = [ [[package]] name = "sea-query-binder" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dddc5c3889bbc63b7e3374d3a7494551eb801c45f2f9cb460bfa4921653563d" +checksum = "9c2585b89c985cfacfe0ec9fc9e7bb055b776c1a2581c4e3c6185af2b8bf8865" dependencies = [ "chrono", 
"rust_decimal", @@ -4097,9 +4318,9 @@ dependencies = [ [[package]] name = "sea-schema" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a2e6fd7fb2c4adc28f1b8fb29944fa5e6a77968df57f32b7146c9ae10fb2f2b" +checksum = "38d5fda574d980e9352b6c7abd6fc75697436fe0078cac2b548559b52643ad3b" dependencies = [ "futures", "sea-query", @@ -4140,6 +4361,12 @@ dependencies = [ "syn", ] +[[package]] +name = "seahash" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" + [[package]] name = "sec1" version = "0.3.0" @@ -4147,7 +4374,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ "base16ct", - "der 0.6.0", + "der 0.6.1", "generic-array 0.14.6", "pkcs8 0.9.0", "subtle", @@ -4156,9 +4383,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" +checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" dependencies = [ "serde", ] @@ -4171,9 +4398,9 @@ checksum = "930c0acf610d3fdb5e2ab6213019aaa04e227ebe9547b0649ba599b16d788bd7" [[package]] name = "sentry" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ad137b9df78294b98cab1a650bef237cc6c950e82e5ce164655e674d07c5cc" +checksum = "a6097dc270a9c4555c5d6222ed243eaa97ff38e29299ed7c5cb36099033c604e" dependencies = [ "httpdate", "reqwest", @@ -4191,9 +4418,9 @@ dependencies = [ [[package]] name = "sentry-anyhow" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55adcceaad4189af35d82c3c51613c0c372f15c25f42f70bf23c9c3ede223e1" +checksum = "45a52d909ea1f5107fe29aa86581da01b88bde811fbde875773237c1596fbab6" dependencies = [ "anyhow", "sentry-backtrace", @@ -4202,9 +4429,9 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afe4800806552aab314129761d5d3b3d422284eca3de2ab59e9fd133636cbd3d" +checksum = "9d92d1e4d591534ae4f872d6142f3b500f4ffc179a6aed8a3e86c7cc96d10a6a" dependencies = [ "backtrace", "once_cell", @@ -4214,9 +4441,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a42938426670f6e7974989cd1417837a96dd8bbb01567094f567d6acb360bf88" +checksum = "3afa877b1898ff67dd9878cf4bec4e53cef7d3be9f14b1fc9e4fcdf36f8e4259" dependencies = [ "hostname", "libc", @@ -4228,9 +4455,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4df9b9d8de2658a1ecd4e45f7b06c80c5dd97b891bfbc7c501186189b7e9bbdf" +checksum = "fc43eb7e4e3a444151a0fe8a0e9ce60eabd905dae33d66e257fa26f1b509c1bd" dependencies = [ "once_cell", "rand", @@ -4241,9 +4468,9 @@ dependencies = [ [[package]] name = "sentry-log" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7518096b31fa4075d1bbab79ad62da3258f6c67bafeb4a8b2b3f803695b9205e" +checksum = 
"598aefe14750bcec956adebc8992dd432f4e22c12cd524633963113864aa39b4" dependencies = [ "log", "sentry-core", @@ -4251,9 +4478,9 @@ dependencies = [ [[package]] name = "sentry-panic" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0af37b8500f273e511ebd6eb0d342ff7937d64ce3f134764b2b4653112d48cb4" +checksum = "ccab4fab11e3e63c45f4524bee2e75cde39cdf164cb0b0cbe6ccd1948ceddf66" dependencies = [ "sentry-backtrace", "sentry-core", @@ -4261,9 +4488,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.29.1" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccc95faa4078768a6bf8df45e2b894bbf372b3dbbfb364e9429c1c58ab7545c6" +checksum = "f63708ec450b6bdcb657af760c447416d69c38ce421f34e5e2e9ce8118410bc7" dependencies = [ "debugid", "getrandom", @@ -4287,9 +4514,9 @@ dependencies = [ [[package]] name = "serde-aux" -version = "4.0.0" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c79c1a5a310c28bf9f7a4b9bd848553051120d80a5952f993c7eb62f6ed6e4c5" +checksum = "c599b3fd89a75e0c18d6d2be693ddb12cccaf771db4ff9e39097104808a014c0" dependencies = [ "serde", "serde_json", @@ -4312,16 +4539,16 @@ version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" dependencies = [ - "itoa 1.0.2", + "itoa 1.0.5", "ryu", "serde", ] [[package]] name = "serde_path_to_error" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "184c643044780f7ceb59104cef98a5a6f12cb2288a7bc701ab93a362b49fd47d" +checksum = "26b04f22b563c91331a10074bda3dd5492e3cc39d56bd557e91c0af42b6c7341" dependencies = [ "serde", ] @@ -4341,6 +4568,15 @@ dependencies = [ "snafu", ] +[[package]] +name = "serde_spanned" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c68e921cef53841b8925c2abadd27c9b891d9613bdc43d6b823062866df38e8" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -4348,31 +4584,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.2", + "itoa 1.0.5", "ryu", "serde", ] [[package]] name = "sha-1" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] name = "sha1" -version = "0.10.1" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77f4e7f65455545c2153c1253d25056825e77ee2533f0e41deb65a93a34852f" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -4408,16 +4644,16 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] name = "sha3" -version = "0.10.4" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eaedf34ed289ea47c2b741bb72e5357a209512d67bcd4bda44359e5bf0470f56" +checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", "keccak", ] @@ -4441,11 +4677,11 @@ dependencies = [ [[package]] name = "signature" -version = "1.5.0" +version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f054c6c1a6e95179d6f23ed974060dcefb2d9388bb7256900badad682c499de4" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", "rand_core", ] @@ -4524,9 +4760,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", @@ -4553,9 +4789,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spin" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c530c2b0d0bf8b69304b39fe2001993e267461948b890cd037d8ad4293fa1a0d" +checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09" dependencies = [ "lock_api", ] @@ -4577,14 +4813,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", - "der 0.6.0", + "der 0.6.1", ] [[package]] name = "sqlformat" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f87e292b4291f154971a43c3774364e2cbcaec599d3f5bf6fa9d122885dbc38a" +checksum = "0c12bc9199d1db8234678b7051747c07f517cdcf019262d1847b94ec8b1aee3e" dependencies = [ "itertools", "nom", @@ -4615,7 +4851,7 @@ dependencies = [ "chrono", "crc", "crossbeam-queue", - "digest 0.10.5", + "digest 0.10.6", "dotenvy", "either", "event-listener", @@ -4627,7 +4863,7 @@ dependencies = [ "hashlink", "hex", "indexmap", - "itoa 1.0.2", + "itoa 1.0.5", "libc", "log", "memchr", @@ -4687,12 +4923,6 @@ dependencies = [ "tokio-rustls", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "static_assertions" version = "1.1.0" @@ -4730,18 +4960,18 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strum" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96acfc1b70604b8b2f1ffa4c57e59176c7dbb05d556c71ecd2f5498a1dee7f8" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" dependencies = [ "strum_macros", ] [[package]] name = "strum_macros" -version = "0.24.0" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6878079b17446e4d3eba6192bb0a2950d5b14f0ed8424b852310e5a94345d0ef" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.0", "proc-macro2", @@ -4758,9 +4988,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "svm-rs" -version = "0.2.18" +version = "0.2.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4cdcf91153dc0e4e0637f26f042ada32a3b552bc8115935c7bf96f80132b0a" +checksum = "e18bbb2b229a2cc0d8ba58603adb0e460ad49a3451b1540fd6f7a5d37fd03b80" dependencies = [ "anyhow", "cfg-if", @@ -4789,9 +5019,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.105" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" +checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" dependencies = [ "proc-macro2", "quote", @@ -4854,9 +5084,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] @@ -4873,24 +5103,24 @@ dependencies = [ [[package]] name = "textwrap" -version = "0.15.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" +checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" dependencies = [ "proc-macro2", "quote", @@ -4927,11 +5157,12 @@ dependencies = [ [[package]] name = "time" -version = "0.1.43" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" dependencies = [ "libc", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi", ] @@ -4941,7 +5172,7 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ - "itoa 1.0.2", + "itoa 1.0.5", "serde", "time-core", "time-macros", @@ -4988,9 +5219,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.24.1" +version = "1.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d9f76183f91ecfb55e1d7d5602bd1d979e38a3a522fe900241cf195624d67ae" +checksum = "597a12a59981d9e3c38d216785b0c37399f6e415e8d0712047620f189371b0bb" dependencies = [ "autocfg", "bytes", @@ -5004,14 +5235,14 @@ dependencies = [ "socket2", "tokio-macros", "tracing", - "windows-sys 0.42.0", + "windows-sys", ] [[package]] name = "tokio-macros" -version = "1.7.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", 
@@ -5071,9 +5302,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", @@ -5085,13 +5316,47 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] +[[package]] +name = "toml" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb9d890e4dc9298b70f740f615f2e05b9db37dce531f6b24fb77ac993f9f217" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "729bfd096e40da9c001f778f5cdecbd2957929a24e10e5883d9392220a751581" +dependencies = [ + "indexmap", + "nom8", + "serde", + "serde_spanned", + "toml_datetime", +] + [[package]] name = "tower" version = "0.4.13" @@ -5135,9 +5400,9 @@ checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" @@ -5214,15 +5479,15 @@ dependencies = [ [[package]] name = "triomphe" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe1b3800b35f9b936c28dc59dbda91b195371269396784d931fe2a5a2be3d2f" +checksum = "f1ee9bd9239c339d714d657fac840c6d2a4f9c45f4f9ec7b0975113458be78db" [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "tungstenite" @@ -5230,7 +5495,7 @@ version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "byteorder", "bytes", "http", @@ -5251,7 +5516,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "byteorder", "bytes", "http", @@ -5266,21 +5531,21 @@ dependencies = [ [[package]] name = "typenum" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name 
= "ucd-trie" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89570599c4fe5585de2b388aab47e99f7fa4e9238a1399f707a02e356058141c" +checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" [[package]] name = "uint" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f03af7ccf01dd611cc450a0d10dbc9b745770d096473e2faf0ca6e2d66d1e0" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" dependencies = [ "byteorder", "crunchy", @@ -5318,36 +5583,36 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" [[package]] name = "unicode-ident" -version = "1.0.0" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" +checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99" +checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a" [[package]] name = "unicode-width" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "unicode-xid" @@ -5369,12 +5634,11 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "ureq" -version = "2.5.0" +version = "2.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97acb4c28a254fd7a4aeec976c46a7fa404eac4d7c134b30c75144846d7cb8f" +checksum = "338b31dd1314f68f3aabf3ed57ab922df95ffcd902476ca7ba3c4ce7b908c46d" dependencies = [ - "base64 0.13.0", - "chunked_transfer", + "base64 0.13.1", "log", "once_cell", "rustls", @@ -5462,9 +5726,9 @@ dependencies = [ [[package]] name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasi" @@ -5474,9 +5738,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7652e3f6c4706c8d9cd54832c4a4ccb9b5336e2c3bd154d5cccfbf1c1f5f7d" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ 
-5484,9 +5748,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "662cd44805586bd52971b9586b1df85cdbbd9112e4ef4d8f41559c334dc6ac3f" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" dependencies = [ "bumpalo", "log", @@ -5499,9 +5763,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.30" +version = "0.4.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f741de44b75e14c35df886aff5f1eb73aa114fa5d4d00dcd37b5e01259bf3b2" +checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" dependencies = [ "cfg-if", "js-sys", @@ -5511,9 +5775,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b260f13d3012071dfb1512849c033b1925038373aea48ced3012c09df952c602" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5521,9 +5785,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" dependencies = [ "proc-macro2", "quote", @@ -5534,9 +5798,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6598dd0bd3c7d51095ff6531a5b23e02acdc81804e30d8f07afb77b7215a140a" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "wasm-timer" @@ -5555,9 +5819,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.57" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" dependencies = [ "js-sys", "wasm-bindgen", @@ -5565,10 +5829,9 @@ dependencies = [ [[package]] name = "web3_proxy" -version = "0.12.0" +version = "0.13.0" dependencies = [ "anyhow", - "arc-swap", "argh", "axum", "axum-client-ip", @@ -5584,6 +5847,7 @@ dependencies = [ "fdlimit", "flume", "futures", + "gethostname", "glob", "handlebars", "hashbrown 0.13.2", @@ -5598,6 +5862,7 @@ dependencies = [ "notify", "num", "num-traits", + "pagerduty-rs", "parking_lot 0.12.1", "proctitle", "redis-rate-limiter", @@ -5613,7 +5878,7 @@ dependencies = [ "time 0.3.17", "tokio", "tokio-stream", - "toml", + "toml 0.6.0", "tower", "tower-http", "ulid", @@ -5633,9 +5898,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.5" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368bfe657969fb01238bb756d351dcade285e0f6fcbd36dcb23359a5169975be" +checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ "webpki", ] @@ -5681,16 +5946,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "windows-sys" -version = "0.36.1" +name = "windows" +version = "0.43.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +checksum = "04662ed0e3e5630dfa9b26e4cb823b817f1a9addda855d973a9458c236556244" dependencies = [ - "windows_aarch64_msvc 0.36.1", - "windows_i686_gnu 0.36.1", - "windows_i686_msvc 0.36.1", - "windows_x86_64_gnu 0.36.1", - "windows_x86_64_msvc 0.36.1", + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", ] [[package]] @@ -5700,85 +5967,55 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - "windows_x86_64_gnu 0.42.0", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", + "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" [[package]] name = "windows_aarch64_msvc" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" [[package]] name = "windows_i686_gnu" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" - -[[package]] -name = "windows_i686_gnu" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" [[package]] name = "windows_i686_msvc" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" - -[[package]] -name = "windows_i686_msvc" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" [[package]] name = "windows_x86_64_gnu" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" +version = "0.42.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" +checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" [[package]] name = "windows_x86_64_msvc" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" [[package]] name = "winreg" @@ -5809,9 +6046,9 @@ dependencies = [ [[package]] name = "wyz" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b31594f29d27036c383b53b59ed3476874d518f0efb151b27a4c275141390e" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" dependencies = [ "tap", ] @@ -5830,9 +6067,9 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" [[package]] name = "zip" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf225bcf73bb52cbb496e70475c7bd7a3f769df699c0020f6c7bd9a96dcf0b8d" +checksum = "537ce7411d25e54e8ae21a7ce0b15840e7bfcff15b51d697ec3266cc76bdf080" dependencies = [ "aes 0.7.5", "byteorder", @@ -5842,7 +6079,7 @@ dependencies = [ "crossbeam-utils", "flate2", "hmac", - "pbkdf2 0.10.1", + "pbkdf2", "sha1", "time 0.3.17", "zstd", @@ -5850,18 +6087,18 @@ dependencies = [ [[package]] name = "zstd" -version = "0.10.2+zstd.1.5.2" +version = "0.11.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4a6bd64f22b5e3e94b4e238669ff9f10815c27a5180108b849d24174a83847" +checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "4.1.6+zstd.1.5.2" +version = "5.0.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b61c51bb270702d6167b8ce67340d2754b088d0c091b06e593aa772c3ee9bb" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" dependencies = [ "libc", "zstd-sys", @@ -5869,10 +6106,11 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.6.3+zstd.1.5.2" +version = "2.0.5+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc49afa5c8d634e75761feda8c592051e7eeb4683ba827211eb0d731d3402ea8" +checksum = "edc50ffce891ad571e9f9afe5039c4837bede781ac4bb13052ed7ae695518596" dependencies = [ "cc", "libc", + "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 75a15ab9..1d5c2238 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,9 +9,7 @@ members = [ ] [profile.release] -# we leave debug = true on so that sentry can give us line numbers +# `debug = true` so that sentry can give us line numbers debug = true -# TODO: enable lto (and maybe other things proven with benchmarks) once rapid development is done -#lto = true - -# TODO: we can't do panic = abort because the websockets disconnect by panicking sometimes +# spend longer compiling for a slightly faster binary +codegen-units = 1 diff --git a/Dockerfile b/Dockerfile index 02bfdfa0..eb74e040 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,12 +8,14 @@ COPY . . 
RUN --mount=type=cache,target=/usr/local/cargo/registry \ --mount=type=cache,target=/usr/src/web3_proxy/target \ cargo test &&\ - cargo install --locked --root /opt/bin --path ./web3_proxy + cargo install --locked --no-default-features --root /opt/bin --path ./web3_proxy FROM debian:bullseye-slim COPY --from=builder /opt/bin/* /usr/local/bin/ -ENTRYPOINT ["web3_proxy"] + +ENTRYPOINT ["web3_proxy_cli"] +CMD [ "--config", "/web3-proxy.toml", "proxyd" ] # TODO: lower log level when done with prototyping -ENV RUST_LOG "web3_proxy=debug" +ENV RUST_LOG "warn,web3_proxy=debug,web3_proxy_cli=debug" diff --git a/README.md b/README.md index d16a3c66..9a0ade50 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ Options: Start the server with the defaults (listen on `http://localhost:8544` and use `./config/development.toml`, which uses the database and cache running under docker and proxies to a bunch of public nodes): ``` -cargo run --release +cargo run --release -- proxyd ``` ## Common commands Create a user: ``` -cargo run --bin web3_proxy_cli -- --db-url "$YOUR_DB_URL" create_user --address "$USER_ADDRESS_0x" +cargo run -- --db-url "$YOUR_DB_URL" create_user --address "$USER_ADDRESS_0x" ``` Check that the proxy is working: @@ -104,7 +104,7 @@ web3_proxy_cli --config ... change_user_tier_by_key "$RPC_ULID_KEY_FROM_PREV_COM Health check 3 servers and error if the first one doesn't match the others. ``` -web3_proxy_cli https://eth.llamarpc.com/ https://rpc.ankr.com/eth https://cloudflare-eth.com +web3_proxy_cli health_compass https://eth.llamarpc.com/ https://rpc.ankr.com/eth https://cloudflare-eth.com ``` ## Adding new database tables diff --git a/TODO.md b/TODO.md index 5b10936b..f115d336 100644 --- a/TODO.md +++ b/TODO.md @@ -300,6 +300,32 @@ These are not yet ordered. There might be duplicates. We might not actually need - [x] if private txs are disabled, only send transactions to some of our servers. we were DOSing ourselves with transactions and slowing down sync - [x] retry if we get "the method X is not available" - [x] remove weight. we don't use it anymore. tiers are what we use now +- [x] make deadlock feature optional +- [x] standalone healthcheck daemon (sentryd) +- [x] status page should show version +- [x] combine the proxy and cli into one bin +- [x] improve rate limiting on websockets +- [x] retry another server if we get a jsonrpc response error about rate limits +- [x] major refactor to only use backup servers when absolutely necessary +- [x] remove allowed lag +- [x] configurable gas buffer. default to the larger of 25k or 25% on polygon to work around erigon bug +- [x] public is 3900, but free is 360. free should be at least 3900 but probably more +- [x] add --max-wait to wait_for_sync +- [x] add automatic compare urls to wait_for_sync +- [x] send panics to pagerduty +- [x] enable lto on release builds +- [x] less logs for backup servers +- [x] use channels instead of arcswap + - this will let us easily wait for a new head or a new synced connection +- [x] broadcast transactions to more servers +- [x] send sentryd errors to pagerduty +- [x] improve handling of unknown methods +- [x] don't send pagerduty alerts for websocket panics +- [x] improve waiting for sync when rate limited +- [x] improve pager duty errors for smarter deduping +- [x] add create_key cli command +- [-] proxy mode for benchmarking all backends +- [-] proxy mode for sending to multiple backends - [-] let users choose a % of reverts to log (or maybe x/second).
someone like curve logging all reverts will be a BIG database very quickly - this must be opt-in and spawned in the background since it will slow things down and will make their calls less private - [ ] automatic pruning of old revert logs once too many are collected @@ -323,7 +349,8 @@ These are not yet ordered. There might be duplicates. We might not actually need - [ ] `stat delay` script - query database for newest stat - [ ] period_datetime should always be :00. right now it depends on start time -- [ ] two servers running will confuse rpc_accounting! +- [ ] we have our hard rate limiter set up with a period of 60. but most providers have a period of 1 +- [ ] two servers running will confuse rpc_accounting! - it won't happen with users often because they should be sticky to one proxy, but unauthenticated users will definitely hit this - one option: we need the insert to be an upsert, but how do we merge histograms? - [ ] don't use systemtime. use chrono @@ -508,7 +534,8 @@ in another repo: event subscriber - [ ] if the call is something simple like "symbol" or "decimals", cache that too. though i think this could bite us. - [ ] add a subscription that returns the head block number and hash but nothing else - [ ] if chain split detected, what should we do? don't send transactions? -- [ ] archive check works well for local servers, but public nodes (especially on other chains) seem to give unreliable results. likely because of load balancers. maybe have a "max block data limit" +- [ ] archive check works well for local servers, but public nodes (especially on other chains) seem to give unreliable results. likely because of load balancers. + - [x] configurable block data limit until better checks - [ ] https://docs.rs/derive_builder/latest/derive_builder/ - [ ] Detect orphaned transactions - [ ] https://crates.io/crates/reqwest-middleware easy retry with exponential back off @@ -578,7 +605,6 @@ in another repo: event subscriber - [ ] sentry profiling - [ ] support alchemy_minedTransactions - [ ] debug print of user::Model's address is a big vec of numbers. make that hex somehow -- [ ] should we combine the proxy and cli into one bin? - [ ] make it so you can put a string like "LN arbitrum" into the create_user script, and have it automatically turn it into 0x4c4e20617262697472756d000000000000000000. - [ ] if --address not given, use the --description - [ ] if it is too long (the last 4 bytes must be zero), give an error so descriptions like this stand out diff --git a/deferred-rate-limiter/Cargo.toml b/deferred-rate-limiter/Cargo.toml index fe1909e3..14602245 100644 --- a/deferred-rate-limiter/Cargo.toml +++ b/deferred-rate-limiter/Cargo.toml @@ -11,4 +11,4 @@ anyhow = "1.0.68" hashbrown = "0.13.2" log = "0.4.17" moka = { version = "0.9.6", default-features = false, features = ["future"] } -tokio = "1.24.1" +tokio = "1.24.2" diff --git a/docker-compose.common.yml b/docker-compose.common.yml index 26e24c3b..e6164994 100644 --- a/docker-compose.common.yml +++ b/docker-compose.common.yml @@ -4,7 +4,7 @@ services: build: .
init: true restart: unless-stopped - command: --config /config.toml --workers 16 + command: --config /config.toml --workers 16 proxyd # rust's tokio crate expects a SIGINT https://tokio.rs/tokio/topics/shutdown stop_signal: SIGINT environment: diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml index 2fb3bd45..5c6bf809 100644 --- a/docker-compose.prod.yml +++ b/docker-compose.prod.yml @@ -68,7 +68,7 @@ services: extends: file: docker-compose.common.yml service: web3-proxy - command: --config /config.toml --workers 48 + command: --config /config.toml --workers 48 proxyd volumes: - ./config/production-eth.toml:/config.toml - ./data/scratch:/scratch diff --git a/docker-compose.yml b/docker-compose.yml index 88d69521..df6fe4b6 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -23,6 +23,22 @@ services: volumes: - ./data/dev_mysql:/var/lib/mysql + # influxdb for stats + dev-influxdb: + image: influxdb:2.6.1-alpine + environment: + DOCKER_INFLUXDB_INIT_MODE: setup + DOCKER_INFLUXDB_INIT_USERNAME: dev_web3_proxy + DOCKER_INFLUXDB_INIT_PASSWORD: dev_web3_proxy + DOCKER_INFLUXDB_INIT_ORG: dev_org + DOCKER_INFLUXDB_INIT_BUCKET: dev_web3_proxy + DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: dev_web3_proxy_auth_token + ports: + - 127.0.0.1:8086:8086 + volumes: + - ./data/dev_influxdb/data:/var/lib/influxdb2 + - ./data/dev_influxdb/config:/etc/influxdb2 + # volatile redis for storing rate limits dev-vredis: extends: diff --git a/entities/Cargo.toml b/entities/Cargo.toml index 64d052a3..606a2f39 100644 --- a/entities/Cargo.toml +++ b/entities/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "entities" -version = "0.12.0" +version = "0.13.0" edition = "2021" [lib] @@ -10,7 +10,7 @@ path = "src/mod.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -sea-orm = "0.10.6" +sea-orm = "0.10.7" serde = "1.0.152" uuid = "1.2.2" ethers = "1.0.2" diff --git a/migration/Cargo.toml b/migration/Cargo.toml index fd1e4a12..61d25f6d 100644 --- a/migration/Cargo.toml +++ b/migration/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "migration" -version = "0.12.0" +version = "0.13.0" edition = "2021" publish = false @@ -9,10 +9,10 @@ name = "migration" path = "src/lib.rs" [dependencies] -tokio = { version = "1.24.1", features = ["full", "tracing"] } +tokio = { version = "1.24.2", features = ["full", "tracing"] } [dependencies.sea-orm-migration] -version = "0.10.6" +version = "0.10.7" features = [ # Enable at least one `ASYNC_RUNTIME` and `DATABASE_DRIVER` feature if you want to run migration via CLI. # View the list of supported features at https://www.sea-ql.org/SeaORM/docs/install-and-config/database-and-async-runtime. 
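Since the `sea-orm-migration` features above are what enable running migrations via CLI, a minimal sketch of that workflow may help; `generate` appears in the README hunk below, while `status` and `up` are standard sea-orm-migration subcommands (the `DATABASE_URL` environment wiring is an assumption, not shown in this diff):

```sh
# run from inside the migration/ crate (assumes DATABASE_URL is exported)
cargo run -- generate my_migration_name   # scaffold a new migration file
cargo run -- status                       # show applied vs pending migrations
cargo run -- up                           # apply all pending migrations
```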
diff --git a/migration/README.md b/migration/README.md index b3ea53eb..3b438d89 100644 --- a/migration/README.md +++ b/migration/README.md @@ -2,7 +2,7 @@ - Generate a new migration file ```sh - cargo run -- migrate generate MIGRATION_NAME + cargo run -- generate MIGRATION_NAME ``` - Apply all pending migrations ```sh diff --git a/migration/src/lib.rs b/migration/src/lib.rs index e5ab068f..7c156bf4 100644 --- a/migration/src/lib.rs +++ b/migration/src/lib.rs @@ -13,6 +13,7 @@ mod m20221108_200345_save_anon_stats; mod m20221211_124002_request_method_privacy; mod m20221213_134158_move_login_into_database; mod m20230117_191358_admin_table; +mod m20230119_204135_better_free_tier; pub struct Migrator; @@ -33,6 +34,7 @@ impl MigratorTrait for Migrator { Box::new(m20221211_124002_request_method_privacy::Migration), Box::new(m20221213_134158_move_login_into_database::Migration), Box::new(m20230117_191358_admin_table::Migration), + Box::new(m20230119_204135_better_free_tier::Migration), ] } } diff --git a/migration/src/m20230119_204135_better_free_tier.rs b/migration/src/m20230119_204135_better_free_tier.rs new file mode 100644 index 00000000..aef9e5f8 --- /dev/null +++ b/migration/src/m20230119_204135_better_free_tier.rs @@ -0,0 +1,39 @@ +//! Increase requests per minute for the free tier to be better than our public tier (which has 3900/min) +use sea_orm_migration::{prelude::*, sea_orm::ConnectionTrait}; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + let db_conn = manager.get_connection(); + let db_backend = manager.get_database_backend(); + + let update_free = Query::update() + .table(UserTier::Table) + .value(UserTier::MaxRequestsPerPeriod, 6000) + .and_where(Expr::col(UserTier::Title).eq("Free")) + .limit(1) + .to_owned(); + + let x = db_backend.build(&update_free); + + let rows_affected = db_conn.execute(x).await?.rows_affected(); + + assert_eq!(rows_affected, 1, "unable to update free tier"); + + Ok(()) + } + + async fn down(&self, _manager: &SchemaManager) -> Result<(), DbErr> { + todo!(); + } +} + +#[derive(Iden)] +enum UserTier { + Table, + Title, + MaxRequestsPerPeriod, +} diff --git a/redis-rate-limiter/Cargo.toml b/redis-rate-limiter/Cargo.toml index fcc05372..c4af3503 100644 --- a/redis-rate-limiter/Cargo.toml +++ b/redis-rate-limiter/Cargo.toml @@ -7,4 +7,4 @@ edition = "2021" [dependencies] anyhow = "1.0.68" deadpool-redis = { version = "0.11.1", features = ["rt_tokio_1", "serde"] } -tokio = "1.24.1" +tokio = "1.24.2" diff --git a/web3_proxy/Cargo.toml b/web3_proxy/Cargo.toml index 9da390b1..433715e3 100644 --- a/web3_proxy/Cargo.toml +++ b/web3_proxy/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "web3_proxy" -version = "0.12.0" +version = "0.13.0" edition = "2021" -default-run = "web3_proxy" +default-run = "web3_proxy_cli" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -19,55 +19,57 @@ migration = { path = "../migration" } redis-rate-limiter = { path = "../redis-rate-limiter" } thread-fast-rng = { path = "../thread-fast-rng" } -anyhow = { version = "1.0.68", features = ["backtrace"] } -arc-swap = "1.6.0" -argh = "0.1.10" -axum = { version = "0.6.2", features = ["headers", "ws"] } -axum-client-ip = "0.3.1" -axum-macros = "0.3.1" +# TODO: regex has several "perf" features that we might want to use +# TODO: make sure this uuid version matches sea-orm. 
PR to put this in their prelude +# TODO: import num_traits from sea-orm so we always have the same version # TODO: import chrono from sea-orm so we always have the same version +# TODO: make sure this time version matches siwe. PR to put this in their prelude + +anyhow = { version = "1.0.68", features = ["backtrace"] } +argh = "0.1.10" +axum = { version = "0.6.4", features = ["headers", "ws"] } +axum-client-ip = "0.3.1" +axum-macros = "0.3.2" chrono = "0.4.23" counter = "0.5.7" derive_more = "0.99.17" dotenv = "0.15.0" -ethers = { version = "1.0.2", default-features = false, features = ["rustls", "ws"] } env_logger = "0.10.0" +ethers = { version = "1.0.2", default-features = false, features = ["rustls", "ws"] } fdlimit = "0.2.1" flume = "0.10.14" futures = { version = "0.3.25", features = ["thread-pool"] } +gethostname = "0.4.1" +glob = "0.3.1" +handlebars = "4.3.6" hashbrown = { version = "0.13.2", features = ["serde"] } hdrhistogram = "7.5.2" http = "0.2.8" ipnet = "2.7.1" +itertools = "0.10.5" log = "0.4.17" metered = { version = "0.9.0", features = ["serialize"] } moka = { version = "0.9.6", default-features = false, features = ["future"] } notify = "5.0.0" num = "0.4.0" -# TODO: import num_traits from sea-orm so we always have the same version num-traits = "0.2.15" +pagerduty-rs = { version = "0.1.6", default-features = false, features = ["async", "rustls", "sync"] } parking_lot = { version = "0.12.1", features = ["arc_lock"] } proctitle = "0.1.1" -# TODO: regex has several "perf" features that we might want to use regex = "1.7.1" -reqwest = { version = "0.11.13", default-features = false, features = ["json", "tokio-rustls"] } -handlebars = "4.3.6" +reqwest = { version = "0.11.14", default-features = false, features = ["json", "tokio-rustls"] } rustc-hash = "1.1.0" -siwe = "0.5.0" -sentry = { version = "0.29.1", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] } +sentry = { version = "0.29.2", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] } serde = { version = "1.0.152", features = [] } serde_json = { version = "1.0.91", default-features = false, features = ["alloc", "raw_value"] } serde_prometheus = "0.1.6" -# TODO: make sure this time version matches siwe. PR to put this in their prelude +siwe = "0.5.0" time = "0.3.17" -tokio = { version = "1.24.1", features = ["full"] } -# TODO: make sure this uuid version matches sea-orm. 
PR to put this in their prelude
+tokio = { version = "1.24.2", features = ["full"] }
 tokio-stream = { version = "0.1.11", features = ["sync"] }
-toml = "0.5.10"
+toml = "0.6.0"
 tower = "0.4.13"
 tower-http = { version = "0.3.5", features = ["cors", "sensitive-headers"] }
 ulid = { version = "1.0.0", features = ["serde"] }
 url = "2.3.1"
 uuid = "1.2.2"
-itertools = "0.10.5"
-glob = "0.3.1"
diff --git a/web3_proxy/src/app/mod.rs b/web3_proxy/src/app/mod.rs
index 5ec9d856..f41c1210 100644
--- a/web3_proxy/src/app/mod.rs
+++ b/web3_proxy/src/app/mod.rs
@@ -4,8 +4,9 @@ mod ws;
 use crate::app_stats::{ProxyResponseStat, StatEmitter, Web3ProxyStat};
 use crate::block_number::{block_needed, BlockNeeded};
 use crate::config::{AppConfig, TopConfig};
-use crate::frontend::authorization::{Authorization, RequestMetadata};
+use crate::frontend::authorization::{Authorization, RequestMetadata, RpcSecretKey};
 use crate::frontend::errors::FrontendErrorResponse;
+use crate::frontend::rpc_proxy_ws::ProxyMode;
 use crate::jsonrpc::{
     JsonRpcForwardedResponse, JsonRpcForwardedResponseEnum, JsonRpcRequest, JsonRpcRequestEnum,
 };
@@ -24,6 +25,7 @@ use entities::sea_orm_active_enums::LogLevel;
 use entities::user;
 use ethers::core::utils::keccak256;
 use ethers::prelude::{Address, Block, Bytes, Transaction, TxHash, H256, U64};
+use ethers::types::U256;
 use ethers::utils::rlp::{Decodable, Rlp};
 use futures::future::join_all;
 use futures::stream::{FuturesUnordered, StreamExt};
@@ -55,11 +57,12 @@ use tokio::time::{sleep, timeout};
 use ulid::Ulid;

 // TODO: make this customizable?
+// TODO: include GIT_REF in here. i had trouble getting https://docs.rs/vergen/latest/vergen/ to work with a workspace. also .git is in .dockerignore
 pub static APP_USER_AGENT: &str = concat!(
-    "satoshiandkin/",
+    "llamanodes_",
     env!("CARGO_PKG_NAME"),
-    "/",
-    env!("CARGO_PKG_VERSION"),
+    "/v",
+    env!("CARGO_PKG_VERSION")
 );

 /// TODO: allow customizing the request period?
@@ -134,12 +137,14 @@ pub type AnyhowJoinHandle<T> = JoinHandle<anyhow::Result<T>>;
 #[derive(Clone, Debug, Default, From)]
 pub struct AuthorizationChecks {
-    /// database id of the primary user.
+    /// database id of the primary user. 0 if anon
     /// TODO: do we need this? its on the authorization so probably not
     pub user_id: u64,
+    /// the key used (if any)
+    pub rpc_secret_key: Option<RpcSecretKey>,
     /// database id of the rpc key
     /// if this is None, then this request is being rate limited by ip
-    pub rpc_key_id: Option<NonZeroU64>,
+    pub rpc_secret_key_id: Option<NonZeroU64>,
     /// if None, allow unlimited queries. inherited from the user_tier
     pub max_requests_per_period: Option<u64>,
     // if None, allow unlimited concurrent requests. inherited from the user_tier
@@ -183,10 +188,9 @@ pub struct Web3ProxyApp {
     response_cache: ResponseCache,
     // don't drop this or the sender will stop working
     // TODO: broadcast channel instead?
-    head_block_receiver: watch::Receiver<ArcBlock>,
+    watch_consensus_head_receiver: watch::Receiver<ArcBlock>,
     pending_tx_sender: broadcast::Sender<TxStatus>,
     pub config: AppConfig,
-    pub allowed_lag: u64,
     pub db_conn: Option<DatabaseConnection>,
     pub db_replica: Option<DatabaseReplica>,
     /// prometheus metrics
@@ -269,18 +273,14 @@ pub async fn drop_migration_lock(db_conn: &DatabaseConnection) -> Result<(), DbE
     Ok(())
 }

-/// Connect to the database and run migrations
-pub async fn get_migrated_db(
-    db_url: String,
-    min_connections: u32,
-    max_connections: u32,
-) -> anyhow::Result<DatabaseConnection> {
-    // TODO: this seems to fail silently
-    let db_conn = get_db(db_url, min_connections, max_connections).await?;
-
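The new migrate_db below treats the existence of a `migration_lock` table as a cross-process mutex: CREATE TABLE is atomic, so only one process can create it, and everyone else errors until the lock is dropped. A minimal sketch of the idea, assuming a MySQL backend (names simplified; the real function also retries and can optionally override a stale lock):

```rust
use migration::sea_orm::{ConnectionTrait, DatabaseConnection, DbBackend, Statement};

// Whichever process creates the table first holds the "lock"; a second
// CREATE TABLE fails because the table already exists.
async fn try_acquire_migration_lock(db: &DatabaseConnection) -> bool {
    let stmt = Statement::from_string(
        DbBackend::MySql,
        "CREATE TABLE migration_lock (locked boolean DEFAULT true)".to_string(),
    );

    db.execute(stmt).await.is_ok()
}
```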
+/// Be super careful with override_existing_lock! It is very important that only one process is running the migrations at a time!
+pub async fn migrate_db(
+    db_conn: &DatabaseConnection,
+    override_existing_lock: bool,
+) -> Result<(), DbErr> {
     let db_backend = db_conn.get_database_backend();

-    // TODO: put the timestamp into this?
+    // TODO: put the timestamp and hostname into this as columns?
     let create_lock_statement = db_backend.build(
         Table::create()
             .table(Alias::new("migration_lock"))
@@ -290,18 +290,24 @@ pub async fn get_migrated_db(
     loop {
         if Migrator::get_pending_migrations(&db_conn).await?.is_empty() {
             info!("no migrations to apply");
-            return Ok(db_conn);
+            return Ok(());
         }

         // there are migrations to apply
         // acquire a lock
         if let Err(err) = db_conn.execute(create_lock_statement.clone()).await {
-            debug!("Unable to acquire lock. err={:?}", err);
+            if override_existing_lock {
+                warn!("OVERRIDING EXISTING LOCK in 10 seconds! ctrl+c now if other migrations are actually running!");

-            // TODO: exponential backoff with jitter
-            sleep(Duration::from_secs(1)).await;
+                sleep(Duration::from_secs(10)).await
+            } else {
+                debug!("Unable to acquire lock. if you are positive no migration is running, run \"web3_proxy_cli drop_migration_lock\". err={:?}", err);

-            continue;
+                // TODO: exponential backoff with jitter?
+                sleep(Duration::from_secs(1)).await;
+
+                continue;
+            }
         }

         debug!("migration lock acquired");
@@ -314,7 +320,19 @@ pub async fn get_migrated_db(
     drop_migration_lock(&db_conn).await?;

     // return if migrations erred
-    migration_result?;
+    migration_result
+}
+
+/// Connect to the database and run migrations
+pub async fn get_migrated_db(
+    db_url: String,
+    min_connections: u32,
+    max_connections: u32,
+) -> Result<DatabaseConnection, DbErr> {
+    // TODO: this seems to fail silently
+    let db_conn = get_db(db_url, min_connections, max_connections).await?;
+
+    migrate_db(&db_conn, false).await?;

     Ok(db_conn)
 }
@@ -515,7 +533,8 @@ impl Web3ProxyApp {
         };

         // TODO: i don't like doing Block::default here! Change this to "None"?
-        let (head_block_sender, head_block_receiver) = watch::channel(Arc::new(Block::default()));
+        let (watch_consensus_head_sender, watch_consensus_head_receiver) =
+            watch::channel(Arc::new(Block::default()));

         // TODO: will one receiver lagging be okay? how big should this be?
         let (pending_tx_sender, pending_tx_receiver) = broadcast::channel(256);
@@ -552,7 +571,7 @@ impl Web3ProxyApp {
             http_client.clone(),
             vredis_pool.clone(),
             block_map.clone(),
-            Some(head_block_sender),
+            Some(watch_consensus_head_sender),
             top_config.app.min_sum_soft_limit,
             top_config.app.min_synced_rpcs,
             Some(pending_tx_sender.clone()),
@@ -580,6 +599,8 @@ impl Web3ProxyApp {
             vredis_pool.clone(),
             block_map,
             // subscribing to new heads here won't work well. if they are fast, they might be ahead of balanced_rpcs
+            // they also often have low rate limits
+            // however, they are well connected to miners/validators. so maybe using them as a safety check would be good
             None,
             0,
             0,
@@ -683,24 +704,12 @@ impl Web3ProxyApp {
             .time_to_idle(Duration::from_secs(120))
             .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());

-        // TODO: get this out of the toml instead
-        let allowed_lag = match top_config.app.chain_id {
-            1 => 60,
-            137 => 10,
-            250 => 10,
-            _ => {
-                warn!("defaulting allowed lag to 60");
-                60
-            }
-        };
-
         let app = Self {
             config: top_config.app,
-            allowed_lag,
             balanced_rpcs,
             private_rpcs,
             response_cache,
-            head_block_receiver,
+            watch_consensus_head_receiver,
             pending_tx_sender,
             pending_transactions,
             frontend_ip_rate_limiter,
@@ -723,6 +732,10 @@ impl Web3ProxyApp {
         Ok((app, cancellable_handles, important_background_handles).into())
     }

+    pub fn head_block_receiver(&self) -> watch::Receiver<ArcBlock> {
+        self.watch_consensus_head_receiver.clone()
+    }
+
     pub async fn prometheus_metrics(&self) -> String {
         let globals = HashMap::new();
         // TODO: what globals? should this be the hostname or what?
@@ -907,10 +920,10 @@ impl Web3ProxyApp {
         self: &Arc<Self>,
         authorization: Arc<Authorization>,
         request: JsonRpcRequestEnum,
+        proxy_mode: ProxyMode,
     ) -> Result<(JsonRpcForwardedResponseEnum, Vec<Arc<Web3Connection>>), FrontendErrorResponse> {
-        // TODO: this should probably be trace level
-        // // trace!(?request, "proxy_web3_rpc");
+        // trace!(?request, "proxy_web3_rpc");

         // even though we have timeouts on the requests to our backend providers,
         // we need a timeout for the incoming request so that retries don't run forever
@@ -921,7 +934,7 @@ impl Web3ProxyApp {
             JsonRpcRequestEnum::Single(request) => {
                 let (response, rpcs) = timeout(
                     max_time,
-                    self.proxy_web3_rpc_request(&authorization, request),
+                    self.proxy_cached_request(&authorization, request, proxy_mode),
                 )
                 .await??;

@@ -930,7 +943,7 @@ impl Web3ProxyApp {
             JsonRpcRequestEnum::Batch(requests) => {
                 let (responses, rpcs) = timeout(
                     max_time,
-                    self.proxy_web3_rpc_requests(&authorization, requests),
+                    self.proxy_web3_rpc_requests(&authorization, requests, proxy_mode),
                 )
                 .await??;

@@ -947,6 +960,7 @@ impl Web3ProxyApp {
         self: &Arc<Self>,
         authorization: &Arc<Authorization>,
         requests: Vec<JsonRpcRequest>,
+        proxy_mode: ProxyMode,
     ) -> anyhow::Result<(Vec<JsonRpcForwardedResponse>, Vec<Arc<Web3Connection>>)> {
         // TODO: we should probably change ethers-rs to support this directly. they pushed this off to v2 though
         let num_requests = requests.len();
@@ -956,7 +970,7 @@ impl Web3ProxyApp {
         let responses = join_all(
             requests
                 .into_iter()
-                .map(|request| self.proxy_web3_rpc_request(authorization, request))
+                .map(|request| self.proxy_cached_request(authorization, request, proxy_mode))
                 .collect::<Vec<_>>(),
         )
         .await;
@@ -1000,10 +1014,11 @@ impl Web3ProxyApp {
     }

     #[measure([ErrorCount, HitCount, ResponseTime, Throughput])]
-    async fn proxy_web3_rpc_request(
+    async fn proxy_cached_request(
         self: &Arc<Self>,
         authorization: &Arc<Authorization>,
         mut request: JsonRpcRequest,
+        proxy_mode: ProxyMode,
     ) -> anyhow::Result<(JsonRpcForwardedResponse, Vec<Arc<Web3Connection>>)> {
         // trace!("Received request: {:?}", request);
@@ -1083,8 +1098,15 @@ impl Web3ProxyApp {
             | "shh_uninstallFilter"
             | "shh_version") => {
                 // TODO: client error stat
-                // TODO: proper error code
-                return Err(anyhow::anyhow!("method unsupported: {}", method));
+                // TODO: what error code?
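The JSON-RPC 2.0 spec reserves code -32601 for "Method not found", which is the likely answer to this TODO (and the code this PR already uses for eth_subscribe further down). A minimal sketch of the spec-shaped error object, using only serde_json (the helper name is illustrative, not from this codebase):

```rust
use serde_json::{json, Value};

// Shape of a spec-compliant "method not found" JSON-RPC 2.0 error response.
// -32601 is the reserved code for unknown/unsupported methods.
fn method_not_found(method: &str, id: Value) -> Value {
    json!({
        "jsonrpc": "2.0",
        "id": id,
        "error": {
            "code": -32601,
            "message": format!("method unsupported: {}", method),
        }
    })
}
```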
+ return Ok(( + JsonRpcForwardedResponse::from_string( + format!("method unsupported: {}", method), + None, + Some(request_id), + ), + vec![], + )); } // TODO: implement these commands method @ ("eth_getFilterChanges" @@ -1094,7 +1116,15 @@ impl Web3ProxyApp { | "eth_newPendingTransactionFilter" | "eth_uninstallFilter") => { // TODO: unsupported command stat - return Err(anyhow::anyhow!("not yet implemented: {}", method)); + // TODO: what error code? + return Ok(( + JsonRpcForwardedResponse::from_string( + format!("not yet implemented: {}", method), + None, + Some(request_id), + ), + vec![], + )); } // some commands can use local data or caches "eth_accounts" => { @@ -1122,18 +1152,14 @@ impl Web3ProxyApp { // TODO: eth_sendPrivateTransaction (https://docs.flashbots.net/flashbots-auction/searchers/advanced/rpc-endpoint#eth_sendprivatetransaction) "eth_coinbase" => { // no need for serving coinbase - // we could return a per-user payment address here, but then we might leak that to dapps // no stats on this. its cheap json!(Address::zero()) } - /* - // erigon was giving bad estimates. but now it doesn't need it "eth_estimateGas" => { - // TODO: eth_estimateGas using anvil? - // TODO: modify the block requested? let mut response = self .balanced_rpcs - .try_send_best_upstream_server( + .try_proxy_connection( + proxy_mode, authorization, request, Some(&request_metadata), @@ -1141,11 +1167,9 @@ impl Web3ProxyApp { ) .await?; - let parsed_gas_estimate = if let Some(gas_estimate) = response.result.take() { - let parsed_gas_estimate: U256 = serde_json::from_str(gas_estimate.get()) - .context("gas estimate result is not an U256")?; - - parsed_gas_estimate + let mut gas_estimate: U256 = if let Some(gas_estimate) = response.result.take() { + serde_json::from_str(gas_estimate.get()) + .context("gas estimate result is not an U256")? } else { // i think this is always an error response let rpcs = request_metadata.backend_requests.lock().clone(); @@ -1153,13 +1177,21 @@ impl Web3ProxyApp { return Ok((response, rpcs)); }; - // increase by 1.01% - let parsed_gas_estimate = - parsed_gas_estimate * U256::from(101_010) / U256::from(100_000); + let gas_increase = + if let Some(gas_increase_percent) = self.config.gas_increase_percent { + let gas_increase = gas_estimate * gas_increase_percent / U256::from(100); - json!(parsed_gas_estimate) + let min_gas_increase = self.config.gas_increase_min.unwrap_or_default(); + + gas_increase.max(min_gas_increase) + } else { + self.config.gas_increase_min.unwrap_or_default() + }; + + gas_estimate += gas_increase; + + json!(gas_estimate) } - */ // TODO: eth_gasPrice that does awesome magic to predict the future "eth_hashrate" => { // no stats on this. its cheap @@ -1172,22 +1204,32 @@ impl Web3ProxyApp { // TODO: eth_sendBundle (flashbots command) // broadcast transactions to all private rpcs at once "eth_sendRawTransaction" => { + // TODO: how should we handle private_mode here? + let default_num = match proxy_mode { + // TODO: how many balanced rpcs should we send to? configurable? percentage of total? + ProxyMode::Best => Some(4), + ProxyMode::Fastest(0) => None, + // TODO: how many balanced rpcs should we send to? configurable? percentage of total? + // TODO: what if we do 2 per tier? 
we want to blast the third party rpcs + // TODO: maybe having the third party rpcs in their own Web3Connections would be good for this + ProxyMode::Fastest(x) => Some(x * 4), + ProxyMode::Versus => None, + }; + let (private_rpcs, num) = if let Some(private_rpcs) = self.private_rpcs.as_ref() { if authorization.checks.private_txs { + // if we are sending the transaction privately, no matter the proxy_mode, we send to ALL private rpcs (private_rpcs, None) } else { - // TODO: how many balanced rpcs should we send to? configurable? percentage of total? - // TODO: what if we do 2 per tier? we want to blast the third party rpcs - // TODO: maybe having the third party rpcs would be good for this - (&self.balanced_rpcs, Some(2)) + (&self.balanced_rpcs, default_num) } } else { - (&self.balanced_rpcs, Some(2)) + (&self.balanced_rpcs, default_num) }; // try_send_all_upstream_servers puts the request id into the response. no need to do that ourselves here. let mut response = private_rpcs - .try_send_all_upstream_servers( + .try_send_all_synced_connections( authorization, &request, Some(request_metadata.clone()), @@ -1283,13 +1325,23 @@ impl Web3ProxyApp { json!(false) } "eth_subscribe" => { - return Err(anyhow::anyhow!( - "notifications not supported. eth_subscribe is only available over a websocket" + return Ok(( + JsonRpcForwardedResponse::from_str( + "notifications not supported. eth_subscribe is only available over a websocket", + Some(-32601), + Some(request_id), + ), + vec![], )); } "eth_unsubscribe" => { - return Err(anyhow::anyhow!( - "notifications not supported. eth_unsubscribe is only available over a websocket" + return Ok(( + JsonRpcForwardedResponse::from_str( + "notifications not supported. eth_unsubscribe is only available over a websocket", + Some(-32601), + Some(request_id), + ), + vec![], )); } "net_listening" => { @@ -1298,7 +1350,8 @@ impl Web3ProxyApp { json!(true) } "net_peerCount" => { - // emit stats + // no stats on this. its cheap + // TODO: do something with proxy_mode here? self.balanced_rpcs.num_synced_rpcs().into() } "web3_clientVersion" => { @@ -1312,10 +1365,18 @@ impl Web3ProxyApp { Some(serde_json::Value::Array(params)) => { // TODO: make a struct and use serde conversion to clean this up if params.len() != 1 || !params[0].is_string() { - // TODO: this needs the correct error code in the response - return Err(anyhow::anyhow!("invalid request")); + // TODO: what error code? + return Ok(( + JsonRpcForwardedResponse::from_str( + "Invalid request", + Some(-32600), + Some(request_id), + ), + vec![], + )); } + // TODO: don't return with ? here. send a jsonrpc invalid request let param = Bytes::from_str( params[0] .as_str() @@ -1329,18 +1390,35 @@ impl Web3ProxyApp { _ => { // TODO: this needs the correct error code in the response // TODO: emit stat? - return Err(anyhow::anyhow!("invalid request")); + return Ok(( + JsonRpcForwardedResponse::from_str( + "invalid request", + None, + Some(request_id), + ), + vec![], + )); } } } + "test" => { + return Ok(( + JsonRpcForwardedResponse::from_str( + "The method test does not exist/is not available.", + Some(-32601), + Some(request_id), + ), + vec![], + )); + } // anything else gets sent to backend rpcs and cached method => { // emit stats - // TODO: if no servers synced, wait for them to be synced? - let head_block = self + // TODO: if no servers synced, wait for them to be synced? 
probably better to error and let haproxy retry another server
+                let head_block_num = self
                    .balanced_rpcs
-                    .head_block()
+                    .head_block_num()
                    .context("no servers synced")?;

                // we do this check before checking caches because it might modify the request params
@@ -1350,7 +1428,7 @@ impl Web3ProxyApp {
                        authorization,
                        method,
                        request.params.as_mut(),
-                        head_block.number(),
+                        head_block_num,
                        &self.balanced_rpcs,
                    )
                    .await?
@@ -1404,11 +1482,12 @@ impl Web3ProxyApp {
                        .try_get_with(cache_key, async move {
                            // TODO: retry some failures automatically!
                            // TODO: try private_rpcs if all the balanced_rpcs fail!
-                            // TODO: put the hash here instead?
+                            // TODO: put the hash here instead of the block number? its in the request already.
+
                            let mut response = self
                                .balanced_rpcs
-                                .try_send_best_upstream_server(
-                                    self.allowed_lag,
+                                .try_proxy_connection(
+                                    proxy_mode,
                                    &authorization,
                                    request,
                                    Some(&request_metadata),
@@ -1433,18 +1512,14 @@ impl Web3ProxyApp {
                        })?
                } else {
                    self.balanced_rpcs
-                        .try_send_best_upstream_server(
-                            self.allowed_lag,
+                        .try_proxy_connection(
+                            proxy_mode,
                            &authorization,
                            request,
                            Some(&request_metadata),
                            None,
                        )
-                        .await
-                        .map_err(|err| {
-                            // TODO: emit a stat for an error
-                            anyhow::anyhow!("error while forwarding response: {}", err)
-                        })?
+                        .await?
                }
            };
diff --git a/web3_proxy/src/app/ws.rs b/web3_proxy/src/app/ws.rs
index e6ac30c0..582ea814 100644
--- a/web3_proxy/src/app/ws.rs
+++ b/web3_proxy/src/app/ws.rs
@@ -50,7 +50,7 @@ impl Web3ProxyApp {
         match request_json.params.as_ref() {
             Some(x) if x == &json!(["newHeads"]) => {
                 let authorization = authorization.clone();
-                let head_block_receiver = self.head_block_receiver.clone();
+                let head_block_receiver = self.watch_consensus_head_receiver.clone();
                 let stat_sender = self.stat_sender.clone();

                 trace!("newHeads subscription {:?}", subscription_id);
diff --git a/web3_proxy/src/app_stats.rs b/web3_proxy/src/app_stats.rs
index 204effd5..681dfcea 100644
--- a/web3_proxy/src/app_stats.rs
+++ b/web3_proxy/src/app_stats.rs
@@ -36,7 +36,7 @@ impl ProxyResponseStat {
     fn key(&self) -> ProxyResponseAggregateKey {
         // include either the rpc_key_id or the origin
         let (mut rpc_key_id, origin) = match (
-            self.authorization.checks.rpc_key_id,
+            self.authorization.checks.rpc_secret_key_id,
             &self.authorization.origin,
         ) {
             (Some(rpc_key_id), _) => {
diff --git a/web3_proxy/src/bin/wait_for_sync.rs b/web3_proxy/src/bin/wait_for_sync.rs
index a214d67b..c13d5fe5 100644
--- a/web3_proxy/src/bin/wait_for_sync.rs
+++ b/web3_proxy/src/bin/wait_for_sync.rs
@@ -1,27 +1,39 @@
-// TODO: websockets instead of http
+// TODO: support websockets

 use anyhow::Context;
 use argh::FromArgs;
 use chrono::Utc;
+use ethers::types::U64;
 use ethers::types::{Block, TxHash};
 use log::info;
 use log::warn;
 use reqwest::Client;
 use serde::Deserialize;
 use serde_json::json;
+use std::sync::atomic::{AtomicU32, Ordering};
 use tokio::time::sleep;
 use tokio::time::Duration;

 #[derive(Debug, FromArgs)]
 /// Command line interface for admins to interact with web3_proxy
 pub struct CliConfig {
-    /// the RPC to check
+    /// the HTTP RPC to check
     #[argh(option, default = "\"http://localhost:8545\".to_string()")]
     pub check_url: String,

-    /// the RPC to compare to
-    #[argh(option, default = "\"https://eth.llamarpc.com\".to_string()")]
-    pub compare_url: String,
+    /// the HTTP RPC to compare against. defaults to LlamaNodes public RPC
+    #[argh(option)]
+    pub compare_url: Option<String>,
+
+    /// how many seconds to wait for sync.
+    /// Defaults to waiting forever.
+    /// if the wait is exceeded, will exit with code 2
+    #[argh(option)]
+    pub max_wait: Option<u64>,
+
+    /// require a specific chain id (for extra safety)
+    #[argh(option)]
+    pub chain_id: Option<u64>,
 }

 #[tokio::main]
@@ -38,26 +50,73 @@ async fn main() -> anyhow::Result<()> {
     let cli_config: CliConfig = argh::from_env();

-    let json_request = json!({
-        "id": "1",
-        "jsonrpc": "2.0",
-        "method": "eth_getBlockByNumber",
-        "params": [
-            "latest",
-            false,
-        ],
-    });
-
     let client = reqwest::Client::new();

-    // TODO: make sure the chain ids match
-    // TODO: automatic compare_url based on the chain id
+    let check_url = cli_config.check_url;
+
+    // make sure the chain ids match
+    let check_id = get_chain_id(&check_url, &client)
+        .await
+        .context("unknown chain id for check_url")?;
+
+    if let Some(chain_id) = cli_config.chain_id {
+        if chain_id != check_id {
+            return Err(anyhow::anyhow!(
+                "chain_id of check_url is wrong! Need {}. Found {}",
+                chain_id,
+                check_id,
+            ));
+        }
+    }
+
+    let compare_url: String = match cli_config.compare_url {
+        Some(x) => x,
+        None => match check_id {
+            1 => "https://eth.llamarpc.com",
+            137 => "https://polygon.llamarpc.com",
+            _ => {
+                return Err(anyhow::anyhow!(
+                    "--compare-url required for chain {}",
+                    check_id
+                ))
+            }
+        }
+        .to_string(),
+    };
+
+    info!(
+        "comparing {} to {} (chain {})",
+        check_url, compare_url, check_id
+    );
+
+    let compare_id = get_chain_id(&compare_url, &client)
+        .await
+        .context("unknown chain id for compare_url")?;
+
+    if check_id != compare_id {
+        return Err(anyhow::anyhow!(
+            "chain_id does not match! Need {}. Found {}",
+            check_id,
+            compare_id,
+        ));
+    }
+
+    // start ids at 2 because id 1 was checking the chain id
+    let counter = AtomicU32::new(2);
+
+    let start = tokio::time::Instant::now();

     loop {
-        match main_loop(&cli_config, &client, &json_request).await {
+        match main_loop(&check_url, &compare_url, &client, &counter).await {
             Ok(()) => break,
             Err(err) => {
                 warn!("{:?}", err);
+
+                if let Some(max_wait) = cli_config.max_wait {
+                    if max_wait == 0 || start.elapsed().as_secs() > max_wait {
+                        std::process::exit(2);
+                    }
+                }
+
                 sleep(Duration::from_secs(10)).await;
             }
         }
@@ -66,38 +125,77 @@ async fn main() -> anyhow::Result<()> {
     Ok(())
 }

+#[derive(Deserialize)]
+struct JsonRpcChainIdResult {
+    result: U64,
+}
+
+async fn get_chain_id(rpc: &str, client: &reqwest::Client) -> anyhow::Result<u64> {
+    let get_chain_id_request = json!({
+        "id": "1",
+        "jsonrpc": "2.0",
+        "method": "eth_chainId",
+    });
+
+    // TODO: loop until chain id is found?
+    let check_result = client
+        .post(rpc)
+        .json(&get_chain_id_request)
+        .send()
+        .await
+        .context("failed querying chain id")?
+        .json::<JsonRpcChainIdResult>()
+        .await
+        .context("failed parsing chain id")?
+        .result
+        .as_u64();
+
+    Ok(check_result)
+}
+
 #[derive(Deserialize)]
 struct JsonRpcBlockResult {
     result: Block<TxHash>,
 }

 async fn main_loop(
-    cli_config: &CliConfig,
+    check_url: &str,
+    compare_url: &str,
     client: &Client,
-    json_request: &serde_json::Value,
+    counter: &AtomicU32,
 ) -> anyhow::Result<()> {
-    let check_result = client
-        .post(&cli_config.check_url)
-        .json(json_request)
+    // TODO: have a real id here that increments every call?
+    let get_block_number_request = json!({
+        "id": counter.fetch_add(1, Ordering::SeqCst),
+        "jsonrpc": "2.0",
+        "method": "eth_getBlockByNumber",
+        "params": [
+            "latest",
+            false,
+        ],
+    });
+
+    let check_block = client
+        .post(check_url)
+        .json(&get_block_number_request)
         .send()
         .await
         .context("querying check block")?
         .json::<JsonRpcBlockResult>()
         .await
-        .context("parsing check block")?;
+        .context("parsing check block")?
+        .result;

-    let compare_result = client
-        .post(&cli_config.compare_url)
-        .json(json_request)
+    let compare_block = client
+        .post(compare_url)
+        .json(&get_block_number_request)
         .send()
         .await
         .context("querying compare block")?
         .json::<JsonRpcBlockResult>()
         .await
-        .context("parsing compare block")?;
-
-    let check_block = check_result.result;
-    let compare_block = compare_result.result;
+        .context("parsing compare block")?
+        .result;

     let check_number = check_block.number.context("no check block number")?;
     let compare_number = compare_block.number.context("no compare block number")?;
diff --git a/web3_proxy/src/bin/web3_proxy.rs b/web3_proxy/src/bin/web3_proxy.rs
deleted file mode 100644
index c61b5476..00000000
--- a/web3_proxy/src/bin/web3_proxy.rs
+++ /dev/null
@@ -1,399 +0,0 @@
-//! Web3_proxy is a fast caching and load balancing proxy for web3 (Ethereum or similar) JsonRPC servers.
-//!
-//! Signed transactions (eth_sendRawTransaction) are sent in parallel to the configured private RPCs (eden, ethermine, flashbots, etc.).
-//!
-//! All other requests are sent to an RPC server on the latest block (alchemy, moralis, rivet, your own node, or one of many other providers).
-//! If multiple servers are in sync, the fastest server is prioritized. Since the fastest server is most likely to serve requests, slow servers are unlikely to ever get any requests.
-
-//#![warn(missing_docs)]
-#![forbid(unsafe_code)]
-
-use anyhow::Context;
-use futures::StreamExt;
-use log::{debug, error, info, warn};
-use num::Zero;
-use parking_lot::deadlock;
-use std::fs;
-use std::path::Path;
-use std::sync::atomic::{self, AtomicUsize};
-use std::thread;
-use tokio::runtime;
-use tokio::sync::broadcast;
-use tokio::time::Duration;
-use web3_proxy::app::{flatten_handle, flatten_handles, Web3ProxyApp};
-use web3_proxy::config::{CliConfig, TopConfig};
-use web3_proxy::{frontend, metrics_frontend};
-
-fn run(
-    shutdown_sender: broadcast::Sender<()>,
-    cli_config: CliConfig,
-    top_config: TopConfig,
-) -> anyhow::Result<()> {
-    debug!("{:?}", cli_config);
-    debug!("{:?}", top_config);
-
-    let mut shutdown_receiver = shutdown_sender.subscribe();
-
-    // spawn a thread for deadlock detection
-    // TODO: disable this feature during release mode and things should go faster
-    thread::spawn(move || loop {
-        thread::sleep(Duration::from_secs(10));
-        let deadlocks = deadlock::check_deadlock();
-        if deadlocks.is_empty() {
-            continue;
-        }
-
-        println!("{} deadlocks detected", deadlocks.len());
-        for (i, threads) in deadlocks.iter().enumerate() {
-            println!("Deadlock #{}", i);
-            for t in threads {
-                println!("Thread Id {:#?}", t.thread_id());
-                println!("{:#?}", t.backtrace());
-            }
-        }
-    });
-
-    // set up tokio's async runtime
-    let mut rt_builder = runtime::Builder::new_multi_thread();
-
-    let chain_id = top_config.app.chain_id;
-    rt_builder.enable_all().thread_name_fn(move || {
-        static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
-        // TODO: what ordering?
i think we want seqcst so that these all happen in order, but that might be stricter than we really need - let worker_id = ATOMIC_ID.fetch_add(1, atomic::Ordering::SeqCst); - // TODO: i think these max at 15 characters - format!("web3-{}-{}", chain_id, worker_id) - }); - - if cli_config.workers > 0 { - rt_builder.worker_threads(cli_config.workers); - } - - // start tokio's async runtime - let rt = rt_builder.build()?; - - let num_workers = rt.metrics().num_workers(); - info!("num_workers: {}", num_workers); - - rt.block_on(async { - let app_frontend_port = cli_config.port; - let app_prometheus_port = cli_config.prometheus_port; - - // start the main app - let mut spawned_app = - Web3ProxyApp::spawn(top_config, num_workers, shutdown_sender.subscribe()).await?; - - let frontend_handle = - tokio::spawn(frontend::serve(app_frontend_port, spawned_app.app.clone())); - - let prometheus_handle = tokio::spawn(metrics_frontend::serve( - spawned_app.app, - app_prometheus_port, - )); - - // if everything is working, these should both run forever - tokio::select! { - x = flatten_handles(spawned_app.app_handles) => { - match x { - Ok(_) => info!("app_handle exited"), - Err(e) => { - return Err(e); - } - } - } - x = flatten_handle(frontend_handle) => { - match x { - Ok(_) => info!("frontend exited"), - Err(e) => { - return Err(e); - } - } - } - x = flatten_handle(prometheus_handle) => { - match x { - Ok(_) => info!("prometheus exited"), - Err(e) => { - return Err(e); - } - } - } - x = tokio::signal::ctrl_c() => { - match x { - Ok(_) => info!("quiting from ctrl-c"), - Err(e) => { - return Err(e.into()); - } - } - } - x = shutdown_receiver.recv() => { - match x { - Ok(_) => info!("quiting from shutdown receiver"), - Err(e) => { - return Err(e.into()); - } - } - } - }; - - // one of the handles stopped. send a value so the others know to shut down - if let Err(err) = shutdown_sender.send(()) { - warn!("shutdown sender err={:?}", err); - }; - - // wait for things like saving stats to the database to complete - info!("waiting on important background tasks"); - let mut background_errors = 0; - while let Some(x) = spawned_app.background_handles.next().await { - match x { - Err(e) => { - error!("{:?}", e); - background_errors += 1; - } - Ok(Err(e)) => { - error!("{:?}", e); - background_errors += 1; - } - Ok(Ok(_)) => continue, - } - } - - if background_errors.is_zero() { - info!("finished"); - } else { - // TODO: collect instead? 
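One way to act on the "collect instead?" TODO above would be to gather the failures rather than count them; a hedged sketch under simplified types (the real background handles carry more specific results):

```rust
use futures::stream::{FuturesUnordered, StreamExt};
use tokio::task::JoinHandle;

// Drain background handles and keep every error so they can all be reported
// (or returned) at the end, instead of only counting them.
async fn drain_handles(
    mut handles: FuturesUnordered<JoinHandle<anyhow::Result<()>>>,
) -> Vec<anyhow::Error> {
    let mut errors = Vec::new();

    while let Some(joined) = handles.next().await {
        match joined {
            // the task panicked or was cancelled
            Err(join_err) => errors.push(join_err.into()),
            // the task ran but returned an error
            Ok(Err(task_err)) => errors.push(task_err),
            Ok(Ok(())) => continue,
        }
    }

    errors
}
```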
- error!("finished with errors!") - } - - Ok(()) - }) -} - -fn main() -> anyhow::Result<()> { - // if RUST_LOG isn't set, configure a default - let rust_log = match std::env::var("RUST_LOG") { - Ok(x) => x, - Err(_) => "info,ethers=debug,redis_rate_limit=debug,web3_proxy=debug".to_string(), - }; - - // this probably won't matter for us in docker, but better safe than sorry - fdlimit::raise_fd_limit(); - - // initial configuration from flags - let cli_config: CliConfig = argh::from_env(); - - // convert to absolute path so error logging is most helpful - let config_path = Path::new(&cli_config.config) - .canonicalize() - .context(format!( - "checking full path of {} and {}", - ".", // TODO: get cwd somehow - cli_config.config - ))?; - - // advanced configuration is on disk - let top_config: String = fs::read_to_string(config_path.clone()) - .context(format!("reading config at {}", config_path.display()))?; - let top_config: TopConfig = toml::from_str(&top_config) - .context(format!("parsing config at {}", config_path.display()))?; - - // TODO: this doesn't seem to do anything - proctitle::set_title(format!("web3_proxy-{}", top_config.app.chain_id)); - - let logger = env_logger::builder().parse_filters(&rust_log).build(); - - let max_level = logger.filter(); - - // connect to sentry for error reporting - // if no sentry, only log to stdout - let _sentry_guard = if let Some(sentry_url) = top_config.app.sentry_url.clone() { - let logger = sentry::integrations::log::SentryLogger::with_dest(logger); - - log::set_boxed_logger(Box::new(logger)).unwrap(); - - let guard = sentry::init(( - sentry_url, - sentry::ClientOptions { - release: sentry::release_name!(), - // TODO: Set this a to lower value (from config) in production - traces_sample_rate: 1.0, - ..Default::default() - }, - )); - - Some(guard) - } else { - log::set_boxed_logger(Box::new(logger)).unwrap(); - - None - }; - - log::set_max_level(max_level); - - // we used to do this earlier, but now we attach sentry - debug!("CLI config @ {:#?}", cli_config.config); - - // tokio has code for catching ctrl+c so we use that - // this shutdown sender is currently only used in tests, but we might make a /shutdown endpoint or something - // we do not need this receiver. new receivers are made by `shutdown_sender.subscribe()` - let (shutdown_sender, _) = broadcast::channel(1); - - run(shutdown_sender, cli_config, top_config) -} - -#[cfg(test)] -mod tests { - use ethers::{ - prelude::{Http, Provider, U256}, - utils::Anvil, - }; - use hashbrown::HashMap; - use std::env; - - use web3_proxy::{ - config::{AppConfig, Web3ConnectionConfig}, - rpcs::blockchain::ArcBlock, - }; - - use super::*; - - #[tokio::test] - async fn it_works() { - // TODO: move basic setup into a test fixture - let path = env::var("PATH").unwrap(); - - println!("path: {}", path); - - // TODO: how should we handle logs in this? - // TODO: option for super verbose logs - std::env::set_var("RUST_LOG", "info,web3_proxy=debug"); - - let _ = env_logger::builder().is_test(true).try_init(); - - let anvil = Anvil::new().spawn(); - - println!("Anvil running at `{}`", anvil.endpoint()); - - let anvil_provider = Provider::::try_from(anvil.endpoint()).unwrap(); - - // mine a block because my code doesn't like being on block 0 - // TODO: make block 0 okay? is it okay now? 
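For context, `evm_mine` is a test-node method (supported by Anvil, Hardhat, and Ganache) that mines one block on demand; the call below uses it to move the chain off block 0. The same request as a standalone helper (the endpoint URL is an assumption; Anvil defaults to 127.0.0.1:8545):

```rust
use ethers::prelude::{Http, Provider, U256};

// Mine a single block on a local Anvil node so tests don't run against block 0.
async fn mine_one_block() -> anyhow::Result<()> {
    let provider = Provider::<Http>::try_from("http://127.0.0.1:8545")?;

    // evm_mine takes no params and, as used in this test, returns a U256
    let _: U256 = provider.request("evm_mine", None::<()>).await?;

    Ok(())
}
```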
- let _: U256 = anvil_provider - .request("evm_mine", None::<()>) - .await - .unwrap(); - - // make a test CliConfig - let cli_config = CliConfig { - port: 0, - prometheus_port: 0, - workers: 4, - config: "./does/not/exist/test.toml".to_string(), - cookie_key_filename: "./does/not/exist/development_cookie_key".to_string(), - }; - - // make a test TopConfig - // TODO: load TopConfig from a file? CliConfig could have `cli_config.load_top_config`. would need to inject our endpoint ports - let top_config = TopConfig { - app: AppConfig { - chain_id: 31337, - default_user_max_requests_per_period: Some(6_000_000), - min_sum_soft_limit: 1, - min_synced_rpcs: 1, - public_requests_per_period: Some(1_000_000), - response_cache_max_bytes: 10_usize.pow(7), - redirect_public_url: Some("example.com/".to_string()), - redirect_rpc_key_url: Some("example.com/{{rpc_key_id}}".to_string()), - ..Default::default() - }, - balanced_rpcs: HashMap::from([ - ( - "anvil".to_string(), - Web3ConnectionConfig { - disabled: false, - display_name: None, - url: anvil.endpoint(), - block_data_limit: None, - soft_limit: 100, - hard_limit: None, - tier: 0, - subscribe_txs: Some(false), - extra: Default::default(), - }, - ), - ( - "anvil_ws".to_string(), - Web3ConnectionConfig { - disabled: false, - display_name: None, - url: anvil.ws_endpoint(), - block_data_limit: None, - soft_limit: 100, - hard_limit: None, - tier: 0, - subscribe_txs: Some(false), - extra: Default::default(), - }, - ), - ]), - private_rpcs: None, - extra: Default::default(), - }; - - let (shutdown_sender, _) = broadcast::channel(1); - - // spawn another thread for running the app - // TODO: allow launching into the local tokio runtime instead of creating a new one? - let handle = { - let shutdown_sender = shutdown_sender.clone(); - - thread::spawn(move || run(shutdown_sender, cli_config, top_config)) - }; - - // TODO: do something to the node. 
query latest block, mine another block, query again
-        let proxy_provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
-
-        let anvil_result = anvil_provider
-            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
-            .await
-            .unwrap()
-            .unwrap();
-        let proxy_result = proxy_provider
-            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
-            .await
-            .unwrap()
-            .unwrap();
-
-        assert_eq!(anvil_result, proxy_result);
-
-        let first_block_num = anvil_result.number.unwrap();
-
-        let _: U256 = anvil_provider
-            .request("evm_mine", None::<()>)
-            .await
-            .unwrap();
-
-        let anvil_result = anvil_provider
-            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
-            .await
-            .unwrap()
-            .unwrap();
-        let proxy_result = proxy_provider
-            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
-            .await
-            .unwrap()
-            .unwrap();
-
-        assert_eq!(anvil_result, proxy_result);
-
-        let second_block_num = anvil_result.number.unwrap();
-
-        assert_eq!(first_block_num, second_block_num - 1);
-
-        // tell the test app to shut down
-        shutdown_sender.send(()).unwrap();
-
-        println!("waiting for shutdown...");
-        // TODO: panic if a timeout is reached
-        handle.join().unwrap().unwrap();
-    }
-}
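The next hunk adds a `create_key` subcommand. Based on the argh definitions that follow, an invocation would presumably look like this (flags inferred from the field names, not shown in the diff; the user row must already exist in the database):

```sh
# positional address, optional --rpc-secret-key and --description
web3_proxy_cli create_key 0x0000000000000000000000000000000000000001 --description "load testing"
```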
diff --git a/web3_proxy/src/bin/web3_proxy_cli/create_key.rs b/web3_proxy/src/bin/web3_proxy_cli/create_key.rs
new file mode 100644
index 00000000..3afee516
--- /dev/null
+++ b/web3_proxy/src/bin/web3_proxy_cli/create_key.rs
@@ -0,0 +1,77 @@
+use anyhow::Context;
+use argh::FromArgs;
+use entities::{rpc_key, user};
+use ethers::prelude::Address;
+use log::info;
+use migration::sea_orm::{self, ActiveModelTrait, ColumnTrait, EntityTrait, QueryFilter};
+use ulid::Ulid;
+use uuid::Uuid;
+use web3_proxy::frontend::authorization::RpcSecretKey;
+
+#[derive(FromArgs, PartialEq, Debug, Eq)]
+/// Create a new user and api key
+#[argh(subcommand, name = "create_key")]
+pub struct CreateKeySubCommand {
+    /// the user's ethereum address or descriptive string.
+    /// If a string is given, it will be converted to hex and potentially truncated.
+    /// Users from strings are only for testing since they won't be able to log in.
+    #[argh(positional)]
+    address: String,
+
+    /// the user's api ULID or UUID key.
+    /// If none given, one will be created.
+    #[argh(option)]
+    rpc_secret_key: Option<RpcSecretKey>,
+
+    /// an optional short description of the key's purpose.
+    #[argh(option)]
+    description: Option<String>,
+}
+
+impl CreateKeySubCommand {
+    pub async fn main(self, db: &sea_orm::DatabaseConnection) -> anyhow::Result<()> {
+        // TODO: would be nice to use the fixed array instead of a Vec in the entities
+        // take a simple String. If it starts with 0x, parse as address. otherwise convert ascii to hex
+        let address: Vec<u8> = if self.address.starts_with("0x") {
+            let address = self.address.parse::<Address>()?;
+
+            address.to_fixed_bytes().into()
+        } else {
+            // TODO: allow ENS
+            // left pad and truncate the string
+            let address = &format!("{:\x00>20}", self.address)[0..20];
+
+            // convert the string to bytes
+            let bytes = address.as_bytes();
+
+            // convert the slice to a Vec
+            bytes.try_into().expect("Bytes can always be a Vec")
+        };
+
+        // TODO: get existing or create a new one
+        let u = user::Entity::find()
+            .filter(user::Column::Address.eq(address))
+            .one(db)
+            .await?
+            .context("No user found with that address")?;
+
+        info!("user #{}", u.id);
+
+        let rpc_secret_key = self.rpc_secret_key.unwrap_or_else(RpcSecretKey::new);
+
+        // create a key for the new user
+        let uk = rpc_key::ActiveModel {
+            user_id: sea_orm::Set(u.id),
+            secret_key: sea_orm::Set(rpc_secret_key.into()),
+            description: sea_orm::Set(self.description),
+            ..Default::default()
+        };
+
+        let _uk = uk.save(db).await.context("Failed saving new user key")?;
+
+        info!("user key as ULID: {}", Ulid::from(rpc_secret_key));
+        info!("user key as UUID: {}", Uuid::from(rpc_secret_key));
+
+        Ok(())
+    }
+}
diff --git a/web3_proxy/src/bin/web3_proxy_cli/daemon.rs b/web3_proxy/src/bin/web3_proxy_cli/daemon.rs
new file mode 100644
index 00000000..9019592a
--- /dev/null
+++ b/web3_proxy/src/bin/web3_proxy_cli/daemon.rs
@@ -0,0 +1,309 @@
+#![forbid(unsafe_code)]
+
+use argh::FromArgs;
+use futures::StreamExt;
+use log::{error, info, warn};
+use num::Zero;
+use tokio::sync::broadcast;
+use web3_proxy::app::{flatten_handle, flatten_handles, Web3ProxyApp};
+use web3_proxy::config::TopConfig;
+use web3_proxy::{frontend, metrics_frontend};
+
+/// start the main proxy daemon
+#[derive(FromArgs, PartialEq, Debug, Eq)]
+#[argh(subcommand, name = "proxyd")]
+pub struct ProxydSubCommand {
+    /// path to a toml of rpc servers
+    /// what port the proxy should listen on
+    #[argh(option, default = "8544")]
+    pub port: u16,
+
+    /// what port the proxy should expose prometheus stats on
+    #[argh(option, default = "8543")]
+    pub prometheus_port: u16,
+}
+
+impl ProxydSubCommand {
+    pub async fn main(self, top_config: TopConfig, num_workers: usize) -> anyhow::Result<()> {
+        let (shutdown_sender, _) = broadcast::channel(1);
+
+        run(
+            top_config,
+            self.port,
+            self.prometheus_port,
+            num_workers,
+            shutdown_sender,
+        )
+        .await
+    }
+}
+
+async fn run(
+    top_config: TopConfig,
+    frontend_port: u16,
+    prometheus_port: u16,
+    num_workers: usize,
+    shutdown_sender: broadcast::Sender<()>,
+) -> anyhow::Result<()> {
+    // tokio has code for catching ctrl+c so we use that
+    // this shutdown sender is currently only used in tests, but we might make a /shutdown endpoint or something
+    // we do not need this receiver.
new receivers are made by `shutdown_sender.subscribe()` + + let app_frontend_port = frontend_port; + let app_prometheus_port = prometheus_port; + let mut shutdown_receiver = shutdown_sender.subscribe(); + + // start the main app + let mut spawned_app = + Web3ProxyApp::spawn(top_config, num_workers, shutdown_sender.subscribe()).await?; + + // start the prometheus metrics port + let prometheus_handle = tokio::spawn(metrics_frontend::serve( + spawned_app.app.clone(), + app_prometheus_port, + )); + + // wait until the app has seen its first consensus head block + // TODO: if backups were included, wait a little longer + let _ = spawned_app.app.head_block_receiver().changed().await; + + // start the frontend port + let frontend_handle = tokio::spawn(frontend::serve(app_frontend_port, spawned_app.app.clone())); + + // if everything is working, these should all run forever + tokio::select! { + x = flatten_handles(spawned_app.app_handles) => { + match x { + Ok(_) => info!("app_handle exited"), + Err(e) => { + return Err(e); + } + } + } + x = flatten_handle(frontend_handle) => { + match x { + Ok(_) => info!("frontend exited"), + Err(e) => { + return Err(e); + } + } + } + x = flatten_handle(prometheus_handle) => { + match x { + Ok(_) => info!("prometheus exited"), + Err(e) => { + return Err(e); + } + } + } + x = tokio::signal::ctrl_c() => { + match x { + Ok(_) => info!("quiting from ctrl-c"), + Err(e) => { + return Err(e.into()); + } + } + } + x = shutdown_receiver.recv() => { + match x { + Ok(_) => info!("quiting from shutdown receiver"), + Err(e) => { + return Err(e.into()); + } + } + } + }; + + // one of the handles stopped. send a value so the others know to shut down + if let Err(err) = shutdown_sender.send(()) { + warn!("shutdown sender err={:?}", err); + }; + + // wait for things like saving stats to the database to complete + info!("waiting on important background tasks"); + let mut background_errors = 0; + while let Some(x) = spawned_app.background_handles.next().await { + match x { + Err(e) => { + error!("{:?}", e); + background_errors += 1; + } + Ok(Err(e)) => { + error!("{:?}", e); + background_errors += 1; + } + Ok(Ok(_)) => continue, + } + } + + if background_errors.is_zero() { + info!("finished"); + Ok(()) + } else { + // TODO: collect instead? + Err(anyhow::anyhow!("finished with errors!")) + } +} + +#[cfg(test)] +mod tests { + use ethers::{ + prelude::{Http, Provider, U256}, + utils::Anvil, + }; + use hashbrown::HashMap; + use std::env; + + use web3_proxy::{ + config::{AppConfig, Web3ConnectionConfig}, + rpcs::blockchain::ArcBlock, + }; + + use super::*; + + #[tokio::test] + async fn it_works() { + // TODO: move basic setup into a test fixture + let path = env::var("PATH").unwrap(); + + println!("path: {}", path); + + // TODO: how should we handle logs in this? + // TODO: option for super verbose logs + std::env::set_var("RUST_LOG", "info,web3_proxy=debug"); + + let _ = env_logger::builder().is_test(true).try_init(); + + let anvil = Anvil::new().spawn(); + + println!("Anvil running at `{}`", anvil.endpoint()); + + let anvil_provider = Provider::::try_from(anvil.endpoint()).unwrap(); + + // mine a block because my code doesn't like being on block 0 + // TODO: make block 0 okay? is it okay now? + let _: U256 = anvil_provider + .request("evm_mine", None::<()>) + .await + .unwrap(); + + // make a test TopConfig + // TODO: load TopConfig from a file? CliConfig could have `cli_config.load_top_config`. 
would need to inject our endpoint ports
+        let top_config = TopConfig {
+            app: AppConfig {
+                chain_id: 31337,
+                default_user_max_requests_per_period: Some(6_000_000),
+                min_sum_soft_limit: 1,
+                min_synced_rpcs: 1,
+                public_requests_per_period: Some(1_000_000),
+                response_cache_max_bytes: 10_usize.pow(7),
+                redirect_public_url: Some("example.com/".to_string()),
+                redirect_rpc_key_url: Some("example.com/{{rpc_key_id}}".to_string()),
+                ..Default::default()
+            },
+            balanced_rpcs: HashMap::from([
+                (
+                    "anvil".to_string(),
+                    Web3ConnectionConfig {
+                        disabled: false,
+                        display_name: None,
+                        url: anvil.endpoint(),
+                        backup: Some(false),
+                        block_data_limit: None,
+                        soft_limit: 100,
+                        hard_limit: None,
+                        tier: 0,
+                        subscribe_txs: Some(false),
+                        extra: Default::default(),
+                    },
+                ),
+                (
+                    "anvil_ws".to_string(),
+                    Web3ConnectionConfig {
+                        disabled: false,
+                        display_name: None,
+                        url: anvil.ws_endpoint(),
+                        backup: Some(false),
+                        block_data_limit: None,
+                        soft_limit: 100,
+                        hard_limit: None,
+                        tier: 0,
+                        subscribe_txs: Some(false),
+                        extra: Default::default(),
+                    },
+                ),
+            ]),
+            private_rpcs: None,
+            extra: Default::default(),
+        };
+
+        let (shutdown_sender, _) = broadcast::channel(1);
+
+        // spawn another thread for running the app
+        // TODO: allow launching into the local tokio runtime instead of creating a new one?
+        let handle = {
+            let shutdown_sender = shutdown_sender.clone();
+
+            let frontend_port = 0;
+            let prometheus_port = 0;
+
+            tokio::spawn(async move {
+                run(
+                    top_config,
+                    frontend_port,
+                    prometheus_port,
+                    2,
+                    shutdown_sender,
+                )
+                .await
+            })
+        };
+
+        // TODO: do something to the node. query latest block, mine another block, query again
+        let proxy_provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
+
+        let anvil_result = anvil_provider
+            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
+            .await
+            .unwrap()
+            .unwrap();
+        let proxy_result = proxy_provider
+            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
+            .await
+            .unwrap()
+            .unwrap();
+
+        assert_eq!(anvil_result, proxy_result);
+
+        let first_block_num = anvil_result.number.unwrap();
+
+        let _: U256 = anvil_provider
+            .request("evm_mine", None::<()>)
+            .await
+            .unwrap();
+
+        let anvil_result = anvil_provider
+            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
+            .await
+            .unwrap()
+            .unwrap();
+        let proxy_result = proxy_provider
+            .request::<_, Option<ArcBlock>>("eth_getBlockByNumber", ("latest", true))
+            .await
+            .unwrap()
+            .unwrap();
+
+        assert_eq!(anvil_result, proxy_result);
+
+        let second_block_num = anvil_result.number.unwrap();
+
+        assert_eq!(first_block_num, second_block_num - 1);
+
+        // tell the test app to shut down
+        shutdown_sender.send(()).unwrap();
+
+        println!("waiting for shutdown...");
+        // TODO: panic if a timeout is reached
+        handle.await.unwrap().unwrap();
+    }
+}
diff --git a/web3_proxy/src/bin/web3_proxy_cli/drop_migration_lock.rs b/web3_proxy/src/bin/web3_proxy_cli/drop_migration_lock.rs
index 633a0610..ace59c65 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/drop_migration_lock.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/drop_migration_lock.rs
@@ -1,15 +1,24 @@
 use argh::FromArgs;
 use migration::sea_orm::DatabaseConnection;
-use web3_proxy::app::drop_migration_lock;
+use web3_proxy::app::{drop_migration_lock, migrate_db};
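Note that the `and_migrate` flag defined just below is declared as an argh option rather than a switch, so if the option form is intentional it takes an explicit value; a hedged usage sketch:

```sh
# drop a stale migration lock, then immediately re-run migrations while holding it
web3_proxy_cli drop_migration_lock --and-migrate true
```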
 #[derive(FromArgs, PartialEq, Debug, Eq)]
 /// In case of emergency, break glass.
 #[argh(subcommand, name = "drop_migration_lock")]
-pub struct DropMigrationLockSubCommand {}
+pub struct DropMigrationLockSubCommand {
+    #[argh(option)]
+    /// run migrations after dropping the lock
+    and_migrate: bool,
+}

 impl DropMigrationLockSubCommand {
     pub async fn main(&self, db_conn: &DatabaseConnection) -> anyhow::Result<()> {
-        drop_migration_lock(db_conn).await?;
+        if self.and_migrate {
+            migrate_db(db_conn, true).await?;
+        } else {
+            // just drop the lock
+            drop_migration_lock(db_conn).await?;
+        }

         Ok(())
     }
diff --git a/web3_proxy/src/bin/web3_proxy_cli/health_compass.rs b/web3_proxy/src/bin/web3_proxy_cli/health_compass.rs
deleted file mode 100644
index 4bdffbe9..00000000
--- a/web3_proxy/src/bin/web3_proxy_cli/health_compass.rs
+++ /dev/null
@@ -1,137 +0,0 @@
-use argh::FromArgs;
-use ethers::types::{Block, TxHash, H256};
-use log::{error, info, warn};
-use serde::{Deserialize, Serialize};
-use serde_json::json;
-use web3_proxy::jsonrpc::JsonRpcErrorData;
-
-#[derive(FromArgs, PartialEq, Debug, Eq)]
-/// Never bring only 2 compasses to sea.
-#[argh(subcommand, name = "health_compass")]
-pub struct HealthCompassSubCommand {
-    #[argh(positional)]
-    /// first rpc
-    rpc_a: String,
-
-    #[argh(positional)]
-    /// second rpc
-    rpc_b: String,
-
-    #[argh(positional)]
-    /// third rpc
-    rpc_c: String,
-}
-
-#[derive(Debug, Deserialize, Serialize)]
-struct JsonRpcResponse<T> {
-    // pub jsonrpc: String,
-    // pub id: Box<RawValue>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub result: Option<T>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub error: Option<JsonRpcErrorData>,
-}
-
-impl HealthCompassSubCommand {
-    pub async fn main(self) -> anyhow::Result<()> {
-        let client = reqwest::Client::new();
-
-        let block_by_number_request = json!({
-            "jsonrpc": "2.0",
-            "id": "1",
-            "method": "eth_getBlockByNumber",
-            "params": ["latest", false],
-        });
-
-        let a = client
-            .post(&self.rpc_a)
-            .json(&block_by_number_request)
-            .send()
-            .await?
-            .json::<JsonRpcResponse<Block<TxHash>>>()
-            .await?
-            .result
-            .unwrap();
-
-        // check the parent because b and c might not be as fast as a
-        let parent_hash = a.parent_hash;
-
-        let a = check_rpc(&parent_hash, &client, &self.rpc_a).await;
-        let b = check_rpc(&parent_hash, &client, &self.rpc_b).await;
-        let c = check_rpc(&parent_hash, &client, &self.rpc_c).await;
-
-        match (a, b, c) {
-            (Ok(Ok(a)), Ok(Ok(b)), Ok(Ok(c))) => {
-                if a != b {
-                    error!("A: {:?}\n\nB: {:?}\n\nC: {:?}", a, b, c);
-                    return Err(anyhow::anyhow!("difference detected!"));
-                }
-
-                if b != c {
-                    error!("\nA: {:?}\n\nB: {:?}\n\nC: {:?}", a, b, c);
-                    return Err(anyhow::anyhow!("difference detected!"));
-                }
-
-                // all three rpcs agree
-            }
-            (Ok(Ok(a)), Ok(Ok(b)), c) => {
-                // not all successes! but still enough to compare
-                warn!("C failed: {:?}", c);
-
-                if a != b {
-                    error!("\nA: {:?}\n\nB: {:?}", a, b);
-                    return Err(anyhow::anyhow!("difference detected!"));
-                }
-            }
-            (Ok(Ok(a)), b, Ok(Ok(c))) => {
-                // not all successes! but still enough to compare
-                warn!("B failed: {:?}", b);
-
-                if a != c {
-                    error!("\nA: {:?}\n\nC: {:?}", a, c);
-                    return Err(anyhow::anyhow!("difference detected!"));
-                }
-            }
-            (a, b, c) => {
-                // not enough successes
-                error!("A: {:?}\n\nB: {:?}\n\nC: {:?}", a, b, c);
-                return Err(anyhow::anyhow!("All are failing!"));
-            }
-        }
-
-        info!("OK");
-
-        Ok(())
-    }
-}
-
-// i don't think we need a whole provider. a simple http request is easiest
-async fn check_rpc(
-    block_hash: &H256,
-    client: &reqwest::Client,
-    rpc: &str,
-) -> anyhow::Result<Result<Block<TxHash>, JsonRpcErrorData>> {
-    let block_by_hash_request = json!({
-        "jsonrpc": "2.0",
-        "id": "1",
-        "method": "eth_getBlockByHash",
-        "params": [block_hash, false],
-    });
-
-    // TODO: don't unwrap! don't use the try operator
-    let response: JsonRpcResponse<Block<TxHash>> = client
-        .post(rpc)
-        .json(&block_by_hash_request)
-        .send()
-        .await?
-        .json()
-        .await?;
-
-    if let Some(result) = response.result {
-        Ok(Ok(result))
-    } else if let Some(result) = response.error {
-        Ok(Err(result))
-    } else {
-        unimplemented!("{:?}", response)
-    }
-}
diff --git a/web3_proxy/src/bin/web3_proxy_cli/main.rs b/web3_proxy/src/bin/web3_proxy_cli/main.rs
index fa519162..ff7dd61b 100644
--- a/web3_proxy/src/bin/web3_proxy_cli/main.rs
+++ b/web3_proxy/src/bin/web3_proxy_cli/main.rs
@@ -5,35 +5,61 @@ mod change_user_tier_by_address;
 mod change_user_tier_by_key;
 mod check_config;
 mod count_users;
+mod create_key;
 mod create_user;
+mod daemon;
 mod drop_migration_lock;
-mod health_compass;
 mod list_user_tier;
+mod pagerduty;
 mod rpc_accounting;
+mod sentryd;
 mod transfer_key;
 mod user_export;
 mod user_import;

+use anyhow::Context;
 use argh::FromArgs;
-use std::fs;
+use ethers::types::U256;
+use log::{info, warn};
+use pagerduty_rs::eventsv2async::EventsV2 as PagerdutyAsyncEventsV2;
+use pagerduty_rs::eventsv2sync::EventsV2 as PagerdutySyncEventsV2;
+use std::{
+    fs, panic,
+    path::Path,
+    sync::atomic::{self, AtomicUsize},
+};
+use tokio::runtime;
+use web3_proxy::pagerduty::panic_handler;
 use web3_proxy::{
-    app::{get_db, get_migrated_db},
+    app::{get_db, get_migrated_db, APP_USER_AGENT},
     config::TopConfig,
 };

+#[cfg(feature = "deadlock")]
+use parking_lot::deadlock;
+#[cfg(feature = "deadlock")]
+use std::thread;
+#[cfg(feature = "deadlock")]
+use tokio::time::Duration;
+
 #[derive(Debug, FromArgs)]
 /// Command line interface for admins to interact with web3_proxy
-pub struct CliConfig {
-    /// path to the application config (optional).
+pub struct Web3ProxyCli {
+    /// path to the application config (only required for some commands; defaults to dev config).
     #[argh(option)]
     pub config: Option<String>,

-    /// if no config, what database the client should connect to. Defaults to dev db.
-    #[argh(
-        option,
-        default = "\"mysql://root:dev_web3_proxy@127.0.0.1:13306/dev_web3_proxy\".to_string()"
-    )]
-    pub db_url: String,
+    /// number of worker threads. Defaults to the number of logical processors
+    #[argh(option, default = "0")]
+    pub workers: usize,
+
+    /// if no config, what database the client should connect to (only required for some commands; Defaults to dev db)
+    #[argh(option)]
+    pub db_url: Option<String>,
+
+    /// if no config, what sentry url the client should connect to
+    #[argh(option)]
+    pub sentry_url: Option<String>,

     /// this one cli can do multiple things
     #[argh(subcommand)]
@@ -50,10 +76,13 @@ enum SubCommand {
     ChangeUserTierByKey(change_user_tier_by_key::ChangeUserTierByKeySubCommand),
     CheckConfig(check_config::CheckConfigSubCommand),
     CountUsers(count_users::CountUsersSubCommand),
+    CreateKey(create_key::CreateKeySubCommand),
     CreateUser(create_user::CreateUserSubCommand),
     DropMigrationLock(drop_migration_lock::DropMigrationLockSubCommand),
-    HealthCompass(health_compass::HealthCompassSubCommand),
+    Pagerduty(pagerduty::PagerdutySubCommand),
+    Proxyd(daemon::ProxydSubCommand),
     RpcAccounting(rpc_accounting::RpcAccountingSubCommand),
+    Sentryd(sentryd::SentrydSubCommand),
     TransferKey(transfer_key::TransferKeySubCommand),
     UserExport(user_export::UserExportSubCommand),
     UserImport(user_import::UserImportSubCommand),
@@ -62,28 +91,97 @@ enum SubCommand {
     // TODO: sub command to change a user's tier
 }

-#[tokio::main]
-async fn main() -> anyhow::Result<()> {
-    // if RUST_LOG isn't set, configure a default
-    // TODO: is there a better way to do this?
-    if std::env::var("RUST_LOG").is_err() {
-        // std::env::set_var("RUST_LOG", "info,web3_proxy=debug,web3_proxy_cli=debug");
-        std::env::set_var("RUST_LOG", "info,web3_proxy=debug,web3_proxy_cli=debug");
+fn main() -> anyhow::Result<()> {
+    #[cfg(feature = "deadlock")]
+    {
+        // spawn a thread for deadlock detection
+        thread::spawn(move || loop {
+            thread::sleep(Duration::from_secs(10));
+            let deadlocks = deadlock::check_deadlock();
+            if deadlocks.is_empty() {
+                continue;
+            }
+
+            println!("{} deadlocks detected", deadlocks.len());
+            for (i, threads) in deadlocks.iter().enumerate() {
+                println!("Deadlock #{}", i);
+                for t in threads {
+                    println!("Thread Id {:#?}", t.thread_id());
+                    println!("{:#?}", t.backtrace());
+                }
+            }
+        });
     }

-    env_logger::init();
+    // if RUST_LOG isn't set, configure a default
+    // TODO: is there a better way to do this?
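For the TODO above, an alternative to hand-rolling the default is env_logger's Env fallback. A minimal sketch (this PR instead builds the filter string manually so it can also branch on WEB3_PROXY_TRACE):

```rust
// Fall back to a default filter string when RUST_LOG is unset.
fn init_default_logging() {
    let env = env_logger::Env::default()
        .default_filter_or("info,web3_proxy=debug,web3_proxy_cli=debug");

    env_logger::Builder::from_env(env).init();
}
```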
+ let rust_log = match std::env::var("RUST_LOG") { + Ok(x) => x, + Err(_) => match std::env::var("WEB3_PROXY_TRACE").map(|x| x == "true") { + Ok(true) => { + vec![ + "info", + "ethers=debug", + "redis_rate_limit=debug", + "web3_proxy=trace", + "web3_proxy_cli=trace", + "web3_proxy::rpcs::blockchain=info", + "web3_proxy::rpcs::request=debug", + ] + } + _ => { + vec![ + "info", + "ethers=debug", + "redis_rate_limit=debug", + "web3_proxy=debug", + "web3_proxy_cli=debug", + ] + } + } + .join(","), + }; // this probably won't matter for us in docker, but better safe than sorry fdlimit::raise_fd_limit(); - let mut cli_config: CliConfig = argh::from_env(); + let mut cli_config: Web3ProxyCli = argh::from_env(); + + if cli_config.config.is_none() && cli_config.db_url.is_none() && cli_config.sentry_url.is_none() + { + // TODO: default to example.toml if development.toml doesn't exist + info!("defaulting to development config"); + cli_config.config = Some("./config/development.toml".to_string()); + } + + let top_config = if let Some(top_config_path) = cli_config.config.clone() { + let top_config_path = Path::new(&top_config_path) + .canonicalize() + .context(format!("checking for config at {}", top_config_path))?; - let _top_config = if let Some(top_config_path) = cli_config.config.clone() { let top_config: String = fs::read_to_string(top_config_path)?; - let top_config: TopConfig = toml::from_str(&top_config)?; + let mut top_config: TopConfig = toml::from_str(&top_config)?; - if let Some(top_config_db_url) = top_config.app.db_url.clone() { - cli_config.db_url = top_config_db_url; + // TODO: this doesn't seem to do anything + proctitle::set_title(format!("web3_proxy-{}", top_config.app.chain_id)); + + if cli_config.db_url.is_none() { + cli_config.db_url = top_config.app.db_url.clone(); + } + + if let Some(sentry_url) = top_config.app.sentry_url.clone() { + cli_config.sentry_url = Some(sentry_url); + } + + if top_config.app.chain_id == 137 { + // TODO: these numbers are arbitrary. 
i think the maticnetwork/erigon fork has a bug + if top_config.app.gas_increase_min.is_none() { + top_config.app.gas_increase_min = Some(U256::from(40_000)); + } + + if top_config.app.gas_increase_percent.is_none() { + top_config.app.gas_increase_percent = Some(U256::from(40)); + } } Some(top_config) @@ -91,69 +189,231 @@ async fn main() -> anyhow::Result<()> { None }; - match cli_config.sub_command { - SubCommand::ChangeUserAddress(x) => { - let db_conn = get_db(cli_config.db_url, 1, 1).await?; + let logger = env_logger::builder().parse_filters(&rust_log).build(); - x.main(&db_conn).await - } - SubCommand::ChangeUserTier(x) => { - let db_conn = get_db(cli_config.db_url, 1, 1).await?; + let max_level = logger.filter(); - x.main(&db_conn).await - } - SubCommand::ChangeUserAdminStatus(x) => { - let db_conn = get_db(cli_config.db_url, 1, 1).await?; + // connect to sentry for error reporting + // if no sentry, only log to stdout + let _sentry_guard = if let Some(sentry_url) = cli_config.sentry_url.clone() { + let logger = sentry::integrations::log::SentryLogger::with_dest(logger); - x.main(&db_conn).await - } - SubCommand::ChangeUserTierByAddress(x) => { - let db_conn = get_db(cli_config.db_url, 1, 1).await?; + log::set_boxed_logger(Box::new(logger)).unwrap(); - x.main(&db_conn).await - } - SubCommand::ChangeUserTierByKey(x) => { - let db_conn = get_db(cli_config.db_url, 1, 1).await?; + let guard = sentry::init(( + sentry_url, + sentry::ClientOptions { + release: sentry::release_name!(), + // TODO: Set this a to lower value (from config) in production + traces_sample_rate: 1.0, + ..Default::default() + }, + )); - x.main(&db_conn).await - } - SubCommand::CheckConfig(x) => x.main().await, - SubCommand::CreateUser(x) => { - let db_conn = get_migrated_db(cli_config.db_url, 1, 1).await?; + Some(guard) + } else { + log::set_boxed_logger(Box::new(logger)).unwrap(); - x.main(&db_conn).await - } - SubCommand::CountUsers(x) => { - let db_conn = get_db(cli_config.db_url, 1, 1).await?; + None + }; - x.main(&db_conn).await - } - SubCommand::DropMigrationLock(x) => { - // very intentionally, do NOT run migrations here - let db_conn = get_db(cli_config.db_url, 1, 1).await?; + log::set_max_level(max_level); - x.main(&db_conn).await - } - SubCommand::HealthCompass(x) => x.main().await, - SubCommand::RpcAccounting(x) => { - let db_conn = get_migrated_db(cli_config.db_url, 1, 1).await?; + info!("{}", APP_USER_AGENT); - x.main(&db_conn).await - } - SubCommand::TransferKey(x) => { - let db_conn = get_db(cli_config.db_url, 1, 1).await?; + // optionally connect to pagerduty + // TODO: fix this nested result + let (pagerduty_async, pagerduty_sync) = if let Ok(pagerduty_key) = + std::env::var("PAGERDUTY_INTEGRATION_KEY") + { + let pagerduty_async = + PagerdutyAsyncEventsV2::new(pagerduty_key.clone(), Some(APP_USER_AGENT.to_string()))?; + let pagerduty_sync = + PagerdutySyncEventsV2::new(pagerduty_key, Some(APP_USER_AGENT.to_string()))?; - x.main(&db_conn).await - } - SubCommand::UserExport(x) => { - let db_conn = get_migrated_db(cli_config.db_url, 1, 1).await?; + (Some(pagerduty_async), Some(pagerduty_sync)) + } else { + info!("No PAGERDUTY_INTEGRATION_KEY"); - x.main(&db_conn).await - } - SubCommand::UserImport(x) => { - let db_conn = get_migrated_db(cli_config.db_url, 1, 1).await?; + (None, None) + }; - x.main(&db_conn).await - } + // panic handler that sends to pagerduty. + // TODO: use the sentry handler if no pager duty. 
use default if no sentry + if let Some(pagerduty_sync) = pagerduty_sync { + let top_config = top_config.clone(); + + panic::set_hook(Box::new(move |x| { + panic_handler(top_config.clone(), &pagerduty_sync, x); + })); } + + // set up tokio's async runtime + let mut rt_builder = runtime::Builder::new_multi_thread(); + + rt_builder.enable_all(); + + if cli_config.workers > 0 { + rt_builder.worker_threads(cli_config.workers); + } + + if let Some(top_config) = top_config.as_ref() { + let chain_id = top_config.app.chain_id; + + rt_builder.thread_name_fn(move || { + static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0); + // TODO: what ordering? i think we want seqcst so that these all happen in order, but that might be stricter than we really need + let worker_id = ATOMIC_ID.fetch_add(1, atomic::Ordering::SeqCst); + // TODO: i think these max at 15 characters + format!("web3-{}-{}", chain_id, worker_id) + }); + } + + // start tokio's async runtime + let rt = rt_builder.build()?; + + let num_workers = rt.metrics().num_workers(); + info!("num_workers: {}", num_workers); + + rt.block_on(async { + match cli_config.sub_command { + SubCommand::ChangeUserAddress(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::ChangeUserTier(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::ChangeUserAdminStatus(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::ChangeUserTierByAddress(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::ChangeUserTierByKey(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::CheckConfig(x) => x.main().await, + SubCommand::CreateKey(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run create a key"); + + let db_conn = get_migrated_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::CreateUser(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_migrated_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::CountUsers(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::Proxyd(x) => { + let top_config = top_config.expect("--config is required to run proxyd"); + + x.main(top_config, num_workers).await + } + SubCommand::DropMigrationLock(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + // very intentionally, do NOT run migrations here + let db_conn = get_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::Pagerduty(x) => { + if cli_config.sentry_url.is_none() { + 
warn!("sentry_url is not set! Logs will only show in this console"); + } + + x.main(pagerduty_async, top_config).await + } + SubCommand::Sentryd(x) => { + if cli_config.sentry_url.is_none() { + warn!("sentry_url is not set! Logs will only show in this console"); + } + + x.main(pagerduty_async, top_config).await + } + SubCommand::RpcAccounting(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_migrated_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::TransferKey(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + let db_conn = get_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::UserExport(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_migrated_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + SubCommand::UserImport(x) => { + let db_url = cli_config + .db_url + .expect("'--config' (with a db) or '--db-url' is required to run proxyd"); + + let db_conn = get_migrated_db(db_url, 1, 1).await?; + + x.main(&db_conn).await + } + } + }) } diff --git a/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs b/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs new file mode 100644 index 00000000..7e55103a --- /dev/null +++ b/web3_proxy/src/bin/web3_proxy_cli/pagerduty.rs @@ -0,0 +1,88 @@ +use argh::FromArgs; +use log::{error, info}; +use pagerduty_rs::{eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, types::Event}; +use web3_proxy::{ + config::TopConfig, + pagerduty::{pagerduty_alert, pagerduty_alert_for_config}, +}; + +#[derive(FromArgs, PartialEq, Debug, Eq)] +/// Quickly create a pagerduty alert +#[argh(subcommand, name = "pagerduty")] +pub struct PagerdutySubCommand { + #[argh(positional)] + /// short description of the alert + summary: String, + + /// the chain id to require. Only used if not using --config. + #[argh(option)] + chain_id: Option, + + #[argh(option)] + /// the class/type of the event + class: Option, + + #[argh(option)] + /// the component of the event + component: Option, + + #[argh(option)] + /// deduplicate alerts based on this key. + /// If there are no open incidents with this key, a new incident will be created. + /// If there is an open incident with a matching key, the new event will be appended to that incident's Alerts log as an additional Trigger log entry. + dedup_key: Option, +} + +impl PagerdutySubCommand { + pub async fn main( + self, + pagerduty_async: Option, + top_config: Option, + ) -> anyhow::Result<()> { + // TODO: allow customizing severity + let event = top_config + .map(|top_config| { + pagerduty_alert_for_config( + self.class.clone(), + self.component.clone(), + None::<()>, + pagerduty_rs::types::Severity::Error, + self.summary.clone(), + None, + top_config, + ) + }) + .unwrap_or_else(|| { + pagerduty_alert( + self.chain_id, + self.class, + None, + None, + self.component, + None::<()>, + pagerduty_rs::types::Severity::Error, + None, + self.summary, + None, + ) + }); + + if let Some(pagerduty_async) = pagerduty_async { + info!( + "sending to pagerduty: {}", + serde_json::to_string_pretty(&event)? + ); + + if let Err(err) = pagerduty_async.event(Event::AlertTrigger(event)).await { + error!("Failed sending to pagerduty: {}", err); + } + } else { + info!( + "would send to pagerduty if PAGERDUTY_INTEGRATION_KEY were set: {}", + serde_json::to_string_pretty(&event)? 
+ ); + } + + Ok(()) + } +} diff --git a/web3_proxy/src/bin/web3_proxy_cli/rpc_accounting.rs b/web3_proxy/src/bin/web3_proxy_cli/rpc_accounting.rs index 9d9e7170..653ecb02 100644 --- a/web3_proxy/src/bin/web3_proxy_cli/rpc_accounting.rs +++ b/web3_proxy/src/bin/web3_proxy_cli/rpc_accounting.rs @@ -12,6 +12,8 @@ use migration::{ }, Condition, }; +use serde::Serialize; +use serde_json::json; /// count requests #[derive(FromArgs, PartialEq, Debug, Eq)] @@ -37,7 +39,7 @@ pub struct RpcAccountingSubCommand { impl RpcAccountingSubCommand { pub async fn main(self, db_conn: &DatabaseConnection) -> anyhow::Result<()> { - #[derive(Debug, FromQueryResult)] + #[derive(Serialize, FromQueryResult)] struct SelectResult { total_frontend_requests: Decimal, // pub total_backend_retries: Decimal, @@ -137,8 +139,9 @@ impl RpcAccountingSubCommand { .context("no query result")?; info!( - "query_response for chain {:?}: {:#?}", - self.chain_id, query_response + "query_response for chain {:?}: {:#}", + self.chain_id, + json!(query_response) ); // let query_seconds: Decimal = query_response diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs new file mode 100644 index 00000000..e5225fbc --- /dev/null +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/compare.rs @@ -0,0 +1,256 @@ +use anyhow::{anyhow, Context}; +use chrono::{DateTime, Utc}; +use ethers::types::{Block, TxHash, H256}; +use futures::{stream::FuturesUnordered, StreamExt}; +use log::{debug, warn}; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use web3_proxy::jsonrpc::JsonRpcErrorData; + +use super::{SentrydErrorBuilder, SentrydResult}; + +#[derive(Debug, Deserialize, Serialize)] +struct JsonRpcResponse { + // pub jsonrpc: String, + // pub id: Box, + #[serde(skip_serializing_if = "Option::is_none")] + pub result: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option, +} + +#[derive(Serialize, Ord, PartialEq, PartialOrd, Eq)] +struct AbbreviatedBlock { + pub num: u64, + pub time: DateTime, + pub hash: H256, +} + +impl From> for AbbreviatedBlock { + fn from(x: Block) -> Self { + Self { + num: x.number.unwrap().as_u64(), + hash: x.hash.unwrap(), + time: x.time().unwrap(), + } + } +} + +pub async fn main( + error_builder: SentrydErrorBuilder, + rpc: String, + others: Vec, + max_age: i64, + max_lag: i64, +) -> SentrydResult { + let client = reqwest::Client::new(); + + let block_by_number_request = json!({ + "jsonrpc": "2.0", + "id": "1", + "method": "eth_getBlockByNumber", + "params": ["latest", false], + }); + + let a = client + .post(&rpc) + .json(&block_by_number_request) + .send() + .await + .context(format!("error querying block from {}", rpc)) + .map_err(|x| error_builder.build(x))?; + + if !a.status().is_success() { + return error_builder.result(anyhow!("bad response from {}: {}", rpc, a.status())); + } + + // TODO: capture response headers now in case of error. store them in the extra data on the pager duty alert + let headers = format!("{:#?}", a.headers()); + + let body = a + .text() + .await + .context(format!("failed parsing body from {}", rpc)) + .map_err(|x| error_builder.build(x))?; + + let a: JsonRpcResponse> = serde_json::from_str(&body) + .context(format!("body: {}", body)) + .context(format!("failed parsing json from {}", rpc)) + .map_err(|x| error_builder.build(x))?; + + let a = if let Some(block) = a.result { + block + } else if let Some(err) = a.error { + return error_builder.result( + anyhow::anyhow!("headers: {:#?}. 
err: {:#?}", headers, err) + .context(format!("jsonrpc error from {}: code {}", rpc, err.code)), + ); + } else { + return error_builder + .result(anyhow!("{:#?}", a).context(format!("empty response from {}", rpc))); + }; + + // check the parent because b and c might not be as fast as a + let parent_hash = a.parent_hash; + + let rpc_block = check_rpc(parent_hash, client.clone(), rpc.to_string()) + .await + .context(format!("Error while querying primary rpc: {}", rpc)) + .map_err(|err| error_builder.build(err))?; + + let fs = FuturesUnordered::new(); + for other in others.iter() { + let f = check_rpc(parent_hash, client.clone(), other.to_string()); + + fs.push(tokio::spawn(f)); + } + let other_check: Vec<_> = fs.collect().await; + + if other_check.is_empty() { + return error_builder.result(anyhow::anyhow!("No other RPCs to check!")); + } + + // TODO: collect into a counter instead? + let mut newest_other = None; + for oc in other_check.iter() { + match oc { + Ok(Ok(x)) => newest_other = newest_other.max(Some(x)), + Ok(Err(err)) => warn!("failed checking other rpc: {:?}", err), + Err(err) => warn!("internal error checking other rpc: {:?}", err), + } + } + + if let Some(newest_other) = newest_other { + let duration_since = newest_other + .time + .signed_duration_since(rpc_block.time) + .num_seconds(); + + match duration_since.abs().cmp(&max_lag) { + std::cmp::Ordering::Less | std::cmp::Ordering::Equal => {} + std::cmp::Ordering::Greater => match duration_since.cmp(&0) { + std::cmp::Ordering::Equal => { + unimplemented!("we already checked that they are not equal") + } + std::cmp::Ordering::Less => { + return error_builder.result(anyhow::anyhow!( + "Our RPC is too far ahead ({} s)! Something might be wrong.\n{:#}\nvs\n{:#}", + duration_since.abs(), + json!(rpc_block), + json!(newest_other), + ).context(format!("{} is too far ahead", rpc))); + } + std::cmp::Ordering::Greater => { + return error_builder.result( + anyhow::anyhow!( + "Behind {} s!\n{:#}\nvs\n{:#}", + duration_since, + json!(rpc_block), + json!(newest_other), + ) + .context(format!("{} is too far behind", rpc)), + ); + } + }, + } + + let now = Utc::now(); + + let block_age = now + .signed_duration_since(newest_other.max(&rpc_block).time) + .num_seconds(); + + match block_age.abs().cmp(&max_age) { + std::cmp::Ordering::Less | std::cmp::Ordering::Equal => {} + std::cmp::Ordering::Greater => match duration_since.cmp(&0) { + std::cmp::Ordering::Equal => unimplemented!(), + std::cmp::Ordering::Less => { + return error_builder.result( + anyhow::anyhow!( + "Clock is behind {}s! Something might be wrong.\n{:#}\nvs\n{:#}", + block_age.abs(), + json!(now), + json!(newest_other), + ) + .context(format!("Clock is too far behind on {}!", rpc)), + ); + } + std::cmp::Ordering::Greater => { + return error_builder.result( + anyhow::anyhow!( + "block is too old ({}s)!\n{:#}\nvs\n{:#}", + block_age, + json!(now), + json!(newest_other), + ) + .context(format!("block is too old on {}!", rpc)), + ); + } + }, + } + } else { + return error_builder.result(anyhow::anyhow!("No other RPC times to check!")); + } + + debug!("rpc comparison ok: {:#}", json!(rpc_block)); + + Ok(()) +} + +// i don't think we need a whole provider. 
a simple http request is easiest +async fn check_rpc( + block_hash: H256, + client: reqwest::Client, + rpc: String, +) -> anyhow::Result { + let block_by_hash_request = json!({ + "jsonrpc": "2.0", + "id": "1", + "method": "eth_getBlockByHash", + "params": [block_hash, false], + }); + + let response = client + .post(&rpc) + .json(&block_by_hash_request) + .send() + .await + .context(format!("awaiting response from {}", rpc))?; + + if !response.status().is_success() { + return Err(anyhow::anyhow!( + "bad response from {}: {}", + rpc, + response.status(), + )); + } + + let body = response + .text() + .await + .context(format!("failed parsing body from {}", rpc))?; + + let response_json: JsonRpcResponse> = serde_json::from_str(&body) + .context(format!("body: {}", body)) + .context(format!("failed parsing json from {}", rpc))?; + + if let Some(result) = response_json.result { + let abbreviated = AbbreviatedBlock::from(result); + + debug!("{} has {:?}@{}", rpc, abbreviated.hash, abbreviated.num); + + Ok(abbreviated) + } else if let Some(result) = response_json.error { + Err(anyhow!( + "jsonrpc error during check_rpc from {}: {:#}", + rpc, + json!(result), + )) + } else { + Err(anyhow!( + "empty result during check_rpc from {}: {:#}", + rpc, + json!(response_json) + )) + } +} diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs new file mode 100644 index 00000000..90398f20 --- /dev/null +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/mod.rs @@ -0,0 +1,284 @@ +mod compare; +mod simple; + +use anyhow::Context; +use argh::FromArgs; +use futures::{ + stream::{FuturesUnordered, StreamExt}, + Future, +}; +use log::{error, info}; +use pagerduty_rs::{eventsv2async::EventsV2 as PagerdutyAsyncEventsV2, types::Event}; +use serde_json::json; +use std::time::Duration; +use tokio::sync::mpsc; +use tokio::time::{interval, MissedTickBehavior}; +use web3_proxy::{config::TopConfig, pagerduty::pagerduty_alert}; + +#[derive(FromArgs, PartialEq, Debug, Eq)] +/// Loop healthchecks and send pager duty alerts if any fail +#[argh(subcommand, name = "sentryd")] +pub struct SentrydSubCommand { + #[argh(positional)] + /// the main (HTTP only) web3-proxy being checked. + web3_proxy: String, + + /// the chain id to require. Only used if not using --config. + #[argh(option)] + chain_id: Option, + + #[argh(option)] + /// warning threshold for age of the best known head block + max_age: i64, + + #[argh(option)] + /// warning threshold for seconds between the rpc and best other_rpc's head blocks + max_lag: i64, + + #[argh(option)] + /// other (HTTP only) rpcs to compare the main rpc to + other_rpc: Vec, + + #[argh(option)] + /// other (HTTP only) web3-proxies to compare the main rpc to + other_proxy: Vec, + + #[argh(option)] + /// how many seconds between running checks + seconds: Option, +} + +#[derive(Debug)] +pub struct SentrydError { + /// The class/type of the event, for example ping failure or cpu load + class: String, + /// Errors will send a pagerduty alert. 
others just give log messages + level: log::Level, + /// A short summary that should be mostly static + summary: String, + /// Lots of detail about the error + extra: Option, +} + +/// helper for creating SentrydErrors +#[derive(Clone)] +pub struct SentrydErrorBuilder { + class: String, + level: log::Level, +} + +impl SentrydErrorBuilder { + fn build(&self, err: anyhow::Error) -> SentrydError { + SentrydError { + class: self.class.to_owned(), + level: self.level.to_owned(), + summary: format!("{}", err), + extra: Some(json!(format!("{:#?}", err))), + } + } + + fn result(&self, err: anyhow::Error) -> SentrydResult { + Err(self.build(err)) + } +} + +type SentrydResult = Result<(), SentrydError>; + +impl SentrydSubCommand { + pub async fn main( + self, + pagerduty_async: Option, + top_config: Option, + ) -> anyhow::Result<()> { + // sentry logging should already be configured + + let chain_id = self + .chain_id + .or_else(|| top_config.map(|x| x.app.chain_id)) + .context("--config or --chain-id required")?; + + let primary_proxy = self.web3_proxy.trim_end_matches("/").to_string(); + + let other_proxy: Vec<_> = self + .other_proxy + .into_iter() + .map(|x| x.trim_end_matches("/").to_string()) + .collect(); + + let other_rpc: Vec<_> = self + .other_rpc + .into_iter() + .map(|x| x.trim_end_matches("/").to_string()) + .collect(); + + let seconds = self.seconds.unwrap_or(60); + + let mut handles = FuturesUnordered::new(); + + // channels and a task for sending errors to logs/pagerduty + let (error_sender, mut error_receiver) = mpsc::channel::(10); + + { + let error_handler_f = async move { + if pagerduty_async.is_none() { + info!("set PAGERDUTY_INTEGRATION_KEY to send create alerts for errors"); + } + + while let Some(err) = error_receiver.recv().await { + log::log!(err.level, "check failed: {:#?}", err); + + if matches!(err.level, log::Level::Error) { + let alert = pagerduty_alert( + Some(chain_id), + Some(err.class), + Some("web3-proxy-sentry".to_string()), + None, + None, + err.extra, + pagerduty_rs::types::Severity::Error, + None, + err.summary, + None, + ); + + if let Some(pagerduty_async) = pagerduty_async.as_ref() { + info!( + "sending to pagerduty: {:#}", + serde_json::to_string_pretty(&alert)? + ); + + if let Err(err) = + pagerduty_async.event(Event::AlertTrigger(alert)).await + { + error!("Failed sending to pagerduty: {:#?}", err); + } + } + } + } + + Ok(()) + }; + + handles.push(tokio::spawn(error_handler_f)); + } + + // spawn a bunch of health check loops that do their checks on an interval + + // check the main rpc's /health endpoint + { + let url = if primary_proxy.contains("/rpc/") { + let x = primary_proxy.split("/rpc/").next().unwrap(); + + format!("{}/health", x) + } else { + format!("{}/health", primary_proxy) + }; + let error_sender = error_sender.clone(); + + // TODO: what timeout? + let timeout = Duration::from_secs(5); + + let loop_f = a_loop( + "main /health", + seconds, + log::Level::Error, + error_sender, + move |error_builder| simple::main(error_builder, url.clone(), timeout), + ); + + handles.push(tokio::spawn(loop_f)); + } + // check any other web3-proxy /health endpoints + for other_web3_proxy in other_proxy.iter() { + let url = if other_web3_proxy.contains("/rpc/") { + let x = other_web3_proxy.split("/rpc/").next().unwrap(); + + format!("{}/health", x) + } else { + format!("{}/health", other_web3_proxy) + }; + + let error_sender = error_sender.clone(); + + // TODO: what timeout? 
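+            // editorial note (not part of the original diff): this hard-codes the same
+            // 5s that the main /health check above uses; one way to answer both "what
+            // timeout?" TODOs would be a hypothetical --health-timeout option next to
+            // --seconds, threaded through to every a_loop call.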
+ let timeout = Duration::from_secs(5); + + let loop_f = a_loop( + "other /health", + seconds, + log::Level::Warn, + error_sender, + move |error_builder| simple::main(error_builder, url.clone(), timeout), + ); + + handles.push(tokio::spawn(loop_f)); + } + + // compare the main web3-proxy head block to all web3-proxies and rpcs + { + let max_age = self.max_age; + let max_lag = self.max_lag; + let primary_proxy = primary_proxy.clone(); + let error_sender = error_sender.clone(); + + let mut others = other_proxy.clone(); + + others.extend(other_rpc); + + let loop_f = a_loop( + "head block comparison", + seconds, + log::Level::Error, + error_sender, + move |error_builder| { + compare::main( + error_builder, + primary_proxy.clone(), + others.clone(), + max_age, + max_lag, + ) + }, + ); + + handles.push(tokio::spawn(loop_f)); + } + + // wait for any returned values (if everything is working, they will all run forever) + while let Some(x) = handles.next().await { + // any errors that make it here will end the program + x??; + } + + Ok(()) + } +} + +async fn a_loop( + class: &str, + seconds: u64, + error_level: log::Level, + error_sender: mpsc::Sender, + f: impl Fn(SentrydErrorBuilder) -> T, +) -> anyhow::Result<()> +where + T: Future + Send + 'static, +{ + let error_builder = SentrydErrorBuilder { + class: class.to_owned(), + level: error_level, + }; + + let mut interval = interval(Duration::from_secs(seconds)); + + // TODO: should we warn if there are delays? + interval.set_missed_tick_behavior(MissedTickBehavior::Delay); + + loop { + interval.tick().await; + + if let Err(err) = f(error_builder.clone()).await { + error_sender.send(err).await?; + }; + } +} diff --git a/web3_proxy/src/bin/web3_proxy_cli/sentryd/simple.rs b/web3_proxy/src/bin/web3_proxy_cli/sentryd/simple.rs new file mode 100644 index 00000000..54dffde4 --- /dev/null +++ b/web3_proxy/src/bin/web3_proxy_cli/sentryd/simple.rs @@ -0,0 +1,60 @@ +use std::time::Duration; + +use super::{SentrydErrorBuilder, SentrydResult}; +use anyhow::Context; +use log::{debug, trace}; +use tokio::time::Instant; + +/// GET the url and return an error if it wasn't a success +pub async fn main( + error_builder: SentrydErrorBuilder, + url: String, + timeout: Duration, +) -> SentrydResult { + let start = Instant::now(); + + let r = reqwest::get(&url) + .await + .context(format!("Failed GET {}", &url)) + .map_err(|x| error_builder.build(x))?; + + let elapsed = start.elapsed(); + + if elapsed > timeout { + return error_builder.result( + anyhow::anyhow!( + "query took longer than {}ms ({}ms): {:#?}", + timeout.as_millis(), + elapsed.as_millis(), + r + ) + .context(format!("fetching {} took too long", &url)), + ); + } + + // TODO: what should we do if we get rate limited here? + + if r.status().is_success() { + debug!("{} is healthy", &url); + trace!("Successful {:#?}", r); + return Ok(()); + } + + // TODO: capture headers? or is that already part of r? 
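+    // editorial note (not part of the original diff): reqwest::Response's Debug output
+    // already includes the URL, status, and header map, so the `detail` string built
+    // below should capture the headers; r.headers() is also available here, before
+    // r.text() consumes the response, if they are ever wanted on their own.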
+ let detail = format!("{:#?}", r); + + let summary = format!("{} is unhealthy: {}", &url, r.status()); + + let body = r + .text() + .await + .context(detail.clone()) + .context(summary.clone()) + .map_err(|x| error_builder.build(x))?; + + error_builder.result( + anyhow::anyhow!("body: {}", body) + .context(detail) + .context(summary), + ) +} diff --git a/web3_proxy/src/config.rs b/web3_proxy/src/config.rs index 9bb125e3..9e40db5a 100644 --- a/web3_proxy/src/config.rs +++ b/web3_proxy/src/config.rs @@ -4,6 +4,7 @@ use crate::rpcs::request::OpenRequestHandleMetrics; use crate::{app::AnyhowJoinHandle, rpcs::blockchain::ArcBlock}; use argh::FromArgs; use ethers::prelude::TxHash; +use ethers::types::U256; use hashbrown::HashMap; use log::warn; use migration::sea_orm::DatabaseConnection; @@ -38,7 +39,7 @@ pub struct CliConfig { pub cookie_key_filename: String, } -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct TopConfig { pub app: AppConfig, pub balanced_rpcs: HashMap, @@ -51,7 +52,7 @@ pub struct TopConfig { /// shared configuration between Web3Connections // TODO: no String, only &str -#[derive(Debug, Default, Deserialize)] +#[derive(Clone, Debug, Default, Deserialize)] pub struct AppConfig { /// Request limit for allowed origins for anonymous users. /// These requests get rate limited by IP. @@ -90,6 +91,12 @@ pub struct AppConfig { /// None = allow all requests pub default_user_max_requests_per_period: Option, + /// minimum amount to increase eth_estimateGas results + pub gas_increase_min: Option, + + /// percentage to increase eth_estimateGas results. 100 == 100% + pub gas_increase_percent: Option, + /// Restrict user registration. /// None = no code needed pub invite_code: Option, @@ -183,7 +190,7 @@ fn default_response_cache_max_bytes() -> usize { } /// Configuration for a backend web3 RPC server -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct Web3ConnectionConfig { /// simple way to disable a connection without deleting the row #[serde(default)] @@ -198,6 +205,8 @@ pub struct Web3ConnectionConfig { pub soft_limit: u32, /// the requests per second at which the server throws errors (rate limit or otherwise) pub hard_limit: Option, + /// only use this rpc if everything else is lagging too far. 
this allows us to ignore fast but very low limit rpcs + pub backup: Option, /// All else equal, a server with a lower tier receives all requests #[serde(default = "default_tier")] pub tier: u64, @@ -221,7 +230,6 @@ impl Web3ConnectionConfig { pub async fn spawn( self, name: String, - allowed_lag: u64, db_conn: Option, redis_pool: Option, chain_id: u64, @@ -256,9 +264,10 @@ impl Web3ConnectionConfig { None }; + let backup = self.backup.unwrap_or(false); + Web3Connection::spawn( name, - allowed_lag, self.display_name, chain_id, db_conn, @@ -267,6 +276,7 @@ impl Web3ConnectionConfig { http_interval_sender, hard_limit, self.soft_limit, + backup, self.block_data_limit, block_map, block_sender, diff --git a/web3_proxy/src/frontend/authorization.rs b/web3_proxy/src/frontend/authorization.rs index f98cf7d0..8c9380da 100644 --- a/web3_proxy/src/frontend/authorization.rs +++ b/web3_proxy/src/frontend/authorization.rs @@ -85,6 +85,7 @@ pub struct RequestMetadata { pub error_response: AtomicBool, pub response_bytes: AtomicU64, pub response_millis: AtomicU64, + pub response_from_backup_rpc: AtomicBool, } impl RequestMetadata { @@ -103,6 +104,7 @@ impl RequestMetadata { error_response: false.into(), response_bytes: 0.into(), response_millis: 0.into(), + response_from_backup_rpc: false.into(), }; Ok(new) @@ -660,13 +662,11 @@ impl Web3ProxyApp { let db_replica = self.db_replica().context("Getting database connection")?; - let rpc_secret_key: Uuid = rpc_secret_key.into(); - // TODO: join the user table to this to return the User? we don't always need it // TODO: join on secondary users // TODO: join on user tier match rpc_key::Entity::find() - .filter(rpc_key::Column::SecretKey.eq(rpc_secret_key)) + .filter(rpc_key::Column::SecretKey.eq(::from(rpc_secret_key))) .filter(rpc_key::Column::Active.eq(true)) .one(db_replica.conn()) .await? @@ -741,7 +741,8 @@ impl Web3ProxyApp { Ok(AuthorizationChecks { user_id: rpc_key_model.user_id, - rpc_key_id, + rpc_secret_key: Some(rpc_secret_key), + rpc_secret_key_id: rpc_key_id, allowed_ips, allowed_origins, allowed_referers, @@ -774,7 +775,7 @@ impl Web3ProxyApp { let authorization_checks = self.authorization_checks(rpc_key).await?; // if no rpc_key_id matching the given rpc was found, then we can't rate limit by key - if authorization_checks.rpc_key_id.is_none() { + if authorization_checks.rpc_secret_key_id.is_none() { return Ok(RateLimitResult::UnknownKey); } @@ -845,3 +846,29 @@ impl Web3ProxyApp { } } } + +impl Authorization { + pub async fn check_again( + &self, + app: &Arc, + ) -> Result<(Arc, Option), FrontendErrorResponse> { + // TODO: we could probably do this without clones. but this is easy + let (a, s) = if let Some(rpc_secret_key) = self.checks.rpc_secret_key { + key_is_authorized( + app, + rpc_secret_key, + self.ip, + self.origin.clone(), + self.referer.clone(), + self.user_agent.clone(), + ) + .await? + } else { + ip_is_authorized(app, self.ip, self.origin.clone()).await? + }; + + let a = Arc::new(a); + + Ok((a, s)) + } +} diff --git a/web3_proxy/src/frontend/errors.rs b/web3_proxy/src/frontend/errors.rs index 30ee053f..22f048ee 100644 --- a/web3_proxy/src/frontend/errors.rs +++ b/web3_proxy/src/frontend/errors.rs @@ -35,7 +35,6 @@ pub enum FrontendErrorResponse { NotFound, RateLimited(Authorization, Option), Redis(RedisError), - Response(Response), /// simple way to return an error message to the user and an anyhow to our logs StatusCode(StatusCode, String, Option), /// TODO: what should be attached to the timout? 
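Editorial aside on the hunk that follows: splitting the old IntoResponse impl into a reusable into_response_parts() lets the websocket code (see handle_socket_payload further down) turn a FrontendErrorResponse into a JSON-RPC text frame, where no HTTP response exists to send. A minimal self-contained sketch of that shape, using simplified stand-in types rather than the crate's real ones:

    // Simplified stand-ins; names are illustrative, not the crate's real
    // FrontendErrorResponse / JsonRpcForwardedResponse.
    struct JsonRpcError {
        code: i64,
        message: String,
    }

    enum ProxyError {
        AccessDenied,
        NotFound,
    }

    impl ProxyError {
        // one error -> (status, json-rpc body) mapping, shared by both transports
        fn into_response_parts(self) -> (u16, JsonRpcError) {
            match self {
                ProxyError::AccessDenied => (
                    403,
                    JsonRpcError { code: -32000, message: "access denied".into() },
                ),
                ProxyError::NotFound => (
                    404,
                    JsonRpcError { code: -32601, message: "method not found".into() },
                ),
            }
        }
    }

    // HTTP path: the status code becomes a real response status.
    fn http_response(err: ProxyError) -> (u16, String) {
        let (status, body) = err.into_response_parts();
        (status, format!(r#"{{"code":{},"message":"{}"}}"#, body.code, body.message))
    }

    // websocket path: no HTTP response exists, only a text frame, which is what
    // handle_socket_payload sends back when check_again fails.
    fn ws_text_frame(err: ProxyError) -> String {
        let (_status, body) = err.into_response_parts();
        format!(r#"{{"code":{},"message":"{}"}}"#, body.code, body.message)
    }

Removing the Response(Response) variant fits the same goal: every error can now be reduced to (status, body) parts before a transport-specific wrapper is chosen.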
@@ -44,11 +43,9 @@ pub enum FrontendErrorResponse { UnknownKey, } -impl IntoResponse for FrontendErrorResponse { - fn into_response(self) -> Response { - // TODO: include the request id in these so that users can give us something that will point to logs - // TODO: status code is in the jsonrpc response and is also the first item in the tuple. DRY - let (status_code, response) = match self { +impl FrontendErrorResponse { + pub fn into_response_parts(self) -> (StatusCode, JsonRpcForwardedResponse) { + match self { Self::AccessDenied => { // TODO: attach something to this trace. probably don't include much in the message though. don't want to leak creds by accident trace!("access denied"); @@ -174,12 +171,12 @@ impl IntoResponse for FrontendErrorResponse { }; // create a string with either the IP or the rpc_key_id - let msg = if authorization.checks.rpc_key_id.is_none() { + let msg = if authorization.checks.rpc_secret_key_id.is_none() { format!("too many requests from {}.{}", authorization.ip, retry_msg) } else { format!( "too many requests from rpc key #{}.{}", - authorization.checks.rpc_key_id.unwrap(), + authorization.checks.rpc_secret_key_id.unwrap(), retry_msg ) }; @@ -204,10 +201,6 @@ impl IntoResponse for FrontendErrorResponse { ), ) } - Self::Response(r) => { - debug_assert_ne!(r.status(), StatusCode::OK); - return r; - } Self::SemaphoreAcquireError(err) => { warn!("semaphore acquire err={:?}", err); ( @@ -274,7 +267,15 @@ impl IntoResponse for FrontendErrorResponse { None, ), ), - }; + } + } +} + +impl IntoResponse for FrontendErrorResponse { + fn into_response(self) -> Response { + // TODO: include the request id in these so that users can give us something that will point to logs + // TODO: status code is in the jsonrpc response and is also the first item in the tuple. DRY + let (status_code, response) = self.into_response_parts(); (status_code, Json(response)).into_response() } diff --git a/web3_proxy/src/frontend/mod.rs b/web3_proxy/src/frontend/mod.rs index 8b8d8f86..fad68a8f 100644 --- a/web3_proxy/src/frontend/mod.rs +++ b/web3_proxy/src/frontend/mod.rs @@ -41,28 +41,102 @@ pub async fn serve(port: u16, proxy_app: Arc) -> anyhow::Result<() .time_to_live(Duration::from_secs(1)) .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default()); + // TODO: read config for if fastest/versus should be available publicly. 
default off + // build our axum Router let app = Router::new() - // routes should be ordered most to least common + // TODO: i think these routes could be done a lot better + // + // HTTP RPC (POST) + // + // public .route("/", post(rpc_proxy_http::proxy_web3_rpc)) + // authenticated with and without trailing slash + .route( + "/rpc/:rpc_key/", + post(rpc_proxy_http::proxy_web3_rpc_with_key), + ) + .route( + "/rpc/:rpc_key", + post(rpc_proxy_http::proxy_web3_rpc_with_key), + ) + // public fastest with and without trailing slash + .route("/fastest/", post(rpc_proxy_http::fastest_proxy_web3_rpc)) + .route("/fastest", post(rpc_proxy_http::fastest_proxy_web3_rpc)) + // authenticated fastest with and without trailing slash + .route( + "/fastest/:rpc_key/", + post(rpc_proxy_http::fastest_proxy_web3_rpc_with_key), + ) + .route( + "/fastest/:rpc_key", + post(rpc_proxy_http::fastest_proxy_web3_rpc_with_key), + ) + // public versus + .route("/versus/", post(rpc_proxy_http::versus_proxy_web3_rpc)) + .route("/versus", post(rpc_proxy_http::versus_proxy_web3_rpc)) + // authenticated versus with and without trailing slash + .route( + "/versus/:rpc_key/", + post(rpc_proxy_http::versus_proxy_web3_rpc_with_key), + ) + .route( + "/versus/:rpc_key", + post(rpc_proxy_http::versus_proxy_web3_rpc_with_key), + ) + // + // Websocket RPC (GET) + // If not an RPC, this will redirect to configurable urls + // + // public .route("/", get(rpc_proxy_ws::websocket_handler)) - .route( - "/rpc/:rpc_key", - post(rpc_proxy_http::proxy_web3_rpc_with_key), - ) + // authenticated with and without trailing slash .route( "/rpc/:rpc_key/", - post(rpc_proxy_http::proxy_web3_rpc_with_key), + get(rpc_proxy_ws::websocket_handler_with_key), ) .route( "/rpc/:rpc_key", get(rpc_proxy_ws::websocket_handler_with_key), ) + // public fastest with and without trailing slash + .route("/fastest/", get(rpc_proxy_ws::fastest_websocket_handler)) + .route("/fastest", get(rpc_proxy_ws::fastest_websocket_handler)) + // authenticated fastest with and without trailing slash .route( - "/rpc/:rpc_key/", - get(rpc_proxy_ws::websocket_handler_with_key), + "/fastest/:rpc_key/", + get(rpc_proxy_ws::fastest_websocket_handler_with_key), ) + .route( + "/fastest/:rpc_key", + get(rpc_proxy_ws::fastest_websocket_handler_with_key), + ) + // public versus + .route( + "/versus/", + get(rpc_proxy_ws::versus_websocket_handler_with_key), + ) + .route( + "/versus", + get(rpc_proxy_ws::versus_websocket_handler_with_key), + ) + // authenticated versus with and without trailing slash + .route( + "/versus/:rpc_key/", + get(rpc_proxy_ws::versus_websocket_handler_with_key), + ) + .route( + "/versus/:rpc_key", + get(rpc_proxy_ws::versus_websocket_handler_with_key), + ) + // + // System things + // .route("/health", get(status::health)) + .route("/status", get(status::status)) + // + // User stuff + // .route("/user/login/:user_address", get(users::user_login_get)) .route( "/user/login/:user_address/:message_eip", @@ -88,9 +162,11 @@ pub async fn serve(port: u16, proxy_app: Arc) -> anyhow::Result<() .route("/user/stats/detailed", get(users::user_stats_detailed_get)) .route("/admin/modify_role", get(admin::admin_change_user_roles)) .route("/user/logout", post(users::user_logout_post)) - .route("/status", get(status::status)) + // + // Axum layers // layers are ordered bottom up // the last layer is first for requests and last for responses + // // Mark the `Authorization` request header as sensitive so it doesn't show in logs 
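            // editorial note (not part of the original diff): the public "/versus" and
            // "/versus/" GET routes registered above point at
            // rpc_proxy_ws::versus_websocket_handler_with_key, which extracts a
            // Path(rpc_key) that those paths never supply; by symmetry with the
            // "/fastest" routes, rpc_proxy_ws::versus_websocket_handler was probably
            // intended there.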
.layer(SetSensitiveRequestHeadersLayer::new(once(AUTHORIZATION))) // handle cors diff --git a/web3_proxy/src/frontend/rpc_proxy_http.rs b/web3_proxy/src/frontend/rpc_proxy_http.rs index 72664812..067546db 100644 --- a/web3_proxy/src/frontend/rpc_proxy_http.rs +++ b/web3_proxy/src/frontend/rpc_proxy_http.rs @@ -2,6 +2,7 @@ use super::authorization::{ip_is_authorized, key_is_authorized}; use super::errors::FrontendResult; +use super::rpc_proxy_ws::ProxyMode; use crate::{app::Web3ProxyApp, jsonrpc::JsonRpcRequestEnum}; use axum::extract::Path; use axum::headers::{Origin, Referer, UserAgent}; @@ -18,9 +19,41 @@ use std::sync::Arc; #[debug_handler] pub async fn proxy_web3_rpc( Extension(app): Extension>, - ClientIp(ip): ClientIp, + ip: ClientIp, origin: Option>, Json(payload): Json, +) -> FrontendResult { + _proxy_web3_rpc(app, ip, origin, payload, ProxyMode::Best).await +} + +#[debug_handler] +pub async fn fastest_proxy_web3_rpc( + Extension(app): Extension>, + ip: ClientIp, + origin: Option>, + Json(payload): Json, +) -> FrontendResult { + // TODO: read the fastest number from params + // TODO: check that the app allows this without authentication + _proxy_web3_rpc(app, ip, origin, payload, ProxyMode::Fastest(0)).await +} + +#[debug_handler] +pub async fn versus_proxy_web3_rpc( + Extension(app): Extension>, + ip: ClientIp, + origin: Option>, + Json(payload): Json, +) -> FrontendResult { + _proxy_web3_rpc(app, ip, origin, payload, ProxyMode::Versus).await +} + +async fn _proxy_web3_rpc( + app: Arc, + ClientIp(ip): ClientIp, + origin: Option>, + payload: JsonRpcRequestEnum, + proxy_mode: ProxyMode, ) -> FrontendResult { // TODO: benchmark spawning this // TODO: do we care about keeping the TypedHeader wrapper? @@ -31,7 +64,7 @@ pub async fn proxy_web3_rpc( let authorization = Arc::new(authorization); let (response, rpcs, _semaphore) = app - .proxy_web3_rpc(authorization, payload) + .proxy_web3_rpc(authorization, payload, proxy_mode) .await .map(|(x, y)| (x, y, semaphore))?; @@ -58,12 +91,82 @@ pub async fn proxy_web3_rpc( #[debug_handler] pub async fn proxy_web3_rpc_with_key( Extension(app): Extension>, - ClientIp(ip): ClientIp, + ip: ClientIp, origin: Option>, referer: Option>, user_agent: Option>, Path(rpc_key): Path, Json(payload): Json, +) -> FrontendResult { + _proxy_web3_rpc_with_key( + app, + ip, + origin, + referer, + user_agent, + rpc_key, + payload, + ProxyMode::Best, + ) + .await +} + +#[debug_handler] +pub async fn fastest_proxy_web3_rpc_with_key( + Extension(app): Extension>, + ip: ClientIp, + origin: Option>, + referer: Option>, + user_agent: Option>, + Path(rpc_key): Path, + Json(payload): Json, +) -> FrontendResult { + _proxy_web3_rpc_with_key( + app, + ip, + origin, + referer, + user_agent, + rpc_key, + payload, + ProxyMode::Fastest(0), + ) + .await +} + +#[debug_handler] +pub async fn versus_proxy_web3_rpc_with_key( + Extension(app): Extension>, + ip: ClientIp, + origin: Option>, + referer: Option>, + user_agent: Option>, + Path(rpc_key): Path, + Json(payload): Json, +) -> FrontendResult { + _proxy_web3_rpc_with_key( + app, + ip, + origin, + referer, + user_agent, + rpc_key, + payload, + ProxyMode::Versus, + ) + .await +} + +#[allow(clippy::too_many_arguments)] +async fn _proxy_web3_rpc_with_key( + app: Arc, + ClientIp(ip): ClientIp, + origin: Option>, + referer: Option>, + user_agent: Option>, + rpc_key: String, + payload: JsonRpcRequestEnum, + proxy_mode: ProxyMode, ) -> FrontendResult { // TODO: DRY w/ proxy_web3_rpc // the request can take a while, so we spawn so that we 
can start serving another request @@ -82,7 +185,7 @@ pub async fn proxy_web3_rpc_with_key( let authorization = Arc::new(authorization); let (response, rpcs, _semaphore) = app - .proxy_web3_rpc(authorization, payload) + .proxy_web3_rpc(authorization, payload, proxy_mode) .await .map(|(x, y)| (x, y, semaphore))?; diff --git a/web3_proxy/src/frontend/rpc_proxy_ws.rs b/web3_proxy/src/frontend/rpc_proxy_ws.rs index b1f70e9f..f031aaf6 100644 --- a/web3_proxy/src/frontend/rpc_proxy_ws.rs +++ b/web3_proxy/src/frontend/rpc_proxy_ws.rs @@ -32,11 +32,60 @@ use serde_json::json; use serde_json::value::to_raw_value; use std::sync::Arc; use std::{str::from_utf8_mut, sync::atomic::AtomicUsize}; +use tokio::sync::{broadcast, OwnedSemaphorePermit, RwLock}; + +#[derive(Copy, Clone)] +pub enum ProxyMode { + /// send to the "best" synced server + Best, + /// send to all synced servers and return the fastest non-error response (reverts do not count as errors here) + Fastest(usize), + /// send to all servers for benchmarking. return the fastest non-error response + Versus, +} /// Public entrypoint for WebSocket JSON-RPC requests. +/// Queries a single server at a time #[debug_handler] pub async fn websocket_handler( Extension(app): Extension>, + ip: ClientIp, + origin: Option>, + ws_upgrade: Option, +) -> FrontendResult { + _websocket_handler(ProxyMode::Best, app, ip, origin, ws_upgrade).await +} + +/// Public entrypoint for WebSocket JSON-RPC requests that uses all synced servers. +/// Queries all synced backends with every request! This might get expensive! +#[debug_handler] +pub async fn fastest_websocket_handler( + Extension(app): Extension>, + ip: ClientIp, + origin: Option>, + ws_upgrade: Option, +) -> FrontendResult { + // TODO: get the fastest number from the url params (default to 0/all) + // TODO: config to disable this + _websocket_handler(ProxyMode::Fastest(0), app, ip, origin, ws_upgrade).await +} + +/// Public entrypoint for WebSocket JSON-RPC requests that uses all synced servers. +/// Queries **all** backends with every request! This might get expensive! 
+#[debug_handler] +pub async fn versus_websocket_handler( + Extension(app): Extension>, + ip: ClientIp, + origin: Option>, + ws_upgrade: Option, +) -> FrontendResult { + // TODO: config to disable this + _websocket_handler(ProxyMode::Versus, app, ip, origin, ws_upgrade).await +} + +async fn _websocket_handler( + proxy_mode: ProxyMode, + app: Arc, ClientIp(ip): ClientIp, origin: Option>, ws_upgrade: Option, @@ -49,7 +98,7 @@ pub async fn websocket_handler( match ws_upgrade { Some(ws) => Ok(ws - .on_upgrade(|socket| proxy_web3_socket(app, authorization, socket)) + .on_upgrade(move |socket| proxy_web3_socket(app, authorization, socket, proxy_mode)) .into_response()), None => { if let Some(redirect) = &app.config.redirect_public_url { @@ -72,12 +121,83 @@ pub async fn websocket_handler( #[debug_handler] pub async fn websocket_handler_with_key( Extension(app): Extension>, - ClientIp(ip): ClientIp, + ip: ClientIp, Path(rpc_key): Path, origin: Option>, referer: Option>, user_agent: Option>, ws_upgrade: Option, +) -> FrontendResult { + _websocket_handler_with_key( + ProxyMode::Best, + app, + ip, + rpc_key, + origin, + referer, + user_agent, + ws_upgrade, + ) + .await +} + +#[debug_handler] +pub async fn fastest_websocket_handler_with_key( + Extension(app): Extension>, + ip: ClientIp, + Path(rpc_key): Path, + origin: Option>, + referer: Option>, + user_agent: Option>, + ws_upgrade: Option, +) -> FrontendResult { + // TODO: get the fastest number from the url params (default to 0/all) + _websocket_handler_with_key( + ProxyMode::Fastest(0), + app, + ip, + rpc_key, + origin, + referer, + user_agent, + ws_upgrade, + ) + .await +} + +#[debug_handler] +pub async fn versus_websocket_handler_with_key( + Extension(app): Extension>, + ip: ClientIp, + Path(rpc_key): Path, + origin: Option>, + referer: Option>, + user_agent: Option>, + ws_upgrade: Option, +) -> FrontendResult { + _websocket_handler_with_key( + ProxyMode::Versus, + app, + ip, + rpc_key, + origin, + referer, + user_agent, + ws_upgrade, + ) + .await +} + +#[allow(clippy::too_many_arguments)] +async fn _websocket_handler_with_key( + proxy_mode: ProxyMode, + app: Arc, + ClientIp(ip): ClientIp, + rpc_key: String, + origin: Option>, + referer: Option>, + user_agent: Option>, + ws_upgrade: Option, ) -> FrontendResult { let rpc_key = rpc_key.parse()?; @@ -96,9 +216,8 @@ pub async fn websocket_handler_with_key( let authorization = Arc::new(authorization); match ws_upgrade { - Some(ws_upgrade) => { - Ok(ws_upgrade.on_upgrade(move |socket| proxy_web3_socket(app, authorization, socket))) - } + Some(ws_upgrade) => Ok(ws_upgrade + .on_upgrade(move |socket| proxy_web3_socket(app, authorization, socket, proxy_mode))), None => { // if no websocket upgrade, this is probably a user loading the url with their browser @@ -107,7 +226,7 @@ pub async fn websocket_handler_with_key( match ( &app.config.redirect_public_url, &app.config.redirect_rpc_key_url, - authorization.checks.rpc_key_id, + authorization.checks.rpc_secret_key_id, ) { (None, None, _) => Err(FrontendErrorResponse::StatusCode( StatusCode::BAD_REQUEST, @@ -120,7 +239,7 @@ pub async fn websocket_handler_with_key( (_, Some(redirect_rpc_key_url), rpc_key_id) => { let reg = Handlebars::new(); - if authorization.checks.rpc_key_id.is_none() { + if authorization.checks.rpc_secret_key_id.is_none() { // i don't think this is possible Err(FrontendErrorResponse::StatusCode( StatusCode::UNAUTHORIZED, @@ -154,6 +273,7 @@ async fn proxy_web3_socket( app: Arc, authorization: Arc, socket: WebSocket, + proxy_mode: 
ProxyMode, ) { // split the websocket so we can read and write concurrently let (ws_tx, ws_rx) = socket.split(); @@ -162,7 +282,13 @@ async fn proxy_web3_socket( let (response_sender, response_receiver) = flume::unbounded::(); tokio::spawn(write_web3_socket(response_receiver, ws_tx)); - tokio::spawn(read_web3_socket(app, authorization, ws_rx, response_sender)); + tokio::spawn(read_web3_socket( + app, + authorization, + ws_rx, + response_sender, + proxy_mode, + )); } /// websockets support a few more methods than http clients @@ -172,8 +298,20 @@ async fn handle_socket_payload( payload: &str, response_sender: &flume::Sender, subscription_count: &AtomicUsize, - subscriptions: &mut HashMap, -) -> Message { + subscriptions: Arc>>, + proxy_mode: ProxyMode, +) -> (Message, Option) { + let (authorization, semaphore) = match authorization.check_again(&app).await { + Ok((a, s)) => (a, s), + Err(err) => { + let (_, err) = err.into_response_parts(); + + let err = serde_json::to_string(&err).expect("to_string should always work here"); + + return (Message::Text(err), None); + } + }; + // TODO: do any clients send batches over websockets? let (id, response) = match serde_json::from_str::(payload) { Ok(json_request) => { @@ -183,6 +321,7 @@ async fn handle_socket_payload( [..] { "eth_subscribe" => { + // TODO: how can we subscribe with proxy_mode? match app .eth_subscribe( authorization.clone(), @@ -194,7 +333,9 @@ async fn handle_socket_payload( { Ok((handle, response)) => { // TODO: better key - subscriptions.insert( + let mut x = subscriptions.write().await; + + x.insert( response .result .as_ref() @@ -218,8 +359,10 @@ async fn handle_socket_payload( let subscription_id = json_request.params.unwrap().to_string(); + let mut x = subscriptions.write().await; + // TODO: is this the right response? - let partial_response = match subscriptions.remove(&subscription_id) { + let partial_response = match x.remove(&subscription_id) { None => false, Some(handle) => { handle.abort(); @@ -227,6 +370,8 @@ async fn handle_socket_payload( } }; + drop(x); + let response = JsonRpcForwardedResponse::from_value(json!(partial_response), id.clone()); @@ -247,7 +392,7 @@ async fn handle_socket_payload( Ok(response.into()) } _ => app - .proxy_web3_rpc(authorization.clone(), json_request.into()) + .proxy_web3_rpc(authorization.clone(), json_request.into(), proxy_mode) .await .map_or_else( |err| match err { @@ -281,9 +426,7 @@ async fn handle_socket_payload( } }; - // TODO: what error should this be? - - Message::Text(response_str) + (Message::Text(response_str), semaphore) } async fn read_web3_socket( @@ -291,60 +434,99 @@ async fn read_web3_socket( authorization: Arc, mut ws_rx: SplitStream, response_sender: flume::Sender, + proxy_mode: ProxyMode, ) { - let mut subscriptions = HashMap::new(); - let subscription_count = AtomicUsize::new(1); + // TODO: need a concurrent hashmap + let subscriptions = Arc::new(RwLock::new(HashMap::new())); + let subscription_count = Arc::new(AtomicUsize::new(1)); - while let Some(Ok(msg)) = ws_rx.next().await { - // TODO: spawn this? - // new message from our client. forward to a backend and then send it through response_tx - let response_msg = match msg { - Message::Text(payload) => { - handle_socket_payload( - app.clone(), - &authorization, - &payload, - &response_sender, - &subscription_count, - &mut subscriptions, - ) - .await + let (close_sender, mut close_receiver) = broadcast::channel(1); + + loop { + tokio::select! 
{ + msg = ws_rx.next() => { + if let Some(Ok(msg)) = msg { + // spawn so that we can serve responses from this loop even faster + // TODO: only do these clones if the msg is text/binary? + let close_sender = close_sender.clone(); + let app = app.clone(); + let authorization = authorization.clone(); + let response_sender = response_sender.clone(); + let subscriptions = subscriptions.clone(); + let subscription_count = subscription_count.clone(); + + let f = async move { + let mut _semaphore = None; + + // new message from our client. forward to a backend and then send it through response_tx + let response_msg = match msg { + Message::Text(payload) => { + let (msg, s) = handle_socket_payload( + app.clone(), + &authorization, + &payload, + &response_sender, + &subscription_count, + subscriptions, + proxy_mode, + ) + .await; + + _semaphore = s; + + msg + } + Message::Ping(x) => { + trace!("ping: {:?}", x); + Message::Pong(x) + } + Message::Pong(x) => { + trace!("pong: {:?}", x); + return; + } + Message::Close(_) => { + info!("closing websocket connection"); + // TODO: do something to close subscriptions? + let _ = close_sender.send(true); + return; + } + Message::Binary(mut payload) => { + let payload = from_utf8_mut(&mut payload).unwrap(); + + let (msg, s) = handle_socket_payload( + app.clone(), + &authorization, + payload, + &response_sender, + &subscription_count, + subscriptions, + proxy_mode, + ) + .await; + + _semaphore = s; + + msg + } + }; + + if response_sender.send_async(response_msg).await.is_err() { + let _ = close_sender.send(true); + return; + }; + + _semaphore = None; + }; + + tokio::spawn(f); + } else { + break; + } } - Message::Ping(x) => { - trace!("ping: {:?}", x); - Message::Pong(x) - } - Message::Pong(x) => { - trace!("pong: {:?}", x); - continue; - } - Message::Close(_) => { - info!("closing websocket connection"); + _ = close_receiver.recv() => { break; } - Message::Binary(mut payload) => { - // TODO: poke rate limit for the user/ip - let payload = from_utf8_mut(&mut payload).unwrap(); - - handle_socket_payload( - app.clone(), - &authorization, - payload, - &response_sender, - &subscription_count, - &mut subscriptions, - ) - .await - } - }; - - match response_sender.send_async(response_msg).await { - Ok(_) => {} - Err(err) => { - error!("{}", err); - break; - } - }; + } } } diff --git a/web3_proxy/src/frontend/status.rs b/web3_proxy/src/frontend/status.rs index 2e4a8198..df7f8bc9 100644 --- a/web3_proxy/src/frontend/status.rs +++ b/web3_proxy/src/frontend/status.rs @@ -4,7 +4,7 @@ //! They will eventually move to another port. use super::{FrontendResponseCache, FrontendResponseCaches}; -use crate::app::Web3ProxyApp; +use crate::app::{Web3ProxyApp, APP_USER_AGENT}; use axum::{http::StatusCode, response::IntoResponse, Extension, Json}; use axum_macros::debug_handler; use serde_json::json; @@ -33,6 +33,7 @@ pub async fn status( .get_with(FrontendResponseCaches::Status, async { // TODO: what else should we include? 
uptime, cache hit rates, cpu load, memory used let body = json!({ + "version": APP_USER_AGENT, "chain_id": app.config.chain_id, "balanced_rpcs": app.balanced_rpcs, "private_rpcs": app.private_rpcs, diff --git a/web3_proxy/src/lib.rs b/web3_proxy/src/lib.rs index 7fc9ff97..c9770319 100644 --- a/web3_proxy/src/lib.rs +++ b/web3_proxy/src/lib.rs @@ -7,6 +7,7 @@ pub mod frontend; pub mod jsonrpc; pub mod metered; pub mod metrics_frontend; +pub mod pagerduty; pub mod rpcs; pub mod user_queries; pub mod user_token; diff --git a/web3_proxy/src/pagerduty.rs b/web3_proxy/src/pagerduty.rs new file mode 100644 index 00000000..9eacbff9 --- /dev/null +++ b/web3_proxy/src/pagerduty.rs @@ -0,0 +1,191 @@ +use crate::config::TopConfig; +use gethostname::gethostname; +use log::{debug, error}; +use pagerduty_rs::eventsv2sync::EventsV2 as PagerdutySyncEventsV2; +use pagerduty_rs::types::{AlertTrigger, AlertTriggerPayload, Event}; +use serde::Serialize; +use std::{ + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, + panic::PanicInfo, +}; +use time::OffsetDateTime; + +/* + + let client = top_config + .as_ref() + .map(|top_config| format!("web3-proxy chain #{}", top_config.app.chain_id)) + .unwrap_or_else(|| format!("web3-proxy w/o chain")); + + let client_url = top_config + .as_ref() + .and_then(|x| x.app.redirect_public_url.clone()); + + panic::set_hook(Box::new(move |x| { + let hostname = gethostname().into_string().unwrap_or("unknown".to_string()); + let panic_msg = format!("{} {:?}", x, x); + + if panic_msg.starts_with("panicked at 'WS Server panic") { + info!("Underlying library {}", panic_msg); + } else { + error!("sending panic to pagerduty: {}", panic_msg); + + let mut s = DefaultHasher::new(); + panic_msg.hash(&mut s); + panic_msg.hash(&mut s); + let dedup_key = s.finish().to_string(); + + let payload = AlertTriggerPayload { + severity: pagerduty_rs::types::Severity::Error, + summary: panic_msg, + source: hostname, + timestamp: None, + component: None, + group: Some("web3-proxy".to_string()), + class: Some("panic".to_string()), + custom_details: None::<()>, + }; + + let event = Event::AlertTrigger(AlertTrigger { + payload, + dedup_key: Some(dedup_key), + images: None, + links: None, + client: Some(client.clone()), + client_url: client_url.clone(), + }); + + if let Err(err) = pagerduty_sync.event(event) { + error!("Failed sending panic to pagerduty: {}", err); + } + } + })); + +*/ + +pub fn panic_handler( + top_config: Option, + pagerduty_sync: &PagerdutySyncEventsV2, + panic_info: &PanicInfo, +) { + let summary = format!("{}", panic_info); + + let details = format!("{:#?}", panic_info); + + if summary.starts_with("panicked at 'WS Server panic") { + // the ethers-rs library panics when websockets disconnect. this isn't a panic we care about reporting + debug!("Underlying library {}", details); + return; + } + + let class = Some("panic".to_string()); + + let alert = if let Some(top_config) = top_config { + pagerduty_alert_for_config( + class, + None, + Some(details), + pagerduty_rs::types::Severity::Critical, + summary, + None, + top_config, + ) + } else { + pagerduty_alert( + None, + class, + None, + None, + None, + Some(details), + pagerduty_rs::types::Severity::Critical, + None, + summary, + None, + ) + }; + + let event = Event::AlertTrigger(alert); + + if let Err(err) = pagerduty_sync.event(event) { + error!("Failed sending alert to pagerduty! 
{:#?}", err); + } +} + +pub fn pagerduty_alert_for_config( + class: Option, + component: Option, + custom_details: Option, + severity: pagerduty_rs::types::Severity, + summary: String, + timestamp: Option, + top_config: TopConfig, +) -> AlertTrigger { + let chain_id = top_config.app.chain_id; + + let client_url = top_config.app.redirect_public_url.clone(); + + pagerduty_alert( + Some(chain_id), + class, + None, + client_url, + component, + custom_details, + severity, + None, + summary, + timestamp, + ) +} + +pub fn pagerduty_alert( + chain_id: Option, + class: Option, + client: Option, + client_url: Option, + component: Option, + custom_details: Option, + severity: pagerduty_rs::types::Severity, + source: Option, + summary: String, + timestamp: Option, +) -> AlertTrigger { + let client = client.unwrap_or_else(|| "web3-proxy".to_string()); + + let group = chain_id.map(|x| format!("chain #{}", x)); + + let source = + source.unwrap_or_else(|| gethostname().into_string().unwrap_or("unknown".to_string())); + + let mut s = DefaultHasher::new(); + // TODO: include severity here? + summary.hash(&mut s); + client.hash(&mut s); + client_url.hash(&mut s); + component.hash(&mut s); + group.hash(&mut s); + class.hash(&mut s); + let dedup_key = s.finish().to_string(); + + let payload = AlertTriggerPayload { + severity, + summary, + source, + timestamp, + component, + group, + class, + custom_details, + }; + + AlertTrigger { + payload, + dedup_key: Some(dedup_key), + images: None, + links: None, + client: Some(client), + client_url: client_url, + } +} diff --git a/web3_proxy/src/rpcs/blockchain.rs b/web3_proxy/src/rpcs/blockchain.rs index 456d21fd..da1c2188 100644 --- a/web3_proxy/src/rpcs/blockchain.rs +++ b/web3_proxy/src/rpcs/blockchain.rs @@ -4,13 +4,13 @@ use super::connections::Web3Connections; use super::transactions::TxStatus; use crate::frontend::authorization::Authorization; use crate::{ - config::BlockAndRpc, jsonrpc::JsonRpcRequest, rpcs::synced_connections::SyncedConnections, + config::BlockAndRpc, jsonrpc::JsonRpcRequest, rpcs::synced_connections::ConsensusConnections, }; use anyhow::Context; use derive_more::From; use ethers::prelude::{Block, TxHash, H256, U64}; use hashbrown::{HashMap, HashSet}; -use log::{debug, warn, Level}; +use log::{debug, error, warn, Level}; use moka::future::Cache; use serde::Serialize; use serde_json::json; @@ -24,7 +24,7 @@ pub type ArcBlock = Arc>; pub type BlockHashesCache = Cache; -/// A block's hash and number. +/// A block and its age. 
#[derive(Clone, Debug, Default, From, Serialize)] pub struct SavedBlock { pub block: ArcBlock, @@ -78,11 +78,6 @@ impl SavedBlock { pub fn number(&self) -> U64 { self.block.number.expect("saved blocks must have a number") } - - /// When the block was received, this node was still syncing - pub fn syncing(&self, allowed_lag: u64) -> bool { - self.age > allowed_lag - } } impl From for SavedBlock { @@ -99,14 +94,18 @@ impl Display for SavedBlock { impl Web3Connections { /// add a block to our mappings and track the heaviest chain - pub async fn save_block(&self, block: &ArcBlock, heaviest_chain: bool) -> anyhow::Result<()> { + pub async fn save_block( + &self, + block: ArcBlock, + heaviest_chain: bool, + ) -> anyhow::Result { // TODO: i think we can rearrange this function to make it faster on the hot path let block_hash = block.hash.as_ref().context("no block hash")?; // skip Block::default() if block_hash.is_zero() { debug!("Skipping block without hash!"); - return Ok(()); + return Ok(block); } let block_num = block.number.as_ref().context("no block num")?; @@ -121,15 +120,17 @@ impl Web3Connections { // this block is very likely already in block_hashes // TODO: use their get_with - self.block_hashes + let block = self + .block_hashes .get_with(*block_hash, async move { block.clone() }) .await; - Ok(()) + Ok(block) } /// Get a block from caches with fallback. /// Will query a specific node or the best available. + /// TODO: return anyhow::Result>? pub async fn block( &self, authorization: &Arc, @@ -138,6 +139,7 @@ impl Web3Connections { ) -> anyhow::Result { // first, try to get the hash from our cache // the cache is set last, so if its here, its everywhere + // TODO: use try_get_with if let Some(block) = self.block_hashes.get(hash) { return Ok(block); } @@ -147,7 +149,7 @@ impl Web3Connections { // TODO: if error, retry? let block: ArcBlock = match rpc { Some(rpc) => rpc - .wait_for_request_handle(authorization, Duration::from_secs(30), false) + .wait_for_request_handle(authorization, Some(Duration::from_secs(30)), false) .await? .request::<_, Option<_>>( "eth_getBlockByHash", @@ -163,9 +165,9 @@ impl Web3Connections { let request: JsonRpcRequest = serde_json::from_value(request)?; // TODO: request_metadata? maybe we should put it in the authorization? - // TODO: don't hard code allowed lag + // TODO: think more about this wait_for_sync let response = self - .try_send_best_upstream_server(60, authorization, request, None, None) + .try_send_best_consensus_head_connection(authorization, request, None, None) .await?; let block = response.result.context("failed fetching block")?; @@ -178,7 +180,7 @@ impl Web3Connections { // the block was fetched using eth_getBlockByHash, so it should have all fields // TODO: fill in heaviest_chain! if the block is old enough, is this definitely true? - self.save_block(&block, false).await?; + let block = self.save_block(block, false).await?; Ok(block) } @@ -197,6 +199,7 @@ impl Web3Connections { } /// Get the heaviest chain's block from cache or backend rpc + /// Caution! If a future block is requested, this might wait forever. Be sure to have a timeout outside of this! 
pub async fn cannonical_block( &self, authorization: &Arc, @@ -206,23 +209,33 @@ impl Web3Connections { // maybe save them during save_block in a blocks_by_number Cache> // if theres multiple, use petgraph to find the one on the main chain (and remove the others if they have enough confirmations) + let mut consensus_head_receiver = self + .watch_consensus_head_receiver + .as_ref() + .context("need new head subscriptions to fetch cannonical_block")? + .clone(); + // be sure the requested block num exists - let head_block_num = self.head_block_num().context("no servers in sync")?; + let mut head_block_num = consensus_head_receiver.borrow_and_update().number; + + loop { + if let Some(head_block_num) = head_block_num { + if num <= &head_block_num { + break; + } + } + + consensus_head_receiver.changed().await?; + + head_block_num = consensus_head_receiver.borrow_and_update().number; + } + + let head_block_num = + head_block_num.expect("we should only get here if we have a head block"); // TODO: geth does 64, erigon does 90k. sometimes we run a mix let archive_needed = num < &(head_block_num - U64::from(64)); - if num > &head_block_num { - // TODO: i'm seeing this a lot when using ethspam. i dont know why though. i thought we delayed publishing - // TODO: instead of error, maybe just sleep and try again? - // TODO: this should be a 401, not a 500 - return Err(anyhow::anyhow!( - "Head block is #{}, but #{} was requested", - head_block_num, - num - )); - } - // try to get the hash from our cache // deref to not keep the lock open if let Some(block_hash) = self.block_numbers.get(num) { @@ -240,16 +253,21 @@ impl Web3Connections { // TODO: if error, retry? // TODO: request_metadata or authorization? + // we don't actually set min_block_needed here because all nodes have all blocks let response = self - .try_send_best_upstream_server(60, authorization, request, None, Some(num)) + .try_send_best_consensus_head_connection(authorization, request, None, None) .await?; - let raw_block = response.result.context("no block result")?; + if let Some(err) = response.error { + debug!("could not find canonical block {}: {:?}", num, err); + } + + let raw_block = response.result.context("no cannonical block result")?; let block: ArcBlock = serde_json::from_str(raw_block.get())?; // the block was fetched using eth_getBlockByNumber, so it should have all fields and be on the heaviest chain - self.save_block(&block, true).await?; + let block = self.save_block(block, true).await?; Ok((block, archive_needed)) } @@ -265,7 +283,7 @@ impl Web3Connections { ) -> anyhow::Result<()> { // TODO: indexmap or hashmap? what hasher? with_capacity? // TODO: this will grow unbounded. prune old heads on this at the same time we prune the graph? - let mut connection_heads = HashMap::new(); + let mut connection_heads = ConsensusFinder::default(); while let Ok((new_block, rpc)) = block_receiver.recv_async().await { let new_block = new_block.map(Into::into); @@ -287,7 +305,7 @@ impl Web3Connections { } } - // TODO: if there was an error, we should return it + // TODO: if there was an error, should we return it instead of an Ok? 
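+ // flume's recv_async only returns Err once every block_sender has been dropped,
+ // so reaching this point means all of the subscription tasks have exited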
warn!("block_receiver exited!"); Ok(()) @@ -299,327 +317,611 @@ impl Web3Connections { pub(crate) async fn process_block_from_rpc( &self, authorization: &Arc, - connection_heads: &mut HashMap, + consensus_finder: &mut ConsensusFinder, rpc_head_block: Option, rpc: Arc, head_block_sender: &watch::Sender, pending_tx_sender: &Option>, ) -> anyhow::Result<()> { - // add the rpc's block to connection_heads, or remove the rpc from connection_heads - let rpc_head_block = match rpc_head_block { - Some(rpc_head_block) => { - // we don't know if its on the heaviest chain yet - self.save_block(&rpc_head_block.block, false).await?; - - // TODO: don't default to 60. different chains are differen - if rpc_head_block.syncing(60) { - if connection_heads.remove(&rpc.name).is_some() { - warn!("{} is behind by {} seconds", &rpc.name, rpc_head_block.age); - } else { - // we didn't remove anything and this block is old. exit early - return Ok(()); - }; - - None - } else { - let rpc_head_hash = rpc_head_block.hash(); - - if let Some(prev_hash) = - connection_heads.insert(rpc.name.to_owned(), rpc_head_hash) - { - if prev_hash == rpc_head_hash { - // this block was already sent by this node. return early - return Ok(()); - } - } - - // TODO: should we just keep the ArcBlock here? - Some(rpc_head_block) - } - } - None => { - // // trace!(%rpc, "Block without number or hash!"); - - if connection_heads.remove(&rpc.name).is_none() { - // this connection was already removed. - // return early. no need to process synced connections - return Ok(()); - } - - None - } - }; - - // iterate the known heads to find the highest_work_block - let mut checked_heads = HashSet::new(); - let mut highest_num_block: Option = None; - for (conn_name, connection_head_hash) in connection_heads.iter() { - if checked_heads.contains(connection_head_hash) { - // we already checked this head from another rpc - continue; - } - // don't check the same hash multiple times - checked_heads.insert(connection_head_hash); - - let conn_head_block = if let Some(x) = self.block_hashes.get(connection_head_hash) { - x - } else { - // TODO: why does this happen?!?! seems to only happen with uncled blocks - // TODO: maybe we should do get_with? - // TODO: maybe we should just continue. this only seems to happen when an older block is received - warn!("Missing connection_head_block in block_hashes. Fetching now. hash={}. other={}. rpc={}", connection_head_hash, conn_name, rpc); - - // this option should always be populated - let conn_rpc = self.conns.get(conn_name); - - match self - .block(authorization, connection_head_hash, conn_rpc) - .await - { - Ok(block) => block, - Err(err) => { - warn!("Processing {}. Failed fetching connection_head_block for block_hashes. {} head hash={}. err={:?}", rpc, conn_name, connection_head_hash, err); - continue; - } - } - }; - - match &conn_head_block.number { - None => { - panic!("block is missing number. this is a bug"); - } - Some(conn_head_num) => { - // if this is the first block we've tried - // or if this rpc's newest block has a higher number - // we used to check total difficulty, but that isn't a thing anymore - if highest_num_block.is_none() - || conn_head_num - > highest_num_block - .as_ref() - .expect("there should always be a block here") - .number - .as_ref() - .expect("there should always be number here") - { - highest_num_block = Some(conn_head_block); - } - } - } + // TODO: how should we handle an error here? + if !consensus_finder + .update_rpc(rpc_head_block.clone(), rpc.clone(), self) + .await? 
+ { + // nothing changed. no need + return Ok(()); } - if let Some(mut maybe_head_block) = highest_num_block { - // track rpcs on this heaviest chain so we can build a new SyncedConnections - let mut highest_rpcs = HashSet::<&String>::new(); - // a running total of the soft limits covered by the rpcs that agree on the head block - let mut highest_rpcs_sum_soft_limit: u32 = 0; - // TODO: also track highest_rpcs_sum_hard_limit? llama doesn't need this, so it can wait + let new_synced_connections = consensus_finder + .best_consensus_connections(authorization, self) + .await; - // check the highest work block for a set of rpcs that can serve our request load - // if it doesn't have enough rpcs for our request load, check the parent block - // TODO: loop for how many parent blocks? we don't want to serve blocks that are too far behind. probably different per chain - // TODO: this loop is pretty long. any way to clean up this code? - for _ in 0..3 { - let maybe_head_hash = maybe_head_block - .hash - .as_ref() - .expect("blocks here always need hashes"); + // TODO: what should we do if the block number of new_synced_connections is < old_synced_connections? wait? - // find all rpcs with maybe_head_block as their current head - for (conn_name, conn_head_hash) in connection_heads.iter() { - if conn_head_hash != maybe_head_hash { - // connection is not on the desired block - continue; - } - if highest_rpcs.contains(conn_name) { - // connection is on a child block - continue; + let includes_backups = new_synced_connections.includes_backups; + let consensus_head_block = new_synced_connections.head_block.clone(); + let num_consensus_rpcs = new_synced_connections.num_conns(); + let num_checked_rpcs = new_synced_connections.num_checked_conns; + let num_active_rpcs = consensus_finder.all.rpc_name_to_hash.len(); + let total_rpcs = self.conns.len(); + + let old_consensus_head_connections = self + .watch_consensus_connections_sender + .send_replace(Arc::new(new_synced_connections)); + + let includes_backups_str = if includes_backups { "B " } else { "" }; + + if let Some(consensus_saved_block) = consensus_head_block { + match &old_consensus_head_connections.head_block { + None => { + debug!( + "first {}{}/{}/{}/{} block={}, rpc={}", + includes_backups_str, + num_consensus_rpcs, + num_checked_rpcs, + num_active_rpcs, + total_rpcs, + consensus_saved_block, + rpc, + ); + + if includes_backups { + // TODO: what else should be in this error? + warn!("Backup RPCs are in use!"); } - if let Some(rpc) = self.conns.get(conn_name) { - highest_rpcs.insert(conn_name); - highest_rpcs_sum_soft_limit += rpc.soft_limit; - } else { - warn!("connection missing") - } + let consensus_head_block = + self.save_block(consensus_saved_block.block, true).await?; + + head_block_sender + .send(consensus_head_block) + .context("head_block_sender sending consensus_head_block")?; } + Some(old_head_block) => { + // TODO: do this log item better + let rpc_head_str = rpc_head_block + .map(|x| x.to_string()) + .unwrap_or_else(|| "None".to_string()); - if highest_rpcs_sum_soft_limit < self.min_sum_soft_limit - || highest_rpcs.len() < self.min_head_rpcs - { - // not enough rpcs yet. 
check the parent - if let Some(parent_block) = self.block_hashes.get(&maybe_head_block.parent_hash) - { - // // trace!( - // child=%maybe_head_hash, parent=%parent_block.hash.unwrap(), "avoiding thundering herd", - // ); - - maybe_head_block = parent_block; - continue; - } else { - // TODO: this message - warn!( - "soft limit {}/{} from {}/{} rpcs: {}%", - highest_rpcs_sum_soft_limit, - self.min_sum_soft_limit, - highest_rpcs.len(), - self.min_head_rpcs, - highest_rpcs_sum_soft_limit * 100 / self.min_sum_soft_limit - ); - break; - } - } - } - - // TODO: if consensus_head_rpcs.is_empty, try another method of finding the head block - - let num_connection_heads = connection_heads.len(); - let total_conns = self.conns.len(); - - // we've done all the searching for the heaviest block that we can - if highest_rpcs.is_empty() { - // if we get here, something is wrong. clear synced connections - let empty_synced_connections = SyncedConnections::default(); - - let _ = self - .synced_connections - .swap(Arc::new(empty_synced_connections)); - - // TODO: log different things depending on old_synced_connections - warn!( - "Processing {}. no consensus head! {}/{}/{}", - rpc, 0, num_connection_heads, total_conns - ); - } else { - // // trace!(?highest_rpcs); - - // TODO: if maybe_head_block.time() is old, ignore it - - // success! this block has enough soft limit and nodes on it (or on later blocks) - let conns: Vec> = highest_rpcs - .into_iter() - .filter_map(|conn_name| self.conns.get(conn_name).cloned()) - .collect(); - - // TODO: DEBUG only check - let _ = maybe_head_block - .hash - .expect("head blocks always have hashes"); - let _ = maybe_head_block - .number - .expect("head blocks always have numbers"); - - let num_consensus_rpcs = conns.len(); - - let consensus_head_block: SavedBlock = maybe_head_block.into(); - - let new_synced_connections = SyncedConnections { - head_block: Some(consensus_head_block.clone()), - conns, - }; - - let old_synced_connections = self - .synced_connections - .swap(Arc::new(new_synced_connections)); - - // TODO: if the rpc_head_block != consensus_head_block, log something? - match &old_synced_connections.head_block { - None => { - debug!( - "first {}/{}/{} block={}, rpc={}", - num_consensus_rpcs, - num_connection_heads, - total_conns, - consensus_head_block, - rpc - ); - - self.save_block(&consensus_head_block.block, true).await?; - - head_block_sender - .send(consensus_head_block.block) - .context("head_block_sender sending consensus_head_block")?; - } - Some(old_head_block) => { - // TODO: do this log item better - let rpc_head_str = rpc_head_block - .map(|x| x.to_string()) - .unwrap_or_else(|| "None".to_string()); - - match consensus_head_block.number().cmp(&old_head_block.number()) { - Ordering::Equal => { - // TODO: if rpc_block_id != consensus_head_block, do a different log? - - // multiple blocks with the same fork! - if consensus_head_block.hash() == old_head_block.hash() { - // no change in hash. 
no need to use head_block_sender - debug!( - "con {}/{}/{} con_head={} rpc_head={} rpc={}", - num_consensus_rpcs, - num_connection_heads, - total_conns, - consensus_head_block, - rpc_head_str, - rpc, - ) - } else { - // hash changed - debug!( - "unc {}/{}/{} con_head={} old={} rpc_head={} rpc={}", - num_consensus_rpcs, - num_connection_heads, - total_conns, - consensus_head_block, - old_head_block, - rpc_head_str, - rpc, - ); - - self.save_block(&consensus_head_block.block, true) - .await - .context("save consensus_head_block as heaviest chain")?; - - head_block_sender.send(consensus_head_block.block).context( - "head_block_sender sending consensus_head_block", - )?; - } - } - Ordering::Less => { - // this is unlikely but possible - // TODO: better log - warn!("chain rolled back {}/{}/{} con_head={} old_head={} rpc_head={} rpc={}", num_consensus_rpcs, num_connection_heads, total_conns, consensus_head_block, old_head_block, rpc_head_str, rpc); - - // TODO: tell save_block to remove any higher block numbers from the cache. not needed because we have other checks on requested blocks being > head, but still seems slike a good idea - self.save_block(&consensus_head_block.block, true) - .await - .context( - "save_block sending consensus_head_block as heaviest chain", - )?; - - head_block_sender - .send(consensus_head_block.block) - .context("head_block_sender sending consensus_head_block")?; - } - Ordering::Greater => { + match consensus_saved_block.number().cmp(&old_head_block.number()) { + Ordering::Equal => { + // multiple blocks with the same fork! + if consensus_saved_block.hash() == old_head_block.hash() { + // no change in hash. no need to use head_block_sender debug!( - "new {}/{}/{} con_head={} rpc_head={} rpc={}", + "con {}{}/{}/{}/{} con={} rpc={}@{}", + includes_backups_str, num_consensus_rpcs, - num_connection_heads, - total_conns, - consensus_head_block, + num_checked_rpcs, + num_active_rpcs, + total_rpcs, + consensus_saved_block, + rpc, + rpc_head_str, + ) + } else { + // hash changed + + if includes_backups { + // TODO: what else should be in this error? + warn!("Backup RPCs are in use!"); + } + + debug!( + "unc {}{}/{}/{}/{} con_head={} old={} rpc={}@{}", + includes_backups_str, + num_consensus_rpcs, + num_checked_rpcs, + num_active_rpcs, + total_rpcs, + consensus_saved_block, + old_head_block, + rpc, rpc_head_str, - rpc ); - self.save_block(&consensus_head_block.block, true).await?; + let consensus_head_block = self + .save_block(consensus_saved_block.block, true) + .await + .context("save consensus_head_block as heaviest chain")?; - head_block_sender.send(consensus_head_block.block)?; + head_block_sender + .send(consensus_head_block) + .context("head_block_sender sending consensus_head_block")?; } } + Ordering::Less => { + // this is unlikely but possible + // TODO: better log + warn!( + "chain rolled back {}{}/{}/{}/{} con={} old={} rpc={}@{}", + includes_backups_str, + num_consensus_rpcs, + num_checked_rpcs, + num_active_rpcs, + total_rpcs, + consensus_saved_block, + old_head_block, + rpc, + rpc_head_str, + ); + + if includes_backups { + // TODO: what else should be in this error? + warn!("Backup RPCs are in use!"); + } + + // TODO: tell save_block to remove any higher block numbers from the cache. 
not needed because we have other checks on requested blocks being > head, but still seems like a good idea + let consensus_head_block = self + .save_block(consensus_saved_block.block, true) + .await + .context( + "save_block sending consensus_head_block as heaviest chain", + )?; + + head_block_sender + .send(consensus_head_block) + .context("head_block_sender sending consensus_head_block")?; + } + Ordering::Greater => { + debug!( + "new {}{}/{}/{}/{} con={} rpc={}@{}", + includes_backups_str, + num_consensus_rpcs, + num_checked_rpcs, + num_active_rpcs, + total_rpcs, + consensus_saved_block, + rpc, + rpc_head_str, + ); + + if includes_backups { + // TODO: what else should be in this error? + warn!("Backup RPCs are in use!"); + } + + let consensus_head_block = + self.save_block(consensus_saved_block.block, true).await?; + + head_block_sender.send(consensus_head_block)?; + } } } } + } else { + // TODO: do this log item better + let rpc_head_str = rpc_head_block + .map(|x| x.to_string()) + .unwrap_or_else(|| "None".to_string()); + + if num_checked_rpcs >= self.min_head_rpcs { + error!( + "non {}{}/{}/{}/{} rpc={}@{}", + includes_backups_str, + num_consensus_rpcs, + num_checked_rpcs, + num_active_rpcs, + total_rpcs, + rpc, + rpc_head_str, + ); + } else { + debug!( + "non {}{}/{}/{}/{} rpc={}@{}", + includes_backups_str, + num_consensus_rpcs, + num_checked_rpcs, + num_active_rpcs, + total_rpcs, + rpc, + rpc_head_str, + ); + } } Ok(()) } } + +struct ConnectionsGroup { + /// TODO: this group might not actually include backups, but they were at least checked + includes_backups: bool, + rpc_name_to_hash: HashMap, +} + +impl ConnectionsGroup { + fn new(with_backups: bool) -> Self { + Self { + includes_backups: with_backups, + rpc_name_to_hash: Default::default(), + } + } + + fn without_backups() -> Self { + Self::new(false) + } + + fn with_backups() -> Self { + Self::new(true) + } + + fn remove(&mut self, rpc: &Web3Connection) -> Option { + self.rpc_name_to_hash.remove(rpc.name.as_str()) + } + + fn insert(&mut self, rpc: &Web3Connection, block_hash: H256) -> Option { + self.rpc_name_to_hash.insert(rpc.name.clone(), block_hash) + } + + // TODO: i don't love having this here. move to web3_connections? + async fn get_block_from_rpc( + &self, + rpc_name: &str, + hash: &H256, + authorization: &Arc, + web3_connections: &Web3Connections, + ) -> anyhow::Result { + // // TODO: why does this happen?!?! seems to only happen with uncled blocks + // // TODO: maybe we should do try_get_with? + // // TODO: maybe we should just continue. this only seems to happen when an older block is received + // warn!( + // "Missing connection_head_block in block_hashes. Fetching now. hash={}. other={}", + // connection_head_hash, conn_name + // ); + + // this option should almost always be populated. if the connection reconnects at a bad time it might not be available though + let rpc = web3_connections.conns.get(rpc_name); + + web3_connections.block(authorization, hash, rpc).await + } + + // TODO: do this during insert/remove?
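+ // scans each distinct head hash once (tracked in `checked_heads` below); blocks
+ // missing from the local cache are refetched, preferring the rpc that advertised them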
+ pub(self) async fn highest_block( + &self, + authorization: &Arc, + web3_connections: &Web3Connections, + ) -> Option { + let mut checked_heads = HashSet::with_capacity(self.rpc_name_to_hash.len()); + let mut highest_block = None::; + + for (rpc_name, rpc_head_hash) in self.rpc_name_to_hash.iter() { + // don't waste time checking the same hash multiple times + if checked_heads.contains(rpc_head_hash) { + continue; + } + + let rpc_block = match self + .get_block_from_rpc(rpc_name, rpc_head_hash, authorization, web3_connections) + .await + { + Ok(x) => x, + Err(err) => { + warn!( + "failed getting block {} from {} while finding highest block number: {:?}", + rpc_head_hash, rpc_name, err, + ); + continue; + } + }; + + checked_heads.insert(rpc_head_hash); + + // if this is the first block we've tried + // or if this rpc's newest block has a higher number + // we used to check total difficulty, but that isn't a thing anymore on ETH + // TODO: we still need total difficulty on some other PoW chains. whats annoying is it isn't considered part of the "block header" just the block. so websockets don't return it + let highest_num = highest_block + .as_ref() + .map(|x| x.number.expect("blocks here should always have a number")); + let rpc_num = rpc_block.as_ref().number; + + if rpc_num > highest_num { + highest_block = Some(rpc_block); + } + } + + highest_block + } + + pub(self) async fn consensus_head_connections( + &self, + authorization: &Arc, + web3_connections: &Web3Connections, + ) -> anyhow::Result { + let mut maybe_head_block = match self.highest_block(authorization, web3_connections).await { + None => return Err(anyhow::anyhow!("No blocks known")), + Some(x) => x, + }; + + let num_known = self.rpc_name_to_hash.len(); + + // track rpcs on this heaviest chain so we can build a new ConsensusConnections + let mut highest_rpcs = HashSet::<&str>::new(); + // a running total of the soft limits covered by the rpcs that agree on the head block + let mut highest_rpcs_sum_soft_limit: u32 = 0; + // TODO: also track highest_rpcs_sum_hard_limit? llama doesn't need this, so it can wait + + // check the highest work block for a set of rpcs that can serve our request load + // if it doesn't have enough rpcs for our request load, check the parent block + // TODO: loop for how many parent blocks? we don't want to serve blocks that are too far behind. probably different per chain + // TODO: this loop is pretty long. any way to clean up this code? + for _ in 0..6 { + let maybe_head_hash = maybe_head_block + .hash + .as_ref() + .expect("blocks here always need hashes"); + + // find all rpcs with maybe_head_block as their current head + for (rpc_name, rpc_head_hash) in self.rpc_name_to_hash.iter() { + if rpc_head_hash != maybe_head_hash { + // connection is not on the desired block + continue; + } + if highest_rpcs.contains(rpc_name.as_str()) { + // connection is on a child block + continue; + } + + if let Some(rpc) = web3_connections.conns.get(rpc_name.as_str()) { + highest_rpcs.insert(rpc_name); + highest_rpcs_sum_soft_limit += rpc.soft_limit; + } else { + // i don't think this is an error. i think its just if a reconnect is currently happening + warn!("connection missing: {}", rpc_name); + } + } + + if highest_rpcs_sum_soft_limit >= web3_connections.min_sum_soft_limit + && highest_rpcs.len() >= web3_connections.min_head_rpcs + { + // we have enough servers with enough requests + break; + } + + // not enough rpcs yet. 
check the parent block + if let Some(parent_block) = web3_connections + .block_hashes + .get(&maybe_head_block.parent_hash) + { + // trace!( + // child=%maybe_head_hash, parent=%parent_block.hash.unwrap(), "avoiding thundering herd", + // ); + + maybe_head_block = parent_block; + continue; + } else { + if num_known < web3_connections.min_head_rpcs { + return Err(anyhow::anyhow!( + "not enough rpcs connected: {}/{}/{}", + highest_rpcs.len(), + num_known, + web3_connections.min_head_rpcs, + )); + } else { + let soft_limit_percent = (highest_rpcs_sum_soft_limit as f32 + / web3_connections.min_sum_soft_limit as f32) + * 100.0; + + return Err(anyhow::anyhow!( + "ran out of parents to check. rpcs {}/{}/{}. soft limit: {:.2}% ({}/{})", + highest_rpcs.len(), + num_known, + web3_connections.min_head_rpcs, + soft_limit_percent, + highest_rpcs_sum_soft_limit, + web3_connections.min_sum_soft_limit, + )); + } + } + } + + // TODO: if consensus_head_rpcs.is_empty, try another method of finding the head block. will need to change the return Err above into breaks. + + // we've done all the searching for the heaviest block that we can + if highest_rpcs.len() < web3_connections.min_head_rpcs + || highest_rpcs_sum_soft_limit < web3_connections.min_sum_soft_limit + { + // if we get here, not enough servers are synced. return an error + let soft_limit_percent = (highest_rpcs_sum_soft_limit as f32 + / web3_connections.min_sum_soft_limit as f32) + * 100.0; + + return Err(anyhow::anyhow!( + "Not enough resources. rpcs {}/{}/{}. soft limit: {:.2}% ({}/{})", + highest_rpcs.len(), + num_known, + web3_connections.min_head_rpcs, + soft_limit_percent, + highest_rpcs_sum_soft_limit, + web3_connections.min_sum_soft_limit, + )); + } + + // success! this block has enough soft limit and nodes on it (or on later blocks) + let conns: Vec> = highest_rpcs + .into_iter() + .filter_map(|conn_name| web3_connections.conns.get(conn_name).cloned()) + .collect(); + + // TODO: DEBUG only check + let _ = maybe_head_block + .hash + .expect("head blocks always have hashes"); + let _ = maybe_head_block + .number + .expect("head blocks always have numbers"); + + let consensus_head_block: SavedBlock = maybe_head_block.into(); + + Ok(ConsensusConnections { + head_block: Some(consensus_head_block), + conns, + num_checked_conns: self.rpc_name_to_hash.len(), + includes_backups: self.includes_backups, + }) + } +} + +/// A ConsensusConnections builder that tracks all connection heads across multiple groups of servers +pub struct ConsensusFinder { + /// only main servers + main: ConnectionsGroup, + /// main and backup servers + all: ConnectionsGroup, +} + +impl Default for ConsensusFinder { + fn default() -> Self { + Self { + main: ConnectionsGroup::without_backups(), + all: ConnectionsGroup::with_backups(), + } + } +} + +impl ConsensusFinder { + fn remove(&mut self, rpc: &Web3Connection) -> Option { + // TODO: should we have multiple backup tiers? (remote datacenters vs third party) + if !rpc.backup { + self.main.remove(rpc); + } + self.all.remove(rpc) + } + + fn insert(&mut self, rpc: &Web3Connection, new_hash: H256) -> Option { + // TODO: should we have multiple backup tiers? (remote datacenters vs third party) + if !rpc.backup { + self.main.insert(rpc, new_hash); + } + self.all.insert(rpc, new_hash) + } + + /// Update our tracking of the rpc and return true if something changed + async fn update_rpc( + &mut self, + rpc_head_block: Option, + rpc: Arc, + // we need this so we can save the block to caches. i don't like it though.
maybe we should use a lazy_static Cache wrapper that has a "save_block" method? i generally dislike globals but i also dislike all the types having to pass each other around + web3_connections: &Web3Connections, + ) -> anyhow::Result { + // add the rpc's block to connection_heads, or remove the rpc from connection_heads + let changed = match rpc_head_block { + Some(mut rpc_head_block) => { + // we don't know if its on the heaviest chain yet + rpc_head_block.block = web3_connections + .save_block(rpc_head_block.block, false) + .await?; + + // we used to remove here if the block was too far behind. but it just made things more complicated + + let rpc_head_hash = rpc_head_block.hash(); + + if let Some(prev_hash) = self.insert(&rpc, rpc_head_hash) { + if prev_hash == rpc_head_hash { + // this block was already sent by this rpc. return early + false + } else { + // new block for this rpc + true + } + } else { + // first block for this rpc + true + } + } + None => { + if self.remove(&rpc).is_none() { + // this rpc was already removed + false + } else { + // rpc head changed from being synced to not + true + } + } + }; + + Ok(changed) + } + + // TODO: this could definitely be cleaner. i don't like the error handling/unwrapping + async fn best_consensus_connections( + &mut self, + authorization: &Arc, + web3_connections: &Web3Connections, + ) -> ConsensusConnections { + let highest_block_num = match self + .all + .highest_block(authorization, web3_connections) + .await + { + None => { + return ConsensusConnections::default(); + } + Some(x) => x.number.expect("blocks here should always have a number"), + }; + + // TODO: also needs to be not less than our current head + let mut min_block_num = highest_block_num.saturating_sub(U64::from(5)); + + // we also want to be sure we don't ever go backwards! + if let Some(current_consensus_head_num) = web3_connections.head_block_num() { + min_block_num = min_block_num.max(current_consensus_head_num); + } + + // TODO: pass `min_block_num` to consensus_head_connections? + let consensus_head_for_main = self + .main + .consensus_head_connections(authorization, web3_connections) + .await + .map_err(|err| err.context("cannot use main group")); + + let consensus_num_for_main = consensus_head_for_main + .as_ref() + .ok() + .map(|x| x.head_block.as_ref().unwrap().number()); + + if let Some(consensus_num_for_main) = consensus_num_for_main { + if consensus_num_for_main >= min_block_num { + return consensus_head_for_main.unwrap(); + } + } + + // TODO: pass `min_block_num` to consensus_head_connections? + let consensus_connections_for_all = match self + .all + .consensus_head_connections(authorization, web3_connections) + .await + { + Err(err) => { + if self.all.rpc_name_to_hash.len() < web3_connections.min_head_rpcs { + debug!("No consensus head yet: {}", err); + } + return ConsensusConnections::default(); + } + Ok(x) => x, + }; + + let consensus_num_for_all = consensus_connections_for_all + .head_block + .as_ref() + .map(|x| x.number()); + + if consensus_num_for_all > consensus_num_for_main { + if consensus_num_for_all < Some(min_block_num) { + // TODO: this should have an alarm in sentry + error!("CONSENSUS HEAD w/ BACKUP NODES IS VERY OLD!"); + } + consensus_connections_for_all + } else { + if let Ok(x) = consensus_head_for_main { + error!("CONSENSUS HEAD IS VERY OLD! Backup RPCs did not improve this situation"); + x + } else { + // TODO: i don't think we need this error.
and i doubt we'll ever even get here + error!("NO CONSENSUS HEAD!"); + ConsensusConnections::default() + } + } + } +} diff --git a/web3_proxy/src/rpcs/connection.rs b/web3_proxy/src/rpcs/connection.rs index a4e83a76..99fc3cd1 100644 --- a/web3_proxy/src/rpcs/connection.rs +++ b/web3_proxy/src/rpcs/connection.rs @@ -24,22 +24,22 @@ use std::sync::atomic::{self, AtomicU32, AtomicU64}; use std::{cmp::Ordering, sync::Arc}; use thread_fast_rng::rand::Rng; use thread_fast_rng::thread_fast_rng; -use tokio::sync::{broadcast, oneshot, RwLock as AsyncRwLock}; +use tokio::sync::{broadcast, oneshot, watch, RwLock as AsyncRwLock}; use tokio::time::{interval, sleep, sleep_until, timeout, Duration, Instant, MissedTickBehavior}; // TODO: maybe provider state should have the block data limit in it. but it is inside an async lock and we can't Serialize then #[derive(Clone, Debug)] pub enum ProviderState { None, - NotReady(Arc), - Ready(Arc), + Connecting(Arc), + Connected(Arc), } impl ProviderState { pub async fn provider(&self, allow_not_ready: bool) -> Option<&Arc> { match self { ProviderState::None => None, - ProviderState::NotReady(x) => { + ProviderState::Connecting(x) => { if allow_not_ready { Some(x) } else { @@ -47,7 +47,7 @@ impl ProviderState { None } } - ProviderState::Ready(x) => { + ProviderState::Connected(x) => { if x.ready() { Some(x) } else { @@ -63,7 +63,6 @@ pub struct Web3Connection { pub name: String, pub display_name: Option, pub db_conn: Option, - pub(super) allowed_lag: u64, /// TODO: can we get this from the provider? do we even need it? pub(super) url: String, /// Some connections use an http_client. we keep a clone for reconnecting @@ -77,6 +76,8 @@ pub struct Web3Connection { /// provider is in a RwLock so that we can replace it if re-connecting /// it is an async lock because we hold it open across awaits pub(super) provider_state: AsyncRwLock, + /// keep track of hard limits + pub(super) hard_limit_until: Option>, /// rate limits are stored in a central redis so that multiple proxies can share their rate limits /// We do not use the deferred rate limiter because going over limits would cause errors pub(super) hard_limit: Option, @@ -84,6 +85,8 @@ pub struct Web3Connection { pub(super) soft_limit: u32, /// use web3 queries to find the block data limit for archive/pruned nodes pub(super) automatic_block_limit: bool, + /// only use this rpc if everything else is lagging too far. this allows us to ignore fast but very low limit rpcs + pub(super) backup: bool, /// TODO: have an enum for this so that "no limit" prints pretty?
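+ /// a 0 here appears to double as "not detected yet" (automatic_block_limit keys off
+ /// it) and u64::MAX as "full archive", judging by the probes in check_block_data_limit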
pub(super) block_data_limit: AtomicU64, /// Lower tiers are higher priority when sending requests @@ -99,7 +102,6 @@ impl Web3Connection { #[allow(clippy::too_many_arguments)] pub async fn spawn( name: String, - allowed_lag: u64, display_name: Option, chain_id: u64, db_conn: Option, @@ -111,6 +113,7 @@ impl Web3Connection { hard_limit: Option<(u64, RedisPool)>, // TODO: think more about this type soft_limit: u32, + backup: bool, block_data_limit: Option, block_map: BlockHashesCache, block_sender: Option>, @@ -135,9 +138,18 @@ impl Web3Connection { let automatic_block_limit = (block_data_limit.load(atomic::Ordering::Acquire) == 0) && block_sender.is_some(); + // track hard limit until on backup servers (which might surprise us with rate limit changes) + // and track on servers that have a configured hard limit + let hard_limit_until = if backup || hard_limit.is_some() { + let (sender, _) = watch::channel(Instant::now()); + + Some(sender) + } else { + None + }; + let new_connection = Self { name, - allowed_lag, db_conn: db_conn.clone(), display_name, http_client, @@ -147,8 +159,10 @@ impl Web3Connection { internal_requests: 0.into(), provider_state: AsyncRwLock::new(ProviderState::None), hard_limit, + hard_limit_until, soft_limit, automatic_block_limit, + backup, block_data_limit, head_block: RwLock::new(Default::default()), tier, @@ -191,25 +205,7 @@ impl Web3Connection { return Ok(None); } - // check if we are synced - let head_block: ArcBlock = self - .wait_for_request_handle(authorization, Duration::from_secs(30), true) - .await? - .request::<_, Option<_>>( - "eth_getBlockByNumber", - &json!(("latest", false)), - // error here are expected, so keep the level low - Level::Warn.into(), - ) - .await? - .context("no block during check_block_data_limit!")?; - - if SavedBlock::from(head_block).syncing(60) { - // if the node is syncing, we can't check its block data limit - return Ok(None); - } - - // TODO: add SavedBlock to self? probably best not to. we might not get marked Ready + // TODO: check eth_syncing. if it is not false, return Ok(None) let mut limit = None; @@ -217,7 +213,7 @@ impl Web3Connection { // TODO: start at 0 or 1? for block_data_limit in [0, 32, 64, 128, 256, 512, 1024, 90_000, u64::MAX] { let handle = self - .wait_for_request_handle(authorization, Duration::from_secs(30), true) + .wait_for_request_handle(authorization, None, true) .await?; let head_block_num_future = handle.request::, U256>( @@ -243,7 +239,7 @@ impl Web3Connection { // TODO: wait for the handle BEFORE we check the current block number. it might be delayed too! // TODO: what should the request be? let handle = self - .wait_for_request_handle(authorization, Duration::from_secs(30), true) + .wait_for_request_handle(authorization, None, true) .await?; let archive_result: Result = handle @@ -292,26 +288,10 @@ impl Web3Connection { self.block_data_limit.load(atomic::Ordering::Acquire).into() } - pub fn syncing(&self, allowed_lag: u64) -> bool { - match self.head_block.read().clone() { - None => true, - Some(x) => x.syncing(allowed_lag), - } - } - pub fn has_block_data(&self, needed_block_num: &U64) -> bool { let head_block_num = match self.head_block.read().clone() { None => return false, - Some(x) => { - // TODO: this 60 second limit is causing our polygons to fall behind. change this to number of blocks? - if x.syncing(60) { - // skip syncing nodes. 
even though they might be able to serve a query, - // latency will be poor and it will get in the way of them syncing further - return false; - } - - x.number() - } + Some(x) => x.number(), }; // this rpc doesn't have that block yet. still syncing @@ -370,7 +350,15 @@ impl Web3Connection { ); let retry_in = Duration::from_millis(sleep_ms); - info!( + + let error_level = if self.backup { + log::Level::Debug + } else { + log::Level::Info + }; + + log::log!( + error_level, "Failed reconnect to {}! Retry in {}ms. err={:?}", self, retry_in.as_millis(), @@ -401,7 +389,7 @@ impl Web3Connection { ProviderState::None => { info!("connecting to {}", self); } - ProviderState::NotReady(provider) | ProviderState::Ready(provider) => { + ProviderState::Connecting(provider) | ProviderState::Connected(provider) => { // disconnect the current provider if let Web3Provider::Mock = provider.as_ref() { return Ok(()); @@ -435,7 +423,7 @@ impl Web3Connection { let new_provider = Web3Provider::from_str(&self.url, self.http_client.clone()).await?; // trace!("saving provider state as NotReady on {}", self); - *provider_state = ProviderState::NotReady(Arc::new(new_provider)); + *provider_state = ProviderState::Connecting(Arc::new(new_provider)); // drop the lock so that we can get a request handle // trace!("provider_state {} unlocked", self); @@ -448,7 +436,7 @@ impl Web3Connection { // TODO: what should the timeout be? should there be a request timeout? // trace!("waiting on chain id for {}", self); let found_chain_id: Result = self - .wait_for_request_handle(&authorization, Duration::from_secs(30), true) + .wait_for_request_handle(&authorization, None, true) .await? .request( "eth_chainId", @@ -489,7 +477,7 @@ impl Web3Connection { .context("provider missing")? .clone(); - *provider_state = ProviderState::Ready(ready_provider); + *provider_state = ProviderState::Connected(ready_provider); // trace!("unlocked for ready..."); } @@ -543,7 +531,7 @@ impl Web3Connection { let _ = head_block.insert(new_head_block.clone().into()); } - if self.block_data_limit() == U64::zero() && !self.syncing(1) { + if self.block_data_limit() == U64::zero() { let authorization = Arc::new(Authorization::internal(self.db_conn.clone())?); if let Err(err) = self.check_block_data_limit(&authorization).await { warn!( @@ -591,8 +579,6 @@ impl Web3Connection { reconnect: bool, tx_id_sender: Option)>>, ) -> anyhow::Result<()> { - let allowed_lag = self.allowed_lag; - loop { let http_interval_receiver = http_interval_sender.as_ref().map(|x| x.subscribe()); @@ -624,8 +610,6 @@ impl Web3Connection { let health_sleep_seconds = 10; sleep(Duration::from_secs(health_sleep_seconds)).await; - let mut warned = 0; - loop { // TODO: what if we just happened to have this check line up with another restart? // TODO: think more about this @@ -644,34 +628,6 @@ impl Web3Connection { } // trace!("health check on {}. 
unlocked", conn); - if let Some(x) = &*conn.head_block.read() { - // if this block is too old, return an error so we reconnect - let current_lag = x.lag(); - if current_lag > allowed_lag { - let level = if warned == 0 { - log::Level::Warn - } else if warned % 100 == 0 { - log::Level::Debug - } else { - log::Level::Trace - }; - - log::log!( - level, - "{} is lagged {} secs: {} {}", - conn, - current_lag, - x.number(), - x.hash(), - ); - - warned += 1; - } else { - // reset warnings now that we are connected - warned = 0; - } - } - sleep(Duration::from_secs(health_sleep_seconds)).await; } }; @@ -750,7 +706,7 @@ impl Web3Connection { // trace!("unlocked on new heads"); // TODO: need a timeout - if let ProviderState::Ready(provider) = provider_state { + if let ProviderState::Connected(provider) = provider_state { match provider.as_ref() { Web3Provider::Mock => unimplemented!(), Web3Provider::Http(_provider) => { @@ -764,7 +720,7 @@ impl Web3Connection { loop { // TODO: what should the max_wait be? match self - .wait_for_request_handle(&authorization, Duration::from_secs(30), false) + .wait_for_request_handle(&authorization, None, false) .await { Ok(active_request_handle) => { @@ -850,7 +806,7 @@ impl Web3Connection { Web3Provider::Ws(provider) => { // todo: move subscribe_blocks onto the request handle? let active_request_handle = self - .wait_for_request_handle(&authorization, Duration::from_secs(30), false) + .wait_for_request_handle(&authorization, None, false) .await; let mut stream = provider.subscribe_blocks().await?; drop(active_request_handle); @@ -860,7 +816,7 @@ impl Web3Connection { // all it does is print "new block" for the same block as current block // TODO: how does this get wrapped in an arc? does ethers handle that? let block: Result, _> = self - .wait_for_request_handle(&authorization, Duration::from_secs(30), false) + .wait_for_request_handle(&authorization, None, false) .await? .request( "eth_getBlockByNumber", @@ -922,7 +878,7 @@ impl Web3Connection { authorization: Arc, tx_id_sender: flume::Sender<(TxHash, Arc)>, ) -> anyhow::Result<()> { - if let ProviderState::Ready(provider) = self + if let ProviderState::Connected(provider) = self .provider_state .try_read() .context("subscribe_pending_transactions")? @@ -961,8 +917,8 @@ impl Web3Connection { Web3Provider::Ws(provider) => { // TODO: maybe the subscribe_pending_txs function should be on the active_request_handle let active_request_handle = self - .wait_for_request_handle(&authorization, Duration::from_secs(30), false) - .await; + .wait_for_request_handle(&authorization, None, false) + .await?; let mut stream = provider.subscribe_pending_txs().await?; @@ -995,13 +951,14 @@ impl Web3Connection { /// be careful with this; it might wait forever! /// `allow_not_ready` is only for use by health checks while starting the provider + /// TODO: don't use anyhow. use specific error type pub async fn wait_for_request_handle( self: &Arc, authorization: &Arc, - max_wait: Duration, + max_wait: Option, allow_not_ready: bool, ) -> anyhow::Result { - let max_wait = Instant::now() + max_wait; + let max_wait = max_wait.map(|x| Instant::now() + x); loop { match self @@ -1011,21 +968,39 @@ impl Web3Connection { Ok(OpenRequestResult::Handle(handle)) => return Ok(handle), Ok(OpenRequestResult::RetryAt(retry_at)) => { // TODO: emit a stat? - // // trace!(?retry_at); + let wait = retry_at.duration_since(Instant::now()); - if retry_at > max_wait { - // break now since we will wait past our maximum wait time - // TODO: don't use anyhow. 
use specific error type - return Err(anyhow::anyhow!("timeout waiting for request handle")); + trace!( + "waiting {} millis for request handle on {}", + wait.as_millis(), + self + ); + + if let Some(max_wait) = max_wait { + if retry_at > max_wait { + // break now since we will wait past our maximum wait time + // TODO: don't use anyhow. use specific error type + return Err(anyhow::anyhow!("timeout waiting for request handle")); + } } + sleep_until(retry_at).await; } - Ok(OpenRequestResult::NotReady) => { + Ok(OpenRequestResult::NotReady(_)) => { // TODO: when can this happen? log? emit a stat? - // TODO: subscribe to the head block on this + trace!("{} has no handle ready", self); + + if let Some(max_wait) = max_wait { + let now = Instant::now(); + + if now > max_wait { + return Err(anyhow::anyhow!("unable to retry for request handle")); + } + } + // TODO: sleep how long? maybe just error? - // TODO: don't use anyhow. use specific error type - return Err(anyhow::anyhow!("unable to retry for request handle")); + // TODO: instead of an arbitrary sleep, subscribe to the head block on this + sleep(Duration::from_millis(10)).await; } Err(err) => return Err(err), } @@ -1048,27 +1023,50 @@ impl Web3Connection { .await .is_none() { - return Ok(OpenRequestResult::NotReady); + trace!("{} is not ready", self); + return Ok(OpenRequestResult::NotReady(self.backup)); + } + + if let Some(hard_limit_until) = self.hard_limit_until.as_ref() { + let hard_limit_ready = hard_limit_until.borrow().clone(); + + let now = Instant::now(); + + if now < hard_limit_ready { + return Ok(OpenRequestResult::RetryAt(hard_limit_ready)); + } } // check rate limits if let Some(ratelimiter) = self.hard_limit.as_ref() { // TODO: how should we know if we should set expire or not? - match ratelimiter.throttle().await? { + match ratelimiter + .throttle() + .await + .context(format!("attempting to throttle {}", self))? + { RedisRateLimitResult::Allowed(_) => { - // // trace!("rate limit succeeded") + // trace!("rate limit succeeded") } RedisRateLimitResult::RetryAt(retry_at, _) => { - // rate limit failed - // save the smallest retry_after. if nothing succeeds, return an Err with retry_after in it - // TODO: use tracing better - // TODO: i'm seeing "Exhausted rate limit on moralis: 0ns". How is it getting 0? - warn!("Exhausted rate limit on {}. Retry at {:?}", self, retry_at); + // rate limit gave us a wait time + if !self.backup { + let when = retry_at.duration_since(Instant::now()); + warn!( + "Exhausted rate limit on {}. Retry in {}ms", + self, + when.as_millis() + ); + } + + if let Some(hard_limit_until) = self.hard_limit_until.as_ref() { + hard_limit_until.send_replace(retry_at.clone()); + } return Ok(OpenRequestResult::RetryAt(retry_at)); } RedisRateLimitResult::RetryNever => { - return Ok(OpenRequestResult::NotReady); + return Ok(OpenRequestResult::NotReady(self.backup)); } } }; @@ -1213,7 +1211,6 @@ mod tests { let x = Web3Connection { name: "name".to_string(), - allowed_lag: 10, db_conn: None, display_name: None, url: "ws://example.com".to_string(), @@ -1223,8 +1220,10 @@ mod tests { internal_requests: 0.into(), provider_state: AsyncRwLock::new(ProviderState::None), hard_limit: None, + hard_limit_until: None, soft_limit: 1_000, automatic_block_limit: false, + backup: false, block_data_limit: block_data_limit.into(), tier: 0, head_block: RwLock::new(Some(head_block.clone())), @@ -1261,7 +1260,6 @@ mod tests { // TODO: this is getting long. 
have a `impl Default` let x = Web3Connection { name: "name".to_string(), - allowed_lag: 10, db_conn: None, display_name: None, url: "ws://example.com".to_string(), @@ -1271,8 +1269,10 @@ mod tests { internal_requests: 0.into(), provider_state: AsyncRwLock::new(ProviderState::None), hard_limit: None, + hard_limit_until: None, soft_limit: 1_000, automatic_block_limit: false, + backup: false, block_data_limit: block_data_limit.into(), tier: 0, head_block: RwLock::new(Some(head_block.clone())), @@ -1288,6 +1288,8 @@ mod tests { assert!(!x.has_block_data(&(head_block.number() + 1000))); } + /* + // TODO: think about how to bring the concept of a "lagged" node back #[test] fn test_lagged_node_not_has_block_data() { let now: U256 = SystemTime::now() @@ -1313,7 +1315,6 @@ mod tests { let x = Web3Connection { name: "name".to_string(), - allowed_lag: 10, db_conn: None, display_name: None, url: "ws://example.com".to_string(), @@ -1325,6 +1326,7 @@ mod tests { hard_limit: None, soft_limit: 1_000, automatic_block_limit: false, + backup: false, block_data_limit: block_data_limit.into(), tier: 0, head_block: RwLock::new(Some(head_block.clone())), @@ -1337,4 +1339,5 @@ mod tests { assert!(!x.has_block_data(&(head_block.number() + 1))); assert!(!x.has_block_data(&(head_block.number() + 1000))); } + */ } diff --git a/web3_proxy/src/rpcs/connections.rs b/web3_proxy/src/rpcs/connections.rs index fbd75b3f..9ecf3fd9 100644 --- a/web3_proxy/src/rpcs/connections.rs +++ b/web3_proxy/src/rpcs/connections.rs @@ -2,22 +2,22 @@ use super::blockchain::{ArcBlock, BlockHashesCache}; use super::connection::Web3Connection; use super::request::{ - OpenRequestHandle, OpenRequestHandleMetrics, OpenRequestResult, RequestErrorHandler, + OpenRequestHandle, OpenRequestHandleMetrics, OpenRequestResult, RequestRevertHandler, }; -use super::synced_connections::SyncedConnections; +use super::synced_connections::ConsensusConnections; use crate::app::{flatten_handle, AnyhowJoinHandle}; use crate::config::{BlockAndRpc, TxHashAndRpc, Web3ConnectionConfig}; use crate::frontend::authorization::{Authorization, RequestMetadata}; +use crate::frontend::rpc_proxy_ws::ProxyMode; use crate::jsonrpc::{JsonRpcForwardedResponse, JsonRpcRequest}; use crate::rpcs::transactions::TxStatus; -use arc_swap::ArcSwap; use counter::Counter; use derive_more::From; use ethers::prelude::{ProviderError, TxHash, H256, U64}; use futures::future::{join_all, try_join_all}; use futures::stream::FuturesUnordered; use futures::StreamExt; -use hashbrown::HashMap; +use hashbrown::{HashMap, HashSet}; use log::{debug, error, info, trace, warn, Level}; use migration::sea_orm::DatabaseConnection; use moka::future::{Cache, ConcurrentCacheExt}; @@ -26,9 +26,9 @@ use serde::Serialize; use serde_json::json; use serde_json::value::RawValue; use std::collections::BTreeMap; -use std::fmt; use std::sync::atomic::Ordering; use std::sync::Arc; +use std::{cmp, fmt}; use thread_fast_rng::rand::seq::SliceRandom; use tokio::sync::{broadcast, watch}; use tokio::task; @@ -37,9 +37,12 @@ use tokio::time::{interval, sleep, sleep_until, Duration, Instant, MissedTickBeh /// A collection of web3 connections. Sends requests either the current best server or all servers. #[derive(From)] pub struct Web3Connections { - pub(crate) conns: HashMap>, /// any requests will be forwarded to one (or more) of these connections - pub(super) synced_connections: ArcSwap, + pub(crate) conns: HashMap>, + /// all providers with the same consensus head block. 
won't update if there is no `self.watch_consensus_head_sender` + pub(super) watch_consensus_connections_sender: watch::Sender>, + /// this head receiver makes it easy to wait until there is a new block + pub(super) watch_consensus_head_receiver: Option>, pub(super) pending_transactions: Cache, /// TODO: this map is going to grow forever unless we do some sort of pruning. maybe store pruned in redis? @@ -61,7 +64,7 @@ impl Web3Connections { http_client: Option, redis_pool: Option, block_map: BlockHashesCache, - head_block_sender: Option>, + watch_consensus_head_sender: Option>, min_sum_soft_limit: u32, min_head_rpcs: usize, pending_tx_sender: Option>, @@ -88,9 +91,6 @@ impl Web3Connections { } }; - // TODO: this might be too aggressive. think about this more - let allowed_lag = ((expected_block_time_ms * 3) as f64 / 1000.0).round() as u64; - let http_interval_sender = if http_client.is_some() { let (sender, receiver) = broadcast::channel(1); @@ -128,6 +128,7 @@ impl Web3Connections { // turn configs into connections (in parallel) // TODO: move this into a helper function. then we can use it when configs change (will need a remove function too) + // TODO: futures unordered? let spawn_handles: Vec<_> = server_configs .into_iter() .filter_map(|(server_name, server_config)| { @@ -140,7 +141,7 @@ impl Web3Connections { let redis_pool = redis_pool.clone(); let http_interval_sender = http_interval_sender.clone(); - let block_sender = if head_block_sender.is_some() { + let block_sender = if watch_consensus_head_sender.is_some() { Some(block_sender.clone()) } else { None @@ -154,7 +155,6 @@ impl Web3Connections { server_config .spawn( server_name, - allowed_lag, db_conn, redis_pool, chain_id, @@ -176,7 +176,7 @@ impl Web3Connections { let mut connections = HashMap::new(); let mut handles = vec![]; - // TODO: do we need to join this? + // TODO: futures unordered? for x in join_all(spawn_handles).await { // TODO: how should we handle errors here? one rpc being down shouldn't cause the program to exit match x { @@ -195,8 +195,6 @@ impl Web3Connections { } } - let synced_connections = SyncedConnections::default(); - // TODO: max_capacity and time_to_idle from config // all block hashes are the same size, so no need for weigher let block_hashes = Cache::builder() @@ -209,9 +207,15 @@ impl Web3Connections { .max_capacity(10_000) .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default()); + let (watch_consensus_connections_sender, _) = watch::channel(Default::default()); + + let watch_consensus_head_receiver = + watch_consensus_head_sender.as_ref().map(|x| x.subscribe()); + let connections = Arc::new(Self { conns: connections, - synced_connections: ArcSwap::new(Arc::new(synced_connections)), + watch_consensus_connections_sender, + watch_consensus_head_receiver, pending_transactions, block_hashes, block_numbers, @@ -231,7 +235,7 @@ impl Web3Connections { authorization, pending_tx_id_receiver, block_receiver, - head_block_sender, + watch_consensus_head_sender, pending_tx_sender, ) .await @@ -328,6 +332,7 @@ impl Web3Connections { } /// Send the same request to all the handles. Returning the most common success or most common error. + /// TODO: option to return the fastest response and handles for all the others instead? 
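+ /// (the broadcast path, e.g. for sending a signed transaction to every server)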
pub async fn try_send_parallel_requests( &self, active_request_handles: Vec, @@ -406,10 +411,43 @@ impl Web3Connections { unimplemented!("this shouldn't be possible") } - /// get the best available rpc server - pub async fn best_synced_backend_connection( + pub async fn best_consensus_head_connection( &self, - allowed_lag: u64, + authorization: &Arc, + request_metadata: Option<&Arc>, + skip: &[Arc], + min_block_needed: Option<&U64>, + ) -> anyhow::Result { + if let Ok(without_backups) = self + ._best_consensus_head_connection( + false, + authorization, + request_metadata, + skip, + min_block_needed, + ) + .await + { + // TODO: this might use backups too eagerly. but even when we allow backups, we still prioritize our own + if matches!(without_backups, OpenRequestResult::Handle(_)) { + return Ok(without_backups); + } + } + + self._best_consensus_head_connection( + true, + authorization, + request_metadata, + skip, + min_block_needed, + ) + .await + } + + /// get the best available rpc server with the consensus head block. it might have blocks after the consensus head + async fn _best_consensus_head_connection( + &self, + allow_backups: bool, authorization: &Arc, request_metadata: Option<&Arc>, skip: &[Arc], @@ -418,54 +456,59 @@ impl Web3Connections { let usable_rpcs_by_head_num_and_weight: BTreeMap< (Option, u64), Vec>, - > = if let Some(min_block_needed) = min_block_needed { - // need a potentially old block. check all the rpcs - let mut m = BTreeMap::new(); + > = { + let synced_connections = self.watch_consensus_connections_sender.borrow().clone(); - for x in self - .conns - .values() - .filter(|x| !skip.contains(x)) - .filter(|x| x.has_block_data(min_block_needed)) - .cloned() - { - let x_head_block = x.head_block.read().clone(); - - match x_head_block { - None => continue, - Some(x_head) => { - let key = (Some(x_head.number()), u64::MAX - x.tier); - - m.entry(key).or_insert_with(Vec::new).push(x); - } - } - } - - m - } else { - // need latest. filter the synced rpcs - let synced_connections = self.synced_connections.load(); - - let head_block = match synced_connections.head_block.as_ref() { - None => return Ok(OpenRequestResult::NotReady), - Some(x) => x, + let head_block_num = if let Some(head_block) = synced_connections.head_block.as_ref() { + head_block.number() + } else { + // TODO: optionally wait for a head block >= min_block_needed + return Ok(OpenRequestResult::NotReady(allow_backups)); }; - // TODO: self.allowed_lag instead of taking as an arg - if head_block.syncing(allowed_lag) { - return Ok(OpenRequestResult::NotReady); - } + let min_block_needed = min_block_needed.unwrap_or(&head_block_num); let mut m = BTreeMap::new(); - for x in synced_connections - .conns - .iter() - .filter(|x| !skip.contains(x)) - { - let key = (None, u64::MAX - x.tier); + match min_block_needed.cmp(&head_block_num) { + cmp::Ordering::Less => { + // need an old block. check all the rpcs. prefer the most synced + for x in self + .conns + .values() + .filter(|x| if allow_backups { true } else { !x.backup }) + .filter(|x| !skip.contains(x)) + .filter(|x| x.has_block_data(min_block_needed)) + .cloned() + { + let x_head_block = x.head_block.read().clone(); - m.entry(key).or_insert_with(Vec::new).push(x.clone()); + match x_head_block { + None => continue, + Some(x_head) => { + let key = (Some(x_head.number()), u64::MAX - x.tier); + + m.entry(key).or_insert_with(Vec::new).push(x); + } + } + } + } + cmp::Ordering::Equal => { + // need the consensus head block. 
filter the synced rpcs + for x in synced_connections + .conns + .iter() + .filter(|x| !skip.contains(x)) + { + let key = (None, u64::MAX - x.tier); + + m.entry(key).or_insert_with(Vec::new).push(x.clone()); + } + } + cmp::Ordering::Greater => { + // TODO? if the blocks is close and wait_for_sync and allow_backups, wait for change on a watch_consensus_connections_receiver().subscribe() + return Ok(OpenRequestResult::NotReady(allow_backups)); + } } m @@ -490,7 +533,7 @@ impl Web3Connections { let available_requests = soft_limit - active_requests; - trace!("available requests on {}: {}", rpc, available_requests); + // trace!("available requests on {}: {}", rpc, available_requests); minimum = minimum.min(available_requests); maximum = maximum.max(available_requests); @@ -499,8 +542,8 @@ impl Web3Connections { }) .collect(); - trace!("minimum available requests: {}", minimum); - trace!("maximum available requests: {}", minimum); + // trace!("minimum available requests: {}", minimum); + // trace!("maximum available requests: {}", maximum); if maximum < 0.0 { // TODO: if maximum < 0 and there are other tiers on the same block, we should include them now @@ -549,13 +592,13 @@ impl Web3Connections { .await { Ok(OpenRequestResult::Handle(handle)) => { - trace!("opened handle: {}", best_rpc); + // trace!("opened handle: {}", best_rpc); return Ok(OpenRequestResult::Handle(handle)); } Ok(OpenRequestResult::RetryAt(retry_at)) => { earliest_retry_at = earliest_retry_at.min(Some(retry_at)); } - Ok(OpenRequestResult::NotReady) => { + Ok(OpenRequestResult::NotReady(_)) => { // TODO: log a warning? emit a stat? } Err(err) => { @@ -573,7 +616,7 @@ impl Web3Connections { None => { // none of the servers gave us a time to retry at - // TODO: bring this back? + // TODO: bring this back? need to think about how to do this with `allow_backups` // we could return an error here, but maybe waiting a second will fix the problem // TODO: configurable max wait? the whole max request time, or just some portion? // let handle = sorted_rpcs @@ -585,7 +628,7 @@ impl Web3Connections { // TODO: should we log here? - Ok(OpenRequestResult::NotReady) + Ok(OpenRequestResult::NotReady(allow_backups)) } Some(earliest_retry_at) => { warn!("no servers on {:?}! {:?}", self, earliest_retry_at); @@ -596,35 +639,74 @@ impl Web3Connections { } /// get all rpc servers that are not rate limited - /// returns servers even if they aren't fully in sync. This is useful for broadcasting signed transactions + /// this prefers synced servers, but it will return servers even if they aren't fully in sync. + /// This is useful for broadcasting signed transactions. // TODO: better type on this that can return an anyhow::Result - pub async fn all_synced_connections( + pub async fn all_connections( &self, authorization: &Arc, block_needed: Option<&U64>, max_count: Option, + ) -> Result, Option> { + if let Ok(without_backups) = self + ._all_connections(false, authorization, block_needed, max_count) + .await + { + return Ok(without_backups); + } + + self._all_connections(true, authorization, block_needed, max_count) + .await + } + + async fn _all_connections( + &self, + allow_backups: bool, + authorization: &Arc, + block_needed: Option<&U64>, + max_count: Option, ) -> Result, Option> { let mut earliest_retry_at = None; // TODO: with capacity? 
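+ // gather up to `max_count` handles, trying the currently-synced servers before the
+ // rest of `self.conns`; `tried` prevents checking the same connection twice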
@@ -596,35 +639,74 @@
     }
 
     /// get all rpc servers that are not rate limited
-    /// returns servers even if they aren't fully in sync. This is useful for broadcasting signed transactions
+    /// this prefers synced servers, but it will return servers even if they aren't fully in sync.
+    /// This is useful for broadcasting signed transactions.
     // TODO: better type on this that can return an anyhow::Result
-    pub async fn all_synced_connections(
+    pub async fn all_connections(
         &self,
         authorization: &Arc<Authorization>,
         block_needed: Option<&U64>,
         max_count: Option<usize>,
+    ) -> Result<Vec<OpenRequestHandle>, Option<Instant>> {
+        if let Ok(without_backups) = self
+            ._all_connections(false, authorization, block_needed, max_count)
+            .await
+        {
+            return Ok(without_backups);
+        }
+
+        self._all_connections(true, authorization, block_needed, max_count)
+            .await
+    }
+
+    async fn _all_connections(
+        &self,
+        allow_backups: bool,
+        authorization: &Arc<Authorization>,
+        block_needed: Option<&U64>,
+        max_count: Option<usize>,
     ) -> Result<Vec<OpenRequestHandle>, Option<Instant>> {
         let mut earliest_retry_at = None;
         // TODO: with capacity?
         let mut selected_rpcs = vec![];
 
-        let mut max_count = if max_count.is_none() {
-            self.conns.len()
+        let mut max_count = if let Some(max_count) = max_count {
+            max_count
         } else {
-            self.conns.len().min(max_count.unwrap())
+            self.conns.len()
         };
 
-        for connection in self.conns.values() {
+        let mut tried = HashSet::new();
+
+        let conns_to_try = itertools::chain(
+            // TODO: sort by tier
+            self.watch_consensus_connections_sender
+                .borrow()
+                .conns
+                .clone(),
+            // TODO: sort by tier
+            self.conns.values().cloned(),
+        );
+
+        for connection in conns_to_try {
             if max_count == 0 {
                 break;
             }
 
+            if tried.contains(&connection.name) {
+                continue;
+            }
+
+            tried.insert(connection.name.clone());
+
+            if !allow_backups && connection.backup {
+                continue;
+            }
+
             if let Some(block_needed) = block_needed {
                 if !connection.has_block_data(block_needed) {
                     continue;
                 }
-            } else if connection.syncing(30) {
-                continue;
             }
 
             // check rate limits and increment our connection counter
@@ -640,7 +722,7 @@
                     max_count -= 1;
                     selected_rpcs.push(handle)
                 }
-                Ok(OpenRequestResult::NotReady) => {
+                Ok(OpenRequestResult::NotReady(_)) => {
                     warn!("no request handle for {}", connection)
                 }
                 Err(err) => {
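
The _all_connections loop above walks a chained iterator, consensus servers first and then every configured server, deduplicating by name so nothing is tried twice. The same shape in a standalone sketch (illustrative only, not part of the patch; std's Iterator::chain stands in for itertools::chain):

use std::collections::HashSet;

fn main() {
    let synced = vec!["a", "b"];
    let all = vec!["b", "a", "c", "backup"];

    let mut tried = HashSet::new();
    let mut order = Vec::new();

    for name in synced.into_iter().chain(all) {
        // HashSet::insert returns false if the name was already present
        if !tried.insert(name) {
            continue;
        }
        order.push(name);
    }

    // consensus servers keep their priority; the rest follow, each exactly once
    assert_eq!(order, ["a", "b", "c", "backup"]);
}
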
@@ -661,26 +743,34 @@
     }
 
     /// be sure there is a timeout on this or it might loop forever
-    /// TODO: do not take allowed_lag here. have it be on the connections struct instead
-    pub async fn try_send_best_upstream_server(
+    /// TODO: think more about wait_for_sync
+    pub async fn try_send_best_consensus_head_connection(
         &self,
-        allowed_lag: u64,
         authorization: &Arc<Authorization>,
         request: JsonRpcRequest,
         request_metadata: Option<&Arc<RequestMetadata>>,
         min_block_needed: Option<&U64>,
     ) -> anyhow::Result<JsonRpcForwardedResponse> {
         let mut skip_rpcs = vec![];
+        let mut method_not_available_response = None;
+
+        let mut watch_consensus_connections = self.watch_consensus_connections_sender.subscribe();
 
         // TODO: maximum retries? right now its the total number of servers
         loop {
-            if skip_rpcs.len() == self.conns.len() {
-                // no servers to try
-                break;
+            let num_skipped = skip_rpcs.len();
+
+            if num_skipped > 0 {
+                // trace!("skip_rpcs: {:?}", skip_rpcs);
+
+                // TODO: is self.conns.len() still the right limit now that we split main and backup servers?
+                if num_skipped == self.conns.len() {
+                    break;
+                }
             }
+
             match self
-                .best_synced_backend_connection(
-                    allowed_lag,
+                .best_consensus_head_connection(
                     authorization,
                     request_metadata,
                     &skip_rpcs,
@@ -690,14 +780,17 @@
             {
                 OpenRequestResult::Handle(active_request_handle) => {
                     // save the rpc in case we get an error and want to retry on another server
+                    // TODO: look at backend_requests instead
                     skip_rpcs.push(active_request_handle.clone_connection());
 
                     if let Some(request_metadata) = request_metadata {
-                        // TODO: request_metadata.backend_requests instead of skip_rpcs
+                        let rpc = active_request_handle.clone_connection();
+
                         request_metadata
-                            .backend_requests
-                            .lock()
-                            .push(active_request_handle.clone_connection());
+                            .response_from_backup_rpc
+                            .store(rpc.backup, Ordering::Release);
+
+                        request_metadata.backend_requests.lock().push(rpc);
                     }
 
                     // TODO: get the log percent from the user data
@@ -705,7 +798,7 @@
                         .request(
                             &request.method,
                             &json!(request.params),
-                            RequestErrorHandler::SaveReverts,
+                            RequestRevertHandler::Save,
                         )
                         .await;
 
@@ -714,7 +807,7 @@
                         request.id.clone(),
                     ) {
                         Ok(response) => {
-                            if let Some(error) = &response.error {
+                            if let Some(error) = response.error.as_ref() {
                                 // trace!(?response, "rpc error");
 
                                 if let Some(request_metadata) = request_metadata {
@@ -724,10 +817,20 @@
                                 }
 
                                 // some errors should be retried on other nodes
+                                let error_msg = error.message.as_str();
+
+                                // different providers do different codes. check all of them
+                                // TODO: there's probably more strings to add here
+                                let rate_limit_substrings = ["limit", "exceeded"];
+                                if rate_limit_substrings
+                                    .iter()
+                                    .any(|substr| error_msg.contains(substr))
+                                {
+                                    warn!("rate limited by {}", skip_rpcs.last().unwrap());
+                                    continue;
+                                }
+
                                 match error.code {
                                     -32000 => {
-                                        let error_msg = error.message.as_str();
-
                                         // TODO: regex?
                                         let retry_prefixes = [
                                             "header not found",
@@ -745,9 +848,22 @@
                                     -32601 => {
                                         let error_msg = error.message.as_str();
 
+                                        // sometimes a provider does not support all rpc methods
+                                        // we check other connections rather than returning the error
+                                        // but sometimes the method is something that is actually unsupported,
+                                        // so we save the response here to return it later
+
+                                        // some providers look like this
                                         if error_msg.starts_with("the method")
                                             && error_msg.ends_with("is not available")
                                         {
+                                            method_not_available_response = Some(response);
+                                            continue;
+                                        }
+
+                                        // others look like this
+                                        if error_msg == "Method not found" {
+                                            method_not_available_response = Some(response);
                                             continue;
                                         }
                                     }
@@ -771,9 +887,6 @@
                                 rpc, err
                             );
 
-                            // TODO: sleep how long? until synced_connections changes or rate limits are available
-                            // sleep(Duration::from_millis(100)).await;
-
                             continue;
                         }
                     }
@@ -782,27 +895,84 @@
                     // TODO: move this to a helper function
                     // sleep (TODO: with a lock?) until our rate limits should be available
                     // TODO: if a server catches up sync while we are waiting, we could stop waiting
-                    warn!("All rate limits exceeded. Sleeping until {:?}", retry_at);
+                    warn!(
+                        "All rate limits exceeded. waiting for change in synced servers or {:?}",
+                        retry_at
+                    );
 
                     // TODO: have a separate column for rate limited?
                     if let Some(request_metadata) = request_metadata {
                         request_metadata.no_servers.fetch_add(1, Ordering::Release);
                     }
 
-                    sleep_until(retry_at).await;
-
-                    continue;
+                    tokio::select! {
+                        _ = sleep_until(retry_at) => {
+                            skip_rpcs.pop();
+                        }
+                        _ = watch_consensus_connections.changed() => {
+                            watch_consensus_connections.borrow_and_update();
+                        }
+                    }
                 }
-                OpenRequestResult::NotReady => {
+                OpenRequestResult::NotReady(backups_included) => {
                     if let Some(request_metadata) = request_metadata {
                         request_metadata.no_servers.fetch_add(1, Ordering::Release);
                     }
 
-                    break;
+                    // todo!(
+                    //     "check if we are requesting an old block and no archive servers are synced"
+                    // );
+
+                    if let Some(min_block_needed) = min_block_needed {
+                        let mut theres_a_chance = false;
+
+                        for potential_conn in self.conns.values() {
+                            if skip_rpcs.contains(potential_conn) {
+                                continue;
+                            }
+
+                            // TODO: should we instead check if has_block_data but with the current head block?
+                            if potential_conn.has_block_data(min_block_needed) {
+                                trace!("chance for {} on {}", min_block_needed, potential_conn);
+                                theres_a_chance = true;
+                                break;
+                            }
+
+                            skip_rpcs.push(potential_conn.clone());
+                        }
+
+                        if !theres_a_chance {
+                            debug!("no chance of finding data in block #{}", min_block_needed);
+                            break;
+                        }
+                    }
+
+                    if backups_included {
+                        // if NotReady and we tried backups, there's no chance
+                        warn!("No servers ready even after checking backups");
+                        break;
+                    }
+
+                    debug!("No servers ready. Waiting up to 1 second for change in synced servers");
+
+                    // TODO: exponential backoff?
+                    tokio::select! {
+                        _ = sleep(Duration::from_secs(1)) => {
+                            // do NOT pop the last rpc off skip here
+                        }
+                        _ = watch_consensus_connections.changed() => {
+                            watch_consensus_connections.borrow_and_update();
+                        }
+                    }
                 }
             }
         }
 
+        if let Some(r) = method_not_available_response {
+            // TODO: emit a stat for unsupported methods?
+            return Ok(r);
+        }
+
         // TODO: do we need this here, or do we do it somewhere else?
         if let Some(request_metadata) = request_metadata {
             request_metadata
@@ -811,14 +981,35 @@
         }
 
         let num_conns = self.conns.len();
+        let num_skipped = skip_rpcs.len();
 
-        error!("No servers synced ({} known)", num_conns);
+        if num_skipped == 0 {
+            error!("No servers synced ({} known)", num_conns);
 
-        Err(anyhow::anyhow!("No servers synced ({} known)", num_conns))
+            return Ok(JsonRpcForwardedResponse::from_str(
+                "No servers synced",
+                Some(-32000),
+                Some(request.id),
+            ));
+        } else {
+            // TODO: warn? debug? trace?
+            warn!(
+                "Requested data was not available on {}/{} servers",
+                num_skipped, num_conns
+            );
+
+            // TODO: what error code?
+            // cloudflare gives {"jsonrpc":"2.0","error":{"code":-32043,"message":"Requested data cannot be older than 128 blocks."},"id":1}
+            return Ok(JsonRpcForwardedResponse::from_str(
+                "Requested data is not available",
+                Some(-32043),
+                Some(request.id),
+            ));
+        }
     }
 
     /// be sure there is a timeout on this or it might loop forever
-    pub async fn try_send_all_upstream_servers(
+    pub async fn try_send_all_synced_connections(
         &self,
         authorization: &Arc<Authorization>,
         request: &JsonRpcRequest,
@@ -829,7 +1020,7 @@
     ) -> anyhow::Result<JsonRpcForwardedResponse> {
         loop {
             match self
-                .all_synced_connections(authorization, block_needed, max_count)
+                .all_connections(authorization, block_needed, max_count)
                 .await
             {
                 Ok(active_request_handles) => {
@@ -838,10 +1029,24 @@
 
                     // TODO: this is not working right. simplify
                     if let Some(request_metadata) = request_metadata {
+                        let mut backup_used = false;
+
+                        request_metadata.backend_requests.lock().extend(
+                            active_request_handles.iter().map(|x| {
+                                let rpc = x.clone_connection();
+
+                                if rpc.backup {
+                                    // TODO: it's possible we serve from a synced connection though. think about this more
+                                    backup_used = true;
+                                }
+
+                                rpc
+                            }),
+                        );
+
                         request_metadata
-                            .backend_requests
-                            .lock()
-                            .extend(active_request_handles.iter().map(|x| x.clone_connection()));
+                            .response_from_backup_rpc
+                            .store(backup_used, Ordering::Release);
                     }
 
                     return self
@@ -865,7 +1070,7 @@
                     // TODO: return a 502? if it does?
                     // return Err(anyhow::anyhow!("no available rpcs!"));
                     // TODO: sleep how long?
-                    // TODO: subscribe to something in SyncedConnections instead
+                    // TODO: subscribe to something in ConsensusConnections instead
                     sleep(Duration::from_millis(200)).await;
 
                     continue;
@@ -887,6 +1092,29 @@
             }
         }
     }
+
+    pub async fn try_proxy_connection(
+        &self,
+        proxy_mode: ProxyMode,
+        authorization: &Arc<Authorization>,
+        request: JsonRpcRequest,
+        request_metadata: Option<&Arc<RequestMetadata>>,
+        min_block_needed: Option<&U64>,
+    ) -> anyhow::Result<JsonRpcForwardedResponse> {
+        match proxy_mode {
+            ProxyMode::Best => {
+                self.try_send_best_consensus_head_connection(
+                    authorization,
+                    request,
+                    request_metadata,
+                    min_block_needed,
+                )
+                .await
+            }
+            ProxyMode::Fastest(x) => todo!("Fastest"),
+            ProxyMode::Versus => todo!("Versus"),
+        }
+    }
 }
 
 impl fmt::Debug for Web3Connections {
@@ -908,8 +1136,11 @@
         let conns: Vec<&Web3Connection> = self.conns.values().map(|x| x.as_ref()).collect();
         state.serialize_field("conns", &conns)?;
 
-        let synced_connections = &**self.synced_connections.load();
-        state.serialize_field("synced_connections", synced_connections)?;
+        {
+            let consensus_connections = self.watch_consensus_connections_sender.borrow().clone();
+            // TODO: rename synced_connections to consensus_connections?
+            state.serialize_field("synced_connections", &consensus_connections)?;
+        }
 
         self.block_hashes.sync();
         self.block_numbers.sync();
@@ -925,7 +1156,11 @@ mod tests {
     // TODO: why is this allow needed? does tokio::test get in the way somehow?
     #![allow(unused_imports)]
     use super::*;
-    use crate::rpcs::{blockchain::SavedBlock, connection::ProviderState, provider::Web3Provider};
+    use crate::rpcs::{
+        blockchain::{ConsensusFinder, SavedBlock},
+        connection::ProviderState,
+        provider::Web3Provider,
+    };
     use ethers::types::{Block, U256};
     use log::{trace, LevelFilter};
     use parking_lot::RwLock;
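
The retry loop above leans on tokio::select! in two places: race the rate-limit deadline (or a one-second backoff) against a change notification from the consensus watch channel, and retry as soon as either side fires. A standalone sketch of that race (illustrative only, not part of the patch):

use std::time::Duration;
use tokio::sync::watch;
use tokio::time::sleep;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = watch::channel(0u64);

    // stand-in for the block-processing task publishing a new consensus head
    tokio::spawn(async move {
        sleep(Duration::from_millis(10)).await;
        tx.send_replace(1);
    });

    tokio::select! {
        _ = sleep(Duration::from_secs(1)) => {
            println!("timed out; retry the request anyway");
        }
        _ = rx.changed() => {
            // mark the new value as seen before retrying
            rx.borrow_and_update();
            println!("synced servers changed; retry immediately");
        }
    }
}
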
@@ -966,15 +1201,13 @@
         let head_block = Arc::new(head_block);
 
         // TODO: write a impl From for Block -> BlockId?
-        let lagged_block: SavedBlock = lagged_block.into();
-        let head_block: SavedBlock = head_block.into();
+        let mut lagged_block: SavedBlock = lagged_block.into();
+        let mut head_block: SavedBlock = head_block.into();
 
         let block_data_limit = u64::MAX;
 
         let head_rpc = Web3Connection {
             name: "synced".to_string(),
-            // TODO: what should this be?
-            allowed_lag: 10,
             db_conn: None,
             display_name: None,
             url: "ws://example.com/synced".to_string(),
@@ -982,10 +1215,14 @@
             active_requests: 0.into(),
             frontend_requests: 0.into(),
             internal_requests: 0.into(),
-            provider_state: AsyncRwLock::new(ProviderState::Ready(Arc::new(Web3Provider::Mock))),
+            provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
+                Web3Provider::Mock,
+            ))),
             hard_limit: None,
+            hard_limit_until: None,
             soft_limit: 1_000,
             automatic_block_limit: true,
+            backup: false,
             block_data_limit: block_data_limit.into(),
             tier: 0,
             head_block: RwLock::new(Some(head_block.clone())),
@@ -994,7 +1231,6 @@
 
         let lagged_rpc = Web3Connection {
             name: "lagged".to_string(),
-            allowed_lag: 10,
             db_conn: None,
             display_name: None,
             url: "ws://example.com/lagged".to_string(),
@@ -1002,10 +1238,14 @@
             active_requests: 0.into(),
             frontend_requests: 0.into(),
             internal_requests: 0.into(),
-            provider_state: AsyncRwLock::new(ProviderState::Ready(Arc::new(Web3Provider::Mock))),
+            provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
+                Web3Provider::Mock,
+            ))),
             hard_limit: None,
+            hard_limit_until: None,
             soft_limit: 1_000,
             automatic_block_limit: false,
+            backup: false,
             block_data_limit: block_data_limit.into(),
             tier: 0,
             head_block: RwLock::new(Some(lagged_block.clone())),
@@ -1026,9 +1266,13 @@
             (lagged_rpc.name.clone(), lagged_rpc.clone()),
         ]);
 
+        let (watch_consensus_connections_sender, _) = watch::channel(Default::default());
+
+        // TODO: make a Web3Connections::new
         let conns = Web3Connections {
             conns,
-            synced_connections: Default::default(),
+            watch_consensus_head_receiver: None,
+            watch_consensus_connections_sender,
             pending_transactions: Cache::builder()
                 .max_capacity(10_000)
                 .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default()),
@@ -1046,7 +1290,7 @@
         let (head_block_sender, _head_block_receiver) =
             watch::channel::<ArcBlock>(Default::default());
-        let mut connection_heads = HashMap::new();
+        let mut connection_heads = ConsensusFinder::default();
 
         // process None so that conns
@@ -1078,7 +1322,7 @@
         // all_backend_connections gives everything regardless of sync status
         assert_eq!(
             conns
-                .all_synced_connections(&authorization, None, None)
+                .all_connections(&authorization, None, None)
                 .await
                 .unwrap()
                 .len(),
 
         // best_synced_backend_connection requires servers to be synced with the head block
-        // TODO: don't hard code allowed_lag
         let x = conns
-            .best_synced_backend_connection(60, &authorization, None, &[], None)
+            .best_consensus_head_connection(&authorization, None, &[], None)
             .await
             .unwrap();
 
         dbg!(&x);
 
-        assert!(matches!(x, OpenRequestResult::NotReady));
+        assert!(matches!(x, OpenRequestResult::NotReady(true)));
 
         // add lagged blocks to the conns. both servers should be allowed
-        conns.save_block(&lagged_block.block, true).await.unwrap();
+        lagged_block.block = conns.save_block(lagged_block.block, true).await.unwrap();
 
         conns
             .process_block_from_rpc(
@@ -1125,7 +1368,7 @@
         assert_eq!(conns.num_synced_rpcs(), 2);
 
         // add head block to the conns. lagged_rpc should not be available
-        conns.save_block(&head_block.block, true).await.unwrap();
+        head_block.block = conns.save_block(head_block.block, true).await.unwrap();
 
         conns
             .process_block_from_rpc(
@@ -1143,21 +1386,21 @@
 
         assert!(matches!(
             conns
-                .best_synced_backend_connection(60, &authorization, None, &[], None)
+                .best_consensus_head_connection(&authorization, None, &[], None)
                 .await,
             Ok(OpenRequestResult::Handle(_))
         ));
 
         assert!(matches!(
             conns
-                .best_synced_backend_connection(60, &authorization, None, &[], Some(&0.into()))
+                .best_consensus_head_connection(&authorization, None, &[], Some(&0.into()))
                 .await,
             Ok(OpenRequestResult::Handle(_))
         ));
 
         assert!(matches!(
             conns
-                .best_synced_backend_connection(60, &authorization, None, &[], Some(&1.into()))
+                .best_consensus_head_connection(&authorization, None, &[], Some(&1.into()))
                 .await,
             Ok(OpenRequestResult::Handle(_))
         ));
@@ -1165,9 +1408,9 @@
         // future block should not get a handle
         assert!(matches!(
             conns
-                .best_synced_backend_connection(60, &authorization, None, &[], Some(&2.into()))
+                .best_consensus_head_connection(&authorization, None, &[], Some(&2.into()))
                 .await,
-            Ok(OpenRequestResult::NotReady)
+            Ok(OpenRequestResult::NotReady(true))
         ));
     }
 
@@ -1198,7 +1441,6 @@
 
         let pruned_rpc = Web3Connection {
             name: "pruned".to_string(),
-            allowed_lag: 10,
             db_conn: None,
             display_name: None,
             url: "ws://example.com/pruned".to_string(),
@@ -1206,10 +1448,14 @@
             active_requests: 0.into(),
             frontend_requests: 0.into(),
             internal_requests: 0.into(),
-            provider_state: AsyncRwLock::new(ProviderState::Ready(Arc::new(Web3Provider::Mock))),
+            provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
+                Web3Provider::Mock,
+            ))),
             hard_limit: None,
+            hard_limit_until: None,
             soft_limit: 3_000,
             automatic_block_limit: false,
+            backup: false,
             block_data_limit: 64.into(),
             tier: 1,
             head_block: RwLock::new(Some(head_block.clone())),
@@ -1218,7 +1464,6 @@
 
         let archive_rpc = Web3Connection {
             name: "archive".to_string(),
-            allowed_lag: 10,
             db_conn: None,
             display_name: None,
             url: "ws://example.com/archive".to_string(),
@@ -1226,10 +1471,14 @@
             active_requests: 0.into(),
             frontend_requests: 0.into(),
             internal_requests: 0.into(),
-            provider_state: AsyncRwLock::new(ProviderState::Ready(Arc::new(Web3Provider::Mock))),
+            provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
+                Web3Provider::Mock,
+            ))),
             hard_limit: None,
+            hard_limit_until: None,
             soft_limit: 1_000,
             automatic_block_limit: false,
+            backup: false,
             block_data_limit: u64::MAX.into(),
             tier: 2,
             head_block: RwLock::new(Some(head_block.clone())),
@@ -1249,9 +1498,13 @@
             (archive_rpc.name.clone(), archive_rpc.clone()),
         ]);
 
+        let (watch_consensus_connections_sender, _) = watch::channel(Default::default());
+
+        // TODO: make a Web3Connections::new
         let conns = Web3Connections {
             conns,
-            synced_connections: Default::default(),
+            watch_consensus_head_receiver: None,
+            watch_consensus_connections_sender,
             pending_transactions: Cache::builder()
                 .max_capacity(10)
                 .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default()),
@@ -1269,7 +1522,7 @@
         let (head_block_sender, _head_block_receiver) =
             watch::channel::<ArcBlock>(Default::default());
-        let mut connection_heads = HashMap::new();
+        let mut connection_heads = ConsensusFinder::default();
 
         conns
             .process_block_from_rpc(
@@ -1298,13 +1551,7 @@
 
         // best_synced_backend_connection requires servers to be synced with the head block
         let best_head_server = conns
-            .best_synced_backend_connection(
-                60,
-                &authorization,
-                None,
-                &[],
-                Some(&head_block.number()),
-            )
+            .best_consensus_head_connection(&authorization, None, &[], Some(&head_block.number()))
             .await;
 
         assert!(matches!(
@@ -1313,7 +1560,7 @@
         ));
 
         let best_archive_server = conns
-            .best_synced_backend_connection(60, &authorization, None, &[], Some(&1.into()))
+            .best_consensus_head_connection(&authorization, None, &[], Some(&1.into()))
             .await;
 
         match best_archive_server {
diff --git a/web3_proxy/src/rpcs/request.rs b/web3_proxy/src/rpcs/request.rs
index 7358982c..d7f2aaf9 100644
--- a/web3_proxy/src/rpcs/request.rs
+++ b/web3_proxy/src/rpcs/request.rs
@@ -27,7 +27,8 @@ pub enum OpenRequestResult {
     /// Unable to start a request. Retry at the given time.
     RetryAt(Instant),
     /// Unable to start a request because the server is not synced
-    NotReady,
+    /// contains "true" if backup servers were attempted
+    NotReady(bool),
 }
 
 /// Make RPC requests through this handle and drop it when you are done.
@@ -42,7 +43,7 @@ pub struct OpenRequestHandle {
 }
 
 /// Depending on the context, RPC errors can require different handling.
-pub enum RequestErrorHandler {
+pub enum RequestRevertHandler {
     /// Log at the trace level. Use when errors are expected.
     TraceLevel,
     /// Log at the debug level. Use when errors are expected.
@@ -52,7 +53,7 @@
     /// Log at the warn level. Use when errors do not cause problems.
     WarnLevel,
     /// Potentially save the revert. Users can tune how often this happens
-    SaveReverts,
+    Save,
 }
 
 // TODO: second param could be skipped since we don't need it here
@@ -65,13 +66,13 @@ struct EthCallFirstParams {
     data: Option<Bytes>,
 }
 
-impl From<Level> for RequestErrorHandler {
+impl From<Level> for RequestRevertHandler {
     fn from(level: Level) -> Self {
         match level {
-            Level::Trace => RequestErrorHandler::TraceLevel,
-            Level::Debug => RequestErrorHandler::DebugLevel,
-            Level::Error => RequestErrorHandler::ErrorLevel,
-            Level::Warn => RequestErrorHandler::WarnLevel,
+            Level::Trace => RequestRevertHandler::TraceLevel,
+            Level::Debug => RequestRevertHandler::DebugLevel,
+            Level::Error => RequestRevertHandler::ErrorLevel,
+            Level::Warn => RequestRevertHandler::WarnLevel,
             _ => unimplemented!("unexpected tracing Level"),
         }
     }
@@ -84,7 +85,7 @@ impl Authorization {
         method: Method,
         params: EthCallFirstParams,
     ) -> anyhow::Result<()> {
-        let rpc_key_id = match self.checks.rpc_key_id {
+        let rpc_key_id = match self.checks.rpc_secret_key_id {
             Some(rpc_key_id) => rpc_key_id.into(),
             None => {
                 // // trace!(?self, "cannot save revert without rpc_key_id");
@@ -213,7 +214,7 @@ impl OpenRequestHandle {
         &self,
         method: &str,
         params: &P,
-        error_handler: RequestErrorHandler,
+        revert_handler: RequestRevertHandler,
     ) -> Result<R, ProviderError>
     where
         // TODO: not sure about this type. would be better to not need clones, but measure and spawns combine to need it
@@ -240,52 +241,58 @@
             Web3Provider::Ws(provider) => provider.request(method, params).await,
         };
 
-        // TODO: i think ethers already has trace logging (and does it much more fancy)
-        trace!(
-            "response from {} for {} {:?}: {:?}",
-            self.conn,
-            method,
-            params,
-            response,
-        );
+        // // TODO: i think ethers already has trace logging (and does it much more fancy)
+        // trace!(
+        //     "response from {} for {} {:?}: {:?}",
+        //     self.conn,
+        //     method,
+        //     params,
+        //     response,
+        // );
 
         if let Err(err) = &response {
             // only save reverts for some types of calls
             // TODO: do something special for eth_sendRawTransaction too
-            let error_handler = if let RequestErrorHandler::SaveReverts = error_handler {
+            let revert_handler = if let RequestRevertHandler::Save = revert_handler {
                 // TODO: should all these be Trace or Debug or a mix?
                 if !["eth_call", "eth_estimateGas"].contains(&method) {
                     // trace!(%method, "skipping save on revert");
-                    RequestErrorHandler::TraceLevel
+                    RequestRevertHandler::TraceLevel
                 } else if self.authorization.db_conn.is_some() {
                     let log_revert_chance = self.authorization.checks.log_revert_chance;
 
                     if log_revert_chance == 0.0 {
                         // trace!(%method, "no chance. skipping save on revert");
-                        RequestErrorHandler::TraceLevel
+                        RequestRevertHandler::TraceLevel
                     } else if log_revert_chance == 1.0 {
                         // trace!(%method, "gaurenteed chance. SAVING on revert");
-                        error_handler
+                        revert_handler
                     } else if thread_fast_rng::thread_fast_rng().gen_range(0.0f64..=1.0)
                         < log_revert_chance
                     {
                         // trace!(%method, "missed chance. skipping save on revert");
-                        RequestErrorHandler::TraceLevel
+                        RequestRevertHandler::TraceLevel
                     } else {
                         // trace!("Saving on revert");
                         // TODO: is always logging at debug level fine?
-                        error_handler
+                        revert_handler
                     }
                 } else {
                     // trace!(%method, "no database. skipping save on revert");
-                    RequestErrorHandler::TraceLevel
+                    RequestRevertHandler::TraceLevel
                }
             } else {
-                error_handler
+                revert_handler
             };
 
+            enum ResponseTypes {
+                Revert,
+                RateLimit,
+                Ok,
+            }
+
             // check for "execution reverted" here
-            let is_revert = if let ProviderError::JsonRpcClientError(err) = err {
+            let response_type = if let ProviderError::JsonRpcClientError(err) = err {
                 // Http and Ws errors are very similar, but different types
                 let msg = match &*self.provider {
                     Web3Provider::Mock => unimplemented!(),
@@ -310,30 +317,44 @@
                 };
 
                 if let Some(msg) = msg {
-                    msg.starts_with("execution reverted")
+                    if msg.starts_with("execution reverted") {
+                        trace!("revert from {}", self.conn);
+                        ResponseTypes::Revert
+                    } else if msg.contains("limit") || msg.contains("request") {
+                        trace!("rate limit from {}", self.conn);
+                        ResponseTypes::RateLimit
+                    } else {
+                        ResponseTypes::Ok
+                    }
                 } else {
-                    false
+                    ResponseTypes::Ok
                 }
             } else {
-                false
+                ResponseTypes::Ok
             };
 
-            if is_revert {
-                trace!("revert from {}", self.conn);
+            if matches!(response_type, ResponseTypes::RateLimit) {
+                if let Some(hard_limit_until) = self.conn.hard_limit_until.as_ref() {
+                    let retry_at = Instant::now() + Duration::from_secs(1);
+
+                    trace!("retry {} at: {:?}", self.conn, retry_at);
+
+                    hard_limit_until.send_replace(retry_at);
+                }
             }
 
             // TODO: think more about the method and param logs. those can be sensitive information
-            match error_handler {
-                RequestErrorHandler::DebugLevel => {
+            match revert_handler {
+                RequestRevertHandler::DebugLevel => {
                     // TODO: think about this revert check more. sometimes we might want reverts logged so this needs a flag
-                    if !is_revert {
+                    if !matches!(response_type, ResponseTypes::Revert) {
                         debug!(
                             "bad response from {}! method={} params={:?} err={:?}",
                             self.conn, method, params, err
                         );
                     }
                }
-                RequestErrorHandler::TraceLevel => {
+                RequestRevertHandler::TraceLevel => {
                     trace!(
                         "bad response from {}! method={} params={:?} err={:?}",
                         self.conn,
@@ -342,21 +363,21 @@
                         err
                     );
                 }
-                RequestErrorHandler::ErrorLevel => {
+                RequestRevertHandler::ErrorLevel => {
                     // TODO: include params if not running in release mode
                     error!(
                         "bad response from {}! method={} err={:?}",
                         self.conn, method, err
                     );
                 }
-                RequestErrorHandler::WarnLevel => {
+                RequestRevertHandler::WarnLevel => {
                     // TODO: include params if not running in release mode
                     warn!(
                         "bad response from {}! method={} err={:?}",
                         self.conn, method, err
                     );
                }
-                RequestErrorHandler::SaveReverts => {
+                RequestRevertHandler::Save => {
                     trace!(
                         "bad response from {}! method={} params={:?} err={:?}",
                         self.conn,
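
The request.rs changes above replace the old is_revert bool with a three-way classification that sniffs provider error messages, since different providers use different codes and strings for the same failure. The classifier on its own (illustrative only, not part of the patch; the substrings mirror the ones in the diff):

enum ResponseType {
    Revert,
    RateLimit,
    Ok,
}

fn classify(msg: &str) -> ResponseType {
    if msg.starts_with("execution reverted") {
        ResponseType::Revert
    } else if msg.contains("limit") || msg.contains("request") {
        ResponseType::RateLimit
    } else {
        ResponseType::Ok
    }
}

fn main() {
    assert!(matches!(classify("execution reverted: out of gas"), ResponseType::Revert));
    assert!(matches!(classify("daily request count exceeded"), ResponseType::RateLimit));
    assert!(matches!(classify("some other error"), ResponseType::Ok));
}
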
- f.debug_struct("SyncedConnections") + f.debug_struct("ConsensusConnections") .field("head_block", &self.head_block) .field("num_conns", &self.conns.len()) .finish_non_exhaustive() @@ -29,31 +43,29 @@ impl fmt::Debug for SyncedConnections { } impl Web3Connections { - pub fn head_block(&self) -> Option { - self.synced_connections.load().head_block.clone() + pub fn head_block(&self) -> Option { + self.watch_consensus_head_receiver + .as_ref() + .map(|x| x.borrow().clone()) } pub fn head_block_hash(&self) -> Option { - self.synced_connections - .load() - .head_block - .as_ref() - .map(|head_block| head_block.hash()) + self.head_block().and_then(|x| x.hash) } pub fn head_block_num(&self) -> Option { - self.synced_connections - .load() - .head_block - .as_ref() - .map(|head_block| head_block.number()) + self.head_block().and_then(|x| x.number) } pub fn synced(&self) -> bool { - !self.synced_connections.load().conns.is_empty() + !self + .watch_consensus_connections_sender + .borrow() + .conns + .is_empty() } pub fn num_synced_rpcs(&self) -> usize { - self.synced_connections.load().conns.len() + self.watch_consensus_connections_sender.borrow().conns.len() } }