Merge remote-tracking branch 'origin/devel'
commit c1e69da155
@@ -5,4 +5,9 @@ rustflags = [
# tokio unstable is needed for tokio-console
"--cfg", "tokio_unstable"
]
rustdocflags = ["--cfg", "tokio_unstable"]
rustdocflags = [
# potentially faster. https://nnethercote.github.io/perf-book/build-configuration.html
"-C", "target-cpu=native",
# tokio unstable is needed for tokio-console
"--cfg", "tokio_unstable"
]
Cargo.lock (generated, 866 lines changed): file diff suppressed because it is too large.
@@ -2,7 +2,9 @@
members = [
"deferred-rate-limiter",
"entities",
"latency",
"migration",
"quick_cache_ttl",
"rate-counter",
"redis-rate-limiter",
"thread-fast-rng",
@@ -32,17 +32,19 @@ RUN apt-get update && \
# copy the application
COPY . .

ENV WEB3_PROXY_FEATURES "rdkafka-src"

# test the application with cargo-nextest
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/app/target \
cargo nextest run --features "rdkafka-src tokio-uring" --no-default-features
cargo nextest run --features "$WEB3_PROXY_FEATURES" --no-default-features

# build the application
# using a "release" profile (which install does) is **very** important
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/app/target \
cargo install \
--features "rdkafka-src tokio-uring" \
--features "$WEB3_PROXY_FEATURES" \
--locked \
--no-default-features \
--path ./web3_proxy \
README.md (21 lines changed)
@@ -6,7 +6,7 @@ Web3_proxy is a fast caching and load balancing proxy for web3 (Ethereum or similar)

Signed transactions (eth_sendRawTransaction) are sent in parallel to the configured private RPCs (eden, ethermine, flashbots, etc.).

All other requests are sent to an RPC server on the latest block (alchemy, moralis, rivet, your own node, or one of many other providers). If multiple servers are in sync, they are prioritized by `active_requests/soft_limit`. Note that this means that the fastest server is most likely to serve requests and slow servers are unlikely to ever get any requests.
All other requests are sent to an RPC server on the latest block (llamanodes, alchemy, moralis, rivet, your own node, or one of many other providers). If multiple servers are in sync, they are prioritized by `active_requests` and request latency. Note that this means that the fastest server is most likely to serve requests and slow servers are unlikely to ever get any requests.

Each server has different limits to configure. The `soft_limit` is the number of parallel active requests where a server starts to slow down. The `hard_limit` is where a server starts giving rate limits or other errors.
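To make the prioritization described above concrete, here is a minimal sketch (not the project's actual code; the `Backend` struct and its fields are assumptions) of ranking synced servers by tier, in-flight requests, and recent latency:

```rust
/// Illustrative only: rank in-sync backends by tier, then in-flight requests, then latency.
#[derive(Debug)]
struct Backend {
    name: &'static str,
    tier: u8,
    active_requests: u32,
    ewma_latency_ms: f64,
}

/// Pick the backend that should receive the next request.
fn pick_backend(synced: &mut [Backend]) -> Option<&Backend> {
    synced.sort_by(|a, b| {
        (a.tier, a.active_requests)
            .cmp(&(b.tier, b.active_requests))
            .then(a.ewma_latency_ms.total_cmp(&b.ewma_latency_ms))
    });
    synced.first()
}

fn main() {
    let mut synced = vec![
        Backend { name: "fast", tier: 0, active_requests: 2, ewma_latency_ms: 40.0 },
        Backend { name: "slow", tier: 0, active_requests: 2, ewma_latency_ms: 250.0 },
    ];
    // with equal tier and load, the lower-latency server wins
    assert_eq!(pick_backend(&mut synced).unwrap().name, "fast");
}
```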
@@ -54,19 +54,22 @@ Check that the proxy is working:

```
curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"web3_clientVersion","id":1}' 127.0.0.1:8544
```
```
curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"eth_getBalance", "params": ["0x0000000000000000000000000000000000000000", "latest"],"id":1}' 127.0.0.1:8544
```

Check that the websocket is working:

```
$ websocat ws://127.0.0.1:8544

{"id": 1, "method": "eth_subscribe", "params": ["newHeads"]}
{"jsonrpc": "2.0", "id": 1, "method": "eth_subscribe", "params": ["newHeads"]}

{"id": 2, "method": "eth_subscribe", "params": ["newPendingTransactions"]}
{"jsonrpc": "2.0", "id": 2, "method": "eth_subscribe", "params": ["newPendingTransactions"]}

{"id": 3, "method": "eth_subscribe", "params": ["newPendingFullTransactions"]}
{"jsonrpc": "2.0", "id": 3, "method": "eth_subscribe", "params": ["newPendingFullTransactions"]}

{"id": 4, "method": "eth_subscribe", "params": ["newPendingRawTransactions"]}
{"jsonrpc": "2.0", "id": 4, "method": "eth_subscribe", "params": ["newPendingRawTransactions"]}
```

You can copy `config/example.toml` to `config/production-$CHAINNAME.toml` and then run `docker-compose up --build -d` to start proxies for many chains.
@@ -79,7 +82,9 @@ web3_proxy_cli health_compass https://eth.llamarpc.com https://eth-ski.llamarpc.

### Run migrations

This is only really useful during development. The migrations run on application start.
Generally it is simplest to just run the app to run migrations. It runs migrations on start.

But if you want to run them manually (generally only useful in development):

```
cd migration
@@ -131,7 +136,7 @@ Flame graphs make a developer's job of finding slow code painless:
4
$ echo -1 | sudo tee /proc/sys/kernel/perf_event_paranoid
-1
$ CARGO_PROFILE_RELEASE_DEBUG=true cargo flamegraph --bin web3_proxy --no-inline
$ CARGO_PROFILE_RELEASE_DEBUG=true cargo flamegraph --bin web3_proxy_cli --no-inline -- proxyd

Be sure to use `--no-inline` or perf will be VERY slow
@@ -147,6 +152,8 @@ TODO: also enable debug symbols in the release build by modifying the root Cargo

Test the proxy:

wrk -t12 -c400 -d30s --latency http://127.0.0.1:8544/health
wrk -t12 -c400 -d30s --latency http://127.0.0.1:8544/status
wrk -s ./wrk/getBlockNumber.lua -t12 -c400 -d30s --latency http://127.0.0.1:8544/u/$API_KEY
wrk -s ./wrk/getLatestBlockByNumber.lua -t12 -c400 -d30s --latency http://127.0.0.1:8544/u/$API_KEY
TODO.md (4 lines changed)
@@ -128,7 +128,7 @@ These are roughly in order of completion
- this was intentional so that recently confirmed transactions go to a server that is more likely to have the tx.
- but under heavy load, we hit their rate limits. need a "retry_until_success" function that goes to balanced_rpcs. or maybe store in redis the txids that we broadcast privately and use that to route.
- [x] some of the DashMaps grow unbounded! Make/find a "SizedDashMap" that cleans up old rows with some garbage collection task
- moka is exactly what we need
- moka has all the features that we need and more
- [x] if block data limit is 0, say Unknown in Debug output
- [x] basic request method stats (using the user_id and other fields that are in the tracing frame)
- [x] refactor from_anyhow_error to have consistent error codes and http codes. maybe implement the Error trait
@@ -745,4 +745,4 @@ in another repo: event subscriber
- [ ] tests for config reloading
- [ ] use pin instead of arc for a bunch of things?
- https://fasterthanli.me/articles/pin-and-suffering
- [ ] calculate archive depth automatically based on block_data_limits
- [ ] calculate archive depth automatically based on block_data_limits
config/development_polygon.toml (new file, 212 lines)
@@ -0,0 +1,212 @@
[app]
chain_id = 137

# a database is optional. it is used for user authentication and accounting
# TODO: how do we find the optimal db_max_connections? too high actually ends up being slower
db_max_connections = 20
# development runs cargo commands on the host and so uses "mysql://root:dev_web3_proxy@127.0.0.1:13306/dev_web3_proxy" for db_url
# production runs inside docker and so uses "mysql://root:web3_proxy@db:3306/web3_proxy" for db_url
db_url = "mysql://root:dev_web3_proxy@127.0.0.1:13306/dev_web3_proxy"

deposit_factory_contract = "0x4e3bc2054788de923a04936c6addb99a05b0ea36"
deposit_topic = "0x45fdc265dc29885b9a485766b03e70978440d38c7c328ee0a14fa40c76c6af54"

# a timeseries database is optional. it is used for making pretty graphs
influxdb_host = "http://127.0.0.1:18086"
influxdb_org = "dev_org"
influxdb_token = "dev_web3_proxy_auth_token"
influxdb_bucket = "dev_web3_proxy"

# thundering herd protection
# only mark a block as the head block if the sum of their soft limits is greater than or equal to min_sum_soft_limit
min_sum_soft_limit = 1_000
# only mark a block as the head block if the number of servers with it is greater than or equal to min_synced_rpcs
min_synced_rpcs = 1
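As a rough illustration of how these two thresholds interact, the sketch below (not the proxy's actual implementation; the names are assumed) only promotes a candidate head block once enough synced servers, with enough combined soft limit, agree on it:

```rust
/// Illustrative sketch of the "thundering herd protection" thresholds above.
struct SyncedRpc {
    soft_limit: u32,
}

fn can_promote_head_block(
    synced: &[SyncedRpc],
    min_sum_soft_limit: u32,
    min_synced_rpcs: usize,
) -> bool {
    let sum_soft_limit: u32 = synced.iter().map(|rpc| rpc.soft_limit).sum();

    // both conditions from the config comments must hold
    synced.len() >= min_synced_rpcs && sum_soft_limit >= min_sum_soft_limit
}

fn main() {
    let synced = vec![SyncedRpc { soft_limit: 1_000 }];
    // with min_sum_soft_limit = 1_000 and min_synced_rpcs = 1, one strong server is enough
    assert!(can_promote_head_block(&synced, 1_000, 1));
}
```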
# redis is optional. it is used for rate limits set by `hard_limit`
# TODO: how do we find the optimal redis_max_connections? too high actually ends up being slower
volatile_redis_max_connections = 20
# development runs cargo commands on the host and so uses "redis://127.0.0.1:16379/" for volatile_redis_url
# production runs inside docker and so uses "redis://redis:6379/" for volatile_redis_url
volatile_redis_url = "redis://127.0.0.1:16379/"

# redirect_public_url is optional
redirect_public_url = "https://llamanodes.com/public-rpc"
# redirect_rpc_key_url is optional
# it only does something if db_url is set
redirect_rpc_key_url = "https://llamanodes.com/dashboard/keys?key={{rpc_key_id}}"

# sentry is optional. it is used for browsing error logs
# sentry_url = "https://SENTRY_KEY_A.ingest.sentry.io/SENTRY_KEY_B"

# public limits are when no key is used. these are instead grouped by ip
# 0 = block all public requests
# Not defined = allow all requests
#public_max_concurrent_requests =
# 0 = block all public requests
# Not defined = allow all requests
#public_requests_per_period =

public_recent_ips_salt = ""

login_domain = "llamanodes.com"

# 1GB of cache
response_cache_max_bytes = 1_000_000_000

# allowed_origin_requests_per_period changes the min_sum_soft_limit for requests with the specified (AND SPOOFABLE) Origin header
# origins not in the list for requests without an rpc_key will use public_requests_per_period instead
[app.allowed_origin_requests_per_period]
"https://chainlist.org" = 1_000
[balanced_rpcs]
|
||||
|
||||
[balanced_rpcs.llama_public]
|
||||
disabled = false
|
||||
display_name = "LlamaNodes"
|
||||
http_url = "https://polygon.llamarpc.com"
|
||||
ws_url = "wss://polygon.llamarpc.com"
|
||||
soft_limit = 1_000
|
||||
tier = 0
|
||||
|
||||
[balanced_rpcs.quicknode]
|
||||
disabled = false
|
||||
display_name = "Quicknode"
|
||||
http_url = "https://rpc-mainnet.matic.quiknode.pro"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.maticvigil]
|
||||
disabled = false
|
||||
display_name = "Maticvigil"
|
||||
http_url = "https://rpc-mainnet.maticvigil.com"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.matic-network]
|
||||
disabled = false
|
||||
display_name = "Matic Network"
|
||||
http_url = "https://rpc-mainnet.matic.network"
|
||||
soft_limit = 10
|
||||
tier = 1
|
||||
|
||||
[balanced_rpcs.chainstack]
|
||||
disabled = false
|
||||
http_url = "https://matic-mainnet.chainstacklabs.com"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.bware]
|
||||
disabled = false
|
||||
display_name = "Bware Labs"
|
||||
http_url = "https://matic-mainnet-full-rpc.bwarelabs.com"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.bware_archive]
|
||||
disabled = false
|
||||
display_name = "Bware Labs Archive"
|
||||
http_url = "https://matic-mainnet-archive-rpc.bwarelabs.com"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.polygonapi]
|
||||
disabled = false
|
||||
display_name = "Polygon API"
|
||||
http_url = "https://polygonapi.terminet.io/rpc"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.one-rpc]
|
||||
disabled = false
|
||||
display_name = "1RPC"
|
||||
http_url = "https://1rpc.io/matic"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.fastrpc]
|
||||
disabled = false
|
||||
display_name = "FastRPC"
|
||||
http_url = "https://polygon-mainnet.rpcfast.com?api_key=xbhWBI1Wkguk8SNMu1bvvLurPGLXmgwYeC4S6g2H7WdwFigZSmPWVZRxrskEQwIf"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.unifra]
|
||||
disabled = false
|
||||
display_name = "Unifra"
|
||||
http_url = "https://polygon-mainnet-public.unifra.io"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.onfinality]
|
||||
disabled = false
|
||||
display_name = "Onfinality"
|
||||
http_url = "https://polygon.api.onfinality.io/public"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.alchemy]
|
||||
disabled = false
|
||||
display_name = "Alchemy"
|
||||
http_url = "https://polygon-mainnet.g.alchemy.com/v2/demo"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.blockpi]
|
||||
disabled = false
|
||||
display_name = "Blockpi"
|
||||
http_url = "https://polygon.blockpi.network/v1/rpc/public"
|
||||
soft_limit = 100
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.polygon]
|
||||
backup = true
|
||||
disabled = false
|
||||
display_name = "Polygon"
|
||||
http_url = "https://polygon-rpc.com"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.pokt]
|
||||
disabled = false
|
||||
display_name = "Pokt"
|
||||
http_url = "https://poly-rpc.gateway.pokt.network"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.ankr]
|
||||
backup = true
|
||||
disabled = false
|
||||
display_name = "Ankr"
|
||||
http_url = "https://rpc.ankr.com/polygon"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.blastapi]
|
||||
backup = true
|
||||
disabled = true
|
||||
display_name = "Blast"
|
||||
http_url = "https://polygon-mainnet.public.blastapi.io"
|
||||
hard_limit = 10
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.omnia]
|
||||
disabled = true
|
||||
display_name = "Omnia"
|
||||
http_url = "https://endpoints.omniatech.io/v1/matic/mainnet/public"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.bor]
|
||||
disabled = true
|
||||
http_url = "https://polygon-bor.publicnode.com"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
||||
[balanced_rpcs.blxr]
|
||||
disabled = false
|
||||
http_url = "https://polygon.rpc.blxrbdn.com"
|
||||
soft_limit = 10
|
||||
tier = 2
|
||||
|
@@ -11,6 +11,9 @@ db_url = "mysql://root:dev_web3_proxy@127.0.0.1:13306/dev_web3_proxy"
# read-only replica useful when running the proxy in multiple regions
db_replica_url = "mysql://root:dev_web3_proxy@127.0.0.1:13306/dev_web3_proxy"

deposit_factory_contract = "0x4e3bc2054788de923a04936c6addb99a05b0ea36"
deposit_topic = "0x45fdc265dc29885b9a485766b03e70978440d38c7c328ee0a14fa40c76c6af54"

kafka_urls = "127.0.0.1:19092"
kafka_protocol = "plaintext"

@@ -18,7 +21,7 @@ kafka_protocol = "plaintext"
influxdb_host = "http://127.0.0.1:18086"
influxdb_org = "dev_org"
influxdb_token = "dev_web3_proxy_auth_token"
influxdb_bucketname = "web3_proxy"
influxdb_bucketname = "dev_web3_proxy"

# thundering herd protection
# only mark a block as the head block if the sum of their soft limits is greater than or equal to min_sum_soft_limit
@@ -5,10 +5,10 @@ authors = ["Bryan Stitt <bryan@stitthappens.com>"]
edition = "2021"

[dependencies]
quick_cache_ttl = { path = "../quick_cache_ttl" }
redis-rate-limiter = { path = "../redis-rate-limiter" }

anyhow = "1.0.71"
hashbrown = "0.13.2"
log = "0.4.17"
moka = { version = "0.11.0", default-features = false, features = ["future"] }
tokio = "1.28.0"
tokio = "1.28.1"
@ -1,6 +1,6 @@
|
||||
//#![warn(missing_docs)]
|
||||
use log::error;
|
||||
use moka::future::Cache;
|
||||
use quick_cache_ttl::CacheWithTTL;
|
||||
use redis_rate_limiter::{RedisRateLimitResult, RedisRateLimiter};
|
||||
use std::cmp::Eq;
|
||||
use std::fmt::{Debug, Display};
|
||||
@ -16,7 +16,7 @@ pub struct DeferredRateLimiter<K>
|
||||
where
|
||||
K: Send + Sync,
|
||||
{
|
||||
local_cache: Cache<K, Arc<AtomicU64>, hashbrown::hash_map::DefaultHashBuilder>,
|
||||
local_cache: CacheWithTTL<K, Arc<AtomicU64>>,
|
||||
prefix: String,
|
||||
rrl: RedisRateLimiter,
|
||||
/// if None, defers to the max on rrl
|
||||
@ -33,9 +33,9 @@ impl<K> DeferredRateLimiter<K>
|
||||
where
|
||||
K: Copy + Debug + Display + Hash + Eq + Send + Sync + 'static,
|
||||
{
|
||||
pub fn new(
|
||||
pub async fn new(
|
||||
// TODO: change this to cache_size in bytes
|
||||
cache_size: u64,
|
||||
cache_size: usize,
|
||||
prefix: &str,
|
||||
rrl: RedisRateLimiter,
|
||||
default_max_requests_per_second: Option<u64>,
|
||||
@ -45,11 +45,8 @@ where
|
||||
// TODO: time to live is not exactly right. we want this ttl counter to start only after redis is down. this works for now
|
||||
// TODO: what do these weigh?
|
||||
// TODO: allow skipping max_capacity
|
||||
let local_cache = Cache::builder()
|
||||
.time_to_live(Duration::from_secs(ttl))
|
||||
.max_capacity(cache_size)
|
||||
.name(prefix)
|
||||
.build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());
|
||||
let local_cache =
|
||||
CacheWithTTL::new_with_capacity(cache_size, Duration::from_secs(ttl)).await;
|
||||
|
||||
Self {
|
||||
local_cache,
|
||||
@ -87,9 +84,9 @@ where
|
||||
let redis_key = redis_key.clone();
|
||||
let rrl = Arc::new(self.rrl.clone());
|
||||
|
||||
// set arc_deferred_rate_limit_result and return the coun
|
||||
// set arc_deferred_rate_limit_result and return the count
|
||||
self.local_cache
|
||||
.get_with(key, async move {
|
||||
.get_or_insert_async::<anyhow::Error, _>(&key, async move {
|
||||
// we do not use the try operator here because we want to be okay with redis errors
|
||||
let redis_count = match rrl
|
||||
.throttle_label(&redis_key, Some(max_requests_per_period), count)
|
||||
@ -110,7 +107,7 @@ where
|
||||
count
|
||||
}
|
||||
Ok(RedisRateLimitResult::RetryNever) => {
|
||||
panic!("RetryNever shouldn't happen")
|
||||
unreachable!();
|
||||
}
|
||||
Err(err) => {
|
||||
let _ = deferred_rate_limit_result
|
||||
@ -126,9 +123,9 @@ where
|
||||
}
|
||||
};
|
||||
|
||||
Arc::new(AtomicU64::new(redis_count))
|
||||
Ok(Arc::new(AtomicU64::new(redis_count)))
|
||||
})
|
||||
.await
|
||||
.await?
|
||||
};
|
||||
|
||||
let mut locked = deferred_rate_limit_result.lock().await;
|
||||
@ -139,7 +136,7 @@ where
|
||||
Ok(deferred_rate_limit_result)
|
||||
} else {
|
||||
// we have a cached amount here
|
||||
let cached_key_count = local_key_count.fetch_add(count, Ordering::Acquire);
|
||||
let cached_key_count = local_key_count.fetch_add(count, Ordering::AcqRel);
|
||||
|
||||
// assuming no other parallel futures incremented this key, this is the count that redis has
|
||||
let expected_key_count = cached_key_count + count;
|
||||
|
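The hunk above swaps moka's `get_with` for `quick_cache_ttl::CacheWithTTL::get_or_insert_async` and tightens the atomic ordering from `Acquire` to `AcqRel`. As a rough, heavily simplified sketch of the underlying idea (per-key counters kept in a local `AtomicU64`, with Redis consulted out of band), with every name here assumed rather than taken from the crate:

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};

/// Illustrative sketch of "deferred" rate limiting: count requests locally and
/// only reconcile with the remote (Redis) limiter asynchronously. This is not
/// the crate's real API.
fn is_allowed(local_count: &Arc<AtomicU64>, max_per_period: u64) -> bool {
    // AcqRel mirrors the ordering change shown in the diff above
    let previous = local_count.fetch_add(1, Ordering::AcqRel);
    previous + 1 <= max_per_period
}

fn main() {
    let counter = Arc::new(AtomicU64::new(0));
    assert!(is_allowed(&counter, 2));
    assert!(is_allowed(&counter, 2));
    assert!(!is_allowed(&counter, 2)); // third request in the period is throttled locally
}
```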
@@ -1,10 +0,0 @@
# log in with curl

1. curl http://127.0.0.1:8544/user/login/$ADDRESS
2. Sign the text with a site like https://www.myetherwallet.com/wallet/sign
3. POST the signed data:

curl -X POST http://127.0.0.1:8544/user/login -H 'Content-Type: application/json' -d
'{ "address": "0x9eb9e3dc2543dc9ff4058e2a2da43a855403f1fd", "msg": "0x6c6c616d616e6f6465732e636f6d2077616e747320796f7520746f207369676e20696e207769746820796f757220457468657265756d206163636f756e743a0a3078396562396533646332353433646339464634303538653241324441343341383535343033463166440a0af09fa699f09fa699f09fa699f09fa699f09fa6990a0a5552493a2068747470733a2f2f6c6c616d616e6f6465732e636f6d2f0a56657273696f6e3a20310a436861696e2049443a20310a4e6f6e63653a203031474d37373330375344324448333854454d3957545156454a0a4973737565642041743a20323032322d31322d31345430323a32333a31372e3735333736335a0a45787069726174696f6e2054696d653a20323032322d31322d31345430323a34333a31372e3735333736335a", "sig": "16bac055345279723193737c6c67cf995e821fd7c038d31fd6f671102088c7b85ab4b13069fd2ed02da186cf549530e315d8d042d721bf81289b3ffdbe8cf9ce1c", "version": "3", "signer": "MEW" }'

4. The response will include a bearer token. Use it with curl ... -H 'Authorization: Bearer $TOKEN'
@@ -1,8 +0,0 @@
sudo apt install bison flex
wget https://eighty-twenty.org/files/0001-tools-perf-Use-long-running-addr2line-per-dso.patch
git clone https://github.com/torvalds/linux.git
cd linux
git checkout v5.15
git apply ../0001-tools-perf-Use-long-running-addr2line-per-dso.patch
cd tools/perf
make prefix=$HOME/.local VERSION=5.15 install-bin
@@ -1,144 +0,0 @@

GET /
This entrypoint handles two things.
If connecting with a browser, it redirects to the public stat page on llamanodes.com.
If connecting with a websocket, it is rate limited by IP and routes to the Web3 RPC.

POST /
This entrypoint handles two things.
If connecting with a browser, it redirects to the public stat page on llamanodes.com.
If connecting with a websocket, it is rate limited by IP and routes to the Web3 RPC.

GET /rpc/:rpc_key
This entrypoint handles two things.
If connecting with a browser, it redirects to the key's stat page on llamanodes.com.
If connecting with a websocket, it is rate limited by key and routes to the Web3 RPC.

POST /rpc/:rpc_key
This entrypoint handles two things.
If connecting with a browser, it redirects to the key's stat page on llamanodes.com.
If connecting with a websocket, it is rate limited by key and routes to the Web3 RPC.

GET /health
If servers are synced, this gives a 200 "OK".
If no servers are synced, it gives a 502 ":("

GET /user/login/:user_address
Displays a "Sign in With Ethereum" message to be signed by the address's private key.
Once signed, continue to `POST /user/login`

GET /user/login/:user_address/:message_eip
Similar to `GET /user/login/:user_address` but gives the message in different formats depending on the eip.
Wallets have varying support. This shouldn't be needed by most users.
The message_eip should be hidden behind a small gear icon near the login button.
Once signed, continue to `POST /user/login`

Supported:
EIP191 as bytes
EIP191 as a hash
EIP4361 (the default)

Support coming soon:
EIP1271 for contract signing

POST /user/login?invite_code=SOMETHING_SECRET
Verifies the user's signed message.

The post should have JSON data containing "sig" (the signature) and "msg" (the original message).

Optionally requires an invite_code.
The invite code is only needed for new users. Once registered, it is not necessary.

If the invite code and signature are valid, this returns JSON data containing "rpc_keys", "bearer_token" and the "user".

"rpc_keys" contains the key and settings for all of the user's keys.
If the user is new, an "rpc_key" will be created for them.

The "bearer_token" is required by some endpoints. Include it in the "AUTHORIZATION" header in this format: "bearer :bearer_token".
The token is good for 4 weeks and the 4 week time will reset whenever the token is used.

The "user" just has an address at first, but you can prompt them to add an email address. See `POST /user`
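As a sketch of the resulting client flow (the helper, its parameters, and the reqwest/serde_json/anyhow dependencies are assumptions for illustration, not part of this repo), a login request followed by an authenticated call might look like:

```rust
use serde_json::json;

/// Sketch only: POST the signed message, then reuse the returned bearer token.
/// Assumes reqwest with its "json" feature enabled.
async fn login_and_fetch_user(base: &str, msg: &str, sig: &str) -> anyhow::Result<()> {
    let client = reqwest::Client::new();

    // POST /user/login with "msg" and "sig" as described above
    let login: serde_json::Value = client
        .post(format!("{base}/user/login"))
        .json(&json!({ "msg": msg, "sig": sig }))
        .send()
        .await?
        .json()
        .await?;

    let bearer = login["bearer_token"].as_str().unwrap_or_default();

    // include the bearer token on endpoints that require it
    client
        .get(format!("{base}/user"))
        .header("AUTHORIZATION", format!("bearer {bearer}"))
        .send()
        .await?;

    Ok(())
}
```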
GET /user
Checks the "AUTHORIZATION" header for a valid bearer token.
If valid, displays the user's data as JSON.

POST /user
POST the data in the same format that `GET /user` gives it.
If you do not want to update a field, do not include it in the POSTed JSON.
If you want to delete a field, include the data's key and set the value to an empty string.

Checks the "AUTHORIZATION" header for a valid bearer token.
If valid, updates the user's data and returns the updated data as JSON.

GET /user/balance
Not yet implemented.

Checks the "AUTHORIZATION" header for a valid bearer token.
If valid, displays data about the user's balance and payments as JSON.

POST /user/balance/:txid
Not yet implemented. Rate limited by IP.

Checks the ":txid" for a transaction that updates a user's balance.
The backend will be watching for these transactions, so this should not be needed in the common case.
However, log subscriptions are not perfect and so it might sometimes be needed.

GET /user/keys
Checks the "AUTHORIZATION" header for a valid bearer token.
If valid, displays data about the user's keys as JSON.

POST or PUT /user/keys
Checks the "AUTHORIZATION" header for a valid bearer token.
If valid, allows the user to create a new key or change options on their keys.

The POSTed JSON can have these fields:
key_id: Option<u64>,
description: Option<String>,
private_txs: Option<bool>,
active: Option<bool>,
allowed_ips: Option<String>,
allowed_origins: Option<String>,
allowed_referers: Option<String>,
allowed_user_agents: Option<String>,

The PUTed JSON has the same fields as the POSTed JSON, except that there is no `key_id`.

If you do not want to update a field, do not include it in the POSTed JSON.
If you want to delete a string field, include the data's key and set the value to an empty string.

`allowed_ips`, `allowed_origins`, `allowed_referers`, and `allowed_user_agents` can have multiple values by separating them with commas.
`allowed_ips` must be in CIDR Notation (ex: "10.1.1.0/24" for a network, "10.1.1.10/32" for a single address).
The spec technically allows for bytes in `allowed_origins` or `allowed_referers`, but our code currently only supports strings. If a customer needs bytes, then we can code support for them.

`private_txs` are not currently recommended. If high gas is not supplied then they will likely never be included. Improvements to this are in the works.

Soon, the POST data will also have a `log_revert_trace: Option<f32>`. This will be the percent chance of logging any calls that "revert" to the database. Large dapps probably want this to be a small percent, but development keys will probably want 100%. This will not be enabled until automatic pruning is coded.
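For illustration, a POST body for `POST /user/keys` that updates one key's allow-lists might look like the following (a sketch only; the field names come from the list above, the values and `key_id` are invented, and serde_json is assumed):

```rust
use serde_json::json;

fn main() {
    // hypothetical request body for POST /user/keys, matching the fields listed above
    let body = json!({
        "key_id": 42,
        "description": "production dapp key",
        "allowed_ips": "10.1.1.0/24,10.1.1.10/32",
        "allowed_origins": "https://example.com",
        "private_txs": false,
        "active": true
    });
    println!("{}", body);
}
```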
GET `/user/revert_logs`
Checks the "AUTHORIZATION" header for a valid bearer token.
If valid, fetches paginated revert logs for the user.
More documentation will be written here once revert logging is enabled.

GET /user/stats/aggregate
Checks the "AUTHORIZATION" header for a valid bearer token.
If valid, fetches paginated aggregated stats for the user.
Pages are limited to 200 entries. The backend config can change this page size if necessary.
Can be filtered by:
`chain_id` - set to 0 for all. 0 is the default.
`query_start` - The start date in unix epoch time.
`query_window_seconds` - How many seconds to aggregate the stats over.
`page` - The page to request. Defaults to 0.

GET /user/stats/detailed
Checks the "AUTHORIZATION" header for a valid bearer token.
If valid, fetches paginated stats for the user with more detail. The request method is included. For user privacy, we intentionally do not include the request's calldata.
Can be filtered the same as `GET /user/stats/aggregate`
Soon will also be filterable by "method"

POST /user/logout
Checks the "AUTHORIZATION" header for a valid bearer token.
If valid, deletes the bearer token from the proxy.
The user will need to `POST /user/login` to get a new bearer token.
@@ -1,15 +0,0 @@
Hello, I'm pretty new to tracing so my vocabulary might be wrong. I've got my app using tracing to log to stdout. I have a bunch of fields including user_id and ip_addr that make telling where logs are from nice and easy.

Now there is one part of my code where I want to save a log to a database. I'm not sure of the best/correct way to do this. I can get the current span with tracing::Span::current(), but AFAICT that doesn't have a way to get to the values. I think I need to write my own Subscriber or Visitor (or both) and then tell tracing to use it only in this one part of the code. Am I on the right track? Is there a place in the docs that explains something similar?

https://burgers.io/custom-logging-in-rust-using-tracing

If you are doing it to learn how to write a subscriber, then you should write a custom layer. If you are simply trying to work on your main project, there are several subscribers that already do this work for you.

Look at opentelemetry_otlp; this will let you connect an OpenTelemetry collector to your tracing using tracing_opentelemetry.

I'd suggest using the Registry subscriber because it can take multiple layers, and use a filtered layer to filter out the messages (look at env_filter, it can take the filtering params from an environment variable or a config string) and then have your collector be the second layer. Registry can take in a vector of layers that are also, in turn, multi-layered.
Let me see if I can pull up an example.
On the https://docs.rs/tracing-subscriber/latest/tracing_subscriber/layer/ page about half-way down there is an example of boxed layers.

You basically end up composing different layers that output to different trace stores and also configure each using per-layer filtering (see https://docs.rs/tracing-subscriber/latest/tracing_subscriber/layer/#per-layer-filtering).
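A minimal sketch of the layered-subscriber advice above, using the public tracing-subscriber API (assuming the env-filter feature; the second layer here is just another stdout fmt layer standing in for a database/OTLP exporter):

```rust
use tracing_subscriber::{fmt, prelude::*, EnvFilter, Registry};

fn main() {
    // one layer for human-readable stdout logs, filtered by RUST_LOG
    let stdout_layer = fmt::layer()
        .with_filter(EnvFilter::from_default_env());

    // a second layer standing in for the "collector" (database, OTLP, ...),
    // with its own per-layer filter
    let collector_layer = fmt::layer()
        .with_ansi(false)
        .with_filter(EnvFilter::new("web3_proxy=trace"));

    Registry::default()
        .with(stdout_layer)
        .with(collector_layer)
        .init();

    tracing::info!(user_id = 1_u64, "example event");
}
```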
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "entities"
|
||||
version = "0.17.0"
|
||||
version = "0.28.0"
|
||||
edition = "2021"
|
||||
|
||||
[lib]
|
||||
@ -11,7 +11,7 @@ path = "src/mod.rs"
|
||||
|
||||
[dependencies]
|
||||
sea-orm = "0.11.3"
|
||||
serde = "1.0.162"
|
||||
uuid = "1.3.2"
|
||||
serde = "1.0.163"
|
||||
uuid = "1.3.3"
|
||||
ethers = "2.0.4"
|
||||
ulid = "1.0.0"
|
||||
|
entities/src/admin_increase_balance_receipt.rs (new file, 49 lines)
@ -0,0 +1,49 @@
|
||||
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.10.6
|
||||
|
||||
use sea_orm::entity::prelude::*;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)]
|
||||
#[sea_orm(table_name = "admin_increase_balance_receipt")]
|
||||
pub struct Model {
|
||||
#[sea_orm(primary_key)]
|
||||
pub id: i32,
|
||||
#[sea_orm(column_type = "Decimal(Some((20, 10)))")]
|
||||
pub amount: Decimal,
|
||||
pub admin_id: u64,
|
||||
pub deposit_to_user_id: u64,
|
||||
pub note: String,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
|
||||
pub enum Relation {
|
||||
#[sea_orm(
|
||||
belongs_to = "super::admin::Entity",
|
||||
from = "Column::AdminId",
|
||||
to = "super::admin::Column::Id",
|
||||
on_update = "NoAction",
|
||||
on_delete = "NoAction"
|
||||
)]
|
||||
Admin,
|
||||
#[sea_orm(
|
||||
belongs_to = "super::user::Entity",
|
||||
from = "Column::DepositToUserId",
|
||||
to = "super::user::Column::Id",
|
||||
on_update = "NoAction",
|
||||
on_delete = "NoAction"
|
||||
)]
|
||||
User,
|
||||
}
|
||||
|
||||
impl Related<super::admin::Entity> for Entity {
|
||||
fn to() -> RelationDef {
|
||||
Relation::Admin.def()
|
||||
}
|
||||
}
|
||||
|
||||
impl Related<super::user::Entity> for Entity {
|
||||
fn to() -> RelationDef {
|
||||
Relation::User.def()
|
||||
}
|
||||
}
|
||||
|
||||
impl ActiveModelBehavior for ActiveModel {}
|
entities/src/balance.rs (new file, 37 lines)
@ -0,0 +1,37 @@
|
||||
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.10.6
|
||||
|
||||
use sea_orm::entity::prelude::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
|
||||
#[sea_orm(table_name = "balance")]
|
||||
pub struct Model {
|
||||
#[sea_orm(primary_key)]
|
||||
pub id: i32,
|
||||
#[sea_orm(column_type = "Decimal(Some((20, 10)))")]
|
||||
pub available_balance: Decimal,
|
||||
#[sea_orm(column_type = "Decimal(Some((20, 10)))")]
|
||||
pub used_balance: Decimal,
|
||||
#[sea_orm(unique)]
|
||||
pub user_id: u64,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
|
||||
pub enum Relation {
|
||||
#[sea_orm(
|
||||
belongs_to = "super::user::Entity",
|
||||
from = "Column::UserId",
|
||||
to = "super::user::Column::Id",
|
||||
on_update = "NoAction",
|
||||
on_delete = "NoAction"
|
||||
)]
|
||||
User,
|
||||
}
|
||||
|
||||
impl Related<super::user::Entity> for Entity {
|
||||
fn to() -> RelationDef {
|
||||
Relation::User.def()
|
||||
}
|
||||
}
|
||||
|
||||
impl ActiveModelBehavior for ActiveModel {}
|
entities/src/increase_on_chain_balance_receipt.rs (new file, 37 lines)
@ -0,0 +1,37 @@
|
||||
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.10.6
|
||||
|
||||
use sea_orm::entity::prelude::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
|
||||
#[sea_orm(table_name = "increase_on_chain_balance_receipt")]
|
||||
pub struct Model {
|
||||
#[sea_orm(primary_key)]
|
||||
pub id: i32,
|
||||
#[sea_orm(unique)]
|
||||
pub tx_hash: String,
|
||||
pub chain_id: u64,
|
||||
#[sea_orm(column_type = "Decimal(Some((20, 10)))")]
|
||||
pub amount: Decimal,
|
||||
pub deposit_to_user_id: u64,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
|
||||
pub enum Relation {
|
||||
#[sea_orm(
|
||||
belongs_to = "super::user::Entity",
|
||||
from = "Column::DepositToUserId",
|
||||
to = "super::user::Column::Id",
|
||||
on_update = "NoAction",
|
||||
on_delete = "NoAction"
|
||||
)]
|
||||
User,
|
||||
}
|
||||
|
||||
impl Related<super::user::Entity> for Entity {
|
||||
fn to() -> RelationDef {
|
||||
Relation::User.def()
|
||||
}
|
||||
}
|
||||
|
||||
impl ActiveModelBehavior for ActiveModel {}
|
@ -3,9 +3,14 @@
|
||||
pub mod prelude;
|
||||
|
||||
pub mod admin;
|
||||
pub mod admin_increase_balance_receipt;
|
||||
pub mod admin_trail;
|
||||
pub mod balance;
|
||||
pub mod increase_on_chain_balance_receipt;
|
||||
pub mod login;
|
||||
pub mod pending_login;
|
||||
pub mod referee;
|
||||
pub mod referrer;
|
||||
pub mod revert_log;
|
||||
pub mod rpc_accounting;
|
||||
pub mod rpc_accounting_v2;
|
||||
|
@ -19,6 +19,21 @@ pub struct Model {
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
|
||||
pub enum Relation {}
|
||||
pub enum Relation {
|
||||
#[sea_orm(
|
||||
belongs_to = "super::user::Entity",
|
||||
from = "Column::ImitatingUser",
|
||||
to = "super::user::Column::Id",
|
||||
on_update = "NoAction",
|
||||
on_delete = "NoAction"
|
||||
)]
|
||||
User,
|
||||
}
|
||||
|
||||
impl Related<super::user::Entity> for Entity {
|
||||
fn to() -> RelationDef {
|
||||
Relation::User.def()
|
||||
}
|
||||
}
|
||||
|
||||
impl ActiveModelBehavior for ActiveModel {}
|
||||
|
@ -1,9 +1,14 @@
|
||||
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.10.7
|
||||
|
||||
pub use super::admin::Entity as Admin;
|
||||
pub use super::admin_increase_balance_receipt::Entity as AdminIncreaseBalanceReceipt;
|
||||
pub use super::admin_trail::Entity as AdminTrail;
|
||||
pub use super::balance::Entity as Balance;
|
||||
pub use super::increase_on_chain_balance_receipt::Entity as IncreaseOnChainBalanceReceipt;
|
||||
pub use super::login::Entity as Login;
|
||||
pub use super::pending_login::Entity as PendingLogin;
|
||||
pub use super::referee::Entity as Referee;
|
||||
pub use super::referrer::Entity as Referrer;
|
||||
pub use super::revert_log::Entity as RevertLog;
|
||||
pub use super::rpc_accounting::Entity as RpcAccounting;
|
||||
pub use super::rpc_accounting_v2::Entity as RpcAccountingV2;
|
||||
|
entities/src/referee.rs (new file, 51 lines)
@ -0,0 +1,51 @@
|
||||
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.10.6
|
||||
|
||||
use sea_orm::entity::prelude::*;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)]
|
||||
#[sea_orm(table_name = "referee")]
|
||||
pub struct Model {
|
||||
#[sea_orm(primary_key)]
|
||||
pub id: i32,
|
||||
pub credits_applied_for_referee: bool,
|
||||
#[sea_orm(column_type = "Decimal(Some((20, 10)))")]
|
||||
pub credits_applied_for_referrer: Decimal,
|
||||
pub referral_start_date: DateTime,
|
||||
pub used_referral_code: i32,
|
||||
#[sea_orm(unique)]
|
||||
pub user_id: u64,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
|
||||
pub enum Relation {
|
||||
#[sea_orm(
|
||||
belongs_to = "super::referrer::Entity",
|
||||
from = "Column::UsedReferralCode",
|
||||
to = "super::referrer::Column::Id",
|
||||
on_update = "NoAction",
|
||||
on_delete = "NoAction"
|
||||
)]
|
||||
Referrer,
|
||||
#[sea_orm(
|
||||
belongs_to = "super::user::Entity",
|
||||
from = "Column::UserId",
|
||||
to = "super::user::Column::Id",
|
||||
on_update = "NoAction",
|
||||
on_delete = "NoAction"
|
||||
)]
|
||||
User,
|
||||
}
|
||||
|
||||
impl Related<super::referrer::Entity> for Entity {
|
||||
fn to() -> RelationDef {
|
||||
Relation::Referrer.def()
|
||||
}
|
||||
}
|
||||
|
||||
impl Related<super::user::Entity> for Entity {
|
||||
fn to() -> RelationDef {
|
||||
Relation::User.def()
|
||||
}
|
||||
}
|
||||
|
||||
impl ActiveModelBehavior for ActiveModel {}
|
entities/src/referrer.rs (new file, 42 lines)
@ -0,0 +1,42 @@
|
||||
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.10.6
|
||||
|
||||
use sea_orm::entity::prelude::*;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)]
|
||||
#[sea_orm(table_name = "referrer")]
|
||||
pub struct Model {
|
||||
#[sea_orm(primary_key)]
|
||||
pub id: i32,
|
||||
#[sea_orm(unique)]
|
||||
pub referral_code: String,
|
||||
#[sea_orm(unique)]
|
||||
pub user_id: u64,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
|
||||
pub enum Relation {
|
||||
#[sea_orm(has_many = "super::referee::Entity")]
|
||||
Referee,
|
||||
#[sea_orm(
|
||||
belongs_to = "super::user::Entity",
|
||||
from = "Column::UserId",
|
||||
to = "super::user::Column::Id",
|
||||
on_update = "NoAction",
|
||||
on_delete = "NoAction"
|
||||
)]
|
||||
User,
|
||||
}
|
||||
|
||||
impl Related<super::referee::Entity> for Entity {
|
||||
fn to() -> RelationDef {
|
||||
Relation::Referee.def()
|
||||
}
|
||||
}
|
||||
|
||||
impl Related<super::user::Entity> for Entity {
|
||||
fn to() -> RelationDef {
|
||||
Relation::User.def()
|
||||
}
|
||||
}
|
||||
|
||||
impl ActiveModelBehavior for ActiveModel {}
|
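The `referee`/`referrer` entities above form a standard SeaORM one-to-many pair (a referrer row is pointed at by `referee.used_referral_code`). A hedged usage sketch, assuming the usual sea-orm query helpers and an existing `DatabaseConnection` (this is not code from the repo):

```rust
use entities::{referee, referrer};
use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};

/// Sketch: load every referee row together with the referrer row it used,
/// limited to referrals belonging to one referring user.
async fn referees_with_referrers(
    db: &DatabaseConnection,
    referrer_user_id: u64,
) -> Result<Vec<(referee::Model, Option<referrer::Model>)>, sea_orm::DbErr> {
    referee::Entity::find()
        .find_also_related(referrer::Entity)
        .filter(referrer::Column::UserId.eq(referrer_user_id))
        .all(db)
        .await
}
```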
@ -8,11 +8,9 @@ use serde::{Deserialize, Serialize};
|
||||
pub struct Model {
|
||||
#[sea_orm(primary_key)]
|
||||
pub id: u64,
|
||||
pub rpc_key_id: u64,
|
||||
pub rpc_key_id: Option<u64>,
|
||||
pub chain_id: u64,
|
||||
pub period_datetime: DateTimeUtc,
|
||||
pub method: String,
|
||||
pub origin: String,
|
||||
pub archive_needed: bool,
|
||||
pub error_response: bool,
|
||||
pub frontend_requests: u64,
|
||||
@ -24,6 +22,8 @@ pub struct Model {
|
||||
pub sum_request_bytes: u64,
|
||||
pub sum_response_millis: u64,
|
||||
pub sum_response_bytes: u64,
|
||||
#[sea_orm(column_type = "Decimal(Some((20, 10)))")]
|
||||
pub sum_credits_used: Decimal,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
|
||||
|
@ -38,6 +38,8 @@ pub enum Relation {
|
||||
RpcAccounting,
|
||||
#[sea_orm(has_many = "super::rpc_accounting_v2::Entity")]
|
||||
RpcAccountingV2,
|
||||
#[sea_orm(has_many = "super::secondary_user::Entity")]
|
||||
SecondaryUser,
|
||||
#[sea_orm(
|
||||
belongs_to = "super::user::Entity",
|
||||
from = "Column::UserId",
|
||||
@ -66,6 +68,12 @@ impl Related<super::rpc_accounting_v2::Entity> for Entity {
|
||||
}
|
||||
}
|
||||
|
||||
impl Related<super::secondary_user::Entity> for Entity {
|
||||
fn to() -> RelationDef {
|
||||
Relation::SecondaryUser.def()
|
||||
}
|
||||
}
|
||||
|
||||
impl Related<super::user::Entity> for Entity {
|
||||
fn to() -> RelationDef {
|
||||
Relation::User.def()
|
||||
|
@ -11,6 +11,7 @@ pub struct Model {
|
||||
pub id: u64,
|
||||
pub user_id: u64,
|
||||
pub description: Option<String>,
|
||||
pub rpc_secret_key_id: u64,
|
||||
pub role: Role,
|
||||
}
|
||||
|
||||
@ -24,6 +25,14 @@ pub enum Relation {
|
||||
on_delete = "NoAction"
|
||||
)]
|
||||
User,
|
||||
#[sea_orm(
|
||||
belongs_to = "super::rpc_key::Entity",
|
||||
from = "Column::RpcSecretKeyId",
|
||||
to = "super::rpc_key::Column::Id",
|
||||
on_update = "NoAction",
|
||||
on_delete = "NoAction"
|
||||
)]
|
||||
RpcKey,
|
||||
}
|
||||
|
||||
impl Related<super::user::Entity> for Entity {
|
||||
@ -32,4 +41,10 @@ impl Related<super::user::Entity> for Entity {
|
||||
}
|
||||
}
|
||||
|
||||
impl Related<super::rpc_key::Entity> for Entity {
|
||||
fn to() -> RelationDef {
|
||||
Relation::RpcKey.def()
|
||||
}
|
||||
}
|
||||
|
||||
impl ActiveModelBehavior for ActiveModel {}
|
||||
|
@ -11,12 +11,21 @@ pub struct Model {
|
||||
pub title: String,
|
||||
pub max_requests_per_period: Option<u64>,
|
||||
pub max_concurrent_requests: Option<u32>,
|
||||
pub downgrade_tier_id: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
|
||||
pub enum Relation {
|
||||
#[sea_orm(has_many = "super::user::Entity")]
|
||||
User,
|
||||
#[sea_orm(
|
||||
belongs_to = "Entity",
|
||||
from = "Column::DowngradeTierId",
|
||||
to = "Column::Id",
|
||||
on_update = "NoAction",
|
||||
on_delete = "NoAction"
|
||||
)]
|
||||
SelfRef,
|
||||
}
|
||||
|
||||
impl Related<super::user::Entity> for Entity {
|
||||
|
latency/Cargo.toml (new file, 16 lines)
@ -0,0 +1,16 @@
|
||||
[package]
|
||||
name = "latency"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
ewma = "0.1.1"
|
||||
flume = "0.10.14"
|
||||
log = "0.4.17"
|
||||
serde = { version = "1.0.163", features = [] }
|
||||
tokio = { version = "1.28.1", features = ["full"] }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { version = "1.28.1", features = ["full", "test-util"] }
|
latency/src/ewma.rs (new file, 69 lines)
@ -0,0 +1,69 @@
|
||||
use serde::ser::Serializer;
|
||||
use serde::Serialize;
|
||||
use tokio::time::Duration;
|
||||
|
||||
pub struct EwmaLatency {
|
||||
/// exponentially weighted moving average of how many milliseconds behind the fastest node we are
|
||||
ewma: ewma::EWMA,
|
||||
}
|
||||
|
||||
impl Serialize for EwmaLatency {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
serializer.serialize_f64(self.ewma.value())
|
||||
}
|
||||
}
|
||||
|
||||
impl EwmaLatency {
|
||||
#[inline]
|
||||
pub fn record(&mut self, duration: Duration) {
|
||||
self.record_ms(duration.as_secs_f64() * 1000.0);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn record_ms(&mut self, milliseconds: f64) {
|
||||
// don't let it go under 0.1ms
|
||||
self.ewma.add(milliseconds.max(0.1));
|
||||
}
|
||||
|
||||
/// Current EWMA value in milliseconds
|
||||
#[inline]
|
||||
pub fn value(&self) -> f64 {
|
||||
self.ewma.value()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for EwmaLatency {
|
||||
fn default() -> Self {
|
||||
// TODO: what should the default span be? 10 requests?
|
||||
let span = 10.0;
|
||||
|
||||
// TODO: what should the default start be?
|
||||
let start = 1.0;
|
||||
|
||||
Self::new(span, start)
|
||||
}
|
||||
}
|
||||
|
||||
impl EwmaLatency {
|
||||
// depending on the span, start might not be perfect
|
||||
pub fn new(span: f64, start_ms: f64) -> Self {
|
||||
let alpha = Self::span_to_alpha(span);
|
||||
|
||||
let mut ewma = ewma::EWMA::new(alpha);
|
||||
|
||||
if start_ms > 0.0 {
|
||||
for _ in 0..(span as u64) {
|
||||
ewma.add(start_ms);
|
||||
}
|
||||
}
|
||||
|
||||
Self { ewma }
|
||||
}
|
||||
|
||||
fn span_to_alpha(span: f64) -> f64 {
|
||||
2.0 / (span + 1.0)
|
||||
}
|
||||
}
|
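A small usage sketch of the `EwmaLatency` type added above, grounded in the code shown: with span = 10, `span_to_alpha` gives alpha = 2 / (10 + 1) ≈ 0.18, so each new sample moves the average roughly 18% of the way toward the observed value. (The exact numbers depend on the ewma crate's seeding, so the asserts are deliberately loose.)

```rust
fn main() {
    // seed with span = 10 and a 1ms starting value, as in the Default impl above
    let mut latency = latency::EwmaLatency::new(10.0, 1.0);

    // a single slow request pulls the average up, but only partway toward 100ms
    latency.record_ms(100.0);
    assert!(latency.value() > 1.0 && latency.value() < 100.0);

    // repeated fast requests bring it back down
    for _ in 0..20 {
        latency.record_ms(5.0);
    }
    assert!(latency.value() < 10.0);
}
```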
latency/src/lib.rs (new file, 5 lines)
@ -0,0 +1,5 @@
|
||||
mod ewma;
|
||||
mod peak_ewma;
|
||||
mod util;
|
||||
|
||||
pub use self::{ewma::EwmaLatency, peak_ewma::PeakEwmaLatency};
|
latency/src/peak_ewma/mod.rs (new file, 153 lines)
@ -0,0 +1,153 @@
|
||||
mod rtt_estimate;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use log::{error, log_enabled, trace};
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio::time::{Duration, Instant};
|
||||
|
||||
use self::rtt_estimate::AtomicRttEstimate;
|
||||
use crate::util::nanos::nanos;
|
||||
|
||||
/// Latency calculation using Peak EWMA algorithm
|
||||
///
|
||||
/// Updates are done in a separate task to avoid locking or race
|
||||
/// conditions. Reads may happen on any thread.
|
||||
#[derive(Debug)]
|
||||
pub struct PeakEwmaLatency {
|
||||
/// Join handle for the latency calculation task
|
||||
pub join_handle: JoinHandle<()>,
|
||||
/// Send to update with each request duration
|
||||
request_tx: flume::Sender<Duration>,
|
||||
/// Latency average and last update time
|
||||
rtt_estimate: Arc<AtomicRttEstimate>,
|
||||
/// Decay time
|
||||
decay_ns: f64,
|
||||
}
|
||||
|
||||
impl PeakEwmaLatency {
|
||||
/// Spawn the task for calculating peak request latency
|
||||
///
|
||||
/// Returns a handle that can also be used to read the current
|
||||
/// average latency.
|
||||
pub fn spawn(decay: Duration, buf_size: usize, start_latency: Duration) -> Self {
|
||||
let decay_ns = decay.as_nanos() as f64;
|
||||
|
||||
debug_assert!(decay_ns > 0.0, "decay_ns must be positive");
|
||||
|
||||
let (request_tx, request_rx) = flume::bounded(buf_size);
|
||||
let rtt_estimate = Arc::new(AtomicRttEstimate::new(start_latency));
|
||||
let task = PeakEwmaLatencyTask {
|
||||
request_rx,
|
||||
rtt_estimate: rtt_estimate.clone(),
|
||||
update_at: Instant::now(),
|
||||
decay_ns,
|
||||
};
|
||||
let join_handle = tokio::spawn(task.run());
|
||||
Self {
|
||||
join_handle,
|
||||
request_tx,
|
||||
rtt_estimate,
|
||||
decay_ns,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the current peak-ewma latency estimate
|
||||
pub fn latency(&self) -> Duration {
|
||||
let mut estimate = self.rtt_estimate.load();
|
||||
|
||||
let now = Instant::now();
|
||||
|
||||
if estimate.update_at > now {
|
||||
if log_enabled!(log::Level::Trace) {
|
||||
trace!(
|
||||
"update_at is {}ns in the future",
|
||||
estimate.update_at.duration_since(now).as_nanos()
|
||||
);
|
||||
}
|
||||
estimate.rtt
|
||||
} else {
|
||||
// Update the RTT estimate to account for decay since the last update.
|
||||
estimate.update(0.0, self.decay_ns, now)
|
||||
}
|
||||
}
|
||||
|
||||
/// Report latency from a single request
|
||||
///
|
||||
/// Should only be called with a duration from the Web3Rpc that owns it.
|
||||
pub fn report(&self, duration: Duration) {
|
||||
match self.request_tx.try_send(duration) {
|
||||
Ok(()) => {}
|
||||
Err(err) => {
|
||||
// We don't want to block if the channel is full, just
|
||||
// report the error
|
||||
error!("Latency report channel full. {}", err);
|
||||
// TODO: could we spawn a new tokio task to report this later?
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// Task to be spawned per-Web3Rpc for calculating the peak request latency
|
||||
#[derive(Debug)]
|
||||
struct PeakEwmaLatencyTask {
|
||||
/// Receive new request timings for update
|
||||
request_rx: flume::Receiver<Duration>,
|
||||
/// Current estimate and update time
|
||||
rtt_estimate: Arc<AtomicRttEstimate>,
|
||||
/// Last update time, used for decay calculation
|
||||
update_at: Instant,
|
||||
/// Decay time
|
||||
decay_ns: f64,
|
||||
}
|
||||
|
||||
impl PeakEwmaLatencyTask {
|
||||
/// Run the loop for updating latency
|
||||
async fn run(self) {
|
||||
while let Ok(rtt) = self.request_rx.recv_async().await {
|
||||
self.update(rtt);
|
||||
}
|
||||
trace!("latency loop exited");
|
||||
}
|
||||
|
||||
/// Update the estimate object atomically.
|
||||
fn update(&self, rtt: Duration) {
|
||||
let rtt = nanos(rtt);
|
||||
|
||||
let now = Instant::now();
|
||||
assert!(
|
||||
self.update_at <= now,
|
||||
"update_at is {}ns in the future",
|
||||
self.update_at.duration_since(now).as_nanos(),
|
||||
);
|
||||
|
||||
self.rtt_estimate
|
||||
.fetch_update(|mut rtt_estimate| rtt_estimate.update(rtt, self.decay_ns, now));
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use tokio::time::{self, Duration};
|
||||
|
||||
/// The default RTT estimate decays, so that new nodes are considered if the
|
||||
/// default RTT is too high.
|
||||
#[tokio::test(start_paused = true)]
|
||||
async fn default_decay() {
|
||||
let estimate = super::PeakEwmaLatency::spawn(
|
||||
Duration::from_millis(1_000),
|
||||
8,
|
||||
Duration::from_millis(10),
|
||||
);
|
||||
let load = estimate.latency();
|
||||
assert_eq!(load, Duration::from_millis(10));
|
||||
|
||||
time::advance(Duration::from_millis(100)).await;
|
||||
let load = estimate.latency();
|
||||
assert!(Duration::from_millis(9) < load && load < Duration::from_millis(10));
|
||||
|
||||
time::advance(Duration::from_millis(100)).await;
|
||||
let load = estimate.latency();
|
||||
assert!(Duration::from_millis(8) < load && load < Duration::from_millis(9));
|
||||
}
|
||||
}
|
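To connect the `default_decay` test above to the decay formula in `rtt_estimate.rs` below: when reading the estimate without a new sample, `latency()` calls `update(0.0, decay_ns, now)`, so the stored value decays by `exp(-elapsed / decay_ns)`. A worked check of the first assertion, using the literal values from the test:

```rust
fn main() {
    // decay = 1s, start estimate = 10ms, elapsed = 100ms (as in the test above)
    let decay_ns = 1_000_000_000.0_f64;
    let elapsed_ns = 100_000_000.0_f64;
    let ewma_ns = 10_000_000.0_f64; // 10ms

    let decay = (-elapsed_ns / decay_ns).exp(); // ≈ 0.905
    let recency = 1.0 - decay;
    let next_ns = ewma_ns * decay + 0.0 * recency; // rtt = 0.0 when only decaying

    // ≈ 9.05ms, which is why the test expects a value between 9ms and 10ms
    assert!(next_ns > 9_000_000.0 && next_ns < 10_000_000.0);
}
```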
latency/src/peak_ewma/rtt_estimate.rs (new file, 169 lines)
@ -0,0 +1,169 @@
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use log::trace;
|
||||
use tokio::time::{Duration, Instant};
|
||||
|
||||
use crate::util::atomic_f32_pair::AtomicF32Pair;
|
||||
use crate::util::nanos::{nanos, NANOS_PER_MILLI};
|
||||
|
||||
/// Holds the current RTT estimate and the last time this value was updated.
|
||||
#[derive(Debug)]
|
||||
pub struct RttEstimate {
|
||||
pub update_at: Instant,
|
||||
pub rtt: Duration,
|
||||
}
|
||||
|
||||
impl RttEstimate {
|
||||
/// Update the estimate with a new rtt value. Use rtt=0.0 for simply
|
||||
/// decaying the current value.
|
||||
pub fn update(&mut self, rtt: f64, decay_ns: f64, now: Instant) -> Duration {
|
||||
let ewma = nanos(self.rtt);
|
||||
self.rtt = if ewma < rtt {
|
||||
// For Peak-EWMA, always use the worst-case (peak) value as the estimate for
|
||||
// subsequent requests.
|
||||
trace!(
|
||||
"update peak rtt={}ms prior={}ms",
|
||||
rtt / NANOS_PER_MILLI,
|
||||
ewma / NANOS_PER_MILLI,
|
||||
);
|
||||
Duration::from_nanos(rtt as u64)
|
||||
} else {
|
||||
// When a latency is observed that is less than the estimated latency, we decay the
|
||||
// prior estimate according to how much time has elapsed since the last
|
||||
// update. The inverse of the decay is used to scale the estimate towards the
|
||||
// observed latency value.
|
||||
let elapsed = nanos(now.saturating_duration_since(self.update_at));
|
||||
let decay = (-elapsed / decay_ns).exp();
|
||||
let recency = 1.0 - decay;
|
||||
let next_estimate = (ewma * decay) + (rtt * recency);
|
||||
trace!(
|
||||
"update duration={:03.0}ms decay={:06.0}ns; next={:03.0}ms",
|
||||
rtt / NANOS_PER_MILLI,
|
||||
ewma - next_estimate,
|
||||
next_estimate / NANOS_PER_MILLI,
|
||||
);
|
||||
Duration::from_nanos(next_estimate as u64)
|
||||
};
|
||||
|
||||
self.rtt
|
||||
}
|
||||
|
||||
/// Build a new estimate object using current time.
|
||||
fn new(start_duration: Duration) -> Self {
|
||||
Self {
|
||||
update_at: Instant::now(),
|
||||
rtt: start_duration,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert to pair of f32
|
||||
fn as_pair(&self, start_time: Instant) -> [f32; 2] {
|
||||
let update_at = self
|
||||
.update_at
|
||||
.saturating_duration_since(start_time)
|
||||
.as_secs_f32();
|
||||
let rtt = self.rtt.as_secs_f32();
|
||||
[update_at, rtt]
|
||||
}
|
||||
|
||||
/// Build from pair of f32
|
||||
fn from_pair(pair: [f32; 2], start_time: Instant) -> Self {
|
||||
let update_at = start_time + Duration::from_secs_f32(pair[0]);
|
||||
let rtt = Duration::from_secs_f32(pair[1]);
|
||||
Self { update_at, rtt }
|
||||
}
|
||||
}
|
||||
|
||||
/// Atomic storage of RttEstimate using AtomicF32Pair
|
||||
///
|
||||
/// Start time is needed to (de-)serialize the update_at instance.
|
||||
#[derive(Debug)]
|
||||
pub struct AtomicRttEstimate {
|
||||
pair: AtomicF32Pair,
|
||||
start_time: Instant,
|
||||
}
|
||||
|
||||
impl AtomicRttEstimate {
|
||||
/// Creates a new atomic rtt estimate.
|
||||
pub fn new(start_duration: Duration) -> Self {
|
||||
let estimate = RttEstimate::new(start_duration);
|
||||
Self {
|
||||
pair: AtomicF32Pair::new(estimate.as_pair(estimate.update_at)),
|
||||
start_time: estimate.update_at,
|
||||
}
|
||||
}
|
||||
|
||||
/// Loads a value from the atomic rtt estimate.
|
||||
///
|
||||
/// This method omits the ordering argument since loads may use
|
||||
/// slightly stale data to avoid adding additional latency.
|
||||
pub fn load(&self) -> RttEstimate {
|
||||
RttEstimate::from_pair(self.pair.load(Ordering::Relaxed), self.start_time)
|
||||
}
|
||||
|
||||
/// Fetches the value, and applies a function to it that returns a
|
||||
/// new rtt. Returns the new RttEstimate with new update_at.
|
||||
///
|
||||
/// Automatically updates the update_at with Instant::now(). This
|
||||
/// method omits ordering arguments, defaulting to Relaxed since
|
||||
/// all writes are serial and any reads may rely on slightly stale
|
||||
/// data.
|
||||
pub fn fetch_update<F>(&self, mut f: F) -> RttEstimate
|
||||
where
|
||||
F: FnMut(RttEstimate) -> Duration,
|
||||
{
|
||||
let mut update_at = Instant::now();
|
||||
let mut rtt = Duration::ZERO;
|
||||
self.pair
|
||||
.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |pair| {
|
||||
rtt = f(RttEstimate::from_pair(pair, self.start_time));
|
||||
// Save the new update_at inside the function in case it
|
||||
// is run multiple times
|
||||
update_at = Instant::now();
|
||||
Some(RttEstimate { rtt, update_at }.as_pair(self.start_time))
|
||||
})
|
||||
.expect("Should never Err");
|
||||
RttEstimate { update_at, rtt }
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use tokio::time::{self, Duration, Instant};
|
||||
|
||||
use super::{AtomicRttEstimate, RttEstimate};
|
||||
|
||||
#[test]
|
||||
fn test_rtt_estimate_f32_conversions() {
|
||||
let rtt = Duration::from_secs(1);
|
||||
let expected = RttEstimate::new(rtt);
|
||||
let actual =
|
||||
RttEstimate::from_pair(expected.as_pair(expected.update_at), expected.update_at);
|
||||
assert_eq!(expected.update_at, actual.update_at);
|
||||
assert_eq!(expected.rtt, actual.rtt);
|
||||
}
|
||||
|
||||
#[tokio::test(start_paused = true)]
|
||||
async fn test_atomic_rtt_estimate_load() {
|
||||
let rtt = Duration::from_secs(1);
|
||||
let estimate = AtomicRttEstimate::new(rtt);
|
||||
let actual = estimate.load();
|
||||
assert_eq!(Instant::now(), actual.update_at);
|
||||
assert_eq!(rtt, actual.rtt);
|
||||
}
|
||||
|
||||
#[tokio::test(start_paused = true)]
|
||||
async fn test_atomic_rtt_estimate_fetch_update() {
|
||||
let start_time = Instant::now();
|
||||
let rtt = Duration::from_secs(1);
|
||||
let estimate = AtomicRttEstimate::new(rtt);
|
||||
time::advance(Duration::from_secs(1)).await;
|
||||
let rv = estimate.fetch_update(|value| {
|
||||
assert_eq!(start_time, value.update_at);
|
||||
assert_eq!(rtt, value.rtt);
|
||||
Duration::from_secs(2)
|
||||
});
|
||||
assert_eq!(start_time + Duration::from_secs(1), rv.update_at);
|
||||
assert_eq!(Duration::from_secs(2), rv.rtt);
|
||||
}
|
||||
}
|
latency/src/util/atomic_f32_pair.rs (new file, 89 lines)
@ -0,0 +1,89 @@
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
|
||||
/// Implements an atomic pair of f32s
|
||||
///
|
||||
/// This uses an AtomicU64 internally.
|
||||
#[derive(Debug)]
|
||||
pub struct AtomicF32Pair(AtomicU64);
|
||||
|
||||
impl AtomicF32Pair {
|
||||
/// Creates a new atomic pair.
|
||||
pub fn new(pair: [f32; 2]) -> Self {
|
||||
Self(AtomicU64::new(to_bits(pair)))
|
||||
}
|
||||
|
||||
/// Loads a value from the atomic pair.
|
||||
pub fn load(&self, ordering: Ordering) -> [f32; 2] {
|
||||
from_bits(self.0.load(ordering))
|
||||
}
|
||||
|
||||
/// Fetches the value, and applies a function to it that returns an
|
||||
/// optional new value. Returns a Result of Ok(previous_value) if
|
||||
/// the function returned Some(_), else Err(previous_value).
|
||||
pub fn fetch_update<F>(
|
||||
&self,
|
||||
set_order: Ordering,
|
||||
fetch_order: Ordering,
|
||||
mut f: F,
|
||||
) -> Result<[f32; 2], [f32; 2]>
|
||||
where
|
||||
F: FnMut([f32; 2]) -> Option<[f32; 2]>,
|
||||
{
|
||||
self.0
|
||||
.fetch_update(set_order, fetch_order, |bits| {
|
||||
f(from_bits(bits)).map(to_bits)
|
||||
})
|
||||
.map(from_bits)
|
||||
.map_err(from_bits)
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert a f32 pair to its bit-representation as u64
|
||||
fn to_bits(pair: [f32; 2]) -> u64 {
|
||||
let f1 = pair[0].to_bits() as u64;
|
||||
let f2 = pair[1].to_bits() as u64;
|
||||
(f1 << 32) | f2
|
||||
}
|
||||
|
||||
/// Build a f32 pair from its bit-representation as u64
|
||||
fn from_bits(bits: u64) -> [f32; 2] {
|
||||
let f1 = f32::from_bits((bits >> 32) as u32);
|
||||
let f2 = f32::from_bits(bits as u32);
|
||||
[f1, f2]
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::f32;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use super::{from_bits, to_bits, AtomicF32Pair};
|
||||
|
||||
#[test]
|
||||
fn test_f32_pair_bit_conversions() {
|
||||
let pair = [f32::consts::PI, f32::consts::E];
|
||||
assert_eq!(pair, from_bits(to_bits(pair)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_atomic_f32_pair_load() {
|
||||
let pair = [f32::consts::PI, f32::consts::E];
|
||||
let atomic = AtomicF32Pair::new(pair);
|
||||
assert_eq!(pair, atomic.load(Ordering::Relaxed));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_atomic_f32_pair_fetch_update() {
|
||||
let pair = [f32::consts::PI, f32::consts::E];
|
||||
let atomic = AtomicF32Pair::new(pair);
|
||||
atomic
|
||||
.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |[f1, f2]| {
|
||||
Some([f1 + 1.0, f2 + 1.0])
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
[pair[0] + 1.0, pair[1] + 1.0],
|
||||
atomic.load(Ordering::Relaxed)
|
||||
);
|
||||
}
|
||||
}
|
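As a usage sketch (the `(count, total)` interpretation of the two slots is only an example; in this crate the pair holds whatever `RttEstimate::as_pair` encodes):

```rust
use std::sync::atomic::Ordering;

// Illustrative helper: bump a packed (count, total) pair without a lock.
fn add_sample(pair: &AtomicF32Pair, value: f32) {
    pair.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |[count, total]| {
        Some([count + 1.0, total + value])
    })
    .expect("the closure always returns Some");
}
```

Packing both halves into one `AtomicU64` makes each update a single compare-and-swap instead of two independent stores.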
2
latency/src/util/mod.rs
Normal file
@ -0,0 +1,2 @@
pub(crate) mod atomic_f32_pair;
pub(crate) mod nanos;
30
latency/src/util/nanos.rs
Normal file
@ -0,0 +1,30 @@
use tokio::time::Duration;

pub const NANOS_PER_MILLI: f64 = 1_000_000.0;

/// Utility that converts durations to nanos in f64.
///
/// Due to a lossy transformation, the maximum value that can be represented is ~585 years,
/// which, I hope, is more than enough to represent request latencies.
pub fn nanos(d: Duration) -> f64 {
    const NANOS_PER_SEC: u64 = 1_000_000_000;
    let n = f64::from(d.subsec_nanos());
    let s = d.as_secs().saturating_mul(NANOS_PER_SEC) as f64;
    n + s
}

#[cfg(test)]
mod tests {
    use tokio::time::Duration;

    #[test]
    fn nanos() {
        assert_eq!(super::nanos(Duration::new(0, 0)), 0.0);
        assert_eq!(super::nanos(Duration::new(0, 123)), 123.0);
        assert_eq!(super::nanos(Duration::new(1, 23)), 1_000_000_023.0);
        assert_eq!(
            super::nanos(Duration::new(::std::u64::MAX, 999_999_999)),
            18446744074709553000.0
        );
    }
}
@ -1,6 +1,6 @@
[package]
name = "migration"
version = "0.19.0"
version = "0.28.0"
edition = "2021"
publish = false

@ -9,7 +9,7 @@ name = "migration"
path = "src/lib.rs"

[dependencies]
tokio = { version = "1.28.0", features = ["full", "tracing"] }
tokio = { version = "1.28.1", features = ["full", "tracing"] }

[dependencies.sea-orm-migration]
version = "0.11.3"
@ -17,8 +17,17 @@ mod m20230119_204135_better_free_tier;
mod m20230125_204810_stats_v2;
mod m20230130_124740_read_only_login_logic;
mod m20230130_165144_prepare_admin_imitation_pre_login;
mod m20230205_130035_create_balance;
mod m20230205_133755_create_referrals;
mod m20230214_134254_increase_balance_transactions;
mod m20230215_152254_admin_trail;
mod m20230221_230953_track_spend;
mod m20230307_002623_migrate_rpc_accounting_to_rpc_accounting_v2;
mod m20230412_171916_modify_secondary_user_add_primary_user;
mod m20230422_172555_premium_downgrade_logic;
mod m20230511_161214_remove_columns_statsv2_origin_and_method;
mod m20230512_220213_allow_null_rpc_key_id_in_stats_v2;
mod m20230514_114803_admin_add_credits;

pub struct Migrator;

@ -43,8 +52,17 @@ impl MigratorTrait for Migrator {
            Box::new(m20230125_204810_stats_v2::Migration),
            Box::new(m20230130_124740_read_only_login_logic::Migration),
            Box::new(m20230130_165144_prepare_admin_imitation_pre_login::Migration),
            Box::new(m20230205_130035_create_balance::Migration),
            Box::new(m20230205_133755_create_referrals::Migration),
            Box::new(m20230214_134254_increase_balance_transactions::Migration),
            Box::new(m20230215_152254_admin_trail::Migration),
            Box::new(m20230221_230953_track_spend::Migration),
            Box::new(m20230307_002623_migrate_rpc_accounting_to_rpc_accounting_v2::Migration),
            Box::new(m20230412_171916_modify_secondary_user_add_primary_user::Migration),
            Box::new(m20230422_172555_premium_downgrade_logic::Migration),
            Box::new(m20230511_161214_remove_columns_statsv2_origin_and_method::Migration),
            Box::new(m20230512_220213_allow_null_rpc_key_id_in_stats_v2::Migration),
            Box::new(m20230514_114803_admin_add_credits::Migration),
        ]
    }
}
@ -92,7 +92,6 @@ impl MigrationTrait for Migration {
            )
            .await?;

        // rename column rpc_key to rpc_secret_key
        Ok(())
    }
@ -6,7 +6,6 @@ pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Replace the sample below with your own migration scripts
        manager
            .create_table(
                Table::create()
@ -36,7 +35,6 @@ impl MigrationTrait for Migration {
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Replace the sample below with your own migration scripts
        manager
            .drop_table(Table::drop().table(Admin::Table).to_owned())
            .await
@ -23,6 +23,12 @@ impl MigrationTrait for Migration {
                            .not_null()
                            .default(0),
                    )
                    .foreign_key(
                        ForeignKeyCreateStatement::new()
                            .from_col(RpcAccountingV2::RpcKeyId)
                            .to_tbl(RpcKey::Table)
                            .to_col(RpcKey::Id),
                    )
                    .col(
                        ColumnDef::new(RpcAccountingV2::ChainId)
                            .big_unsigned()
@ -136,6 +142,12 @@ impl MigrationTrait for Migration {
    }
}

#[derive(Iden)]
enum RpcKey {
    Table,
    Id,
}

#[derive(Iden)]
enum RpcAccountingV2 {
    Table,
70
migration/src/m20230205_130035_create_balance.rs
Normal file
70
migration/src/m20230205_130035_create_balance.rs
Normal file
@ -0,0 +1,70 @@
|
||||
use sea_orm_migration::prelude::*;
|
||||
|
||||
#[derive(DeriveMigrationName)]
|
||||
pub struct Migration;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MigrationTrait for Migration {
|
||||
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
manager
|
||||
.create_table(
|
||||
Table::create()
|
||||
.table(Balance::Table)
|
||||
.if_not_exists()
|
||||
.col(
|
||||
ColumnDef::new(Balance::Id)
|
||||
.integer()
|
||||
.not_null()
|
||||
.auto_increment()
|
||||
.primary_key(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(Balance::AvailableBalance)
|
||||
.decimal_len(20, 10)
|
||||
.not_null()
|
||||
.default(0.0),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(Balance::UsedBalance)
|
||||
.decimal_len(20, 10)
|
||||
.not_null()
|
||||
.default(0.0),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(Balance::UserId)
|
||||
.big_unsigned()
|
||||
.unique_key()
|
||||
.not_null(),
|
||||
)
|
||||
.foreign_key(
|
||||
sea_query::ForeignKey::create()
|
||||
.from(Balance::Table, Balance::UserId)
|
||||
.to(User::Table, User::Id),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
manager
|
||||
.drop_table(Table::drop().table(Balance::Table).to_owned())
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
/// Learn more at https://docs.rs/sea-query#iden
|
||||
#[derive(Iden)]
|
||||
enum User {
|
||||
Table,
|
||||
Id,
|
||||
}
|
||||
|
||||
#[derive(Iden)]
|
||||
enum Balance {
|
||||
Table,
|
||||
Id,
|
||||
UserId,
|
||||
AvailableBalance,
|
||||
UsedBalance,
|
||||
}
|
133
migration/src/m20230205_133755_create_referrals.rs
Normal file
133
migration/src/m20230205_133755_create_referrals.rs
Normal file
@ -0,0 +1,133 @@
|
||||
use sea_orm_migration::prelude::*;
|
||||
|
||||
#[derive(DeriveMigrationName)]
|
||||
pub struct Migration;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MigrationTrait for Migration {
|
||||
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
// Create one table for the referrer
|
||||
manager
|
||||
.create_table(
|
||||
Table::create()
|
||||
.table(Referrer::Table)
|
||||
.if_not_exists()
|
||||
.col(
|
||||
ColumnDef::new(Referrer::Id)
|
||||
.integer()
|
||||
.not_null()
|
||||
.auto_increment()
|
||||
.primary_key(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(Referrer::ReferralCode)
|
||||
.string()
|
||||
.unique_key()
|
||||
.not_null(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(Referrer::UserId)
|
||||
.big_unsigned()
|
||||
.unique_key()
|
||||
.not_null(),
|
||||
)
|
||||
.foreign_key(
|
||||
sea_query::ForeignKey::create()
|
||||
.from(Referrer::Table, Referrer::UserId)
|
||||
.to(User::Table, User::Id),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Create one table for the referee
|
||||
manager
|
||||
.create_table(
|
||||
Table::create()
|
||||
.table(Referee::Table)
|
||||
.if_not_exists()
|
||||
.col(
|
||||
ColumnDef::new(Referee::Id)
|
||||
.integer()
|
||||
.not_null()
|
||||
.auto_increment()
|
||||
.primary_key(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(Referee::CreditsAppliedForReferee)
|
||||
.boolean()
|
||||
.not_null(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(Referee::CreditsAppliedForReferrer)
|
||||
.decimal_len(20, 10)
|
||||
.not_null()
|
||||
.default(0),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(Referee::ReferralStartDate)
|
||||
.date_time()
|
||||
.not_null()
|
||||
.extra("DEFAULT CURRENT_TIMESTAMP".to_string()),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(Referee::UsedReferralCode)
|
||||
.integer()
|
||||
.not_null(),
|
||||
)
|
||||
.foreign_key(
|
||||
sea_query::ForeignKey::create()
|
||||
.from(Referee::Table, Referee::UsedReferralCode)
|
||||
.to(Referrer::Table, Referrer::Id),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(Referee::UserId)
|
||||
.big_unsigned()
|
||||
.unique_key()
|
||||
.not_null(),
|
||||
)
|
||||
.foreign_key(
|
||||
sea_query::ForeignKey::create()
|
||||
.from(Referee::Table, Referee::UserId)
|
||||
.to(User::Table, User::Id),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
manager
|
||||
.drop_table(Table::drop().table(Referrer::Table).to_owned())
|
||||
.await?;
|
||||
manager
|
||||
.drop_table(Table::drop().table(Referee::Table).to_owned())
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
/// Learn more at https://docs.rs/sea-query#iden
|
||||
#[derive(Iden)]
|
||||
enum Referrer {
|
||||
Table,
|
||||
Id,
|
||||
UserId,
|
||||
ReferralCode,
|
||||
}
|
||||
|
||||
#[derive(Iden)]
|
||||
enum Referee {
|
||||
Table,
|
||||
Id,
|
||||
UserId,
|
||||
UsedReferralCode,
|
||||
CreditsAppliedForReferrer,
|
||||
CreditsAppliedForReferee,
|
||||
ReferralStartDate,
|
||||
}
|
||||
|
||||
#[derive(Iden)]
|
||||
enum User {
|
||||
Table,
|
||||
Id,
|
||||
}
|
@ -0,0 +1,96 @@
|
||||
use sea_orm_migration::prelude::*;
|
||||
|
||||
#[derive(DeriveMigrationName)]
|
||||
pub struct Migration;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MigrationTrait for Migration {
|
||||
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
// Adds a table which keeps track of which transactions were already added (basically to prevent double spending)
|
||||
manager
|
||||
.create_table(
|
||||
Table::create()
|
||||
.table(IncreaseOnChainBalanceReceipt::Table)
|
||||
.if_not_exists()
|
||||
.col(
|
||||
ColumnDef::new(IncreaseOnChainBalanceReceipt::Id)
|
||||
.integer()
|
||||
.not_null()
|
||||
.auto_increment()
|
||||
.primary_key(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(IncreaseOnChainBalanceReceipt::TxHash)
|
||||
.string()
|
||||
.not_null(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(IncreaseOnChainBalanceReceipt::ChainId)
|
||||
.big_integer()
|
||||
.not_null(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(IncreaseOnChainBalanceReceipt::Amount)
|
||||
.decimal_len(20, 10)
|
||||
.not_null(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(IncreaseOnChainBalanceReceipt::DepositToUserId)
|
||||
.big_unsigned()
|
||||
.unique_key()
|
||||
.not_null(),
|
||||
)
|
||||
.foreign_key(
|
||||
ForeignKey::create()
|
||||
.name("fk-deposit_to_user_id")
|
||||
.from(
|
||||
IncreaseOnChainBalanceReceipt::Table,
|
||||
IncreaseOnChainBalanceReceipt::DepositToUserId,
|
||||
)
|
||||
.to(User::Table, User::Id),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Add a unique-constraint on chain-id and tx-hash
|
||||
manager
|
||||
.create_index(
|
||||
Index::create()
|
||||
.name("idx-increase_on_chain_balance_receipt-unique-chain_id-tx_hash")
|
||||
.table(IncreaseOnChainBalanceReceipt::Table)
|
||||
.col(IncreaseOnChainBalanceReceipt::ChainId)
|
||||
.col(IncreaseOnChainBalanceReceipt::TxHash)
|
||||
.unique()
|
||||
.to_owned(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
manager
|
||||
.drop_table(
|
||||
Table::drop()
|
||||
.table(IncreaseOnChainBalanceReceipt::Table)
|
||||
.to_owned(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
/// Learn more at https://docs.rs/sea-query#iden
|
||||
#[derive(Iden)]
|
||||
enum IncreaseOnChainBalanceReceipt {
|
||||
Table,
|
||||
Id,
|
||||
TxHash,
|
||||
ChainId,
|
||||
Amount,
|
||||
DepositToUserId,
|
||||
}
|
||||
|
||||
#[derive(Iden)]
|
||||
enum User {
|
||||
Table,
|
||||
Id,
|
||||
}
|
@ -49,7 +49,6 @@ impl MigrationTrait for Migration {
|
||||
}
|
||||
|
||||
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
// Replace the sample below with your own migration scripts
|
||||
manager
|
||||
.drop_table(Table::drop().table(AdminTrail::Table).to_owned())
|
||||
.await
|
||||
|
41
migration/src/m20230221_230953_track_spend.rs
Normal file
41
migration/src/m20230221_230953_track_spend.rs
Normal file
@ -0,0 +1,41 @@
|
||||
use sea_orm_migration::prelude::*;
|
||||
|
||||
#[derive(DeriveMigrationName)]
|
||||
pub struct Migration;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MigrationTrait for Migration {
|
||||
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
// Track spend inside the RPC accounting v2 table
|
||||
manager
|
||||
.alter_table(
|
||||
Table::alter()
|
||||
.table(RpcAccountingV2::Table)
|
||||
.add_column(
|
||||
ColumnDef::new(RpcAccountingV2::SumCreditsUsed)
|
||||
.decimal_len(20, 10)
|
||||
.not_null(),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
manager
|
||||
.alter_table(
|
||||
sea_query::Table::alter()
|
||||
.table(RpcAccountingV2::Table)
|
||||
.drop_column(RpcAccountingV2::SumCreditsUsed)
|
||||
.to_owned(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
/// Learn more at https://docs.rs/sea-query#iden
|
||||
#[derive(Iden)]
|
||||
enum RpcAccountingV2 {
|
||||
Table,
|
||||
SumCreditsUsed,
|
||||
}
|
@ -0,0 +1,58 @@
|
||||
use sea_orm_migration::prelude::*;
|
||||
|
||||
#[derive(DeriveMigrationName)]
|
||||
pub struct Migration;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MigrationTrait for Migration {
|
||||
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
manager
|
||||
.alter_table(
|
||||
Table::alter()
|
||||
.table(SecondaryUser::Table)
|
||||
.add_column(
|
||||
ColumnDef::new(SecondaryUser::RpcSecretKeyId)
|
||||
.big_unsigned()
|
||||
.not_null(), // add foreign key to user table ...,
|
||||
)
|
||||
.add_foreign_key(
|
||||
TableForeignKey::new()
|
||||
.name("FK_secondary_user-rpc_key")
|
||||
.from_tbl(SecondaryUser::Table)
|
||||
.from_col(SecondaryUser::RpcSecretKeyId)
|
||||
.to_tbl(RpcKey::Table)
|
||||
.to_col(RpcKey::Id)
|
||||
.on_delete(ForeignKeyAction::NoAction)
|
||||
.on_update(ForeignKeyAction::NoAction),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await
|
||||
|
||||
// TODO: Add a unique index on RpcKey + Subuser
|
||||
}
|
||||
|
||||
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
manager
|
||||
.alter_table(
|
||||
sea_query::Table::alter()
|
||||
.table(SecondaryUser::Table)
|
||||
.drop_column(SecondaryUser::RpcSecretKeyId)
|
||||
.to_owned(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
/// Learn more at https://docs.rs/sea-query#iden
|
||||
#[derive(Iden)]
|
||||
enum SecondaryUser {
|
||||
Table,
|
||||
RpcSecretKeyId,
|
||||
}
|
||||
|
||||
#[derive(Iden)]
|
||||
enum RpcKey {
|
||||
Table,
|
||||
Id,
|
||||
}
|
124
migration/src/m20230422_172555_premium_downgrade_logic.rs
Normal file
124
migration/src/m20230422_172555_premium_downgrade_logic.rs
Normal file
@ -0,0 +1,124 @@
|
||||
use crate::sea_orm::ConnectionTrait;
|
||||
use sea_orm_migration::prelude::*;
|
||||
|
||||
#[derive(DeriveMigrationName)]
|
||||
pub struct Migration;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MigrationTrait for Migration {
|
||||
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
// Add a column "downgrade_tier_id"
|
||||
// It is a "foreign key" that references other items in this table
|
||||
manager
|
||||
.alter_table(
|
||||
Table::alter()
|
||||
.table(UserTier::Table)
|
||||
.add_column(ColumnDef::new(UserTier::DowngradeTierId).big_unsigned())
|
||||
.add_foreign_key(
|
||||
TableForeignKey::new()
|
||||
.to_tbl(UserTier::Table)
|
||||
.from_col(UserTier::DowngradeTierId)
|
||||
.to_col(UserTier::Id),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Insert Premium, and PremiumOutOfFunds
|
||||
let premium_out_of_funds_tier = Query::insert()
|
||||
.into_table(UserTier::Table)
|
||||
.columns([
|
||||
UserTier::Title,
|
||||
UserTier::MaxRequestsPerPeriod,
|
||||
UserTier::MaxConcurrentRequests,
|
||||
UserTier::DowngradeTierId,
|
||||
])
|
||||
.values_panic([
|
||||
"Premium Out Of Funds".into(),
|
||||
Some("6000").into(),
|
||||
Some("5").into(),
|
||||
None::<i64>.into(),
|
||||
])
|
||||
.to_owned();
|
||||
|
||||
manager.exec_stmt(premium_out_of_funds_tier).await?;
|
||||
|
||||
// Insert Premium Out Of Funds
|
||||
// get the premium tier ...
|
||||
let db_conn = manager.get_connection();
|
||||
let db_backend = manager.get_database_backend();
|
||||
|
||||
let select_premium_out_of_funds_tier_id = Query::select()
|
||||
.column(UserTier::Id)
|
||||
.from(UserTier::Table)
|
||||
.cond_where(Expr::col(UserTier::Title).eq("Premium Out Of Funds"))
|
||||
.to_owned();
|
||||
let premium_out_of_funds_tier_id: u64 = db_conn
|
||||
.query_one(db_backend.build(&select_premium_out_of_funds_tier_id))
|
||||
.await?
|
||||
.expect("we just created Premium Out Of Funds")
|
||||
.try_get("", &UserTier::Id.to_string())?;
|
||||
|
||||
// Add two tiers for premium: premium, and premium-out-of-funds
|
||||
let premium_tier = Query::insert()
|
||||
.into_table(UserTier::Table)
|
||||
.columns([
|
||||
UserTier::Title,
|
||||
UserTier::MaxRequestsPerPeriod,
|
||||
UserTier::MaxConcurrentRequests,
|
||||
UserTier::DowngradeTierId,
|
||||
])
|
||||
.values_panic([
|
||||
"Premium".into(),
|
||||
None::<&str>.into(),
|
||||
Some("100").into(),
|
||||
Some(premium_out_of_funds_tier_id).into(),
|
||||
])
|
||||
.to_owned();
|
||||
|
||||
manager.exec_stmt(premium_tier).await
|
||||
}
|
||||
|
||||
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
// Remove the two tiers that you just added
|
||||
// And remove the column you just added
|
||||
let db_conn = manager.get_connection();
|
||||
let db_backend = manager.get_database_backend();
|
||||
|
||||
let delete_premium = Query::delete()
|
||||
.from_table(UserTier::Table)
|
||||
.cond_where(Expr::col(UserTier::Title).eq("Premium"))
|
||||
.to_owned();
|
||||
|
||||
db_conn.execute(db_backend.build(&delete_premium)).await?;
|
||||
|
||||
let delete_premium_out_of_funds = Query::delete()
|
||||
.from_table(UserTier::Table)
|
||||
.cond_where(Expr::col(UserTier::Title).eq("Premium Out Of Funds"))
|
||||
.to_owned();
|
||||
|
||||
db_conn
|
||||
.execute(db_backend.build(&delete_premium_out_of_funds))
|
||||
.await?;
|
||||
|
||||
// Finally drop the downgrade column
|
||||
manager
|
||||
.alter_table(
|
||||
Table::alter()
|
||||
.table(UserTier::Table)
|
||||
.drop_column(UserTier::DowngradeTierId)
|
||||
.to_owned(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Iden)]
|
||||
enum UserTier {
|
||||
Table,
|
||||
Id,
|
||||
Title,
|
||||
MaxRequestsPerPeriod,
|
||||
MaxConcurrentRequests,
|
||||
DowngradeTierId,
|
||||
}
|
@ -0,0 +1,49 @@
|
||||
use sea_orm_migration::prelude::*;
|
||||
|
||||
#[derive(DeriveMigrationName)]
|
||||
pub struct Migration;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MigrationTrait for Migration {
|
||||
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
manager
|
||||
.alter_table(
|
||||
Table::alter()
|
||||
.table(RpcAccountingV2::Table)
|
||||
.drop_column(RpcAccountingV2::Origin)
|
||||
.drop_column(RpcAccountingV2::Method)
|
||||
.to_owned(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
manager
|
||||
.alter_table(
|
||||
Table::alter()
|
||||
.table(RpcAccountingV2::Table)
|
||||
.add_column(
|
||||
ColumnDef::new(RpcAccountingV2::Method)
|
||||
.string()
|
||||
.not_null()
|
||||
.default(""),
|
||||
)
|
||||
.add_column(
|
||||
ColumnDef::new(RpcAccountingV2::Origin)
|
||||
.string()
|
||||
.not_null()
|
||||
.default(""),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
/// Learn more at https://docs.rs/sea-query#iden
|
||||
#[derive(Iden)]
|
||||
enum RpcAccountingV2 {
|
||||
Table,
|
||||
Origin,
|
||||
Method,
|
||||
}
|
@ -0,0 +1,48 @@
|
||||
use sea_orm_migration::prelude::*;
|
||||
|
||||
#[derive(DeriveMigrationName)]
|
||||
pub struct Migration;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MigrationTrait for Migration {
|
||||
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
manager
|
||||
.alter_table(
|
||||
sea_query::Table::alter()
|
||||
.table(RpcAccountingV2::Table)
|
||||
.to_owned()
|
||||
// allow rpc_key_id to be null. Needed for public rpc stat tracking
|
||||
.modify_column(
|
||||
ColumnDef::new(RpcAccountingV2::RpcKeyId)
|
||||
.big_unsigned()
|
||||
.null(),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
manager
|
||||
.alter_table(
|
||||
sea_query::Table::alter()
|
||||
.table(RpcAccountingV2::Table)
|
||||
.to_owned()
|
||||
.modify_column(
|
||||
ColumnDef::new(RpcAccountingV2::RpcKeyId)
|
||||
.big_unsigned()
|
||||
.not_null()
|
||||
.default(0),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
/// Learn more at https://docs.rs/sea-query#iden
|
||||
#[derive(Iden)]
|
||||
enum RpcAccountingV2 {
|
||||
Table,
|
||||
RpcKeyId,
|
||||
}
|
97
migration/src/m20230514_114803_admin_add_credits.rs
Normal file
97
migration/src/m20230514_114803_admin_add_credits.rs
Normal file
@ -0,0 +1,97 @@
|
||||
use sea_orm_migration::prelude::*;
|
||||
|
||||
#[derive(DeriveMigrationName)]
|
||||
pub struct Migration;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MigrationTrait for Migration {
|
||||
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
manager
|
||||
.create_table(
|
||||
Table::create()
|
||||
.table(AdminIncreaseBalanceReceipt::Table)
|
||||
.if_not_exists()
|
||||
.col(
|
||||
ColumnDef::new(AdminIncreaseBalanceReceipt::Id)
|
||||
.integer()
|
||||
.not_null()
|
||||
.auto_increment()
|
||||
.primary_key(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(AdminIncreaseBalanceReceipt::Amount)
|
||||
.decimal_len(20, 10)
|
||||
.not_null(),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(AdminIncreaseBalanceReceipt::AdminId)
|
||||
.big_unsigned()
|
||||
.not_null(),
|
||||
)
|
||||
.foreign_key(
|
||||
ForeignKey::create()
|
||||
.name("fk-admin_id")
|
||||
.from(
|
||||
AdminIncreaseBalanceReceipt::Table,
|
||||
AdminIncreaseBalanceReceipt::AdminId,
|
||||
)
|
||||
.to(Admin::Table, Admin::Id),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(AdminIncreaseBalanceReceipt::DepositToUserId)
|
||||
.big_unsigned()
|
||||
.not_null(),
|
||||
)
|
||||
.foreign_key(
|
||||
ForeignKey::create()
|
||||
.name("fk-admin_deposits_to_user_id")
|
||||
.from(
|
||||
AdminIncreaseBalanceReceipt::Table,
|
||||
AdminIncreaseBalanceReceipt::DepositToUserId,
|
||||
)
|
||||
.to(User::Table, User::Id),
|
||||
)
|
||||
.col(
|
||||
ColumnDef::new(AdminIncreaseBalanceReceipt::Note)
|
||||
.string()
|
||||
.not_null(),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
manager
|
||||
.drop_table(
|
||||
Table::drop()
|
||||
.table(AdminIncreaseBalanceReceipt::Table)
|
||||
.to_owned(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Iden)]
|
||||
enum Admin {
|
||||
Table,
|
||||
Id,
|
||||
}
|
||||
|
||||
/// Learn more at https://docs.rs/sea-query#iden
|
||||
#[derive(Iden)]
|
||||
enum User {
|
||||
Table,
|
||||
Id,
|
||||
}
|
||||
|
||||
/// Learn more at https://docs.rs/sea-query#iden
|
||||
#[derive(Iden)]
|
||||
enum AdminIncreaseBalanceReceipt {
|
||||
Table,
|
||||
Id,
|
||||
Amount,
|
||||
AdminId,
|
||||
DepositToUserId,
|
||||
Note,
|
||||
}
|
14
quick_cache_ttl/Cargo.toml
Normal file
@ -0,0 +1,14 @@
[package]
name = "quick_cache_ttl"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
flume = "0.10.14"
quick_cache = "0.3.0"
tokio = { version = "1.28.1", features = ["full"] }

[dev-dependencies]
tokio = { version = "1.28.1", features = ["full", "test-util"] }
118
quick_cache_ttl/src/cache.rs
Normal file
@ -0,0 +1,118 @@
use quick_cache::{DefaultHashBuilder, UnitWeighter, Weighter};
use std::{
    future::Future,
    hash::{BuildHasher, Hash},
    sync::Arc,
    time::Duration,
};

use crate::{KQCacheWithTTL, PlaceholderGuardWithTTL};

pub struct CacheWithTTL<Key, Val, We = UnitWeighter, B = DefaultHashBuilder>(
    KQCacheWithTTL<Key, (), Val, We, B>,
);

impl<Key: Eq + Hash + Clone + Send + Sync + 'static, Val: Clone + Send + Sync + 'static>
    CacheWithTTL<Key, Val, UnitWeighter, DefaultHashBuilder>
{
    pub async fn new_with_capacity(capacity: usize, ttl: Duration) -> Self {
        Self::new(
            capacity,
            capacity as u64,
            UnitWeighter,
            DefaultHashBuilder::default(),
            ttl,
        )
        .await
    }

    pub async fn arc_with_capacity(capacity: usize, ttl: Duration) -> Arc<Self> {
        let x = Self::new_with_capacity(capacity, ttl).await;

        Arc::new(x)
    }
}

impl<
        Key: Eq + Hash + Clone + Send + Sync + 'static,
        Val: Clone + Send + Sync + 'static,
        We: Weighter<Key, (), Val> + Clone + Send + Sync + 'static,
        B: BuildHasher + Clone + Default + Send + Sync + 'static,
    > CacheWithTTL<Key, Val, We, B>
{
    pub async fn new_with_weights(
        estimated_items_capacity: usize,
        weight_capacity: u64,
        weighter: We,
        ttl: Duration,
    ) -> Self {
        let inner = KQCacheWithTTL::new(
            estimated_items_capacity,
            weight_capacity,
            weighter,
            B::default(),
            ttl,
        )
        .await;

        Self(inner)
    }
}

impl<
        Key: Eq + Hash + Clone + Send + Sync + 'static,
        Val: Clone + Send + Sync + 'static,
        We: Weighter<Key, (), Val> + Clone + Send + Sync + 'static,
        B: BuildHasher + Clone + Send + Sync + 'static,
    > CacheWithTTL<Key, Val, We, B>
{
    pub async fn new(
        estimated_items_capacity: usize,
        weight_capacity: u64,
        weighter: We,
        hash_builder: B,
        ttl: Duration,
    ) -> Self {
        let inner = KQCacheWithTTL::new(
            estimated_items_capacity,
            weight_capacity,
            weighter,
            hash_builder,
            ttl,
        )
        .await;

        Self(inner)
    }

    #[inline]
    pub fn get(&self, key: &Key) -> Option<Val> {
        self.0.get(key, &())
    }

    #[inline]
    pub async fn get_or_insert_async<E, Fut>(&self, key: &Key, f: Fut) -> Result<Val, E>
    where
        Fut: Future<Output = Result<Val, E>>,
    {
        self.0.get_or_insert_async(key, &(), f).await
    }

    #[inline]
    pub async fn get_value_or_guard_async(
        &self,
        key: Key,
    ) -> Result<Val, PlaceholderGuardWithTTL<'_, Key, (), Val, We, B>> {
        self.0.get_value_or_guard_async(key, ()).await
    }

    #[inline]
    pub fn insert(&self, key: Key, val: Val) {
        self.0.insert(key, (), val)
    }

    #[inline]
    pub fn remove(&self, key: &Key) -> bool {
        self.0.remove(key, &())
    }
}
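A small usage sketch of the single-key wrapper, assuming the crate's public re-exports; the key/value types and the numbers are arbitrary:

```rust
use std::time::Duration;

// Hypothetical tokio entrypoint exercising the new cache.
#[tokio::main]
async fn main() {
    // up to 10_000 entries; entries added through insert() are dropped ~60s later
    let cache = CacheWithTTL::<u64, String>::arc_with_capacity(10_000, Duration::from_secs(60)).await;

    cache.insert(1, "hello".to_string());
    assert_eq!(cache.get(&1), Some("hello".to_string()));

    // compute-on-miss path; the `()` error type is just a placeholder
    let v: Result<String, ()> = cache
        .get_or_insert_async(&2, async { Ok("world".to_string()) })
        .await;
    assert_eq!(v.unwrap(), "world");
}
```

As far as this diff shows, only `insert` and the placeholder guard returned by `get_value_or_guard_async` schedule an expiry message; `get_or_insert_async` delegates straight to the inner cache.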
148
quick_cache_ttl/src/kq_cache.rs
Normal file
@ -0,0 +1,148 @@
use quick_cache::sync::KQCache;
use quick_cache::{PlaceholderGuard, Weighter};
use std::future::Future;
use std::hash::{BuildHasher, Hash};
use std::sync::Arc;
use std::time::Duration;
use tokio::task::JoinHandle;
use tokio::time::{sleep_until, Instant};

pub struct KQCacheWithTTL<Key, Qey, Val, We, B> {
    cache: Arc<KQCache<Key, Qey, Val, We, B>>,
    pub task_handle: JoinHandle<()>,
    ttl: Duration,
    tx: flume::Sender<(Instant, Key, Qey)>,
}

struct KQCacheWithTTLTask<Key, Qey, Val, We, B> {
    cache: Arc<KQCache<Key, Qey, Val, We, B>>,
    rx: flume::Receiver<(Instant, Key, Qey)>,
}

pub struct PlaceholderGuardWithTTL<'a, Key, Qey, Val, We, B> {
    inner: PlaceholderGuard<'a, Key, Qey, Val, We, B>,
    key: Key,
    qey: Qey,
    ttl: Duration,
    tx: &'a flume::Sender<(Instant, Key, Qey)>,
}

impl<
        Key: Eq + Hash + Clone + Send + Sync + 'static,
        Qey: Eq + Hash + Clone + Send + Sync + 'static,
        Val: Clone + Send + Sync + 'static,
        We: Weighter<Key, Qey, Val> + Clone + Send + Sync + 'static,
        B: BuildHasher + Clone + Send + Sync + 'static,
    > KQCacheWithTTL<Key, Qey, Val, We, B>
{
    pub async fn new(
        estimated_items_capacity: usize,
        weight_capacity: u64,
        weighter: We,
        hash_builder: B,
        ttl: Duration,
    ) -> Self {
        let (tx, rx) = flume::unbounded();

        let cache = KQCache::with(
            estimated_items_capacity,
            weight_capacity,
            weighter,
            hash_builder,
        );

        let cache = Arc::new(cache);

        let task = KQCacheWithTTLTask {
            cache: cache.clone(),
            rx,
        };

        let task_handle = tokio::spawn(task.run());

        Self {
            cache,
            task_handle,
            ttl,
            tx,
        }
    }

    #[inline]
    pub fn get(&self, key: &Key, qey: &Qey) -> Option<Val> {
        self.cache.get(key, qey)
    }

    #[inline]
    pub async fn get_or_insert_async<E, Fut>(&self, key: &Key, qey: &Qey, f: Fut) -> Result<Val, E>
    where
        Fut: Future<Output = Result<Val, E>>,
    {
        self.cache.get_or_insert_async(key, qey, f).await
    }

    #[inline]
    pub async fn get_value_or_guard_async(
        &self,
        key: Key,
        qey: Qey,
    ) -> Result<Val, PlaceholderGuardWithTTL<'_, Key, Qey, Val, We, B>> {
        match self.cache.get_value_or_guard_async(&key, &qey).await {
            Ok(x) => Ok(x),
            Err(inner) => Err(PlaceholderGuardWithTTL {
                inner,
                key,
                qey,
                ttl: self.ttl,
                tx: &self.tx,
            }),
        }
    }

    pub fn insert(&self, key: Key, qey: Qey, val: Val) {
        let expire_at = Instant::now() + self.ttl;

        self.cache.insert(key.clone(), qey.clone(), val);

        self.tx.send((expire_at, key, qey)).unwrap();
    }

    pub fn remove(&self, key: &Key, qey: &Qey) -> bool {
        self.cache.remove(key, qey)
    }
}

impl<
        Key: Eq + Hash,
        Qey: Eq + Hash,
        Val: Clone,
        We: Weighter<Key, Qey, Val> + Clone,
        B: BuildHasher + Clone,
    > KQCacheWithTTLTask<Key, Qey, Val, We, B>
{
    async fn run(self) {
        while let Ok((expire_at, key, qey)) = self.rx.recv_async().await {
            sleep_until(expire_at).await;

            self.cache.remove(&key, &qey);
        }
    }
}

impl<
        'a,
        Key: Clone + Hash + Eq,
        Qey: Clone + Hash + Eq,
        Val: Clone,
        We: Weighter<Key, Qey, Val>,
        B: BuildHasher,
    > PlaceholderGuardWithTTL<'a, Key, Qey, Val, We, B>
{
    pub fn insert(self, val: Val) {
        let expire_at = Instant::now() + self.ttl;

        self.inner.insert(val);

        self.tx.send((expire_at, self.key, self.qey)).unwrap();
    }
}
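And a sketch of the guard path on the two-key cache, which is how a caller-computed value picks up the TTL; the concrete types and the `fetch_or_compute` helper are assumptions for illustration only:

```rust
use quick_cache::{DefaultHashBuilder, UnitWeighter};

async fn fetch_or_compute(
    cache: &KQCacheWithTTL<String, u64, String, UnitWeighter, DefaultHashBuilder>,
    key: String,
    qey: u64,
) -> String {
    match cache.get_value_or_guard_async(key, qey).await {
        // another caller already cached the value
        Ok(found) => found,
        // we hold the placeholder: compute, then insert through the guard so the
        // (expire_at, key, qey) message reaches the background expiry task
        Err(guard) => {
            let computed = "expensive result".to_string();
            guard.insert(computed.clone());
            computed
        }
    }
}
```

The expiry side stays simple because the ttl is fixed per cache: messages arrive on the flume channel in roughly `expire_at` order, so the background task can just `sleep_until` each deadline and remove the entry.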
7
quick_cache_ttl/src/lib.rs
Normal file
@ -0,0 +1,7 @@
mod cache;
mod kq_cache;

pub use cache::CacheWithTTL;
pub use kq_cache::{KQCacheWithTTL, PlaceholderGuardWithTTL};
pub use quick_cache::sync::{Cache, KQCache};
pub use quick_cache::{DefaultHashBuilder, UnitWeighter, Weighter};
@ -6,4 +6,4 @@ edition = "2021"

[dependencies]
flume = "0.10.14"
tokio = { version = "1.28.0", features = ["time"] }
tokio = { version = "1.28.1", features = ["time"] }

@ -8,4 +8,4 @@ edition = "2021"
anyhow = "1.0.71"
chrono = "0.4.24"
deadpool-redis = { version = "0.12.0", features = ["rt_tokio_1", "serde"] }
tokio = "1.28.0"
tokio = "1.28.1"
2
scripts/brownie-tests/.gitattributes
vendored
Normal file
@ -0,0 +1,2 @@
*.sol linguist-language=Solidity
*.vy linguist-language=Python
6
scripts/brownie-tests/.gitignore
vendored
Normal file
@ -0,0 +1,6 @@
__pycache__
.env
.history
.hypothesis/
build/
reports/
1
scripts/brownie-tests/brownie-config.yaml
Normal file
@ -0,0 +1 @@
dotenv: .env
34
scripts/brownie-tests/scripts/make_payment.py
Normal file
@ -0,0 +1,34 @@
from brownie import Contract, Sweeper, accounts
from brownie.network import priority_fee

def main():
    print("Hello")

    print("accounts are")
    token = Contract.from_explorer("0xC9fCFA7e28fF320C49967f4522EBc709aa1fDE7c")
    factory = Contract.from_explorer("0x4e3bc2054788de923a04936c6addb99a05b0ea36")
    user = accounts.load("david")
    # user = accounts.load("david-main")

    print("Llama token")
    print(token)

    print("Factory token")
    print(factory)

    print("User addr")
    print(user)

    # Sweeper and Proxy are deployed by us, as the user, by calling factory
    # Already been called before ...
    # factory.create_payment_address({'from': user})
    sweeper = Sweeper.at(factory.account_to_payment_address(user))
    print("Sweeper is at")
    print(sweeper)

    priority_fee("auto")
    token._mint_for_testing(user, (10_000)*(10**18), {'from': user})
    # token.approve(sweeper, 2**256-1, {'from': user})
    sweeper.send_token(token, (5_000)*(10**18), {'from': user})
    # sweeper.send_token(token, (47)*(10**13), {'from': user})
@ -5,4 +5,4 @@
# https://github.com/INFURA/versus
# ./ethspam | ./versus --stop-after 100 "http://localhost:8544/" # Pipe into the endpoint ..., add a bearer token and all that

./ethspam http://127.0.0.1:8544 | ./versus --stop-after 100 http://localhost:8544
./ethspam http://127.0.0.1:8544/rpc/01H0ZZJDNNEW49FRFS4D9SPR8B | ./versus --concurrency=4 --stop-after 100 http://localhost:8544/rpc/01H0ZZJDNNEW49FRFS4D9SPR8B

@ -6,5 +6,6 @@
curl -X GET \
"http://localhost:8544/user/stats/aggregate?query_start=1678780033&query_window_seconds=1000"

#curl -X GET \
#"http://localhost:8544/user/stats/detailed?query_start=1678780033&query_window_seconds=1000"
curl -X GET \
-H "Authorization: Bearer 01GZK8MHHGQWK4VPGF97HS91MB" \
"http://localhost:8544/user/stats/detailed?query_start=1678780033&query_window_seconds=1000"
110
scripts/manual-tests/12-subusers-premium-account.sh
Normal file
110
scripts/manual-tests/12-subusers-premium-account.sh
Normal file
@ -0,0 +1,110 @@
|
||||
### Tests subuser premium account endpoints
|
||||
##################
|
||||
# Run the server
|
||||
##################
|
||||
# Run the proxyd instance
|
||||
cargo run --release -- proxyd
|
||||
|
||||
# Check if the instance is running
|
||||
curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"web3_clientVersion","id":1}' 127.0.0.1:8544
|
||||
|
||||
|
||||
##################
|
||||
# Create the premium / primary user & log in (Wallet 0xeB3E928A2E54BE013EF8241d4C9EaF4DfAE94D5a)
|
||||
##################
|
||||
cargo run create_user --address 0xeB3E928A2E54BE013EF8241d4C9EaF4DfAE94D5a
|
||||
|
||||
# Make user premium, so he can create subusers
|
||||
cargo run change_user_tier_by_address 0xeB3E928A2E54BE013EF8241d4C9EaF4DfAE94D5a "Unlimited"
|
||||
# could also use CLI to change user role
|
||||
# ULID 01GXRAGS5F9VJFQRVMZGE1Q85T
|
||||
# UUID 018770a8-64af-4ee4-fbe3-74fc1c1ba0ba
|
||||
|
||||
# Open this website to get the nonce to log in, sign the message, and paste the payload in the endpoint that follows it
|
||||
http://127.0.0.1:8544/user/login/0xeB3E928A2E54BE013EF8241d4C9EaF4DfAE94D5a
|
||||
https://www.myetherwallet.com/wallet/sign
|
||||
|
||||
http://127.0.0.1:8544/user/login/0xeB3E928A2E54BE013EF8241d4C9EaF4DfAE94D5a
|
||||
https://www.myetherwallet.com/wallet/sign
|
||||
|
||||
# Use this site to sign a message
|
||||
curl -X POST http://127.0.0.1:8544/user/login \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{
|
||||
"address": "0x762390ae7a3c4d987062a398c1ea8767029ab08e",
|
||||
"msg": "0x6c6c616d616e6f6465732e636f6d2077616e747320796f7520746f207369676e20696e207769746820796f757220457468657265756d206163636f756e743a0a3078373632333930616537613363344439383730363261333938433165413837363730323941423038450a0af09fa699f09fa699f09fa699f09fa699f09fa6990a0a5552493a2068747470733a2f2f6c6c616d616e6f6465732e636f6d2f0a56657273696f6e3a20310a436861696e2049443a20310a4e6f6e63653a203031475a484e4350315a57345134305a384b4e4e304454564a320a4973737565642041743a20323032332d30352d30335432303a33383a31392e3435363231345a0a45787069726174696f6e2054696d653a20323032332d30352d30335432303a35383a31392e3435363231345a",
|
||||
"sig": "82d2ee89fb6075bdc57fa66db4e0b2b84ad0b6515e1b3d71bb1dd4e6f1711b2f0f6b5f5e40116fd51e609bc8b4c0642f4cdaaf96a6c48e66093fe153d4e2873f1c",
|
||||
"version": "3",
|
||||
"signer": "MEW"
|
||||
}'
|
||||
|
||||
# Bearer token is: 01GZHMCXHXHPGAABAQQTXKMSM3
|
||||
# RPC secret key is: 01GZHMCXGXT5Z4M8SCKCMKDAZ6
|
||||
|
||||
# 01GZHND8E5BYRVPXXMKPQ75RJ1
|
||||
# 01GZHND83W8VAHCZWEPP1AA24M
|
||||
|
||||
# Top up the balance of the account
|
||||
curl \
|
||||
-H "Authorization: Bearer 01GZHMCXHXHPGAABAQQTXKMSM3" \
|
||||
-X GET "127.0.0.1:8544/user/balance/0x749788a5766577431a0a4fc8721fd7cb981f55222e073ed17976f0aba5e8818a"
|
||||
|
||||
|
||||
# Make an example RPC request to check if the tokens work
|
||||
curl \
|
||||
-X POST "127.0.0.1:8544/rpc/01GZHMCXGXT5Z4M8SCKCMKDAZ6" \
|
||||
-H "Content-Type: application/json" \
|
||||
--data '{"method":"eth_blockNumber","params":[],"id":1,"jsonrpc":"2.0"}'
|
||||
|
||||
##################
|
||||
# Now act as the subuser (Wallet 0x762390ae7a3c4D987062a398C1eA8767029AB08E)
|
||||
# We first login the subuser
|
||||
##################
|
||||
# Login using the referral link. This should create the user, and also mark him as being referred
|
||||
# http://127.0.0.1:8544/user/login/0x762390ae7a3c4D987062a398C1eA8767029AB08E
|
||||
# https://www.myetherwallet.com/wallet/sign
|
||||
curl -X POST http://127.0.0.1:8544/user/login \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{
|
||||
"address": "0x762390ae7a3c4d987062a398c1ea8767029ab08e",
|
||||
"msg": "0x6c6c616d616e6f6465732e636f6d2077616e747320796f7520746f207369676e20696e207769746820796f757220457468657265756d206163636f756e743a0a3078373632333930616537613363344439383730363261333938433165413837363730323941423038450a0af09fa699f09fa699f09fa699f09fa699f09fa6990a0a5552493a2068747470733a2f2f6c6c616d616e6f6465732e636f6d2f0a56657273696f6e3a20310a436861696e2049443a20310a4e6f6e63653a20303147585246454b5654334d584531334b5956443159323853460a4973737565642041743a20323032332d30342d31315431353a33373a34382e3636373438315a0a45787069726174696f6e2054696d653a20323032332d30342d31315431353a35373a34382e3636373438315a",
|
||||
"sig": "1784c968fdc244248a4c0b8d52158ff773e044646d6e5ce61d457679d740566b66fd16ad24777f09c971e2c3dfa74966ffb8c083a9bef2a527e49bc3770713431c",
|
||||
"version": "3",
|
||||
"signer": "MEW",
|
||||
"referral_code": "llamanodes-01GXRB6RVM00MACTKABYVF8MJR"
|
||||
}'
|
||||
|
||||
# Bearer token 01GXRFKFQXDV0MQ2RT52BCPZ23
|
||||
# RPC key 01GXRFKFPY5DDRCRVB3B3HVDYK
|
||||
|
||||
##################
|
||||
# Now the primary user adds the secondary user as a subuser
|
||||
##################
|
||||
# Get first users RPC keys
|
||||
curl \
|
||||
-H "Authorization: Bearer 01GXRB6AHZSXFDX2S1QJPJ8X51" \
|
||||
-X GET "127.0.0.1:8544/user/keys"
|
||||
|
||||
# Secret key
|
||||
curl \
|
||||
-X GET "127.0.0.1:8544/user/subuser?subuser_address=0x762390ae7a3c4D987062a398C1eA8767029AB08E&rpc_key=01GZHMCXGXT5Z4M8SCKCMKDAZ6&new_status=upsert&new_role=admin" \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "Authorization: Bearer 01GZHMCXHXHPGAABAQQTXKMSM3"
|
||||
|
||||
# The primary user can check what subusers he gave access to
|
||||
curl \
|
||||
-X GET "127.0.0.1:8544/user/subusers?rpc_key=01GZHMCXGXT5Z4M8SCKCMKDAZ6" \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "Authorization: Bearer 01GZHMCXHXHPGAABAQQTXKMSM3"
|
||||
|
||||
# The secondary user can see all the projects that he is associated with
|
||||
curl \
|
||||
-X GET "127.0.0.1:8544/subuser/rpc_keys" \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "Authorization: Bearer 01GXRFKFQXDV0MQ2RT52BCPZ23"
|
||||
|
||||
# Secret key
|
||||
curl \
|
||||
-X GET "127.0.0.1:8544/user/subuser?subuser_address=0x762390ae7a3c4D987062a398C1eA8767029AB08E&rpc_key=01GXRFKFPY5DDRCRVB3B3HVDYK&new_status=remove&new_role=collaborator" \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "Authorization: Bearer 01GXRFKFQXDV0MQ2RT52BCPZ23"
|
@ -3,14 +3,14 @@
# sea-orm-cli migrate up

# Use CLI to create the admin that will call the endpoint
RUSTFLAGS="--cfg tokio_unstable" cargo run create_user --address 0xeB3E928A2E54BE013EF8241d4C9EaF4DfAE94D5a
RUSTFLAGS="--cfg tokio_unstable" cargo run change_admin_status 0xeB3E928A2E54BE013EF8241d4C9EaF4DfAE94D5a true
cargo run create_user --address 0xeB3E928A2E54BE013EF8241d4C9EaF4DfAE94D5a
cargo run change_admin_status 0xeB3E928A2E54BE013EF8241d4C9EaF4DfAE94D5a true

# Use CLI to create the user whose role will be changed via the endpoint
RUSTFLAGS="--cfg tokio_unstable" cargo run create_user --address 0x077e43dcca20da9859daa3fd78b5998b81f794f7
cargo run create_user --address 0x077e43dcca20da9859daa3fd78b5998b81f794f7

# Run the proxyd instance
RUSTFLAGS="--cfg tokio_unstable" cargo run --release -- proxyd
cargo run --release -- proxyd

# Check if the instance is running
curl --verbose -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"web3_clientVersion","id":1}' 127.0.0.1:8544
111
scripts/manual-tests/24-simple-referral-program.sh
Normal file
111
scripts/manual-tests/24-simple-referral-program.sh
Normal file
@ -0,0 +1,111 @@
|
||||
##################
|
||||
# Run the server
|
||||
##################
|
||||
|
||||
# Keep the proxyd instance running the background (and test that it works)
|
||||
cargo run --release -- proxyd
|
||||
|
||||
# Check if the instance is running
|
||||
curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"web3_clientVersion","id":1}' 127.0.0.1:8544
|
||||
|
||||
##################
|
||||
# Create the referring user & log in (Wallet 0xeB3E928A2E54BE013EF8241d4C9EaF4DfAE94D5a)
|
||||
##################
|
||||
cargo run create_user --address 0xeB3E928A2E54BE013EF8241d4C9EaF4DfAE94D5a
|
||||
|
||||
# Make user premium, so he can create referral keys
|
||||
cargo run change_user_tier_by_address 0xeB3E928A2E54BE013EF8241d4C9EaF4DfAE94D5a "Unlimited"
|
||||
# could also use CLI to change user role
|
||||
# ULID 01GXRAGS5F9VJFQRVMZGE1Q85T
|
||||
# UUID 018770a8-64af-4ee4-fbe3-74fc1c1ba0ba
|
||||
|
||||
# Open this website to get the nonce to log in, sign the message, and paste the payload in the endpoint that follows it
|
||||
http://127.0.0.1:8544/user/login/0xeB3E928A2E54BE013EF8241d4C9EaF4DfAE94D5a
|
||||
https://www.myetherwallet.com/wallet/sign
|
||||
|
||||
# Use this site to sign a message
|
||||
curl -X POST http://127.0.0.1:8544/user/login \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{
|
||||
"address": "0xeb3e928a2e54be013ef8241d4c9eaf4dfae94d5a",
|
||||
"msg": "0x6c6c616d616e6f6465732e636f6d2077616e747320796f7520746f207369676e20696e207769746820796f757220457468657265756d206163636f756e743a0a3078654233453932384132453534424530313345463832343164344339456146344466414539344435610a0af09fa699f09fa699f09fa699f09fa699f09fa6990a0a5552493a2068747470733a2f2f6c6c616d616e6f6465732e636f6d2f0a56657273696f6e3a20310a436861696e2049443a20310a4e6f6e63653a2030314758524235424a584b47535845454b5a314438424857565a0a4973737565642041743a20323032332d30342d31315431343a32323a35302e3937333930365a0a45787069726174696f6e2054696d653a20323032332d30342d31315431343a34323a35302e3937333930365a",
|
||||
"sig": "be1f9fed3f6f206c15677b7da488071b936b68daf560715b75cf9232afe4b9923c2c5d00a558847131f0f04200b4b123011f62521b7b97bab2c8b794c82b29621b",
|
||||
"version": "3",
|
||||
"signer": "MEW"
|
||||
}'
|
||||
|
||||
# Bearer token is: 01GXRB6AHZSXFDX2S1QJPJ8X51
|
||||
# RPC secret key is: 01GXRAGS5F9VJFQRVMZGE1Q85T
|
||||
|
||||
# Make an example RPC request to check if the tokens work
|
||||
curl \
|
||||
-X POST "127.0.0.1:8544/rpc/01GXRAGS5F9VJFQRVMZGE1Q85T" \
|
||||
-H "Content-Type: application/json" \
|
||||
--data '{"method":"eth_blockNumber","params":[],"id":1,"jsonrpc":"2.0"}'
|
||||
|
||||
# Now retrieve the referral link
|
||||
curl \
|
||||
-H "Authorization: Bearer 01GXRB6AHZSXFDX2S1QJPJ8X51" \
|
||||
-X GET "127.0.0.1:8544/user/referral"
|
||||
|
||||
# This is the referral code which will be used by the redeemer
|
||||
# "llamanodes-01GXRB6RVM00MACTKABYVF8MJR"
|
||||
|
||||
##################
|
||||
# Now act as the referred user (Wallet 0x762390ae7a3c4D987062a398C1eA8767029AB08E)
# We first log in the referred user
# Using the referral code creates an entry in the referral table
|
||||
##################
|
||||
# Login using the referral link. This should create the user, and also mark him as being referred
|
||||
# http://127.0.0.1:8544/user/login/0x762390ae7a3c4D987062a398C1eA8767029AB08E
|
||||
# https://www.myetherwallet.com/wallet/sign
|
||||
curl -X POST http://127.0.0.1:8544/user/login \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{
|
||||
"address": "0x762390ae7a3c4d987062a398c1ea8767029ab08e",
|
||||
"msg": "0x6c6c616d616e6f6465732e636f6d2077616e747320796f7520746f207369676e20696e207769746820796f757220457468657265756d206163636f756e743a0a3078373632333930616537613363344439383730363261333938433165413837363730323941423038450a0af09fa699f09fa699f09fa699f09fa699f09fa6990a0a5552493a2068747470733a2f2f6c6c616d616e6f6465732e636f6d2f0a56657273696f6e3a20310a436861696e2049443a20310a4e6f6e63653a20303147585246454b5654334d584531334b5956443159323853460a4973737565642041743a20323032332d30342d31315431353a33373a34382e3636373438315a0a45787069726174696f6e2054696d653a20323032332d30342d31315431353a35373a34382e3636373438315a",
|
||||
"sig": "1784c968fdc244248a4c0b8d52158ff773e044646d6e5ce61d457679d740566b66fd16ad24777f09c971e2c3dfa74966ffb8c083a9bef2a527e49bc3770713431c",
|
||||
"version": "3",
|
||||
"signer": "MEW",
|
||||
"referral_code": "llamanodes-01GXRB6RVM00MACTKABYVF8MJR"
|
||||
}'
|
||||
|
||||
# Bearer token 01GXRFKFQXDV0MQ2RT52BCPZ23
|
||||
# RPC key 01GXRFKFPY5DDRCRVB3B3HVDYK
|
||||
|
||||
# Make some requests, the referrer should not receive any credits for this (balance table is not created for free-tier users ...) This works fine
|
||||
for i in {1..1000}
|
||||
do
|
||||
curl \
|
||||
-X POST "127.0.0.1:8544/rpc/01GXRFKFPY5DDRCRVB3B3HVDYK" \
|
||||
-H "Content-Type: application/json" \
|
||||
--data '{"method":"eth_blockNumber","params":[],"id":1,"jsonrpc":"2.0"}'
|
||||
done
|
||||
|
||||
###########################################
|
||||
# Now the referred user deposits some tokens
|
||||
# They then send it to the endpoint
|
||||
###########################################
|
||||
curl \
|
||||
-H "Authorization: Bearer 01GXRFKFQXDV0MQ2RT52BCPZ23" \
|
||||
-X GET "127.0.0.1:8544/user/balance/0xda41f748106d2d1f1bf395e65d07bd9fc507c1eb4fd50c87d8ca1f34cfd536b0"
|
||||
|
||||
curl \
|
||||
-H "Authorization: Bearer 01GXRFKFQXDV0MQ2RT52BCPZ23" \
|
||||
-X GET "127.0.0.1:8544/user/balance/0xd56dee328dfa3bea26c3762834081881e5eff62e77a2b45e72d98016daaeffba"
|
||||
|
||||
|
||||
###########################################
|
||||
# Now the referred user starts spending the money. Let's make requests worth $100 and see what happens ...
|
||||
# At all times, the referrer should receive 10% of the spent tokens
|
||||
###########################################
|
||||
for i in {1..10000000}
|
||||
do
|
||||
curl \
|
||||
-X POST "127.0.0.1:8544/rpc/01GXRFKFPY5DDRCRVB3B3HVDYK" \
|
||||
-H "Content-Type: application/json" \
|
||||
--data '{"method":"eth_blockNumber","params":[],"id":1,"jsonrpc":"2.0"}'
|
||||
done
|
||||
|
||||
# Check that the new user was indeed logged in, and that a referral table entry was created (in the database)
|
||||
# Check that the 10% referral rate works
|
86
scripts/manual-tests/42-simple-balance.sh
Normal file
86
scripts/manual-tests/42-simple-balance.sh
Normal file
@ -0,0 +1,86 @@
|
||||
##################
|
||||
# Run the server
|
||||
##################
|
||||
# Run the proxyd instance
|
||||
cargo run --release -- proxyd
|
||||
|
||||
# Check if the instance is running
|
||||
curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"web3_clientVersion","id":1}' 127.0.0.1:8544
|
||||
|
||||
##########################
|
||||
# Create a User & Log in
|
||||
##########################
|
||||
cargo run create_user --address 0x762390ae7a3c4D987062a398C1eA8767029AB08E
|
||||
# ULID: 01GXEDC66Z9RZE6AE22JE7FRAW
|
||||
# UUID: 01875cd6-18df-4e3e-e329-c2149c77e15c
|
||||
|
||||
# Log in as the user so we can check the balance
|
||||
# Open this website to get the nonce to log in
|
||||
curl -X GET "http://127.0.0.1:8544/user/login/0xeb3e928a2e54be013ef8241d4c9eaf4dfae94d5a"
|
||||
|
||||
# Use this site to sign a message
|
||||
# https://www.myetherwallet.com/wallet/sign (whatever is output with the above code)
|
||||
curl -X POST http://127.0.0.1:8544/user/login \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{
|
||||
"address": "0xeb3e928a2e54be013ef8241d4c9eaf4dfae94d5a",
|
||||
"msg": "0x6c6c616d616e6f6465732e636f6d2077616e747320796f7520746f207369676e20696e207769746820796f757220457468657265756d206163636f756e743a0a3078654233453932384132453534424530313345463832343164344339456146344466414539344435610a0af09fa699f09fa699f09fa699f09fa699f09fa6990a0a5552493a2068747470733a2f2f6c6c616d616e6f6465732e636f6d2f0a56657273696f6e3a20310a436861696e2049443a20310a4e6f6e63653a20303148305a5a48434356324b32324738544850535758485131480a4973737565642041743a20323032332d30352d32315432303a32303a34332e3033353539315a0a45787069726174696f6e2054696d653a20323032332d30352d32315432303a34303a34332e3033353539315a",
|
||||
"sig": "7591251840bf75d2ab7c895bc566a49d2f4c3ad6bb14d7256258a59e52055fc94c11f8f3836f5311b52fc18aca40867cd85802636645e1d757494800631cad381c",
|
||||
"version": "3",
|
||||
"signer": "MEW"
|
||||
}'
|
||||
|
||||
# bearer token is: 01H0ZZJDQ2F02MAXZR5K1X5NCP
# secret key is: 01H0ZZJDNNEW49FRFS4D9SPR8B
|
||||
|
||||
# 01GZH2PS89EJJY6V8JFCVTQ4BX
|
||||
# 01GZH2PS7CTHA3TAZ4HXCTX6KQ
|
||||
|
||||
###########################################
|
||||
# Initially check balance, it should be 0
|
||||
###########################################
|
||||
# Check the balance of the user
|
||||
# Balance seems to be returning properly (0, in this test case)
|
||||
curl \
|
||||
-H "Authorization: Bearer 01H0ZZJDQ2F02MAXZR5K1X5NCP" \
|
||||
-X GET "127.0.0.1:8544/user/balance"
|
||||
|
||||
|
||||
###########################################
|
||||
# The user submits a transaction on the matic network
|
||||
# and submits it on the endpoint
|
||||
###########################################
|
||||
curl \
|
||||
-H "Authorization: Bearer 01GZK65YRW69KZECCGPSQH2XYK" \
|
||||
-X GET "127.0.0.1:8544/user/balance/0x749788a5766577431a0a4fc8721fd7cb981f55222e073ed17976f0aba5e8818a"
|
||||
|
||||
###########################################
|
||||
# Check the balance again, it should have increased according to how much USDC was spent
|
||||
###########################################
|
||||
# Check the balance of the user
|
||||
# Balance seems to be returning properly (0, in this test case)
|
||||
curl \
|
||||
-H "Authorization: Bearer 01GZGGDBMV0GM6MFBBHPDE78BW" \
|
||||
-X GET "127.0.0.1:8544/user/balance"
|
||||
|
||||
# TODO: Now start using the RPC, balance should decrease
|
||||
|
||||
# Get the RPC key
|
||||
curl \
|
||||
-X GET "127.0.0.1:8544/user/keys" \
|
||||
-H "Authorization: Bearer 01GZGGDBMV0GM6MFBBHPDE78BW" \
|
||||
--data '{"method":"eth_blockNumber","params":[],"id":1,"jsonrpc":"2.0"}'
|
||||
|
||||
## Check if calling an RPC endpoint logs the stats
|
||||
## This one does already even it seems
|
||||
for i in {1..300}
|
||||
do
|
||||
curl \
|
||||
-X POST "127.0.0.1:8544/rpc/01H0ZZJDNNEW49FRFS4D9SPR8B" \
|
||||
-H "Content-Type: application/json" \
|
||||
--data '{"method":"eth_blockNumber","params":[],"id":1,"jsonrpc":"2.0"}'
|
||||
done
|
||||
|
||||
|
||||
# TODO: Now implement and test withdrawal
|
||||
|
scripts/manual-tests/45-admin-add-balance.sh (new file, 44 lines)
@ -0,0 +1,44 @@
# Create / Login user1
curl -X GET "http://127.0.0.1:8544/user/login/0xeb3e928a2e54be013ef8241d4c9eaf4dfae94d5a"
curl -X POST http://127.0.0.1:8544/user/login \
-H 'Content-Type: application/json' \
-d '{
"address": "0xeb3e928a2e54be013ef8241d4c9eaf4dfae94d5a",
"msg": "0x6c6c616d616e6f6465732e636f6d2077616e747320796f7520746f207369676e20696e207769746820796f757220457468657265756d206163636f756e743a0a3078654233453932384132453534424530313345463832343164344339456146344466414539344435610a0af09fa699f09fa699f09fa699f09fa699f09fa6990a0a5552493a2068747470733a2f2f6c6c616d616e6f6465732e636f6d2f0a56657273696f6e3a20310a436861696e2049443a20310a4e6f6e63653a203031483044573642334a48355a4b384a4e3947504d594e4d4b370a4973737565642041743a20323032332d30352d31345431393a33353a35352e3736323632395a0a45787069726174696f6e2054696d653a20323032332d30352d31345431393a35353a35352e3736323632395a",
"sig": "f88b42d638246f8e51637c753052cab3a13b2a138faf3107c921ce2f0027d6506b9adcd3a7b72af830cdf50d20e6e9cb3f9f456dd1be47f6543990ea050909791c",
"version": "3",
"signer": "MEW"
}'

# 01H0DW6VFCP365B9TXVQVVMHHY
# 01H0DVZNDJWQ7YG8RBHXQHJ301

# Make user1 an admin
cargo run change_admin_status 0xeB3E928A2E54BE013EF8241d4C9EaF4DfAE94D5a true

# Create/Login user2
curl -X GET "http://127.0.0.1:8544/user/login/0x762390ae7a3c4D987062a398C1eA8767029AB08E"

curl -X POST http://127.0.0.1:8544/user/login \
-H 'Content-Type: application/json' \
-d '{
"address": "0x762390ae7a3c4d987062a398c1ea8767029ab08e",
"msg": "0x6c6c616d616e6f6465732e636f6d2077616e747320796f7520746f207369676e20696e207769746820796f757220457468657265756d206163636f756e743a0a3078373632333930616537613363344439383730363261333938433165413837363730323941423038450a0af09fa699f09fa699f09fa699f09fa699f09fa6990a0a5552493a2068747470733a2f2f6c6c616d616e6f6465732e636f6d2f0a56657273696f6e3a20310a436861696e2049443a20310a4e6f6e63653a20303148304457384233304e534447594e484d33514d4a31434e530a4973737565642041743a20323032332d30352d31345431393a33373a30312e3238303338355a0a45787069726174696f6e2054696d653a20323032332d30352d31345431393a35373a30312e3238303338355a",
"sig": "c545235557b7952a789dffa2af153af5cf663dcc05449bcc4b651b04cda57de05bcef55c0f5cbf6aa2432369582eb6a40927d14ad0a2d15f48fa45f32fbf273f1c",
"version": "3",
"signer": "MEW"
}'

# 01H0DWPXRQA7XX2VFSNR02CG1N
# 01H0DWPXQQ951Y3R90QMF6MYGE

curl \
-H "Authorization: Bearer 01H0DWPXRQA7XX2VFSNR02CG1N" \
-X GET "127.0.0.1:8544/user/balance"


# Admin add balance
curl \
-H "Authorization: Bearer 01H0DW6VFCP365B9TXVQVVMHHY" \
-X GET "127.0.0.1:8544/admin/increase_balance?user_address=0x762390ae7a3c4D987062a398C1eA8767029AB08E&amount=100.0"
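
# A hedged sketch, not part of the original script: the admin_increase_balance
# handler added later in this commit also requires a "note" query parameter
# (it returns a BadRequest without one), so a fuller call might look like this;
# the note text here is only an illustrative placeholder:
curl \
-H "Authorization: Bearer 01H0DW6VFCP365B9TXVQVVMHHY" \
-X GET "127.0.0.1:8544/admin/increase_balance?user_address=0x762390ae7a3c4D987062a398C1eA8767029AB08E&amount=100.0&note=manual+test+deposit"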
scripts/manual-tests/48-balance-downgrade.sh (new file, 88 lines)
@ -0,0 +1,88 @@
##################
# Run the server
##################
# Run the proxyd instance
cargo run --release -- proxyd

# Check if the instance is running
curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"web3_clientVersion","id":1}' 127.0.0.1:8544

##########################
# Create a User & Log in
##########################
#cargo run create_user --address 0x762390ae7a3c4D987062a398C1eA8767029AB08E
# ULID: 01GXEDC66Z9RZE6AE22JE7FRAW
# UUID: 01875cd6-18df-4e3e-e329-c2149c77e15c

# Log in as the user so we can check the balance
# Open this website to get the nonce to log in
curl -X GET "http://127.0.0.1:8544/user/login/0xeB3E928A2E54BE013EF8241d4C9EaF4DfAE94D5a"

# Use this site to sign a message
# https://www.myetherwallet.com/wallet/sign (whatever is output with the above code)
curl -X POST http://127.0.0.1:8544/user/login \
-H 'Content-Type: application/json' \
-d '{
"address": "0xeb3e928a2e54be013ef8241d4c9eaf4dfae94d5a",
"msg": "0x6c6c616d616e6f6465732e636f6d2077616e747320796f7520746f207369676e20696e207769746820796f757220457468657265756d206163636f756e743a0a3078654233453932384132453534424530313345463832343164344339456146344466414539344435610a0af09fa699f09fa699f09fa699f09fa699f09fa6990a0a5552493a2068747470733a2f2f6c6c616d616e6f6465732e636f6d2f0a56657273696f6e3a20310a436861696e2049443a20310a4e6f6e63653a2030314759513445564731474b34314b42364130324a344b45384b0a4973737565642041743a20323032332d30342d32335431333a32323a30392e3533373932365a0a45787069726174696f6e2054696d653a20323032332d30342d32335431333a34323a30392e3533373932365a",
"sig": "52071cc59afb427eb554126f4f9f2a445c2a539783ba45079ccc0911197062f135d6d347cf0c38fa078dc2369c32b5131b86811fc0916786d1e48252163f58131c",
"version": "3",
"signer": "MEW"
}'

# bearer token is: 01GYQ4FMRKKWJEA2YBST3B89MJ
# secret key is: 01GYQ4FMNX9EMFBT43XEFGZV1K

###########################################
# Initially check balance, it should be 0
###########################################
# Check the balance of the user
# Balance seems to be returning properly (0, in this test case)
curl \
-H "Authorization: Bearer 01GYQ4FMRKKWJEA2YBST3B89MJ" \
-X GET "127.0.0.1:8544/user/balance"


###########################################
# The user submits a transaction on the matic network
# and submits it on the endpoint
###########################################
curl \
-H "Authorization: Bearer 01GYQ4FMRKKWJEA2YBST3B89MJ" \
-X GET "127.0.0.1:8544/user/balance/0x749788a5766577431a0a4fc8721fd7cb981f55222e073ed17976f0aba5e8818a"

###########################################
# Check the balance again, it should have increased according to how much USDC was spent
###########################################
# Check the balance of the user
curl \
-H "Authorization: Bearer 01GYQ4FMRKKWJEA2YBST3B89MJ" \
-X GET "127.0.0.1:8544/user/balance"

# Get the RPC key
curl \
-X GET "127.0.0.1:8544/user/keys" \
-H "Authorization: Bearer 01GYQ4FMRKKWJEA2YBST3B89MJ"

## Check if calling an RPC endpoint logs the stats
## This one already seems to log them
for i in {1..100000}
do
curl \
-X POST "127.0.0.1:8544/rpc/01GZHMCXGXT5Z4M8SCKCMKDAZ6" \
-H "Content-Type: application/json" \
--data '{"method":"eth_blockNumber","params":[],"id":1,"jsonrpc":"2.0"}'
done

for i in {1..100}
do
curl \
-X POST "127.0.0.1:8544/" \
-H "Content-Type: application/json" \
--data '{"method":"eth_blockNumber","params":[],"id":1,"jsonrpc":"2.0"}'
done

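# A hedged sketch, not part of the original script: after the load loops above,
# the same balance call can be repeated to watch the spent balance grow and to
# check the downgrade behaviour this test targets once the deposit is used up:
curl \
-H "Authorization: Bearer 01GYQ4FMRKKWJEA2YBST3B89MJ" \
-X GET "127.0.0.1:8544/user/balance"
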
# TODO: Now implement and test withdrawal
scripts/manual-tests/52-simple-get-deposits.sh (new file, 5 lines)
@ -0,0 +1,5 @@
# Check the deposits of the user
# Deposits seem to be returned properly (empty, in this test case)
curl \
-H "Authorization: Bearer 01GZHMCXHXHPGAABAQQTXKMSM3" \
-X GET "127.0.0.1:8544/user/deposits"
scripts/requirements.txt (new file, 4 lines)
@ -0,0 +1,4 @@
python-dotenv
eth-brownie
ensurepath
brownie-token-tester
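
These Python packages presumably back the brownie-based manual test scripts; a minimal setup sketch (assuming Python 3 and pip are already available) is:

pip install -r scripts/requirements.txt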
@ -1,21 +1,25 @@
|
||||
[package]
|
||||
name = "web3_proxy"
|
||||
version = "0.17.0"
|
||||
version = "0.28.0"
|
||||
edition = "2021"
|
||||
default-run = "web3_proxy_cli"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[features]
|
||||
default = ["deadlock_detection"]
|
||||
default = ["connectinfo", "deadlock_detection"]
|
||||
deadlock_detection = ["parking_lot/deadlock_detection"]
|
||||
mimalloc = ["dep:mimalloc"]
|
||||
tokio-console = ["dep:tokio-console", "dep:console-subscriber"]
|
||||
rdkafka-src = ["rdkafka/cmake-build", "rdkafka/libz", "rdkafka/ssl", "rdkafka/zstd-pkg-config"]
|
||||
connectinfo = []
|
||||
|
||||
[dependencies]
|
||||
deferred-rate-limiter = { path = "../deferred-rate-limiter" }
|
||||
entities = { path = "../entities" }
|
||||
latency = { path = "../latency" }
|
||||
migration = { path = "../migration" }
|
||||
quick_cache_ttl = { path = "../quick_cache_ttl" }
|
||||
redis-rate-limiter = { path = "../redis-rate-limiter" }
|
||||
thread-fast-rng = { path = "../thread-fast-rng" }
|
||||
|
||||
@ -26,6 +30,7 @@ thread-fast-rng = { path = "../thread-fast-rng" }
|
||||
# TODO: make sure this time version matches siwe. PR to put this in their prelude
|
||||
|
||||
anyhow = { version = "1.0.71", features = ["backtrace"] }
|
||||
arc-swap = "1.6.0"
|
||||
argh = "0.1.10"
|
||||
axum = { version = "0.6.18", features = ["headers", "ws"] }
|
||||
axum-client-ip = "0.4.1"
|
||||
@ -42,19 +47,22 @@ fdlimit = "0.2.1"
|
||||
flume = "0.10.14"
|
||||
fstrings = "0.2"
|
||||
futures = { version = "0.3.28", features = ["thread-pool"] }
|
||||
gethostname = "0.4.2"
|
||||
gethostname = "0.4.3"
|
||||
glob = "0.3.1"
|
||||
handlebars = "4.3.6"
|
||||
handlebars = "4.3.7"
|
||||
hashbrown = { version = "0.13.2", features = ["serde"] }
|
||||
hdrhistogram = "7.5.2"
|
||||
http = "0.2.9"
|
||||
hex_fmt = "0.3.0"
|
||||
hostname = "0.3.1"
|
||||
influxdb2 = { version = "0.4", features = ["rustls"] }
|
||||
influxdb2-structmap = "0.2.0"
|
||||
http = "0.2.9"
|
||||
hyper = { version = "0.14.26", features = ["full"] }
|
||||
influxdb2 = { git = "https://github.com/llamanodes/influxdb2", features = ["rustls"] }
|
||||
influxdb2-structmap = { git = "https://github.com/llamanodes/influxdb2/"}
|
||||
ipnet = "2.7.2"
|
||||
itertools = "0.10.5"
|
||||
listenfd = "1.0.1"
|
||||
log = "0.4.17"
|
||||
moka = { version = "0.11.0", default-features = false, features = ["future"] }
|
||||
mimalloc = { version = "0.1.37", optional = true}
|
||||
num = "0.4.0"
|
||||
num-traits = "0.2.15"
|
||||
once_cell = { version = "1.17.1" }
|
||||
@ -63,24 +71,27 @@ pagerduty-rs = { version = "0.1.6", default-features = false, features = ["async
|
||||
parking_lot = { version = "0.12.1", features = ["arc_lock"] }
|
||||
prettytable = "*"
|
||||
proctitle = "0.1.1"
|
||||
rdkafka = { version = "0.29.0" }
|
||||
regex = "1.8.1"
|
||||
reqwest = { version = "0.11.17", default-features = false, features = ["json", "tokio-rustls"] }
|
||||
rdkafka = { version = "0.31.0" }
|
||||
regex = "1.8.2"
|
||||
reqwest = { version = "0.11.18", default-features = false, features = ["json", "tokio-rustls"] }
|
||||
rmp-serde = "1.1.1"
|
||||
rustc-hash = "1.1.0"
|
||||
sentry = { version = "0.31.0", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] }
|
||||
serde = { version = "1.0.162", features = [] }
|
||||
sentry = { version = "0.31.2", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] }
|
||||
serde = { version = "1.0.163", features = [] }
|
||||
serde_json = { version = "1.0.96", default-features = false, features = ["alloc", "raw_value"] }
|
||||
serde_prometheus = "0.2.2"
|
||||
siwe = "0.5.0"
|
||||
strum = { version = "0.24.1", features = ["derive"] }
|
||||
time = "0.3.21"
|
||||
tokio = { version = "1.28.0", features = ["full"] }
|
||||
tokio = { version = "1.28.1", features = ["full"] }
|
||||
tokio-console = { version = "*", optional = true }
|
||||
tokio-stream = { version = "0.1.14", features = ["sync"] }
|
||||
tokio-uring = { version = "0.4.0", optional = true }
|
||||
toml = "0.7.3"
|
||||
toml = "0.7.4"
|
||||
tower = "0.4.13"
|
||||
tower-http = { version = "0.4.0", features = ["cors", "sensitive-headers"] }
|
||||
ulid = { version = "1.0.0", features = ["serde"] }
|
||||
ulid = { version = "1.0.0", features = ["uuid", "serde"] }
|
||||
url = "2.3.1"
|
||||
uuid = "1.3.2"
|
||||
uuid = "1.3.3"
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { version = "1.28.1", features = ["full", "test-util"] }
|
||||
|
File diff suppressed because it is too large
@ -1,56 +1,56 @@
|
||||
//! Websocket-specific functions for the Web3ProxyApp
|
||||
|
||||
use super::Web3ProxyApp;
|
||||
use crate::frontend::authorization::{Authorization, RequestMetadata};
|
||||
use crate::frontend::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
|
||||
use crate::frontend::authorization::{Authorization, RequestMetadata, RequestOrMethod};
|
||||
use crate::frontend::errors::{Web3ProxyError, Web3ProxyResult};
|
||||
use crate::jsonrpc::JsonRpcForwardedResponse;
|
||||
use crate::jsonrpc::JsonRpcRequest;
|
||||
use crate::response_cache::JsonRpcResponseData;
|
||||
use crate::rpcs::transactions::TxStatus;
|
||||
use crate::stats::RpcQueryStats;
|
||||
use axum::extract::ws::Message;
|
||||
use ethers::prelude::U64;
|
||||
use ethers::types::U64;
|
||||
use futures::future::AbortHandle;
|
||||
use futures::future::Abortable;
|
||||
use futures::stream::StreamExt;
|
||||
use log::{trace, warn};
|
||||
use log::trace;
|
||||
use serde_json::json;
|
||||
use std::sync::atomic::{self, AtomicUsize};
|
||||
use std::sync::Arc;
|
||||
use tokio_stream::wrappers::{BroadcastStream, WatchStream};
|
||||
|
||||
impl Web3ProxyApp {
|
||||
// TODO: #[measure([ErrorCount, HitCount, ResponseTime, Throughput])]
|
||||
pub async fn eth_subscribe<'a>(
|
||||
self: &'a Arc<Self>,
|
||||
authorization: Arc<Authorization>,
|
||||
request_json: JsonRpcRequest,
|
||||
jsonrpc_request: JsonRpcRequest,
|
||||
subscription_count: &'a AtomicUsize,
|
||||
// TODO: taking a sender for Message instead of the exact json we are planning to send feels wrong, but its easier for now
|
||||
response_sender: flume::Sender<Message>,
|
||||
) -> Web3ProxyResult<(AbortHandle, JsonRpcForwardedResponse)> {
|
||||
// TODO: this is not efficient
|
||||
let request_bytes = serde_json::to_string(&request_json)
|
||||
.web3_context("finding request size")?
|
||||
.len();
|
||||
|
||||
let request_metadata = Arc::new(RequestMetadata::new(request_bytes));
|
||||
let request_metadata = RequestMetadata::new(
|
||||
self,
|
||||
authorization.clone(),
|
||||
RequestOrMethod::Request(&jsonrpc_request),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
let (subscription_abort_handle, subscription_registration) = AbortHandle::new_pair();
|
||||
|
||||
// TODO: this only needs to be unique per connection. we don't need it globably unique
|
||||
// TODO: have a max number of subscriptions per key/ip. have a global max number of subscriptions? how should this be calculated?
|
||||
let subscription_id = subscription_count.fetch_add(1, atomic::Ordering::SeqCst);
|
||||
let subscription_id = U64::from(subscription_id);
|
||||
let subscription_id = U64::from(subscription_id as u64);
|
||||
|
||||
// save the id so we can use it in the response
|
||||
let id = request_json.id.clone();
|
||||
let id = jsonrpc_request.id.clone();
|
||||
|
||||
// TODO: calling json! on every request is probably not fast. but we can only match against
|
||||
// TODO: i think we need a stricter EthSubscribeRequest type that JsonRpcRequest can turn into
|
||||
match request_json.params.as_ref() {
|
||||
match jsonrpc_request.params.as_ref() {
|
||||
Some(x) if x == &json!(["newHeads"]) => {
|
||||
let authorization = authorization.clone();
|
||||
let head_block_receiver = self.watch_consensus_head_receiver.clone();
|
||||
let stat_sender = self.stat_sender.clone();
|
||||
let app = self.clone();
|
||||
|
||||
trace!("newHeads subscription {:?}", subscription_id);
|
||||
tokio::spawn(async move {
|
||||
@ -66,8 +66,13 @@ impl Web3ProxyApp {
|
||||
continue;
|
||||
};
|
||||
|
||||
// TODO: what should the payload for RequestMetadata be?
|
||||
let request_metadata = Arc::new(RequestMetadata::new(0));
|
||||
let subscription_request_metadata = RequestMetadata::new(
|
||||
&app,
|
||||
authorization.clone(),
|
||||
RequestOrMethod::Method("eth_subscribe(newHeads)", 0),
|
||||
Some(new_head.number()),
|
||||
)
|
||||
.await;
|
||||
|
||||
// TODO: make a struct for this? using our JsonRpcForwardedResponse won't work because it needs an id
|
||||
let response_json = json!({
|
||||
@ -83,33 +88,20 @@ impl Web3ProxyApp {
|
||||
let response_str = serde_json::to_string(&response_json)
|
||||
.expect("this should always be valid json");
|
||||
|
||||
// we could use response.num_bytes() here, but since we already have the string, this is easier
|
||||
// we could use JsonRpcForwardedResponseEnum::num_bytes() here, but since we already have the string, this is easier
|
||||
let response_bytes = response_str.len();
|
||||
|
||||
// TODO: do clients support binary messages?
|
||||
// TODO: can we check a content type header?
|
||||
let response_msg = Message::Text(response_str);
|
||||
|
||||
if response_sender.send_async(response_msg).await.is_err() {
|
||||
// TODO: increment error_response? i don't think so. i think this will happen once every time a client disconnects.
|
||||
// TODO: cancel this subscription earlier? select on head_block_receiver.next() and an abort handle?
|
||||
break;
|
||||
};
|
||||
|
||||
if let Some(stat_sender) = stat_sender.as_ref() {
|
||||
let response_stat = RpcQueryStats::new(
|
||||
Some("eth_subscription(newHeads)".to_string()),
|
||||
authorization.clone(),
|
||||
request_metadata.clone(),
|
||||
response_bytes,
|
||||
);
|
||||
|
||||
if let Err(err) = stat_sender.send_async(response_stat.into()).await {
|
||||
// TODO: what should we do?
|
||||
warn!(
|
||||
"stat_sender failed inside newPendingTransactions: {:?}",
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
subscription_request_metadata.add_response(response_bytes);
|
||||
}
|
||||
|
||||
trace!("closed newHeads subscription {:?}", subscription_id);
|
||||
@ -117,8 +109,7 @@ impl Web3ProxyApp {
|
||||
}
|
||||
Some(x) if x == &json!(["newPendingTransactions"]) => {
|
||||
let pending_tx_receiver = self.pending_tx_sender.subscribe();
|
||||
let stat_sender = self.stat_sender.clone();
|
||||
let authorization = authorization.clone();
|
||||
let app = self.clone();
|
||||
|
||||
let mut pending_tx_receiver = Abortable::new(
|
||||
BroadcastStream::new(pending_tx_receiver),
|
||||
@ -133,7 +124,13 @@ impl Web3ProxyApp {
|
||||
// TODO: do something with this handle?
|
||||
tokio::spawn(async move {
|
||||
while let Some(Ok(new_tx_state)) = pending_tx_receiver.next().await {
|
||||
let request_metadata = Arc::new(RequestMetadata::new(0));
|
||||
let subscription_request_metadata = RequestMetadata::new(
|
||||
&app,
|
||||
authorization.clone(),
|
||||
RequestOrMethod::Method("eth_subscribe(newPendingTransactions)", 0),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
let new_tx = match new_tx_state {
|
||||
TxStatus::Pending(tx) => tx,
|
||||
@ -154,9 +151,11 @@ impl Web3ProxyApp {
|
||||
let response_str = serde_json::to_string(&response_json)
|
||||
.expect("this should always be valid json");
|
||||
|
||||
// we could use response.num_bytes() here, but since we already have the string, this is easier
|
||||
// TODO: test that this len is the same as JsonRpcForwardedResponseEnum.num_bytes()
|
||||
let response_bytes = response_str.len();
|
||||
|
||||
subscription_request_metadata.add_response(response_bytes);
|
||||
|
||||
// TODO: do clients support binary messages?
|
||||
let response_msg = Message::Text(response_str);
|
||||
|
||||
@ -164,23 +163,6 @@ impl Web3ProxyApp {
|
||||
// TODO: cancel this subscription earlier? select on head_block_receiver.next() and an abort handle?
|
||||
break;
|
||||
};
|
||||
|
||||
if let Some(stat_sender) = stat_sender.as_ref() {
|
||||
let response_stat = RpcQueryStats::new(
|
||||
Some("eth_subscription(newPendingTransactions)".to_string()),
|
||||
authorization.clone(),
|
||||
request_metadata.clone(),
|
||||
response_bytes,
|
||||
);
|
||||
|
||||
if let Err(err) = stat_sender.send_async(response_stat.into()).await {
|
||||
// TODO: what should we do?
|
||||
warn!(
|
||||
"stat_sender failed inside newPendingTransactions: {:?}",
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
trace!(
|
||||
@ -191,9 +173,8 @@ impl Web3ProxyApp {
|
||||
}
|
||||
Some(x) if x == &json!(["newPendingFullTransactions"]) => {
|
||||
// TODO: too much copy/pasta with newPendingTransactions
|
||||
let authorization = authorization.clone();
|
||||
let pending_tx_receiver = self.pending_tx_sender.subscribe();
|
||||
let stat_sender = self.stat_sender.clone();
|
||||
let app = self.clone();
|
||||
|
||||
let mut pending_tx_receiver = Abortable::new(
|
||||
BroadcastStream::new(pending_tx_receiver),
|
||||
@ -208,7 +189,13 @@ impl Web3ProxyApp {
|
||||
// TODO: do something with this handle?
|
||||
tokio::spawn(async move {
|
||||
while let Some(Ok(new_tx_state)) = pending_tx_receiver.next().await {
|
||||
let request_metadata = Arc::new(RequestMetadata::new(0));
|
||||
let subscription_request_metadata = RequestMetadata::new(
|
||||
&app,
|
||||
authorization.clone(),
|
||||
RequestOrMethod::Method("eth_subscribe(newPendingFullTransactions)", 0),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
let new_tx = match new_tx_state {
|
||||
TxStatus::Pending(tx) => tx,
|
||||
@ -227,12 +214,11 @@ impl Web3ProxyApp {
|
||||
},
|
||||
});
|
||||
|
||||
subscription_request_metadata.add_response(&response_json);
|
||||
|
||||
let response_str = serde_json::to_string(&response_json)
|
||||
.expect("this should always be valid json");
|
||||
|
||||
// we could use response.num_bytes() here, but since we already have the string, this is easier
|
||||
let response_bytes = response_str.len();
|
||||
|
||||
// TODO: do clients support binary messages?
|
||||
let response_msg = Message::Text(response_str);
|
||||
|
||||
@ -240,23 +226,6 @@ impl Web3ProxyApp {
|
||||
// TODO: cancel this subscription earlier? select on head_block_receiver.next() and an abort handle?
|
||||
break;
|
||||
};
|
||||
|
||||
if let Some(stat_sender) = stat_sender.as_ref() {
|
||||
let response_stat = RpcQueryStats::new(
|
||||
Some("eth_subscription(newPendingFullTransactions)".to_string()),
|
||||
authorization.clone(),
|
||||
request_metadata.clone(),
|
||||
response_bytes,
|
||||
);
|
||||
|
||||
if let Err(err) = stat_sender.send_async(response_stat.into()).await {
|
||||
// TODO: what should we do?
|
||||
warn!(
|
||||
"stat_sender failed inside newPendingFullTransactions: {:?}",
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
trace!(
|
||||
@ -267,9 +236,8 @@ impl Web3ProxyApp {
|
||||
}
|
||||
Some(x) if x == &json!(["newPendingRawTransactions"]) => {
|
||||
// TODO: too much copy/pasta with newPendingTransactions
|
||||
let authorization = authorization.clone();
|
||||
let pending_tx_receiver = self.pending_tx_sender.subscribe();
|
||||
let stat_sender = self.stat_sender.clone();
|
||||
let app = self.clone();
|
||||
|
||||
let mut pending_tx_receiver = Abortable::new(
|
||||
BroadcastStream::new(pending_tx_receiver),
|
||||
@ -284,7 +252,13 @@ impl Web3ProxyApp {
|
||||
// TODO: do something with this handle?
|
||||
tokio::spawn(async move {
|
||||
while let Some(Ok(new_tx_state)) = pending_tx_receiver.next().await {
|
||||
let request_metadata = Arc::new(RequestMetadata::new(0));
|
||||
let subscription_request_metadata = RequestMetadata::new(
|
||||
&app,
|
||||
authorization.clone(),
|
||||
"eth_subscribe(newPendingRawTransactions)",
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
let new_tx = match new_tx_state {
|
||||
TxStatus::Pending(tx) => tx,
|
||||
@ -317,22 +291,7 @@ impl Web3ProxyApp {
|
||||
break;
|
||||
};
|
||||
|
||||
if let Some(stat_sender) = stat_sender.as_ref() {
|
||||
let response_stat = RpcQueryStats::new(
|
||||
Some("eth_subscription(newPendingRawTransactions)".to_string()),
|
||||
authorization.clone(),
|
||||
request_metadata.clone(),
|
||||
response_bytes,
|
||||
);
|
||||
|
||||
if let Err(err) = stat_sender.send_async(response_stat.into()).await {
|
||||
// TODO: what should we do?
|
||||
warn!(
|
||||
"stat_sender failed inside newPendingRawTransactions: {:?}",
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
subscription_request_metadata.add_response(response_bytes);
|
||||
}
|
||||
|
||||
trace!(
|
||||
@ -346,21 +305,12 @@ impl Web3ProxyApp {
|
||||
|
||||
// TODO: do something with subscription_join_handle?
|
||||
|
||||
let response = JsonRpcForwardedResponse::from_value(json!(subscription_id), id);
|
||||
let response_data = JsonRpcResponseData::from(json!(subscription_id));
|
||||
|
||||
if let Some(stat_sender) = self.stat_sender.as_ref() {
|
||||
let response_stat = RpcQueryStats::new(
|
||||
Some(request_json.method.clone()),
|
||||
authorization.clone(),
|
||||
request_metadata,
|
||||
response.num_bytes(),
|
||||
);
|
||||
let response = JsonRpcForwardedResponse::from_response_data(response_data, id);
|
||||
|
||||
if let Err(err) = stat_sender.send_async(response_stat.into()).await {
|
||||
// TODO: what should we do?
|
||||
warn!("stat_sender failed inside websocket: {:?}", err);
|
||||
}
|
||||
}
|
||||
// TODO: this serializes twice
|
||||
request_metadata.add_response(&response);
|
||||
|
||||
// TODO: make a `SubscriptonHandle(AbortHandle, JoinHandle)` struct?
|
||||
Ok((subscription_abort_handle, response))
|
||||
|
@ -60,13 +60,12 @@ async fn main() -> anyhow::Result<()> {
|
||||
.context("unknown chain id for check_url")?;
|
||||
|
||||
if let Some(chain_id) = cli_config.chain_id {
|
||||
if chain_id != check_id {
|
||||
return Err(anyhow::anyhow!(
|
||||
"chain_id of check_url is wrong! Need {}. Found {}",
|
||||
chain_id,
|
||||
check_id,
|
||||
));
|
||||
}
|
||||
anyhow::ensure!(
|
||||
chain_id == check_id,
|
||||
"chain_id of check_url is wrong! Need {}. Found {}",
|
||||
chain_id,
|
||||
check_id,
|
||||
);
|
||||
}
|
||||
|
||||
let compare_url: String = match cli_config.compare_url {
|
||||
@ -93,13 +92,12 @@ async fn main() -> anyhow::Result<()> {
|
||||
.await
|
||||
.context("unknown chain id for compare_url")?;
|
||||
|
||||
if check_id != compare_id {
|
||||
return Err(anyhow::anyhow!(
|
||||
"chain_id does not match! Need {}. Found {}",
|
||||
check_id,
|
||||
compare_id,
|
||||
));
|
||||
}
|
||||
anyhow::ensure!(
|
||||
check_id == compare_id,
|
||||
"chain_id does not match! Need {}. Found {}",
|
||||
check_id,
|
||||
compare_id,
|
||||
);
|
||||
|
||||
// start ids at 2 because id 1 was checking the chain id
|
||||
let counter = AtomicU32::new(2);
|
||||
|
@ -38,6 +38,13 @@ use web3_proxy::{
|
||||
config::TopConfig,
|
||||
};
|
||||
|
||||
#[cfg(feature = "mimalloc")]
|
||||
use mimalloc::MiMalloc;
|
||||
|
||||
#[cfg(feature = "mimalloc")]
|
||||
#[global_allocator]
|
||||
static GLOBAL: MiMalloc = MiMalloc;
|
||||
|
||||
#[cfg(feature = "deadlock")]
|
||||
use {parking_lot::deadlock, std::thread, tokio::time::Duration};
|
||||
|
||||
@ -120,10 +127,10 @@ fn main() -> anyhow::Result<()> {
|
||||
|
||||
// if RUST_LOG isn't set, configure a default
|
||||
// TODO: is there a better way to do this?
|
||||
#[cfg(tokio_console)]
|
||||
#[cfg(feature = "tokio_console")]
|
||||
console_subscriber::init();
|
||||
|
||||
#[cfg(not(tokio_console))]
|
||||
#[cfg(not(feature = "tokio_console"))]
|
||||
let rust_log = match std::env::var("RUST_LOG") {
|
||||
Ok(x) => x,
|
||||
Err(_) => match std::env::var("WEB3_PROXY_TRACE").map(|x| x == "true") {
|
||||
@ -202,7 +209,6 @@ fn main() -> anyhow::Result<()> {
|
||||
(None, None)
|
||||
};
|
||||
|
||||
#[cfg(not(tokio_console))]
|
||||
{
|
||||
let logger = env_logger::builder().parse_filters(&rust_log).build();
|
||||
|
||||
@ -267,9 +273,6 @@ fn main() -> anyhow::Result<()> {
|
||||
}
|
||||
|
||||
// set up tokio's async runtime
|
||||
#[cfg(tokio_uring)]
|
||||
let mut rt_builder = tokio_uring::Builder::new_multi_thread();
|
||||
#[cfg(not(tokio_uring))]
|
||||
let mut rt_builder = runtime::Builder::new_multi_thread();
|
||||
|
||||
rt_builder.enable_all();
|
||||
@ -278,7 +281,7 @@ fn main() -> anyhow::Result<()> {
|
||||
rt_builder.worker_threads(cli_config.workers);
|
||||
}
|
||||
|
||||
if let Some(top_config) = top_config.as_ref() {
|
||||
if let Some(ref top_config) = top_config {
|
||||
let chain_id = top_config.app.chain_id;
|
||||
|
||||
rt_builder.thread_name_fn(move || {
|
||||
|
@ -1,4 +1,4 @@
|
||||
use anyhow::Context;
|
||||
use anyhow::{anyhow, Context};
|
||||
use argh::FromArgs;
|
||||
use entities::{rpc_accounting, rpc_key};
|
||||
use futures::stream::FuturesUnordered;
|
||||
@ -9,17 +9,17 @@ use migration::sea_orm::{
|
||||
ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter, QuerySelect, UpdateResult,
|
||||
};
|
||||
use migration::{Expr, Value};
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
use parking_lot::Mutex;
|
||||
use std::num::NonZeroU64;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::time::Instant;
|
||||
use web3_proxy::app::{AuthorizationChecks, BILLING_PERIOD_SECONDS};
|
||||
use ulid::Ulid;
|
||||
use web3_proxy::app::BILLING_PERIOD_SECONDS;
|
||||
use web3_proxy::config::TopConfig;
|
||||
use web3_proxy::frontend::authorization::{
|
||||
Authorization, AuthorizationType, RequestMetadata, RpcSecretKey,
|
||||
};
|
||||
use web3_proxy::stats::{RpcQueryStats, StatBuffer};
|
||||
use web3_proxy::frontend::authorization::{Authorization, RequestMetadata, RpcSecretKey};
|
||||
use web3_proxy::rpcs::one::Web3Rpc;
|
||||
use web3_proxy::stats::StatBuffer;
|
||||
|
||||
#[derive(FromArgs, PartialEq, Eq, Debug)]
|
||||
/// Migrate towards influxdb and rpc_accounting_v2 from rpc_accounting
|
||||
@ -67,27 +67,28 @@ impl MigrateStatsToV2 {
|
||||
};
|
||||
|
||||
// Spawn the stat-sender
|
||||
let stat_sender = if let Some(emitter_spawn) = StatBuffer::try_spawn(
|
||||
top_config.app.chain_id,
|
||||
let emitter_spawn = StatBuffer::try_spawn(
|
||||
BILLING_PERIOD_SECONDS,
|
||||
top_config
|
||||
.app
|
||||
.influxdb_bucket
|
||||
.clone()
|
||||
.context("No influxdb bucket was provided")?,
|
||||
top_config.app.chain_id,
|
||||
Some(db_conn.clone()),
|
||||
influxdb_client.clone(),
|
||||
30,
|
||||
1,
|
||||
BILLING_PERIOD_SECONDS,
|
||||
influxdb_client.clone(),
|
||||
None,
|
||||
rpc_account_shutdown_recevier,
|
||||
)? {
|
||||
// since the database entries are used for accounting, we want to be sure everything is saved before exiting
|
||||
important_background_handles.push(emitter_spawn.background_handle);
|
||||
1,
|
||||
)
|
||||
.context("Error spawning stat buffer")?
|
||||
.context("No stat buffer spawned. Maybe missing influx or db credentials?")?;
|
||||
|
||||
Some(emitter_spawn.stat_sender)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
// since the database entries are used for accounting, we want to be sure everything is saved before exiting
|
||||
important_background_handles.push(emitter_spawn.background_handle);
|
||||
|
||||
let stat_sender = emitter_spawn.stat_sender;
|
||||
|
||||
let migration_timestamp = chrono::offset::Utc::now();
|
||||
|
||||
@ -109,7 +110,10 @@ impl MigrateStatsToV2 {
|
||||
// (2) Create request metadata objects to match the old data
|
||||
// Iterate through all old rows, and put them into the above objects.
|
||||
for x in old_records.iter() {
|
||||
let authorization_checks = match x.rpc_key_id {
|
||||
let mut authorization = Authorization::internal(None)
|
||||
.context("failed creating internal authorization")?;
|
||||
|
||||
match x.rpc_key_id {
|
||||
Some(rpc_key_id) => {
|
||||
let rpc_key_obj = rpc_key::Entity::find()
|
||||
.filter(rpc_key::Column::Id.eq(rpc_key_id))
|
||||
@ -117,34 +121,16 @@ impl MigrateStatsToV2 {
|
||||
.await?
|
||||
.context("Could not find rpc_key_obj for the given rpc_key_id")?;
|
||||
|
||||
// TODO: Create authrization
|
||||
// We can probably also randomly generate this, as we don't care about the user (?)
|
||||
AuthorizationChecks {
|
||||
user_id: rpc_key_obj.user_id,
|
||||
rpc_secret_key: Some(RpcSecretKey::Uuid(rpc_key_obj.secret_key)),
|
||||
rpc_secret_key_id: Some(
|
||||
NonZeroU64::new(rpc_key_id)
|
||||
.context("Could not use rpc_key_id to create a u64")?,
|
||||
),
|
||||
..Default::default()
|
||||
}
|
||||
authorization.checks.user_id = rpc_key_obj.user_id;
|
||||
authorization.checks.rpc_secret_key =
|
||||
Some(RpcSecretKey::Uuid(rpc_key_obj.secret_key));
|
||||
authorization.checks.rpc_secret_key_id =
|
||||
NonZeroU64::try_from(rpc_key_id).ok();
|
||||
}
|
||||
None => Default::default(),
|
||||
};
|
||||
|
||||
let authorization_type = AuthorizationType::Internal;
|
||||
let authorization = Arc::new(
|
||||
Authorization::try_new(
|
||||
authorization_checks,
|
||||
None,
|
||||
IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
authorization_type,
|
||||
)
|
||||
.context("Initializing Authorization Struct was not successful")?,
|
||||
);
|
||||
let authorization = Arc::new(authorization);
|
||||
|
||||
// It will be like a fork basically (to simulate getting multiple single requests ...)
|
||||
// Iterate through all frontend requests
|
||||
@ -177,46 +163,38 @@ impl MigrateStatsToV2 {
|
||||
|
||||
// Add module at the last step to include for any remained that we missed ... (?)
|
||||
|
||||
// TODO: Create RequestMetadata
|
||||
let backend_rpcs: Vec<_> = (0..int_backend_requests)
|
||||
.map(|_| Arc::new(Web3Rpc::default()))
|
||||
.collect();
|
||||
|
||||
let request_ulid = Ulid::new();
|
||||
|
||||
// Create RequestMetadata
|
||||
let request_metadata = RequestMetadata {
|
||||
start_instant: Instant::now(), // This is overwritten later on
|
||||
request_bytes: int_request_bytes, // Get the mean of all the request bytes
|
||||
archive_request: x.archive_request.into(),
|
||||
backend_requests: Default::default(), // This is not used, instead we modify the field later
|
||||
no_servers: 0.into(), // This is not relevant in the new version
|
||||
authorization: Some(authorization.clone()),
|
||||
backend_requests: Mutex::new(backend_rpcs),
|
||||
error_response: x.error_response.into(),
|
||||
// debug data is in kafka, not mysql or influx
|
||||
kafka_debug_logger: None,
|
||||
method: x.method.clone(),
|
||||
// This is not relevant in the new version
|
||||
no_servers: 0.into(),
|
||||
// Get the mean of all the request bytes
|
||||
request_bytes: int_request_bytes as usize,
|
||||
response_bytes: int_response_bytes.into(),
|
||||
// We did not initially record this data
|
||||
response_from_backup_rpc: false.into(),
|
||||
response_timestamp: x.period_datetime.timestamp().into(),
|
||||
response_millis: int_response_millis.into(),
|
||||
// We just don't have this data
|
||||
response_from_backup_rpc: false.into(), // I think we did not record this back then // Default::default()
|
||||
// This is overwritten later on
|
||||
start_instant: Instant::now(),
|
||||
stat_sender: Some(stat_sender.clone()),
|
||||
request_ulid,
|
||||
};
|
||||
|
||||
// (3) Send through a channel to a stat emitter
|
||||
// Send it to the stats sender
|
||||
if let Some(stat_sender_ref) = stat_sender.as_ref() {
|
||||
// info!("Method is: {:?}", x.clone().method);
|
||||
let mut response_stat = RpcQueryStats::new(
|
||||
x.clone().method,
|
||||
authorization.clone(),
|
||||
Arc::new(request_metadata),
|
||||
(int_response_bytes)
|
||||
.try_into()
|
||||
.context("sum bytes average is not calculated properly")?,
|
||||
);
|
||||
// Modify the timestamps ..
|
||||
response_stat.modify_struct(
|
||||
int_response_millis,
|
||||
x.period_datetime.timestamp(),
|
||||
int_backend_requests,
|
||||
);
|
||||
// info!("Sending stats: {:?}", response_stat);
|
||||
stat_sender_ref
|
||||
// .send(response_stat.into())
|
||||
.send_async(response_stat.into())
|
||||
.await
|
||||
.context("stat_sender sending response_stat")?;
|
||||
} else {
|
||||
panic!("Stat sender was not spawned!");
|
||||
if let Some(x) = request_metadata.try_send_stat()? {
|
||||
return Err(anyhow!("failed saving stat! {:?}", x));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -195,7 +195,7 @@ async fn run(
|
||||
// start the frontend port
|
||||
let frontend_handle = tokio::spawn(frontend::serve(
|
||||
app_frontend_port,
|
||||
spawned_app.app.clone(),
|
||||
spawned_app.app,
|
||||
frontend_shutdown_receiver,
|
||||
frontend_shutdown_complete_sender,
|
||||
));
|
||||
@ -417,17 +417,14 @@ mod tests {
|
||||
let prometheus_port = 0;
|
||||
let shutdown_sender = shutdown_sender.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
run(
|
||||
top_config,
|
||||
None,
|
||||
frontend_port,
|
||||
prometheus_port,
|
||||
2,
|
||||
shutdown_sender,
|
||||
)
|
||||
.await
|
||||
})
|
||||
tokio::spawn(run(
|
||||
top_config,
|
||||
None,
|
||||
frontend_port,
|
||||
prometheus_port,
|
||||
2,
|
||||
shutdown_sender,
|
||||
))
|
||||
};
|
||||
|
||||
// TODO: do something to the node. query latest block, mine another block, query again
|
||||
|
@ -108,11 +108,9 @@ impl RpcAccountingSubCommand {
|
||||
.all(db_conn)
|
||||
.await?;
|
||||
|
||||
if u_keys.is_empty() {
|
||||
return Err(anyhow::anyhow!("no user keys"));
|
||||
}
|
||||
anyhow::ensure!(!u_keys.is_empty(), "no user keys");
|
||||
|
||||
let u_key_ids: Vec<_> = u_keys.iter().map(|x| x.id).collect();
|
||||
let u_key_ids: Vec<_> = u_keys.into_iter().map(|x| x.id).collect();
|
||||
|
||||
condition = condition.add(rpc_accounting::Column::RpcKeyId.is_in(u_key_ids));
|
||||
}
|
||||
|
@ -217,13 +217,12 @@ async fn check_rpc(
|
||||
.await
|
||||
.context(format!("awaiting response from {}", rpc))?;
|
||||
|
||||
if !response.status().is_success() {
|
||||
return Err(anyhow::anyhow!(
|
||||
"bad response from {}: {}",
|
||||
rpc,
|
||||
response.status(),
|
||||
));
|
||||
}
|
||||
anyhow::ensure!(
|
||||
response.status().is_success(),
|
||||
"bad response from {}: {}",
|
||||
rpc,
|
||||
response.status(),
|
||||
);
|
||||
|
||||
let body = response
|
||||
.text()
|
||||
|
@ -141,7 +141,7 @@ impl SentrydSubCommand {
|
||||
None,
|
||||
);
|
||||
|
||||
if let Some(pagerduty_async) = pagerduty_async.as_ref() {
|
||||
if let Some(ref pagerduty_async) = pagerduty_async {
|
||||
info!(
|
||||
"sending to pagerduty: {:#}",
|
||||
serde_json::to_string_pretty(&alert)?
|
||||
|
@ -32,12 +32,11 @@ impl UserImportSubCommand {
|
||||
pub async fn main(self, db_conn: &DatabaseConnection) -> anyhow::Result<()> {
|
||||
let import_dir = Path::new(&self.input_dir);
|
||||
|
||||
if !import_dir.exists() {
|
||||
return Err(anyhow::anyhow!(
|
||||
"import dir ({}) does not exist!",
|
||||
import_dir.to_string_lossy()
|
||||
));
|
||||
}
|
||||
anyhow::ensure!(
|
||||
import_dir.exists(),
|
||||
"import dir ({}) does not exist!",
|
||||
import_dir.to_string_lossy()
|
||||
);
|
||||
|
||||
let user_glob_path = import_dir.join(format!("{}-users-*.json", self.export_timestamp));
|
||||
|
||||
@ -180,10 +179,7 @@ impl UserImportSubCommand {
|
||||
.await?
|
||||
{
|
||||
// make sure it belongs to the mapped user
|
||||
if existing_rk.user_id != mapped_id {
|
||||
// TODO: error or import the rest?
|
||||
return Err(anyhow::anyhow!("unexpected user id"));
|
||||
}
|
||||
anyhow::ensure!(existing_rk.user_id == mapped_id, "unexpected user id");
|
||||
|
||||
// the key exists under the expected user. we are good to continue
|
||||
} else {
|
||||
|
@ -1,15 +1,15 @@
|
||||
use crate::app::AnyhowJoinHandle;
|
||||
use crate::app::Web3ProxyJoinHandle;
|
||||
use crate::rpcs::blockchain::{BlocksByHashCache, Web3ProxyBlock};
|
||||
use crate::rpcs::one::Web3Rpc;
|
||||
use argh::FromArgs;
|
||||
use ethers::prelude::TxHash;
|
||||
use ethers::prelude::{Address, TxHash, H256};
|
||||
use ethers::types::{U256, U64};
|
||||
use hashbrown::HashMap;
|
||||
use log::warn;
|
||||
use migration::sea_orm::DatabaseConnection;
|
||||
use serde::Deserialize;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::broadcast;
|
||||
use std::time::Duration;
|
||||
|
||||
pub type BlockAndRpc = (Option<Web3ProxyBlock>, Arc<Web3Rpc>);
|
||||
pub type TxHashAndRpc = (TxHash, Arc<Web3Rpc>);
|
||||
@ -94,6 +94,12 @@ pub struct AppConfig {
|
||||
/// None = allow all requests
|
||||
pub default_user_max_requests_per_period: Option<u64>,
|
||||
|
||||
/// Default ERC address for out deposit contract
|
||||
pub deposit_factory_contract: Option<Address>,
|
||||
|
||||
/// Topic of the deposit event on our deposit contract
|
||||
pub deposit_topic: Option<H256>,
|
||||
|
||||
/// minimum amount to increase eth_estimateGas results
|
||||
pub gas_increase_min: Option<U256>,
|
||||
|
||||
@ -277,28 +283,48 @@ impl Web3RpcConfig {
|
||||
redis_pool: Option<redis_rate_limiter::RedisPool>,
|
||||
chain_id: u64,
|
||||
http_client: Option<reqwest::Client>,
|
||||
http_interval_sender: Option<Arc<broadcast::Sender<()>>>,
|
||||
blocks_by_hash_cache: BlocksByHashCache,
|
||||
block_sender: Option<flume::Sender<BlockAndRpc>>,
|
||||
tx_id_sender: Option<flume::Sender<TxHashAndRpc>>,
|
||||
reconnect: bool,
|
||||
) -> anyhow::Result<(Arc<Web3Rpc>, AnyhowJoinHandle<()>)> {
|
||||
) -> anyhow::Result<(Arc<Web3Rpc>, Web3ProxyJoinHandle<()>)> {
|
||||
if !self.extra.is_empty() {
|
||||
warn!("unknown Web3RpcConfig fields!: {:?}", self.extra.keys());
|
||||
}
|
||||
|
||||
// TODO: get this from config? a helper function? where does this belong?
|
||||
let block_interval = match chain_id {
|
||||
// ethereum
|
||||
1 => Duration::from_secs(12),
|
||||
// ethereum-goerli
|
||||
5 => Duration::from_secs(12),
|
||||
// polygon
|
||||
137 => Duration::from_secs(2),
|
||||
// fantom
|
||||
250 => Duration::from_secs(1),
|
||||
// arbitrum
|
||||
42161 => Duration::from_millis(500),
|
||||
// anything else
|
||||
_ => {
|
||||
let default = 10;
|
||||
warn!(
|
||||
"unexpected chain_id ({}). polling every {} seconds",
|
||||
chain_id, default
|
||||
);
|
||||
Duration::from_secs(default)
|
||||
}
|
||||
};
|
||||
|
||||
Web3Rpc::spawn(
|
||||
self,
|
||||
name,
|
||||
chain_id,
|
||||
db_conn,
|
||||
http_client,
|
||||
http_interval_sender,
|
||||
redis_pool,
|
||||
block_interval,
|
||||
blocks_by_hash_cache,
|
||||
block_sender,
|
||||
tx_id_sender,
|
||||
reconnect,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
@ -7,6 +7,7 @@ use crate::app::Web3ProxyApp;
|
||||
use crate::frontend::errors::{Web3ProxyError, Web3ProxyErrorContext};
|
||||
use crate::user_token::UserBearerToken;
|
||||
use crate::PostLogin;
|
||||
use anyhow::Context;
|
||||
use axum::{
|
||||
extract::{Path, Query},
|
||||
headers::{authorization::Bearer, Authorization},
|
||||
@ -16,15 +17,19 @@ use axum::{
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use axum_macros::debug_handler;
|
||||
use chrono::{TimeZone, Utc};
|
||||
use entities::{admin_trail, login, pending_login, rpc_key, user};
|
||||
use entities::{
|
||||
admin, admin_increase_balance_receipt, admin_trail, balance, login, pending_login, rpc_key,
|
||||
user, user_tier,
|
||||
};
|
||||
use ethers::{prelude::Address, types::Bytes};
|
||||
use hashbrown::HashMap;
|
||||
use http::StatusCode;
|
||||
use log::{debug, info, warn};
|
||||
use migration::sea_orm::prelude::Uuid;
|
||||
use migration::sea_orm::prelude::{Decimal, Uuid};
|
||||
use migration::sea_orm::{
|
||||
self, ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter,
|
||||
};
|
||||
use migration::{Expr, OnConflict};
|
||||
use serde_json::json;
|
||||
use siwe::{Message, VerificationOpts};
|
||||
use std::ops::Add;
|
||||
@ -33,6 +38,135 @@ use std::sync::Arc;
|
||||
use time::{Duration, OffsetDateTime};
|
||||
use ulid::Ulid;
|
||||
|
||||
/// `GET /admin/increase_balance` -- As an admin, credit a user's balance
///
/// - `user_address` of the user whose balance should be credited
/// - `amount` to credit, plus a free-form `note` describing the deposit
|
||||
#[debug_handler]
|
||||
pub async fn admin_increase_balance(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
Query(params): Query<HashMap<String, String>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let (caller, _) = app.bearer_is_authorized(bearer).await?;
|
||||
let caller_id = caller.id;
|
||||
|
||||
// Establish connections
|
||||
let db_conn = app
|
||||
.db_conn()
|
||||
.context("query_admin_modify_user needs a db")?;
|
||||
|
||||
// Check if the caller is an admin (if not, return early)
|
||||
let admin_entry: admin::Model = admin::Entity::find()
|
||||
.filter(admin::Column::UserId.eq(caller_id))
|
||||
.one(&db_conn)
|
||||
.await?
|
||||
.ok_or(Web3ProxyError::AccessDenied)?;
|
||||
|
||||
// Get the user from params
|
||||
let user_address: Address = params
|
||||
.get("user_address")
|
||||
.ok_or_else(|| {
|
||||
Web3ProxyError::BadRequest("Unable to find user_address key in request".to_string())
|
||||
})?
|
||||
.parse::<Address>()
|
||||
.map_err(|_| {
|
||||
Web3ProxyError::BadRequest("Unable to parse user_address as an Address".to_string())
|
||||
})?;
|
||||
let user_address_bytes: Vec<u8> = user_address.to_fixed_bytes().into();
|
||||
let note: String = params
|
||||
.get("note")
|
||||
.ok_or_else(|| {
|
||||
Web3ProxyError::BadRequest("Unable to find 'note' key in request".to_string())
|
||||
})?
|
||||
.parse::<String>()
|
||||
.map_err(|_| {
|
||||
Web3ProxyError::BadRequest("Unable to parse 'note' as a String".to_string())
|
||||
})?;
|
||||
// Get the amount from params
|
||||
// Decimal::from_str
|
||||
let amount: Decimal = params
|
||||
.get("amount")
|
||||
.ok_or_else(|| {
|
||||
Web3ProxyError::BadRequest("Unable to get the amount key from the request".to_string())
|
||||
})
|
||||
.map(|x| Decimal::from_str(x))?
|
||||
.map_err(|err| {
|
||||
Web3ProxyError::BadRequest(format!("Unable to parse amount from the request {:?}", err))
|
||||
})?;
|
||||
|
||||
let user_entry: user::Model = user::Entity::find()
|
||||
.filter(user::Column::Address.eq(user_address_bytes.clone()))
|
||||
.one(&db_conn)
|
||||
.await?
|
||||
.ok_or(Web3ProxyError::BadRequest(
|
||||
"No user with this id found".to_string(),
|
||||
))?;
|
||||
|
||||
let increase_balance_receipt = admin_increase_balance_receipt::ActiveModel {
|
||||
amount: sea_orm::Set(amount),
|
||||
admin_id: sea_orm::Set(admin_entry.id),
|
||||
deposit_to_user_id: sea_orm::Set(user_entry.id),
|
||||
note: sea_orm::Set(note),
|
||||
..Default::default()
|
||||
};
|
||||
increase_balance_receipt.save(&db_conn).await?;
|
||||
|
||||
let mut out = HashMap::new();
|
||||
out.insert(
|
||||
"user",
|
||||
serde_json::Value::String(format!("{:?}", user_address)),
|
||||
);
|
||||
out.insert("amount", serde_json::Value::String(amount.to_string()));
|
||||
|
||||
// Get the balance row
|
||||
let balance_entry: balance::Model = balance::Entity::find()
|
||||
.filter(balance::Column::UserId.eq(user_entry.id))
|
||||
.one(&db_conn)
|
||||
.await?
|
||||
.context("User does not have a balance row")?;
|
||||
|
||||
// Finally make the user premium if balance is above 10$
|
||||
let premium_user_tier = user_tier::Entity::find()
|
||||
.filter(user_tier::Column::Title.eq("Premium"))
|
||||
.one(&db_conn)
|
||||
.await?
|
||||
.context("Premium tier was not found!")?;
|
||||
|
||||
let balance_entry = balance_entry.into_active_model();
|
||||
balance::Entity::insert(balance_entry)
|
||||
.on_conflict(
|
||||
OnConflict::new()
|
||||
.values([
|
||||
// (
|
||||
// balance::Column::Id,
|
||||
// Expr::col(balance::Column::Id).add(self.frontend_requests),
|
||||
// ),
|
||||
(
|
||||
balance::Column::AvailableBalance,
|
||||
Expr::col(balance::Column::AvailableBalance).add(amount),
|
||||
),
|
||||
// (
|
||||
// balance::Column::Used,
|
||||
// Expr::col(balance::Column::UsedBalance).add(self.backend_retries),
|
||||
// ),
|
||||
// (
|
||||
// balance::Column::UserId,
|
||||
// Expr::col(balance::Column::UserId).add(self.no_servers),
|
||||
// ),
|
||||
])
|
||||
.to_owned(),
|
||||
)
|
||||
.exec(&db_conn)
|
||||
.await?;
|
||||
// TODO: Downgrade otherwise, right now not functioning properly
|
||||
|
||||
// Then read and save in one transaction
|
||||
let response = (StatusCode::OK, Json(out)).into_response();
|
||||
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
/// `GET /admin/modify_role` -- As an admin, modify a user's user-tier
|
||||
///
|
||||
/// - user_address that is to be modified
|
||||
@ -108,8 +242,8 @@ pub async fn admin_login_get(
|
||||
let login_domain = app
|
||||
.config
|
||||
.login_domain
|
||||
.clone()
|
||||
.unwrap_or_else(|| "llamanodes.com".to_string());
|
||||
.as_deref()
|
||||
.unwrap_or("llamanodes.com");
|
||||
|
||||
// Also there must basically be a token, that says that one admin logins _as a user_.
|
||||
// I'm not yet fully sure how to handle with that logic specifically ...
|
||||
|
@ -3,35 +3,45 @@
|
||||
use super::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResult};
|
||||
use super::rpc_proxy_ws::ProxyMode;
|
||||
use crate::app::{AuthorizationChecks, Web3ProxyApp, APP_USER_AGENT};
|
||||
use crate::jsonrpc::{JsonRpcForwardedResponse, JsonRpcRequest};
|
||||
use crate::rpcs::one::Web3Rpc;
|
||||
use crate::stats::{AppStat, BackendRequests, RpcQueryStats};
|
||||
use crate::user_token::UserBearerToken;
|
||||
use axum::headers::authorization::Bearer;
|
||||
use axum::headers::{Header, Origin, Referer, UserAgent};
|
||||
use chrono::Utc;
|
||||
use core::fmt;
|
||||
use deferred_rate_limiter::DeferredRateLimitResult;
|
||||
use derive_more::From;
|
||||
use entities::sea_orm_active_enums::TrackingLevel;
|
||||
use entities::{login, rpc_key, user, user_tier};
|
||||
use ethers::types::Bytes;
|
||||
use entities::{balance, login, rpc_key, user, user_tier};
|
||||
use ethers::types::{Bytes, U64};
|
||||
use ethers::utils::keccak256;
|
||||
use futures::TryFutureExt;
|
||||
use hashbrown::HashMap;
|
||||
use http::HeaderValue;
|
||||
use ipnet::IpNet;
|
||||
use log::{error, warn};
|
||||
use log::{error, trace, warn};
|
||||
use migration::sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};
|
||||
use parking_lot::Mutex;
|
||||
use rdkafka::message::{Header as KafkaHeader, OwnedHeaders as KafkaOwnedHeaders, OwnedMessage};
|
||||
use rdkafka::producer::{FutureProducer, FutureRecord};
|
||||
use rdkafka::util::Timeout as KafkaTimeout;
|
||||
use redis_rate_limiter::redis::AsyncCommands;
|
||||
use redis_rate_limiter::RedisRateLimitResult;
|
||||
use std::convert::Infallible;
|
||||
use std::fmt::Display;
|
||||
use std::sync::atomic::{AtomicBool, AtomicU64};
|
||||
use std::hash::{Hash, Hasher};
|
||||
use std::mem;
|
||||
use std::sync::atomic::{self, AtomicBool, AtomicI64, AtomicU64, AtomicUsize};
|
||||
use std::time::Duration;
|
||||
use std::{net::IpAddr, str::FromStr, sync::Arc};
|
||||
use tokio::sync::{OwnedSemaphorePermit, Semaphore};
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio::time::Instant;
|
||||
use ulid::Ulid;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// This lets us use UUID and ULID while we transition to only ULIDs
|
||||
/// TODO: include the key's description.
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)]
|
||||
pub enum RpcSecretKey {
|
||||
Ulid(Ulid),
|
||||
@ -70,37 +80,462 @@ pub struct Authorization {
|
||||
pub authorization_type: AuthorizationType,
|
||||
}
|
||||
|
||||
pub struct KafkaDebugLogger {
|
||||
topic: String,
|
||||
key: Vec<u8>,
|
||||
headers: KafkaOwnedHeaders,
|
||||
producer: FutureProducer,
|
||||
num_requests: AtomicUsize,
|
||||
num_responses: AtomicUsize,
|
||||
}
|
||||
|
||||
impl Hash for RpcSecretKey {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
let x = match self {
|
||||
Self::Ulid(x) => x.0,
|
||||
Self::Uuid(x) => x.as_u128(),
|
||||
};
|
||||
|
||||
x.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for KafkaDebugLogger {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("KafkaDebugLogger")
|
||||
.field("topic", &self.topic)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
type KafkaLogResult = Result<(i32, i64), (rdkafka::error::KafkaError, OwnedMessage)>;
|
||||
|
||||
impl KafkaDebugLogger {
|
||||
fn try_new(
|
||||
app: &Web3ProxyApp,
|
||||
authorization: Arc<Authorization>,
|
||||
head_block_num: Option<&U64>,
|
||||
kafka_topic: &str,
|
||||
request_ulid: Ulid,
|
||||
) -> Option<Arc<Self>> {
|
||||
let kafka_producer = app.kafka_producer.clone()?;
|
||||
|
||||
let kafka_topic = kafka_topic.to_string();
|
||||
|
||||
let rpc_secret_key_id = authorization
|
||||
.checks
|
||||
.rpc_secret_key_id
|
||||
.map(|x| x.get())
|
||||
.unwrap_or_default();
|
||||
|
||||
let kafka_key =
|
||||
rmp_serde::to_vec(&rpc_secret_key_id).expect("ids should always serialize with rmp");
|
||||
|
||||
let chain_id = app.config.chain_id;
|
||||
|
||||
let head_block_num = head_block_num
|
||||
.copied()
|
||||
.or_else(|| app.balanced_rpcs.head_block_num());
|
||||
|
||||
// TODO: would be nice to have the block hash too
|
||||
|
||||
// another item is added with the response, so initial_capacity is +1 what is needed here
|
||||
let kafka_headers = KafkaOwnedHeaders::new_with_capacity(6)
|
||||
.insert(KafkaHeader {
|
||||
key: "rpc_secret_key_id",
|
||||
value: authorization
|
||||
.checks
|
||||
.rpc_secret_key_id
|
||||
.map(|x| x.to_string())
|
||||
.as_ref(),
|
||||
})
|
||||
.insert(KafkaHeader {
|
||||
key: "ip",
|
||||
value: Some(&authorization.ip.to_string()),
|
||||
})
|
||||
.insert(KafkaHeader {
|
||||
key: "request_ulid",
|
||||
value: Some(&request_ulid.to_string()),
|
||||
})
|
||||
.insert(KafkaHeader {
|
||||
key: "head_block_num",
|
||||
value: head_block_num.map(|x| x.to_string()).as_ref(),
|
||||
})
|
||||
.insert(KafkaHeader {
|
||||
key: "chain_id",
|
||||
value: Some(&chain_id.to_le_bytes()),
|
||||
});
|
||||
|
||||
// save the key and headers for when we log the response
|
||||
let x = Self {
|
||||
topic: kafka_topic,
|
||||
key: kafka_key,
|
||||
headers: kafka_headers,
|
||||
producer: kafka_producer,
|
||||
num_requests: 0.into(),
|
||||
num_responses: 0.into(),
|
||||
};
|
||||
|
||||
let x = Arc::new(x);
|
||||
|
||||
Some(x)
|
||||
}
|
||||
|
||||
fn background_log(&self, payload: Vec<u8>) -> JoinHandle<KafkaLogResult> {
|
||||
let topic = self.topic.clone();
|
||||
let key = self.key.clone();
|
||||
let producer = self.producer.clone();
|
||||
let headers = self.headers.clone();
|
||||
|
||||
let f = async move {
|
||||
let record = FutureRecord::to(&topic)
|
||||
.key(&key)
|
||||
.payload(&payload)
|
||||
.headers(headers);
|
||||
|
||||
let produce_future =
|
||||
producer.send(record, KafkaTimeout::After(Duration::from_secs(5 * 60)));
|
||||
|
||||
let kafka_response = produce_future.await;
|
||||
|
||||
if let Err((err, msg)) = kafka_response.as_ref() {
|
||||
error!("produce kafka request: {} - {:?}", err, msg);
|
||||
// TODO: re-queue the msg? log somewhere else like a file on disk?
|
||||
// TODO: this is bad and should probably trigger an alarm
|
||||
};
|
||||
|
||||
kafka_response
|
||||
};
|
||||
|
||||
tokio::spawn(f)
|
||||
}
|
||||
|
||||
/// for opt-in debug usage, log the request to kafka
|
||||
/// TODO: generic type for request
|
||||
pub fn log_debug_request(&self, request: &JsonRpcRequest) -> JoinHandle<KafkaLogResult> {
|
||||
// TODO: is rust message pack a good choice? try rkyv instead
|
||||
let payload =
|
||||
rmp_serde::to_vec(&request).expect("requests should always serialize with rmp");
|
||||
|
||||
self.num_requests.fetch_add(1, atomic::Ordering::AcqRel);
|
||||
|
||||
self.background_log(payload)
|
||||
}
|
||||
|
||||
pub fn log_debug_response<R>(&self, response: &R) -> JoinHandle<KafkaLogResult>
|
||||
where
|
||||
R: serde::Serialize,
|
||||
{
|
||||
let payload =
|
||||
rmp_serde::to_vec(&response).expect("requests should always serialize with rmp");
|
||||
|
||||
self.num_responses.fetch_add(1, atomic::Ordering::AcqRel);
|
||||
|
||||
self.background_log(payload)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
pub struct RequestMetadata {
    pub start_instant: tokio::time::Instant,
    pub request_bytes: u64,
    // TODO: do we need atomics? seems like we should be able to pass a &mut around
    // TODO: "archive" isn't really a boolean.
    /// TODO: set archive_request during the new instead of after
    /// TODO: this is more complex than "requires a block older than X height". different types of data can be pruned differently
    pub archive_request: AtomicBool,

    pub authorization: Option<Arc<Authorization>>,

    pub request_ulid: Ulid,

    /// Size of the JSON request. Does not include headers or things like that.
    pub request_bytes: usize,

    /// users can opt out of method tracking for their personal dashboads
    /// but we still have to store the method at least temporarily for cost calculations
    pub method: Option<String>,

    /// Instant that the request was received (or at least close to it)
    /// We use Instant and not timestamps to avoid problems with leap seconds and similar issues
    pub start_instant: tokio::time::Instant,
    /// if this is empty, there was a cache_hit
    pub backend_requests: Mutex<Vec<Arc<Web3Rpc>>>,
    /// otherwise, it is populated with any rpc servers that were used by this request
    pub backend_requests: BackendRequests,
    /// The number of times the request got stuck waiting because no servers were synced
    pub no_servers: AtomicU64,
    /// If handling the request hit an application error
    /// This does not count things like a transcation reverting or a malformed request
    pub error_response: AtomicBool,
    /// Size in bytes of the JSON response. Does not include headers or things like that.
    pub response_bytes: AtomicU64,
    /// How many milliseconds it took to respond to the request
    pub response_millis: AtomicU64,
    /// What time the (first) response was proxied.
    /// TODO: think about how to store response times for ProxyMode::Versus
    pub response_timestamp: AtomicI64,
    /// True if the response required querying a backup RPC
    /// RPC aggregators that query multiple providers to compare response may use this header to ignore our response.
    pub response_from_backup_rpc: AtomicBool,

    /// ProxyMode::Debug logs requests and responses with Kafka
    /// TODO: maybe this shouldn't be determined by ProxyMode. A request param should probably enable this
    pub kafka_debug_logger: Option<Arc<KafkaDebugLogger>>,

    /// Cancel-safe channel for sending stats to the buffer
    pub stat_sender: Option<flume::Sender<AppStat>>,
}

impl Default for RequestMetadata {
    fn default() -> Self {
        Self {
            archive_request: Default::default(),
            authorization: Default::default(),
            backend_requests: Default::default(),
            error_response: Default::default(),
            kafka_debug_logger: Default::default(),
            method: Default::default(),
            no_servers: Default::default(),
            request_bytes: Default::default(),
            request_ulid: Default::default(),
            response_bytes: Default::default(),
            response_from_backup_rpc: Default::default(),
            response_millis: Default::default(),
            response_timestamp: Default::default(),
            start_instant: Instant::now(),
            stat_sender: Default::default(),
        }
    }
}

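The counters above are plain atomics, so readers only need shared references. A hedged sketch of how they can be read once a request finishes; the field names come from the struct above, but the helper itself is illustrative:

```
use std::sync::atomic::Ordering;

// illustrative helper, not part of this diff
fn response_summary(m: &RequestMetadata) -> (u64, u64, bool) {
    (
        m.response_bytes.load(Ordering::Acquire),
        m.response_millis.load(Ordering::Acquire),
        m.error_response.load(Ordering::Acquire),
    )
}
```
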
#[derive(From)]
pub enum RequestOrMethod<'a> {
    Request(&'a JsonRpcRequest),
    /// jsonrpc method (or similar label) and the size that the request should count as (sometimes 0)
    Method(&'a str, usize),
    RequestSize(usize),
}

impl<'a> RequestOrMethod<'a> {
    fn method(&self) -> Option<&str> {
        match self {
            Self::Request(x) => Some(&x.method),
            Self::Method(x, _) => Some(x),
            _ => None,
        }
    }

    fn jsonrpc_request(&self) -> Option<&JsonRpcRequest> {
        match self {
            Self::Request(x) => Some(x),
            _ => None,
        }
    }

    fn num_bytes(&self) -> usize {
        match self {
            RequestOrMethod::Method(_, num_bytes) => *num_bytes,
            RequestOrMethod::Request(x) => x.num_bytes(),
            RequestOrMethod::RequestSize(num_bytes) => *num_bytes,
        }
    }
}

impl<'a> From<&'a str> for RequestOrMethod<'a> {
    fn from(value: &'a str) -> Self {
        if value.is_empty() {
            Self::RequestSize(0)
        } else {
            Self::Method(value, 0)
        }
    }
}

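The `From<&'a str>` impl above treats an empty string as a size-only placeholder and anything else as a method label with a zero byte count. For illustration:

```
// illustrative assertions based on the From<&str> impl above
let labeled: RequestOrMethod = "eth_blockNumber".into();
assert!(matches!(labeled, RequestOrMethod::Method("eth_blockNumber", 0)));

let unlabeled: RequestOrMethod = "".into();
assert!(matches!(unlabeled, RequestOrMethod::RequestSize(0)));
```
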
// TODO: i think a trait is actually the right thing to use here
#[derive(From)]
pub enum ResponseOrBytes<'a> {
    Json(&'a serde_json::Value),
    Response(&'a JsonRpcForwardedResponse),
    Bytes(usize),
}

impl<'a> From<u64> for ResponseOrBytes<'a> {
    fn from(value: u64) -> Self {
        Self::Bytes(value as usize)
    }
}

impl ResponseOrBytes<'_> {
    pub fn num_bytes(&self) -> usize {
        match self {
            Self::Json(x) => serde_json::to_string(x)
                .expect("this should always serialize")
                .len(),
            Self::Response(x) => serde_json::to_string(x)
                .expect("this should always serialize")
                .len(),
            Self::Bytes(num_bytes) => *num_bytes,
        }
    }
}

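`num_bytes` serializes the JSON variants to measure them and passes raw byte counts through unchanged. For illustration (the `json!` value is arbitrary):

```
// illustrative; any serde_json::Value works here
let value = serde_json::json!({"jsonrpc": "2.0", "id": 1, "result": "0x0"});

assert_eq!(
    ResponseOrBytes::Json(&value).num_bytes(),
    value.to_string().len()
);
assert_eq!(ResponseOrBytes::Bytes(42).num_bytes(), 42);
assert_eq!(ResponseOrBytes::from(42u64).num_bytes(), 42);
```
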
impl RequestMetadata {
    pub fn new(request_bytes: usize) -> Self {
        // TODO: how can we do this without turning it into a string first. this is going to slow us down!
        let request_bytes = request_bytes as u64;
    pub async fn new<'a, R: Into<RequestOrMethod<'a>>>(
        app: &Web3ProxyApp,
        authorization: Arc<Authorization>,
        request: R,
        head_block_num: Option<&U64>,
    ) -> Arc<Self> {
        let request = request.into();

        Self {
            start_instant: Instant::now(),
            request_bytes,
        let method = request.method().map(|x| x.to_string());

        let request_bytes = request.num_bytes();

        // TODO: modify the request here? I don't really like that very much. but its a sure way to get archive_request set correctly

        // TODO: add the Ulid at the haproxy or amazon load balancer level? investigate OpenTelemetry
        let request_ulid = Ulid::new();

        let kafka_debug_logger = if matches!(authorization.checks.proxy_mode, ProxyMode::Debug) {
            KafkaDebugLogger::try_new(
                app,
                authorization.clone(),
                head_block_num,
                "web3_proxy:rpc",
                request_ulid,
            )
        } else {
            None
        };

        if let Some(ref kafka_debug_logger) = kafka_debug_logger {
            if let Some(request) = request.jsonrpc_request() {
                // TODO: channels might be more ergonomic than spawned futures
                // spawned things run in parallel easier but generally need more Arcs
                kafka_debug_logger.log_debug_request(request);
            } else {
                // there probably isn't a new request attached to this metadata.
                // this happens with websocket subscriptions
            }
        }

        let x = Self {
            archive_request: false.into(),
            backend_requests: Default::default(),
            no_servers: 0.into(),
            error_response: false.into(),
            kafka_debug_logger,
            no_servers: 0.into(),
            authorization: Some(authorization),
            request_bytes,
            method,
            response_bytes: 0.into(),
            response_millis: 0.into(),
            response_from_backup_rpc: false.into(),
            response_millis: 0.into(),
            request_ulid,
            response_timestamp: 0.into(),
            start_instant: Instant::now(),
            stat_sender: app.stat_sender.clone(),
        };

        Arc::new(x)
    }

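With the new signature, construction is async and returns an `Arc`. A hedged sketch of a call site; `app`, `authorization`, and `head_block_num` are assumed to exist in the caller, and a bare method name is enough when there is no full `JsonRpcRequest`:

```
// hypothetical call site
let request_metadata = RequestMetadata::new(
    &app,
    authorization.clone(),
    "eth_subscribe", // any Into<RequestOrMethod>; a &str counts as 0 request bytes
    head_block_num.as_ref(),
)
.await;
```
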
    pub fn backend_rpcs_used(&self) -> Vec<Arc<Web3Rpc>> {
        self.backend_requests.lock().clone()
    }

    pub fn tracking_level(&self) -> TrackingLevel {
        if let Some(authorization) = self.authorization.as_ref() {
            authorization.checks.tracking_level.clone()
        } else {
            TrackingLevel::None
        }
    }

    pub fn opt_in_method(&self) -> Option<String> {
        match self.tracking_level() {
            TrackingLevel::None | TrackingLevel::Aggregated => None,
            TrackingLevel::Detailed => self.method.clone(),
        }
    }

    pub fn take_opt_in_method(&mut self) -> Option<String> {
        match self.tracking_level() {
            TrackingLevel::None | TrackingLevel::Aggregated => None,
            TrackingLevel::Detailed => self.method.take(),
        }
    }

    pub fn try_send_stat(mut self) -> Web3ProxyResult<Option<Self>> {
        if let Some(stat_sender) = self.stat_sender.take() {
            trace!("sending stat! {:?}", self);

            let stat: RpcQueryStats = self.try_into()?;

            let stat: AppStat = stat.into();

            if let Err(err) = stat_sender.send(stat) {
                error!("failed sending stat {:?}: {:?}", err.0, err);
                // TODO: return it? that seems like it might cause an infinite loop
                // TODO: but dropping stats is bad... hmm... i guess better to undercharge customers than overcharge
            };

            Ok(None)
        } else {
            Ok(Some(self))
        }
    }

    pub fn add_response<'a, R: Into<ResponseOrBytes<'a>>>(&'a self, response: R) {
        // TODO: fetch? set? should it be None in a Mutex? or a OnceCell?
        let response = response.into();

        let num_bytes = response.num_bytes() as u64;

        self.response_bytes
            .fetch_add(num_bytes, atomic::Ordering::AcqRel);

        self.response_millis.fetch_add(
            self.start_instant.elapsed().as_millis() as u64,
            atomic::Ordering::AcqRel,
        );

        // TODO: record first or last timestamp? really, we need multiple
        self.response_timestamp
            .store(Utc::now().timestamp(), atomic::Ordering::Release);

        if let Some(kafka_debug_logger) = self.kafka_debug_logger.as_ref() {
            if let ResponseOrBytes::Response(response) = response {
                kafka_debug_logger.log_debug_response(response);
            }
        }
    }

    pub fn try_send_arc_stat(self: Arc<Self>) -> anyhow::Result<Option<Arc<Self>>> {
        match Arc::try_unwrap(self) {
            Ok(x) => {
                let not_sent = x.try_send_stat()?.map(Arc::new);
                Ok(not_sent)
            }
            Err(not_sent) => {
                trace!(
                    "could not send stat while {} arcs are active",
                    Arc::strong_count(&not_sent)
                );
                Ok(Some(not_sent))
            }
        }
    }

    // TODO: helper function to duplicate? needs to clear request_bytes, and all the atomics tho...
}

// TODO: is this where the panic comes from?
impl Drop for RequestMetadata {
    fn drop(&mut self) {
        if self.stat_sender.is_some() {
            // turn `&mut self` into `self`
            let x = mem::take(self);

            // warn!("request metadata dropped without stat send! {:?}", self);
            let _ = x.try_send_stat();
        }
    }
}

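The `Drop` impl above is what makes the `Default` impl earlier in this file necessary: `drop` only has `&mut self`, so it swaps in a default value to get an owned `RequestMetadata` that `try_send_stat` can consume. Roughly, as a free function (illustrative only):

```
// illustrative restatement of the Drop body above
fn send_stat_on_drop(m: &mut RequestMetadata) {
    // the swapped-in default has no stat_sender, so it will not try to
    // send another stat when it is eventually dropped itself
    let owned = std::mem::take(m);
    let _ = owned.try_send_stat();
}
```
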
@@ -445,21 +880,17 @@ pub async fn key_is_authorized(

impl Web3ProxyApp {
    /// Limit the number of concurrent requests from the given ip address.
    pub async fn ip_semaphore(&self, ip: IpAddr) -> Web3ProxyResult<Option<OwnedSemaphorePermit>> {
    pub async fn ip_semaphore(&self, ip: &IpAddr) -> Web3ProxyResult<Option<OwnedSemaphorePermit>> {
        if let Some(max_concurrent_requests) = self.config.public_max_concurrent_requests {
            let semaphore = self
                .ip_semaphores
                .get_with(ip, async move {
                .get_or_insert_async::<Infallible>(ip, async move {
                    // TODO: set max_concurrent_requests dynamically based on load?
                    let s = Semaphore::new(max_concurrent_requests);
                    Arc::new(s)
                    Ok(Arc::new(s))
                })
                .await;

                // if semaphore.available_permits() == 0 {
                // // TODO: concurrent limit hit! emit a stat? less important for anon users
                // // TODO: there is probably a race here
                // }
                .await
                .expect("infallible");

            let semaphore_permit = semaphore.acquire_owned().await?;

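The concurrency-limit pattern above boils down to one shared `Semaphore` per IP and an owned permit held for the life of the request. In isolation, with just the tokio types (the helper name is illustrative):

```
use std::sync::Arc;
use tokio::sync::{AcquireError, OwnedSemaphorePermit, Semaphore};

// illustrative helper showing why acquire_owned is used: the returned permit is
// 'static and can be stored next to the request instead of borrowing the semaphore
async fn concurrency_permit(
    semaphore: Arc<Semaphore>,
) -> Result<OwnedSemaphorePermit, AcquireError> {
    semaphore.acquire_owned().await
}
```
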
@@ -469,8 +900,8 @@ impl Web3ProxyApp {
        }
    }

    /// Limit the number of concurrent requests from the given rpc key.
    pub async fn registered_user_semaphore(
    /// Limit the number of concurrent requests for a given user across all of their keys
    pub async fn user_semaphore(
        &self,
        authorization_checks: &AuthorizationChecks,
    ) -> Web3ProxyResult<Option<OwnedSemaphorePermit>> {
@@ -478,28 +909,22 @@ impl Web3ProxyApp {
            let user_id = authorization_checks
                .user_id
                .try_into()
                .or(Err(Web3ProxyError::UserIdZero))
                .web3_context("user ids should always be non-zero")?;
                .or(Err(Web3ProxyError::UserIdZero))?;

            let semaphore = self
                .registered_user_semaphores
                .get_with(user_id, async move {
                .user_semaphores
                .get_or_insert_async::<Infallible>(&user_id, async move {
                    let s = Semaphore::new(max_concurrent_requests as usize);
                    // trace!("new semaphore for user_id {}", user_id);
                    Arc::new(s)
                    Ok(Arc::new(s))
                })
                .await;

                // if semaphore.available_permits() == 0 {
                // // TODO: concurrent limit hit! emit a stat? this has a race condition though.
                // // TODO: maybe have a stat on how long we wait to acquire the semaphore instead?
                // }
                .await
                .expect("infallible");

            let semaphore_permit = semaphore.acquire_owned().await?;

            Ok(Some(semaphore_permit))
        } else {
            // unlimited requests allowed
            // unlimited concurrency
            Ok(None)
        }
    }
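The `try_into().or(Err(Web3ProxyError::UserIdZero))?` above suggests the semaphore cache key is a non-zero integer type; assuming it is `NonZeroU64`, the conversion behaves like this (illustrative only):

```
use std::num::NonZeroU64;

// a user_id of 0 means "no user"; it maps to Web3ProxyError::UserIdZero above
assert!(NonZeroU64::new(0).is_none());
assert_eq!(NonZeroU64::new(7).map(NonZeroU64::get), Some(7));
```
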
@@ -516,11 +941,12 @@ impl Web3ProxyApp {
        // limit concurrent requests
        let semaphore = self
            .bearer_token_semaphores
            .get_with(user_bearer_token.clone(), async move {
            .get_or_insert_async::<Infallible>(&user_bearer_token, async move {
                let s = Semaphore::new(self.config.bearer_token_max_concurrent_requests as usize);
                Arc::new(s)
                Ok(Arc::new(s))
            })
            .await;
            .await
            .expect("infallible");

        let semaphore_permit = semaphore.acquire_owned().await?;

@@ -608,7 +1034,7 @@ impl Web3ProxyApp {
        // they do check origin because we can override rate limits for some origins
        let authorization = Authorization::external(
            allowed_origin_requests_per_period,
            self.db_conn.clone(),
            self.db_conn(),
            ip,
            origin,
            proxy_mode,
@@ -623,7 +1049,7 @@ impl Web3ProxyApp {
            {
                Ok(DeferredRateLimitResult::Allowed) => {
                    // rate limit allowed us. check concurrent request limits
                    let semaphore = self.ip_semaphore(ip).await?;
                    let semaphore = self.ip_semaphore(&ip).await?;

                    Ok(RateLimitResult::Allowed(authorization, semaphore))
                }
@@ -643,14 +1069,14 @@ impl Web3ProxyApp {
                    error!("rate limiter is unhappy. allowing ip. err={:?}", err);

                    // at least we can still check the semaphore
                    let semaphore = self.ip_semaphore(ip).await?;
                    let semaphore = self.ip_semaphore(&ip).await?;

                    Ok(RateLimitResult::Allowed(authorization, semaphore))
                }
            }
        } else {
            // no redis, but we can still check the ip semaphore
            let semaphore = self.ip_semaphore(ip).await?;
            let semaphore = self.ip_semaphore(&ip).await?;

            // TODO: if no redis, rate limit with a local cache? "warn!" probably isn't right
            Ok(RateLimitResult::Allowed(authorization, semaphore))
@@ -663,9 +1089,8 @@ impl Web3ProxyApp {
        proxy_mode: ProxyMode,
        rpc_secret_key: RpcSecretKey,
    ) -> Web3ProxyResult<AuthorizationChecks> {
        let authorization_checks: Result<_, Arc<Web3ProxyError>> = self
            .rpc_secret_key_cache
            .try_get_with(rpc_secret_key.into(), async move {
        self.rpc_secret_key_cache
            .get_or_insert_async(&rpc_secret_key, async move {
                // trace!(?rpc_secret_key, "user cache miss");

                let db_replica = self
@@ -689,6 +1114,13 @@ impl Web3ProxyApp {
                    .await?
                    .expect("related user");

                let balance = balance::Entity::find()
                    .filter(balance::Column::UserId.eq(user_model.id))
                    .one(db_replica.conn())
                    .await?
                    .expect("related balance")
                    .available_balance;

                let user_tier_model =
                    user_tier::Entity::find_by_id(user_model.user_tier_id)
                        .one(db_replica.conn())
@@ -771,14 +1203,13 @@ impl Web3ProxyApp {
                            max_requests_per_period: user_tier_model.max_requests_per_period,
                            private_txs: rpc_key_model.private_txs,
                            proxy_mode,
                            balance: Some(balance),
                        })
                    }
                    None => Ok(AuthorizationChecks::default()),
                }
            })
            .await;

        authorization_checks.map_err(Web3ProxyError::Arc)
            .await
    }

    /// Authorized the ip/origin/referer/useragent and rate limit and concurrency
@@ -802,9 +1233,7 @@ impl Web3ProxyApp {

        // only allow this rpc_key to run a limited amount of concurrent requests
        // TODO: rate limit should be BEFORE the semaphore!
        let semaphore = self
            .registered_user_semaphore(&authorization_checks)
            .await?;
        let semaphore = self.user_semaphore(&authorization_checks).await?;

        let authorization = Authorization::try_new(
            authorization_checks,
File diff suppressed because it is too large
@ -16,25 +16,34 @@ use axum::{
|
||||
routing::{get, post, put},
|
||||
Extension, Router,
|
||||
};
|
||||
use http::header::AUTHORIZATION;
|
||||
use http::{header::AUTHORIZATION, StatusCode};
|
||||
use listenfd::ListenFd;
|
||||
use log::info;
|
||||
use moka::future::Cache;
|
||||
use quick_cache_ttl::UnitWeighter;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use std::{iter::once, time::Duration};
|
||||
use strum::{EnumCount, EnumIter};
|
||||
use tokio::sync::broadcast;
|
||||
use tower_http::cors::CorsLayer;
|
||||
use tower_http::sensitive_headers::SetSensitiveRequestHeadersLayer;
|
||||
|
||||
use self::errors::Web3ProxyResult;
|
||||
|
||||
/// simple keys for caching responses
|
||||
#[derive(Clone, Hash, PartialEq, Eq)]
|
||||
pub enum FrontendResponseCaches {
|
||||
#[derive(Copy, Clone, Hash, PartialEq, Eq, EnumCount, EnumIter)]
|
||||
pub enum ResponseCacheKey {
|
||||
BackupsNeeded,
|
||||
Health,
|
||||
Status,
|
||||
}
|
||||
|
||||
pub type FrontendJsonResponseCache =
|
||||
Cache<FrontendResponseCaches, Arc<serde_json::Value>, hashbrown::hash_map::DefaultHashBuilder>;
|
||||
pub type FrontendHealthCache = Cache<(), bool, hashbrown::hash_map::DefaultHashBuilder>;
|
||||
pub type ResponseCache = quick_cache_ttl::CacheWithTTL<
|
||||
ResponseCacheKey,
|
||||
(StatusCode, &'static str, axum::body::Bytes),
|
||||
UnitWeighter,
|
||||
quick_cache_ttl::DefaultHashBuilder,
|
||||
>;
|
||||
|
||||
/// Start the frontend server.
|
||||
pub async fn serve(
|
||||
@ -42,17 +51,14 @@ pub async fn serve(
|
||||
proxy_app: Arc<Web3ProxyApp>,
|
||||
mut shutdown_receiver: broadcast::Receiver<()>,
|
||||
shutdown_complete_sender: broadcast::Sender<()>,
|
||||
) -> anyhow::Result<()> {
|
||||
) -> Web3ProxyResult<()> {
|
||||
// setup caches for whatever the frontend needs
|
||||
// no need for max items since it is limited by the enum key
|
||||
let json_response_cache: FrontendJsonResponseCache = Cache::builder()
|
||||
.time_to_live(Duration::from_secs(2))
|
||||
.build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());
|
||||
// TODO: latest moka allows for different ttls for different
|
||||
let response_cache_size = ResponseCacheKey::COUNT;
|
||||
|
||||
// /health gets a cache with a shorter lifetime
|
||||
let health_cache: FrontendHealthCache = Cache::builder()
|
||||
.time_to_live(Duration::from_millis(100))
|
||||
.build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());
|
||||
let response_cache =
|
||||
ResponseCache::new_with_capacity(response_cache_size, Duration::from_secs(1)).await;
|
||||
|
||||
// TODO: read config for if fastest/versus should be available publicly. default off
|
||||
|
||||
@ -62,102 +68,77 @@ pub async fn serve(
|
||||
//
|
||||
// HTTP RPC (POST)
|
||||
//
|
||||
// Websocket RPC (GET)
|
||||
// If not an RPC, GET will redirect to urls in the config
|
||||
//
|
||||
// public
|
||||
.route("/", post(rpc_proxy_http::proxy_web3_rpc))
|
||||
.route(
|
||||
"/",
|
||||
post(rpc_proxy_http::proxy_web3_rpc).get(rpc_proxy_ws::websocket_handler),
|
||||
)
|
||||
// authenticated with and without trailing slash
|
||||
.route(
|
||||
"/rpc/:rpc_key/",
|
||||
post(rpc_proxy_http::proxy_web3_rpc_with_key),
|
||||
post(rpc_proxy_http::proxy_web3_rpc_with_key)
|
||||
.get(rpc_proxy_ws::websocket_handler_with_key),
|
||||
)
|
||||
.route(
|
||||
"/rpc/:rpc_key",
|
||||
post(rpc_proxy_http::proxy_web3_rpc_with_key),
|
||||
post(rpc_proxy_http::proxy_web3_rpc_with_key)
|
||||
.get(rpc_proxy_ws::websocket_handler_with_key),
|
||||
)
|
||||
// authenticated debug route with and without trailing slash
|
||||
.route(
|
||||
"/debug/:rpc_key/",
|
||||
post(rpc_proxy_http::debug_proxy_web3_rpc_with_key),
|
||||
post(rpc_proxy_http::debug_proxy_web3_rpc_with_key)
|
||||
.get(rpc_proxy_ws::debug_websocket_handler_with_key),
|
||||
)
|
||||
.route(
|
||||
"/debug/:rpc_key",
|
||||
post(rpc_proxy_http::debug_proxy_web3_rpc_with_key),
|
||||
post(rpc_proxy_http::debug_proxy_web3_rpc_with_key)
|
||||
.get(rpc_proxy_ws::debug_websocket_handler_with_key),
|
||||
)
|
||||
// public fastest with and without trailing slash
|
||||
.route("/fastest/", post(rpc_proxy_http::fastest_proxy_web3_rpc))
|
||||
.route("/fastest", post(rpc_proxy_http::fastest_proxy_web3_rpc))
|
||||
.route(
|
||||
"/fastest/",
|
||||
post(rpc_proxy_http::fastest_proxy_web3_rpc)
|
||||
.get(rpc_proxy_ws::fastest_websocket_handler),
|
||||
)
|
||||
.route(
|
||||
"/fastest",
|
||||
post(rpc_proxy_http::fastest_proxy_web3_rpc)
|
||||
.get(rpc_proxy_ws::fastest_websocket_handler),
|
||||
)
|
||||
// authenticated fastest with and without trailing slash
|
||||
.route(
|
||||
"/fastest/:rpc_key/",
|
||||
post(rpc_proxy_http::fastest_proxy_web3_rpc_with_key),
|
||||
post(rpc_proxy_http::fastest_proxy_web3_rpc_with_key)
|
||||
.get(rpc_proxy_ws::fastest_websocket_handler_with_key),
|
||||
)
|
||||
.route(
|
||||
"/fastest/:rpc_key",
|
||||
post(rpc_proxy_http::fastest_proxy_web3_rpc_with_key),
|
||||
)
|
||||
// public versus
|
||||
.route("/versus/", post(rpc_proxy_http::versus_proxy_web3_rpc))
|
||||
.route("/versus", post(rpc_proxy_http::versus_proxy_web3_rpc))
|
||||
// authenticated versus with and without trailing slash
|
||||
.route(
|
||||
"/versus/:rpc_key/",
|
||||
post(rpc_proxy_http::versus_proxy_web3_rpc_with_key),
|
||||
)
|
||||
.route(
|
||||
"/versus/:rpc_key",
|
||||
post(rpc_proxy_http::versus_proxy_web3_rpc_with_key),
|
||||
)
|
||||
//
|
||||
// Websocket RPC (GET)
|
||||
// If not an RPC, this will redirect to configurable urls
|
||||
//
|
||||
// public
|
||||
.route("/", get(rpc_proxy_ws::websocket_handler))
|
||||
// authenticated with and without trailing slash
|
||||
.route(
|
||||
"/rpc/:rpc_key/",
|
||||
get(rpc_proxy_ws::websocket_handler_with_key),
|
||||
)
|
||||
.route(
|
||||
"/rpc/:rpc_key",
|
||||
get(rpc_proxy_ws::websocket_handler_with_key),
|
||||
)
|
||||
// debug with and without trailing slash
|
||||
.route(
|
||||
"/debug/:rpc_key/",
|
||||
get(rpc_proxy_ws::websocket_handler_with_key),
|
||||
)
|
||||
.route(
|
||||
"/debug/:rpc_key",
|
||||
get(rpc_proxy_ws::websocket_handler_with_key),
|
||||
) // public fastest with and without trailing slash
|
||||
.route("/fastest/", get(rpc_proxy_ws::fastest_websocket_handler))
|
||||
.route("/fastest", get(rpc_proxy_ws::fastest_websocket_handler))
|
||||
// authenticated fastest with and without trailing slash
|
||||
.route(
|
||||
"/fastest/:rpc_key/",
|
||||
get(rpc_proxy_ws::fastest_websocket_handler_with_key),
|
||||
)
|
||||
.route(
|
||||
"/fastest/:rpc_key",
|
||||
get(rpc_proxy_ws::fastest_websocket_handler_with_key),
|
||||
post(rpc_proxy_http::fastest_proxy_web3_rpc_with_key)
|
||||
.get(rpc_proxy_ws::fastest_websocket_handler_with_key),
|
||||
)
|
||||
// public versus
|
||||
.route(
|
||||
"/versus/",
|
||||
get(rpc_proxy_ws::versus_websocket_handler_with_key),
|
||||
post(rpc_proxy_http::versus_proxy_web3_rpc).get(rpc_proxy_ws::versus_websocket_handler),
|
||||
)
|
||||
.route(
|
||||
"/versus",
|
||||
get(rpc_proxy_ws::versus_websocket_handler_with_key),
|
||||
post(rpc_proxy_http::versus_proxy_web3_rpc).get(rpc_proxy_ws::versus_websocket_handler),
|
||||
)
|
||||
// authenticated versus with and without trailing slash
|
||||
.route(
|
||||
"/versus/:rpc_key/",
|
||||
get(rpc_proxy_ws::versus_websocket_handler_with_key),
|
||||
post(rpc_proxy_http::versus_proxy_web3_rpc_with_key)
|
||||
.get(rpc_proxy_ws::versus_websocket_handler_with_key),
|
||||
)
|
||||
.route(
|
||||
"/versus/:rpc_key",
|
||||
get(rpc_proxy_ws::versus_websocket_handler_with_key),
|
||||
post(rpc_proxy_http::versus_proxy_web3_rpc_with_key)
|
||||
.get(rpc_proxy_ws::versus_websocket_handler_with_key),
|
||||
)
|
||||
//
|
||||
// System things
|
||||
@ -168,30 +149,62 @@ pub async fn serve(
|
||||
//
|
||||
// User stuff
|
||||
//
|
||||
.route("/user/login/:user_address", get(users::user_login_get))
|
||||
.route(
|
||||
"/user/login/:user_address",
|
||||
get(users::authentication::user_login_get),
|
||||
)
|
||||
.route(
|
||||
"/user/login/:user_address/:message_eip",
|
||||
get(users::user_login_get),
|
||||
get(users::authentication::user_login_get),
|
||||
)
|
||||
.route("/user/login", post(users::authentication::user_login_post))
|
||||
.route(
|
||||
// /:rpc_key/:subuser_address/:new_status/:new_role
|
||||
"/user/subuser",
|
||||
get(users::subuser::modify_subuser),
|
||||
)
|
||||
.route("/user/subusers", get(users::subuser::get_subusers))
|
||||
.route(
|
||||
"/subuser/rpc_keys",
|
||||
get(users::subuser::get_keys_as_subuser),
|
||||
)
|
||||
.route("/user/login", post(users::user_login_post))
|
||||
.route("/user", get(users::user_get))
|
||||
.route("/user", post(users::user_post))
|
||||
.route("/user/balance", get(users::user_balance_get))
|
||||
.route("/user/balance/:txid", post(users::user_balance_post))
|
||||
.route("/user/keys", get(users::rpc_keys_get))
|
||||
.route("/user/keys", post(users::rpc_keys_management))
|
||||
.route("/user/keys", put(users::rpc_keys_management))
|
||||
.route("/user/revert_logs", get(users::user_revert_logs_get))
|
||||
.route("/user/balance", get(users::payment::user_balance_get))
|
||||
.route("/user/deposits", get(users::payment::user_deposits_get))
|
||||
.route(
|
||||
"/user/balance/:tx_hash",
|
||||
get(users::payment::user_balance_post),
|
||||
)
|
||||
.route("/user/keys", get(users::rpc_keys::rpc_keys_get))
|
||||
.route("/user/keys", post(users::rpc_keys::rpc_keys_management))
|
||||
.route("/user/keys", put(users::rpc_keys::rpc_keys_management))
|
||||
// .route("/user/referral/:referral_link", get(users::user_referral_link_get))
|
||||
.route(
|
||||
"/user/referral",
|
||||
get(users::referral::user_referral_link_get),
|
||||
)
|
||||
.route("/user/revert_logs", get(users::stats::user_revert_logs_get))
|
||||
.route(
|
||||
"/user/stats/aggregate",
|
||||
get(users::user_stats_aggregated_get),
|
||||
get(users::stats::user_stats_aggregated_get),
|
||||
)
|
||||
.route(
|
||||
"/user/stats/aggregated",
|
||||
get(users::user_stats_aggregated_get),
|
||||
get(users::stats::user_stats_aggregated_get),
|
||||
)
|
||||
.route(
|
||||
"/user/stats/detailed",
|
||||
get(users::stats::user_stats_detailed_get),
|
||||
)
|
||||
.route(
|
||||
"/user/logout",
|
||||
post(users::authentication::user_logout_post),
|
||||
)
|
||||
.route(
|
||||
"/admin/increase_balance",
|
||||
get(admin::admin_increase_balance),
|
||||
)
|
||||
.route("/user/stats/detailed", get(users::user_stats_detailed_get))
|
||||
.route("/user/logout", post(users::user_logout_post))
|
||||
.route("/admin/modify_role", get(admin::admin_change_user_roles))
|
||||
.route(
|
||||
"/admin/imitate-login/:admin_address/:user_address",
|
||||
@ -213,19 +226,28 @@ pub async fn serve(
|
||||
// handle cors
|
||||
.layer(CorsLayer::very_permissive())
|
||||
// application state
|
||||
.layer(Extension(proxy_app.clone()))
|
||||
.layer(Extension(proxy_app))
|
||||
// frontend caches
|
||||
.layer(Extension(json_response_cache))
|
||||
.layer(Extension(health_cache))
|
||||
.layer(Extension(Arc::new(response_cache)))
|
||||
// 404 for any unknown routes
|
||||
.fallback(errors::handler_404);
|
||||
|
||||
// run our app with hyper
|
||||
// TODO: allow only listening on localhost? top_config.app.host.parse()?
|
||||
let addr = SocketAddr::from(([0, 0, 0, 0], port));
|
||||
info!("listening on port {}", port);
|
||||
let server_builder = if let Some(listener) = ListenFd::from_env().take_tcp_listener(0)? {
|
||||
// use systemd socket magic for no downtime deploys
|
||||
let addr = listener.local_addr()?;
|
||||
|
||||
// TODO: into_make_service is enough if we always run behind a proxy. make into_make_service_with_connect_info optional?
|
||||
info!("listening with fd at {}", addr);
|
||||
|
||||
axum::Server::from_tcp(listener)?
|
||||
} else {
|
||||
info!("listening on port {}", port);
|
||||
// TODO: allow only listening on localhost? top_config.app.host.parse()?
|
||||
let addr = SocketAddr::from(([0, 0, 0, 0], port));
|
||||
|
||||
axum::Server::try_bind(&addr)?
|
||||
};
|
||||
|
||||
// into_make_service is enough if we always run behind a proxy
|
||||
/*
|
||||
It sequentially looks for an IP in:
|
||||
- x-forwarded-for header (de-facto standard)
|
||||
@ -233,12 +255,21 @@ pub async fn serve(
|
||||
- forwarded header (new standard)
|
||||
- axum::extract::ConnectInfo (if not behind proxy)
|
||||
*/
|
||||
let service = app.into_make_service_with_connect_info::<SocketAddr>();
|
||||
#[cfg(feature = "connectinfo")]
|
||||
let make_service = {
|
||||
info!("connectinfo feature enabled");
|
||||
app.into_make_service_with_connect_info::<SocketAddr>()
|
||||
};
|
||||
|
||||
// `axum::Server` is a re-export of `hyper::Server`
|
||||
let server = axum::Server::bind(&addr)
|
||||
#[cfg(not(feature = "connectinfo"))]
|
||||
let make_service = {
|
||||
info!("connectinfo feature disabled");
|
||||
app.into_make_service()
|
||||
};
|
||||
|
||||
let server = server_builder
|
||||
.serve(make_service)
|
||||
// TODO: option to use with_connect_info. we want it in dev, but not when running behind a proxy, but not
|
||||
.serve(service)
|
||||
.with_graceful_shutdown(async move {
|
||||
let _ = shutdown_receiver.recv().await;
|
||||
})
|
||||
|
@ -63,12 +63,14 @@ async fn _proxy_web3_rpc(
|
||||
|
||||
let authorization = Arc::new(authorization);
|
||||
|
||||
let (response, rpcs, _semaphore) = app
|
||||
// TODO: calculate payload bytes here (before turning into serde_json::Value). that will save serializing later
|
||||
|
||||
let (status_code, response, rpcs, _semaphore) = app
|
||||
.proxy_web3_rpc(authorization, payload)
|
||||
.await
|
||||
.map(|(x, y)| (x, y, semaphore))?;
|
||||
.map(|(s, x, y)| (s, x, y, semaphore))?;
|
||||
|
||||
let mut response = Json(&response).into_response();
|
||||
let mut response = (status_code, Json(response)).into_response();
|
||||
|
||||
let headers = response.headers_mut();
|
||||
|
||||
@ -129,6 +131,8 @@ pub async fn proxy_web3_rpc_with_key(
|
||||
.await
|
||||
}
|
||||
|
||||
// TODO: if a /debug/ request gets rejected by an invalid request, there won't be any kafka log
|
||||
// TODO:
|
||||
#[debug_handler]
|
||||
pub async fn debug_proxy_web3_rpc_with_key(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
@ -228,12 +232,12 @@ async fn _proxy_web3_rpc_with_key(
|
||||
|
||||
let rpc_secret_key_id = authorization.checks.rpc_secret_key_id;
|
||||
|
||||
let (response, rpcs, _semaphore) = app
|
||||
let (status_code, response, rpcs, _semaphore) = app
|
||||
.proxy_web3_rpc(authorization, payload)
|
||||
.await
|
||||
.map(|(x, y)| (x, y, semaphore))?;
|
||||
.map(|(s, x, y)| (s, x, y, semaphore))?;
|
||||
|
||||
let mut response = Json(&response).into_response();
|
||||
let mut response = (status_code, Json(response)).into_response();
|
||||
|
||||
let headers = response.headers_mut();
|
||||
|
||||
|
@ -5,12 +5,12 @@
|
||||
use super::authorization::{ip_is_authorized, key_is_authorized, Authorization, RequestMetadata};
|
||||
use super::errors::{Web3ProxyError, Web3ProxyResponse};
|
||||
use crate::jsonrpc::JsonRpcId;
|
||||
use crate::stats::RpcQueryStats;
|
||||
use crate::{
|
||||
app::Web3ProxyApp,
|
||||
frontend::errors::Web3ProxyResult,
|
||||
jsonrpc::{JsonRpcForwardedResponse, JsonRpcForwardedResponseEnum, JsonRpcRequest},
|
||||
};
|
||||
use anyhow::Context;
|
||||
use axum::headers::{Origin, Referer, UserAgent};
|
||||
use axum::{
|
||||
extract::ws::{Message, WebSocket, WebSocketUpgrade},
|
||||
@ -20,6 +20,8 @@ use axum::{
|
||||
};
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use axum_macros::debug_handler;
|
||||
use ethers::types::U64;
|
||||
use fstrings::{f, format_args_f};
|
||||
use futures::SinkExt;
|
||||
use futures::{
|
||||
future::AbortHandle,
|
||||
@ -28,12 +30,13 @@ use futures::{
|
||||
use handlebars::Handlebars;
|
||||
use hashbrown::HashMap;
|
||||
use http::StatusCode;
|
||||
use log::{info, trace, warn};
|
||||
use log::{info, trace};
|
||||
use serde_json::json;
|
||||
use std::sync::Arc;
|
||||
use std::{str::from_utf8_mut, sync::atomic::AtomicUsize};
|
||||
use tokio::sync::{broadcast, OwnedSemaphorePermit, RwLock};
|
||||
|
||||
/// How to select backend servers for a request
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub enum ProxyMode {
|
||||
/// send to the "best" synced server
|
||||
@ -43,6 +46,7 @@ pub enum ProxyMode {
|
||||
/// send to all servers for benchmarking. return the fastest non-error response
|
||||
Versus,
|
||||
/// send all requests and responses to kafka
|
||||
/// TODO: should this be seperate from best/fastest/versus?
|
||||
Debug,
|
||||
}
|
||||
|
||||
@ -314,110 +318,121 @@ async fn proxy_web3_socket(
|
||||
}
|
||||
|
||||
/// websockets support a few more methods than http clients
|
||||
/// TODO: i think this subscriptions hashmap grows unbounded
|
||||
async fn handle_socket_payload(
|
||||
app: Arc<Web3ProxyApp>,
|
||||
authorization: &Arc<Authorization>,
|
||||
payload: &str,
|
||||
response_sender: &flume::Sender<Message>,
|
||||
subscription_count: &AtomicUsize,
|
||||
subscriptions: Arc<RwLock<HashMap<String, AbortHandle>>>,
|
||||
) -> (Message, Option<OwnedSemaphorePermit>) {
|
||||
subscriptions: Arc<RwLock<HashMap<U64, AbortHandle>>>,
|
||||
) -> Web3ProxyResult<(Message, Option<OwnedSemaphorePermit>)> {
|
||||
let (authorization, semaphore) = match authorization.check_again(&app).await {
|
||||
Ok((a, s)) => (a, s),
|
||||
Err(err) => {
|
||||
let (_, err) = err.into_response_parts();
|
||||
|
||||
let err = serde_json::to_string(&err).expect("to_string should always work here");
|
||||
let err = JsonRpcForwardedResponse::from_response_data(err, Default::default());
|
||||
|
||||
return (Message::Text(err), None);
|
||||
let err = serde_json::to_string(&err)?;
|
||||
|
||||
return Ok((Message::Text(err), None));
|
||||
}
|
||||
};
|
||||
|
||||
// TODO: do any clients send batches over websockets?
|
||||
let (id, response) = match serde_json::from_str::<JsonRpcRequest>(payload) {
|
||||
// TODO: change response into response_data
|
||||
let (response_id, response) = match serde_json::from_str::<JsonRpcRequest>(payload) {
|
||||
Ok(json_request) => {
|
||||
let id = json_request.id.clone();
|
||||
let response_id = json_request.id.clone();
|
||||
|
||||
let response: Web3ProxyResult<JsonRpcForwardedResponseEnum> = match &json_request.method
|
||||
[..]
|
||||
{
|
||||
"eth_subscribe" => {
|
||||
// TODO: how can we subscribe with proxy_mode?
|
||||
match app
|
||||
.eth_subscribe(
|
||||
authorization.clone(),
|
||||
json_request,
|
||||
subscription_count,
|
||||
response_sender.clone(),
|
||||
)
|
||||
// TODO: move this to a seperate function so we can use the try operator
|
||||
let response: Web3ProxyResult<JsonRpcForwardedResponseEnum> =
|
||||
match &json_request.method[..] {
|
||||
"eth_subscribe" => {
|
||||
// TODO: how can we subscribe with proxy_mode?
|
||||
match app
|
||||
.eth_subscribe(
|
||||
authorization.clone(),
|
||||
json_request,
|
||||
subscription_count,
|
||||
response_sender.clone(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok((handle, response)) => {
|
||||
{
|
||||
let mut x = subscriptions.write().await;
|
||||
|
||||
let result: &serde_json::value::RawValue = response
|
||||
.result
|
||||
.as_ref()
|
||||
.context("there should be a result here")?;
|
||||
|
||||
// TODO: there must be a better way to turn a RawValue
|
||||
let k: U64 = serde_json::from_str(result.get())
|
||||
.context("subscription ids must be U64s")?;
|
||||
|
||||
x.insert(k, handle);
|
||||
};
|
||||
|
||||
Ok(response.into())
|
||||
}
|
||||
Err(err) => Err(err),
|
||||
}
|
||||
}
|
||||
"eth_unsubscribe" => {
|
||||
let request_metadata =
|
||||
RequestMetadata::new(&app, authorization.clone(), &json_request, None)
|
||||
.await;
|
||||
|
||||
#[derive(serde::Deserialize)]
|
||||
struct EthUnsubscribeParams([U64; 1]);
|
||||
|
||||
if let Some(params) = json_request.params {
|
||||
match serde_json::from_value(params) {
|
||||
Ok::<EthUnsubscribeParams, _>(params) => {
|
||||
let subscription_id = ¶ms.0[0];
|
||||
|
||||
// TODO: is this the right response?
|
||||
let partial_response = {
|
||||
let mut x = subscriptions.write().await;
|
||||
match x.remove(subscription_id) {
|
||||
None => false,
|
||||
Some(handle) => {
|
||||
handle.abort();
|
||||
true
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// TODO: don't create the response here. use a JsonRpcResponseData instead
|
||||
let response = JsonRpcForwardedResponse::from_value(
|
||||
json!(partial_response),
|
||||
response_id.clone(),
|
||||
);
|
||||
|
||||
request_metadata.add_response(&response);
|
||||
|
||||
Ok(response.into())
|
||||
}
|
||||
Err(err) => Err(Web3ProxyError::BadRequest(f!(
|
||||
"incorrect params given for eth_unsubscribe. {err:?}"
|
||||
))),
|
||||
}
|
||||
} else {
|
||||
Err(Web3ProxyError::BadRequest(
|
||||
"no params given for eth_unsubscribe".to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
_ => app
|
||||
.proxy_web3_rpc(authorization.clone(), json_request.into())
|
||||
.await
|
||||
{
|
||||
Ok((handle, response)) => {
|
||||
// TODO: better key
|
||||
let mut x = subscriptions.write().await;
|
||||
.map(|(status_code, response, _)| response),
|
||||
};
|
||||
|
||||
x.insert(
|
||||
response
|
||||
.result
|
||||
.as_ref()
|
||||
// TODO: what if there is an error?
|
||||
.expect("response should always have a result, not an error")
|
||||
.to_string(),
|
||||
handle,
|
||||
);
|
||||
|
||||
Ok(response.into())
|
||||
}
|
||||
Err(err) => Err(err),
|
||||
}
|
||||
}
|
||||
"eth_unsubscribe" => {
|
||||
// TODO: move this logic into the app?
|
||||
let request_bytes = json_request.num_bytes();
|
||||
|
||||
let request_metadata = Arc::new(RequestMetadata::new(request_bytes));
|
||||
|
||||
let subscription_id = json_request.params.unwrap().to_string();
|
||||
|
||||
let mut x = subscriptions.write().await;
|
||||
|
||||
// TODO: is this the right response?
|
||||
let partial_response = match x.remove(&subscription_id) {
|
||||
None => false,
|
||||
Some(handle) => {
|
||||
handle.abort();
|
||||
true
|
||||
}
|
||||
};
|
||||
|
||||
drop(x);
|
||||
|
||||
let response =
|
||||
JsonRpcForwardedResponse::from_value(json!(partial_response), id.clone());
|
||||
|
||||
if let Some(stat_sender) = app.stat_sender.as_ref() {
|
||||
let response_stat = RpcQueryStats::new(
|
||||
Some(json_request.method.clone()),
|
||||
authorization.clone(),
|
||||
request_metadata,
|
||||
response.num_bytes(),
|
||||
);
|
||||
|
||||
if let Err(err) = stat_sender.send_async(response_stat.into()).await {
|
||||
// TODO: what should we do?
|
||||
warn!("stat_sender failed during eth_unsubscribe: {:?}", err);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(response.into())
|
||||
}
|
||||
_ => app
|
||||
.proxy_web3_rpc(authorization.clone(), json_request.into())
|
||||
.await
|
||||
.map(|(response, _)| response),
|
||||
};
|
||||
|
||||
(id, response)
|
||||
(response_id, response)
|
||||
}
|
||||
Err(err) => {
|
||||
let id = JsonRpcId::None.to_raw_value();
|
||||
@ -428,13 +443,15 @@ async fn handle_socket_payload(
|
||||
let response_str = match response {
|
||||
Ok(x) => serde_json::to_string(&x).expect("to_string should always work here"),
|
||||
Err(err) => {
|
||||
let (_, mut response) = err.into_response_parts();
|
||||
response.id = id;
|
||||
let (_, response_data) = err.into_response_parts();
|
||||
|
||||
let response = JsonRpcForwardedResponse::from_response_data(response_data, response_id);
|
||||
|
||||
serde_json::to_string(&response).expect("to_string should always work here")
|
||||
}
|
||||
};
|
||||
|
||||
(Message::Text(response_str), semaphore)
|
||||
Ok((Message::Text(response_str), semaphore))
|
||||
}
|
||||
|
||||
async fn read_web3_socket(
|
||||
@ -443,7 +460,7 @@ async fn read_web3_socket(
|
||||
mut ws_rx: SplitStream<WebSocket>,
|
||||
response_sender: flume::Sender<Message>,
|
||||
) {
|
||||
// TODO: need a concurrent hashmap
|
||||
// RwLock should be fine here. a user isn't going to be opening tons of subscriptions
|
||||
let subscriptions = Arc::new(RwLock::new(HashMap::new()));
|
||||
let subscription_count = Arc::new(AtomicUsize::new(1));
|
||||
|
||||
@ -467,16 +484,17 @@ async fn read_web3_socket(
|
||||
|
||||
// new message from our client. forward to a backend and then send it through response_tx
|
||||
let response_msg = match msg {
|
||||
Message::Text(payload) => {
|
||||
Message::Text(ref payload) => {
|
||||
// TODO: do not unwrap!
|
||||
let (msg, s) = handle_socket_payload(
|
||||
app.clone(),
|
||||
&authorization,
|
||||
&payload,
|
||||
payload,
|
||||
&response_sender,
|
||||
&subscription_count,
|
||||
subscriptions,
|
||||
)
|
||||
.await;
|
||||
.await.unwrap();
|
||||
|
||||
_semaphore = s;
|
||||
|
||||
@ -499,6 +517,7 @@ async fn read_web3_socket(
|
||||
Message::Binary(mut payload) => {
|
||||
let payload = from_utf8_mut(&mut payload).unwrap();
|
||||
|
||||
// TODO: do not unwrap!
|
||||
let (msg, s) = handle_socket_payload(
|
||||
app.clone(),
|
||||
&authorization,
|
||||
@ -507,7 +526,7 @@ async fn read_web3_socket(
|
||||
&subscription_count,
|
||||
subscriptions,
|
||||
)
|
||||
.await;
|
||||
.await.unwrap();
|
||||
|
||||
_semaphore = s;
|
||||
|
||||
|
@ -3,36 +3,92 @@
|
||||
//! For ease of development, users can currently access these endponts.
|
||||
//! They will eventually move to another port.
|
||||
|
||||
use super::{FrontendHealthCache, FrontendJsonResponseCache, FrontendResponseCaches};
|
||||
use super::{ResponseCache, ResponseCacheKey};
|
||||
use crate::app::{Web3ProxyApp, APP_USER_AGENT};
|
||||
use axum::{http::StatusCode, response::IntoResponse, Extension, Json};
|
||||
use axum::{
|
||||
body::{Bytes, Full},
|
||||
http::StatusCode,
|
||||
response::{IntoResponse, Response},
|
||||
Extension,
|
||||
};
|
||||
use axum_macros::debug_handler;
|
||||
use once_cell::sync::Lazy;
|
||||
use serde_json::json;
|
||||
use std::sync::Arc;
|
||||
use std::{convert::Infallible, sync::Arc};
|
||||
|
||||
static HEALTH_OK: Lazy<Bytes> = Lazy::new(|| Bytes::from("OK\n"));
|
||||
static HEALTH_NOT_OK: Lazy<Bytes> = Lazy::new(|| Bytes::from(":(\n"));
|
||||
|
||||
static BACKUPS_NEEDED_TRUE: Lazy<Bytes> = Lazy::new(|| Bytes::from("true\n"));
|
||||
static BACKUPS_NEEDED_FALSE: Lazy<Bytes> = Lazy::new(|| Bytes::from("false\n"));
|
||||
|
||||
static CONTENT_TYPE_JSON: &str = "application/json";
|
||||
static CONTENT_TYPE_PLAIN: &str = "text/plain";
|
||||
|
||||
/// Health check page for load balancers to use.
|
||||
#[debug_handler]
|
||||
pub async fn health(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
Extension(health_cache): Extension<FrontendHealthCache>,
|
||||
Extension(cache): Extension<Arc<ResponseCache>>,
|
||||
) -> impl IntoResponse {
|
||||
let synced = health_cache
|
||||
.get_with((), async { app.balanced_rpcs.synced() })
|
||||
.await;
|
||||
let (code, content_type, body) = cache
|
||||
.get_or_insert_async::<Infallible, _>(&ResponseCacheKey::Health, async move {
|
||||
Ok(_health(app).await)
|
||||
})
|
||||
.await
|
||||
.expect("this cache get is infallible");
|
||||
|
||||
if synced {
|
||||
(StatusCode::OK, "OK")
|
||||
Response::builder()
|
||||
.status(code)
|
||||
.header("content-type", content_type)
|
||||
.body(Full::from(body))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
// TODO: _health doesn't need to be async, but _quick_cache_ttl needs an async function
|
||||
#[inline]
|
||||
async fn _health(app: Arc<Web3ProxyApp>) -> (StatusCode, &'static str, Bytes) {
|
||||
if app.balanced_rpcs.synced() {
|
||||
(StatusCode::OK, CONTENT_TYPE_PLAIN, HEALTH_OK.clone())
|
||||
} else {
|
||||
(StatusCode::SERVICE_UNAVAILABLE, ":(")
|
||||
(
|
||||
StatusCode::SERVICE_UNAVAILABLE,
|
||||
CONTENT_TYPE_PLAIN,
|
||||
HEALTH_NOT_OK.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Easy alerting if backup servers are in use.
|
||||
pub async fn backups_needed(Extension(app): Extension<Arc<Web3ProxyApp>>) -> impl IntoResponse {
|
||||
let code = {
|
||||
let consensus_rpcs = app.balanced_rpcs.watch_consensus_rpcs_sender.borrow();
|
||||
#[debug_handler]
|
||||
pub async fn backups_needed(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
Extension(cache): Extension<Arc<ResponseCache>>,
|
||||
) -> impl IntoResponse {
|
||||
let (code, content_type, body) = cache
|
||||
.get_or_insert_async::<Infallible, _>(&ResponseCacheKey::BackupsNeeded, async move {
|
||||
Ok(_backups_needed(app).await)
|
||||
})
|
||||
.await
|
||||
.expect("this cache get is infallible");
|
||||
|
||||
if let Some(consensus_rpcs) = consensus_rpcs.as_ref() {
|
||||
Response::builder()
|
||||
.status(code)
|
||||
.header("content-type", content_type)
|
||||
.body(Full::from(body))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
async fn _backups_needed(app: Arc<Web3ProxyApp>) -> (StatusCode, &'static str, Bytes) {
|
||||
let code = {
|
||||
let consensus_rpcs = app
|
||||
.balanced_rpcs
|
||||
.watch_consensus_rpcs_sender
|
||||
.borrow()
|
||||
.clone();
|
||||
|
||||
if let Some(ref consensus_rpcs) = consensus_rpcs {
|
||||
if consensus_rpcs.backups_needed {
|
||||
StatusCode::INTERNAL_SERVER_ERROR
|
||||
} else {
|
||||
@ -45,35 +101,57 @@ pub async fn backups_needed(Extension(app): Extension<Arc<Web3ProxyApp>>) -> imp
|
||||
};
|
||||
|
||||
if matches!(code, StatusCode::OK) {
|
||||
(code, "no backups needed. :)")
|
||||
(code, CONTENT_TYPE_PLAIN, BACKUPS_NEEDED_FALSE.clone())
|
||||
} else {
|
||||
(code, "backups needed! :(")
|
||||
(code, CONTENT_TYPE_PLAIN, BACKUPS_NEEDED_TRUE.clone())
|
||||
}
|
||||
}
|
||||
|
||||
/// Very basic status page.
|
||||
///
|
||||
/// TODO: replace this with proper stats and monitoring
|
||||
/// TODO: replace this with proper stats and monitoring. frontend uses it for their public dashboards though
|
||||
#[debug_handler]
|
||||
pub async fn status(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
Extension(response_cache): Extension<FrontendJsonResponseCache>,
|
||||
Extension(cache): Extension<Arc<ResponseCache>>,
|
||||
) -> impl IntoResponse {
|
||||
let body = response_cache
|
||||
.get_with(FrontendResponseCaches::Status, async {
|
||||
// TODO: what else should we include? uptime, cache hit rates, cpu load, memory used
|
||||
// TODO: the hostname is probably not going to change. only get once at the start?
|
||||
let body = json!({
|
||||
"version": APP_USER_AGENT,
|
||||
"chain_id": app.config.chain_id,
|
||||
"balanced_rpcs": app.balanced_rpcs,
|
||||
"private_rpcs": app.private_rpcs,
|
||||
"hostname": app.hostname,
|
||||
});
|
||||
|
||||
Arc::new(body)
|
||||
let (code, content_type, body) = cache
|
||||
.get_or_insert_async::<Infallible, _>(&ResponseCacheKey::Status, async move {
|
||||
Ok(_status(app).await)
|
||||
})
|
||||
.await;
|
||||
.await
|
||||
.expect("this cache get is infallible");
|
||||
|
||||
Json(body)
|
||||
Response::builder()
|
||||
.status(code)
|
||||
.header("content-type", content_type)
|
||||
.body(Full::from(body))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
// TODO: _status doesn't need to be async, but _quick_cache_ttl needs an async function
|
||||
#[inline]
|
||||
async fn _status(app: Arc<Web3ProxyApp>) -> (StatusCode, &'static str, Bytes) {
|
||||
// TODO: what else should we include? uptime, cache hit rates, cpu load, memory used
|
||||
// TODO: the hostname is probably not going to change. only get once at the start?
|
||||
let body = json!({
|
||||
"version": APP_USER_AGENT,
|
||||
"chain_id": app.config.chain_id,
|
||||
"balanced_rpcs": app.balanced_rpcs,
|
||||
"private_rpcs": app.private_rpcs,
|
||||
"bundler_4337_rpcs": app.bundler_4337_rpcs,
|
||||
"hostname": app.hostname,
|
||||
});
|
||||
|
||||
let body = body.to_string().into_bytes();
|
||||
|
||||
let body = Bytes::from(body);
|
||||
|
||||
let code = if app.balanced_rpcs.synced() {
|
||||
StatusCode::OK
|
||||
} else {
|
||||
StatusCode::INTERNAL_SERVER_ERROR
|
||||
};
|
||||
|
||||
(code, CONTENT_TYPE_JSON, body)
|
||||
}
|
||||
|
@ -1,838 +0,0 @@
|
||||
//! Handle registration, logins, and managing account data.
|
||||
use super::authorization::{login_is_authorized, RpcSecretKey};
|
||||
use super::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResponse};
|
||||
use crate::app::Web3ProxyApp;
|
||||
use crate::http_params::{
|
||||
get_chain_id_from_params, get_page_from_params, get_query_start_from_params,
|
||||
};
|
||||
use crate::stats::influxdb_queries::query_user_stats;
|
||||
use crate::stats::StatType;
|
||||
use crate::user_token::UserBearerToken;
|
||||
use crate::{PostLogin, PostLoginQuery};
|
||||
use axum::headers::{Header, Origin, Referer, UserAgent};
|
||||
use axum::{
|
||||
extract::{Path, Query},
|
||||
headers::{authorization::Bearer, Authorization},
|
||||
response::IntoResponse,
|
||||
Extension, Json, TypedHeader,
|
||||
};
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use axum_macros::debug_handler;
|
||||
use chrono::{TimeZone, Utc};
|
||||
use entities::sea_orm_active_enums::TrackingLevel;
|
||||
use entities::{login, pending_login, revert_log, rpc_key, user};
|
||||
use ethers::{prelude::Address, types::Bytes};
|
||||
use hashbrown::HashMap;
|
||||
use http::{HeaderValue, StatusCode};
|
||||
use ipnet::IpNet;
|
||||
use itertools::Itertools;
|
||||
use log::{debug, warn};
|
||||
use migration::sea_orm::prelude::Uuid;
|
||||
use migration::sea_orm::{
|
||||
self, ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, PaginatorTrait, QueryFilter,
|
||||
QueryOrder, TransactionTrait, TryIntoModel,
|
||||
};
|
||||
use serde::Deserialize;
|
||||
use serde_json::json;
|
||||
use siwe::{Message, VerificationOpts};
|
||||
use std::ops::Add;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use time::{Duration, OffsetDateTime};
|
||||
use ulid::Ulid;
|
||||
|
||||
/// `GET /user/login/:user_address` or `GET /user/login/:user_address/:message_eip` -- Start the "Sign In with Ethereum" (siwe) login flow.
|
||||
///
|
||||
/// `message_eip`s accepted:
|
||||
/// - eip191_bytes
|
||||
/// - eip191_hash
|
||||
/// - eip4361 (default)
|
||||
///
|
||||
/// Coming soon: eip1271
|
||||
///
|
||||
/// This is the initial entrypoint for logging in. Take the response from this endpoint and give it to your user's wallet for singing. POST the response to `/user/login`.
|
||||
///
|
||||
/// Rate limited by IP address.
|
||||
///
|
||||
/// At first i thought about checking that user_address is in our db,
|
||||
/// But theres no need to separate the registration and login flows.
|
||||
/// It is a better UX to just click "login with ethereum" and have the account created if it doesn't exist.
|
||||
/// We can prompt for an email and and payment after they log in.
|
||||
#[debug_handler]
|
||||
pub async fn user_login_get(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
InsecureClientIp(ip): InsecureClientIp,
|
||||
// TODO: what does axum's error handling look like if the path fails to parse?
|
||||
Path(mut params): Path<HashMap<String, String>>,
|
||||
) -> Web3ProxyResponse {
|
||||
login_is_authorized(&app, ip).await?;
|
||||
|
||||
// create a message and save it in redis
|
||||
// TODO: how many seconds? get from config?
|
||||
let expire_seconds: usize = 20 * 60;
|
||||
|
||||
let nonce = Ulid::new();
|
||||
|
||||
let issued_at = OffsetDateTime::now_utc();
|
||||
|
||||
let expiration_time = issued_at.add(Duration::new(expire_seconds as i64, 0));
|
||||
|
||||
// TODO: allow ENS names here?
|
||||
let user_address: Address = params
|
||||
.remove("user_address")
|
||||
.ok_or(Web3ProxyError::BadRouting)?
|
||||
.parse()
|
||||
.or(Err(Web3ProxyError::ParseAddressError))?;
|
||||
|
||||
let login_domain = app
|
||||
.config
|
||||
.login_domain
|
||||
.clone()
|
||||
.unwrap_or_else(|| "llamanodes.com".to_string());
|
||||
|
||||
// TODO: get most of these from the app config
|
||||
let message = Message {
|
||||
// TODO: don't unwrap
|
||||
// TODO: accept a login_domain from the request?
|
||||
domain: login_domain.parse().unwrap(),
|
||||
address: user_address.to_fixed_bytes(),
|
||||
// TODO: config for statement
|
||||
statement: Some("🦙🦙🦙🦙🦙".to_string()),
|
||||
// TODO: don't unwrap
|
||||
uri: format!("https://{}/", login_domain).parse().unwrap(),
|
||||
version: siwe::Version::V1,
|
||||
chain_id: 1,
|
||||
expiration_time: Some(expiration_time.into()),
|
||||
issued_at: issued_at.into(),
|
||||
nonce: nonce.to_string(),
|
||||
not_before: None,
|
||||
request_id: None,
|
||||
resources: vec![],
|
||||
};
|
||||
|
||||
let db_conn = app.db_conn().web3_context("login requires a database")?;
|
||||
|
||||
// massage types to fit in the database. sea-orm does not make this very elegant
|
||||
let uuid = Uuid::from_u128(nonce.into());
|
||||
// we add 1 to expire_seconds just to be sure the database has the key for the full expiration_time
|
||||
let expires_at = Utc
|
||||
.timestamp_opt(expiration_time.unix_timestamp() + 1, 0)
|
||||
.unwrap();
|
||||
|
||||
// we do not store a maximum number of attempted logins. anyone can request so we don't want to allow DOS attacks
|
||||
// add a row to the database for this user
|
||||
let user_pending_login = pending_login::ActiveModel {
|
||||
id: sea_orm::NotSet,
|
||||
nonce: sea_orm::Set(uuid),
|
||||
message: sea_orm::Set(message.to_string()),
|
||||
expires_at: sea_orm::Set(expires_at),
|
||||
imitating_user: sea_orm::Set(None),
|
||||
};
|
||||
|
||||
user_pending_login
|
||||
.save(&db_conn)
|
||||
.await
|
||||
.web3_context("saving user's pending_login")?;
|
||||
|
||||
// there are multiple ways to sign messages and not all wallets support them
|
||||
// TODO: default message eip from config?
|
||||
let message_eip = params
|
||||
.remove("message_eip")
|
||||
.unwrap_or_else(|| "eip4361".to_string());
|
||||
|
||||
let message: String = match message_eip.as_str() {
|
||||
"eip191_bytes" => Bytes::from(message.eip191_bytes().unwrap()).to_string(),
|
||||
"eip191_hash" => Bytes::from(&message.eip191_hash().unwrap()).to_string(),
|
||||
"eip4361" => message.to_string(),
|
||||
_ => {
|
||||
return Err(Web3ProxyError::InvalidEip);
|
||||
}
|
||||
};
|
||||
|
||||
Ok(message.into_response())
|
||||
}
|
||||
|
||||
/// `POST /user/login` - Register or login by posting a signed "siwe" message.
|
||||
/// It is recommended to save the returned bearer token in a cookie.
|
||||
/// The bearer token can be used to authenticate other requests, such as getting the user's stats or modifying the user's profile.
|
||||
#[debug_handler]
|
||||
pub async fn user_login_post(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
InsecureClientIp(ip): InsecureClientIp,
|
||||
Query(query): Query<PostLoginQuery>,
|
||||
Json(payload): Json<PostLogin>,
|
||||
) -> Web3ProxyResponse {
|
||||
login_is_authorized(&app, ip).await?;
|
||||
|
||||
// TODO: this seems too verbose. how can we simply convert a String into a [u8; 65]
|
||||
let their_sig_bytes = Bytes::from_str(&payload.sig).web3_context("parsing sig")?;
|
||||
if their_sig_bytes.len() != 65 {
|
||||
return Err(Web3ProxyError::InvalidSignatureLength);
|
||||
}
|
||||
let mut their_sig: [u8; 65] = [0; 65];
|
||||
for x in 0..65 {
|
||||
their_sig[x] = their_sig_bytes[x]
|
||||
}
|
||||
|
||||
// we can't trust that they didn't tamper with the message in some way. like some clients return it hex encoded
|
||||
// TODO: checking 0x seems fragile, but I think it will be fine. siwe message text shouldn't ever start with 0x
|
||||
let their_msg: Message = if payload.msg.starts_with("0x") {
|
||||
let their_msg_bytes =
|
||||
Bytes::from_str(&payload.msg).web3_context("parsing payload message")?;
|
||||
|
||||
// TODO: lossy or no?
|
||||
String::from_utf8_lossy(their_msg_bytes.as_ref())
|
||||
.parse::<siwe::Message>()
|
||||
.web3_context("parsing hex string message")?
|
||||
} else {
|
||||
payload
|
||||
.msg
|
||||
.parse::<siwe::Message>()
|
||||
.web3_context("parsing string message")?
|
||||
};
|
||||
|
||||
// the only part of the message we will trust is their nonce
|
||||
// TODO: this is fragile. have a helper function/struct for redis keys
|
||||
let login_nonce = UserBearerToken::from_str(&their_msg.nonce)?;
|
||||
|
||||
// fetch the message we gave them from our database
|
||||
let db_replica = app
|
||||
.db_replica()
|
||||
.web3_context("Getting database connection")?;
|
||||
|
||||
// massage type for the db
|
||||
let login_nonce_uuid: Uuid = login_nonce.clone().into();
|
||||
|
||||
let user_pending_login = pending_login::Entity::find()
|
||||
.filter(pending_login::Column::Nonce.eq(login_nonce_uuid))
|
||||
.one(db_replica.conn())
|
||||
.await
|
||||
.web3_context("database error while finding pending_login")?
|
||||
.web3_context("login nonce not found")?;
|
||||
|
||||
let our_msg: siwe::Message = user_pending_login
|
||||
.message
|
||||
.parse()
|
||||
.web3_context("parsing siwe message")?;
|
||||
|
||||
// default options are fine. the message includes timestamp and domain and nonce
|
||||
let verify_config = VerificationOpts::default();
|
||||
|
||||
// Check with both verify and verify_eip191
|
||||
if let Err(err_1) = our_msg
|
||||
.verify(&their_sig, &verify_config)
|
||||
.await
|
||||
.web3_context("verifying signature against our local message")
|
||||
{
|
||||
// verification method 1 failed. try eip191
|
||||
if let Err(err_191) = our_msg
|
||||
.verify_eip191(&their_sig)
|
||||
.web3_context("verifying eip191 signature against our local message")
|
||||
{
|
||||
let db_conn = app
|
||||
.db_conn()
|
||||
.web3_context("deleting expired pending logins requires a db")?;
|
||||
|
||||
// delete ALL expired rows.
|
||||
let now = Utc::now();
|
||||
let delete_result = pending_login::Entity::delete_many()
|
||||
.filter(pending_login::Column::ExpiresAt.lte(now))
|
||||
.exec(&db_conn)
|
||||
.await?;
|
||||
|
||||
// TODO: emit a stat? if this is high something weird might be happening
|
||||
debug!("cleared expired pending_logins: {:?}", delete_result);
|
||||
|
||||
return Err(Web3ProxyError::EipVerificationFailed(
|
||||
Box::new(err_1),
|
||||
Box::new(err_191),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: limit columns or load whole user?
|
||||
let u = user::Entity::find()
|
||||
.filter(user::Column::Address.eq(our_msg.address.as_ref()))
|
||||
.one(db_replica.conn())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let db_conn = app.db_conn().web3_context("login requires a db")?;
|
||||
|
||||
let (u, uks, status_code) = match u {
|
||||
None => {
|
||||
// user does not exist yet
|
||||
|
||||
// check the invite code
|
||||
// TODO: more advanced invite codes that set different request/minute and concurrency limits
|
||||
if let Some(invite_code) = &app.config.invite_code {
|
||||
if query.invite_code.as_ref() != Some(invite_code) {
|
||||
return Err(Web3ProxyError::InvalidInviteCode);
|
||||
}
|
||||
}
|
||||
|
||||
let txn = db_conn.begin().await?;
|
||||
|
||||
// the only thing we need from them is an address
|
||||
// everything else is optional
|
||||
// TODO: different invite codes should allow different levels
|
||||
// TODO: maybe decrement a count on the invite code?
|
||||
let u = user::ActiveModel {
|
||||
address: sea_orm::Set(our_msg.address.into()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let u = u.insert(&txn).await?;
|
||||
|
||||
// create the user's first api key
|
||||
let rpc_secret_key = RpcSecretKey::new();
|
||||
|
||||
let uk = rpc_key::ActiveModel {
|
||||
user_id: sea_orm::Set(u.id),
|
||||
secret_key: sea_orm::Set(rpc_secret_key.into()),
|
||||
description: sea_orm::Set(None),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let uk = uk
|
||||
.insert(&txn)
|
||||
.await
|
||||
.web3_context("Failed saving new user key")?;
|
||||
|
||||
let uks = vec![uk];
|
||||
|
||||
// save the user and key to the database
|
||||
txn.commit().await?;
|
||||
|
||||
(u, uks, StatusCode::CREATED)
|
||||
}
|
||||
Some(u) => {
|
||||
// the user is already registered
|
||||
let uks = rpc_key::Entity::find()
|
||||
.filter(rpc_key::Column::UserId.eq(u.id))
|
||||
.all(db_replica.conn())
|
||||
.await
|
||||
.web3_context("failed loading user's key")?;
|
||||
|
||||
(u, uks, StatusCode::OK)
|
||||
}
|
||||
};
|
||||
|
||||
// create a bearer token for the user.
|
||||
let user_bearer_token = UserBearerToken::default();
|
||||
|
||||
// json response with everything in it
|
||||
// we could return just the bearer token, but I think they will always request api keys and the user profile
|
||||
let response_json = json!({
|
||||
"rpc_keys": uks
|
||||
.into_iter()
|
||||
.map(|uk| (uk.id, uk))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
"bearer_token": user_bearer_token,
|
||||
"user": u,
|
||||
});
|
||||
|
||||
let response = (status_code, Json(response_json)).into_response();
|
||||
|
||||
// add bearer to the database
|
||||
|
||||
// expire in 4 weeks
|
||||
let expires_at = Utc::now()
|
||||
.checked_add_signed(chrono::Duration::weeks(4))
|
||||
.unwrap();
|
||||
|
||||
let user_login = login::ActiveModel {
|
||||
id: sea_orm::NotSet,
|
||||
bearer_token: sea_orm::Set(user_bearer_token.uuid()),
|
||||
user_id: sea_orm::Set(u.id),
|
||||
expires_at: sea_orm::Set(expires_at),
|
||||
read_only: sea_orm::Set(false),
|
||||
};
|
||||
|
||||
user_login
|
||||
.save(&db_conn)
|
||||
.await
|
||||
.web3_context("saving user login")?;
|
||||
|
||||
if let Err(err) = user_pending_login
|
||||
.into_active_model()
|
||||
.delete(&db_conn)
|
||||
.await
|
||||
{
|
||||
warn!("Failed to delete nonce:{}: {}", login_nonce.0, err);
|
||||
}
|
||||
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
/// `POST /user/logout` - Forget the bearer token in the `Authentication` header.
|
||||
#[debug_handler]
|
||||
pub async fn user_logout_post(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let user_bearer = UserBearerToken::try_from(bearer)?;
|
||||
|
||||
let db_conn = app
|
||||
.db_conn()
|
||||
.web3_context("database needed for user logout")?;
|
||||
|
||||
if let Err(err) = login::Entity::delete_many()
|
||||
.filter(login::Column::BearerToken.eq(user_bearer.uuid()))
|
||||
.exec(&db_conn)
|
||||
.await
|
||||
{
|
||||
debug!("Failed to delete {}: {}", user_bearer.redis_key(), err);
|
||||
}
|
||||
|
||||
let now = Utc::now();
|
||||
|
||||
// also delete any expired logins
|
||||
let delete_result = login::Entity::delete_many()
|
||||
.filter(login::Column::ExpiresAt.lte(now))
|
||||
.exec(&db_conn)
|
||||
.await;
|
||||
|
||||
debug!("Deleted expired logins: {:?}", delete_result);
|
||||
|
||||
// also delete any expired pending logins
|
||||
let delete_result = login::Entity::delete_many()
|
||||
.filter(login::Column::ExpiresAt.lte(now))
|
||||
.exec(&db_conn)
|
||||
.await;
|
||||
|
||||
debug!("Deleted expired pending logins: {:?}", delete_result);
|
||||
|
||||
// TODO: what should the response be? probably json something
|
||||
Ok("goodbye".into_response())
|
||||
}
|
||||
|
||||
/// `GET /user` -- Use a bearer token to get the user's profile.
|
||||
///
|
||||
/// - the email address of a user if they opted in to get contacted via email
|
||||
///
|
||||
/// TODO: this will change as we add better support for secondary users.
|
||||
#[debug_handler]
|
||||
pub async fn user_get(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer_token)): TypedHeader<Authorization<Bearer>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let (user, _semaphore) = app.bearer_is_authorized(bearer_token).await?;
|
||||
|
||||
Ok(Json(user).into_response())
|
||||
}
|
||||
|
||||
/// the JSON input to the `post_user` handler.
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct UserPost {
|
||||
email: Option<String>,
|
||||
}
|
||||
|
||||
/// `POST /user` -- modify the account connected to the bearer token in the `Authentication` header.
|
||||
#[debug_handler]
|
||||
pub async fn user_post(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer_token)): TypedHeader<Authorization<Bearer>>,
|
||||
Json(payload): Json<UserPost>,
|
||||
) -> Web3ProxyResponse {
|
||||
let (user, _semaphore) = app.bearer_is_authorized(bearer_token).await?;
|
||||
|
||||
let mut user: user::ActiveModel = user.into();
|
||||
|
||||
// update the email address
|
||||
if let Some(x) = payload.email {
|
||||
// TODO: only Set if no change
|
||||
if x.is_empty() {
|
||||
user.email = sea_orm::Set(None);
|
||||
} else {
|
||||
// TODO: do some basic validation
|
||||
// TODO: don't set immediately, send a confirmation email first
|
||||
// TODO: compare first? or is sea orm smart enough to do that for us?
|
||||
user.email = sea_orm::Set(Some(x));
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: what else can we update here? password hash? subscription to newsletter?
|
||||
|
||||
let user = if user.is_changed() {
|
||||
let db_conn = app.db_conn().web3_context("Getting database connection")?;
|
||||
|
||||
user.save(&db_conn).await?
|
||||
} else {
|
||||
// no changes. no need to touch the database
|
||||
user
|
||||
};
|
||||
|
||||
let user: user::Model = user.try_into().web3_context("Returning updated user")?;
|
||||
|
||||
Ok(Json(user).into_response())
|
||||
}
|
||||
|
||||
/// `GET /user/balance` -- Use a bearer token to get the user's balance and spend.
|
||||
///
|
||||
/// - show balance in USD
|
||||
/// - show deposits history (currency, amounts, transaction id)
|
||||
///
|
||||
/// TODO: one key per request? maybe /user/balance/:rpc_key?
|
||||
/// TODO: this will change as we add better support for secondary users.
|
||||
#[debug_handler]
|
||||
pub async fn user_balance_get(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let (_user, _semaphore) = app.bearer_is_authorized(bearer).await?;
|
||||
|
||||
todo!("user_balance_get");
|
||||
}
|
||||
|
||||
/// `POST /user/balance/:txhash` -- Manually process a confirmed tx hash to update a user's balance.
|
||||
///
|
||||
/// We will subscribe to events to watch for any user deposits, but sometimes events can be missed.
|
||||
///
|
||||
/// TODO: change this. just have a /tx/:txhash that is open to anyone. rate limit like we rate limit /login
|
||||
#[debug_handler]
|
||||
pub async fn user_balance_post(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let (_user, _semaphore) = app.bearer_is_authorized(bearer).await?;
|
||||
|
||||
todo!("user_balance_post");
|
||||
}
|
||||
|
||||
/// `GET /user/keys` -- Use a bearer token to get the user's api keys and their settings.
|
||||
#[debug_handler]
|
||||
pub async fn rpc_keys_get(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let (user, _semaphore) = app.bearer_is_authorized(bearer).await?;
|
||||
|
||||
let db_replica = app
|
||||
.db_replica()
|
||||
.web3_context("db_replica is required to fetch a user's keys")?;
|
||||
|
||||
let uks = rpc_key::Entity::find()
|
||||
.filter(rpc_key::Column::UserId.eq(user.id))
|
||||
.all(db_replica.conn())
|
||||
.await
|
||||
.web3_context("failed loading user's key")?;
|
||||
|
||||
let response_json = json!({
|
||||
"user_id": user.id,
|
||||
"user_rpc_keys": uks
|
||||
.into_iter()
|
||||
.map(|uk| (uk.id, uk))
|
||||
.collect::<HashMap::<_, _>>(),
|
||||
});
|
||||
|
||||
Ok(Json(response_json).into_response())
|
||||
}
|
||||
|
||||
/// `DELETE /user/keys` -- Use a bearer token to delete an existing key.
|
||||
#[debug_handler]
|
||||
pub async fn rpc_keys_delete(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let (_user, _semaphore) = app.bearer_is_authorized(bearer).await?;
|
||||
|
||||
// TODO: think about how cascading deletes and billing should work
|
||||
Err(Web3ProxyError::NotImplemented)
|
||||
}
|
||||
|
||||
/// the JSON input to the `rpc_keys_management` handler.
|
||||
/// If `key_id` is set, it updates an existing key.
|
||||
/// If `key_id` is not set, it creates a new key.
|
||||
/// `log_request_method` cannot be changed once the key is created
|
||||
/// `user_tier` cannot be changed here
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct UserKeyManagement {
|
||||
key_id: Option<u64>,
|
||||
active: Option<bool>,
|
||||
allowed_ips: Option<String>,
|
||||
allowed_origins: Option<String>,
|
||||
allowed_referers: Option<String>,
|
||||
allowed_user_agents: Option<String>,
|
||||
description: Option<String>,
|
||||
log_level: Option<TrackingLevel>,
|
||||
// TODO: enable log_revert_trace: Option<f64>,
|
||||
private_txs: Option<bool>,
|
||||
}
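// A minimal sketch (illustrative values only; field names come from
// `UserKeyManagement` above and the log level names from the handler's error
// message below -- the exact serde casing of `log_level` is not checked here)
// of a JSON body this struct would accept:
#[allow(dead_code)]
fn example_rpc_key_update_body() -> serde_json::Value {
    serde_json::json!({
        "key_id": 42,                                // omit to create a new key
        "active": true,
        "allowed_ips": "10.0.0.0/8, 192.168.1.0/24", // comma-separated CIDRs
        "allowed_origins": "https://example.com",
        "description": "backend indexer",
        "log_level": "aggregated",                   // "none" | "detailed" | "aggregated"
        "private_txs": false
    })
}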
|
||||
|
||||
/// `POST /user/keys` or `PUT /user/keys` -- Use a bearer token to create or update an existing key.
|
||||
#[debug_handler]
|
||||
pub async fn rpc_keys_management(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
Json(payload): Json<UserKeyManagement>,
|
||||
) -> Web3ProxyResponse {
|
||||
// TODO: is there a way we can know if this is a PUT or POST? right now we can modify or create keys with either. though that probably doesn't matter
|
||||
|
||||
let (user, _semaphore) = app.bearer_is_authorized(bearer).await?;
|
||||
|
||||
let db_replica = app
|
||||
.db_replica()
|
||||
.web3_context("getting db for user's keys")?;
|
||||
|
||||
let mut uk = if let Some(existing_key_id) = payload.key_id {
|
||||
// get the key and make sure it belongs to the user
|
||||
rpc_key::Entity::find()
|
||||
.filter(rpc_key::Column::UserId.eq(user.id))
|
||||
.filter(rpc_key::Column::Id.eq(existing_key_id))
|
||||
.one(db_replica.conn())
|
||||
.await
|
||||
.web3_context("failed loading user's key")?
|
||||
.web3_context("key does not exist or is not controlled by this bearer token")?
|
||||
.into_active_model()
|
||||
} else {
|
||||
// make a new key
|
||||
// TODO: limit to 10 keys?
|
||||
let secret_key = RpcSecretKey::new();
|
||||
|
||||
let log_level = payload
|
||||
.log_level
|
||||
.web3_context("log level must be 'none', 'detailed', or 'aggregated'")?;
|
||||
|
||||
rpc_key::ActiveModel {
|
||||
user_id: sea_orm::Set(user.id),
|
||||
secret_key: sea_orm::Set(secret_key.into()),
|
||||
log_level: sea_orm::Set(log_level),
|
||||
..Default::default()
|
||||
}
|
||||
};
|
||||
|
||||
// TODO: do we need null descriptions? default to empty string should be fine, right?
|
||||
if let Some(description) = payload.description {
|
||||
if description.is_empty() {
|
||||
uk.description = sea_orm::Set(None);
|
||||
} else {
|
||||
uk.description = sea_orm::Set(Some(description));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(private_txs) = payload.private_txs {
|
||||
uk.private_txs = sea_orm::Set(private_txs);
|
||||
}
|
||||
|
||||
if let Some(active) = payload.active {
|
||||
uk.active = sea_orm::Set(active);
|
||||
}
|
||||
|
||||
if let Some(allowed_ips) = payload.allowed_ips {
|
||||
if allowed_ips.is_empty() {
|
||||
uk.allowed_ips = sea_orm::Set(None);
|
||||
} else {
|
||||
// split allowed ips on ',' and try to parse them all. error on invalid input
|
||||
let allowed_ips = allowed_ips
|
||||
.split(',')
|
||||
.map(|x| x.trim().parse::<IpNet>())
|
||||
.collect::<Result<Vec<_>, _>>()?
|
||||
// parse worked. convert back to Strings
|
||||
.into_iter()
|
||||
.map(|x| x.to_string());
|
||||
|
||||
// and join them back together
|
||||
let allowed_ips: String =
|
||||
Itertools::intersperse(allowed_ips, ", ".to_string()).collect();
|
||||
|
||||
uk.allowed_ips = sea_orm::Set(Some(allowed_ips));
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: this should actually be bytes
|
||||
if let Some(allowed_origins) = payload.allowed_origins {
|
||||
if allowed_origins.is_empty() {
|
||||
uk.allowed_origins = sea_orm::Set(None);
|
||||
} else {
|
||||
// split allowed_origins on ',' and try to parse them all. error on invalid input
|
||||
let allowed_origins = allowed_origins
|
||||
.split(',')
|
||||
.map(|x| HeaderValue::from_str(x.trim()))
|
||||
.collect::<Result<Vec<_>, _>>()?
|
||||
.into_iter()
|
||||
.map(|x| Origin::decode(&mut [x].iter()))
|
||||
.collect::<Result<Vec<_>, _>>()?
|
||||
// parse worked. convert back to String and join them back together
|
||||
.into_iter()
|
||||
.map(|x| x.to_string());
|
||||
|
||||
let allowed_origins: String =
|
||||
Itertools::intersperse(allowed_origins, ", ".to_string()).collect();
|
||||
|
||||
uk.allowed_origins = sea_orm::Set(Some(allowed_origins));
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: this should actually be bytes
|
||||
if let Some(allowed_referers) = payload.allowed_referers {
|
||||
if allowed_referers.is_empty() {
|
||||
uk.allowed_referers = sea_orm::Set(None);
|
||||
} else {
|
||||
// split allowed referers on ',' and try to parse them all. error on invalid input
|
||||
let allowed_referers = allowed_referers
|
||||
.split(',')
|
||||
.map(|x| HeaderValue::from_str(x.trim()))
|
||||
.collect::<Result<Vec<_>, _>>()?
|
||||
.into_iter()
|
||||
.map(|x| Referer::decode(&mut [x].iter()))
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
// parse worked. now we can put it back together.
|
||||
// but we can't go directly to String.
|
||||
// so we convert to HeaderValues first
|
||||
let mut header_map = vec![];
|
||||
for x in allowed_referers {
|
||||
x.encode(&mut header_map);
|
||||
}
|
||||
|
||||
// convert HeaderValues to Strings
|
||||
// since we got these from strings, this should always work (unless we figure out using bytes)
|
||||
let allowed_referers = header_map
|
||||
.into_iter()
|
||||
.map(|x| x.to_str().map(|x| x.to_string()))
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
// join strings together with commas
|
||||
let allowed_referers: String =
|
||||
Itertools::intersperse(allowed_referers.into_iter(), ", ".to_string()).collect();
|
||||
|
||||
uk.allowed_referers = sea_orm::Set(Some(allowed_referers));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(allowed_user_agents) = payload.allowed_user_agents {
|
||||
if allowed_user_agents.is_empty() {
|
||||
uk.allowed_user_agents = sea_orm::Set(None);
|
||||
} else {
|
||||
// split allowed_user_agents on ',' and try to parse them all. error on invalid input
|
||||
let allowed_user_agents = allowed_user_agents
|
||||
.split(',')
|
||||
.filter_map(|x| x.trim().parse::<UserAgent>().ok())
|
||||
// parse worked. convert back to String
|
||||
.map(|x| x.to_string());
|
||||
|
||||
// join the strings together
|
||||
let allowed_user_agents: String =
|
||||
Itertools::intersperse(allowed_user_agents, ", ".to_string()).collect();
|
||||
|
||||
uk.allowed_user_agents = sea_orm::Set(Some(allowed_user_agents));
|
||||
}
|
||||
}
|
||||
|
||||
let uk = if uk.is_changed() {
|
||||
let db_conn = app.db_conn().web3_context("login requires a db")?;
|
||||
|
||||
uk.save(&db_conn)
|
||||
.await
|
||||
.web3_context("Failed saving user key")?
|
||||
} else {
|
||||
uk
|
||||
};
|
||||
|
||||
let uk = uk.try_into_model()?;
|
||||
|
||||
Ok(Json(uk).into_response())
|
||||
}
|
||||
|
||||
/// `GET /user/revert_logs` -- Use a bearer token to get the user's revert logs.
|
||||
#[debug_handler]
|
||||
pub async fn user_revert_logs_get(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
Query(params): Query<HashMap<String, String>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let (user, _semaphore) = app.bearer_is_authorized(bearer).await?;
|
||||
|
||||
let chain_id = get_chain_id_from_params(app.as_ref(), ¶ms)?;
|
||||
let query_start = get_query_start_from_params(¶ms)?;
|
||||
let page = get_page_from_params(¶ms)?;
|
||||
|
||||
// TODO: page size from config
|
||||
let page_size = 1_000;
|
||||
|
||||
let mut response = HashMap::new();
|
||||
|
||||
response.insert("page", json!(page));
|
||||
response.insert("page_size", json!(page_size));
|
||||
response.insert("chain_id", json!(chain_id));
|
||||
response.insert("query_start", json!(query_start.timestamp() as u64));
|
||||
|
||||
let db_replica = app
|
||||
.db_replica()
|
||||
.web3_context("getting replica db for user's revert logs")?;
|
||||
|
||||
let uks = rpc_key::Entity::find()
|
||||
.filter(rpc_key::Column::UserId.eq(user.id))
|
||||
.all(db_replica.conn())
|
||||
.await
|
||||
.web3_context("failed loading user's key")?;
|
||||
|
||||
// TODO: only select the ids
|
||||
let uks: Vec<_> = uks.into_iter().map(|x| x.id).collect();
|
||||
|
||||
// get revert logs
|
||||
let mut q = revert_log::Entity::find()
|
||||
.filter(revert_log::Column::Timestamp.gte(query_start))
|
||||
.filter(revert_log::Column::RpcKeyId.is_in(uks))
|
||||
.order_by_asc(revert_log::Column::Timestamp);
|
||||
|
||||
if chain_id == 0 {
|
||||
// don't do anything
|
||||
} else {
|
||||
// filter on chain id
|
||||
q = q.filter(revert_log::Column::ChainId.eq(chain_id))
|
||||
}
|
||||
|
||||
// query the database for number of items and pages
|
||||
let pages_result = q
|
||||
.clone()
|
||||
.paginate(db_replica.conn(), page_size)
|
||||
.num_items_and_pages()
|
||||
.await?;
|
||||
|
||||
response.insert("num_items", pages_result.number_of_items.into());
|
||||
response.insert("num_pages", pages_result.number_of_pages.into());
|
||||
|
||||
// query the database for the revert logs
|
||||
let revert_logs = q
|
||||
.paginate(db_replica.conn(), page_size)
|
||||
.fetch_page(page)
|
||||
.await?;
|
||||
|
||||
response.insert("revert_logs", json!(revert_logs));
|
||||
|
||||
Ok(Json(response).into_response())
|
||||
}
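// For reference, the response shape assembled above (values illustrative):
//
//   { "page": 0, "page_size": 1000, "chain_id": 1, "query_start": 1672531200,
//     "num_items": 42, "num_pages": 1, "revert_logs": [ ... ] }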
|
||||
|
||||
/// `GET /user/stats/aggregate` -- Public endpoint for aggregate stats such as bandwidth used and methods requested.
|
||||
#[debug_handler]
|
||||
pub async fn user_stats_aggregated_get(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
bearer: Option<TypedHeader<Authorization<Bearer>>>,
|
||||
Query(params): Query<HashMap<String, String>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let response = query_user_stats(&app, bearer, ¶ms, StatType::Aggregated).await?;
|
||||
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
/// `GET /user/stats/detailed` -- Use a bearer token to get the user's key stats such as bandwidth used and methods requested.
|
||||
///
|
||||
/// If no bearer is provided, detailed stats for all users will be shown.
|
||||
/// View a single user with `?user_id=$x`.
|
||||
/// View a single chain with `?chain_id=$x`.
|
||||
///
|
||||
/// Set `$x` to zero to see all.
|
||||
///
|
||||
/// TODO: this will change as we add better support for secondary users.
|
||||
#[debug_handler]
|
||||
pub async fn user_stats_detailed_get(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
bearer: Option<TypedHeader<Authorization<Bearer>>>,
|
||||
Query(params): Query<HashMap<String, String>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let response = query_user_stats(&app, bearer, ¶ms, StatType::Detailed).await?;
|
||||
|
||||
Ok(response)
|
||||
}
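// Example queries for the two stats endpoints above. Only `user_id` and
// `chain_id` are taken from the doc comments; the values are illustrative:
//
//   GET /user/stats/aggregate?chain_id=1
//   GET /user/stats/detailed?user_id=0&chain_id=137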
|
473
web3_proxy/src/frontend/users/authentication.rs
Normal file
@ -0,0 +1,473 @@
|
||||
//! Handle registration, logins, and managing account data.
|
||||
use crate::app::Web3ProxyApp;
|
||||
use crate::frontend::authorization::{login_is_authorized, RpcSecretKey};
|
||||
use crate::frontend::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResponse};
|
||||
use crate::user_token::UserBearerToken;
|
||||
use crate::{PostLogin, PostLoginQuery};
|
||||
use axum::{
|
||||
extract::{Path, Query},
|
||||
headers::{authorization::Bearer, Authorization},
|
||||
response::IntoResponse,
|
||||
Extension, Json, TypedHeader,
|
||||
};
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use axum_macros::debug_handler;
|
||||
use chrono::{TimeZone, Utc};
|
||||
use entities;
|
||||
use entities::{balance, login, pending_login, referee, referrer, rpc_key, user};
|
||||
use ethers::{prelude::Address, types::Bytes};
|
||||
use hashbrown::HashMap;
|
||||
use http::StatusCode;
|
||||
use log::{debug, warn};
|
||||
use migration::sea_orm::prelude::{Decimal, Uuid};
|
||||
use migration::sea_orm::{
|
||||
self, ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter,
|
||||
TransactionTrait,
|
||||
};
|
||||
use serde_json::json;
|
||||
use siwe::{Message, VerificationOpts};
|
||||
use std::ops::Add;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use time::{Duration, OffsetDateTime};
|
||||
use ulid::Ulid;
|
||||
|
||||
/// `GET /user/login/:user_address` or `GET /user/login/:user_address/:message_eip` -- Start the "Sign In with Ethereum" (siwe) login flow.
|
||||
///
|
||||
/// `message_eip`s accepted:
|
||||
/// - eip191_bytes
|
||||
/// - eip191_hash
|
||||
/// - eip4361 (default)
|
||||
///
|
||||
/// Coming soon: eip1271
|
||||
///
|
||||
/// This is the initial entrypoint for logging in. Take the response from this endpoint and give it to your user's wallet for signing. POST the response to `/user/login`.
|
||||
///
|
||||
/// Rate limited by IP address.
|
||||
///
|
||||
/// At first I thought about checking that user_address is in our db,
|
||||
/// But there's no need to separate the registration and login flows.
|
||||
/// It is a better UX to just click "login with ethereum" and have the account created if it doesn't exist.
|
||||
/// We can prompt for an email and payment after they log in.
|
||||
#[debug_handler]
|
||||
pub async fn user_login_get(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
InsecureClientIp(ip): InsecureClientIp,
|
||||
// TODO: what does axum's error handling look like if the path fails to parse?
|
||||
Path(mut params): Path<HashMap<String, String>>,
|
||||
) -> Web3ProxyResponse {
|
||||
login_is_authorized(&app, ip).await?;
|
||||
|
||||
// create a message and save it in redis
|
||||
// TODO: how many seconds? get from config?
|
||||
let expire_seconds: usize = 20 * 60;
|
||||
|
||||
let nonce = Ulid::new();
|
||||
|
||||
let issued_at = OffsetDateTime::now_utc();
|
||||
|
||||
let expiration_time = issued_at.add(Duration::new(expire_seconds as i64, 0));
|
||||
|
||||
// TODO: allow ENS names here?
|
||||
let user_address: Address = params
|
||||
.remove("user_address")
|
||||
.ok_or(Web3ProxyError::BadRouting)?
|
||||
.parse()
|
||||
.or(Err(Web3ProxyError::ParseAddressError))?;
|
||||
|
||||
let login_domain = app
|
||||
.config
|
||||
.login_domain
|
||||
.clone()
|
||||
.unwrap_or_else(|| "llamanodes.com".to_string());
|
||||
|
||||
// TODO: get most of these from the app config
|
||||
let message = Message {
|
||||
// TODO: don't unwrap
|
||||
// TODO: accept a login_domain from the request?
|
||||
domain: login_domain.parse().unwrap(),
|
||||
address: user_address.to_fixed_bytes(),
|
||||
// TODO: config for statement
|
||||
statement: Some("🦙🦙🦙🦙🦙".to_string()),
|
||||
// TODO: don't unwrap
|
||||
uri: format!("https://{}/", login_domain).parse().unwrap(),
|
||||
version: siwe::Version::V1,
|
||||
chain_id: 1,
|
||||
expiration_time: Some(expiration_time.into()),
|
||||
issued_at: issued_at.into(),
|
||||
nonce: nonce.to_string(),
|
||||
not_before: None,
|
||||
request_id: None,
|
||||
resources: vec![],
|
||||
};
|
||||
|
||||
let db_conn = app.db_conn().web3_context("login requires a database")?;
|
||||
|
||||
// massage types to fit in the database. sea-orm does not make this very elegant
|
||||
let uuid = Uuid::from_u128(nonce.into());
|
||||
// we add 1 to expire_seconds just to be sure the database has the key for the full expiration_time
|
||||
let expires_at = Utc
|
||||
.timestamp_opt(expiration_time.unix_timestamp() + 1, 0)
|
||||
.unwrap();
|
||||
|
||||
// we do not store a maximum number of attempted logins. anyone can request so we don't want to allow DOS attacks
|
||||
// add a row to the database for this user
|
||||
let user_pending_login = pending_login::ActiveModel {
|
||||
id: sea_orm::NotSet,
|
||||
nonce: sea_orm::Set(uuid),
|
||||
message: sea_orm::Set(message.to_string()),
|
||||
expires_at: sea_orm::Set(expires_at),
|
||||
imitating_user: sea_orm::Set(None),
|
||||
};
|
||||
|
||||
user_pending_login
|
||||
.save(&db_conn)
|
||||
.await
|
||||
.web3_context("saving user's pending_login")?;
|
||||
|
||||
// there are multiple ways to sign messages and not all wallets support them
|
||||
// TODO: default message eip from config?
|
||||
let message_eip = params
|
||||
.remove("message_eip")
|
||||
.unwrap_or_else(|| "eip4361".to_string());
|
||||
|
||||
let message: String = match message_eip.as_str() {
|
||||
"eip191_bytes" => Bytes::from(message.eip191_bytes().unwrap()).to_string(),
|
||||
"eip191_hash" => Bytes::from(&message.eip191_hash().unwrap()).to_string(),
|
||||
"eip4361" => message.to_string(),
|
||||
_ => {
|
||||
return Err(Web3ProxyError::InvalidEip);
|
||||
}
|
||||
};
|
||||
|
||||
Ok(message.into_response())
|
||||
}
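// A minimal client-side sketch of the whole flow (GET the message above, sign it,
// POST it back). This is not part of web3_proxy itself; it assumes `reqwest` (with
// the "json" feature), `anyhow`, and an ethers `LocalWallet` are available, and it
// assumes the `PostLogin` JSON fields are named "msg" and "sig" as used in
// `user_login_post` below.
#[allow(dead_code)]
async fn example_siwe_login(
    proxy_base: &str,
    wallet: &ethers::signers::LocalWallet,
) -> anyhow::Result<serde_json::Value> {
    use ethers::signers::Signer;

    // 1. fetch the siwe message that `user_login_get` builds
    let siwe_text = reqwest::get(format!("{}/user/login/{:?}", proxy_base, wallet.address()))
        .await?
        .text()
        .await?;

    // 2. sign it with the wallet (eip191 personal_sign)
    let sig = wallet.sign_message(siwe_text.as_bytes()).await?;

    // 3. post the original message and the signature; the response carries the bearer token
    let login_response = reqwest::Client::new()
        .post(format!("{}/user/login", proxy_base))
        .json(&serde_json::json!({
            "msg": siwe_text,
            "sig": format!("0x{}", sig),
        }))
        .send()
        .await?
        .json()
        .await?;

    Ok(login_response)
}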
|
||||
|
||||
/// `POST /user/login` - Register or login by posting a signed "siwe" message.
|
||||
/// It is recommended to save the returned bearer token in a cookie.
|
||||
/// The bearer token can be used to authenticate other requests, such as getting the user's stats or modifying the user's profile.
|
||||
#[debug_handler]
|
||||
pub async fn user_login_post(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
InsecureClientIp(ip): InsecureClientIp,
|
||||
Query(query): Query<PostLoginQuery>,
|
||||
Json(payload): Json<PostLogin>,
|
||||
) -> Web3ProxyResponse {
|
||||
login_is_authorized(&app, ip).await?;
|
||||
|
||||
// TODO: this seems too verbose. how can we simply convert a String into a [u8; 65]
|
||||
let their_sig_bytes = Bytes::from_str(&payload.sig).web3_context("parsing sig")?;
|
||||
if their_sig_bytes.len() != 65 {
|
||||
return Err(Web3ProxyError::InvalidSignatureLength);
|
||||
}
|
||||
let mut their_sig: [u8; 65] = [0; 65];
|
||||
for x in 0..65 {
|
||||
their_sig[x] = their_sig_bytes[x]
|
||||
}
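// (Since the length was checked just above, this loop could also be written as
// `their_sig.copy_from_slice(their_sig_bytes.as_ref());` -- noted only as a
// possible cleanup, not a behavior change.)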
|
||||
|
||||
// we can't trust that they didn't tamper with the message in some way. like some clients return it hex encoded
|
||||
// TODO: checking 0x seems fragile, but I think it will be fine. siwe message text shouldn't ever start with 0x
|
||||
let their_msg: Message = if payload.msg.starts_with("0x") {
|
||||
let their_msg_bytes =
|
||||
Bytes::from_str(&payload.msg).web3_context("parsing payload message")?;
|
||||
|
||||
// TODO: lossy or no?
|
||||
String::from_utf8_lossy(their_msg_bytes.as_ref())
|
||||
.parse::<siwe::Message>()
|
||||
.web3_context("parsing hex string message")?
|
||||
} else {
|
||||
payload
|
||||
.msg
|
||||
.parse::<siwe::Message>()
|
||||
.web3_context("parsing string message")?
|
||||
};
|
||||
|
||||
// the only part of the message we will trust is their nonce
|
||||
// TODO: this is fragile. have a helper function/struct for redis keys
|
||||
let login_nonce = UserBearerToken::from_str(&their_msg.nonce)?;
|
||||
|
||||
// fetch the message we gave them from our database
|
||||
let db_replica = app
|
||||
.db_replica()
|
||||
.web3_context("Getting database connection")?;
|
||||
|
||||
// massage type for the db
|
||||
let login_nonce_uuid: Uuid = login_nonce.clone().into();
|
||||
|
||||
let user_pending_login = pending_login::Entity::find()
|
||||
.filter(pending_login::Column::Nonce.eq(login_nonce_uuid))
|
||||
.one(db_replica.conn())
|
||||
.await
|
||||
.web3_context("database error while finding pending_login")?
|
||||
.web3_context("login nonce not found")?;
|
||||
|
||||
let our_msg: siwe::Message = user_pending_login
|
||||
.message
|
||||
.parse()
|
||||
.web3_context("parsing siwe message")?;
|
||||
|
||||
// default options are fine. the message includes timestamp and domain and nonce
|
||||
let verify_config = VerificationOpts::default();
|
||||
|
||||
// Check with both verify and verify_eip191
|
||||
if let Err(err_1) = our_msg
|
||||
.verify(&their_sig, &verify_config)
|
||||
.await
|
||||
.web3_context("verifying signature against our local message")
|
||||
{
|
||||
// verification method 1 failed. try eip191
|
||||
if let Err(err_191) = our_msg
|
||||
.verify_eip191(&their_sig)
|
||||
.web3_context("verifying eip191 signature against our local message")
|
||||
{
|
||||
let db_conn = app
|
||||
.db_conn()
|
||||
.web3_context("deleting expired pending logins requires a db")?;
|
||||
|
||||
// delete ALL expired rows.
|
||||
let now = Utc::now();
|
||||
let delete_result = pending_login::Entity::delete_many()
|
||||
.filter(pending_login::Column::ExpiresAt.lte(now))
|
||||
.exec(&db_conn)
|
||||
.await?;
|
||||
|
||||
// TODO: emit a stat? if this is high something weird might be happening
|
||||
debug!("cleared expired pending_logins: {:?}", delete_result);
|
||||
|
||||
return Err(Web3ProxyError::EipVerificationFailed(
|
||||
Box::new(err_1),
|
||||
Box::new(err_191),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: limit columns or load whole user?
|
||||
let caller = user::Entity::find()
|
||||
.filter(user::Column::Address.eq(our_msg.address.as_ref()))
|
||||
.one(db_replica.conn())
|
||||
.await?;
|
||||
|
||||
let db_conn = app.db_conn().web3_context("login requires a db")?;
|
||||
|
||||
let (caller, user_rpc_keys, status_code) = match caller {
|
||||
None => {
|
||||
// user does not exist yet
|
||||
|
||||
// check the invite code
|
||||
// TODO: more advanced invite codes that set different request/minute and concurrency limits
|
||||
// Do nothing if the app config has no invite code (then there is no invite gating and the user can proceed with the free tier)
|
||||
|
||||
// Prematurely return if there is a wrong invite code
|
||||
if let Some(invite_code) = &app.config.invite_code {
|
||||
if query.invite_code.as_ref() != Some(invite_code) {
|
||||
return Err(Web3ProxyError::InvalidInviteCode);
|
||||
}
|
||||
}
|
||||
|
||||
let txn = db_conn.begin().await?;
|
||||
|
||||
// First add a user
|
||||
|
||||
// the only thing we need from them is an address
|
||||
// everything else is optional
|
||||
// TODO: different invite codes should allow different levels
|
||||
// TODO: maybe decrement a count on the invite code?
|
||||
// TODO: There will be two different transactions. The first one inserts the user, the second one marks the user as being referred
|
||||
let caller = user::ActiveModel {
|
||||
address: sea_orm::Set(our_msg.address.into()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let caller = caller.insert(&txn).await?;
|
||||
|
||||
// create the user's first api key
|
||||
let rpc_secret_key = RpcSecretKey::new();
|
||||
|
||||
let user_rpc_key = rpc_key::ActiveModel {
|
||||
user_id: sea_orm::Set(caller.id),
|
||||
secret_key: sea_orm::Set(rpc_secret_key.into()),
|
||||
description: sea_orm::Set(None),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let user_rpc_key = user_rpc_key
|
||||
.insert(&txn)
|
||||
.await
|
||||
.web3_context("Failed saving new user key")?;
|
||||
|
||||
// We should also create the balance entry ...
|
||||
let user_balance = balance::ActiveModel {
|
||||
user_id: sea_orm::Set(caller.id),
|
||||
available_balance: sea_orm::Set(Decimal::new(0, 0)),
|
||||
used_balance: sea_orm::Set(Decimal::new(0, 0)),
|
||||
..Default::default()
|
||||
};
|
||||
user_balance.insert(&txn).await?;
|
||||
|
||||
let user_rpc_keys = vec![user_rpc_key];
|
||||
|
||||
// Also add a part for the invite code, i.e. who invited this guy
|
||||
|
||||
// save the user and key to the database
|
||||
txn.commit().await?;
|
||||
|
||||
let txn = db_conn.begin().await?;
|
||||
// First, optionally catch a referral code from the parameters if there is any
|
||||
debug!("Refferal code is: {:?}", payload.referral_code);
|
||||
if let Some(referral_code) = payload.referral_code.as_ref() {
|
||||
// Look the referral code up in the database
|
||||
warn!("Using register referral code: {:?}", referral_code);
|
||||
let user_referrer = referrer::Entity::find()
|
||||
.filter(referrer::Column::ReferralCode.eq(referral_code))
|
||||
.one(db_replica.conn())
|
||||
.await?
|
||||
.ok_or(Web3ProxyError::UnknownReferralCode)?;
|
||||
|
||||
// Create a new item in the database,
|
||||
// recording this user as a referee of that referrer (and ignoring a duplicate insert, if there is any...)
|
||||
// First person to make the referral gets all credits
|
||||
// Generate a random referral code ...
|
||||
let used_referral = referee::ActiveModel {
|
||||
used_referral_code: sea_orm::Set(user_referrer.id),
|
||||
user_id: sea_orm::Set(caller.id),
|
||||
credits_applied_for_referee: sea_orm::Set(false),
|
||||
credits_applied_for_referrer: sea_orm::Set(Decimal::new(0, 10)),
|
||||
..Default::default()
|
||||
};
|
||||
used_referral.insert(&txn).await?;
|
||||
}
|
||||
txn.commit().await?;
|
||||
|
||||
(caller, user_rpc_keys, StatusCode::CREATED)
|
||||
}
|
||||
Some(caller) => {
|
||||
// A user that already exists can also redeem a referral code retroactively...
|
||||
let txn = db_conn.begin().await?;
|
||||
// TODO: Move this into a common variable outside ...
|
||||
// First, optionally catch a referral code from the parameters if there is any
|
||||
if let Some(referral_code) = payload.referral_code.as_ref() {
|
||||
// Look the referral code up in the database
|
||||
warn!("Using referral code: {:?}", referral_code);
|
||||
let user_referrer = referrer::Entity::find()
|
||||
.filter(referrer::Column::ReferralCode.eq(referral_code))
|
||||
.one(db_replica.conn())
|
||||
.await?
|
||||
.ok_or(Web3ProxyError::BadRequest(format!(
|
||||
"The referral_link you provided does not exist {}",
|
||||
referral_code
|
||||
)))?;
|
||||
|
||||
// Create a new item in the database,
|
||||
// recording this user as a referee of that referrer (and ignoring a duplicate insert, if there is any...)
|
||||
// First person to make the referral gets all credits
|
||||
// Generate a random referral code ...
|
||||
let used_referral = referee::ActiveModel {
|
||||
used_referral_code: sea_orm::Set(user_referrer.id),
|
||||
user_id: sea_orm::Set(caller.id),
|
||||
credits_applied_for_referee: sea_orm::Set(false),
|
||||
credits_applied_for_referrer: sea_orm::Set(Decimal::new(0, 10)),
|
||||
..Default::default()
|
||||
};
|
||||
used_referral.insert(&txn).await?;
|
||||
}
|
||||
txn.commit().await?;
|
||||
|
||||
// the user is already registered
|
||||
let user_rpc_keys = rpc_key::Entity::find()
|
||||
.filter(rpc_key::Column::UserId.eq(caller.id))
|
||||
.all(db_replica.conn())
|
||||
.await
|
||||
.web3_context("failed loading user's key")?;
|
||||
|
||||
(caller, user_rpc_keys, StatusCode::OK)
|
||||
}
|
||||
};
|
||||
|
||||
// create a bearer token for the user.
|
||||
let user_bearer_token = UserBearerToken::default();
|
||||
|
||||
// json response with everything in it
|
||||
// we could return just the bearer token, but I think they will always request api keys and the user profile
|
||||
let response_json = json!({
|
||||
"rpc_keys": user_rpc_keys
|
||||
.into_iter()
|
||||
.map(|user_rpc_key| (user_rpc_key.id, user_rpc_key))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
"bearer_token": user_bearer_token,
|
||||
"user": caller,
|
||||
});
|
||||
|
||||
let response = (status_code, Json(response_json)).into_response();
|
||||
|
||||
// add bearer to the database
|
||||
|
||||
// expire in 4 weeks
|
||||
let expires_at = Utc::now()
|
||||
.checked_add_signed(chrono::Duration::weeks(4))
|
||||
.unwrap();
|
||||
|
||||
let user_login = login::ActiveModel {
|
||||
id: sea_orm::NotSet,
|
||||
bearer_token: sea_orm::Set(user_bearer_token.uuid()),
|
||||
user_id: sea_orm::Set(caller.id),
|
||||
expires_at: sea_orm::Set(expires_at),
|
||||
read_only: sea_orm::Set(false),
|
||||
};
|
||||
|
||||
user_login
|
||||
.save(&db_conn)
|
||||
.await
|
||||
.web3_context("saving user login")?;
|
||||
|
||||
if let Err(err) = user_pending_login
|
||||
.into_active_model()
|
||||
.delete(&db_conn)
|
||||
.await
|
||||
{
|
||||
warn!("Failed to delete nonce:{}: {}", login_nonce.0, err);
|
||||
}
|
||||
|
||||
Ok(response)
|
||||
}
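// Once stored, the bearer token from the response authenticates the other
// endpoints in this module via the standard header, e.g. (token value elided):
//
//   GET /user
//   Authorization: Bearer <bearer_token from the login response>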
|
||||
|
||||
/// `POST /user/logout` - Forget the bearer token in the `Authentication` header.
|
||||
#[debug_handler]
|
||||
pub async fn user_logout_post(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let user_bearer = UserBearerToken::try_from(bearer)?;
|
||||
|
||||
let db_conn = app
|
||||
.db_conn()
|
||||
.web3_context("database needed for user logout")?;
|
||||
|
||||
if let Err(err) = login::Entity::delete_many()
|
||||
.filter(login::Column::BearerToken.eq(user_bearer.uuid()))
|
||||
.exec(&db_conn)
|
||||
.await
|
||||
{
|
||||
debug!("Failed to delete {}: {}", user_bearer.redis_key(), err);
|
||||
}
|
||||
|
||||
let now = Utc::now();
|
||||
|
||||
// also delete any expired logins
|
||||
let delete_result = login::Entity::delete_many()
|
||||
.filter(login::Column::ExpiresAt.lte(now))
|
||||
.exec(&db_conn)
|
||||
.await;
|
||||
|
||||
debug!("Deleted expired logins: {:?}", delete_result);
|
||||
|
||||
// also delete any expired pending logins
|
||||
let delete_result = login::Entity::delete_many()
|
||||
.filter(login::Column::ExpiresAt.lte(now))
|
||||
.exec(&db_conn)
|
||||
.await;
|
||||
|
||||
debug!("Deleted expired pending logins: {:?}", delete_result);
|
||||
|
||||
// TODO: what should the response be? probably json something
|
||||
Ok("goodbye".into_response())
|
||||
}
|
83
web3_proxy/src/frontend/users/mod.rs
Normal file
@ -0,0 +1,83 @@
|
||||
//! Handle registration, logins, and managing account data.
|
||||
pub mod authentication;
|
||||
pub mod payment;
|
||||
pub mod referral;
|
||||
pub mod rpc_keys;
|
||||
pub mod stats;
|
||||
pub mod subuser;
|
||||
|
||||
use super::errors::{Web3ProxyErrorContext, Web3ProxyResponse};
|
||||
use crate::app::Web3ProxyApp;
|
||||
|
||||
use axum::{
|
||||
headers::{authorization::Bearer, Authorization},
|
||||
response::IntoResponse,
|
||||
Extension, Json, TypedHeader,
|
||||
};
|
||||
use axum_macros::debug_handler;
|
||||
use entities;
|
||||
use entities::user;
|
||||
use migration::sea_orm::{self, ActiveModelTrait};
|
||||
use serde::Deserialize;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// `GET /user` -- Use a bearer token to get the user's profile.
|
||||
///
|
||||
/// - the email address of a user if they opted in to get contacted via email
|
||||
///
|
||||
/// TODO: this will change as we add better support for secondary users.
|
||||
#[debug_handler]
|
||||
pub async fn user_get(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer_token)): TypedHeader<Authorization<Bearer>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let (user, _semaphore) = app.bearer_is_authorized(bearer_token).await?;
|
||||
|
||||
Ok(Json(user).into_response())
|
||||
}
|
||||
|
||||
/// the JSON input to the `post_user` handler.
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct UserPost {
|
||||
email: Option<String>,
|
||||
}
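// Example bodies for `POST /user` (illustrative): `{"email": "me@example.com"}`
// sets the address and, per the handler below, `{"email": ""}` clears it.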
|
||||
|
||||
/// `POST /user` -- modify the account connected to the bearer token in the `Authentication` header.
|
||||
#[debug_handler]
|
||||
pub async fn user_post(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer_token)): TypedHeader<Authorization<Bearer>>,
|
||||
Json(payload): Json<UserPost>,
|
||||
) -> Web3ProxyResponse {
|
||||
let (user, _semaphore) = app.bearer_is_authorized(bearer_token).await?;
|
||||
|
||||
let mut user: user::ActiveModel = user.into();
|
||||
|
||||
// update the email address
|
||||
if let Some(x) = payload.email {
|
||||
// TODO: only Set if no change
|
||||
if x.is_empty() {
|
||||
user.email = sea_orm::Set(None);
|
||||
} else {
|
||||
// TODO: do some basic validation
|
||||
// TODO: don't set immediately, send a confirmation email first
|
||||
// TODO: compare first? or is sea orm smart enough to do that for us?
|
||||
user.email = sea_orm::Set(Some(x));
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: what else can we update here? password hash? subscription to newsletter?
|
||||
|
||||
let user = if user.is_changed() {
|
||||
let db_conn = app.db_conn().web3_context("Getting database connection")?;
|
||||
|
||||
user.save(&db_conn).await?
|
||||
} else {
|
||||
// no changes. no need to touch the database
|
||||
user
|
||||
};
|
||||
|
||||
let user: user::Model = user.try_into().web3_context("Returning updated user")?;
|
||||
|
||||
Ok(Json(user).into_response())
|
||||
}
|
499
web3_proxy/src/frontend/users/payment.rs
Normal file
@ -0,0 +1,499 @@
|
||||
use crate::app::Web3ProxyApp;
|
||||
use crate::frontend::authorization::Authorization as InternalAuthorization;
|
||||
use crate::frontend::errors::{Web3ProxyError, Web3ProxyResponse};
|
||||
use crate::rpcs::request::OpenRequestResult;
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::{
|
||||
extract::Path,
|
||||
headers::{authorization::Bearer, Authorization},
|
||||
response::IntoResponse,
|
||||
Extension, Json, TypedHeader,
|
||||
};
|
||||
use axum_macros::debug_handler;
|
||||
use entities::{balance, increase_on_chain_balance_receipt, user, user_tier};
|
||||
use ethers::abi::{AbiEncode, ParamType};
|
||||
use ethers::types::{Address, TransactionReceipt, H256, U256};
|
||||
use ethers::utils::{hex, keccak256};
|
||||
use hashbrown::HashMap;
|
||||
use hex_fmt::HexFmt;
|
||||
use http::StatusCode;
|
||||
use log::{debug, info, warn, Level};
|
||||
use migration::sea_orm;
|
||||
use migration::sea_orm::prelude::Decimal;
|
||||
use migration::sea_orm::ActiveModelTrait;
|
||||
use migration::sea_orm::ColumnTrait;
|
||||
use migration::sea_orm::EntityTrait;
|
||||
use migration::sea_orm::IntoActiveModel;
|
||||
use migration::sea_orm::QueryFilter;
|
||||
use migration::sea_orm::TransactionTrait;
|
||||
use serde_json::json;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Implements any logic related to payments
|
||||
/// Moved out of the main "users" module, which was getting cluttered.
|
||||
///
|
||||
/// `GET /user/balance` -- Use a bearer token to get the user's balance and spend.
|
||||
///
|
||||
/// - show balance in USD
|
||||
/// - show deposits history (currency, amounts, transaction id)
|
||||
#[debug_handler]
|
||||
pub async fn user_balance_get(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let (_user, _semaphore) = app.bearer_is_authorized(bearer).await?;
|
||||
|
||||
let db_replica = app.db_replica().context("Getting database connection")?;
|
||||
|
||||
// Just return the balance for the user
|
||||
let user_balance = match balance::Entity::find()
|
||||
.filter(balance::Column::UserId.eq(_user.id))
|
||||
.one(db_replica.conn())
|
||||
.await?
|
||||
{
|
||||
Some(x) => x.available_balance,
|
||||
None => Decimal::from(0), // That means the user has no balance as of yet
|
||||
// (user exists, but balance entry does not exist)
|
||||
// In that case add this guy here
|
||||
// Err(FrontendErrorResponse::BadRequest("User not found!"))
|
||||
};
|
||||
|
||||
let mut response = HashMap::new();
|
||||
response.insert("balance", json!(user_balance));
|
||||
|
||||
// TODO: Gotta create a new table for the spend part
|
||||
Ok(Json(response).into_response())
|
||||
}
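// For reference, the JSON this handler returns contains only the balance, e.g.
// (value illustrative; the exact `Decimal` representation depends on which
// rust_decimal serde feature is enabled):
//
//   { "balance": "25.0" }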
|
||||
|
||||
/// `GET /user/deposits` -- Use a bearer token to get the user's balance and spend.
|
||||
///
|
||||
/// - shows a list of all deposits, including their chain-id, amount and tx-hash
|
||||
#[debug_handler]
|
||||
pub async fn user_deposits_get(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let (user, _semaphore) = app.bearer_is_authorized(bearer).await?;
|
||||
|
||||
let db_replica = app.db_replica().context("Getting database connection")?;
|
||||
|
||||
// Filter by user ...
|
||||
let receipts = increase_on_chain_balance_receipt::Entity::find()
|
||||
.filter(increase_on_chain_balance_receipt::Column::DepositToUserId.eq(user.id))
|
||||
.all(db_replica.conn())
|
||||
.await?;
|
||||
|
||||
// Return the response, all except the user ...
|
||||
let mut response = HashMap::new();
|
||||
let receipts = receipts
|
||||
.into_iter()
|
||||
.map(|x| {
|
||||
let mut out = HashMap::new();
|
||||
out.insert("amount", serde_json::Value::String(x.amount.to_string()));
|
||||
out.insert("chain_id", serde_json::Value::Number(x.chain_id.into()));
|
||||
out.insert("tx_hash", serde_json::Value::String(x.tx_hash));
|
||||
out
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
response.insert(
|
||||
"user",
|
||||
json!(format!("{:?}", Address::from_slice(&user.address))),
|
||||
);
|
||||
response.insert("deposits", json!(receipts));
|
||||
|
||||
Ok(Json(response).into_response())
|
||||
}
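// Shape of the response built above (values illustrative; `tx_hash` is shown
// without a 0x prefix, matching how the POST handler encodes it for lookups):
//
//   {
//     "user": "0x00000000000000000000000000000000000000ff",
//     "deposits": [
//       { "amount": "100.0", "chain_id": 1, "tx_hash": "abc123..." }
//     ]
//   }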
|
||||
|
||||
/// `POST /user/balance/:tx_hash` -- Manually process a confirmed txid to update a user's balance.
|
||||
///
|
||||
/// We will subscribe to events to watch for any user deposits, but sometimes events can be missed.
|
||||
/// TODO: change this. just have a /tx/:txhash that is open to anyone. rate limit like we rate limit /login
|
||||
#[debug_handler]
|
||||
pub async fn user_balance_post(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
Path(mut params): Path<HashMap<String, String>>,
|
||||
) -> Web3ProxyResponse {
|
||||
// Requiring a login here also keeps people from spamming this endpoint, since it is not "cheap"
|
||||
// Check that the user is logged-in and authorized. We don't need a semaphore here btw
|
||||
let (_, _semaphore) = app.bearer_is_authorized(bearer).await?;
|
||||
|
||||
// Get the transaction hash, and the amount that the user wants to top up by.
|
||||
// Let's say that for now, 1 credit is equivalent to 1 dollar (assuming any stablecoin has a 1:1 peg)
|
||||
let tx_hash: H256 = params
|
||||
.remove("tx_hash")
|
||||
// TODO: map_err so this becomes a 500. routing must be bad
|
||||
.ok_or(Web3ProxyError::BadRequest(
|
||||
"You have not provided the tx_hash in which you paid in".to_string(),
|
||||
))?
|
||||
.parse()
|
||||
.context("unable to parse tx_hash")?;
|
||||
|
||||
let db_conn = app.db_conn().context("query_user_stats needs a db")?;
|
||||
let db_replica = app
|
||||
.db_replica()
|
||||
.context("query_user_stats needs a db replica")?;
|
||||
|
||||
// Return an error right away if the tx was already added
|
||||
let receipt = increase_on_chain_balance_receipt::Entity::find()
|
||||
.filter(increase_on_chain_balance_receipt::Column::TxHash.eq(hex::encode(tx_hash)))
|
||||
.one(&db_conn)
|
||||
.await?;
|
||||
if receipt.is_some() {
|
||||
return Err(Web3ProxyError::BadRequest(
|
||||
"The transaction you provided has already been accounted for!".to_string(),
|
||||
));
|
||||
}
|
||||
debug!("Receipt: {:?}", receipt);
|
||||
|
||||
// Iterate through all logs, and add them to the transaction list if there is any
|
||||
// Address will be hardcoded in the config
|
||||
let authorization = Arc::new(InternalAuthorization::internal(None).unwrap());
|
||||
|
||||
// Just make an rpc request; it is unclear whether all of this heavyweight machinery is needed
|
||||
let transaction_receipt: TransactionReceipt = match app
|
||||
.balanced_rpcs
|
||||
.wait_for_best_rpc(&authorization, None, &mut vec![], None, None, None)
|
||||
.await
|
||||
{
|
||||
Ok(OpenRequestResult::Handle(handle)) => {
|
||||
debug!(
|
||||
"Params are: {:?}",
|
||||
&vec![format!("0x{}", hex::encode(tx_hash))]
|
||||
);
|
||||
handle
|
||||
.request(
|
||||
"eth_getTransactionReceipt",
|
||||
&vec![format!("0x{}", hex::encode(tx_hash))],
|
||||
Level::Trace.into(),
|
||||
)
|
||||
.await
|
||||
// TODO: What kind of error would be here
|
||||
.map_err(|err| Web3ProxyError::Anyhow(err.into()))
|
||||
}
|
||||
Ok(_) => {
|
||||
// TODO: @Brllan Is this the right error message?
|
||||
Err(Web3ProxyError::NoHandleReady)
|
||||
}
|
||||
Err(err) => {
|
||||
log::trace!(
|
||||
"cancelled funneling transaction {} from: {:?}",
|
||||
tx_hash,
|
||||
err,
|
||||
);
|
||||
Err(err)
|
||||
}
|
||||
}?;
|
||||
debug!("Transaction receipt is: {:?}", transaction_receipt);
|
||||
let accepted_token: Address = match app
|
||||
.balanced_rpcs
|
||||
.wait_for_best_rpc(&authorization, None, &mut vec![], None, None, None)
|
||||
.await
|
||||
{
|
||||
Ok(OpenRequestResult::Handle(handle)) => {
|
||||
let mut accepted_tokens_request_object: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::Map::new();
|
||||
// We want to send a request to the contract
|
||||
accepted_tokens_request_object.insert(
|
||||
"to".to_owned(),
|
||||
serde_json::Value::String(format!(
|
||||
"{:?}",
|
||||
app.config.deposit_factory_contract.clone()
|
||||
)),
|
||||
);
|
||||
// We then want to include the function that we want to call
|
||||
accepted_tokens_request_object.insert(
|
||||
"data".to_owned(),
|
||||
serde_json::Value::String(format!(
|
||||
"0x{}",
|
||||
HexFmt(keccak256("get_approved_tokens()".to_owned().into_bytes()))
|
||||
)),
|
||||
// hex::encode(
|
||||
);
|
||||
let params = serde_json::Value::Array(vec![
|
||||
serde_json::Value::Object(accepted_tokens_request_object),
|
||||
serde_json::Value::String("latest".to_owned()),
|
||||
]);
|
||||
debug!("Params are: {:?}", ¶ms);
|
||||
let accepted_token: String = handle
|
||||
.request("eth_call", ¶ms, Level::Trace.into())
|
||||
.await
|
||||
// TODO: What kind of error would be here
|
||||
.map_err(|err| Web3ProxyError::Anyhow(err.into()))?;
|
||||
// Read the last
|
||||
debug!("Accepted token response is: {:?}", accepted_token);
|
||||
accepted_token[accepted_token.len() - 40..]
|
||||
.parse::<Address>()
|
||||
.map_err(|err| Web3ProxyError::Anyhow(err.into()))
|
||||
}
|
||||
Ok(_) => {
|
||||
// TODO: @Brllan Is this the right error message?
|
||||
Err(Web3ProxyError::NoHandleReady)
|
||||
}
|
||||
Err(err) => {
|
||||
log::trace!(
|
||||
"cancelled funneling transaction {} from: {:?}",
|
||||
tx_hash,
|
||||
err,
|
||||
);
|
||||
Err(err)
|
||||
}
|
||||
}?;
|
||||
debug!("Accepted token is: {:?}", accepted_token);
|
||||
let decimals: u32 = match app
|
||||
.balanced_rpcs
|
||||
.wait_for_best_rpc(&authorization, None, &mut vec![], None, None, None)
|
||||
.await
|
||||
{
|
||||
Ok(OpenRequestResult::Handle(handle)) => {
|
||||
// Now get decimals points of the stablecoin
|
||||
let mut token_decimals_request_object: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::Map::new();
|
||||
token_decimals_request_object.insert(
|
||||
"to".to_owned(),
|
||||
serde_json::Value::String(format!("0x{}", HexFmt(accepted_token))),
|
||||
);
|
||||
token_decimals_request_object.insert(
|
||||
"data".to_owned(),
|
||||
serde_json::Value::String(format!(
|
||||
"0x{}",
|
||||
HexFmt(keccak256("decimals()".to_owned().into_bytes()))
|
||||
)),
|
||||
);
|
||||
let params = serde_json::Value::Array(vec![
|
||||
serde_json::Value::Object(token_decimals_request_object),
|
||||
serde_json::Value::String("latest".to_owned()),
|
||||
]);
|
||||
debug!("ERC20 Decimal request params are: {:?}", ¶ms);
|
||||
let decimals: String = handle
|
||||
.request("eth_call", ¶ms, Level::Trace.into())
|
||||
.await
|
||||
.map_err(|err| Web3ProxyError::Anyhow(err.into()))?;
|
||||
debug!("Decimals response is: {:?}", decimals);
|
||||
u32::from_str_radix(&decimals[2..], 16)
|
||||
.map_err(|err| Web3ProxyError::Anyhow(err.into()))
|
||||
}
|
||||
Ok(_) => {
|
||||
// TODO: @Brllan Is this the right error message?
|
||||
Err(Web3ProxyError::NoHandleReady)
|
||||
}
|
||||
Err(err) => {
|
||||
log::trace!(
|
||||
"cancelled funneling transaction {} from: {:?}",
|
||||
tx_hash,
|
||||
err,
|
||||
);
|
||||
Err(err)
|
||||
}
|
||||
}?;
|
||||
debug!("Decimals are: {:?}", decimals);
|
||||
debug!("Tx receipt: {:?}", transaction_receipt);
|
||||
|
||||
// Go through all logs, this should prob capture it,
|
||||
// At least according to this SE logs are just concatenations of the underlying types (like a struct..)
|
||||
// https://ethereum.stackexchange.com/questions/87653/how-to-decode-log-event-of-my-transaction-log
|
||||
|
||||
let deposit_contract = match app.config.deposit_factory_contract {
|
||||
Some(x) => Ok(x),
|
||||
None => Err(Web3ProxyError::Anyhow(anyhow!(
|
||||
"A deposit_contract must be provided in the config to parse payments"
|
||||
))),
|
||||
}?;
|
||||
let deposit_topic = match app.config.deposit_topic {
|
||||
Some(x) => Ok(x),
|
||||
None => Err(Web3ProxyError::Anyhow(anyhow!(
|
||||
"A deposit_topic must be provided in the config to parse payments"
|
||||
))),
|
||||
}?;
|
||||
|
||||
// Make sure there is only a single log within that transaction ...
|
||||
// I don't know how to best cover the case that there might be multiple logs inside
|
||||
|
||||
for log in transaction_receipt.logs {
|
||||
if log.address != deposit_contract {
|
||||
debug!(
|
||||
"Out: Log is not relevant, as it is not directed to the deposit contract {:?} {:?}",
|
||||
format!("{:?}", log.address),
|
||||
deposit_contract
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Get the topics out
|
||||
let topic: H256 = log.topics.get(0).unwrap().to_owned();
|
||||
if topic != deposit_topic {
|
||||
debug!(
|
||||
"Out: Topic is not relevant: {:?} {:?}",
|
||||
topic, deposit_topic
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// TODO: Will this work? Depends how logs are encoded
|
||||
let (recipient_account, token, amount): (Address, Address, U256) = match ethers::abi::decode(
|
||||
&[
|
||||
ParamType::Address,
|
||||
ParamType::Address,
|
||||
ParamType::Uint(256usize),
|
||||
],
|
||||
&log.data,
|
||||
) {
|
||||
Ok(tpl) => (
|
||||
tpl.get(0)
|
||||
.unwrap()
|
||||
.clone()
|
||||
.into_address()
|
||||
.context("Could not decode recipient")?,
|
||||
tpl.get(1)
|
||||
.unwrap()
|
||||
.clone()
|
||||
.into_address()
|
||||
.context("Could not decode token")?,
|
||||
tpl.get(2)
|
||||
.unwrap()
|
||||
.clone()
|
||||
.into_uint()
|
||||
.context("Could not decode amount")?,
|
||||
),
|
||||
Err(err) => {
|
||||
warn!("Out: Could not decode! {:?}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
// return early if amount is 0
|
||||
if amount == U256::from(0) {
|
||||
warn!(
|
||||
"Out: Found log has amount = 0 {:?}. This should never be the case according to the smart contract",
|
||||
amount
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip if no accepted token. Right now we only accept a single stablecoin as input
|
||||
if token != accepted_token {
|
||||
warn!(
|
||||
"Out: Token is not accepted: {:?} != {:?}",
|
||||
token, accepted_token
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
info!(
|
||||
"Found deposit transaction for: {:?} {:?} {:?}",
|
||||
recipient_account, token, amount
|
||||
);
|
||||
|
||||
// Encoding is inefficient, revisit later
|
||||
let recipient = match user::Entity::find()
|
||||
.filter(user::Column::Address.eq(&recipient_account.encode()[12..]))
|
||||
.one(db_replica.conn())
|
||||
.await?
|
||||
{
|
||||
Some(x) => Ok(x),
|
||||
None => Err(Web3ProxyError::BadRequest(
|
||||
"The user must have signed up first. They are currently not signed up!".to_string(),
|
||||
)),
|
||||
}?;
|
||||
|
||||
// For now we only accept stablecoins
|
||||
// And we hardcode the peg (later we would have to depeg this, for example
|
||||
// 1$ = Decimal(1) for any stablecoin
|
||||
// TODO: Let's assume that people don't buy too much at _once_, we do support >$1M which should be fine for now
|
||||
debug!("Arithmetic is: {:?} {:?}", amount, decimals);
|
||||
debug!(
|
||||
"Decimals arithmetic is: {:?} {:?}",
|
||||
Decimal::from(amount.as_u128()),
|
||||
Decimal::from(10_u64.pow(decimals))
|
||||
);
|
||||
let mut amount = Decimal::from(amount.as_u128());
|
||||
let _ = amount.set_scale(decimals);
|
||||
debug!("Amount is: {:?}", amount);
|
||||
|
||||
// Check if the item is in the database. If it is not, then add it into the database
|
||||
let user_balance = balance::Entity::find()
|
||||
.filter(balance::Column::UserId.eq(recipient.id))
|
||||
.one(&db_conn)
|
||||
.await?;
|
||||
|
||||
// Get the premium user-tier
|
||||
let premium_user_tier = user_tier::Entity::find()
|
||||
.filter(user_tier::Column::Title.eq("Premium"))
|
||||
.one(&db_conn)
|
||||
.await?
|
||||
.context("Could not find 'Premium' Tier in user-database")?;
|
||||
|
||||
let txn = db_conn.begin().await?;
|
||||
match user_balance {
|
||||
Some(user_balance) => {
|
||||
let balance_plus_amount = user_balance.available_balance + amount;
|
||||
info!("New user balance is: {:?}", balance_plus_amount);
|
||||
// Update the entry, adding the balance
|
||||
let mut active_user_balance = user_balance.into_active_model();
|
||||
active_user_balance.available_balance = sea_orm::Set(balance_plus_amount);
|
||||
|
||||
if balance_plus_amount >= Decimal::new(10, 0) {
|
||||
// Also make the user premium at this point ...
|
||||
let mut active_recipient = recipient.clone().into_active_model();
|
||||
// Make the recipient premium "Effectively Unlimited"
|
||||
active_recipient.user_tier_id = sea_orm::Set(premium_user_tier.id);
|
||||
active_recipient.save(&txn).await?;
|
||||
}
|
||||
|
||||
debug!("New user balance model is: {:?}", active_user_balance);
|
||||
active_user_balance.save(&txn).await?;
|
||||
// txn.commit().await?;
|
||||
// user_balance
|
||||
}
|
||||
None => {
|
||||
// Create the entry with the respective balance
|
||||
let active_user_balance = balance::ActiveModel {
|
||||
available_balance: sea_orm::ActiveValue::Set(amount),
|
||||
user_id: sea_orm::ActiveValue::Set(recipient.id),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
if amount >= Decimal::new(10, 0) {
|
||||
// Also make the user premium at this point ...
|
||||
let mut active_recipient = recipient.clone().into_active_model();
|
||||
// Make the recipient premium "Effectively Unlimited"
|
||||
active_recipient.user_tier_id = sea_orm::Set(premium_user_tier.id);
|
||||
active_recipient.save(&txn).await?;
|
||||
}
|
||||
|
||||
info!("New user balance model is: {:?}", active_user_balance);
|
||||
active_user_balance.save(&txn).await?;
|
||||
// txn.commit().await?;
|
||||
// user_balance // .try_into_model().unwrap()
|
||||
}
|
||||
};
|
||||
debug!("Setting tx_hash: {:?}", tx_hash);
|
||||
let receipt = increase_on_chain_balance_receipt::ActiveModel {
|
||||
tx_hash: sea_orm::ActiveValue::Set(hex::encode(tx_hash)),
|
||||
chain_id: sea_orm::ActiveValue::Set(app.config.chain_id),
|
||||
amount: sea_orm::ActiveValue::Set(amount),
|
||||
deposit_to_user_id: sea_orm::ActiveValue::Set(recipient.id),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
receipt.save(&txn).await?;
|
||||
txn.commit().await?;
|
||||
debug!("Saved to db");
|
||||
|
||||
let response = (
|
||||
StatusCode::CREATED,
|
||||
Json(json!({
|
||||
"tx_hash": tx_hash,
|
||||
"amount": amount
|
||||
})),
|
||||
)
|
||||
.into_response();
|
||||
|
||||
// Return early if the log was added, assume there is at most one valid log per transaction
|
||||
return Ok(response);
|
||||
}
|
||||
|
||||
Err(Web3ProxyError::BadRequest(
|
||||
"No such transaction was found, or token is not supported!".to_string(),
|
||||
))
|
||||
}
|
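The deposit handler above decodes each matching log as an `(address recipient, address token, uint256 amount)` tuple and then rescales the raw `amount` by the token's `decimals` before crediting the balance. A minimal standalone sketch of just that decode-and-rescale step, using the same `ethers::abi` and `rust_decimal` calls the handler relies on (the function name and example values are illustrative, not part of the commit):

```
use anyhow::Context;
use ethers::abi::{decode, ParamType};
use ethers::types::{Address, U256};
use rust_decimal::Decimal;

/// Decode one deposit log's data into (recipient, token, amount) and
/// rescale the raw amount by the token's decimals.
fn decode_deposit(log_data: &[u8], decimals: u32) -> anyhow::Result<(Address, Address, Decimal)> {
    let tokens = decode(
        &[
            ParamType::Address,
            ParamType::Address,
            ParamType::Uint(256usize),
        ],
        log_data,
    )?;

    let recipient = tokens[0]
        .clone()
        .into_address()
        .context("could not decode recipient")?;
    let token = tokens[1]
        .clone()
        .into_address()
        .context("could not decode token")?;
    let raw_amount: U256 = tokens[2]
        .clone()
        .into_uint()
        .context("could not decode amount")?;

    // mirrors the handler: Decimal::from(u128) panics if the value exceeds
    // Decimal's 96-bit range, and set_scale moves the decimal point, so a raw
    // 10_000_000 with 6 decimals becomes "10.000000"
    let mut amount = Decimal::from(raw_amount.as_u128());
    amount.set_scale(decimals)?;

    Ok((recipient, token, amount))
}
```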
72  web3_proxy/src/frontend/users/referral.rs  Normal file
@ -0,0 +1,72 @@
//! Handle registration, logins, and managing account data.
use crate::app::Web3ProxyApp;
use crate::frontend::errors::Web3ProxyResponse;
use crate::referral_code::ReferralCode;
use anyhow::Context;
use axum::{
    extract::Query,
    headers::{authorization::Bearer, Authorization},
    response::IntoResponse,
    Extension, Json, TypedHeader,
};
use axum_macros::debug_handler;
use entities::referrer;
use hashbrown::HashMap;
use http::StatusCode;
use migration::sea_orm;
use migration::sea_orm::ActiveModelTrait;
use migration::sea_orm::ColumnTrait;
use migration::sea_orm::EntityTrait;
use migration::sea_orm::QueryFilter;
use serde_json::json;
use std::sync::Arc;

/// Create or get the existing referral link.
/// This is the link that the user can share with third parties to earn credits.
/// Applies to premium users only.
#[debug_handler]
pub async fn user_referral_link_get(
    Extension(app): Extension<Arc<Web3ProxyApp>>,
    TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
    Query(_params): Query<HashMap<String, String>>,
) -> Web3ProxyResponse {
    // First get the bearer token and check if the user is logged in
    let (user, _semaphore) = app.bearer_is_authorized(bearer).await?;

    let db_replica = app
        .db_replica()
        .context("getting replica db for the user's referral code")?;

    // Then get the referral token. If one doesn't exist, create one
    let user_referrer = referrer::Entity::find()
        .filter(referrer::Column::UserId.eq(user.id))
        .one(db_replica.conn())
        .await?;

    let (referral_code, status_code) = match user_referrer {
        Some(x) => (x.referral_code, StatusCode::OK),
        None => {
            // Connect to the database for writes
            let db_conn = app.db_conn().context("getting db_conn")?;

            let referral_code = ReferralCode::default().to_string();

            let referrer_entry = referrer::ActiveModel {
                user_id: sea_orm::ActiveValue::Set(user.id),
                referral_code: sea_orm::ActiveValue::Set(referral_code.clone()),
                ..Default::default()
            };
            referrer_entry.save(&db_conn).await?;

            (referral_code, StatusCode::CREATED)
        }
    };

    let response_json = json!({
        "referral_code": referral_code,
        "user": user,
    });

    let response = (status_code, Json(response_json)).into_response();
    Ok(response)
}
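For context, a hypothetical client-side call against this handler could look like the sketch below. The `/user/referral` path, base URL, and bearer token are placeholders (the route registration is not part of this diff); the bearer-auth header and the `referral_code` field in the JSON body follow from the handler above.

```
use serde_json::Value;

// Hypothetical usage sketch: fetch (or lazily create) the caller's referral code.
async fn fetch_referral_code(token: &str) -> anyhow::Result<String> {
    let resp: Value = reqwest::Client::new()
        .get("http://127.0.0.1:8544/user/referral")
        .bearer_auth(token)
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;

    // the handler responds with {"referral_code": ..., "user": ...}
    Ok(resp["referral_code"].as_str().unwrap_or_default().to_string())
}
```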
259  web3_proxy/src/frontend/users/rpc_keys.rs  Normal file
@ -0,0 +1,259 @@
|
||||
//! Handle registration, logins, and managing account data.
|
||||
use super::super::authorization::RpcSecretKey;
|
||||
use super::super::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResponse};
|
||||
use crate::app::Web3ProxyApp;
|
||||
use axum::headers::{Header, Origin, Referer, UserAgent};
|
||||
use axum::{
|
||||
headers::{authorization::Bearer, Authorization},
|
||||
response::IntoResponse,
|
||||
Extension, Json, TypedHeader,
|
||||
};
|
||||
use axum_macros::debug_handler;
|
||||
use entities;
|
||||
use entities::rpc_key;
|
||||
use entities::sea_orm_active_enums::TrackingLevel;
|
||||
use hashbrown::HashMap;
|
||||
use http::HeaderValue;
|
||||
use ipnet::IpNet;
|
||||
use itertools::Itertools;
|
||||
use migration::sea_orm::{
|
||||
self, ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter, TryIntoModel,
|
||||
};
|
||||
use serde::Deserialize;
|
||||
use serde_json::json;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// `GET /user/keys` -- Use a bearer token to get the user's api keys and their settings.
|
||||
#[debug_handler]
|
||||
pub async fn rpc_keys_get(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let (user, _semaphore) = app.bearer_is_authorized(bearer).await?;
|
||||
|
||||
let db_replica = app
|
||||
.db_replica()
|
||||
.web3_context("db_replica is required to fetch a user's keys")?;
|
||||
|
||||
let uks = rpc_key::Entity::find()
|
||||
.filter(rpc_key::Column::UserId.eq(user.id))
|
||||
.all(db_replica.conn())
|
||||
.await
|
||||
.web3_context("failed loading user's key")?;
|
||||
|
||||
let response_json = json!({
|
||||
"user_id": user.id,
|
||||
"user_rpc_keys": uks
|
||||
.into_iter()
|
||||
.map(|uk| (uk.id, uk))
|
||||
.collect::<HashMap::<_, _>>(),
|
||||
});
|
||||
|
||||
Ok(Json(response_json).into_response())
|
||||
}
|
||||
|
||||
/// `DELETE /user/keys` -- Use a bearer token to delete an existing key.
|
||||
#[debug_handler]
|
||||
pub async fn rpc_keys_delete(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
) -> Web3ProxyResponse {
|
||||
let (_user, _semaphore) = app.bearer_is_authorized(bearer).await?;
|
||||
|
||||
// TODO: think about how cascading deletes and billing should work
|
||||
Err(Web3ProxyError::NotImplemented)
|
||||
}
|
||||
|
||||
/// the JSON input to the `rpc_keys_management` handler.
|
||||
/// If `key_id` is set, it updates an existing key.
|
||||
/// If `key_id` is not set, it creates a new key.
|
||||
/// `log_request_method` cannot be change once the key is created
|
||||
/// `user_tier` cannot be changed here
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct UserKeyManagement {
|
||||
key_id: Option<u64>,
|
||||
active: Option<bool>,
|
||||
allowed_ips: Option<String>,
|
||||
allowed_origins: Option<String>,
|
||||
allowed_referers: Option<String>,
|
||||
allowed_user_agents: Option<String>,
|
||||
description: Option<String>,
|
||||
log_level: Option<TrackingLevel>,
|
||||
// TODO: enable log_revert_trace: Option<f64>,
|
||||
private_txs: Option<bool>,
|
||||
}
|
||||
|
||||
/// `POST /user/keys` or `PUT /user/keys` -- Use a bearer token to create or update an existing key.
|
||||
#[debug_handler]
|
||||
pub async fn rpc_keys_management(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
Json(payload): Json<UserKeyManagement>,
|
||||
) -> Web3ProxyResponse {
|
||||
// TODO: is there a way we can know if this is a PUT or POST? right now we can modify or create keys with either. though that probably doesn't matter
|
||||
|
||||
let (user, _semaphore) = app.bearer_is_authorized(bearer).await?;
|
||||
|
||||
let db_replica = app
|
||||
.db_replica()
|
||||
.web3_context("getting db for user's keys")?;
|
||||
|
||||
let mut uk = if let Some(existing_key_id) = payload.key_id {
|
||||
// get the key and make sure it belongs to the user
|
||||
rpc_key::Entity::find()
|
||||
.filter(rpc_key::Column::UserId.eq(user.id))
|
||||
.filter(rpc_key::Column::Id.eq(existing_key_id))
|
||||
.one(db_replica.conn())
|
||||
.await
|
||||
.web3_context("failed loading user's key")?
|
||||
.web3_context("key does not exist or is not controlled by this bearer token")?
|
||||
.into_active_model()
|
||||
} else {
|
||||
// make a new key
|
||||
// TODO: limit to 10 keys?
|
||||
let secret_key = RpcSecretKey::new();
|
||||
|
||||
let log_level = payload
|
||||
.log_level
|
||||
.web3_context("log level must be 'none', 'detailed', or 'aggregated'")?;
|
||||
|
||||
rpc_key::ActiveModel {
|
||||
user_id: sea_orm::Set(user.id),
|
||||
secret_key: sea_orm::Set(secret_key.into()),
|
||||
log_level: sea_orm::Set(log_level),
|
||||
..Default::default()
|
||||
}
|
||||
};
|
||||
|
||||
// TODO: do we need null descriptions? default to empty string should be fine, right?
|
||||
if let Some(description) = payload.description {
|
||||
if description.is_empty() {
|
||||
uk.description = sea_orm::Set(None);
|
||||
} else {
|
||||
uk.description = sea_orm::Set(Some(description));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(private_txs) = payload.private_txs {
|
||||
uk.private_txs = sea_orm::Set(private_txs);
|
||||
}
|
||||
|
||||
if let Some(active) = payload.active {
|
||||
uk.active = sea_orm::Set(active);
|
||||
}
|
||||
|
||||
if let Some(allowed_ips) = payload.allowed_ips {
|
||||
if allowed_ips.is_empty() {
|
||||
uk.allowed_ips = sea_orm::Set(None);
|
||||
} else {
|
||||
// split allowed ips on ',' and try to parse them all. error on invalid input
|
||||
let allowed_ips = allowed_ips
|
||||
.split(',')
|
||||
.map(|x| x.trim().parse::<IpNet>())
|
||||
.collect::<Result<Vec<_>, _>>()?
|
||||
// parse worked. convert back to Strings
|
||||
.into_iter()
|
||||
.map(|x| x.to_string());
|
||||
|
||||
// and join them back together
|
||||
let allowed_ips: String =
|
||||
Itertools::intersperse(allowed_ips, ", ".to_string()).collect();
|
||||
|
||||
uk.allowed_ips = sea_orm::Set(Some(allowed_ips));
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: this should actually be bytes
|
||||
if let Some(allowed_origins) = payload.allowed_origins {
|
||||
if allowed_origins.is_empty() {
|
||||
uk.allowed_origins = sea_orm::Set(None);
|
||||
} else {
|
||||
// split allowed_origins on ',' and try to parse them all. error on invalid input
|
||||
let allowed_origins = allowed_origins
|
||||
.split(',')
|
||||
.map(|x| HeaderValue::from_str(x.trim()))
|
||||
.collect::<Result<Vec<_>, _>>()?
|
||||
.into_iter()
|
||||
.map(|x| Origin::decode(&mut [x].iter()))
|
||||
.collect::<Result<Vec<_>, _>>()?
|
||||
// parse worked. convert back to String and join them back together
|
||||
.into_iter()
|
||||
.map(|x| x.to_string());
|
||||
|
||||
let allowed_origins: String =
|
||||
Itertools::intersperse(allowed_origins, ", ".to_string()).collect();
|
||||
|
||||
uk.allowed_origins = sea_orm::Set(Some(allowed_origins));
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: this should actually be bytes
|
||||
if let Some(allowed_referers) = payload.allowed_referers {
|
||||
if allowed_referers.is_empty() {
|
||||
uk.allowed_referers = sea_orm::Set(None);
|
||||
} else {
|
||||
// split allowed ips on ',' and try to parse them all. error on invalid input
|
||||
let allowed_referers = allowed_referers
|
||||
.split(',')
|
||||
.map(|x| HeaderValue::from_str(x.trim()))
|
||||
.collect::<Result<Vec<_>, _>>()?
|
||||
.into_iter()
|
||||
.map(|x| Referer::decode(&mut [x].iter()))
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
// parse worked. now we can put it back together.
|
||||
// but we can't go directly to String.
|
||||
// so we convert to HeaderValues first
|
||||
let mut header_map = vec![];
|
||||
for x in allowed_referers {
|
||||
x.encode(&mut header_map);
|
||||
}
|
||||
|
||||
// convert HeaderValues to Strings
|
||||
// since we got these from strings, this should always work (unless we figure out using bytes)
|
||||
let allowed_referers = header_map
|
||||
.into_iter()
|
||||
.map(|x| x.to_str().map(|x| x.to_string()))
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
// join strings together with commas
|
||||
let allowed_referers: String =
|
||||
Itertools::intersperse(allowed_referers.into_iter(), ", ".to_string()).collect();
|
||||
|
||||
uk.allowed_referers = sea_orm::Set(Some(allowed_referers));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(allowed_user_agents) = payload.allowed_user_agents {
|
||||
if allowed_user_agents.is_empty() {
|
||||
uk.allowed_user_agents = sea_orm::Set(None);
|
||||
} else {
|
||||
// split allowed_user_agents on ',' and try to parse them all. error on invalid input
|
||||
let allowed_user_agents = allowed_user_agents
|
||||
.split(',')
|
||||
.filter_map(|x| x.trim().parse::<UserAgent>().ok())
|
||||
// parse worked. convert back to String
|
||||
.map(|x| x.to_string());
|
||||
|
||||
// join the strings together
|
||||
let allowed_user_agents: String =
|
||||
Itertools::intersperse(allowed_user_agents, ", ".to_string()).collect();
|
||||
|
||||
uk.allowed_user_agents = sea_orm::Set(Some(allowed_user_agents));
|
||||
}
|
||||
}
|
||||
|
||||
let uk = if uk.is_changed() {
|
||||
let db_conn = app.db_conn().web3_context("login requires a db")?;
|
||||
|
||||
uk.save(&db_conn)
|
||||
.await
|
||||
.web3_context("Failed saving user key")?
|
||||
} else {
|
||||
uk
|
||||
};
|
||||
|
||||
let uk = uk.try_into_model()?;
|
||||
|
||||
Ok(Json(uk).into_response())
|
||||
}
|
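The `allowed_ips` branch in `rpc_keys_management` above follows a split, parse, re-join pattern: every comma-separated entry must parse as an `IpNet`, the first invalid entry fails the whole request, and the normalized list is stored back as one comma-separated string. A standalone sketch of that round trip with the same `ipnet` and `itertools` calls (the helper name is illustrative):

```
use ipnet::IpNet;
use itertools::Itertools;

/// Normalize a user-supplied comma-separated list of CIDR networks,
/// erroring out if any single entry fails to parse.
fn normalize_allowed_ips(input: &str) -> Result<String, ipnet::AddrParseError> {
    let parsed = input
        .split(',')
        .map(|x| x.trim().parse::<IpNet>())
        .collect::<Result<Vec<_>, _>>()?
        .into_iter()
        .map(|x| x.to_string());

    // fully-qualified call, like the handler, to avoid the name clash with the
    // unstable `Iterator::intersperse` in std
    Ok(Itertools::intersperse(parsed, ", ".to_string()).collect())
}

// normalize_allowed_ips("10.0.0.0/8 ,192.168.1.0/24") == Ok("10.0.0.0/8, 192.168.1.0/24")
```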
123  web3_proxy/src/frontend/users/stats.rs  Normal file
@ -0,0 +1,123 @@
//! Handle registration, logins, and managing account data.
use crate::app::Web3ProxyApp;
use crate::frontend::errors::{Web3ProxyErrorContext, Web3ProxyResponse};
use crate::http_params::{
    get_chain_id_from_params, get_page_from_params, get_query_start_from_params,
};
use crate::stats::influxdb_queries::query_user_stats;
use crate::stats::StatType;
use axum::{
    extract::Query,
    headers::{authorization::Bearer, Authorization},
    response::IntoResponse,
    Extension, Json, TypedHeader,
};
use axum_macros::debug_handler;
use entities;
use entities::{revert_log, rpc_key};
use hashbrown::HashMap;
use migration::sea_orm::{ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, QueryOrder};
use serde_json::json;
use std::sync::Arc;

/// `GET /user/revert_logs` -- Use a bearer token to get the user's revert logs.
#[debug_handler]
pub async fn user_revert_logs_get(
    Extension(app): Extension<Arc<Web3ProxyApp>>,
    TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
    Query(params): Query<HashMap<String, String>>,
) -> Web3ProxyResponse {
    let (user, _semaphore) = app.bearer_is_authorized(bearer).await?;

    let chain_id = get_chain_id_from_params(app.as_ref(), &params)?;
    let query_start = get_query_start_from_params(&params)?;
    let page = get_page_from_params(&params)?;

    // TODO: page size from config
    let page_size = 1_000;

    let mut response = HashMap::new();

    response.insert("page", json!(page));
    response.insert("page_size", json!(page_size));
    response.insert("chain_id", json!(chain_id));
    response.insert("query_start", json!(query_start.timestamp() as u64));

    let db_replica = app
        .db_replica()
        .web3_context("getting replica db for user's revert logs")?;

    let uks = rpc_key::Entity::find()
        .filter(rpc_key::Column::UserId.eq(user.id))
        .all(db_replica.conn())
        .await
        .web3_context("failed loading user's key")?;

    // TODO: only select the ids
    let uks: Vec<_> = uks.into_iter().map(|x| x.id).collect();

    // get revert logs
    let mut q = revert_log::Entity::find()
        .filter(revert_log::Column::Timestamp.gte(query_start))
        .filter(revert_log::Column::RpcKeyId.is_in(uks))
        .order_by_asc(revert_log::Column::Timestamp);

    if chain_id == 0 {
        // don't do anything
    } else {
        // filter on chain id
        q = q.filter(revert_log::Column::ChainId.eq(chain_id))
    }

    // query the database for number of items and pages
    let pages_result = q
        .clone()
        .paginate(db_replica.conn(), page_size)
        .num_items_and_pages()
        .await?;

    response.insert("num_items", pages_result.number_of_items.into());
    response.insert("num_pages", pages_result.number_of_pages.into());

    // query the database for the revert logs
    let revert_logs = q
        .paginate(db_replica.conn(), page_size)
        .fetch_page(page)
        .await?;

    response.insert("revert_logs", json!(revert_logs));

    Ok(Json(response).into_response())
}

/// `GET /user/stats/aggregate` -- Public endpoint for aggregate stats such as bandwidth used and methods requested.
#[debug_handler]
pub async fn user_stats_aggregated_get(
    Extension(app): Extension<Arc<Web3ProxyApp>>,
    bearer: Option<TypedHeader<Authorization<Bearer>>>,
    Query(params): Query<HashMap<String, String>>,
) -> Web3ProxyResponse {
    let response = query_user_stats(&app, bearer, &params, StatType::Aggregated).await?;

    Ok(response)
}

/// `GET /user/stats/detailed` -- Use a bearer token to get the user's key stats such as bandwidth used and methods requested.
///
/// If no bearer is provided, detailed stats for all users will be shown.
/// View a single user with `?user_id=$x`.
/// View a single chain with `?chain_id=$x`.
///
/// Set `$x` to zero to see all.
///
/// TODO: this will change as we add better support for secondary users.
#[debug_handler]
pub async fn user_stats_detailed_get(
    Extension(app): Extension<Arc<Web3ProxyApp>>,
    bearer: Option<TypedHeader<Authorization<Bearer>>>,
    Query(params): Query<HashMap<String, String>>,
) -> Web3ProxyResponse {
    let response = query_user_stats(&app, bearer, &params, StatType::Detailed).await?;

    Ok(response)
}
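The revert-log endpoint above uses SeaORM's paginator twice: once to count items and pages for the response metadata, and once to fetch the requested page. A minimal sketch of that pattern in isolation, reusing the `revert_log` entity and the 1_000 page size from the handler (the helper name and the use of `num_pages` instead of `num_items_and_pages` are illustrative):

```
use entities::revert_log;
use migration::sea_orm::{DatabaseConnection, DbErr, EntityTrait, PaginatorTrait, QueryOrder};

/// Count the pages and fetch a single page of revert logs.
async fn page_of_revert_logs(
    db: &DatabaseConnection,
    page: u64,
) -> Result<(u64, Vec<revert_log::Model>), DbErr> {
    let page_size = 1_000;

    let q = revert_log::Entity::find().order_by_asc(revert_log::Column::Timestamp);

    // one round trip for the page count...
    let num_pages = q.clone().paginate(db, page_size).num_pages().await?;

    // ...and one for the requested page itself
    let rows = q.paginate(db, page_size).fetch_page(page).await?;

    Ok((num_pages, rows))
}
```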
428  web3_proxy/src/frontend/users/subuser.rs  Normal file
@ -0,0 +1,428 @@
|
||||
//! Handle subusers, viewing subusers, and viewing accessible rpc-keys
|
||||
use crate::app::Web3ProxyApp;
|
||||
use crate::frontend::authorization::RpcSecretKey;
|
||||
use crate::frontend::errors::{Web3ProxyError, Web3ProxyErrorContext, Web3ProxyResponse};
|
||||
use anyhow::Context;
|
||||
use axum::{
|
||||
extract::Query,
|
||||
headers::{authorization::Bearer, Authorization},
|
||||
response::IntoResponse,
|
||||
Extension, Json, TypedHeader,
|
||||
};
|
||||
use axum_macros::debug_handler;
|
||||
use entities::sea_orm_active_enums::Role;
|
||||
use entities::{balance, rpc_key, secondary_user, user, user_tier};
|
||||
use ethers::types::Address;
|
||||
use hashbrown::HashMap;
|
||||
use http::StatusCode;
|
||||
use log::{debug, warn};
|
||||
use migration::sea_orm;
|
||||
use migration::sea_orm::prelude::Decimal;
|
||||
use migration::sea_orm::ActiveModelTrait;
|
||||
use migration::sea_orm::ColumnTrait;
|
||||
use migration::sea_orm::EntityTrait;
|
||||
use migration::sea_orm::IntoActiveModel;
|
||||
use migration::sea_orm::QueryFilter;
|
||||
use migration::sea_orm::TransactionTrait;
|
||||
use serde_json::json;
|
||||
use std::sync::Arc;
|
||||
use ulid::{self, Ulid};
|
||||
use uuid::Uuid;
|
||||
|
||||
pub async fn get_keys_as_subuser(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
Query(_params): Query<HashMap<String, String>>,
|
||||
) -> Web3ProxyResponse {
|
||||
// First, authenticate
|
||||
let (subuser, _semaphore) = app.bearer_is_authorized(bearer).await?;
|
||||
|
||||
let db_replica = app
|
||||
.db_replica()
|
||||
.context("getting replica db for user's revert logs")?;
|
||||
|
||||
// TODO: JOIN over RPC_KEY, SUBUSER, PRIMARY_USER and return these items
|
||||
|
||||
// Get all secondary users that have access to this rpc key
|
||||
let secondary_user_entities = secondary_user::Entity::find()
|
||||
.filter(secondary_user::Column::UserId.eq(subuser.id))
|
||||
.all(db_replica.conn())
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|x| (x.rpc_secret_key_id, x))
|
||||
.collect::<HashMap<u64, secondary_user::Model>>();
|
||||
|
||||
// Now return a list of all subusers (their wallets)
|
||||
let rpc_key_entities: Vec<(rpc_key::Model, Option<user::Model>)> = rpc_key::Entity::find()
|
||||
.filter(
|
||||
rpc_key::Column::Id.is_in(
|
||||
secondary_user_entities
|
||||
.iter()
|
||||
.map(|(x, _)| *x)
|
||||
.collect::<Vec<_>>(),
|
||||
),
|
||||
)
|
||||
.find_also_related(user::Entity)
|
||||
.all(db_replica.conn())
|
||||
.await?;
|
||||
|
||||
// TODO: Merge rpc-key with respective user (join is probably easiest ...)
|
||||
|
||||
// Now return the list
|
||||
let response_json = json!({
|
||||
"subuser": format!("{:?}", Address::from_slice(&subuser.address)),
|
||||
"rpc_keys": rpc_key_entities
|
||||
.into_iter()
|
||||
.flat_map(|(rpc_key, rpc_owner)| {
|
||||
match rpc_owner {
|
||||
Some(inner_rpc_owner) => {
|
||||
let mut tmp = HashMap::new();
|
||||
tmp.insert("rpc-key", serde_json::Value::String(Ulid::from(rpc_key.secret_key).to_string()));
|
||||
tmp.insert("rpc-owner", serde_json::Value::String(format!("{:?}", Address::from_slice(&inner_rpc_owner.address))));
|
||||
tmp.insert("role", serde_json::Value::String(format!("{:?}", secondary_user_entities.get(&rpc_key.id).unwrap().role))); // .to_string() returns ugly "'...'"
|
||||
Some(tmp)
|
||||
},
|
||||
None => {
|
||||
// error!("Found RPC secret key with no user!".to_owned());
|
||||
None
|
||||
}
|
||||
}
|
||||
})
|
||||
.collect::<Vec::<_>>(),
|
||||
});
|
||||
|
||||
Ok(Json(response_json).into_response())
|
||||
}
|
||||
|
||||
pub async fn get_subusers(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
Query(mut params): Query<HashMap<String, String>>,
|
||||
) -> Web3ProxyResponse {
|
||||
// First, authenticate
|
||||
let (user, _semaphore) = app.bearer_is_authorized(bearer).await?;
|
||||
|
||||
let db_replica = app
|
||||
.db_replica()
|
||||
.context("getting replica db for user's revert logs")?;
|
||||
|
||||
// Second, check if the user is a premium user
|
||||
let user_tier = user_tier::Entity::find()
|
||||
.filter(user_tier::Column::Id.eq(user.user_tier_id))
|
||||
.one(db_replica.conn())
|
||||
.await?
|
||||
.ok_or(Web3ProxyError::BadRequest(
|
||||
"Could not find user in db although bearer token is there!".to_string(),
|
||||
))?;
|
||||
|
||||
debug!("User tier is: {:?}", user_tier);
|
||||
// TODO: This shouldn't be hardcoded. Also, it should be an enum, not sth like this ...
|
||||
if user_tier.id != 6 {
|
||||
return Err(
|
||||
anyhow::anyhow!("User is not premium. Must be premium to create referrals.").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let rpc_key: Ulid = params
|
||||
.remove("rpc_key")
|
||||
// TODO: map_err so this becomes a 500. routing must be bad
|
||||
.ok_or(Web3ProxyError::BadRequest(
|
||||
"You have not provided the 'rpc_key' whose access to modify".to_string(),
|
||||
))?
|
||||
.parse()
|
||||
.context(format!("unable to parse rpc_key {:?}", params))?;
|
||||
|
||||
// Get the rpc key id
|
||||
let rpc_key = rpc_key::Entity::find()
|
||||
.filter(rpc_key::Column::SecretKey.eq(Uuid::from(rpc_key)))
|
||||
.one(db_replica.conn())
|
||||
.await?
|
||||
.ok_or(Web3ProxyError::BadRequest(
|
||||
"The provided RPC key cannot be found".to_string(),
|
||||
))?;
|
||||
|
||||
// Get all secondary users that have access to this rpc key
|
||||
let secondary_user_entities = secondary_user::Entity::find()
|
||||
.filter(secondary_user::Column::RpcSecretKeyId.eq(rpc_key.id))
|
||||
.all(db_replica.conn())
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|x| (x.user_id, x))
|
||||
.collect::<HashMap<u64, secondary_user::Model>>();
|
||||
|
||||
// Now return a list of all subusers (their wallets)
|
||||
let subusers = user::Entity::find()
|
||||
.filter(
|
||||
user::Column::Id.is_in(
|
||||
secondary_user_entities
|
||||
.iter()
|
||||
.map(|(x, _)| *x)
|
||||
.collect::<Vec<_>>(),
|
||||
),
|
||||
)
|
||||
.all(db_replica.conn())
|
||||
.await?;
|
||||
|
||||
warn!("Subusers are: {:?}", subusers);
|
||||
|
||||
// Now return the list
|
||||
let response_json = json!({
|
||||
"caller": format!("{:?}", Address::from_slice(&user.address)),
|
||||
"rpc_key": rpc_key,
|
||||
"subusers": subusers
|
||||
.into_iter()
|
||||
.map(|subuser| {
|
||||
let mut tmp = HashMap::new();
|
||||
// .encode_hex()
|
||||
tmp.insert("address", serde_json::Value::String(format!("{:?}", Address::from_slice(&subuser.address))));
|
||||
tmp.insert("role", serde_json::Value::String(format!("{:?}", secondary_user_entities.get(&subuser.id).unwrap().role)));
|
||||
json!(tmp)
|
||||
})
|
||||
.collect::<Vec::<_>>(),
|
||||
});
|
||||
|
||||
Ok(Json(response_json).into_response())
|
||||
}
|
||||
|
||||
#[debug_handler]
|
||||
pub async fn modify_subuser(
|
||||
Extension(app): Extension<Arc<Web3ProxyApp>>,
|
||||
TypedHeader(Authorization(bearer)): TypedHeader<Authorization<Bearer>>,
|
||||
Query(mut params): Query<HashMap<String, String>>,
|
||||
) -> Web3ProxyResponse {
|
||||
// First, authenticate
|
||||
let (user, _semaphore) = app.bearer_is_authorized(bearer).await?;
|
||||
|
||||
let db_replica = app
|
||||
.db_replica()
|
||||
.context("getting replica db for user's revert logs")?;
|
||||
|
||||
// Second, check if the user is a premium user
|
||||
let user_tier = user_tier::Entity::find()
|
||||
.filter(user_tier::Column::Id.eq(user.user_tier_id))
|
||||
.one(db_replica.conn())
|
||||
.await?
|
||||
.ok_or(Web3ProxyError::BadRequest(
|
||||
"Could not find user in db although bearer token is there!".to_string(),
|
||||
))?;
|
||||
|
||||
debug!("User tier is: {:?}", user_tier);
|
||||
// TODO: This shouldn't be hardcoded. Also, it should be an enum, not sth like this ...
|
||||
if user_tier.id != 6 {
|
||||
return Err(
|
||||
anyhow::anyhow!("User is not premium. Must be premium to create referrals.").into(),
|
||||
);
|
||||
}
|
||||
|
||||
warn!("Parameters are: {:?}", params);
|
||||
|
||||
// Then, distinguish the endpoint to modify
|
||||
let rpc_key_to_modify: Ulid = params
|
||||
.remove("rpc_key")
|
||||
// TODO: map_err so this becomes a 500. routing must be bad
|
||||
.ok_or(Web3ProxyError::BadRequest(
|
||||
"You have not provided the 'rpc_key' whose access to modify".to_string(),
|
||||
))?
|
||||
.parse::<Ulid>()
|
||||
.context(format!("unable to parse rpc_key {:?}", params))?;
|
||||
// let rpc_key_to_modify: Uuid = ulid::serde::ulid_as_uuid::deserialize(rpc_key_to_modify)?;
|
||||
|
||||
let subuser_address: Address = params
|
||||
.remove("subuser_address")
|
||||
// TODO: map_err so this becomes a 500. routing must be bad
|
||||
.ok_or(Web3ProxyError::BadRequest(
|
||||
"You have not provided the 'user_address' whose access to modify".to_string(),
|
||||
))?
|
||||
.parse()
|
||||
.context(format!("unable to parse subuser_address {:?}", params))?;
|
||||
|
||||
// TODO: Check subuser address for eip55 checksum
|
||||
|
||||
let keep_subuser: bool = match params
|
||||
.remove("new_status")
|
||||
// TODO: map_err so this becomes a 500. routing must be bad
|
||||
.ok_or(Web3ProxyError::BadRequest(
|
||||
"You have not provided the new_stats key in the request".to_string(),
|
||||
))?
|
||||
.as_str()
|
||||
{
|
||||
"upsert" => Ok(true),
|
||||
"remove" => Ok(false),
|
||||
_ => Err(Web3ProxyError::BadRequest(
|
||||
"'new_status' must be one of 'upsert' or 'remove'".to_string(),
|
||||
)),
|
||||
}?;
|
||||
|
||||
let new_role: Role = match params
|
||||
.remove("new_role")
|
||||
// TODO: map_err so this becomes a 500. routing must be bad
|
||||
.ok_or(Web3ProxyError::BadRequest(
|
||||
"You have not provided the new_stats key in the request".to_string(),
|
||||
))?
|
||||
.as_str()
|
||||
{
|
||||
// TODO: Technically, if this is the new owner, we should transpose the full table.
|
||||
// For now, let's just not allow the primary owner to just delete his account
|
||||
// (if there is even such a functionality)
|
||||
"owner" => Ok(Role::Owner),
|
||||
"admin" => Ok(Role::Admin),
|
||||
"collaborator" => Ok(Role::Collaborator),
|
||||
_ => Err(Web3ProxyError::BadRequest(
|
||||
"'new_role' must be one of 'owner', 'admin', 'collaborator'".to_string(),
|
||||
)),
|
||||
}?;
|
||||
|
||||
// ---------------------------
|
||||
// First, check if the user exists as a user. If not, add them
|
||||
// (and also create a balance, and rpc_key, same procedure as logging in for first time)
|
||||
// ---------------------------
|
||||
let subuser = user::Entity::find()
|
||||
.filter(user::Column::Address.eq(subuser_address.as_ref()))
|
||||
.one(db_replica.conn())
|
||||
.await?;
|
||||
|
||||
let rpc_key_entity = rpc_key::Entity::find()
|
||||
.filter(rpc_key::Column::SecretKey.eq(Uuid::from(rpc_key_to_modify)))
|
||||
.one(db_replica.conn())
|
||||
.await?
|
||||
.ok_or(Web3ProxyError::BadRequest(
|
||||
"Provided RPC key does not exist!".to_owned(),
|
||||
))?;
|
||||
|
||||
// Make sure that the user owns the rpc_key_entity
|
||||
if rpc_key_entity.user_id != user.id {
|
||||
return Err(Web3ProxyError::BadRequest(
|
||||
"you must own the RPC for which you are giving permissions out".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// TODO: There is a good chunk of duplicate logic as login-post. Consider refactoring ...
|
||||
let db_conn = app.db_conn().web3_context("login requires a db")?;
|
||||
let (subuser, _subuser_rpc_keys, _status_code) = match subuser {
|
||||
None => {
|
||||
let txn = db_conn.begin().await?;
|
||||
// First add a user; the only thing we need from them is an address
|
||||
// everything else is optional
|
||||
let subuser = user::ActiveModel {
|
||||
address: sea_orm::Set(subuser_address.to_fixed_bytes().into()), // Address::from_slice(
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let subuser = subuser.insert(&txn).await?;
|
||||
|
||||
// create the user's first api key
|
||||
let rpc_secret_key = RpcSecretKey::new();
|
||||
|
||||
let subuser_rpc_key = rpc_key::ActiveModel {
|
||||
user_id: sea_orm::Set(subuser.id),
|
||||
secret_key: sea_orm::Set(rpc_secret_key.into()),
|
||||
description: sea_orm::Set(None),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let subuser_rpc_keys = vec![subuser_rpc_key
|
||||
.insert(&txn)
|
||||
.await
|
||||
.web3_context("Failed saving new user key")?];
|
||||
|
||||
// We should also create the balance entry ...
|
||||
let subuser_balance = balance::ActiveModel {
|
||||
user_id: sea_orm::Set(subuser.id),
|
||||
available_balance: sea_orm::Set(Decimal::new(0, 0)),
|
||||
used_balance: sea_orm::Set(Decimal::new(0, 0)),
|
||||
..Default::default()
|
||||
};
|
||||
subuser_balance.insert(&txn).await?;
|
||||
// save the user and key to the database
|
||||
txn.commit().await?;
|
||||
|
||||
(subuser, subuser_rpc_keys, StatusCode::CREATED)
|
||||
}
|
||||
Some(subuser) => {
|
||||
if subuser.id == user.id {
|
||||
return Err(Web3ProxyError::BadRequest(
|
||||
"you cannot make a subuser out of yourself".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// Let's say that a user that exists can actually also redeem a key in retrospect...
|
||||
// the user is already registered
|
||||
let subuser_rpc_keys = rpc_key::Entity::find()
|
||||
.filter(rpc_key::Column::UserId.eq(subuser.id))
|
||||
.all(db_replica.conn())
|
||||
.await
|
||||
.web3_context("failed loading user's key")?;
|
||||
|
||||
(subuser, subuser_rpc_keys, StatusCode::OK)
|
||||
}
|
||||
};
|
||||
|
||||
// --------------------------------
|
||||
// Now apply the operation
|
||||
// Either add the subuser
|
||||
// Or revoke his subuser status
|
||||
// --------------------------------
|
||||
|
||||
// Search for subuser first of all
|
||||
// There should be a unique-constraint on user-id + rpc_key
|
||||
let subuser_entry_secondary_user = secondary_user::Entity::find()
|
||||
.filter(secondary_user::Column::UserId.eq(subuser.id))
|
||||
.filter(secondary_user::Column::RpcSecretKeyId.eq(rpc_key_entity.id))
|
||||
.one(db_replica.conn())
|
||||
.await
|
||||
.web3_context("failed using the db to check for a subuser")?;
|
||||
|
||||
let txn = db_conn.begin().await?;
|
||||
let mut action = "no action";
|
||||
|
||||
match subuser_entry_secondary_user {
|
||||
Some(secondary_user) => {
|
||||
// In this case, remove the subuser
|
||||
let mut active_subuser_entry_secondary_user = secondary_user.into_active_model();
|
||||
if !keep_subuser {
|
||||
// Remove the user
|
||||
active_subuser_entry_secondary_user.delete(&db_conn).await?;
|
||||
action = "removed";
|
||||
} else {
|
||||
// Just change the role
|
||||
active_subuser_entry_secondary_user.role = sea_orm::Set(new_role.clone());
|
||||
active_subuser_entry_secondary_user.save(&db_conn).await?;
|
||||
action = "role modified";
|
||||
}
|
||||
}
|
||||
None if keep_subuser => {
|
||||
let active_subuser_entry_secondary_user = secondary_user::ActiveModel {
|
||||
user_id: sea_orm::Set(subuser.id),
|
||||
rpc_secret_key_id: sea_orm::Set(rpc_key_entity.id),
|
||||
role: sea_orm::Set(new_role.clone()),
|
||||
..Default::default()
|
||||
};
|
||||
active_subuser_entry_secondary_user.insert(&txn).await?;
|
||||
action = "added";
|
||||
}
|
||||
_ => {
|
||||
// Return if the user should be removed and if there is no entry;
|
||||
// in this case, the user is not entered
|
||||
|
||||
// Return if the user should be added and there is already an entry;
|
||||
// in this case, they were already added, so we can skip this
|
||||
// Do nothing in this case
|
||||
}
|
||||
};
|
||||
txn.commit().await?;
|
||||
|
||||
let response = (
|
||||
StatusCode::OK,
|
||||
Json(json!({
|
||||
"rpc_key": rpc_key_to_modify,
|
||||
"subuser_address": subuser_address,
|
||||
"keep_user": keep_subuser,
|
||||
"new_role": new_role,
|
||||
"action": action
|
||||
})),
|
||||
)
|
||||
.into_response();
|
||||
|
||||
// Return early if the log was added, assume there is at most one valid log per transaction
|
||||
Ok(response)
|
||||
}
|
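`modify_subuser` above accepts the RPC key as a ULID string, but the `rpc_key.secret_key` column stores a UUID, so the handler parses the ULID and converts it with `Uuid::from` before filtering. A small sketch of that conversion on its own; the helper name and example string are illustrative, and the `From<Ulid> for Uuid` impl comes from the `ulid` crate's uuid integration that the handler already depends on:

```
use ulid::Ulid;
use uuid::Uuid;

/// Parse a user-supplied ULID (e.g. from a query parameter) and convert it to
/// the UUID representation stored in the `rpc_key.secret_key` column.
fn rpc_key_param_to_uuid(raw: &str) -> Result<Uuid, ulid::DecodeError> {
    let as_ulid: Ulid = raw.parse()?;
    Ok(Uuid::from(as_ulid))
}

// e.g. rpc_key_param_to_uuid("01ARZ3NDEKTSV4RRFFQ69G5FAV") returns the equivalent Uuid
```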
@ -232,20 +232,23 @@ pub fn get_query_window_seconds_from_params(

pub fn get_stats_column_from_params(params: &HashMap<String, String>) -> Web3ProxyResult<&str> {
    params.get("query_stats_column").map_or_else(
        || Ok("frontend_requests"),
        || Ok(""),
        |query_stats_column: &String| {
            // Must be one of: Otherwise respond with an error ...
            match query_stats_column.as_str() {
                "frontend_requests"
                ""
                | "frontend_requests"
                | "backend_requests"
                | "cache_hits"
                | "cache_misses"
                | "no_servers"
                | "sum_request_bytes"
                | "sum_response_bytes"
                | "sum_response_millis" => Ok(query_stats_column),
                | "sum_response_millis"
                | "sum_credits_used"
                | "balance" => Ok(query_stats_column),
                _ => Err(Web3ProxyError::BadRequest(
                    "Unable to parse query_stats_column. It must be one of: \
                    "Unable to parse query_stats_column. It must be empty, or one of: \
                    frontend_requests, \
                    backend_requests, \
                    cache_hits, \
@ -253,7 +256,9 @@ pub fn get_stats_column_from_params(params: &HashMap<String, String>) -> Web3Pro
                    no_servers, \
                    sum_request_bytes, \
                    sum_response_bytes, \
                    sum_response_millis"
                    sum_response_millis, \
                    sum_credits_used, \
                    balance"
                        .to_string(),
                )),
            }
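The change above makes `query_stats_column` optional: a missing value now defaults to the empty string, and `sum_credits_used` and `balance` join the allow-list. A hedged usage sketch of the resulting behaviour, assuming `get_stats_column_from_params` and `Web3ProxyError` are in scope as defined in this file:

```
use hashbrown::HashMap;

// usage sketch against get_stats_column_from_params as shown above
fn demo_stats_column() {
    let mut params: HashMap<String, String> = HashMap::new();

    // no column requested: the default is now the empty string
    assert!(matches!(get_stats_column_from_params(&params), Ok("")));

    // one of the newly allowed columns passes through unchanged
    params.insert(
        "query_stats_column".to_string(),
        "sum_credits_used".to_string(),
    );
    assert!(matches!(
        get_stats_column_from_params(&params),
        Ok("sum_credits_used")
    ));

    // anything outside the allow-list is rejected with a BadRequest error
    params.insert(
        "query_stats_column".to_string(),
        "not_a_column".to_string(),
    );
    assert!(get_stats_column_from_params(&params).is_err());
}
```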
@ -1,20 +1,17 @@
|
||||
use crate::frontend::errors::{Web3ProxyError, Web3ProxyResult};
|
||||
use crate::response_cache::JsonRpcResponseData;
|
||||
use derive_more::From;
|
||||
use ethers::prelude::ProviderError;
|
||||
use serde::de::{self, Deserializer, MapAccess, SeqAccess, Visitor};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serde_json::value::{to_raw_value, RawValue};
|
||||
use std::borrow::Cow;
|
||||
use std::fmt;
|
||||
|
||||
fn default_jsonrpc() -> String {
|
||||
"2.0".to_string()
|
||||
}
|
||||
|
||||
// TODO: &str here instead of String should save a lot of allocations
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
pub struct JsonRpcRequest {
|
||||
// TODO: skip jsonrpc entirely? its against spec to drop it, but some servers bad
|
||||
#[serde(default = "default_jsonrpc")]
|
||||
pub jsonrpc: String,
|
||||
/// id could be a stricter type, but many rpcs do things against the spec
|
||||
pub id: Box<RawValue>,
|
||||
@ -51,7 +48,7 @@ impl JsonRpcRequest {
|
||||
params: Option<serde_json::Value>,
|
||||
) -> anyhow::Result<Self> {
|
||||
let x = Self {
|
||||
jsonrpc: default_jsonrpc(),
|
||||
jsonrpc: "2.0".to_string(),
|
||||
id: id.to_raw_value(),
|
||||
method,
|
||||
params,
|
||||
@ -194,19 +191,38 @@ pub struct JsonRpcErrorData {
|
||||
/// The error code
|
||||
pub code: i64,
|
||||
/// The error message
|
||||
pub message: String,
|
||||
pub message: Cow<'static, str>,
|
||||
/// Additional data
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub data: Option<serde_json::Value>,
|
||||
}
|
||||
|
||||
impl From<&'static str> for JsonRpcErrorData {
|
||||
fn from(value: &'static str) -> Self {
|
||||
Self {
|
||||
code: -32000,
|
||||
message: Cow::Borrowed(value),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for JsonRpcErrorData {
|
||||
fn from(value: String) -> Self {
|
||||
Self {
|
||||
code: -32000,
|
||||
message: Cow::Owned(value),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A complete response
|
||||
/// TODO: better Debug response
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
pub struct JsonRpcForwardedResponse {
|
||||
// TODO: jsonrpc a &str?
|
||||
#[serde(default = "default_jsonrpc")]
|
||||
pub jsonrpc: String,
|
||||
pub jsonrpc: &'static str,
|
||||
pub id: Box<RawValue>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub result: Option<Box<RawValue>>,
|
||||
@ -242,40 +258,40 @@ impl JsonRpcForwardedResponse {
|
||||
// TODO: this is too verbose. plenty of errors are valid, like users giving an invalid address. no need to log that
|
||||
// TODO: can we somehow get the initial request here? if we put that into a tracing span, will things slow down a ton?
|
||||
JsonRpcForwardedResponse {
|
||||
jsonrpc: "2.0".to_string(),
|
||||
id: id.unwrap_or_else(|| JsonRpcId::None.to_raw_value()),
|
||||
jsonrpc: "2.0",
|
||||
id: id.unwrap_or_default(),
|
||||
result: None,
|
||||
error: Some(JsonRpcErrorData {
|
||||
code: code.unwrap_or(-32099),
|
||||
message,
|
||||
message: Cow::Owned(message),
|
||||
// TODO: accept data as an argument
|
||||
data: None,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_response(partial_response: Box<RawValue>, id: Box<RawValue>) -> Self {
|
||||
pub fn from_raw_response(result: Box<RawValue>, id: Box<RawValue>) -> Self {
|
||||
JsonRpcForwardedResponse {
|
||||
jsonrpc: "2.0".to_string(),
|
||||
jsonrpc: "2.0",
|
||||
id,
|
||||
// TODO: since we only use the result here, should that be all we return from try_send_request?
|
||||
result: Some(partial_response),
|
||||
result: Some(result),
|
||||
error: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_value(partial_response: serde_json::Value, id: Box<RawValue>) -> Self {
|
||||
let partial_response =
|
||||
to_raw_value(&partial_response).expect("Value to RawValue should always work");
|
||||
pub fn from_value(result: serde_json::Value, id: Box<RawValue>) -> Self {
|
||||
let partial_response = to_raw_value(&result).expect("Value to RawValue should always work");
|
||||
|
||||
JsonRpcForwardedResponse {
|
||||
jsonrpc: "2.0".to_string(),
|
||||
jsonrpc: "2.0",
|
||||
id,
|
||||
result: Some(partial_response),
|
||||
error: None,
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: delete this. its on JsonRpcErrorData
|
||||
pub fn from_ethers_error(e: ProviderError, id: Box<RawValue>) -> Web3ProxyResult<Self> {
|
||||
// TODO: move turning ClientError into json to a helper function?
|
||||
let code;
|
||||
@ -290,21 +306,24 @@ impl JsonRpcForwardedResponse {
|
||||
data = err.data.clone();
|
||||
} else if let Some(err) = err.as_serde_error() {
|
||||
// this is not an rpc error. keep it as an error
|
||||
return Err(Web3ProxyError::BadRequest(format!("bad request: {}", err)));
|
||||
return Err(Web3ProxyError::BadResponse(format!(
|
||||
"bad response: {}",
|
||||
err
|
||||
)));
|
||||
} else {
|
||||
return Err(anyhow::anyhow!("unexpected ethers error!").into());
|
||||
return Err(anyhow::anyhow!("unexpected ethers error! {:?}", err).into());
|
||||
}
|
||||
}
|
||||
e => return Err(e.into()),
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
jsonrpc: "2.0".to_string(),
|
||||
jsonrpc: "2.0",
|
||||
id,
|
||||
result: None,
|
||||
error: Some(JsonRpcErrorData {
|
||||
code,
|
||||
message,
|
||||
message: Cow::Owned(message),
|
||||
data,
|
||||
}),
|
||||
})
|
||||
@ -315,16 +334,21 @@ impl JsonRpcForwardedResponse {
|
||||
id: Box<RawValue>,
|
||||
) -> Web3ProxyResult<Self> {
|
||||
match result {
|
||||
Ok(response) => Ok(Self::from_response(response, id)),
|
||||
Ok(response) => Ok(Self::from_raw_response(response, id)),
|
||||
Err(e) => Self::from_ethers_error(e, id),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn num_bytes(&self) -> usize {
|
||||
// TODO: not sure how to do this without wasting a ton of allocations
|
||||
serde_json::to_string(self)
|
||||
.expect("this should always be valid json")
|
||||
.len()
|
||||
pub fn from_response_data(data: JsonRpcResponseData, id: Box<RawValue>) -> Self {
|
||||
match data {
|
||||
JsonRpcResponseData::Result { value, .. } => Self::from_raw_response(value, id),
|
||||
JsonRpcResponseData::Error { value, .. } => JsonRpcForwardedResponse {
|
||||
jsonrpc: "2.0",
|
||||
id,
|
||||
result: None,
|
||||
error: Some(value),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
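The `JsonRpcErrorData` change in the diff above replaces `message: String` with `Cow<'static, str>` and adds `From<&'static str>` and `From<String>` impls, so fixed error strings no longer allocate while formatted ones still can. A small sketch of both cases against a struct with the same shape (redeclared locally here purely for illustration):

```
use std::borrow::Cow;

// same field layout as JsonRpcErrorData in the diff, redeclared for a standalone example
struct JsonRpcErrorData {
    code: i64,
    message: Cow<'static, str>,
    data: Option<serde_json::Value>,
}

fn demo_error_messages(method: &str) -> (JsonRpcErrorData, JsonRpcErrorData) {
    // borrowed: a fixed message costs no allocation
    let fixed = JsonRpcErrorData {
        code: -32601,
        message: Cow::Borrowed("method not found"),
        data: None,
    };

    // owned: dynamically built messages still work, they just pay for the String
    let dynamic = JsonRpcErrorData {
        code: -32601,
        message: Cow::Owned(format!("the method {} does not exist", method)),
        data: None,
    };

    (fixed, dynamic)
}
```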
@ -7,6 +7,8 @@ pub mod http_params;
pub mod jsonrpc;
pub mod pagerduty;
pub mod prometheus;
pub mod referral_code;
pub mod response_cache;
pub mod rpcs;
pub mod stats;
pub mod user_token;
@ -30,4 +32,5 @@ pub struct PostLoginQuery {
pub struct PostLogin {
    sig: String,
    msg: String,
    pub referral_code: Option<String>,
}