improve build process

Bryan Stitt 2023-06-25 21:26:17 -07:00
parent f15d30a98d
commit 5b60a43466
4 changed files with 45 additions and 63 deletions

.dockerignore

@@ -1,13 +1,20 @@
.DS_Store
flamegraph.svg
perf.data
perf.data.old
/bin/
/config/devel*
/config/production*
/data/
/docker-compose*
/*.md
/.env
/.git
/.gitignore
/.vscode
/Dockerfile
/Jenkinsfile
/bin
/config/devel*
/config/production*
/data
/docker-compose*
/docs
/redis-cell-server/
/target

.gitignore (5 changes)

@@ -1,7 +1,8 @@
.DS_Store
/config/*.toml
/data
flamegraph.svg
perf.data
perf.data.old
/config/*.toml
/data
/target

Dockerfile

@@ -2,12 +2,17 @@ FROM debian:bullseye-slim as builder
WORKDIR /app
ENV CARGO_TERM_COLOR always
ENV PATH "/root/.foundry/bin:/root/.cargo/bin:${PATH}"
# install rustup dependencies
# also install web3-proxy system dependencies. most things are rust-only, but not everything
RUN apt-get update && \
apt-get install --yes \
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
\
apt-get update && \
apt-get install --no-install-recommends --yes \
build-essential \
ca-certificates \
cmake \
curl \
git \
@@ -17,51 +22,54 @@ RUN apt-get update && \
libssl-dev \
libzstd-dev \
make \
pkg-config \
&& \
rm -rf /var/lib/apt/lists/*
pkg-config
# install rustup
ENV PATH="/root/.cargo/bin:${PATH}"
RUN --mount=type=cache,target=/usr/local/cargo/registry \
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain none
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain none
# install the correct version of rust
# we need nightly for a few features
COPY rust-toolchain.toml .
RUN /root/.cargo/bin/rustup update
# install our desired version of rust
COPY rust-toolchain.toml ./
RUN --mount=type=cache,target=/usr/local/cargo/registry \
\
rustup update
# a next-generation test runner for Rust projects.
# We only pay the installation cost once,
# it will be cached from the second build onwards
# TODO: more mount type cache?
RUN --mount=type=cache,target=/usr/local/cargo/registry \
\
cargo install cargo-nextest
# foundry is needed to run tests
# TODO: do this in a separate FROM and COPY it in
ENV PATH /root/.foundry/bin:$PATH
RUN curl -L https://foundry.paradigm.xyz | bash && foundryup
# copy the application
COPY . .
RUN --mount=type=cache,target=/usr/local/cargo/registry \
\
curl -L https://foundry.paradigm.xyz | bash && foundryup
ENV WEB3_PROXY_FEATURES "rdkafka-src,connectinfo"
FROM builder as build_tests
# test the application with cargo-nextest
RUN --mount=type=cache,target=/usr/local/cargo/registry \
RUN --mount=type=bind,target=. \
--mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/app/target \
RUST_LOG=web3_proxy=trace,info cargo nextest run --features "$WEB3_PROXY_FEATURES" --no-default-features
\
RUST_LOG=web3_proxy=trace,info cargo nextest run --features "$WEB3_PROXY_FEATURES" --no-default-features && \
touch /test_success
# TODO: does this split of build_tests and build_app actually do anything if they both mount the same cache?
FROM builder as build_app
# build the application
# using a "release" profile (which install does by default) is **very** important
# TODO: use the "faster_release" profile which builds with `codegen-units = 1`
RUN --mount=type=cache,target=/usr/local/cargo/registry \
RUN --mount=type=bind,target=.,rw \
--mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/app/target \
\
cargo install \
--features "$WEB3_PROXY_FEATURES" \
--locked \
@@ -88,6 +96,8 @@ CMD [ "--config", "/web3-proxy.toml", "proxyd" ]
# TODO: lower log level when done with prototyping
ENV RUST_LOG "warn,ethers_providers::rpc=off,web3_proxy=debug,web3_proxy::rpcs::consensus=info,web3_proxy_cli=debug"
# we copy something from build_tests just so that docker actually builds it
COPY --from=build_tests /test_success /
COPY --from=build_app /usr/local/bin/* /usr/local/bin/
# make sure the app works

bugs.md (36 deletions)

@@ -1,36 +0,0 @@
# deadlock
goerli_1 | 1 deadlocks detected
goerli_1 | Deadlock #0
goerli_1 | Thread Id 139881298757376
goerli_1 | 0: 0x5608f7f762d9 - backtrace::backtrace::trace::hbe74611947a262af
goerli_1 | 1: 0x5608f7f7a967 - backtrace::capture::Backtrace::new::h667fe9ee7ec04c33
goerli_1 | 2: 0x5608f7f6ed33 - parking_lot_core::parking_lot::deadlock_impl::on_unpark::h78879313dd6461e5
goerli_1 | 3: 0x5608f730dcd4 - parking_lot::raw_mutex::RawMutex::lock_slow::h9c58bf1ec322b8f6
goerli_1 | 4: 0x5608f78f2e87 - <moka::common::concurrent::housekeeper::ThreadPoolHousekeeper<T> as core::ops::drop::Drop>::drop::h4887dbe8ef7d7472
goerli_1 | 5: 0x5608f7909362 - alloc::sync::Arc<T>::drop_slow::h3de3d854b76812ea
goerli_1 | 6: 0x5608f7919596 - core::ptr::drop_in_place<moka::future::cache::Cache<web3_proxy::app_stats::UserProxyResponseKey,alloc::sync::Arc<web3_proxy::app_stats::ProxyResponseAggregate>,ahash::random_state::RandomState>>::h1bf4d8ebf87406ed
goerli_1 | 7: 0x5608f791ac00 - triomphe::arc::Arc<T>::drop_slow::h246e78aee1f2a265
goerli_1 | 8: 0x5608f78e38bd - crossbeam_epoch::deferred::Deferred::new::call::h395b93588d5e21a9
goerli_1 | 9: 0x5608f72fbaa2 - crossbeam_epoch::internal::Global::collect::h77479fc8b8898340
goerli_1 | 10: 0x5608f73ef22c - <moka::sync_base::base_cache::Inner<K,V,S> as moka::common::concurrent::housekeeper::InnerSync>::sync::h07f3f4f6db1c2598
goerli_1 | 11: 0x5608f75e4ee3 - moka::common::concurrent::housekeeper::ThreadPoolHousekeeper<T>::call_sync::h11b70044870c94f4
goerli_1 | 12: 0x5608f75e4b03 - moka::common::concurrent::housekeeper::ThreadPoolHousekeeper<T>::start_periodical_sync_job::{{closure}}::hdc1c253b1b156548
goerli_1 | 13: 0x5608f7cc8d15 - scheduled_thread_pool::Worker::run_job::hb3ae60b61103071b
goerli_1 | 14: 0x5608f7cc8b8b - scheduled_thread_pool::Worker::run::h760e10fe3281c379
goerli_1 | 15: 0x5608f7ccb294 - std::sys_common::backtrace::__rust_begin_short_backtrace::hc3b55a28c2ef3a5f
goerli_1 | 16: 0x5608f7cc9cb5 - core::ops::function::FnOnce::call_once{{vtable.shim}}::hf330c4157d74cf0e
goerli_1 | 17: 0x5608f7fc8dd3 - <alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once::h56d5fc072706762b
goerli_1 | at /rustc/a55dd71d5fb0ec5a6a3a9e8c27b2127ba491ce52/library/alloc/src/boxed.rs:1935:9
goerli_1 | <alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once::h41deef8e33b824bb
goerli_1 | at /rustc/a55dd71d5fb0ec5a6a3a9e8c27b2127ba491ce52/library/alloc/src/boxed.rs:1935:9
goerli_1 | std::sys::unix::thread::Thread::new::thread_start::ha6436304a1170bba
goerli_1 | at /rustc/a55dd71d5fb0ec5a6a3a9e8c27b2127ba491ce52/library/std/src/sys/unix/thread.rs:108:17
goerli_1 | 18: 0x7f3b309e9ea7 - start_thread
goerli_1 | 19: 0x7f3b307bfaef - clone
goerli_1 | 20: 0x0 - <unknown>
goerli_1 |
Also saw deadlocks on other chains (Arbitrum, Goerli, Gnosis, Optimism, Polygon, Fantom), though luckily not on Eth, and it seems like it kept going.
I'm going to guess that the problem is nested caches.
Refactor to maybe use a DashMap at one level? Or flatten into one level and use channels more. A rough sketch of the flattened approach is below.
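
A minimal sketch of the "flatten into one level" idea, assuming the `dashmap` crate: one flat `DashMap` keyed by the composite key, so there is no cache-inside-a-cache and no moka housekeeper getting dropped from inside another cache's drop path. Only the `UserProxyResponseKey` and `ProxyResponseAggregate` names come from the backtrace above; their fields here are hypothetical stand-ins.

```rust
use std::sync::{Arc, Mutex};

use dashmap::DashMap;

// Hypothetical composite key; only the type name appears in the backtrace above.
#[derive(Clone, Hash, PartialEq, Eq)]
struct UserProxyResponseKey {
    user_id: u64,   // hypothetical field
    method: String, // hypothetical field
}

// Hypothetical aggregate; only the type name appears in the backtrace above.
#[derive(Default)]
struct ProxyResponseAggregate {
    count: u64,
    total_response_millis: u64,
}

// One flat map instead of nested caches: a single sharded lock domain,
// and nothing that drops one cache from inside another cache's housekeeper.
#[derive(Default)]
struct ProxyResponseStats {
    aggregates: DashMap<UserProxyResponseKey, Arc<Mutex<ProxyResponseAggregate>>>,
}

impl ProxyResponseStats {
    fn record(&self, key: UserProxyResponseKey, response_millis: u64) {
        // Hold the DashMap shard lock only long enough to clone the Arc,
        // then lock the per-entry mutex after the shard guard is dropped.
        let agg = {
            let entry = self.aggregates.entry(key).or_insert_with(Default::default);
            Arc::clone(entry.value())
        };

        let mut agg = agg.lock().unwrap();
        agg.count += 1;
        agg.total_response_millis += response_millis;
    }
}
```

This doesn't reproduce the TTL/eviction behavior moka provided; the point is just a single lock domain for the aggregates (or, per the note above, a channel feeding one owner task instead of shared maps at all).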