diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index db458a3dbfd..031a88b03c3 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -13,7 +13,7 @@ jobs: build-and-upload-to-s3: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@master + - uses: actions/checkout@v4 - name: Setup mdBook uses: peaceiris/actions-mdbook@v1 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index bdd7b626532..54b355e631d 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -71,7 +71,7 @@ jobs: VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} FEATURE_SUFFIX: ${{ matrix.features.version_suffix }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Update Rust if: env.SELF_HOSTED_RUNNERS == 'false' run: rustup update stable @@ -106,10 +106,10 @@ jobs: - name: Set up Docker Buildx if: env.SELF_HOSTED_RUNNERS == 'false' - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Build and push - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: file: ./Dockerfile.cross context: . @@ -129,7 +129,7 @@ jobs: VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Dockerhub login run: | @@ -148,7 +148,7 @@ jobs: VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Dockerhub login run: | echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml index 7f5d3e0b602..7e8d9135dd1 100644 --- a/.github/workflows/linkcheck.yml +++ b/.github/workflows/linkcheck.yml @@ -20,7 +20,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Run mdbook server run: | diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 75a81ce0e7c..42293d38a79 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -24,7 +24,7 @@ jobs: # Enable portable to prevent issues with caching `blst` for the wrong CPU type FEATURES: portable,jemalloc steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust run: rustup update stable @@ -46,7 +46,7 @@ jobs: echo "$(brew --prefix)/opt/gnu-sed/libexec/gnubin" >> $GITHUB_PATH echo "$(brew --prefix)/opt/grep/libexec/gnubin" >> $GITHUB_PATH # https://github.com/actions/cache/blob/main/examples.md#rust---cargo - - uses: actions/cache@v3 + - uses: actions/cache@v4 id: cache-cargo with: path: | @@ -95,6 +95,6 @@ jobs: runs-on: ubuntu-latest needs: ["run-local-testnet"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Check that success job is dependent on all others run: ./scripts/ci/check-success-job.sh ./.github/workflows/local-testnet.yml local-testnet-success diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a1c72e5533c..3d23b4110e7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -68,7 +68,7 @@ jobs: needs: extract-version steps: - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == 'false' run: 
rustup update stable @@ -172,17 +172,19 @@ jobs: # This is required to share artifacts between different jobs # ======================================================================= - - name: Upload artifact - uses: actions/upload-artifact@v3 + - name: Upload artifact + uses: actions/upload-artifact@v4 with: name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz + compression-level: 0 - name: Upload signature - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc + compression-level: 0 draft-release: name: Draft Release @@ -193,7 +195,7 @@ jobs: steps: # This is necessary for generating the changelog. It has to come before "Download Artifacts" or else it deletes the artifacts. - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 @@ -202,7 +204,7 @@ jobs: # ============================== - name: Download artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 # ============================== # Create release draft diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 28b8ec29e33..c501c8cabf4 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -41,7 +41,7 @@ jobs: # Use self-hosted runners only on the sigp repo. runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 @@ -65,7 +65,7 @@ jobs: name: release-tests-windows runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "CI"]') || 'windows-2019' }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 @@ -102,7 +102,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 @@ -121,7 +121,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust uses: moonrepo/setup-rust@v1 with: @@ -136,7 +136,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust uses: moonrepo/setup-rust@v1 with: @@ -151,7 +151,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust uses: moonrepo/setup-rust@v1 with: @@ -167,7 +167,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 @@ -188,7 +188,7 @@ jobs: name: state-transition-vectors-ubuntu runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: 
actions/checkout@v4 - name: Get latest version of stable Rust uses: moonrepo/setup-rust@v1 with: @@ -203,7 +203,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 @@ -211,7 +211,7 @@ jobs: channel: stable cache-target: release bins: cargo-nextest - - name: Run consensus-spec-tests with blst, milagro and fake_crypto + - name: Run consensus-spec-tests with blst and fake_crypto run: make nextest-ef - name: Show cache stats if: env.SELF_HOSTED_RUNNERS == 'true' @@ -220,7 +220,7 @@ jobs: name: dockerfile-ubuntu runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Build the root Dockerfile run: docker build --build-arg FEATURES=portable -t lighthouse:local . - name: Test the built image @@ -229,7 +229,7 @@ jobs: name: eth1-simulator-ubuntu runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust uses: moonrepo/setup-rust@v1 with: @@ -245,7 +245,7 @@ jobs: name: merge-transition-ubuntu runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust uses: moonrepo/setup-rust@v1 with: @@ -261,7 +261,7 @@ jobs: name: no-eth1-simulator-ubuntu runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust uses: moonrepo/setup-rust@v1 with: @@ -273,7 +273,7 @@ jobs: name: syncing-simulator-ubuntu runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust uses: moonrepo/setup-rust@v1 with: @@ -292,7 +292,7 @@ jobs: # Enable portable to prevent issues with caching `blst` for the wrong CPU type FEATURES: jemalloc,portable steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 @@ -325,7 +325,7 @@ jobs: name: execution-engine-integration-ubuntu runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 @@ -346,7 +346,7 @@ jobs: env: CARGO_INCREMENTAL: 1 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust uses: moonrepo/setup-rust@v1 with: @@ -372,7 +372,7 @@ jobs: name: check-msrv runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust at Minimum Supported Rust Version (MSRV) run: | metadata=$(cargo metadata --no-deps --format-version 1) @@ -384,7 +384,7 @@ jobs: name: cargo-udeps runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of nightly Rust uses: moonrepo/setup-rust@v1 with: @@ -406,9 +406,9 @@ jobs: name: compile-with-beta-compiler runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install dependencies - run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang + run: sudo apt update && sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang - name: Use Rust 
beta run: rustup override set beta - name: Run make @@ -417,7 +417,7 @@ jobs: name: cli-check runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Get latest version of stable Rust uses: moonrepo/setup-rust@v1 with: @@ -455,6 +455,6 @@ jobs: 'cli-check', ] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Check that success job is dependent on all others run: ./scripts/ci/check-success-job.sh ./.github/workflows/test-suite.yml test-suite-success diff --git a/Cargo.lock b/Cargo.lock index 35e891b0520..1265bb28631 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -154,9 +154,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.8" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" +checksum = "8b79b82693f705137f8fb9b37871d99e4f9a7df12b917eed79c3d3954830a60b" dependencies = [ "cfg-if", "once_cell", @@ -226,9 +226,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef197eb250c64962003cb08b90b17f0882c192f4a6f2f544809d424fd7cb0e7d" +checksum = "600d34d8de81e23b6d909c094e23b3d357e01ca36b78a8c5424c501eedbe86f0" dependencies = [ "alloy-rlp", "bytes", @@ -265,14 +265,9 @@ checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] -[[package]] -name = "amcl" -version = "0.3.0" -source = "git+https://github.com/sigp/milagro_bls?tag=v1.5.1#d3fc0a40cfe8b72ccda46ba050ee6786a59ce753" - [[package]] name = "android-tzdata" version = "0.1.1" @@ -338,9 +333,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" [[package]] name = "arbitrary" @@ -556,7 +551,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3" dependencies = [ "concurrent-queue", - "event-listener 5.1.0", + "event-listener 5.2.0", "event-listener-strategy 0.5.0", "futures-core", "pin-project-lite", @@ -726,7 +721,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -796,13 +791,13 @@ dependencies = [ [[package]] name = "auto_impl" -version = "1.1.2" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "823b8bb275161044e2ac7a25879cb3e2480cb403e3943022c7c769c599b756aa" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -824,7 +819,7 @@ dependencies = [ "http 1.0.0", "http-body 1.0.0", "http-body-util", - "hyper 1.1.0", + "hyper 1.2.0", "hyper-util", "itoa", "matchit", @@ -933,7 +928,6 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "execution_layer", - "exit-future", "fork_choice", "futures", "genesis", @@ -980,7 +974,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "5.0.0" +version = "5.1.1" dependencies = [ "beacon_chain", "clap 4.5.1", @@ -996,7 +990,7 @@ dependencies = [ "genesis", "hex", 
"http_api", - "hyper 1.1.0", + "hyper 1.2.0", "lighthouse_network", "lighthouse_version", "monitoring_api", @@ -1086,7 +1080,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.49", + "syn 2.0.52", "which", ] @@ -1202,7 +1196,6 @@ dependencies = [ "ethereum_serde_utils", "ethereum_ssz", "hex", - "milagro_bls", "rand", "serde", "tree_hash", @@ -1233,7 +1226,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "5.0.0" +version = "5.1.1" dependencies = [ "beacon_node", "clap 4.5.1", @@ -1285,9 +1278,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.15.0" +version = "3.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32a994c2b3ca201d9b263612a374263f05e7adde37c4707f693dcd375076d1f" +checksum = "8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b" [[package]] name = "byte-slice-cast" @@ -1386,7 +1379,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_json", "thiserror", @@ -1400,9 +1393,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.83" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" dependencies = [ "jobserver", "libc", @@ -1456,7 +1449,7 @@ dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -1630,9 +1623,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18d59688ad0945eaf6b84cb44fedbe93484c81b48970e98f09db8a22832d7961" +checksum = "efbd12d49ab0eaf8193ba9175e45f56bbc2e4b27d57b8cfe62aa47942a46b9a9" dependencies = [ "cfg-if", "cpufeatures", @@ -1740,9 +1733,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" +checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" dependencies = [ "crossbeam-utils", ] @@ -1907,7 +1900,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -2109,7 +2102,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -2148,7 +2141,7 @@ dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -2168,7 +2161,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -2281,7 +2274,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -2464,7 +2457,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -2494,10 +2487,10 @@ dependencies = [ name = 
"environment" version = "0.1.2" dependencies = [ + "async-channel 1.9.0", "ctrlc", "eth2_config", "eth2_network_config", - "exit-future", "futures", "logging", "serde", @@ -3020,9 +3013,9 @@ dependencies = [ [[package]] name = "event-listener" -version = "5.1.0" +version = "5.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7ad6fd685ce13acd6d9541a30f6db6567a7a24c9ffd4ba2955d29e3f22c8b27" +checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" dependencies = [ "concurrent-queue", "parking", @@ -3045,7 +3038,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "feedafcaa9b749175d5ac357452a9d41ea2911da598fde46ce1fe02c37751291" dependencies = [ - "event-listener 5.1.0", + "event-listener 5.2.0", "pin-project-lite", ] @@ -3053,12 +3046,12 @@ dependencies = [ name = "execution_engine_integration" version = "0.1.0" dependencies = [ + "async-channel 1.9.0", "deposit_contract", "environment", "ethers-core", "ethers-providers", "execution_layer", - "exit-future", "fork_choice", "futures", "hex", @@ -3089,7 +3082,6 @@ dependencies = [ "ethereum_serde_utils", "ethereum_ssz", "ethers-core", - "exit-future", "fork_choice", "futures", "hash-db", @@ -3126,15 +3118,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "exit-future" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" -dependencies = [ - "futures", -] - [[package]] name = "eyre" version = "0.6.12" @@ -3437,7 +3420,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -3475,9 +3458,9 @@ dependencies = [ [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" @@ -3594,7 +3577,7 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -3649,7 +3632,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.11", - "indexmap 2.2.3", + "indexmap 2.2.5", "slab", "tokio", "tokio-util 0.7.10", @@ -3668,7 +3651,7 @@ dependencies = [ "futures-sink", "futures-util", "http 1.0.0", - "indexmap 2.2.3", + "indexmap 2.2.5", "slab", "tokio", "tokio-util 0.7.10", @@ -3677,9 +3660,9 @@ dependencies = [ [[package]] name = "half" -version = "1.8.2" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" [[package]] name = "hash-db" @@ -3780,9 +3763,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.6" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd5256b483761cd23699d0da46cc6fd2ee3be420bbe6d020ae4a091e70b7e9fd" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -3822,7 +3805,7 @@ dependencies = [ "ipnet", "once_cell", "rand", - "socket2 0.5.5", + "socket2 0.5.6", "thiserror", "tinyvec", "tokio", @@ -4079,7 +4062,7 @@ dependencies = [ 
"httpdate", "itoa", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", "tower-service", "tracing", @@ -4088,9 +4071,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75" +checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a" dependencies = [ "bytes", "futures-channel", @@ -4102,6 +4085,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", + "smallvec", "tokio", ] @@ -4142,9 +4126,9 @@ dependencies = [ "futures-util", "http 1.0.0", "http-body 1.0.0", - "hyper 1.1.0", + "hyper 1.2.0", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", ] @@ -4197,17 +4181,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "if-addrs" -version = "0.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2273e421f7c4f0fc99e1934fe4776f59d8df2972f4199d703fc0da9f2a9f73de" -dependencies = [ - "if-addrs-sys", - "libc", - "winapi", -] - [[package]] name = "if-addrs" version = "0.10.2" @@ -4218,16 +4191,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "if-addrs-sys" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de74b9dd780476e837e5eb5ab7c88b49ed304126e412030a0adba99c8efe79ea" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "if-watch" version = "3.2.0" @@ -4238,7 +4201,7 @@ dependencies = [ "core-foundation", "fnv", "futures", - "if-addrs 0.10.2", + "if-addrs", "ipnet", "log", "rtnetlink", @@ -4340,9 +4303,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.3" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -4393,7 +4356,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.6", + "hermit-abi 0.3.9", "libc", "windows-sys 0.48.0", ] @@ -4404,7 +4367,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.5", + "socket2 0.5.6", "widestring 1.0.2", "windows-sys 0.48.0", "winreg", @@ -4416,6 +4379,17 @@ version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +[[package]] +name = "is-terminal" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "itertools" version = "0.10.5" @@ -4592,7 +4566,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "5.0.0" +version = "5.1.1" dependencies = [ "account_utils", "beacon_chain", @@ -4685,12 +4659,12 @@ dependencies = [ [[package]] name = "libloading" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" +checksum = "2caa5afb8bf9f3a2652760ce7d4f62d21c4d5a423e68466fca30df82f2330164" dependencies = [ "cfg-if", - "windows-sys 0.48.0", + "windows-targets 0.52.4", ] [[package]] @@ -4876,7 +4850,7 @@ dependencies = [ "libp2p-swarm", "rand", "smallvec", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", "tracing", "void", @@ -4977,7 +4951,7 @@ dependencies = [ "rand", "ring 0.16.20", "rustls", - "socket2 0.5.5", + "socket2 0.5.6", "thiserror", "tokio", "tracing", @@ -5015,7 +4989,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -5030,7 +5004,7 @@ dependencies = [ "libc", "libp2p-core", "libp2p-identity", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", "tracing", ] @@ -5168,7 +5142,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "5.0.0" +version = "5.1.1" dependencies = [ "account_manager", "account_utils", @@ -5236,7 +5210,6 @@ dependencies = [ "error-chain", "ethereum_ssz", "ethereum_ssz_derive", - "exit-future", "fnv", "futures", "futures-ticker", @@ -5359,9 +5332,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" dependencies = [ "value-bag", ] @@ -5391,9 +5364,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2c024b41519440580066ba82aab04092b333e09066a5eb86c7c4890df31f22" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ "hashbrown 0.14.3", ] @@ -5567,18 +5540,6 @@ dependencies = [ "quote", ] -[[package]] -name = "milagro_bls" -version = "1.5.1" -source = "git+https://github.com/sigp/milagro_bls?tag=v1.5.1#d3fc0a40cfe8b72ccda46ba050ee6786a59ce753" -dependencies = [ - "amcl", - "hex", - "lazy_static", - "rand", - "zeroize", -] - [[package]] name = "mime" version = "0.3.17" @@ -5612,9 +5573,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi", @@ -5794,6 +5755,8 @@ dependencies = [ name = "network" version = "0.2.0" dependencies = [ + "anyhow", + "async-channel 1.9.0", "beacon_chain", "beacon_processor", "delay_map", @@ -5804,12 +5767,10 @@ dependencies = [ "ethereum-types 0.14.1", "ethereum_ssz", "execution_layer", - "exit-future", "fnv", "futures", "genesis", "hex", - "if-addrs 0.6.7", "igd-next", "itertools", "lazy_static", @@ -5984,16 +5945,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.6", - "libc", -] - -[[package]] -name = "num_threads" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" -dependencies = [ + "hermit-abi 0.3.9", "libc", ] @@ -6036,9 +5988,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "opaque-debug" -version = "0.3.0" +version = 
"0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "open-fastrlp" @@ -6067,9 +6019,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.63" +version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c9d69dd87a29568d4d017cfe8ec518706046a05184e5aea92d0af890b803c8" +checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ "bitflags 2.4.2", "cfg-if", @@ -6088,7 +6040,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -6108,9 +6060,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.99" +version = "0.9.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e1bf214306098e4832460f797824c05d25aacdf896f64a985fb0fd992454ae" +checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff" dependencies = [ "cc", "libc", @@ -6346,9 +6298,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.7" +version = "2.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219c0dcc30b6a27553f9cc242972b67f75b60eb0db71f0b5462f38b058c41546" +checksum = "56f8023d0fb78c8e03784ea1c7f3fa36e68a723138990b8d5a47d916b651e7a8" dependencies = [ "memchr", "thiserror", @@ -6400,7 +6352,7 @@ checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -6622,7 +6574,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ "proc-macro2", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -6739,7 +6691,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -6906,7 +6858,7 @@ checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" dependencies = [ "bytes", "libc", - "socket2 0.5.5", + "socket2 0.5.6", "tracing", "windows-sys 0.48.0", ] @@ -6994,9 +6946,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" +checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" dependencies = [ "either", "rayon-core", @@ -7258,9 +7210,9 @@ dependencies = [ [[package]] name = "ruint" -version = "1.11.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608a5726529f2f0ef81b8fde9873c4bb829d6b5b5ca6be4d97345ddf0749c825" +checksum = "49b1d9521f889713d1221270fdd63370feca7e5c71a18745343402fa86e4f04f" dependencies = [ "alloy-rlp", "ark-ff 0.3.0", @@ -7282,9 +7234,9 @@ dependencies = [ [[package]] name = "ruint-macro" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e666a5496a0b2186dbcd0ff6106e29e093c15591bde62c20d3842007c6978a09" +checksum = "f86854cf50259291520509879a5c294c3c9a4c334e9ff65071c51e42ef1e2343" [[package]] name = "rusqlite" 
@@ -7333,7 +7285,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.21", + "semver 1.0.22", ] [[package]] @@ -7448,9 +7400,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" [[package]] name = "safe_arith" @@ -7612,9 +7564,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" dependencies = [ "serde", ] @@ -7644,9 +7596,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] @@ -7673,20 +7625,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] name = "serde_json" -version = "1.0.113" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ "itoa", "ryu", @@ -7711,7 +7663,7 @@ checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -7759,11 +7711,11 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.31" +version = "0.9.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adf8a49373e98a4c5f0ceb5d05aa7c648d75f63774981ed95b7c7443bbd50c6e" +checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "itoa", "ryu", "serde", @@ -8060,11 +8012,11 @@ dependencies = [ [[package]] name = "slog-term" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87d29185c55b7b258b4f120eab00f48557d4d9bc814f41713f449d35b0f8977c" +checksum = "b6e022d0b998abfe5c3782c1f03551a596269450ccd677ea51c56f8b214610e8" dependencies = [ - "atty", + "is-terminal", "slog", "term", "thread_local", @@ -8146,12 +8098,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -8360,9 +8312,9 @@ dependencies = [ 
[[package]] name = "syn" -version = "2.0.49" +version = "2.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915aea9e586f80826ee59f8453c1101f9d1c4b3964cd2460185ee8e299ada496" +checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" dependencies = [ "proc-macro2", "quote", @@ -8464,7 +8416,7 @@ checksum = "c63f48baada5c52e65a29eef93ab4f8982681b67f9e8d29c7b05abcfec2b9ffe" name = "task_executor" version = "0.1.0" dependencies = [ - "exit-future", + "async-channel 1.9.0", "futures", "lazy_static", "lighthouse_metrics", @@ -8475,9 +8427,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand 2.0.1", @@ -8564,14 +8516,14 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if", "once_cell", @@ -8594,9 +8546,7 @@ checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" dependencies = [ "deranged", "itoa", - "libc", "num-conv", - "num_threads", "powerfmt", "serde", "time-core", @@ -8696,7 +8646,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.5", + "socket2 0.5.6", "tokio-macros", "windows-sys 0.48.0", ] @@ -8719,7 +8669,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -8752,7 +8702,7 @@ dependencies = [ "postgres-protocol", "postgres-types", "rand", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", "tokio-util 0.7.10", "whoami", @@ -8847,7 +8797,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "serde", "serde_spanned", "toml_datetime", @@ -8860,7 +8810,7 @@ version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "toml_datetime", "winnow", ] @@ -8925,7 +8875,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] @@ -9146,9 +9096,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] @@ -9281,11 +9231,10 @@ dependencies = [ "eth2", "eth2_keystore", "ethereum_serde_utils", - "exit-future", "filesystem", "futures", "hex", - "hyper 1.1.0", + "hyper 1.2.0", 
"itertools", "lazy_static", "libsecp256k1", @@ -9411,9 +9360,9 @@ checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -9484,6 +9433,12 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + [[package]] name = "wasm-bindgen" version = "0.2.91" @@ -9505,7 +9460,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", "wasm-bindgen-shared", ] @@ -9539,7 +9494,7 @@ checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9595,7 +9550,7 @@ dependencies = [ "eth2", "hex", "http_api", - "hyper 1.1.0", + "hyper 1.2.0", "log", "logging", "network", @@ -9629,10 +9584,10 @@ name = "web3signer_tests" version = "0.1.0" dependencies = [ "account_utils", + "async-channel 1.9.0", "environment", "eth2_keystore", "eth2_network_config", - "exit-future", "futures", "lazy_static", "parking_lot 0.12.1", @@ -9670,11 +9625,12 @@ dependencies = [ [[package]] name = "whoami" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" +checksum = "0fec781d48b41f8163426ed18e8fc2864c12937df9ce54c88ede7bd47270893e" dependencies = [ - "wasm-bindgen", + "redox_syscall 0.4.1", + "wasite", "web-sys", ] @@ -9758,7 +9714,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -9785,7 +9741,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -9820,17 +9776,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + "windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + "windows_x86_64_msvc 0.52.4", ] [[package]] @@ -9847,9 +9803,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" 
[[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" [[package]] name = "windows_aarch64_msvc" @@ -9865,9 +9821,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" [[package]] name = "windows_i686_gnu" @@ -9883,9 +9839,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" [[package]] name = "windows_i686_msvc" @@ -9901,9 +9857,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" [[package]] name = "windows_x86_64_gnu" @@ -9919,9 +9875,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" [[package]] name = "windows_x86_64_gnullvm" @@ -9937,9 +9893,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" [[package]] name = "windows_x86_64_msvc" @@ -9955,9 +9911,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" [[package]] name = "winnow" @@ -10083,8 +10039,7 @@ dependencies = [ [[package]] name = "yamux" version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1d0148b89300047e72994bee99ecdabd15a9166a7b70c8b8c37c314dcc9002" +source = "git+https://github.com/sigp/rust-yamux.git#12a23aa0e34b7807c0c5f87f06b3438f7d6c2ed0" dependencies = [ "futures", "instant", @@ -10122,7 +10077,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] 
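Across the stanzas above, the unmaintained `exit-future` crate is dropped in favour of `async-channel` (in `environment`, `execution_engine_integration`, `network`, `task_executor`, `web3signer_tests`, and others). A hedged sketch of the shutdown pattern a closed channel enables; the task and function names here are illustrative, not Lighthouse's actual API:

```rust
// Sketch, assuming async-channel 1.9 and tokio: a closed channel doubles as
// an exit signal, which is what `exit-future` previously provided. Dropping
// the last sender closes the channel, so every pending `recv()` returns
// `Err` and the task can shut down.
use async_channel::{bounded, Receiver};
use std::time::Duration;

async fn run_until_shutdown(exit: Receiver<()>) {
    loop {
        tokio::select! {
            _ = tokio::time::sleep(Duration::from_millis(100)) => {
                // Real work would happen here.
            }
            _ = exit.recv() => break, // Channel closed: shutdown signalled.
        }
    }
}

#[tokio::main]
async fn main() {
    let (exit_tx, exit_rx) = bounded::<()>(1);
    let task = tokio::spawn(run_until_shutdown(exit_rx));
    drop(exit_tx); // Signalling shutdown is just dropping the sender.
    task.await.unwrap();
}
```

One property that makes the swap convenient: `async_channel::Receiver` is cloneable, so dropping a single sender can wake any number of listening tasks at once.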
[[package]] @@ -10142,7 +10097,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.52", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 7f462991c93..9834475759a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -94,7 +94,9 @@ resolver = "2" edition = "2021" [workspace.dependencies] +anyhow = "1" arbitrary = { version = "1", features = ["derive"] } +async-channel = "1.9.0" bincode = "1" bitvec = "1" byteorder = "1" @@ -111,7 +113,7 @@ env_logger = "0.9" error-chain = "0.12" ethereum-types = "0.14" ethereum_hashing = "1.0.0-beta.2" -ethereum_serde_utils = "0.5" +ethereum_serde_utils = "0.5.2" ethereum_ssz = "0.5" ethereum_ssz_derive = "0.5" ethers-core = "1" @@ -229,6 +231,9 @@ validator_client = { path = "validator_client" } validator_dir = { path = "common/validator_dir" } warp_utils = { path = "common/warp_utils" } +[patch.crates-io] +yamux = { git = "https://github.com/sigp/rust-yamux.git" } + [profile.maxperf] inherits = "release" lto = "fat" diff --git a/Makefile b/Makefile index 8392d001705..6b6418cb83d 100644 --- a/Makefile +++ b/Makefile @@ -143,7 +143,6 @@ run-ef-tests: rm -rf $(EF_TESTS)/.accessed_file_log.txt cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)" cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto" - cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests # Runs EF test vectors with nextest @@ -151,7 +150,6 @@ nextest-run-ef-tests: rm -rf $(EF_TESTS)/.accessed_file_log.txt cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)" cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto" - cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests # Run the tests in the `beacon_chain` crate for all known forks. diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index f960251e7a3..60a9f95a2be 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "5.0.0" +version = "5.1.1" authors = [ "Paul Hauner ", "Age Manning BeaconChain { (re_org_state.pre_state, re_org_state.state_root) } // Normal case: proposing a block atop the current head using the cache. - else if let Some((_, cached_state)) = self - .block_production_state - .lock() - .take() - .filter(|(cached_block_root, _)| *cached_block_root == head_block_root) + else if let Some((_, cached_state)) = + self.get_state_from_block_production_cache(head_block_root) { (cached_state.pre_state, cached_state.state_root) } // Fall back to a direct read of the snapshot cache. - else if let Some(pre_state) = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_state_for_block_production(head_block_root) - }) + else if let Some(pre_state) = + self.get_state_from_snapshot_cache_for_block_production(head_block_root) { warn!( self.log, @@ -4221,6 +4214,40 @@ impl BeaconChain { Ok((state, state_root_opt)) } + /// Get the state cached for block production *if* it matches `head_block_root`. 
+ /// + /// This will clear the cache regardless of whether the block root matches, so only call this if + /// you think the `head_block_root` is likely to match! + fn get_state_from_block_production_cache( + &self, + head_block_root: Hash256, + ) -> Option<(Hash256, BlockProductionPreState)> { + // Take care to drop the lock as quickly as possible. + let mut lock = self.block_production_state.lock(); + let result = lock + .take() + .filter(|(cached_block_root, _)| *cached_block_root == head_block_root); + drop(lock); + result + } + + /// Get a state for block production from the snapshot cache. + fn get_state_from_snapshot_cache_for_block_production( + &self, + head_block_root: Hash256, + ) -> Option> { + if let Some(lock) = self + .snapshot_cache + .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + { + let result = lock.get_state_for_block_production(head_block_root); + drop(lock); + result + } else { + None + } + } + /// Fetch the beacon state to use for producing a block if a 1-slot proposer re-org is viable. /// /// This function will return `None` if proposer re-orgs are disabled. @@ -4313,12 +4340,8 @@ impl BeaconChain { // Only attempt a re-org if we hit the block production cache or snapshot cache. let pre_state = self - .block_production_state - .lock() - .take() - .and_then(|(cached_block_root, state)| { - (cached_block_root == re_org_parent_block).then_some(state) - }) + .get_state_from_block_production_cache(re_org_parent_block) + .map(|(_, state)| state) .or_else(|| { warn!( self.log, @@ -4327,11 +4350,7 @@ impl BeaconChain { "slot" => slot, "block_root" => ?re_org_parent_block ); - self.snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_state_for_block_production(re_org_parent_block) - }) + self.get_state_from_snapshot_cache_for_block_production(re_org_parent_block) }) .or_else(|| { debug!( diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index c75c3f695b3..dd4b612f60b 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -12,7 +12,7 @@ use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; -use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE}; +use crate::snapshot_cache::SnapshotCache; use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}; use crate::validator_pubkey_cache::ValidatorPubkeyCache; @@ -870,6 +870,7 @@ where let head_for_snapshot_cache = head_snapshot.clone(); let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); let shuffling_cache_size = self.chain_config.shuffling_cache_size; + let snapshot_cache_size = self.chain_config.snapshot_cache_size; // Calculate the weak subjectivity point in which to backfill blocks to. 
let genesis_backfill_slot = if self.chain_config.genesis_backfill { @@ -946,7 +947,7 @@ where event_handler: self.event_handler, head_tracker, snapshot_cache: TimeoutRwLock::new(SnapshotCache::new( - DEFAULT_SNAPSHOT_CACHE_SIZE, + snapshot_cache_size, head_for_snapshot_cache, )), shuffling_cache: TimeoutRwLock::new(ShufflingCache::new( diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 23e17a6efad..36481b4dcd0 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -72,6 +72,8 @@ pub struct ChainConfig { pub optimistic_finalized_sync: bool, /// The size of the shuffling cache, pub shuffling_cache_size: usize, + /// The size of the snapshot cache. + pub snapshot_cache_size: usize, /// If using a weak-subjectivity sync, whether we should download blocks all the way back to /// genesis. pub genesis_backfill: bool, @@ -112,6 +114,7 @@ impl Default for ChainConfig { // This value isn't actually read except in tests. optimistic_finalized_sync: true, shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE, + snapshot_cache_size: crate::snapshot_cache::DEFAULT_SNAPSHOT_CACHE_SIZE, genesis_backfill: false, always_prepare_payload: false, progressive_balances_mode: ProgressiveBalancesMode::Fast, diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 522009b1b27..529f269be10 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -50,7 +50,7 @@ mod pre_finalization_cache; pub mod proposer_prep_service; pub mod schema_change; pub mod shuffling_cache; -mod snapshot_cache; +pub mod snapshot_cache; pub mod state_advance_timer; pub mod sync_committee_rewards; pub mod sync_committee_verification; diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index d2846c08569..765ed0cb2aa 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -9,7 +9,7 @@ use types::{ }; /// The default size of the cache. -pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 4; +pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 3; /// The minimum block delay to clone the state in the cache instead of removing it. /// This helps keep block processing fast during re-orgs from late blocks. @@ -174,6 +174,7 @@ impl SnapshotCache { self.snapshots.iter().map(|s| s.beacon_block_root).collect() } + #[allow(clippy::len_without_is_empty)] /// The number of snapshots contained in `self`. pub fn len(&self) -> usize { self.snapshots.len() diff --git a/beacon_node/client/src/address_change_broadcast.rs b/beacon_node/client/src/address_change_broadcast.rs deleted file mode 100644 index 69614159fec..00000000000 --- a/beacon_node/client/src/address_change_broadcast.rs +++ /dev/null @@ -1,322 +0,0 @@ -use crate::*; -use lighthouse_network::PubsubMessage; -use network::NetworkMessage; -use slog::{debug, info, warn, Logger}; -use slot_clock::SlotClock; -use std::cmp; -use std::collections::HashSet; -use std::mem; -use std::time::Duration; -use tokio::sync::mpsc::UnboundedSender; -use tokio::time::sleep; -use types::EthSpec; - -/// The size of each chunk of addresses changes to be broadcast at the Capella -/// fork. -const BROADCAST_CHUNK_SIZE: usize = 128; -/// The delay between broadcasting each chunk. 
-const BROADCAST_CHUNK_DELAY: Duration = Duration::from_millis(500); - -/// If the Capella fork has already been reached, `broadcast_address_changes` is -/// called immediately. -/// -/// If the Capella fork has not been reached, waits until the start of the fork -/// epoch and then calls `broadcast_address_changes`. -pub async fn broadcast_address_changes_at_capella( - chain: &BeaconChain, - network_send: UnboundedSender>, - log: &Logger, -) { - let spec = &chain.spec; - let slot_clock = &chain.slot_clock; - - let capella_fork_slot = if let Some(epoch) = spec.capella_fork_epoch { - epoch.start_slot(T::EthSpec::slots_per_epoch()) - } else { - // Exit now if Capella is not defined. - return; - }; - - // Wait until the Capella fork epoch. - while chain.slot().map_or(true, |slot| slot < capella_fork_slot) { - match slot_clock.duration_to_slot(capella_fork_slot) { - Some(duration) => { - // Sleep until the Capella fork. - sleep(duration).await; - break; - } - None => { - // We were unable to read the slot clock wait another slot - // and then try again. - sleep(slot_clock.slot_duration()).await; - } - } - } - - // The following function will be called in two scenarios: - // - // 1. The node has been running for some time and the Capella fork has just - // been reached. - // 2. The node has just started and it is *after* the Capella fork. - broadcast_address_changes(chain, network_send, log).await -} - -/// Broadcasts any address changes that are flagged for broadcasting at the -/// Capella fork epoch. -/// -/// Address changes are published in chunks, with a delay between each chunk. -/// This helps reduce the load on the P2P network and also helps prevent us from -/// clogging our `network_send` channel and being late to publish -/// blocks, attestations, etc. -pub async fn broadcast_address_changes( - chain: &BeaconChain, - network_send: UnboundedSender>, - log: &Logger, -) { - let head = chain.head_snapshot(); - let mut changes = chain - .op_pool - .get_bls_to_execution_changes_received_pre_capella(&head.beacon_state, &chain.spec); - - while !changes.is_empty() { - // This `split_off` approach is to allow us to have owned chunks of the - // `changes` vec. The `std::slice::Chunks` method uses references and - // the `itertools` iterator that achives this isn't `Send` so it doesn't - // work well with the `sleep` at the end of the loop. - let tail = changes.split_off(cmp::min(BROADCAST_CHUNK_SIZE, changes.len())); - let chunk = mem::replace(&mut changes, tail); - - let mut published_indices = HashSet::with_capacity(BROADCAST_CHUNK_SIZE); - let mut num_ok = 0; - let mut num_err = 0; - - // Publish each individual address change. - for address_change in chunk { - let validator_index = address_change.message.validator_index; - - let pubsub_message = PubsubMessage::BlsToExecutionChange(Box::new(address_change)); - let message = NetworkMessage::Publish { - messages: vec![pubsub_message], - }; - // It seems highly unlikely that this unbounded send will fail, but - // we handle the result nonetheless. - if let Err(e) = network_send.send(message) { - debug!( - log, - "Failed to publish change message"; - "error" => ?e, - "validator_index" => validator_index - ); - num_err += 1; - } else { - debug!( - log, - "Published address change message"; - "validator_index" => validator_index - ); - num_ok += 1; - published_indices.insert(validator_index); - } - } - - // Remove any published indices from the list of indices that need to be - // published. 
- chain - .op_pool - .register_indices_broadcasted_at_capella(&published_indices); - - info!( - log, - "Published address change messages"; - "num_published" => num_ok, - ); - - if num_err > 0 { - warn!( - log, - "Failed to publish address changes"; - "info" => "failed messages will be retried", - "num_unable_to_publish" => num_err, - ); - } - - sleep(BROADCAST_CHUNK_DELAY).await; - } - - debug!( - log, - "Address change routine complete"; - ); -} - -#[cfg(not(debug_assertions))] // Tests run too slow in debug. -#[cfg(test)] -mod tests { - use super::*; - use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; - use operation_pool::ReceivedPreCapella; - use state_processing::{SigVerifiedOp, VerifyOperation}; - use std::collections::HashSet; - use tokio::sync::mpsc; - use types::*; - - type E = MainnetEthSpec; - - pub const VALIDATOR_COUNT: usize = BROADCAST_CHUNK_SIZE * 3; - pub const EXECUTION_ADDRESS: Address = Address::repeat_byte(42); - - struct Tester { - harness: BeaconChainHarness>, - /// Changes which should be broadcast at the Capella fork. - received_pre_capella_changes: Vec>, - /// Changes which should *not* be broadcast at the Capella fork. - not_received_pre_capella_changes: Vec>, - } - - impl Tester { - fn new() -> Self { - let altair_fork_epoch = Epoch::new(0); - let bellatrix_fork_epoch = Epoch::new(0); - let capella_fork_epoch = Epoch::new(2); - - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(altair_fork_epoch); - spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); - spec.capella_fork_epoch = Some(capella_fork_epoch); - - let harness = BeaconChainHarness::builder(E::default()) - .spec(spec) - .logger(logging::test_logger()) - .deterministic_keypairs(VALIDATOR_COUNT) - .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); - - Self { - harness, - received_pre_capella_changes: <_>::default(), - not_received_pre_capella_changes: <_>::default(), - } - } - - fn produce_verified_address_change( - &self, - validator_index: u64, - ) -> SigVerifiedOp { - let change = self - .harness - .make_bls_to_execution_change(validator_index, EXECUTION_ADDRESS); - let head = self.harness.chain.head_snapshot(); - - change - .validate(&head.beacon_state, &self.harness.spec) - .unwrap() - } - - fn produce_received_pre_capella_changes(mut self, indices: Vec) -> Self { - for validator_index in indices { - self.received_pre_capella_changes - .push(self.produce_verified_address_change(validator_index)); - } - self - } - - fn produce_not_received_pre_capella_changes(mut self, indices: Vec) -> Self { - for validator_index in indices { - self.not_received_pre_capella_changes - .push(self.produce_verified_address_change(validator_index)); - } - self - } - - async fn run(self) { - let harness = self.harness; - let chain = harness.chain.clone(); - - let mut broadcast_indices = HashSet::new(); - for change in self.received_pre_capella_changes { - broadcast_indices.insert(change.as_inner().message.validator_index); - chain - .op_pool - .insert_bls_to_execution_change(change, ReceivedPreCapella::Yes); - } - - let mut non_broadcast_indices = HashSet::new(); - for change in self.not_received_pre_capella_changes { - non_broadcast_indices.insert(change.as_inner().message.validator_index); - chain - .op_pool - .insert_bls_to_execution_change(change, ReceivedPreCapella::No); - } - - harness.set_current_slot( - chain - .spec - .capella_fork_epoch - .unwrap() - .start_slot(E::slots_per_epoch()), - ); - - let 
(sender, mut receiver) = mpsc::unbounded_channel(); - - broadcast_address_changes_at_capella(&chain, sender, &logging::test_logger()).await; - - let mut broadcasted_changes = vec![]; - while let Some(NetworkMessage::Publish { mut messages }) = receiver.recv().await { - match messages.pop().unwrap() { - PubsubMessage::BlsToExecutionChange(change) => broadcasted_changes.push(change), - _ => panic!("unexpected message"), - } - } - - assert_eq!( - broadcasted_changes.len(), - broadcast_indices.len(), - "all expected changes should have been broadcast" - ); - - for broadcasted in &broadcasted_changes { - assert!( - !non_broadcast_indices.contains(&broadcasted.message.validator_index), - "messages not flagged for broadcast should not have been broadcast" - ); - } - - let head = chain.head_snapshot(); - assert!( - chain - .op_pool - .get_bls_to_execution_changes_received_pre_capella( - &head.beacon_state, - &chain.spec, - ) - .is_empty(), - "there shouldn't be any capella broadcast changes left in the op pool" - ); - } - } - - // Useful for generating even-numbered indices. Required since only even - // numbered genesis validators have BLS credentials. - fn even_indices(start: u64, count: usize) -> Vec { - (start..).filter(|i| i % 2 == 0).take(count).collect() - } - - #[tokio::test] - async fn one_chunk() { - Tester::new() - .produce_received_pre_capella_changes(even_indices(0, 4)) - .produce_not_received_pre_capella_changes(even_indices(10, 4)) - .run() - .await; - } - - #[tokio::test] - async fn multiple_chunks() { - Tester::new() - .produce_received_pre_capella_changes(even_indices(0, BROADCAST_CHUNK_SIZE * 3 / 2)) - .run() - .await; - } -} diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 558e5cbc84f..243dd132408 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -1,4 +1,3 @@ -use crate::address_change_broadcast::broadcast_address_changes_at_capella; use crate::compute_light_client_updates::{ compute_light_client_updates, LIGHT_CLIENT_SERVER_CHANNEL_CAPACITY, }; @@ -920,25 +919,6 @@ where beacon_chain.slot_clock.clone(), ); } - - // Spawn a service to publish BLS to execution changes at the Capella fork. - if let Some(network_senders) = self.network_senders.clone() { - let inner_chain = beacon_chain.clone(); - let broadcast_context = - runtime_context.service_context("addr_bcast".to_string()); - let log = broadcast_context.log().clone(); - broadcast_context.executor.spawn( - async move { - broadcast_address_changes_at_capella( - &inner_chain, - network_senders.network_send(), - &log, - ) - .await - }, - "addr_broadcast", - ); - } } // Spawn service to publish light_client updates at some interval into the slot. 
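The service removed above published address changes in fixed-size chunks with a delay between chunks, using `Vec::split_off` plus `mem::replace` to obtain owned chunks (a borrowing `slice::chunks` iterator would make the future non-`Send` across the `sleep` await). A minimal, self-contained sketch of that pattern follows; the function name, chunk size, and delay are illustrative assumptions, not part of this diff:

```rust
// Sketch of the owned-chunk publishing loop (illustrative values).
use std::{cmp, mem, time::Duration};
use tokio::time::sleep;

const CHUNK_SIZE: usize = 128;
const CHUNK_DELAY: Duration = Duration::from_millis(500);

async fn publish_in_chunks<T>(mut items: Vec<T>, mut publish: impl FnMut(T)) {
    while !items.is_empty() {
        // Split off the tail, then swap it back in; `chunk` now owns the head.
        let tail = items.split_off(cmp::min(CHUNK_SIZE, items.len()));
        let chunk = mem::replace(&mut items, tail);
        for item in chunk {
            publish(item);
        }
        // Pause between chunks to avoid flooding the outbound network channel.
        sleep(CHUNK_DELAY).await;
    }
}
```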
diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 2f14d87efc0..fd92c282554 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,6 +1,5 @@ extern crate slog; -mod address_change_broadcast; mod compute_light_client_updates; pub mod config; mod metrics; diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 7fee3721d8f..ace8e24a8e4 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -29,7 +29,6 @@ kzg = { workspace = true } state_processing = { workspace = true } superstruct = { workspace = true } lru = { workspace = true } -exit-future = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } parking_lot = { workspace = true } diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index b1385399e89..b9527ed09db 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -86,7 +86,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { /// /// https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/beacon-chain.md#modified-verify_and_notify_new_payload pub fn perform_optimistic_sync_verifications(&self) -> Result<(), Error> { - self.verfiy_payload_block_hash()?; + self.verify_payload_block_hash()?; self.verify_versioned_hashes()?; Ok(()) @@ -98,7 +98,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { /// /// Equivalent to `is_valid_block_hash` in the spec: /// https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/beacon-chain.md#is_valid_block_hash - pub fn verfiy_payload_block_hash(&self) -> Result<(), Error> { + pub fn verify_payload_block_hash(&self) -> Result<(), Error> { let payload = self.execution_payload_ref(); let parent_beacon_block_root = self.parent_beacon_block_root().ok().cloned(); diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 664ceabb6cd..69b84adbb8f 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -7,6 +7,7 @@ use crate::payload_cache::PayloadCache; use arc_swap::ArcSwapOption; use auth::{strip_prefix, Auth, JwtKey}; +pub use block_hash::calculate_execution_block_hash; use builder_client::BuilderHttpClient; pub use engine_api::EngineCapabilities; use engine_api::Error as ApiError; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index a9b245e7987..5a8d5cae070 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -68,7 +68,7 @@ use std::path::PathBuf; use std::pin::Pin; use std::sync::Arc; use sysinfo::{System, SystemExt}; -use system_health::observe_system_health_bn; +use system_health::{observe_nat, observe_system_health_bn}; use task_spawner::{Priority, TaskSpawner}; use tokio::sync::{ mpsc::{Sender, UnboundedSender}, @@ -3448,34 +3448,34 @@ pub fn serve( chain: Arc>, log: Logger| { task_spawner.blocking_json_task(Priority::P0, move || { - for subscription in &subscriptions { - chain - .validator_monitor - .write() - .auto_register_local_validator(subscription.validator_index); - - let validator_subscription = api_types::ValidatorSubscription { - validator_index: subscription.validator_index, - attestation_committee_index: subscription.committee_index, - slot: subscription.slot, - 
committee_count_at_slot: subscription.committees_at_slot, - is_aggregator: subscription.is_aggregator, - }; - - let message = ValidatorSubscriptionMessage::AttestationSubscribe { - subscriptions: vec![validator_subscription], - }; - if let Err(e) = validator_subscription_tx.try_send(message) { - warn!( - log, - "Unable to process committee subscriptions"; - "info" => "the host may be overloaded or resource-constrained", - "error" => ?e, - ); - return Err(warp_utils::reject::custom_server_error( - "unable to queue subscription, host may be overloaded or shutting down".to_string(), - )); - } + let subscriptions: std::collections::BTreeSet<_> = subscriptions + .iter() + .map(|subscription| { + chain + .validator_monitor + .write() + .auto_register_local_validator(subscription.validator_index); + api_types::ValidatorSubscription { + attestation_committee_index: subscription.committee_index, + slot: subscription.slot, + committee_count_at_slot: subscription.committees_at_slot, + is_aggregator: subscription.is_aggregator, + } + }) + .collect(); + let message = + ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions }; + if let Err(e) = validator_subscription_tx.try_send(message) { + warn!( + log, + "Unable to process committee subscriptions"; + "info" => "the host may be overloaded or resource-constrained", + "error" => ?e, + ); + return Err(warp_utils::reject::custom_server_error( + "unable to queue subscription, host may be overloaded or shutting down" + .to_string(), + )); } Ok(()) @@ -3965,13 +3965,7 @@ pub fn serve( .and(warp::path::end()) .then(|task_spawner: TaskSpawner| { task_spawner.blocking_json_task(Priority::P1, move || { - Ok(api_types::GenericResponse::from( - lighthouse_network::metrics::NAT_OPEN - .as_ref() - .map(|v| v.get()) - .unwrap_or(0) - != 0, - )) + Ok(api_types::GenericResponse::from(observe_nat())) }) }); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index a7ba2c1ab86..098f9f10512 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -2721,6 +2721,31 @@ impl ApiTester { self } + /// Check that the metadata from the headers & JSON response body are consistent, and that the + /// consensus block value is non-zero. + fn check_block_v3_metadata( + metadata: &ProduceBlockV3Metadata, + response: &JsonProduceBlockV3Response, + ) { + // Compare fork name to ForkVersionedResponse rather than metadata consensus_version, which + // is deserialized to a dummy value. 
+ assert_eq!(Some(metadata.consensus_version), response.version); + assert_eq!(ForkName::Base, response.metadata.consensus_version); + assert_eq!( + metadata.execution_payload_blinded, + response.metadata.execution_payload_blinded + ); + assert_eq!( + metadata.execution_payload_value, + response.metadata.execution_payload_value + ); + assert_eq!( + metadata.consensus_block_value, + response.metadata.consensus_block_value + ); + assert!(!metadata.consensus_block_value.is_zero()); + } + pub async fn test_block_production_v3_ssz(self) -> Self { let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; @@ -3582,11 +3607,12 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: BlindedPayload = match payload_type.data { ProduceBlockV3Response::Blinded(payload) => { @@ -3608,11 +3634,12 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, Some(0)) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: FullPayload = match payload_type.data { ProduceBlockV3Response::Full(payload) => { @@ -3634,11 +3661,12 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, Some(u64::MAX)) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: BlindedPayload = match payload_type.data { ProduceBlockV3Response::Blinded(payload) => { @@ -3738,11 +3766,12 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: BlindedPayload = match payload_type.data { ProduceBlockV3Response::Blinded(payload) => { @@ -3814,11 +3843,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: BlindedPayload = match payload_type.data { ProduceBlockV3Response::Blinded(payload) => { @@ -3904,11 +3934,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: FullPayload = match payload_type.data { ProduceBlockV3Response::Full(payload) => { @@ -3990,11 +4021,12 @@ impl ApiTester { .unwrap(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + 
Self::check_block_v3_metadata(&metadata, &payload_type); let payload: FullPayload = match payload_type.data { ProduceBlockV3Response::Full(payload) => { @@ -4076,11 +4108,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: FullPayload = match payload_type.data { ProduceBlockV3Response::Full(payload) => { @@ -4160,11 +4193,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: FullPayload = match payload_type.data { ProduceBlockV3Response::Full(payload) => { @@ -4216,11 +4250,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Full(_) => (), @@ -4282,11 +4317,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Full(_) => (), @@ -4390,11 +4426,12 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Blinded(_) => (), @@ -4410,11 +4447,12 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Full(_) => (), @@ -4538,11 +4576,12 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Full(_) => (), @@ -4568,11 +4607,12 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Blinded(_) => (), @@ -4648,11 +4688,12 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) 
= self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let payload: FullPayload = match payload_type.data { ProduceBlockV3Response::Full(payload) => { @@ -4717,11 +4758,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Blinded(_) => (), @@ -4781,11 +4823,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Full(_) => (), @@ -4845,11 +4888,12 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Full(_) => (), @@ -4907,11 +4951,12 @@ impl ApiTester { let epoch = self.chain.epoch().unwrap(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); let _block_contents = match payload_type.data { ProduceBlockV3Response::Blinded(payload) => payload, @@ -4979,11 +5024,12 @@ impl ApiTester { let epoch = self.chain.epoch().unwrap(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let (payload_type, _) = self + let (payload_type, metadata) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); + Self::check_block_v3_metadata(&metadata, &payload_type); match payload_type.data { ProduceBlockV3Response::Full(_) => (), diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index cd0de37d3ba..17114180729 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dependencies] +async-channel = { workspace = true } discv5 = { workspace = true } unsigned-varint = { version = "0.6", features = ["codec"] } ssz_types = { workspace = true } @@ -55,7 +56,6 @@ hex_fmt = "0.3.0" instant = "0.1.12" quick-protobuf = "0.8" void = "1.0.2" -async-channel = "1.9.0" asynchronous-codec = "0.7.0" base64 = "0.21.5" libp2p-mplex = "0.41" @@ -64,13 +64,12 @@ quick-protobuf-codec = "0.3" [dependencies.libp2p] version = "0.53" default-features = false -features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic"] +features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic", "upnp"] [dev-dependencies] slog-term = { workspace = true } slog-async = { workspace = true } tempfile = { workspace = true } -exit-future = { workspace = true } quickcheck = { workspace = true } 
quickcheck_macros = { workspace = true } async-std = { version = "1.6.3", features = ["unstable"] } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 5b13730f971..02134580e0f 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -101,7 +101,7 @@ pub struct Config { /// List of libp2p nodes to initially connect to. pub libp2p_nodes: Vec, - /// List of trusted libp2p nodes which are not scored. + /// List of trusted libp2p nodes which are not scored and marked as explicit. pub trusted_peers: Vec, /// Disables peer scoring altogether. diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 6659ba1d26f..347c2352f18 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1004,7 +1004,10 @@ impl NetworkBehaviour for Discovery { discv5::Event::SocketUpdated(socket_addr) => { info!(self.log, "Address updated"; "ip" => %socket_addr.ip(), "udp_port" => %socket_addr.port()); metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT); - metrics::check_nat(); + // We have SOCKET_UPDATED messages. This occurs when discovery has a majority of + // users reporting an external port and our ENR gets updated, + // which means we are able to do NAT traversal. + metrics::set_gauge_vec(&metrics::NAT_OPEN, &["discv5"], 1); // Discv5 will have updated our local ENR. We save the updated version // to disk. diff --git a/beacon_node/lighthouse_network/src/gossipsub/behaviour.rs b/beacon_node/lighthouse_network/src/gossipsub/behaviour.rs index 9769adca278..10025626d31 100644 --- a/beacon_node/lighthouse_network/src/gossipsub/behaviour.rs +++ b/beacon_node/lighthouse_network/src/gossipsub/behaviour.rs @@ -635,9 +635,33 @@ where || !self.score_below_threshold(p, |ts| ts.publish_threshold).0 })); } else { - match self.mesh.get(&raw_message.topic) { + match self.mesh.get(&topic_hash) { // Mesh peers Some(mesh_peers) => { + // We have a mesh set. We want to make sure to publish to at least `mesh_n` + // peers (if possible). + let needed_extra_peers = self.config.mesh_n().saturating_sub(mesh_peers.len()); + + if needed_extra_peers > 0 { + // We don't have `mesh_n` peers in our mesh, so we will randomly select extras + // and publish to them. + + // Get a random set of peers that are appropriate to send messages to. + let peer_list = get_random_peers( + &self.connected_peers, + &topic_hash, + needed_extra_peers, + |peer| { + !mesh_peers.contains(peer) + && !self.explicit_peers.contains(peer) + && !self + .score_below_threshold(peer, |pst| pst.publish_threshold) + .0 + }, + ); + recipient_peers.extend(peer_list); + } + recipient_peers.extend(mesh_peers); } // Gossipsub peers @@ -729,10 +753,14 @@ where } } - if publish_failed { + if recipient_peers.is_empty() { return Err(PublishError::InsufficientPeers); } + if publish_failed { + return Err(PublishError::AllQueuesFull(recipient_peers.len())); + } + tracing::debug!(message=%msg_id, "Published message"); if let Some(metrics) = self.metrics.as_mut() { @@ -826,6 +854,13 @@ where } } + /// Register topics to ensure metrics are recorded correctly for these topics. + pub fn register_topics_for_metrics(&mut self, topics: Vec) { + if let Some(metrics) = &mut self.metrics { + metrics.register_allowed_topics(topics); + } + } + /// Adds a new peer to the list of explicitly connected peers.
pub fn add_explicit_peer(&mut self, peer_id: &PeerId) { tracing::debug!(peer=%peer_id, "Adding explicit peer"); @@ -2203,10 +2238,9 @@ where if outbound <= self.config.mesh_outbound_min() { // do not remove anymore outbound peers continue; - } else { - // an outbound peer gets removed - outbound -= 1; } + // an outbound peer gets removed + outbound -= 1; } // remove the peer diff --git a/beacon_node/lighthouse_network/src/gossipsub/behaviour/tests.rs b/beacon_node/lighthouse_network/src/gossipsub/behaviour/tests.rs index eb006e52928..f191d38f515 100644 --- a/beacon_node/lighthouse_network/src/gossipsub/behaviour/tests.rs +++ b/beacon_node/lighthouse_network/src/gossipsub/behaviour/tests.rs @@ -741,8 +741,8 @@ fn test_publish_without_flood_publishing() { let config: Config = Config::default(); assert_eq!( publishes.len(), - config.mesh_n_low(), - "Should send a publish message to all known peers" + config.mesh_n(), + "Should send a publish message to at least mesh_n peers" ); assert!( diff --git a/beacon_node/lighthouse_network/src/gossipsub/error.rs b/beacon_node/lighthouse_network/src/gossipsub/error.rs index d00e1ec6d22..df3332bc923 100644 --- a/beacon_node/lighthouse_network/src/gossipsub/error.rs +++ b/beacon_node/lighthouse_network/src/gossipsub/error.rs @@ -36,6 +36,9 @@ pub enum PublishError { MessageTooLarge, /// The compression algorithm failed. TransformFailed(std::io::Error), + /// Messages could not be sent because all queues for peers were full. The usize represents the + /// number of peers that have full queues. + AllQueuesFull(usize), } impl std::fmt::Display for PublishError { diff --git a/beacon_node/lighthouse_network/src/gossipsub/metrics.rs b/beacon_node/lighthouse_network/src/gossipsub/metrics.rs index 94bcdbc487b..91bcd5f54bc 100644 --- a/beacon_node/lighthouse_network/src/gossipsub/metrics.rs +++ b/beacon_node/lighthouse_network/src/gossipsub/metrics.rs @@ -38,7 +38,7 @@ const DEFAULT_MAX_TOPICS: usize = 300; // Default value that limits how many topics for which there has never been a subscription do we // store metrics. -const DEFAULT_MAX_NEVER_SUBSCRIBED_TOPICS: usize = 50; +const DEFAULT_MAX_NEVER_SUBSCRIBED_TOPICS: usize = 100; #[derive(Debug, Clone)] pub struct Config { @@ -392,13 +392,21 @@ impl Metrics { } } - /// Increase the number of peers do we known are subscribed to this topic. + /// Registers a set of topics that we want to calculate metrics for. + pub(crate) fn register_allowed_topics(&mut self, topics: Vec) { + for topic_hash in topics { + self.topic_info.insert(topic_hash, true); + } + } + + /// Increase the number of peers that are subscribed to this topic. pub(crate) fn inc_topic_peers(&mut self, topic: &TopicHash) { if self.register_topic(topic).is_ok() { self.topic_peers_count.get_or_create(topic).inc(); } } + /// Decrease the number of peers that are subscribed to this topic. pub(crate) fn dec_topic_peers(&mut self, topic: &TopicHash) { if self.register_topic(topic).is_ok() { self.topic_peers_count.get_or_create(topic).dec(); diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index ae02b689d81..fc441f25339 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -1,9 +1,10 @@ pub use lighthouse_metrics::*; lazy_static! { - pub static ref NAT_OPEN: Result = try_create_int_counter( + pub static ref NAT_OPEN: Result = try_create_int_gauge_vec( "nat_open", - "An estimate indicating if the local node is exposed to the internet."
+ "An estimate indicating if the local node is reachable from external nodes", + &["protocol"] ); pub static ref ADDRESS_UPDATE_COUNT: Result = try_create_int_counter( "libp2p_address_update_total", @@ -14,6 +15,9 @@ lazy_static! { "Count of libp2p peers currently connected" ); + pub static ref PEERS_CONNECTED_MULTI: Result = + try_create_int_gauge_vec("libp2p_peers_multi", "Count of libp2p peers currently connected", &["direction", "transport"]); + pub static ref TCP_PEERS_CONNECTED: Result = try_create_int_gauge( "libp2p_tcp_peers", "Count of libp2p peers currently connected via TCP" @@ -32,13 +36,10 @@ lazy_static! { "libp2p_peer_disconnect_event_total", "Count of libp2p peer disconnect events" ); - pub static ref DISCOVERY_SENT_BYTES: Result = try_create_int_gauge( - "discovery_sent_bytes", - "The number of bytes sent in discovery" - ); - pub static ref DISCOVERY_RECV_BYTES: Result = try_create_int_gauge( - "discovery_recv_bytes", - "The number of bytes received in discovery" + pub static ref DISCOVERY_BYTES: Result = try_create_int_gauge_vec( + "discovery_bytes", + "The number of bytes sent and received in discovery", + &["direction"] ); pub static ref DISCOVERY_QUEUE: Result = try_create_int_gauge( "discovery_queue_size", @@ -135,17 +136,6 @@ lazy_static! { &["type"] ); - /* - * Inbound/Outbound peers - */ - /// The number of peers that dialed us. - pub static ref NETWORK_INBOUND_PEERS: Result = - try_create_int_gauge("network_inbound_peers","The number of peers that are currently connected that have dialed us."); - - /// The number of peers that we dialed us. - pub static ref NETWORK_OUTBOUND_PEERS: Result = - try_create_int_gauge("network_outbound_peers","The number of peers that are currently connected that we dialed."); - /* * Peer Reporting */ @@ -156,31 +146,11 @@ lazy_static! { ); } -/// Checks if we consider the NAT open. -/// -/// Conditions for an open NAT: -/// 1. We have 1 or more SOCKET_UPDATED messages. This occurs when discovery has a majority of -/// users reporting an external port and our ENR gets updated. -/// 2. We have 0 SOCKET_UPDATED messages (can be true if the port was correct on boot), then we -/// rely on whether we have any inbound messages. If we have no socket update messages, but -/// manage to get at least one inbound peer, we are exposed correctly. -pub fn check_nat() { - // NAT is already deemed open. 
- if NAT_OPEN.as_ref().map(|v| v.get()).unwrap_or(0) != 0 { - return; - } - if ADDRESS_UPDATE_COUNT.as_ref().map(|v| v.get()).unwrap_or(0) != 0 - || NETWORK_INBOUND_PEERS.as_ref().map(|v| v.get()).unwrap_or(0) != 0_i64 - { - inc_counter(&NAT_OPEN); - } -} - pub fn scrape_discovery_metrics() { let metrics = discv5::metrics::Metrics::from(discv5::Discv5::::raw_metrics()); set_float_gauge(&DISCOVERY_REQS, metrics.unsolicited_requests_per_second); set_gauge(&DISCOVERY_SESSIONS, metrics.active_sessions as i64); - set_gauge(&DISCOVERY_SENT_BYTES, metrics.bytes_sent as i64); - set_gauge(&DISCOVERY_RECV_BYTES, metrics.bytes_recv as i64); + set_gauge_vec(&DISCOVERY_BYTES, &["inbound"], metrics.bytes_recv as i64); + set_gauge_vec(&DISCOVERY_BYTES, &["outbound"], metrics.bytes_sent as i64); } diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index e4976a0d374..92f876ee032 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -10,7 +10,7 @@ use delay_map::HashSetDelay; use discv5::Enr; use libp2p::identify::Info as IdentifyInfo; use lru_cache::LRUTimeCache; -use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; +use peerdb::{BanOperation, BanResult, ScoreUpdateResult}; use rand::seq::SliceRandom; use slog::{debug, error, trace, warn}; use smallvec::SmallVec; @@ -18,7 +18,6 @@ use std::{ sync::Arc, time::{Duration, Instant}, }; -use strum::IntoEnumIterator; use types::{EthSpec, SyncSubnetId}; pub use libp2p::core::Multiaddr; @@ -719,46 +718,6 @@ impl PeerManager { } } - // This function updates metrics for all connected peers. - fn update_connected_peer_metrics(&self) { - // Do nothing if we don't have metrics enabled. - if !self.metrics_enabled { - return; - } - - let mut connected_peer_count = 0; - let mut inbound_connected_peers = 0; - let mut outbound_connected_peers = 0; - let mut clients_per_peer = HashMap::new(); - - for (_peer, peer_info) in self.network_globals.peers.read().connected_peers() { - connected_peer_count += 1; - if let PeerConnectionStatus::Connected { n_in, .. } = peer_info.connection_status() { - if *n_in > 0 { - inbound_connected_peers += 1; - } else { - outbound_connected_peers += 1; - } - } - *clients_per_peer - .entry(peer_info.client().kind.to_string()) - .or_default() += 1; - } - - metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peer_count); - metrics::set_gauge(&metrics::NETWORK_INBOUND_PEERS, inbound_connected_peers); - metrics::set_gauge(&metrics::NETWORK_OUTBOUND_PEERS, outbound_connected_peers); - - for client_kind in ClientKind::iter() { - let value = clients_per_peer.get(&client_kind.to_string()).unwrap_or(&0); - metrics::set_gauge_vec( - &metrics::PEERS_PER_CLIENT, - &[client_kind.as_ref()], - *value as i64, - ); - } - } - /* Internal functions */ /// Sets a peer as connected as long as their reputation allows it @@ -921,8 +880,7 @@ impl PeerManager { let outbound_only_peer_count = self.network_globals.connected_outbound_only_peers(); let wanted_peers = if peer_count < self.target_peers.saturating_sub(dialing_peers) { // We need more peers in general. - // Note: The maximum discovery query is bounded by `Discovery`. 
- self.target_peers.saturating_sub(dialing_peers) - peer_count + self.max_peers().saturating_sub(dialing_peers) - peer_count } else if outbound_only_peer_count < self.min_outbound_only_peers() && peer_count < self.max_outbound_dialing_peers() { diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index cb60906f632..5dda78a0135 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -154,8 +154,8 @@ impl NetworkBehaviour for PeerManager { self.on_dial_failure(peer_id); } FromSwarm::ExternalAddrConfirmed(_) => { - // TODO: we likely want to check this against our assumed external tcp - // address + // We have an external address confirmed, which means we are able to do NAT traversal. + metrics::set_gauge_vec(&metrics::NAT_OPEN, &["libp2p"], 1); } _ => { // NOTE: FromSwarm is a non exhaustive enum so updates should be based on release @@ -243,14 +243,15 @@ impl PeerManager { self.events.push(PeerManagerEvent::MetaData(peer_id)); } - // Check NAT if metrics are enabled - if self.network_globals.local_enr.read().udp4().is_some() { - metrics::check_nat(); - } - // increment prometheus metrics if self.metrics_enabled { let remote_addr = endpoint.get_remote_address(); + let direction = if endpoint.is_dialer() { + "outbound" + } else { + "inbound" + }; + match remote_addr.iter().find(|proto| { matches!( proto, @@ -258,10 +259,10 @@ ) }) { Some(multiaddr::Protocol::QuicV1) => { - metrics::inc_gauge(&metrics::QUIC_PEERS_CONNECTED); + metrics::inc_gauge_vec(&metrics::PEERS_CONNECTED_MULTI, &[direction, "quic"]); } Some(multiaddr::Protocol::Tcp(_)) => { - metrics::inc_gauge(&metrics::TCP_PEERS_CONNECTED); + metrics::inc_gauge_vec(&metrics::PEERS_CONNECTED_MULTI, &[direction, "tcp"]); } Some(_) => unreachable!(), None => { @@ -269,7 +270,7 @@ } }; - self.update_connected_peer_metrics(); + metrics::inc_gauge(&metrics::PEERS_CONNECTED); metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); } @@ -339,6 +340,12 @@ impl PeerManager { let remote_addr = endpoint.get_remote_address(); // Update the prometheus metrics if self.metrics_enabled { + let direction = if endpoint.is_dialer() { + "outbound" + } else { + "inbound" + }; + match remote_addr.iter().find(|proto| { matches!( proto, @@ -346,15 +353,16 @@ ) }) { Some(multiaddr::Protocol::QuicV1) => { - metrics::dec_gauge(&metrics::QUIC_PEERS_CONNECTED); + metrics::dec_gauge_vec(&metrics::PEERS_CONNECTED_MULTI, &[direction, "quic"]); } Some(multiaddr::Protocol::Tcp(_)) => { - metrics::dec_gauge(&metrics::TCP_PEERS_CONNECTED); + metrics::dec_gauge_vec(&metrics::PEERS_CONNECTED_MULTI, &[direction, "tcp"]); } // If it's an unknown protocol we already logged when connection was established. _ => {} }; - self.update_connected_peer_metrics(); + // Legacy standard metrics.
+ metrics::dec_gauge(&metrics::PEERS_CONNECTED); metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index a6bf3ffecce..ebb355fefcf 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -168,7 +168,7 @@ impl PeerDB { fn score_state_banned_or_disconnected(&self, peer_id: &PeerId) -> bool { if let Some(peer) = self.peers.get(peer_id) { match peer.score_state() { - ScoreState::Banned | ScoreState::Disconnected => true, + ScoreState::Banned | ScoreState::ForcedDisconnect => true, _ => self.ip_is_banned(peer).is_some(), } } else { @@ -756,8 +756,8 @@ impl PeerDB { // Update the connection state match direction { - ConnectionDirection::Incoming => info.connect_ingoing(Some(seen_address)), - ConnectionDirection::Outgoing => info.connect_outgoing(Some(seen_address)), + ConnectionDirection::Incoming => info.connect_ingoing(seen_address), + ConnectionDirection::Outgoing => info.connect_outgoing(seen_address), } } @@ -1062,12 +1062,12 @@ impl PeerDB { log: &slog::Logger, ) -> ScoreTransitionResult { match (info.score_state(), previous_state) { - (ScoreState::Banned, ScoreState::Healthy | ScoreState::Disconnected) => { + (ScoreState::Banned, ScoreState::Healthy | ScoreState::ForcedDisconnect) => { debug!(log, "Peer has been banned"; "peer_id" => %peer_id, "score" => %info.score()); ScoreTransitionResult::Banned } - (ScoreState::Disconnected, ScoreState::Banned | ScoreState::Healthy) => { - debug!(log, "Peer transitioned to disconnect state"; "peer_id" => %peer_id, "score" => %info.score(), "past_state" => %previous_state); + (ScoreState::ForcedDisconnect, ScoreState::Banned | ScoreState::Healthy) => { + debug!(log, "Peer transitioned to forced disconnect score state"; "peer_id" => %peer_id, "score" => %info.score(), "past_score_state" => %previous_state); // disconnect the peer if it's currently connected or dialing if info.is_connected_or_dialing() { ScoreTransitionResult::Disconnected @@ -1079,18 +1079,20 @@ impl PeerDB { ScoreTransitionResult::NoAction } } - (ScoreState::Healthy, ScoreState::Disconnected) => { - debug!(log, "Peer transitioned to healthy state"; "peer_id" => %peer_id, "score" => %info.score(), "past_state" => %previous_state); + (ScoreState::Healthy, ScoreState::ForcedDisconnect) => { + debug!(log, "Peer transitioned to healthy score state"; "peer_id" => %peer_id, "score" => %info.score(), "past_score_state" => %previous_state); ScoreTransitionResult::NoAction } (ScoreState::Healthy, ScoreState::Banned) => { - debug!(log, "Peer transitioned to healthy state"; "peer_id" => %peer_id, "score" => %info.score(), "past_state" => %previous_state); + debug!(log, "Peer transitioned to healthy score state"; "peer_id" => %peer_id, "score" => %info.score(), "past_score_state" => %previous_state); // unban the peer if it was previously banned. ScoreTransitionResult::Unbanned } // Explicitly ignore states that haven't transitioned. 
(ScoreState::Healthy, ScoreState::Healthy) => ScoreTransitionResult::NoAction, - (ScoreState::Disconnected, ScoreState::Disconnected) => ScoreTransitionResult::NoAction, + (ScoreState::ForcedDisconnect, ScoreState::ForcedDisconnect) => { + ScoreTransitionResult::NoAction + } (ScoreState::Banned, ScoreState::Banned) => ScoreTransitionResult::NoAction, } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs index 1178dbcb9ce..9450584d6fc 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs @@ -33,6 +33,8 @@ pub enum ClientKind { Prysm, /// A lodestar node. Lodestar, + /// A Caplin node. + Caplin, /// An unknown client. Unknown, } @@ -88,6 +90,7 @@ impl std::fmt::Display for Client { self.version, self.os_version ), ClientKind::Lodestar => write!(f, "Lodestar: version: {}", self.version), + ClientKind::Caplin => write!(f, "Caplin"), ClientKind::Unknown => { if let Some(agent_string) = &self.agent_string { write!(f, "Unknown: {}", agent_string) @@ -109,11 +112,11 @@ impl std::fmt::Display for ClientKind { // kind and it's associated version and the OS kind. fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String) { let mut agent_split = agent_version.split('/'); + let mut version = String::from("unknown"); + let mut os_version = String::from("unknown"); match agent_split.next() { Some("Lighthouse") => { let kind = ClientKind::Lighthouse; - let mut version = String::from("unknown"); - let mut os_version = version.clone(); if let Some(agent_version) = agent_split.next() { version = agent_version.into(); if let Some(agent_os_version) = agent_split.next() { @@ -124,8 +127,6 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } Some("teku") => { let kind = ClientKind::Teku; - let mut version = String::from("unknown"); - let mut os_version = version.clone(); if agent_split.next().is_some() { if let Some(agent_version) = agent_split.next() { version = agent_version.into(); @@ -138,13 +139,10 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } Some("github.com") => { let kind = ClientKind::Prysm; - let unknown = String::from("unknown"); - (kind, unknown.clone(), unknown) + (kind, version, os_version) } Some("Prysm") => { let kind = ClientKind::Prysm; - let mut version = String::from("unknown"); - let mut os_version = version.clone(); if agent_split.next().is_some() { if let Some(agent_version) = agent_split.next() { version = agent_version.into(); @@ -157,8 +155,6 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } Some("nimbus") => { let kind = ClientKind::Nimbus; - let mut version = String::from("unknown"); - let mut os_version = version.clone(); if agent_split.next().is_some() { if let Some(agent_version) = agent_split.next() { version = agent_version.into(); @@ -171,8 +167,6 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } Some("nim-libp2p") => { let kind = ClientKind::Nimbus; - let mut version = String::from("unknown"); - let mut os_version = version.clone(); if let Some(agent_version) = agent_split.next() { version = agent_version.into(); if let Some(agent_os_version) = agent_split.next() { @@ -183,8 +177,6 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } Some("js-libp2p") | Some("lodestar") => { let kind = 
ClientKind::Lodestar; - let mut version = String::from("unknown"); - let mut os_version = version.clone(); if let Some(agent_version) = agent_split.next() { version = agent_version.into(); if let Some(agent_os_version) = agent_split.next() { @@ -193,6 +185,14 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } (kind, version, os_version) } + Some("erigon") => { + let client_kind = if let Some("caplin") = agent_split.next() { + ClientKind::Caplin + } else { + ClientKind::Unknown + }; + (client_kind, version, os_version) + } _ => { let unknown = String::from("unknown"); (ClientKind::Unknown, unknown.clone(), unknown) diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 44c54511ddc..c5e13c51506 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -307,13 +307,13 @@ impl PeerInfo { /// Checks if the peer is outbound-only pub fn is_outbound_only(&self) -> bool { - matches!(self.connection_status, Connected {n_in, n_out} if n_in == 0 && n_out > 0) + matches!(self.connection_status, Connected {n_in, n_out, ..} if n_in == 0 && n_out > 0) } /// Returns the number of connections with this peer. pub fn connections(&self) -> (u8, u8) { match self.connection_status { - Connected { n_in, n_out } => (n_in, n_out), + Connected { n_in, n_out, .. } => (n_in, n_out), _ => (0, 0), } } @@ -421,7 +421,9 @@ impl PeerInfo { /// Modifies the status to Connected and increases the number of ingoing /// connections by one - pub(super) fn connect_ingoing(&mut self, seen_multiaddr: Option) { + pub(super) fn connect_ingoing(&mut self, multiaddr: Multiaddr) { + self.seen_multiaddrs.insert(multiaddr.clone()); + match &mut self.connection_status { Connected { n_in, .. } => *n_in += 1, Disconnected { .. } @@ -429,19 +431,20 @@ impl PeerInfo { | Dialing { .. } | Disconnecting { .. } | Unknown => { - self.connection_status = Connected { n_in: 1, n_out: 0 }; + self.connection_status = Connected { + n_in: 1, + n_out: 0, + multiaddr, + }; self.connection_direction = Some(ConnectionDirection::Incoming); } } - - if let Some(multiaddr) = seen_multiaddr { - self.seen_multiaddrs.insert(multiaddr); - } } /// Modifies the status to Connected and increases the number of outgoing /// connections by one - pub(super) fn connect_outgoing(&mut self, seen_multiaddr: Option) { + pub(super) fn connect_outgoing(&mut self, multiaddr: Multiaddr) { + self.seen_multiaddrs.insert(multiaddr.clone()); match &mut self.connection_status { Connected { n_out, .. } => *n_out += 1, Disconnected { .. } @@ -449,13 +452,14 @@ impl PeerInfo { | Dialing { .. } | Disconnecting { .. } | Unknown => { - self.connection_status = Connected { n_in: 0, n_out: 1 }; + self.connection_status = Connected { + n_in: 0, + n_out: 1, + multiaddr, + }; self.connection_direction = Some(ConnectionDirection::Outgoing); } } - if let Some(multiaddr) = seen_multiaddr { - self.seen_multiaddrs.insert(multiaddr); - } } #[cfg(test)] @@ -487,6 +491,8 @@ pub enum ConnectionDirection { pub enum PeerConnectionStatus { /// The peer is connected. Connected { + /// The multiaddr that we are connected via. + multiaddr: Multiaddr, /// number of ingoing connections. n_in: u8, /// number of outgoing connections. 
@@ -522,7 +528,12 @@ impl Serialize for PeerConnectionStatus { fn serialize(&self, serializer: S) -> Result { let mut s = serializer.serialize_struct("connection_status", 6)?; match self { - Connected { n_in, n_out } => { + Connected { + n_in, + n_out, + multiaddr, + } => { + s.serialize_field("multiaddr", multiaddr)?; s.serialize_field("status", "connected")?; s.serialize_field("connections_in", n_in)?; s.serialize_field("connections_out", n_out)?; diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index 877d725812c..ba9bd314722 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -104,7 +104,7 @@ pub(crate) enum ScoreState { /// We are content with the peers performance. We permit connections and messages. Healthy, /// The peer should be disconnected. We allow re-connections if the peer is persistent. - Disconnected, + ForcedDisconnect, /// The peer is banned. We disallow new connections until it's score has decayed into a /// tolerable threshold. Banned, @@ -115,7 +115,7 @@ impl std::fmt::Display for ScoreState { match self { ScoreState::Healthy => write!(f, "Healthy"), ScoreState::Banned => write!(f, "Banned"), - ScoreState::Disconnected => write!(f, "Disconnected"), + ScoreState::ForcedDisconnect => write!(f, "Disconnected"), } } } @@ -313,7 +313,7 @@ impl Score { pub(crate) fn state(&self) -> ScoreState { match self.score() { x if x <= MIN_SCORE_BEFORE_BAN => ScoreState::Banned, - x if x <= MIN_SCORE_BEFORE_DISCONNECT => ScoreState::Disconnected, + x if x <= MIN_SCORE_BEFORE_DISCONNECT => ScoreState::ForcedDisconnect, _ => ScoreState::Healthy, } } @@ -407,7 +407,7 @@ mod tests { assert!(score.score() < 0.0); assert_eq!(score.state(), ScoreState::Healthy); score.test_add(-1.0001); - assert_eq!(score.state(), ScoreState::Disconnected); + assert_eq!(score.state(), ScoreState::ForcedDisconnect); } #[test] diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index e22e5273866..335d4de1ab2 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -310,11 +310,16 @@ where Err(RateLimitedErr::TooLarge) => { // we set the batch sizes, so this is a coding/config err for most protocols let protocol = req.versioned_protocol().protocol(); - if matches!(protocol, Protocol::BlocksByRange) - || matches!(protocol, Protocol::BlobsByRange) - { - debug!(self.log, "By range request will never be processed"; "request" => %req, "protocol" => %protocol); + if matches!( + protocol, + Protocol::BlocksByRange + | Protocol::BlobsByRange + | Protocol::BlocksByRoot + | Protocol::BlobsByRoot + ) { + debug!(self.log, "Request too large to process"; "request" => %req, "protocol" => %protocol); } else { + // Other protocols shouldn't be sending large messages, we should flag the peer kind crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol); } // send an error code to the peer. 
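The `Disconnected` to `ForcedDisconnect` rename above leaves the threshold logic in `score.rs` untouched. A self-contained sketch of that mapping, with assumed threshold values standing in for the real `MIN_SCORE_BEFORE_*` constants:

```rust
// Illustrative score-to-state mapping; the threshold values are assumptions.
const MIN_SCORE_BEFORE_BAN: f64 = -50.0;
const MIN_SCORE_BEFORE_DISCONNECT: f64 = -20.0;

#[derive(Debug, PartialEq)]
enum ScoreState {
    Healthy,
    ForcedDisconnect, // previously named `Disconnected`
    Banned,
}

fn state(score: f64) -> ScoreState {
    match score {
        s if s <= MIN_SCORE_BEFORE_BAN => ScoreState::Banned,
        s if s <= MIN_SCORE_BEFORE_DISCONNECT => ScoreState::ForcedDisconnect,
        _ => ScoreState::Healthy,
    }
}

fn main() {
    assert_eq!(state(0.0), ScoreState::Healthy);
    assert_eq!(state(-21.0), ScoreState::ForcedDisconnect);
    assert_eq!(state(-60.0), ScoreState::Banned);
}
```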
diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs index a43678d4ba3..5a04d6c2d84 100644 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -6,6 +6,7 @@ use crate::types::SnappyTransform; use crate::gossipsub; use libp2p::identify; use libp2p::swarm::NetworkBehaviour; +use libp2p::upnp::tokio::Behaviour as Upnp; use types::EthSpec; use super::api_types::RequestId; @@ -32,6 +33,8 @@ where // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. /// Provides IP addresses and peer information. pub identify: identify::Behaviour, + /// Libp2p UPnP port mapping. + pub upnp: Upnp, /// The routing pub-sub mechanism for eth2. pub gossipsub: Gossipsub, } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 401e43a53ff..aed9d54baab 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -18,9 +18,9 @@ use crate::rpc::*; use crate::service::behaviour::BehaviourEvent; pub use crate::service::behaviour::Gossipsub; use crate::types::{ - fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, - SnappyTransform, Subnet, SubnetDiscovery, ALTAIR_CORE_TOPICS, BASE_CORE_TOPICS, - CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, + attestation_sync_committee_topics, fork_core_topics, subnet_from_topic_hash, GossipEncoding, + GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, ALTAIR_CORE_TOPICS, + BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, }; use crate::EnrExt; use crate::Eth2Enr; @@ -28,10 +28,9 @@ use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; use api_types::{PeerRequestId, Request, RequestId, Response}; use futures::stream::StreamExt; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; -use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; +use libp2p::multiaddr::{self, Multiaddr, Protocol as MProtocol}; use libp2p::swarm::{Swarm, SwarmEvent}; -use libp2p::PeerId; -use libp2p::{identify, SwarmBuilder}; +use libp2p::{identify, PeerId, SwarmBuilder}; use slog::{crit, debug, info, o, trace, warn}; use std::path::PathBuf; use std::pin::Pin; @@ -145,6 +144,14 @@ impl Network { // initialise the node's ID let local_keypair = utils::load_private_key(&config, &log); + // Trusted peers will also be marked as explicit in GossipSub. + // Cfr. https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#explicit-peering-agreements + let trusted_peers: Vec = config + .trusted_peers + .iter() + .map(|x| PeerId::from(x.clone())) + .collect(); + // set up a collection of variables accessible outside of the network crate let network_globals = { // Create an ENR or load from disk if appropriate @@ -159,11 +166,7 @@ impl Network { let globals = NetworkGlobals::new( enr, meta_data, - config - .trusted_peers - .iter() - .map(|x| PeerId::from(x.clone())) - .collect(), + trusted_peers, config.disable_peer_scoring, &log, ); @@ -276,6 +279,27 @@ impl Network { .with_peer_score(params, thresholds) .expect("Valid score params and thresholds"); + // Mark trusted peers as explicit. 
+ for explicit_peer in config.trusted_peers.iter() { + gossipsub.add_explicit_peer(&PeerId::from(explicit_peer.clone())); + } + + // If we are using metrics, then register which topics we want to make sure to keep + // track of + if ctx.libp2p_registry.is_some() { + let topics_to_keep_metrics_for = attestation_sync_committee_topics::() + .map(|gossip_kind| { + Topic::from(GossipTopic::new( + gossip_kind, + GossipEncoding::default(), + enr_fork_id.fork_digest, + )) + .into() + }) + .collect::>(); + gossipsub.register_topics_for_metrics(topics_to_keep_metrics_for); + } + (gossipsub, update_gossipsub_scores) }; @@ -363,6 +387,7 @@ impl Network { identify, peer_manager, connection_limits, + upnp: Default::default(), } }; @@ -640,6 +665,20 @@ impl Network { let topic = GossipTopic::new(kind, GossipEncoding::default(), new_fork_digest); self.subscribe(topic); } + + // Register the new topics for metrics + let topics_to_keep_metrics_for = attestation_sync_committee_topics::() + .map(|gossip_kind| { + Topic::from(GossipTopic::new( + gossip_kind, + GossipEncoding::default(), + new_fork_digest, + )) + .into() + }) + .collect::>(); + self.gossipsub_mut() + .register_topics_for_metrics(topics_to_keep_metrics_for); } /// Unsubscribe from all topics that doesn't have the given fork_digest @@ -1601,6 +1640,47 @@ impl Network { } } + fn inject_upnp_event(&mut self, event: libp2p::upnp::Event) { + match event { + libp2p::upnp::Event::NewExternalAddr(addr) => { + info!(self.log, "UPnP route established"; "addr" => %addr); + let mut iter = addr.iter(); + // Skip Ip address. + iter.next(); + match iter.next() { + Some(multiaddr::Protocol::Udp(udp_port)) => match iter.next() { + Some(multiaddr::Protocol::QuicV1) => { + if let Err(e) = self.discovery_mut().update_enr_quic_port(udp_port) { + warn!(self.log, "Failed to update ENR"; "error" => e); + } + } + _ => { + trace!(self.log, "UPnP address mapped multiaddr from unknown transport"; "addr" => %addr) + } + }, + Some(multiaddr::Protocol::Tcp(tcp_port)) => { + if let Err(e) = self.discovery_mut().update_enr_tcp_port(tcp_port) { + warn!(self.log, "Failed to update ENR"; "error" => e); + } + } + _ => { + trace!(self.log, "UPnP address mapped multiaddr from unknown transport"; "addr" => %addr); + } + } + } + libp2p::upnp::Event::ExpiredExternalAddr(_) => {} + libp2p::upnp::Event::GatewayNotFound => { + info!(self.log, "UPnP not available"); + } + libp2p::upnp::Event::NonRoutableGateway => { + info!( + self.log, + "UPnP is available but gateway is not exposed to public network" + ); + } + } + } + /* Networking polling */ /// Poll the p2p networking stack. @@ -1623,6 +1703,10 @@ impl Network { } BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), + BehaviourEvent::Upnp(e) => { + self.inject_upnp_event(e); + None + } BehaviourEvent::ConnectionLimits(le) => void::unreachable(le), }, SwarmEvent::ConnectionEstablished { .. 
} => None, diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 1dd6062edd4..489c5ae5293 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -8,7 +8,6 @@ use crate::{GossipTopic, NetworkConfig}; use futures::future::Either; use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed}; use libp2p::identity::{secp256k1, Keypair}; -use libp2p::quic; use libp2p::{core, noise, yamux, PeerId, Transport}; use prometheus_client::registry::Registry; use slog::{debug, warn}; @@ -63,8 +62,8 @@ pub fn build_transport( let transport = if quic_support { // Enables Quic // The default quic configuration suits us for now. - let quic_config = quic::Config::new(&local_private_key); - let quic = quic::tokio::Transport::new(quic_config); + let quic_config = libp2p::quic::Config::new(&local_private_key); + let quic = libp2p::quic::tokio::Transport::new(quic_config); let transport = tcp .or_transport(quic) .map(|either_output, _| match either_output { diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index af9e9ef45d5..8cf52f47dcd 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -17,7 +17,7 @@ pub use pubsub::{PubsubMessage, SnappyTransform}; pub use subnet::{Subnet, SubnetDiscovery}; pub use sync_state::{BackFillState, SyncState}; pub use topics::{ - core_topics_to_subscribe, fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, - GossipTopic, ALTAIR_CORE_TOPICS, BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, - LIGHT_CLIENT_GOSSIP_TOPICS, + attestation_sync_committee_topics, core_topics_to_subscribe, fork_core_topics, + subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, ALTAIR_CORE_TOPICS, + BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, }; diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 717b976de04..b9194022cee 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -1,7 +1,7 @@ use crate::gossipsub::{IdentTopic as Topic, TopicHash}; use serde::{Deserialize, Serialize}; use strum::AsRefStr; -use types::{ChainSpec, EthSpec, ForkName, SubnetId, SyncSubnetId}; +use types::{ChainSpec, EthSpec, ForkName, SubnetId, SyncSubnetId, Unsigned}; use crate::Subnet; @@ -62,6 +62,17 @@ pub fn fork_core_topics<T: EthSpec>(fork_name: &ForkName, spec: &ChainSpec) -> Vec<GossipKind> { } } +/// Returns all the attestation and sync committee topics, for a given fork. +pub fn attestation_sync_committee_topics<TSpec: EthSpec>() -> impl Iterator<Item = GossipKind> { + (0..TSpec::SubnetBitfieldLength::to_usize()) + .map(|subnet_id| GossipKind::Attestation(SubnetId::new(subnet_id as u64))) + .chain( + (0..TSpec::SyncCommitteeSubnetCount::to_usize()).map(|sync_committee_id| { + GossipKind::SyncCommitteeMessage(SyncSubnetId::new(sync_committee_id as u64)) + }), + ) +} + /// Returns all the topics that we need to subscribe to for a given fork /// including topics from older forks and new topics for the current fork.
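For a sense of scale: `attestation_sync_committee_topics` yields one `GossipKind` per attestation subnet followed by one per sync-committee subnet, so on the mainnet preset that is 64 + 4 = 68 kinds. A test-style sketch, assuming it lives alongside the topics module and that `MainnetEthSpec` has `SubnetBitfieldLength = 64` and `SyncCommitteeSubnetCount = 4`:

```rust
#[test]
fn attestation_and_sync_topic_count_mainnet() {
    use types::MainnetEthSpec;

    let kinds: Vec<GossipKind> =
        attestation_sync_committee_topics::<MainnetEthSpec>().collect();
    // 64 attestation subnets followed by 4 sync-committee subnets.
    assert_eq!(kinds.len(), 68);
    assert!(matches!(kinds[0], GossipKind::Attestation(_)));
    assert!(matches!(kinds[64], GossipKind::SyncCommitteeMessage(_)));
}
```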
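The `add_explicit_peer` loop earlier in this diff wires trusted peers into gossipsub v1.1 explicit peering: the router keeps exchanging messages with explicit peers even when they fall outside the mesh or score poorly. A condensed sketch of that loop, assuming the `trusted_peers: Vec<PeerId>` built earlier in the same change is still in scope (the PR itself re-derives each `PeerId` from config):

```rust
// Condensed equivalent of the explicit-peer setup; `gossipsub` is the
// gossipsub::Behaviour under construction.
for trusted_peer in &trusted_peers {
    // Explicit peers receive published messages directly, independent of
    // mesh membership and peer score.
    gossipsub.add_explicit_peer(trusted_peer);
}
```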
pub fn core_topics_to_subscribe( diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index af48244678d..3351ac23cb5 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -46,7 +46,7 @@ pub struct Libp2pInstance( LibP2PService, #[allow(dead_code)] // This field is managed for lifetime purposes may not be used directly, hence the `#[allow(dead_code)]` attribute. - exit_future::Signal, + async_channel::Sender<()>, ); impl std::ops::Deref for Libp2pInstance { @@ -110,7 +110,7 @@ pub async fn build_libp2p_instance( let config = build_config(boot_nodes); // launch libp2p service - let (signal, exit) = exit_future::signal(); + let (signal, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new(rt, exit, log.clone(), shutdown_tx); let libp2p_context = lighthouse_network::Context { diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index d8766d0091e..228066b31b7 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -8,12 +8,13 @@ edition = { workspace = true } sloggers = { workspace = true } genesis = { workspace = true } matches = "0.1.8" -exit-future = { workspace = true } slog-term = { workspace = true } slog-async = { workspace = true } eth2 = { workspace = true } [dependencies] +async-channel = { workspace = true } +anyhow = { workspace = true } beacon_chain = { workspace = true } store = { workspace = true } lighthouse_network = { workspace = true } @@ -35,11 +36,10 @@ lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } logging = { workspace = true } task_executor = { workspace = true } -igd-next = "0.14.3" +igd-next = "0.14" itertools = { workspace = true } num_cpus = { workspace = true } lru_cache = { workspace = true } -if-addrs = "0.6.4" lru = { workspace = true } strum = { workspace = true } tokio-util = { workspace = true } @@ -56,4 +56,4 @@ environment = { workspace = true } # NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill disable-backfill = [] fork_from_env = ["beacon_chain/fork_from_env"] -portable = ["beacon_chain/portable"] \ No newline at end of file +portable = ["beacon_chain/portable"] diff --git a/beacon_node/network/src/nat.rs b/beacon_node/network/src/nat.rs index cb81877b223..e63ff550398 100644 --- a/beacon_node/network/src/nat.rs +++ b/beacon_node/network/src/nat.rs @@ -3,231 +3,58 @@ //! Currently supported strategies: //! - UPnP -use crate::{NetworkConfig, NetworkMessage}; -use if_addrs::get_if_addrs; -use slog::{debug, info}; -use std::net::{IpAddr, SocketAddr, SocketAddrV4}; -use tokio::sync::mpsc; -use types::EthSpec; - -/// Configuration required to construct the UPnP port mappings. -pub struct UPnPConfig { - /// The local TCP port. - tcp_port: u16, - /// The local UDP discovery port. - disc_port: u16, - /// The local UDP quic port. - quic_port: u16, - /// Whether discovery is enabled or not. - disable_discovery: bool, - /// Whether quic is enabled or not. - disable_quic_support: bool, -} - -/// Contains mappings that managed to be established. -#[derive(Default, Debug)] -pub struct EstablishedUPnPMappings { - /// A TCP port mapping for libp2p. - pub tcp_port: Option, - /// A UDP port for the QUIC libp2p transport. - pub udp_quic_port: Option, - /// A UDP port for discv5. 
- pub udp_disc_port: Option, -} - -impl EstablishedUPnPMappings { - /// Returns true if at least one value is set. - pub fn is_some(&self) -> bool { - self.tcp_port.is_some() || self.udp_quic_port.is_some() || self.udp_disc_port.is_some() - } - - // Iterator over the UDP ports - pub fn udp_ports(&self) -> impl Iterator { - self.udp_quic_port.iter().chain(self.udp_disc_port.iter()) - } -} - -impl UPnPConfig { - pub fn from_config(config: &NetworkConfig) -> Option { - config.listen_addrs().v4().map(|v4_addr| UPnPConfig { - tcp_port: v4_addr.tcp_port, - disc_port: v4_addr.disc_port, - quic_port: v4_addr.quic_port, - disable_discovery: config.disable_discovery, - disable_quic_support: config.disable_quic_support, - }) - } -} - -/// Attempts to construct external port mappings with UPnP. -pub fn construct_upnp_mappings( - config: UPnPConfig, - network_send: mpsc::UnboundedSender>, +use anyhow::{bail, Context, Error}; +use igd_next::{aio::tokio as igd, PortMappingProtocol}; +use slog::debug; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::time::Duration; +use tokio::time::sleep; + +/// The duration in seconds of a port mapping on the gateway. +const MAPPING_DURATION: u32 = 3600; + +/// Renew the Mapping every half of `MAPPING_DURATION` to avoid the port being unmapped. +const MAPPING_TIMEOUT: u64 = MAPPING_DURATION as u64 / 2; + +/// Attempts to map Discovery external port mappings with UPnP. +pub async fn construct_upnp_mappings( + addr: Ipv4Addr, + port: u16, log: slog::Logger, -) { - info!(log, "UPnP Attempting to initialise routes"); - match igd_next::search_gateway(Default::default()) { - Err(e) => info!(log, "UPnP not available"; "error" => %e), - Ok(gateway) => { - // Need to find the local listening address matched with the router subnet - let interfaces = match get_if_addrs() { - Ok(v) => v, - Err(e) => { - info!(log, "UPnP failed to get local interfaces"; "error" => %e); - return; - } - }; - let local_ip = interfaces.iter().find_map(|interface| { - // Just use the first IP of the first interface that is not a loopback and not an - // ipv6 address. - if !interface.is_loopback() { - interface.ip().is_ipv4().then(|| interface.ip()) - } else { - None - } - }); - - let local_ip = match local_ip { - None => { - info!(log, "UPnP failed to find local IP address"); - return; - } - Some(v) => v, - }; - - debug!(log, "UPnP Local IP Discovered"; "ip" => ?local_ip); - - let mut mappings = EstablishedUPnPMappings::default(); - - match local_ip { - IpAddr::V4(address) => { - let libp2p_socket = SocketAddrV4::new(address, config.tcp_port); - let external_ip = gateway.get_external_ip(); - // We add specific port mappings rather than getting the router to arbitrary assign - // one. - // I've found this to be more reliable. If multiple users are behind a single - // router, they should ideally try to set different port numbers. 
- mappings.tcp_port = add_port_mapping( - &gateway, - igd_next::PortMappingProtocol::TCP, - libp2p_socket, - "tcp", - &log, - ).map(|_| { - let external_socket = external_ip.as_ref().map(|ip| SocketAddr::new(*ip, config.tcp_port)).map_err(|_| ()); - info!(log, "UPnP TCP route established"; "external_socket" => format!("{}:{}", external_socket.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), config.tcp_port)); - config.tcp_port - }).ok(); - - let set_udp_mapping = |udp_port| { - let udp_socket = SocketAddrV4::new(address, udp_port); - add_port_mapping( - &gateway, - igd_next::PortMappingProtocol::UDP, - udp_socket, - "udp", - &log, - ).map(|_| { - info!(log, "UPnP UDP route established"; "external_socket" => format!("{}:{}", external_ip.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), udp_port)); - }) - }; - - // Set the discovery UDP port mapping - if !config.disable_discovery && set_udp_mapping(config.disc_port).is_ok() { - mappings.udp_disc_port = Some(config.disc_port); - } - - // Set the quic UDP port mapping - if !config.disable_quic_support && set_udp_mapping(config.quic_port).is_ok() { - mappings.udp_quic_port = Some(config.quic_port); - } - - // report any updates to the network service. - if mappings.is_some() { - network_send.send(NetworkMessage::UPnPMappingEstablished{ mappings }) - .unwrap_or_else(|e| debug!(log, "Could not send message to the network service"; "error" => %e)); - } - } - _ => debug!(log, "UPnP no routes constructed. IPv6 not supported"), - } - } +) -> Result<(), Error> { + let gateway = igd::search_gateway(Default::default()) + .await + .context("Gateway does not support UPnP")?; + + let external_address = gateway + .get_external_ip() + .await + .context("Could not access gateway's external ip")?; + + let is_private = match external_address { + IpAddr::V4(ipv4) => ipv4.is_private(), + IpAddr::V6(ipv6) => ipv6.is_loopback() || ipv6.is_unspecified(), }; -} -/// Sets up a port mapping for a protocol returning the mapped port if successful. -fn add_port_mapping( - gateway: &igd_next::Gateway, - protocol: igd_next::PortMappingProtocol, - socket: SocketAddrV4, - protocol_string: &'static str, - log: &slog::Logger, -) -> Result<(), ()> { - // We add specific port mappings rather than getting the router to arbitrary assign - // one. - // I've found this to be more reliable. If multiple users are behind a single - // router, they should ideally try to set different port numbers. - let mapping_string = &format!("lighthouse-{}", protocol_string); - for _ in 0..2 { - match gateway.add_port( - protocol, - socket.port(), - SocketAddr::V4(socket), - 0, - mapping_string, - ) { - Err(e) => { - match e { - igd_next::AddPortError::PortInUse => { - // Try and remove and re-create - debug!(log, "UPnP port in use, attempting to remap"; "protocol" => protocol_string, "port" => socket.port()); - match gateway.remove_port(protocol, socket.port()) { - Ok(()) => { - debug!(log, "UPnP Removed port mapping"; "protocol" => protocol_string, "port" => socket.port()) - } - Err(e) => { - debug!(log, "UPnP Port remove failure"; "protocol" => protocol_string, "port" => socket.port(), "error" => %e); - return Err(()); - } - } - } - e => { - info!(log, "UPnP TCP route not set"; "error" => %e); - return Err(()); - } - } - } - Ok(_) => { - return Ok(()); - } - } + if is_private { + bail!( + "Gateway's external address is a private address: {}", + external_address + ); } - Err(()) -} -/// Removes the specified TCP and UDP port mappings. 
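The new `is_private` check guards against double NAT: if the gateway's reported external address is itself an RFC 1918 address, any mapping it creates is still unreachable from the public internet, so the task bails early rather than renewing a useless lease. A minimal standalone illustration of the same predicate:

```rust
use std::net::IpAddr;

// Same routability test as the bail-out above: RFC 1918 ranges for v4
// (10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16); loopback/unspecified for v6.
fn gateway_is_double_natted(external: IpAddr) -> bool {
    match external {
        IpAddr::V4(v4) => v4.is_private(),
        IpAddr::V6(v6) => v6.is_loopback() || v6.is_unspecified(),
    }
}

fn main() {
    assert!(gateway_is_double_natted("192.168.1.10".parse().unwrap()));
    assert!(!gateway_is_double_natted("203.0.113.7".parse().unwrap()));
}
```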
-pub fn remove_mappings(mappings: &EstablishedUPnPMappings, log: &slog::Logger) { - if mappings.is_some() { - debug!(log, "Removing UPnP port mappings"); - match igd_next::search_gateway(Default::default()) { - Ok(gateway) => { - if let Some(tcp_port) = mappings.tcp_port { - match gateway.remove_port(igd_next::PortMappingProtocol::TCP, tcp_port) { - Ok(()) => debug!(log, "UPnP Removed TCP port mapping"; "port" => tcp_port), - Err(e) => { - debug!(log, "UPnP Failed to remove TCP port mapping"; "port" => tcp_port, "error" => %e) - } - } - } - for udp_port in mappings.udp_ports() { - match gateway.remove_port(igd_next::PortMappingProtocol::UDP, *udp_port) { - Ok(()) => debug!(log, "UPnP Removed UDP port mapping"; "port" => udp_port), - Err(e) => { - debug!(log, "UPnP Failed to remove UDP port mapping"; "port" => udp_port, "error" => %e) - } - } - } - } - Err(e) => debug!(log, "UPnP failed to remove mappings"; "error" => %e), - } + loop { + gateway + .add_port( + PortMappingProtocol::UDP, + port, + SocketAddr::new(IpAddr::V4(addr), port), + MAPPING_DURATION, + "Lighthouse Discovery port", + ) + .await + .with_context(|| format!("Could not UPnP map port: {} on the gateway", port))?; + debug!(log, "Discovery UPnP port mapped"; "port" => %port); + sleep(Duration::from_secs(MAPPING_TIMEOUT)).await; } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 01a7e1f9896..18284bd236c 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,5 +1,5 @@ use super::sync::manager::RequestId as SyncId; -use crate::nat::EstablishedUPnPMappings; +use crate::nat; use crate::network_beacon_processor::InvalidBlockStorage; use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; @@ -27,6 +27,7 @@ use lighthouse_network::{ MessageId, NetworkEvent, NetworkGlobals, PeerId, }; use slog::{crit, debug, error, info, o, trace, warn}; +use std::collections::BTreeSet; use std::{collections::HashSet, pin::Pin, sync::Arc, time::Duration}; use store::HotColdDB; use strum::IntoStaticStr; @@ -94,11 +95,6 @@ pub enum NetworkMessage { /// The result of the validation validation_result: MessageAcceptance, }, - /// Called if UPnP managed to establish an external port mapping. - UPnPMappingEstablished { - /// The mappings that were established. - mappings: EstablishedUPnPMappings, - }, /// Reports a peer to the peer manager for performing an action. ReportPeer { peer_id: PeerId, @@ -124,7 +120,7 @@ pub enum NetworkMessage { pub enum ValidatorSubscriptionMessage { /// Subscribes a list of validators to specific slots for attestation duties. AttestationSubscribe { - subscriptions: Vec, + subscriptions: BTreeSet, }, SyncCommitteeSubscribe { subscriptions: Vec, @@ -188,9 +184,6 @@ pub struct NetworkService { store: Arc>, /// A collection of global variables, accessible outside of the network service. network_globals: Arc>, - /// Stores potentially created UPnP mappings to be removed on shutdown. (TCP port and UDP - /// ports). - upnp_mappings: EstablishedUPnPMappings, /// A delay that expires when a new fork takes place. next_fork_update: Pin>>, /// A delay that expires when we need to subscribe to a new fork's topics. @@ -237,22 +230,24 @@ impl NetworkService { "Backfill is disabled. DO NOT RUN IN PRODUCTION" ); - // try and construct UPnP port mappings if required. 
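Note the lease arithmetic in the new mapping loop: the gateway grants a `MAPPING_DURATION` (3600 s) lease and the task re-adds the identical mapping every `MAPPING_TIMEOUT` (1800 s), i.e. at half the lease lifetime, so a single missed renewal still leaves the port mapped. The shape of the pattern, abstracted away from igd (a sketch; `renew` stands in for the real `gateway.add_port(...)` call):

```rust
use std::time::Duration;

const MAPPING_DURATION: u32 = 3600; // lease length granted by the gateway
const MAPPING_TIMEOUT: u64 = MAPPING_DURATION as u64 / 2; // renew at half-life

// Sketch of the renew-at-half-lease loop used above.
async fn keep_port_mapped<F, Fut>(mut renew: F) -> anyhow::Result<()>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = anyhow::Result<()>>,
{
    loop {
        renew().await?; // refresh the lease well before it can expire
        tokio::time::sleep(Duration::from_secs(MAPPING_TIMEOUT)).await;
    }
}
```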
- if let Some(upnp_config) = crate::nat::UPnPConfig::from_config(config) { - let upnp_log = network_log.new(o!("service" => "UPnP")); - let upnp_network_send = network_senders.network_send(); - if config.upnp_enabled { - executor.spawn_blocking( - move || { - crate::nat::construct_upnp_mappings( - upnp_config, - upnp_network_send, - upnp_log, - ) - }, - "UPnP", - ); - } + if let (true, false, Some(v4)) = ( + config.upnp_enabled, + config.disable_discovery, + config.listen_addrs().v4(), + ) { + let nw = network_log.clone(); + let v4 = v4.clone(); + executor.spawn( + async move { + info!(nw, "UPnP Attempting to initialise routes"); + if let Err(e) = + nat::construct_upnp_mappings(v4.addr, v4.disc_port, nw.clone()).await + { + info!(nw, "Could not UPnP map Discovery port"; "error" => %e); + } + }, + "UPnP", + ); } // get a reference to the beacon chain store @@ -358,7 +353,6 @@ impl NetworkService { router_send, store, network_globals: network_globals.clone(), - upnp_mappings: EstablishedUPnPMappings::default(), next_fork_update, next_fork_subscriptions, next_unsubscribe, @@ -636,21 +630,6 @@ impl NetworkService { } => { self.libp2p.send_error_response(peer_id, id, error, reason); } - NetworkMessage::UPnPMappingEstablished { mappings } => { - self.upnp_mappings = mappings; - // If there is an external TCP port update, modify our local ENR. - if let Some(tcp_port) = self.upnp_mappings.tcp_port { - if let Err(e) = self.libp2p.discovery_mut().update_enr_tcp_port(tcp_port) { - warn!(self.log, "Failed to update ENR"; "error" => e); - } - } - // If there is an external QUIC port update, modify our local ENR. - if let Some(quic_port) = self.upnp_mappings.udp_quic_port { - if let Err(e) = self.libp2p.discovery_mut().update_enr_quic_port(quic_port) { - warn!(self.log, "Failed to update ENR"; "error" => e); - } - } - } NetworkMessage::ValidationResult { propagation_source, message_id, @@ -805,7 +784,7 @@ impl NetworkService { ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions } => { if let Err(e) = self .attestation_service - .validator_subscriptions(subscriptions) + .validator_subscriptions(subscriptions.into_iter()) { warn!(self.log, "Attestation validator subscription failed"; "error" => e); } @@ -1009,10 +988,6 @@ impl Drop for NetworkService { "Saved DHT state"; ), } - - // attempt to remove port mappings - crate::nat::remove_mappings(&self.upnp_mappings, &self.log); - info!(self.log, "Network service shutdown"); } } diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 85b3f6b7528..39e5e129268 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -62,7 +62,7 @@ mod tests { let runtime = Arc::new(Runtime::new().unwrap()); - let (signal, exit) = exit_future::signal(); + let (signal, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new( Arc::downgrade(&runtime), @@ -139,7 +139,7 @@ mod tests { // Build network service. 
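A design note on the replacement above: the removed flow ran under `spawn_blocking`, presumably because the synchronous igd gateway search would otherwise stall an async worker thread; with `igd_next`'s `aio::tokio` API the whole mapping task is a future, so it can live on the ordinary `executor.spawn` and shut down with the rest of the executor's tasks. The gating tuple reads as "UPnP enabled, discovery not disabled, an IPv4 listen address exists". A sketch, with `upnp_enabled`, `disable_discovery` and `v4_addr` standing in for the corresponding `NetworkConfig` fields:

```rust
// Sketch of the spawn gate; surrounding variables are stand-ins.
if let (true, false, Some(v4)) = (upnp_enabled, disable_discovery, v4_addr) {
    executor.spawn(
        async move {
            // Fully async: igd_next's aio API never blocks an executor thread.
            let _ = nat::construct_upnp_mappings(v4.addr, v4.disc_port, log).await;
        },
        "UPnP",
    );
}
```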
let (mut network_service, network_globals, _network_senders) = runtime.block_on(async { - let (_, exit) = exit_future::signal(); + let (_, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new( Arc::downgrade(&runtime), diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index 1cae6299e1c..ab9ffb95a6c 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -196,7 +196,7 @@ impl AttestationService { /// safely dropped. pub fn validator_subscriptions( &mut self, - subscriptions: Vec, + subscriptions: impl Iterator, ) -> Result<(), String> { // If the node is in a proposer-only state, we ignore all subnet subscriptions. if self.proposer_only { @@ -227,7 +227,6 @@ impl AttestationService { warn!(self.log, "Failed to compute subnet id for validator subscription"; "error" => ?e, - "validator_index" => subscription.validator_index ); continue; } @@ -257,13 +256,11 @@ impl AttestationService { warn!(self.log, "Subscription to subnet error"; "error" => e, - "validator_index" => subscription.validator_index, ); } else { trace!(self.log, "Subscribed to subnet for aggregator duties"; "exact_subnet" => ?exact_subnet, - "validator_index" => subscription.validator_index ); } } diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 658c851ba21..74f3f59df3c 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -180,14 +180,12 @@ mod attestation_service { use super::*; fn get_subscription( - validator_index: u64, attestation_committee_index: CommitteeIndex, slot: Slot, committee_count_at_slot: u64, is_aggregator: bool, ) -> ValidatorSubscription { ValidatorSubscription { - validator_index, attestation_committee_index, slot, committee_count_at_slot, @@ -204,7 +202,6 @@ mod attestation_service { (0..validator_count) .map(|validator_index| { get_subscription( - validator_index, validator_index, slot, committee_count_at_slot, @@ -217,7 +214,6 @@ mod attestation_service { #[tokio::test] async fn subscribe_current_slot_wait_for_unsubscribe() { // subscription config - let validator_index = 1; let committee_index = 1; // Keep a low subscription slot so that there are no additional subnet discovery events. 
let subscription_slot = 0; @@ -233,7 +229,6 @@ mod attestation_service { .expect("Could not get current slot"); let subscriptions = vec![get_subscription( - validator_index, committee_index, current_slot + Slot::new(subscription_slot), committee_count, @@ -242,7 +237,7 @@ mod attestation_service { // submit the subscriptions attestation_service - .validator_subscriptions(subscriptions) + .validator_subscriptions(subscriptions.into_iter()) .unwrap(); // not enough time for peer discovery, just subscribe, unsubscribe @@ -293,7 +288,6 @@ mod attestation_service { #[tokio::test] async fn test_same_subnet_unsubscription() { // subscription config - let validator_index = 1; let committee_count = 1; let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; @@ -313,7 +307,6 @@ mod attestation_service { .expect("Could not get current slot"); let sub1 = get_subscription( - validator_index, com1, current_slot + Slot::new(subscription_slot1), committee_count, @@ -321,7 +314,6 @@ mod attestation_service { ); let sub2 = get_subscription( - validator_index, com2, current_slot + Slot::new(subscription_slot2), committee_count, @@ -350,7 +342,7 @@ mod attestation_service { // submit the subscriptions attestation_service - .validator_subscriptions(vec![sub1, sub2]) + .validator_subscriptions(vec![sub1, sub2].into_iter()) .unwrap(); // Unsubscription event should happen at slot 2 (since subnet id's are the same, unsubscription event should be at higher slot + 1) @@ -431,7 +423,7 @@ mod attestation_service { // submit the subscriptions attestation_service - .validator_subscriptions(subscriptions) + .validator_subscriptions(subscriptions.into_iter()) .unwrap(); let events = get_events(&mut attestation_service, Some(131), 10).await; @@ -501,7 +493,7 @@ mod attestation_service { // submit the subscriptions attestation_service - .validator_subscriptions(subscriptions) + .validator_subscriptions(subscriptions.into_iter()) .unwrap(); let events = get_events(&mut attestation_service, None, 3).await; @@ -538,7 +530,6 @@ mod attestation_service { #[tokio::test] async fn test_subscribe_same_subnet_several_slots_apart() { // subscription config - let validator_index = 1; let committee_count = 1; let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; @@ -558,7 +549,6 @@ mod attestation_service { .expect("Could not get current slot"); let sub1 = get_subscription( - validator_index, com1, current_slot + Slot::new(subscription_slot1), committee_count, @@ -566,7 +556,6 @@ mod attestation_service { ); let sub2 = get_subscription( - validator_index, com2, current_slot + Slot::new(subscription_slot2), committee_count, @@ -595,7 +584,7 @@ mod attestation_service { // submit the subscriptions attestation_service - .validator_subscriptions(vec![sub1, sub2]) + .validator_subscriptions(vec![sub1, sub2].into_iter()) .unwrap(); // Unsubscription event should happen at the end of the slot. 
@@ -668,7 +657,7 @@ mod attestation_service { // submit the subscriptions attestation_service - .validator_subscriptions(subscriptions) + .validator_subscriptions(subscriptions.into_iter()) .unwrap(); // There should only be the same subscriptions as there are in the specification, diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 81b7772a7d7..1e3ceba5636 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -631,6 +631,13 @@ pub fn cli_app() -> Command { .help("Specifies how many states from the freezer database should cache in memory [default: 1]") .action(ArgAction::Set) ) + .arg( + Arg::with_name("state-cache-size") + .long("state-cache-size") + .value_name("STATE_CACHE_SIZE") + .help("Specifies the size of the snapshot cache [default: 3]") + .takes_value(true) + ) /* * Execution Layer Integration */ diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 6711a780cb9..06b488eaf0d 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -170,6 +170,9 @@ pub fn get_config( if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? { client_config.chain.shuffling_cache_size = cache_size; } + if let Some(cache_size) = clap_utils::parse_optional(cli_args, "state-cache-size")? { + client_config.chain.snapshot_cache_size = cache_size; + } /* * Prometheus metrics HTTP server diff --git a/book/src/help_bn.md b/book/src/help_bn.md index b2a922020f5..e55c34a9ff9 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -461,6 +461,9 @@ OPTIONS: --slots-per-restore-point Specifies how often a freezer DB restore point should be stored. Cannot be changed after initialization. [default: 8192 (mainnet) or 64 (minimal)] + --state-cache-size + Specifies the size of the snapshot cache [default: 3] + --suggested-fee-recipient Emergency fallback fee recipient for use in case the validator client does not have one configured. You should set this flag on the validator client instead of (or in addition to) setting it here. diff --git a/book/src/http.md b/book/src/http.md deleted file mode 100644 index 82a688586b0..00000000000 --- a/book/src/http.md +++ /dev/null @@ -1,33 +0,0 @@ -# HTTP API - -[OpenAPI Specification](https://ethereum.github.io/beacon-APIs/) - -## Beacon Node - -A Lighthouse beacon node can be configured to expose a HTTP server by supplying the `--http` flag. The default listen address is `localhost:5052`. - -The following CLI flags control the HTTP server: - -- `--http`: enable the HTTP server (required even if the following flags are - provided). -- `--http-port`: specify the listen port of the server. -- `--http-address`: specify the listen address of the server. - -The schema of the API aligns with the standard Ethereum Beacon Node API as defined -at [github.com/ethereum/beacon-APIs](https://github.com/ethereum/beacon-APIs). -It is an easy-to-use RESTful HTTP/JSON API. An interactive specification is -available [here](https://ethereum.github.io/beacon-APIs/). - -## Troubleshooting - -### HTTP API is unavailable or refusing connections - -Ensure the `--http` flag has been supplied at the CLI. 
- -You can quickly check that the HTTP endpoint is up using `curl`: - -``` -curl "localhost:5052/beacon/head" - -{"slot":37934,"block_root":"0x4d3ae7ebe8c6ef042db05958ec76e8f7be9d412a67a0defa6420a677249afdc7","state_root":"0x1c86b13ffc70a41e410eccce20d33f1fe59d148585ea27c2afb4060f75fe6be2","finalized_slot":37856,"finalized_block_root":"0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86","justified_slot":37888,"justified_block_root":"0x01c2f516a407d8fdda23cad4ed4381e4ab8913d638f935a2fe9bd00d6ced5ec4","previous_justified_slot":37856,"previous_justified_block_root":"0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86"} -``` diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 58e6917eca9..c2f5861576d 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -28,7 +28,7 @@ operating system. Install the following packages: ```bash -sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang +sudo apt update && sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang ``` > Tips: @@ -42,6 +42,16 @@ sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clan After this, you are ready to [build Lighthouse](#build-lighthouse). +#### Fedora/RHEL/CentOS + +Install the following packages: + +```bash +yum -y install git make perl clang cmake +``` + +After this, you are ready to [build Lighthouse](#build-lighthouse). + #### macOS 1. Install the [Homebrew][] package manager. diff --git a/book/src/pi.md b/book/src/pi.md index 550415240b4..7ccfe6a02a0 100644 --- a/book/src/pi.md +++ b/book/src/pi.md @@ -22,7 +22,7 @@ terminal and an Internet connection are necessary. Install the Ubuntu dependencies: ```bash -sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang +sudo apt update && sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang ``` > Tips: diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index b8cba80d735..77fd872bd22 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "5.0.0" +version = "5.1.1" authors = ["Sigma Prime "] edition = { workspace = true } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index a301055f34c..8a1cf2ff37e 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1710,12 +1710,12 @@ impl TryFrom<&HeaderMap> for ProduceBlockV3Metadata { })?; let execution_payload_value = parse_required_header(headers, EXECUTION_PAYLOAD_VALUE_HEADER, |s| { - s.parse::() + Uint256::from_dec_str(s) .map_err(|e| format!("invalid {EXECUTION_PAYLOAD_VALUE_HEADER}: {e:?}")) })?; let consensus_block_value = parse_required_header(headers, CONSENSUS_BLOCK_VALUE_HEADER, |s| { - s.parse::() + Uint256::from_dec_str(s) .map_err(|e| format!("invalid {CONSENSUS_BLOCK_VALUE_HEADER}: {e:?}")) })?; diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 10759f94306..81d0e797a1e 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v5.0.0-", - fallback = "Lighthouse/v5.0.0" + prefix = "Lighthouse/v5.1.1-", + fallback = "Lighthouse/v5.1.1" ); /// Returns `VERSION`, but with platform information appended to the end. 
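The switch to `Uint256::from_dec_str` above is a correctness fix rather than a style change: `Uint256` is `ethereum_types::U256`, whose `FromStr` implementation reads the input as hexadecimal, while the payload-value and block-value headers carry decimal Wei strings. A small demonstration of the difference, assuming the `ethereum-types`/`uint` parsing behavior described:

```rust
use ethereum_types::U256;

fn main() {
    // FromStr treats the string as hex: "1000" parses as 0x1000 == 4096.
    let as_hex: U256 = "1000".parse().unwrap();
    assert_eq!(as_hex, U256::from(0x1000));
    // from_dec_str parses base-10, matching the decimal header encoding.
    let as_dec = U256::from_dec_str("1000").unwrap();
    assert_eq!(as_dec, U256::from(1000));
}
```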
diff --git a/common/system_health/src/lib.rs b/common/system_health/src/lib.rs index d10540e506c..ec64ce31ad3 100644 --- a/common/system_health/src/lib.rs +++ b/common/system_health/src/lib.rs @@ -198,6 +198,25 @@ pub fn observe_system_health_vc( } } +/// Observes if NAT traversal is possible. +pub fn observe_nat() -> bool { + let discv5_nat = lighthouse_network::metrics::get_int_gauge( + &lighthouse_network::metrics::NAT_OPEN, + &["discv5"], + ) + .map(|g| g.get() == 1) + .unwrap_or_default(); + + let libp2p_nat = lighthouse_network::metrics::get_int_gauge( + &lighthouse_network::metrics::NAT_OPEN, + &["libp2p"], + ) + .map(|g| g.get() == 1) + .unwrap_or_default(); + + discv5_nat && libp2p_nat +} + /// Observes the Beacon Node system health. pub fn observe_system_health_bn( sysinfo: Arc>, @@ -223,11 +242,7 @@ pub fn observe_system_health_bn( .unwrap_or_else(|| (String::from("None"), 0, 0)); // Determine if the NAT is open or not. - let nat_open = lighthouse_network::metrics::NAT_OPEN - .as_ref() - .map(|v| v.get()) - .unwrap_or(0) - != 0; + let nat_open = observe_nat(); SystemHealthBN { system_health, diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 38f4eca3699..b3d58fa5ea8 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -5,10 +5,10 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dependencies] +async-channel = { workspace = true } tokio = { workspace = true } slog = { workspace = true } futures = { workspace = true } -exit-future = { workspace = true } lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } sloggers = { workspace = true } diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index 2b8877b26ba..d6edfd3121c 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -73,7 +73,7 @@ pub struct TaskExecutor { /// The handle to the runtime on which tasks are spawned handle_provider: HandleProvider, /// The receiver exit future which on receiving shuts down the task - exit: exit_future::Exit, + exit: async_channel::Receiver<()>, /// Sender given to tasks, so that if they encounter a state in which execution cannot /// continue they can request that everything shuts down. /// @@ -93,7 +93,7 @@ impl TaskExecutor { /// crate). pub fn new>( handle: T, - exit: exit_future::Exit, + exit: async_channel::Receiver<()>, log: slog::Logger, signal_tx: Sender, ) -> Self { @@ -159,8 +159,8 @@ impl TaskExecutor { /// Spawn a future on the tokio runtime. /// - /// The future is wrapped in an `exit_future::Exit`. The task is cancelled when the corresponding - /// exit_future `Signal` is fired/dropped. + /// The future is wrapped in an `async-channel::Receiver`. The task is cancelled when the corresponding + /// Sender is dropped. /// /// The future is monitored via another spawned future to ensure that it doesn't panic. In case /// of a panic, the executor will be shut down via `self.signal_tx`. @@ -172,9 +172,9 @@ impl TaskExecutor { } } - /// Spawn a future on the tokio runtime. This function does not wrap the task in an `exit_future::Exit` + /// Spawn a future on the tokio runtime. This function does not wrap the task in an `async-channel::Receiver` /// like [spawn](#method.spawn). 
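The `exit-future` to `async-channel` migration running through this diff rests on one property of `async_channel::Receiver`: `recv().await` returns `Err(RecvError)` as soon as every `Sender` has been dropped and the channel is empty. Dropping the sender therefore doubles as the shutdown signal, which is what the various `_runtime_shutdown` fields and the reworked `exit()` rely on. A minimal sketch of the idiom:

```rust
// Minimal sketch of the shutdown idiom that replaces exit_future.
#[tokio::main]
async fn main() {
    let (signal, exit) = async_channel::bounded::<()>(1);

    let task = tokio::spawn(async move {
        // Resolves (with Err) once `signal` is dropped; sending () also works.
        let _ = exit.recv().await;
        println!("exit received, shutting down");
    });

    drop(signal); // the async-channel equivalent of exit_future's Signal::fire
    task.await.unwrap();
}
```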
- /// The caller of this function is responsible for wrapping up the task with an `exit_future::Exit` to + /// The caller of this function is responsible for wrapping up the task with an `async-channel::Receiver` to /// ensure that the task gets canceled appropriately. /// This function generates prometheus metrics on number of tasks and task duration. /// @@ -213,9 +213,9 @@ impl TaskExecutor { } } - /// Spawn a future on the tokio runtime wrapped in an `exit_future::Exit` returning an optional + /// Spawn a future on the tokio runtime wrapped in an `async-channel::Receiver` returning an optional /// join handle to the future. - /// The task is canceled when the corresponding exit_future `Signal` is fired/dropped. + /// The task is canceled when the corresponding async-channel is dropped. /// /// This function generates prometheus metrics on number of tasks and task duration. pub fn spawn_handle( @@ -223,30 +223,29 @@ impl TaskExecutor { task: impl Future + Send + 'static, name: &'static str, ) -> Option>> { - let exit = self.exit.clone(); + let exit = self.exit(); let log = self.log.clone(); if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) { // Task is shutdown before it completes if `exit` receives let int_gauge_1 = int_gauge.clone(); - let future = future::select(Box::pin(task), exit).then(move |either| { - let result = match either { - future::Either::Left((value, _)) => { - trace!(log, "Async task completed"; "task" => name); - Some(value) - } - future::Either::Right(_) => { - debug!(log, "Async task shutdown, exit received"; "task" => name); - None - } - }; - int_gauge_1.dec(); - futures::future::ready(result) - }); - int_gauge.inc(); if let Some(handle) = self.handle() { - Some(handle.spawn(future)) + Some(handle.spawn(async move { + futures::pin_mut!(exit); + let result = match future::select(Box::pin(task), exit).await { + future::Either::Left((value, _)) => { + trace!(log, "Async task completed"; "task" => name); + Some(value) + } + future::Either::Right(_) => { + debug!(log, "Async task shutdown, exit received"; "task" => name); + None + } + }; + int_gauge_1.dec(); + result + })) } else { debug!(self.log, "Couldn't spawn task. Runtime shutting down"); None @@ -324,7 +323,7 @@ impl TaskExecutor { metrics::inc_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]); let log = self.log.clone(); let handle = self.handle()?; - let exit = self.exit.clone(); + let exit = self.exit(); debug!( log, @@ -362,9 +361,13 @@ impl TaskExecutor { self.handle_provider.handle() } - /// Returns a copy of the `exit_future::Exit`. - pub fn exit(&self) -> exit_future::Exit { - self.exit.clone() + /// Returns a future that completes when `async-channel::Sender` is dropped or () is sent, + /// which translates to the exit signal being triggered. + pub fn exit(&self) -> impl Future { + let exit = self.exit.clone(); + async move { + let _ = exit.recv().await; + } } /// Get a channel to request shutting down. diff --git a/common/task_executor/src/test_utils.rs b/common/task_executor/src/test_utils.rs index c6e5ad01e68..6e372d97575 100644 --- a/common/task_executor/src/test_utils.rs +++ b/common/task_executor/src/test_utils.rs @@ -14,7 +14,7 @@ use tokio::runtime; /// This struct should never be used in production, only testing. 
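The reworked `spawn_handle` keeps the same cancellation semantics while moving the `select` inside the spawned future. The `futures::pin_mut!(exit)` is needed because `exit()` now returns an anonymous `impl Future`, which is not `Unpin`, whereas the old `exit_future::Exit` was; `future::select` requires `Unpin` operands. The pattern in isolation, as a sketch:

```rust
use futures::future::{self, Either};
use futures::pin_mut;

// Sketch of spawn_handle's cancellation: race the task against the exit
// future and map the winner to Option<R>.
async fn run_until_exit<R>(
    task: impl std::future::Future<Output = R>,
    exit: impl std::future::Future<Output = ()>,
) -> Option<R> {
    pin_mut!(task);
    pin_mut!(exit);
    match future::select(task, exit).await {
        Either::Left((value, _)) => Some(value), // task finished first
        Either::Right(((), _)) => None,          // exit fired; task is dropped
    }
}
```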
pub struct TestRuntime { runtime: Option<Arc<runtime::Runtime>>, - _runtime_shutdown: exit_future::Signal, + _runtime_shutdown: async_channel::Sender<()>, pub task_executor: TaskExecutor, pub log: Logger, } @@ -24,7 +24,7 @@ impl Default for TestRuntime { /// called *outside* any existing runtime, create a new `Runtime` and keep it alive until the /// `Self` is dropped. fn default() -> Self { - let (runtime_shutdown, exit) = exit_future::signal(); + let (runtime_shutdown, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let log = null_logger().unwrap(); diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index ac4a583cbb6..d1d75523ad1 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -125,7 +125,7 @@ mod tests { // Check the in-memory size of an `Attestation`, which is useful for reasoning about memory // and preventing regressions. // - // This test will only pass with `blst`, if we run these tests with Milagro or another + // This test will only pass with `blst`, if we run these tests with another // BLS library in future we will have to make it generic. #[test] fn size_of() { diff --git a/consensus/types/src/validator_subscription.rs b/consensus/types/src/validator_subscription.rs index fd48660c52b..62932638ec1 100644 --- a/consensus/types/src/validator_subscription.rs +++ b/consensus/types/src/validator_subscription.rs @@ -4,10 +4,8 @@ use ssz_derive::{Decode, Encode}; /// A validator subscription, created when a validator subscribes to a slot to perform optional aggregation /// duties. -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode, Eq, PartialOrd, Ord)] pub struct ValidatorSubscription { - /// The validators index. - pub validator_index: u64, /// The index of the committee within `slot` of which the validator is a member. Used by the /// beacon node to quickly evaluate the associated `SubnetId`.
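Removing `validator_index` is what makes the new `BTreeSet<ValidatorSubscription>` plumbing (and the added `Eq`/`PartialOrd`/`Ord` derives) pay off: two validators with the same duty now produce identical subscriptions, so the set deduplicates them before they reach the attestation service. A toy stand-in for the struct shows the effect; `Subscription` here is hypothetical:

```rust
use std::collections::BTreeSet;

// Hypothetical stand-in for ValidatorSubscription after this change; Ord is
// what BTreeSet needs in order to collapse duplicates.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Subscription {
    attestation_committee_index: u64,
    slot: u64,
    committee_count_at_slot: u64,
    is_aggregator: bool,
}

fn main() {
    let duty = |slot| Subscription {
        attestation_committee_index: 1,
        slot,
        committee_count_at_slot: 64,
        is_aggregator: false,
    };
    // Two validators sharing a duty used to yield two entries distinguished
    // only by validator_index; now they collapse into one subscription.
    let set: BTreeSet<_> = [duty(10), duty(10)].into_iter().collect();
    assert_eq!(set.len(), 1);
}
```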
pub attestation_committee_index: CommitteeIndex, diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index 1216fc2a986..7aa8e02dcab 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -7,7 +7,6 @@ edition = { workspace = true } [dependencies] ethereum_ssz = { workspace = true } tree_hash = { workspace = true } -milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.5.1", optional = true } rand = { workspace = true } serde = { workspace = true } ethereum_serde_utils = { workspace = true } @@ -22,7 +21,6 @@ blst = { version = "0.3.3", optional = true } arbitrary = [] default = ["supranational"] fake_crypto = [] -milagro = ["milagro_bls"] supranational = ["blst"] supranational-portable = ["supranational", "blst/portable"] supranational-force-adx = ["supranational", "blst/force-adx"] diff --git a/crypto/bls/src/impls/milagro.rs b/crypto/bls/src/impls/milagro.rs deleted file mode 100644 index eb4767d3c70..00000000000 --- a/crypto/bls/src/impls/milagro.rs +++ /dev/null @@ -1,194 +0,0 @@ -use crate::{ - generic_aggregate_public_key::TAggregatePublicKey, - generic_aggregate_signature::TAggregateSignature, - generic_public_key::{GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN}, - generic_secret_key::{TSecretKey, SECRET_KEY_BYTES_LEN}, - generic_signature::{TSignature, SIGNATURE_BYTES_LEN}, - Error, Hash256, ZeroizeHash, -}; -pub use milagro_bls as milagro; -use rand::thread_rng; -use std::iter::ExactSizeIterator; - -/// Provides the externally-facing, core BLS types. -pub mod types { - pub use super::milagro::AggregatePublicKey; - pub use super::milagro::AggregateSignature; - pub use super::milagro::PublicKey; - pub use super::milagro::SecretKey; - pub use super::milagro::Signature; - pub use super::verify_signature_sets; - pub use super::SignatureSet; -} - -pub type SignatureSet<'a> = crate::generic_signature_set::GenericSignatureSet< - 'a, - milagro::PublicKey, - milagro::AggregatePublicKey, - milagro::Signature, - milagro::AggregateSignature, ->; - -pub fn verify_signature_sets<'a>( - signature_sets: impl ExactSizeIterator>, -) -> bool { - if signature_sets.len() == 0 { - return false; - } - - signature_sets - .map(|signature_set| { - let mut aggregate = milagro::AggregatePublicKey::from_public_key( - signature_set.signing_keys.first().ok_or(())?.point(), - ); - - for signing_key in signature_set.signing_keys.iter().skip(1) { - aggregate.add(signing_key.point()) - } - - if signature_set.signature.point().is_none() { - return Err(()); - } - - Ok(( - signature_set.signature.as_ref(), - aggregate, - signature_set.message, - )) - }) - .collect::, ()>>() - .map(|aggregates| { - milagro::AggregateSignature::verify_multiple_aggregate_signatures( - &mut rand::thread_rng(), - aggregates.iter().map(|(signature, aggregate, message)| { - ( - signature - .point() - .expect("guarded against none by previous check"), - aggregate, - message.as_bytes(), - ) - }), - ) - }) - .unwrap_or(false) -} - -impl TPublicKey for milagro::PublicKey { - fn serialize(&self) -> [u8; PUBLIC_KEY_BYTES_LEN] { - let mut bytes = [0; PUBLIC_KEY_BYTES_LEN]; - bytes[..].copy_from_slice(&self.as_bytes()); - bytes - } - - fn deserialize(bytes: &[u8]) -> Result { - Self::from_bytes(bytes).map_err(Into::into) - } -} - -impl TAggregatePublicKey for milagro::AggregatePublicKey { - fn to_public_key(&self) -> GenericPublicKey { - GenericPublicKey::from_point(milagro::PublicKey { - point: self.point.clone(), - }) - } - - fn aggregate(pubkeys: &[GenericPublicKey]) -> Result { - let pubkey_refs = 
pubkeys.iter().map(|pk| pk.point()).collect::>(); - Ok(milagro::AggregatePublicKey::aggregate(&pubkey_refs)?) - } -} - -impl TSignature for milagro::Signature { - fn serialize(&self) -> [u8; SIGNATURE_BYTES_LEN] { - let mut bytes = [0; SIGNATURE_BYTES_LEN]; - - bytes[..].copy_from_slice(&self.as_bytes()); - - bytes - } - - fn deserialize(bytes: &[u8]) -> Result { - milagro::Signature::from_bytes(&bytes).map_err(Error::MilagroError) - } - - fn verify(&self, pubkey: &milagro::PublicKey, msg: Hash256) -> bool { - self.verify(msg.as_bytes(), pubkey) - } -} - -impl TAggregateSignature - for milagro::AggregateSignature -{ - fn infinity() -> Self { - milagro::AggregateSignature::new() - } - - fn add_assign(&mut self, other: &milagro::Signature) { - self.add(other) - } - - fn add_assign_aggregate(&mut self, other: &Self) { - self.add_aggregate(other) - } - - fn serialize(&self) -> [u8; SIGNATURE_BYTES_LEN] { - let mut bytes = [0; SIGNATURE_BYTES_LEN]; - - bytes[..].copy_from_slice(&self.as_bytes()); - - bytes - } - - fn deserialize(bytes: &[u8]) -> Result { - milagro::AggregateSignature::from_bytes(&bytes).map_err(Error::MilagroError) - } - - fn fast_aggregate_verify( - &self, - msg: Hash256, - pubkeys: &[&GenericPublicKey], - ) -> bool { - let pubkeys = pubkeys.iter().map(|pk| pk.point()).collect::>(); - self.fast_aggregate_verify(msg.as_bytes(), &pubkeys) - } - - fn aggregate_verify( - &self, - msgs: &[Hash256], - pubkeys: &[&GenericPublicKey], - ) -> bool { - let pubkeys = pubkeys.iter().map(|pk| pk.point()).collect::>(); - let msgs = msgs.iter().map(|hash| hash.as_bytes()).collect::>(); - self.aggregate_verify(&msgs, &pubkeys) - } -} - -impl TSecretKey for milagro::SecretKey { - fn random() -> Self { - Self::random(&mut thread_rng()) - } - - fn public_key(&self) -> milagro::PublicKey { - let point = milagro::PublicKey::from_secret_key(self).point; - milagro::PublicKey { point } - } - - fn sign(&self, msg: Hash256) -> milagro::Signature { - let point = milagro::Signature::new(msg.as_bytes(), self).point; - milagro::Signature { point } - } - - fn serialize(&self) -> ZeroizeHash { - let mut bytes = [0; SECRET_KEY_BYTES_LEN]; - - // Takes the right-hand 32 bytes from the secret key. - bytes[..].copy_from_slice(&self.as_bytes()); - - bytes.into() - } - - fn deserialize(bytes: &[u8]) -> Result { - Self::from_bytes(bytes).map_err(Into::into) - } -} diff --git a/crypto/bls/src/impls/mod.rs b/crypto/bls/src/impls/mod.rs index b3f2da77b12..d87c3b12ba3 100644 --- a/crypto/bls/src/impls/mod.rs +++ b/crypto/bls/src/impls/mod.rs @@ -1,5 +1,3 @@ #[cfg(feature = "supranational")] pub mod blst; pub mod fake_crypto; -#[cfg(feature = "milagro")] -pub mod milagro; diff --git a/crypto/bls/src/lib.rs b/crypto/bls/src/lib.rs index 750e1bd5b80..fef9804b784 100644 --- a/crypto/bls/src/lib.rs +++ b/crypto/bls/src/lib.rs @@ -9,15 +9,13 @@ //! are supported via compile-time flags. There are three backends supported via features: //! //! - `supranational`: the pure-assembly, highly optimized version from the `blst` crate. -//! - `milagro`: the classic pure-Rust `milagro_bls` crate. //! - `fake_crypto`: an always-returns-valid implementation that is only useful for testing //! scenarios which intend to *ignore* real cryptography. //! //! This crate uses traits to reduce code-duplication between the two implementations. For example, //! the `GenericPublicKey` struct exported from this crate is generic across the `TPublicKey` trait //! (i.e., `PublicKey`). `TPublicKey` is implemented by all three backends (see the -//! 
`impls.rs` module). When compiling with the `milagro` feature, we export -//! `type PublicKey = GenericPublicKey`. +//! `impls.rs` module). #[macro_use] mod macros; @@ -43,16 +41,11 @@ pub use zeroize_hash::ZeroizeHash; #[cfg(feature = "supranational")] use blst::BLST_ERROR as BlstError; -#[cfg(feature = "milagro")] -use milagro_bls::AmclError; pub type Hash256 = ethereum_types::H256; #[derive(Clone, Debug, PartialEq)] pub enum Error { - /// An error was raised from the Milagro BLS library. - #[cfg(feature = "milagro")] - MilagroError(AmclError), /// An error was raised from the Supranational BLST BLS library. #[cfg(feature = "supranational")] BlstError(BlstError), @@ -66,13 +59,6 @@ pub enum Error { InvalidZeroSecretKey, } -#[cfg(feature = "milagro")] -impl From for Error { - fn from(e: AmclError) -> Error { - Error::MilagroError(e) - } -} - #[cfg(feature = "supranational")] impl From for Error { fn from(e: BlstError) -> Error { @@ -94,8 +80,7 @@ pub mod generics { } /// Defines all the fundamental BLS points which should be exported by this crate by making -/// concrete the generic type parameters using the points from some external BLS library (e.g., -/// Milagro, BLST). +/// concrete the generic type parameters using the points from some external BLS library (e.g.,BLST). macro_rules! define_mod { ($name: ident, $mod: path) => { pub mod $name { @@ -139,8 +124,6 @@ macro_rules! define_mod { }; } -#[cfg(feature = "milagro")] -define_mod!(milagro_implementations, crate::impls::milagro::types); #[cfg(feature = "supranational")] define_mod!(blst_implementations, crate::impls::blst::types); #[cfg(feature = "fake_crypto")] @@ -149,14 +132,7 @@ define_mod!( crate::impls::fake_crypto::types ); -#[cfg(all(feature = "milagro", not(feature = "fake_crypto"),))] -pub use milagro_implementations::*; - -#[cfg(all( - feature = "supranational", - not(feature = "fake_crypto"), - not(feature = "milagro") -))] +#[cfg(all(feature = "supranational", not(feature = "fake_crypto"),))] pub use blst_implementations::*; #[cfg(feature = "fake_crypto")] diff --git a/crypto/bls/tests/tests.rs b/crypto/bls/tests/tests.rs index ad498dbfa87..478c1b7dc26 100644 --- a/crypto/bls/tests/tests.rs +++ b/crypto/bls/tests/tests.rs @@ -509,8 +509,3 @@ macro_rules! test_suite { mod blst { test_suite!(blst_implementations); } - -#[cfg(all(feature = "milagro", not(debug_assertions)))] -mod milagro { - test_suite!(milagro_implementations); -} diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 3acf3909b3b..9796217d03b 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "5.0.0" +version = "5.1.1" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index ffa4727d7f2..d664aac3141 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "5.0.0" +version = "5.1.1" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false @@ -14,8 +14,6 @@ write_ssz_files = ["beacon_node/write_ssz_files"] portable = ["bls/supranational-portable"] # Compiles BLST so that it always uses ADX instructions. modern = ["bls/supranational-force-adx"] -# Uses the slower Milagro BLS library, which is written in native Rust. -milagro = ["bls/milagro"] # Support minimal spec (used for testing only). spec-minimal = [] # Support Gnosis spec and Gnosis Beacon Chain. 
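With the `milagro` backend gone, the re-export arbitration in `bls/src/lib.rs` collapses to a two-way choice, and the precedence becomes easier to see: `fake_crypto` wins whenever it is enabled, because the `blst` re-export is gated on `not(feature = "fake_crypto")`. Condensed from the hunks above:

```rust
// Net effect of the cfg changes: fake_crypto takes precedence when enabled;
// blst is the backend otherwise.
#[cfg(all(feature = "supranational", not(feature = "fake_crypto")))]
pub use blst_implementations::*;

#[cfg(feature = "fake_crypto")]
pub use fake_crypto_implementations::*;
```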
diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index b57e1e9dee0..f95751392c8 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] +async-channel = { workspace = true } tokio = { workspace = true } slog = { workspace = true } sloggers = { workspace = true } @@ -17,7 +18,6 @@ slog-term = { workspace = true } slog-async = { workspace = true } futures = { workspace = true } slog-json = "2.3.0" -exit-future = { workspace = true } serde = { workspace = true } [target.'cfg(not(target_family = "unix"))'.dependencies] diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 40001f1e1d4..e59b1d455a4 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -343,7 +343,7 @@ impl EnvironmentBuilder { /// Consumes the builder, returning an `Environment`. pub fn build(self) -> Result, String> { - let (signal, exit) = exit_future::signal(); + let (signal, exit) = async_channel::bounded(1); let (signal_tx, signal_rx) = channel(1); Ok(Environment { runtime: self @@ -370,8 +370,8 @@ pub struct Environment { signal_rx: Option>, /// Sender to request shutting down. signal_tx: Sender, - signal: Option, - exit: exit_future::Exit, + signal: Option>, + exit: async_channel::Receiver<()>, log: Logger, sse_logging_components: Option, eth_spec_instance: E, @@ -543,7 +543,7 @@ impl Environment { /// Fire exit signal which shuts down all spawned services pub fn fire_signal(&mut self) { if let Some(signal) = self.signal.take() { - let _ = signal.fire(); + drop(signal); } } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 3e1bb1c9da0..c27a9724a29 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -43,8 +43,6 @@ fn bls_library_name() -> &'static str { "blst-portable" } else if cfg!(feature = "modern") { "blst-modern" - } else if cfg!(feature = "milagro") { - "milagro" } else { "blst" } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 7dee80cedea..e2bac0d4835 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -23,8 +23,11 @@ use types::{ Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec, ProgressiveBalancesMode, }; +use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; + +// These dummy ports should ONLY be used for `enr-xxx-port` flags that do not bind. 
const DUMMY_ENR_TCP_PORT: u16 = 7777; const DUMMY_ENR_UDP_PORT: u16 = 8888; const DUMMY_ENR_QUIC_PORT: u16 = 9999; @@ -172,6 +175,26 @@ fn shuffling_cache_set() { .with_config(|config| assert_eq!(config.chain.shuffling_cache_size, 500)); } +#[test] +fn snapshot_cache_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.snapshot_cache_size, + beacon_node::beacon_chain::snapshot_cache::DEFAULT_SNAPSHOT_CACHE_SIZE + ) + }); +} + +#[test] +fn snapshot_cache_set() { + CommandLineTest::new() + .flag("state-cache-size", Some("500")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.snapshot_cache_size, 500)); +} + #[test] fn fork_choice_before_proposal_timeout_default() { CommandLineTest::new() @@ -871,7 +894,7 @@ fn network_port_flag_over_ipv4() { ); }); - let port = 9000; + let port = unused_tcp4_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("port", Some(port.to_string().as_str())) .flag("allow-insecure-genesis-sync", None) @@ -908,7 +931,7 @@ fn network_port_flag_over_ipv6() { ); }); - let port = 9000; + let port = unused_tcp4_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("port", Some(port.to_string().as_str())) @@ -958,8 +981,8 @@ fn network_port_flag_over_ipv4_and_ipv6() { ); }); - let port = 19000; - let port6 = 29000; + let port = unused_tcp4_port().expect("Unable to find unused port."); + let port6 = unused_tcp6_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("listen-address", Some("127.0.0.1")) .flag("listen-address", Some("::1")) @@ -1300,9 +1323,8 @@ fn enr_tcp6_port_flag() { fn enr_match_flag_over_ipv4() { let addr = "127.0.0.2".parse::().unwrap(); - // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). - let udp4_port = DUMMY_ENR_UDP_PORT; - let tcp4_port = DUMMY_ENR_TCP_PORT; + let udp4_port = unused_udp4_port().expect("Unable to find unused port."); + let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-match", None) @@ -1332,9 +1354,8 @@ fn enr_match_flag_over_ipv6() { const ADDR: &str = "::1"; let addr = ADDR.parse::().unwrap(); - // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). - let udp6_port = DUMMY_ENR_UDP_PORT; - let tcp6_port = DUMMY_ENR_TCP_PORT; + let udp6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-match", None) @@ -1363,15 +1384,13 @@ fn enr_match_flag_over_ipv6() { fn enr_match_flag_over_ipv4_and_ipv6() { const IPV6_ADDR: &str = "::1"; - // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). 
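The test changes here draw a useful line: flags that actually bind a socket now take fresh ports from the `unused_port` helpers so parallel test runs cannot collide, while the fixed `DUMMY_ENR_*` constants survive only for `--enr-*-port` flags that are written into the ENR without ever binding. A usage sketch of the helpers, per the import added above (their exact probing behavior is assumed):

```rust
use unused_port::{unused_tcp4_port, unused_udp4_port};

fn main() {
    // Each call finds a currently-free port: needed for flags that bind,
    // unnecessary for ENR-only flags that never open a socket.
    let tcp4 = unused_tcp4_port().expect("Unable to find unused port.");
    let udp4 = unused_udp4_port().expect("Unable to find unused port.");
    println!("listening on tcp {tcp4}, discovering on udp {udp4}");
}
```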
- let udp6_port = DUMMY_ENR_UDP_PORT; - let tcp6_port = DUMMY_ENR_TCP_PORT; + let udp6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); let ipv6_addr = IPV6_ADDR.parse::().unwrap(); const IPV4_ADDR: &str = "127.0.0.1"; - // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). - let udp4_port = DUMMY_ENR_UDP_PORT; - let tcp4_port = DUMMY_ENR_TCP_PORT; + let udp4_port = unused_udp4_port().expect("Unable to find unused port."); + let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); let ipv4_addr = IPV4_ADDR.parse::().unwrap(); CommandLineTest::new() diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 8bc36d008b1..f3d00fa035c 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -7,7 +7,6 @@ edition = { workspace = true } [features] # `ef_tests` feature must be enabled to actually run the tests ef_tests = [] -milagro = ["bls/milagro"] fake_crypto = ["bls/fake_crypto"] portable = ["beacon_chain/portable"] diff --git a/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs b/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs index 8783aa141e9..2a9a393bfdb 100644 --- a/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs +++ b/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs @@ -31,10 +31,6 @@ impl Case for BlsEthAggregatePubkeys { { return Ok(()); } - #[cfg(feature = "milagro")] - Err(bls::Error::MilagroError(_)) if self.output.is_none() => { - return Ok(()); - } Err(e) => return Err(Error::FailedToParseTest(format!("{:?}", e))), }; diff --git a/testing/ef_tests/src/decode.rs b/testing/ef_tests/src/decode.rs index b5c0da53a01..e95bddffac3 100644 --- a/testing/ef_tests/src/decode.rs +++ b/testing/ef_tests/src/decode.rs @@ -71,9 +71,7 @@ where f(&bytes).map_err(|e| { match e { // NOTE: this is a bit hacky, but seemingly better than the alternatives - ssz::DecodeError::BytesInvalid(message) - if message.contains("Blst") || message.contains("Milagro") => - { + ssz::DecodeError::BytesInvalid(message) if message.contains("Blst") => { Error::InvalidBLSInput(message) } e => Error::FailedToParseTest(format!( diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 6de108fcb69..7f66658f0fa 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -4,12 +4,12 @@ version = "0.1.0" edition = { workspace = true } [dependencies] +async-channel = { workspace = true } tempfile = { workspace = true } serde_json = { workspace = true } task_executor = { workspace = true } tokio = { workspace = true } futures = { workspace = true } -exit-future = { workspace = true } environment = { workspace = true } execution_layer = { workspace = true } sensitive_url = { workspace = true } @@ -24,4 +24,4 @@ fork_choice = { workspace = true } logging = { workspace = true } [features] -portable = ["types/portable"] \ No newline at end of file +portable = ["types/portable"] diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index bfa56f63c0d..8a61f17ce6f 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -42,7 +42,7 @@ pub struct TestRig { ee_a: ExecutionPair, ee_b: ExecutionPair, spec: 
diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs
index bfa56f63c0d..8a61f17ce6f 100644
--- a/testing/execution_engine_integration/src/test_rig.rs
+++ b/testing/execution_engine_integration/src/test_rig.rs
@@ -42,7 +42,7 @@ pub struct TestRig {
     ee_a: ExecutionPair,
     ee_b: ExecutionPair,
     spec: ChainSpec,
-    _runtime_shutdown: exit_future::Signal,
+    _runtime_shutdown: async_channel::Sender<()>,
 }

 /// Import a private key into the execution engine and unlock it so that we can
@@ -111,7 +111,7 @@ impl TestRig {
                 .build()
                 .unwrap(),
         );
-        let (runtime_shutdown, exit) = exit_future::signal();
+        let (runtime_shutdown, exit) = async_channel::bounded(1);
         let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
         let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx);
         let mut spec = TEST_FORK.make_genesis_spec(MainnetEthSpec::default_spec());
diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml
index 38b775b3928..1bdf62cd22e 100644
--- a/testing/web3signer_tests/Cargo.toml
+++ b/testing/web3signer_tests/Cargo.toml
@@ -8,6 +8,7 @@ edition = { workspace = true }
 [dependencies]

 [dev-dependencies]
+async-channel = { workspace = true }
 eth2_keystore = { workspace = true }
 types = { workspace = true }
 tempfile = { workspace = true }
@@ -17,7 +18,6 @@ url = { workspace = true }
 validator_client = { workspace = true }
 slot_clock = { workspace = true }
 futures = { workspace = true }
-exit-future = { workspace = true }
 task_executor = { workspace = true }
 environment = { workspace = true }
 account_utils = { workspace = true }
diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs
index 8feea1fd7f2..3090b4da556 100644
--- a/testing/web3signer_tests/src/lib.rs
+++ b/testing/web3signer_tests/src/lib.rs
@@ -307,7 +307,7 @@ mod tests {
         validator_store: Arc<ValidatorStore<TestingSlotClock, E>>,
         _validator_dir: TempDir,
         runtime: Arc<tokio::runtime::Runtime>,
-        _runtime_shutdown: exit_future::Signal,
+        _runtime_shutdown: async_channel::Sender<()>,
         using_web3signer: bool,
     }

@@ -340,7 +340,7 @@ mod tests {
                 .build()
                 .unwrap(),
         );
-        let (runtime_shutdown, exit) = exit_future::signal();
+        let (runtime_shutdown, exit) = async_channel::bounded(1);
         let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
         let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx);
diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml
index 8e587c6155f..d3dffc3d02e 100644
--- a/validator_client/Cargo.toml
+++ b/validator_client/Cargo.toml
@@ -31,7 +31,6 @@ directory = { workspace = true }
 lockfile = { workspace = true }
 environment = { workspace = true }
 parking_lot = { workspace = true }
-exit-future = { workspace = true }
 filesystem = { workspace = true }
 hex = { workspace = true }
 deposit_contract = { workspace = true }
diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs
index 43b9d60e234..1c6b60addb6 100644
--- a/validator_client/src/attestation_service.rs
+++ b/validator_client/src/attestation_service.rs
@@ -430,6 +430,11 @@ impl AttestationService {
             .flatten()
             .unzip();

+        if attestations.is_empty() {
+            warn!(log, "No attestations were published");
+            return Ok(None);
+        }
+
         // Post the attestations to the BN.
         match self
             .beacon_nodes
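Note that both rigs build their `TaskExecutor` from `Arc::downgrade(&runtime)` rather than the `Arc` itself. A minimal sketch of why a `Weak` runtime handle is used, with stand-in types (the real executor also takes the exit channel and a logger, as shown above):

use std::sync::{Arc, Weak};

// Stand-in for the real tokio runtime; the pattern is what matters here.
struct Runtime;

struct Executor {
    // A `Weak` handle means the executor does not keep the runtime alive, so
    // dropping the rig's `Arc<Runtime>` is enough to wind everything down.
    runtime: Weak<Runtime>,
}

impl Executor {
    fn spawn_work(&self) {
        // Upgrade only while spawning; if the runtime is already gone, skip
        // the work instead of extending the runtime's lifetime.
        if let Some(_runtime) = self.runtime.upgrade() {
            // _runtime.spawn(...) would go here in a real executor.
        }
    }
}

fn main() {
    let runtime = Arc::new(Runtime);
    let executor = Executor {
        runtime: Arc::downgrade(&runtime),
    };
    executor.spawn_work();
    drop(runtime);
    executor.spawn_work(); // no-op: the runtime has been dropped
}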
diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs
index 290803e257a..b5b56943c4b 100644
--- a/validator_client/src/duties_service.rs
+++ b/validator_client/src/duties_service.rs
@@ -122,43 +122,37 @@ pub struct SubscriptionSlots {
     slots: Vec<(Slot, AtomicBool)>,
 }

-impl DutyAndProof {
-    /// Instantiate `Self`, computing the selection proof as well.
-    pub async fn new_with_selection_proof<T: SlotClock + 'static, E: EthSpec>(
-        duty: AttesterData,
-        validator_store: &ValidatorStore<T, E>,
-        spec: &ChainSpec,
-    ) -> Result<Self, Error> {
-        let selection_proof = validator_store
-            .produce_selection_proof(duty.pubkey, duty.slot)
-            .await
-            .map_err(Error::FailedToProduceSelectionProof)?;
-
-        let selection_proof = selection_proof
-            .is_aggregator(duty.committee_length as usize, spec)
-            .map_err(Error::InvalidModulo)
-            .map(|is_aggregator| {
-                if is_aggregator {
-                    Some(selection_proof)
-                } else {
-                    // Don't bother storing the selection proof if the validator isn't an
-                    // aggregator, we won't need it.
-                    None
-                }
-            })?;
-
-        let subscription_slots = SubscriptionSlots::new(duty.slot);
-
-        Ok(Self {
-            duty,
-            selection_proof,
-            subscription_slots,
+/// Create a selection proof for `duty`.
+///
+/// Return `Ok(None)` if the attesting validator is not an aggregator.
+async fn make_selection_proof<T: SlotClock + 'static, E: EthSpec>(
+    duty: &AttesterData,
+    validator_store: &ValidatorStore<T, E>,
+    spec: &ChainSpec,
+) -> Result<Option<SelectionProof>, Error> {
+    let selection_proof = validator_store
+        .produce_selection_proof(duty.pubkey, duty.slot)
+        .await
+        .map_err(Error::FailedToProduceSelectionProof)?;
+
+    selection_proof
+        .is_aggregator(duty.committee_length as usize, spec)
+        .map_err(Error::InvalidModulo)
+        .map(|is_aggregator| {
+            if is_aggregator {
+                Some(selection_proof)
+            } else {
+                // Don't bother storing the selection proof if the validator isn't an
+                // aggregator, we won't need it.
+                None
+            }
         })
-    }
+}

+impl DutyAndProof {
     /// Create a new `DutyAndProof` with the selection proof waiting to be filled in.
-    pub fn new_without_selection_proof(duty: AttesterData) -> Self {
-        let subscription_slots = SubscriptionSlots::new(duty.slot);
+    pub fn new_without_selection_proof(duty: AttesterData, current_slot: Slot) -> Self {
+        let subscription_slots = SubscriptionSlots::new(duty.slot, current_slot);
         Self {
             duty,
             selection_proof: None,
@@ -168,10 +162,13 @@ impl DutyAndProof {
 }

 impl SubscriptionSlots {
-    fn new(duty_slot: Slot) -> Arc<Self> {
+    fn new(duty_slot: Slot, current_slot: Slot) -> Arc<Self> {
         let slots = ATTESTATION_SUBSCRIPTION_OFFSETS
             .into_iter()
             .filter_map(|offset| duty_slot.safe_sub(offset).ok())
+            // Keep only scheduled slots that haven't happened yet. This avoids sending expired
+            // subscriptions.
+            .filter(|scheduled_slot| *scheduled_slot > current_slot)
             .map(|scheduled_slot| (scheduled_slot, AtomicBool::new(false)))
             .collect();
         Arc::new(Self { slots })
     }
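The new `filter` drops scheduled slots that are already in the past. A toy version of the calculation, with made-up offsets (the real values are the `ATTESTATION_SUBSCRIPTION_OFFSETS` constants):

// A toy version of the expiry filter above, using plain u64 slots.
fn scheduled_slots(duty_slot: u64, current_slot: u64) -> Vec<u64> {
    let offsets = [1u64, 2, 3, 4, 8, 16, 32]; // illustrative, not the real constants
    offsets
        .into_iter()
        .filter_map(|offset| duty_slot.checked_sub(offset))
        // Drop slots that have already happened; subscribing for them would
        // only send expired subscriptions to the beacon node.
        .filter(|scheduled| *scheduled > current_slot)
        .collect()
}

fn main() {
    // A duty at slot 100 with the clock at slot 97: only 99 and 98 remain.
    assert_eq!(scheduled_slots(100, 97), vec![99, 98]);
}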
@@ -787,14 +784,14 @@
     // request for extra data unless necessary in order to save on network bandwidth.
     let uninitialized_validators =
         get_uninitialized_validators(duties_service, &epoch, local_pubkeys);
-    let indices_to_request = if !uninitialized_validators.is_empty() {
+    let initial_indices_to_request = if !uninitialized_validators.is_empty() {
         uninitialized_validators.as_slice()
     } else {
         &local_indices[0..min(INITIAL_DUTIES_QUERY_SIZE, local_indices.len())]
     };

     let response =
-        post_validator_duties_attester(duties_service, epoch, indices_to_request).await?;
+        post_validator_duties_attester(duties_service, epoch, initial_indices_to_request).await?;
     let dependent_root = response.dependent_root;

     // Find any validators which have conflicting (epoch, dependent_root) values or missing duties for the epoch.
@@ -818,24 +815,29 @@
         return Ok(());
     }

-    // Filter out validators which have already been requested.
-    let initial_duties = &response.data;
+    // Make a request for all indices that require updating which we have not already made a request
+    // for.
     let indices_to_request = validators_to_update
         .iter()
-        .filter(|&&&pubkey| !initial_duties.iter().any(|duty| duty.pubkey == pubkey))
         .filter_map(|pubkey| duties_service.validator_store.validator_index(pubkey))
+        .filter(|validator_index| !initial_indices_to_request.contains(validator_index))
         .collect::<Vec<_>>();

-    let new_duties = if !indices_to_request.is_empty() {
+    // Filter the initial duties by their relevance so that we don't hit the warning below about
+    // overwriting duties. There was previously a bug here.
+    let new_initial_duties = response
+        .data
+        .into_iter()
+        .filter(|duty| validators_to_update.contains(&&duty.pubkey));
+
+    let mut new_duties = if !indices_to_request.is_empty() {
         post_validator_duties_attester(duties_service, epoch, indices_to_request.as_slice())
             .await?
             .data
-            .into_iter()
-            .chain(response.data)
-            .collect::<Vec<_>>()
     } else {
-        response.data
+        vec![]
     };
+    new_duties.extend(new_initial_duties);

     drop(fetch_timer);
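The dedup above in sketch form: the follow-up query covers only indices that need updating and were not part of the initial request, so no duty is fetched, and later inserted, twice. All names here are illustrative:

// Sketch of the two-phase fetch: a small initial query, then one follow-up
// for indices the first request did not cover.
fn follow_up_indices(all: &[u64], initial: &[u64], needs_update: impl Fn(u64) -> bool) -> Vec<u64> {
    all.iter()
        .copied()
        .filter(|i| needs_update(*i))
        // Skip anything the initial query already covered, so no index is
        // requested (or inserted into the duties map) twice.
        .filter(|i| !initial.contains(i))
        .collect()
}

fn main() {
    let all = [1, 2, 3, 4, 5];
    let initial = [1, 2];
    let out = follow_up_indices(&all, &initial, |i| i != 3);
    assert_eq!(out, vec![4, 5]);
}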
@@ -854,26 +856,53 @@
     // Update the duties service with the new `DutyAndProof` messages.
     let mut attesters = duties_service.attesters.write();
     let mut already_warned = Some(());
+    let current_slot = duties_service
+        .slot_clock
+        .now_or_genesis()
+        .unwrap_or_default();
     for duty in &new_duties {
         let attester_map = attesters.entry(duty.pubkey).or_default();

         // Create initial entries in the map without selection proofs. We'll compute them in the
         // background later to avoid creating a thundering herd of signing threads whenever new
         // duties are computed.
-        let duty_and_proof = DutyAndProof::new_without_selection_proof(duty.clone());
+        let duty_and_proof = DutyAndProof::new_without_selection_proof(duty.clone(), current_slot);
+
+        match attester_map.entry(epoch) {
+            hash_map::Entry::Occupied(mut occupied) => {
+                let mut_value = occupied.get_mut();
+                let (prior_dependent_root, prior_duty_and_proof) = &mut_value;
+
+                // Guard against overwriting an existing value for the same duty. If we did
+                // overwrite we could lose a selection proof or information from
+                // `subscription_slots`. Hitting this branch should be prevented by our logic for
+                // fetching duties only for unknown indices.
+                if dependent_root == *prior_dependent_root
+                    && prior_duty_and_proof.duty == duty_and_proof.duty
+                {
+                    warn!(
+                        log,
+                        "Redundant attester duty update";
+                        "dependent_root" => %dependent_root,
+                        "validator_index" => duty.validator_index,
+                    );
+                    continue;
+                }

-        if let Some((prior_dependent_root, _)) =
-            attester_map.insert(epoch, (dependent_root, duty_and_proof))
-        {
-            // Using `already_warned` avoids excessive logs.
-            if dependent_root != prior_dependent_root && already_warned.take().is_some() {
-                warn!(
-                    log,
-                    "Attester duties re-org";
-                    "prior_dependent_root" => %prior_dependent_root,
-                    "dependent_root" => %dependent_root,
-                    "msg" => "this may happen from time to time"
-                )
+                // Using `already_warned` avoids excessive logs.
+                if dependent_root != *prior_dependent_root && already_warned.take().is_some() {
+                    warn!(
+                        log,
+                        "Attester duties re-org";
+                        "prior_dependent_root" => %prior_dependent_root,
+                        "dependent_root" => %dependent_root,
+                        "msg" => "this may happen from time to time"
+                    )
+                }
+                *mut_value = (dependent_root, duty_and_proof);
+            }
+            hash_map::Entry::Vacant(vacant) => {
+                vacant.insert((dependent_root, duty_and_proof));
             }
         }
     }
@@ -1030,12 +1059,13 @@ async fn fill_in_selection_proofs(
     // Sign selection proofs (serially).
     let duty_and_proof_results = stream::iter(relevant_duties.into_values().flatten())
         .then(|duty| async {
-            DutyAndProof::new_with_selection_proof(
-                duty,
+            let opt_selection_proof = make_selection_proof(
+                &duty,
                 &duties_service.validator_store,
                 &duties_service.spec,
             )
-            .await
+            .await?;
+            Ok((duty, opt_selection_proof))
         })
         .collect::<Vec<_>>()
         .await;
@@ -1043,7 +1073,7 @@
     // Add to attesters store.
     let mut attesters = duties_service.attesters.write();
     for result in duty_and_proof_results {
-        let duty_and_proof = match result {
+        let (duty, selection_proof) = match result {
             Ok(duty_and_proof) => duty_and_proof,
             Err(Error::FailedToProduceSelectionProof(
                 ValidatorStoreError::UnknownPubkey(pubkey),
@@ -1071,12 +1101,12 @@
             }
         };

-        let attester_map = attesters.entry(duty_and_proof.duty.pubkey).or_default();
-        let epoch = duty_and_proof.duty.slot.epoch(E::slots_per_epoch());
+        let attester_map = attesters.entry(duty.pubkey).or_default();
+        let epoch = duty.slot.epoch(E::slots_per_epoch());

         match attester_map.entry(epoch) {
             hash_map::Entry::Occupied(mut entry) => {
                 // No need to update duties for which no proof was computed.
-                let Some(selection_proof) = duty_and_proof.selection_proof else {
+                let Some(selection_proof) = selection_proof else {
                     continue;
                 };
@@ -1097,6 +1127,14 @@
                 }
             }
             hash_map::Entry::Vacant(entry) => {
+                // This probably shouldn't happen, but we have enough info to fill in the
+                // entry so we may as well.
+                let subscription_slots = SubscriptionSlots::new(duty.slot, current_slot);
+                let duty_and_proof = DutyAndProof {
+                    duty,
+                    selection_proof,
+                    subscription_slots,
+                };
                 entry.insert((dependent_root, duty_and_proof));
             }
         }
@@ -1320,13 +1358,15 @@ mod test {
     #[test]
     fn subscription_slots_exact() {
+        // Set current slot in the past so no duties are considered expired.
+        let current_slot = Slot::new(0);
         for duty_slot in [
-            Slot::new(32),
+            Slot::new(33),
             Slot::new(47),
             Slot::new(99),
             Slot::new(1002003),
         ] {
-            let subscription_slots = SubscriptionSlots::new(duty_slot);
+            let subscription_slots = SubscriptionSlots::new(duty_slot, current_slot);

             // Run twice to check idempotence (subscription slots shouldn't be marked as done until
             // we mark them manually).
@@ -1360,8 +1400,9 @@
     #[test]
     fn subscription_slots_mark_multiple() {
         for (i, offset) in ATTESTATION_SUBSCRIPTION_OFFSETS.into_iter().enumerate() {
+            let current_slot = Slot::new(0);
             let duty_slot = Slot::new(64);
-            let subscription_slots = SubscriptionSlots::new(duty_slot);
+            let subscription_slots = SubscriptionSlots::new(duty_slot, current_slot);

             subscription_slots.record_successful_subscription_at(duty_slot - offset);
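The `hash_map::Entry` guard in the hunk above, reduced to a standalone sketch with simplified value types (the real code also rate-limits the re-org warning via `already_warned`):

use std::collections::hash_map::{Entry, HashMap};

// Insert or update an epoch's duty, but never overwrite an entry with an
// identical value: overwriting would discard state attached to the old entry.
fn upsert(map: &mut HashMap<u64, (u64, String)>, epoch: u64, root: u64, duty: String) {
    match map.entry(epoch) {
        Entry::Occupied(mut occupied) => {
            let value = occupied.get_mut();
            if value.0 == root && value.1 == duty {
                eprintln!("redundant duty update for epoch {epoch}");
                return;
            }
            *value = (root, duty);
        }
        Entry::Vacant(vacant) => {
            vacant.insert((root, duty));
        }
    }
}

fn main() {
    let mut map = HashMap::new();
    upsert(&mut map, 0, 1, "duty".into());
    upsert(&mut map, 0, 1, "duty".into()); // logs instead of overwriting
    upsert(&mut map, 0, 2, "duty".into()); // a changed root does overwrite
    assert_eq!(map[&0].0, 2);
}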
@@ -1376,4 +1417,22 @@
             }
         }
     }
+
+    /// Test the boundary condition where all subscription slots are *just* expired.
+    #[test]
+    fn subscription_slots_expired() {
+        let current_slot = Slot::new(100);
+        let duty_slot = current_slot + ATTESTATION_SUBSCRIPTION_OFFSETS[0];
+        let subscription_slots = SubscriptionSlots::new(duty_slot, current_slot);
+        for offset in ATTESTATION_SUBSCRIPTION_OFFSETS.into_iter() {
+            let slot = duty_slot - offset;
+            assert!(!subscription_slots.should_send_subscription_at(slot));
+        }
+        assert!(subscription_slots.slots.is_empty());
+
+        // If the duty slot is 1 later, we get a non-empty set of duties.
+        let subscription_slots = SubscriptionSlots::new(duty_slot + 1, current_slot);
+        assert_eq!(subscription_slots.slots.len(), 1);
+        assert!(subscription_slots.should_send_subscription_at(current_slot + 1));
+    }
 }
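The next file's change swaps an unconditional write lock for `parking_lot`'s upgradable read, so the no-op checks never block readers. A minimal sketch of that pattern, assuming only the `parking_lot` crate:

use parking_lot::{RwLock, RwLockUpgradableReadGuard};

// Check for a no-op under an upgradable read lock; upgrade to a write lock
// only when a change is actually required.
fn maybe_update(lock: &RwLock<u64>, new_value: Option<u64>) {
    let guard = lock.upgradable_read();
    // No-op: `None` or an unchanged value never takes the write lock.
    if new_value.map_or(true, |v| *guard == v) {
        return;
    }
    let mut write_guard = RwLockUpgradableReadGuard::upgrade(guard);
    *write_guard = new_value.expect("checked above");
}

fn main() {
    let lock = RwLock::new(1);
    maybe_update(&lock, Some(1)); // no-op: readers are never blocked
    maybe_update(&lock, Some(2));
    assert_eq!(*lock.read(), 2);
}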
diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs
index dcf66d2fbca..a4480195e59 100644
--- a/validator_client/src/http_api/mod.rs
+++ b/validator_client/src/http_api/mod.rs
@@ -685,7 +685,16 @@ pub fn serve(
                     let maybe_graffiti = body.graffiti.clone().map(Into::into);
                     let initialized_validators_rw_lock = validator_store.initialized_validators();
-                    let mut initialized_validators = initialized_validators_rw_lock.write();
+                    let initialized_validators = initialized_validators_rw_lock.upgradable_read();
+
+                    // Do not make any changes if all fields are identical or unchanged.
+                    fn equal_or_none<T: PartialEq>(
+                        current_value: Option<T>,
+                        new_value: Option<T>,
+                    ) -> bool {
+                        new_value.is_none() || current_value == new_value
+                    }
+
                     match (
                         initialized_validators.is_enabled(&validator_pubkey),
                         initialized_validators.validator(&validator_pubkey.compress()),
@@ -694,32 +703,65 @@
                             "no validator for {:?}",
                             validator_pubkey
                         ))),
+                        // If all specified parameters match their existing settings, then this
+                        // change is a no-op.
                         (Some(is_enabled), Some(initialized_validator))
-                            if Some(is_enabled) == body.enabled
-                                && initialized_validator.get_gas_limit() == body.gas_limit
-                                && initialized_validator.get_builder_boost_factor()
-                                    == body.builder_boost_factor
-                                && initialized_validator.get_builder_proposals()
-                                    == body.builder_proposals
-                                && initialized_validator.get_prefer_builder_proposals()
-                                    == body.prefer_builder_proposals
-                                && initialized_validator.get_graffiti() == maybe_graffiti =>
+                            if equal_or_none(Some(is_enabled), body.enabled)
+                                && equal_or_none(
+                                    initialized_validator.get_gas_limit(),
+                                    body.gas_limit,
+                                )
+                                && equal_or_none(
+                                    initialized_validator.get_builder_boost_factor(),
+                                    body.builder_boost_factor,
+                                )
+                                && equal_or_none(
+                                    initialized_validator.get_builder_proposals(),
+                                    body.builder_proposals,
+                                )
+                                && equal_or_none(
+                                    initialized_validator.get_prefer_builder_proposals(),
+                                    body.prefer_builder_proposals,
+                                )
+                                && equal_or_none(
+                                    initialized_validator.get_graffiti(),
+                                    maybe_graffiti,
+                                ) =>
+                        {
+                            Ok(())
+                        }
+                        // Disabling an already disabled validator *with no other changes* is a
+                        // no-op.
+                        (Some(false), None)
+                            if body.enabled.map_or(true, |enabled| !enabled)
+                                && body.gas_limit.is_none()
+                                && body.builder_boost_factor.is_none()
+                                && body.builder_proposals.is_none()
+                                && body.prefer_builder_proposals.is_none()
+                                && maybe_graffiti.is_none() =>
                         {
                             Ok(())
                         }
                         (Some(_), _) => {
+                            // Upgrade read lock only in the case where a write is actually
+                            // required.
+                            let mut initialized_validators_write =
+                                parking_lot::RwLockUpgradableReadGuard::upgrade(
+                                    initialized_validators,
+                                );
                             if let Some(handle) = task_executor.handle() {
                                 handle
                                     .block_on(
-                                        initialized_validators.set_validator_definition_fields(
-                                            &validator_pubkey,
-                                            body.enabled,
-                                            body.gas_limit,
-                                            body.builder_proposals,
-                                            body.builder_boost_factor,
-                                            body.prefer_builder_proposals,
-                                            body.graffiti,
-                                        ),
+                                        initialized_validators_write
+                                            .set_validator_definition_fields(
+                                                &validator_pubkey,
+                                                body.enabled,
+                                                body.gas_limit,
+                                                body.builder_proposals,
+                                                body.builder_boost_factor,
+                                                body.prefer_builder_proposals,
+                                                body.graffiti,
+                                            ),
                                     )
                                     .map_err(|e| {
                                         warp_utils::reject::custom_server_error(format!(