diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh
index 52d7009412c5..3b2f2d6b7e6c 100755
--- a/.github/assets/check_wasm.sh
+++ b/.github/assets/check_wasm.sh
@@ -36,7 +36,6 @@ exclude_crates=(
   reth-ethereum-engine-primitives
   reth-ethereum-payload-builder
   reth-etl
-  reth-evm-ethereum
   reth-exex
   reth-exex-test-utils
   reth-ipc
diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml
index d4b3d2bcbd3c..ec7bd0549001 100644
--- a/.github/assets/hive/expected_failures.yaml
+++ b/.github/assets/hive/expected_failures.yaml
@@ -41,8 +41,6 @@ engine-withdrawals:
   - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org (Paris) (reth)
   - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org Sync (Paris) (reth)

-# https://github.com/paradigmxyz/reth/issues/8305
-# https://github.com/paradigmxyz/reth/issues/6217
 engine-api: []

 # https://github.com/paradigmxyz/reth/issues/8305
@@ -58,6 +56,4 @@ engine-cancun:
   - Invalid NewPayload, Incomplete VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth)
   - Invalid NewPayload, Extra VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth)

-# https://github.com/paradigmxyz/reth/issues/8579
-sync:
-  - sync reth -> reth
+sync: []
diff --git a/.github/workflows/eth-sync.yml b/.github/workflows/eth-sync.yml
deleted file mode 100644
index f473e29a57c6..000000000000
--- a/.github/workflows/eth-sync.yml
+++ /dev/null
@@ -1,53 +0,0 @@
-# Runs an ethereum mainnet sync test.
-
-name: eth-sync-test
-
-on:
-  pull_request:
-  merge_group:
-  push:
-    branches: [ main ]
-
-env:
-  CARGO_TERM_COLOR: always
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
-jobs:
-  sync:
-    name: sync / 100k blocks
-    # Only run sync tests in merge groups
-    if: github.event_name == 'merge_group'
-    runs-on:
-      group: Reth
-    env:
-      RUST_LOG: info,sync=error
-      RUST_BACKTRACE: 1
-    timeout-minutes: 60
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@stable
-      - uses: Swatinem/rust-cache@v2
-        with:
-          cache-on-failure: true
-      - name: Build reth
-        run: |
-          cargo install --features asm-keccak,jemalloc --path bin/reth
-      - name: Run sync
-        run: |
-          reth node \
-            --debug.tip 0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4 \
-            --debug.max-block 100000 \
-            --debug.terminate
-      - name: Verify the target block hash
-        run: |
-          reth db get static-file headers 100000 \
-            | grep 0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4
-      - name: Run stage unwind for 100 blocks
-        run: |
-          reth stage unwind num-blocks 100
-      - name: Run stage unwind to block hash
-        run: |
-          reth stage unwind to-block 0x52e0509d33a988ef807058e2980099ee3070187f7333aae12b64d4d675f34c5a
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 1921859c2729..4723d8a4d57f 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -18,10 +18,10 @@ jobs:
       matrix:
         include:
           - type: ethereum
-            args: --bin reth --workspace
+            args: --bin reth --workspace --locked
             features: "ethereum asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs"
           - type: optimism
-            args: --bin op-reth --workspace
+            args: --bin op-reth --workspace --locked
             features: "optimism asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs"
           - type: book
             args: --manifest-path book/sources/Cargo.toml --workspace --bins
diff --git a/.github/workflows/op-sync.yml b/.github/workflows/op-sync.yml
deleted file mode 100644
index 2a223391d711..000000000000
--- a/.github/workflows/op-sync.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-# Runs a base mainnet sync test.
-
-name: op-sync-test
-
-on:
-  pull_request:
-  merge_group:
-  push:
-    branches: [ main ]
-
-env:
-  CARGO_TERM_COLOR: always
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
-jobs:
-  sync:
-    name: op sync / 10k blocks
-    # Only run sync tests in merge groups
-    if: github.event_name == 'merge_group'
-    runs-on:
-      group: Reth
-    env:
-      RUST_LOG: info,sync=error
-      RUST_BACKTRACE: 1
-    timeout-minutes: 60
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@stable
-      - uses: Swatinem/rust-cache@v2
-        with:
-          cache-on-failure: true
-      - name: Build op-reth
-        run: make install-op
-      - name: Run sync
-        # https://basescan.org/block/10000
-        run: |
-          op-reth node \
-            --chain base \
-            --debug.tip 0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7 \
-            --debug.max-block 10000 \
-            --debug.terminate
-      - name: Verify the target block hash
-        run: |
-          op-reth db --chain base get static-file headers 10000 \
-            | grep 0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7
-      - name: Run stage unwind for 100 blocks
-        run: |
-          op-reth stage --chain base unwind num-blocks 100
-      - name: Run stage unwind to block hash
-        run: |
-          op-reth stage --chain base unwind to-block 0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de
-
diff --git a/.github/workflows/release-dist.yml b/.github/workflows/release-dist.yml
index 2142360e0396..f7df80e81f9f 100644
--- a/.github/workflows/release-dist.yml
+++ b/.github/workflows/release-dist.yml
@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Update Homebrew formula
-        uses: dawidd6/action-homebrew-bump-formula@v3
+        uses: dawidd6/action-homebrew-bump-formula@v4
        with:
          token: ${{ secrets.HOMEBREW }}
          no_fork: true
diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml
new file mode 100644
index 000000000000..531d04b2e489
--- /dev/null
+++ b/.github/workflows/sync.yml
@@ -0,0 +1,63 @@
+# Runs sync tests.
+
+name: sync test
+
+on:
+  merge_group:
+
+env:
+  CARGO_TERM_COLOR: always
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  sync:
+    name: sync (${{ matrix.chain.bin }})
+    runs-on:
+      group: Reth
+    env:
+      RUST_LOG: info,sync=error
+      RUST_BACKTRACE: 1
+    timeout-minutes: 60
+    strategy:
+      matrix:
+        chain:
+          - build: install
+            bin: reth
+            chain: mainnet
+            tip: "0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4"
+            block: 100000
+            unwind-target: "0x52e0509d33a988ef807058e2980099ee3070187f7333aae12b64d4d675f34c5a"
+          - build: install-op
+            bin: op-reth
+            chain: base
+            tip: "0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7"
+            block: 10000
+            unwind-target: "0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de"
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@stable
+      - uses: Swatinem/rust-cache@v2
+        with:
+          cache-on-failure: true
+      - name: Build ${{ matrix.chain.bin }}
+        run: make ${{ matrix.chain.build }}
+      - name: Run sync
+        run: |
+          ${{ matrix.chain.bin }} node \
+            --chain ${{ matrix.chain.chain }} \
+            --debug.tip ${{ matrix.chain.tip }} \
+            --debug.max-block ${{ matrix.chain.block }} \
+            --debug.terminate
+      - name: Verify the target block hash
+        run: |
+          ${{ matrix.chain.bin }} db --chain ${{ matrix.chain.chain }} get static-file headers ${{ matrix.chain.block }} \
+            | grep ${{ matrix.chain.tip }}
+      - name: Run stage unwind for 100 blocks
+        run: |
+          ${{ matrix.chain.bin }} stage --chain ${{ matrix.chain.chain }} unwind num-blocks 100
+      - name: Run stage unwind to block hash
+        run: |
+          ${{ matrix.chain.bin }} stage --chain ${{ matrix.chain.chain }} unwind to-block ${{ matrix.chain.unwind-target }}
diff --git a/Cargo.lock b/Cargo.lock
index 4def3dcabed7..8d4095b64a62 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -97,9 +97,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"

 [[package]]
 name = "alloy-chains"
-version = "0.1.38"
+version = "0.1.40"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "156bfc5dcd52ef9a5f33381701fa03310317e14c65093a9430d3e3557b08dcd3"
+checksum = "d4932d790c723181807738cf1ac68198ab581cd699545b155601332541ee47bd"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -112,9 +112,9 @@ dependencies = [

 [[package]]
 name = "alloy-consensus"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "42642aed67f938363d9c7543e5ca4163cfb4205d9ec15fe933dc4e865d2932dd"
+checksum = "41ed961a48297c732a5d97ee321aa8bb5009ecadbcb077d8bec90cb54e651629"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -161,13 +161,14 @@ dependencies = [

 [[package]]
 name = "alloy-eip7702"
-version = "0.2.0"
+version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eeffd2590ce780ddfaa9d0ae340eb2b4e08627650c4676eef537cef0b4bf535d"
+checksum = "64ffc577390ce50234e02d841214b3dc0bea6aaaae8e04bbf3cb82e9a45da9eb"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
  "arbitrary",
+ "derive_more 1.0.0",
  "k256",
  "rand 0.8.5",
  "serde",
 ]
@@ -176,9 +177,9 @@ dependencies = [

 [[package]]
 name = "alloy-eips"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9fbc52a30df46f9831ed74557dfad0d94b12420393662a8b9ef90e2d6c8cb4b0"
+checksum = "b69e06cf9c37be824b9d26d6d101114fdde6af0c87de2828b414c05c4b3daa71"
 dependencies = [
  "alloy-eip2930",
  "alloy-eip7702",
@@ -197,9 +198,9 @@ dependencies = [

 [[package]]
 name = "alloy-genesis"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0787d1688b9806290313cc335d416cc7ee39b11e3245f3d218544c62572d92ba"
+checksum = "dde15e14944a88bd6a57d325e9a49b75558746fe16aaccc79713ae50a6a9574c"
 dependencies = [
  "alloy-primitives",
  "alloy-serde",
@@ -220,9 +221,9 @@ dependencies = [

 [[package]]
 name = "alloy-json-rpc"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d55a16a5f9ca498a217c060414bcd1c43e934235dc8058b31b87dcd69ff4f105"
+checksum = "af5979e0d5a7bf9c7eb79749121e8256e59021af611322aee56e77e20776b4b3"
 dependencies = [
  "alloy-primitives",
  "alloy-sol-types",
@@ -234,9 +235,9 @@ dependencies = [

 [[package]]
 name = "alloy-network"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d236a8c3e1d5adc09b1b63c81815fc9b757d9a4ba9482cc899f9679b55dd437"
+checksum = "204237129086ce5dc17a58025e93739b01b45313841f98fa339eb1d780511e57"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -255,9 +256,9 @@ dependencies = [

 [[package]]
 name = "alloy-network-primitives"
-version = "0.5.2"
+version = "0.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd15a0990fa8a56d85a42d6a689719aa4eebf5e2f1a5c5354658c0bfc52cac9a"
+checksum = "9a968c063fcfcb937736665c865a71fc2242b68916156f5ffa41fee7b44bb695"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -268,9 +269,9 @@ dependencies = [

 [[package]]
 name = "alloy-node-bindings"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2249f3c3ce446cf4063fe3d1aa7530823643c2706a1cc63045e0683ebc497a0a"
+checksum = "27444ea67d360508753022807cdd0b49a95c878924c9c5f8f32668b7d7768245"
 dependencies = [
  "alloy-genesis",
  "alloy-primitives",
@@ -317,9 +318,9 @@ dependencies = [

 [[package]]
 name = "alloy-provider"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "316f522bb6f9ac3805132112197957013b570e20cfdad058e8339dae6030c849"
+checksum = "4814d141ede360bb6cd1b4b064f1aab9de391e7c4d0d4d50ac89ea4bc1e25fbd"
 dependencies = [
  "alloy-chains",
  "alloy-consensus",
@@ -343,7 +344,7 @@ dependencies = [
  "futures",
  "futures-utils-wasm",
  "lru",
- "parking_lot 0.12.3",
+ "parking_lot",
  "pin-project",
  "reqwest",
  "schnellru",
@@ -358,9 +359,9 @@ dependencies = [

 [[package]]
 name = "alloy-pubsub"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "222cd9b17b1c5ad48de51a88ffbdb17f17145170288f22662f80ac88739125e6"
+checksum = "96ba46eb69ddf7a9925b81f15229cb74658e6eebe5dd30a5b74e2cd040380573"
 dependencies = [
  "alloy-json-rpc",
  "alloy-primitives",
@@ -394,14 +395,14 @@ checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.79",
+ "syn 2.0.80",
 ]

 [[package]]
 name = "alloy-rpc-client"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b2ab59712c594c9624aaa69e38e4d38f180cb569f1fa46cdaf8c21fd50793e5"
+checksum = "7fc2bd1e7403463a5f2c61e955bcc9d3072b63aa177442b0f9aa6a6d22a941e3"
 dependencies = [
  "alloy-json-rpc",
  "alloy-primitives",
@@ -424,9 +425,9 @@ dependencies = [

 [[package]]
 name = "alloy-rpc-types"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba21284319e12d053baa204d438db6c1577aedd94c1298e4becefdac1f9cec87"
+checksum = "eea9bf1abdd506f985a53533f5ac01296bcd6102c5e139bbc5d40bc468d2c916"
 dependencies = [
  "alloy-primitives",
  "alloy-rpc-types-engine",
@@ -437,9 +438,9 @@ dependencies = [

 [[package]]
 name = "alloy-rpc-types-admin"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "416cc9f391d0b876c4c8da85f7131e771a88a55b917cc9a35e1724d9409e3b1c"
+checksum = "ea02c25541fb19eaac4278aa5c41d2d7e0245898887e54a74bfc0f3103e99415"
 dependencies = [
  "alloy-genesis",
  "alloy-primitives",
@@ -449,9 +450,9 @@ dependencies = [

 [[package]]
 name = "alloy-rpc-types-anvil"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba40bea86c3102b9ed9b3be579e32e0b3e54e766248d873de5fc0437238c8df2"
+checksum = "2382fc63fb0cf3e02818d547b80cb66cc49a31f8803d0c328402b2008bc13650"
 dependencies = [
  "alloy-primitives",
  "alloy-serde",
@@ -460,9 +461,9 @@ dependencies = [

 [[package]]
 name = "alloy-rpc-types-beacon"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b535781fe224c101c3d957b514cb9f438d165ff0280e5c0b2f87a0d9a2950593"
+checksum = "45357a642081c8ce235c0ad990c4e9279f5f18a723545076b38cfcc05cc25234"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -474,9 +475,9 @@ dependencies = [

 [[package]]
 name = "alloy-rpc-types-debug"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4303deacf4cbf12ed4431a5a1bbc3284f0defb4b8b72d9aa2b888656cc5ae657"
+checksum = "a5afe3ab1038f90faf56304aa0adf1e6a8c9844615d8f83967f932f3a70390b1"
 dependencies = [
  "alloy-primitives",
  "serde",
@@ -484,9 +485,9 @@ dependencies = [

 [[package]]
 name = "alloy-rpc-types-engine"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "44848fced3b42260b9cb61f22102246636dfe5a2d0132f8d10a617df3cb1a74b"
+checksum = "886d22d41992287a235af2f3af4299b5ced2bcafb81eb835572ad35747476946"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -505,9 +506,9 @@ dependencies = [

 [[package]]
 name = "alloy-rpc-types-eth"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35894711990019fafff0012b82b9176cbb744516eb2a9bbe6b8e5cae522163ee"
+checksum = "00b034779a4850b4b03f5be5ea674a1cf7d746b2da762b34d1860ab45e48ca27"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -526,9 +527,9 @@ dependencies = [

 [[package]]
 name = "alloy-rpc-types-mev"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cac6250cad380a005ecb5ffc6d2facf03df0e72628d819a63dd8c3ade7a766ff"
+checksum = "3246948dfa5f5060a9abe04233d741ea656ef076b12958f3242416ce9f375058"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -539,9 +540,9 @@ dependencies = [

 [[package]]
 name = "alloy-rpc-types-trace"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f568c5624881896d8a25e19acbdcbabadd8df339427ea2f10b2ee447d57c4509"
+checksum = "4e5fb6c5c401321f802f69dcdb95b932f30f8158f6798793f914baac5995628e"
 dependencies = [
  "alloy-primitives",
  "alloy-rpc-types-eth",
@@ -553,9 +554,9 @@ dependencies = [

 [[package]]
 name = "alloy-rpc-types-txpool"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4a37d2e1ed9b7daf20ad0b3e0092613cbae46737e0e988b23caa556c7067ce6"
+checksum = "9ad066b49c3b1b5f64cdd2399177a19926a6a15db2dbf11e2098de621f9e7480"
 dependencies = [
  "alloy-primitives",
  "alloy-rpc-types-eth",
@@ -565,9 +566,9 @@ dependencies = [

 [[package]]
 name = "alloy-serde"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2843c195675f06b29c09a4315cccdc233ab5bdc7c0a3775909f9f0cab5e9ae0f"
+checksum = "028e72eaa9703e4882344983cfe7636ce06d8cce104a78ea62fd19b46659efc4"
 dependencies = [
  "alloy-primitives",
  "arbitrary",
@@ -577,9 +578,9 @@ dependencies = [

 [[package]]
 name = "alloy-signer"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "88b2a00d9803dfef99963303ffe41a7bf2221f3342f0a503d6741a9f4a18e5e5"
+checksum = "592c185d7100258c041afac51877660c7bf6213447999787197db4842f0e938e"
 dependencies = [
  "alloy-primitives",
  "async-trait",
@@ -591,9 +592,9 @@ dependencies = [

 [[package]]
 name = "alloy-signer-local"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a2505d4f8c98dcae86152d58d549cb4bcf953f8352fca903410e0a0ef535571"
+checksum = "6614f02fc1d5b079b2a4a5320018317b506fd0a6d67c1fd5542a71201724986c"
 dependencies = [
  "alloy-consensus",
  "alloy-network",
@@ -609,23 +610,23 @@ dependencies = [

 [[package]]
 name = "alloy-sol-macro"
-version = "0.8.8"
+version = "0.8.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3b2395336745358cc47207442127c47c63801a7065ecc0aa928da844f8bb5576"
+checksum = "b0900b83f4ee1f45c640ceee596afbc118051921b9438fdb5a3175c1a7e05f8b"
 dependencies = [
  "alloy-sol-macro-expander",
  "alloy-sol-macro-input",
  "proc-macro-error2",
  "proc-macro2",
  "quote",
- "syn 2.0.79",
+ "syn 2.0.80",
 ]

 [[package]]
 name = "alloy-sol-macro-expander"
-version = "0.8.8"
+version = "0.8.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ed5047c9a241df94327879c2b0729155b58b941eae7805a7ada2e19436e6b39"
+checksum = "a41b1e78dde06b5e12e6702fa8c1d30621bf07728ba75b801fb801c9c6a0ba10"
 dependencies = [
  "alloy-sol-macro-input",
  "const-hex",
@@ -634,23 +635,23 @@
  "proc-macro-error2",
  "proc-macro2",
  "quote",
- "syn 2.0.79",
+ "syn 2.0.80",
  "syn-solidity",
  "tiny-keccak",
 ]

 [[package]]
 name = "alloy-sol-macro-input"
-version = "0.8.8"
+version = "0.8.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5dee02a81f529c415082235129f0df8b8e60aa1601b9c9298ffe54d75f57210b"
+checksum = "91dc311a561a306664393407b88d3e53ae58581624128afd8a15faa5de3627dc"
 dependencies = [
  "const-hex",
  "dunce",
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.79",
+ "syn 2.0.80",
  "syn-solidity",
 ]
@@ -679,9 +680,9 @@ dependencies = [

 [[package]]
 name = "alloy-transport"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9dc2c8f6b8c227ef0398f702d954c4ab572c2ead3c1ed4a5157aa1cbaf959747"
+checksum = "be77579633ebbc1266ae6fd7694f75c408beb1aeb6865d0b18f22893c265a061"
 dependencies = [
  "alloy-json-rpc",
  "base64 0.22.1",
@@ -699,9 +700,9 @@ dependencies = [

 [[package]]
 name = "alloy-transport-http"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd328e990d57f4c4e63899fb2c26877597d6503f8e0022a3d71b2d753ecbfc0c"
+checksum = "91fd1a5d0827939847983b46f2f79510361f901dc82f8e3c38ac7397af142c6e"
 dependencies = [
  "alloy-json-rpc",
  "alloy-transport",
@@ -714,9 +715,9 @@ dependencies = [

 [[package]]
 name = "alloy-transport-ipc"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89aea26aaf1d67904a7ff95ec4a24ddd5e7d419a6945f641b885962d7c2803e2"
+checksum = "8073d1186bfeeb8fbdd1292b6f1a0731f3aed8e21e1463905abfae0b96a887a6"
 dependencies = [
  "alloy-json-rpc",
  "alloy-pubsub",
@@ -733,9 +734,9 @@ dependencies = [

 [[package]]
 name = "alloy-transport-ws"
-version = "0.5.2"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e222e950ecc4ea12fbfb524b9a2275cac2cd5f57c8ce25bcaf1bd3ff80dd8fc8"
+checksum = "61f27837bb4a1d6c83a28231c94493e814882f0e9058648a97e908a5f3fc9fcf"
 dependencies = [
  "alloy-pubsub",
  "alloy-transport",
@@ -841,22 +842,22 @@ dependencies = [

 [[package]]
 name = "anyhow"
-version = "1.0.89"
+version = "1.0.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6"
+checksum = "37bf3594c4c988a53154954629820791dde498571819ae4ca50ca811e060cc95"

 [[package]]
 name = "aquamarine"
-version = "0.5.0"
+version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21cc1548309245035eb18aa7f0967da6bc65587005170c56e6ef2788a4cf3f4e"
+checksum = "0f50776554130342de4836ba542aa85a4ddb361690d7e8df13774d7284c3d5c2"
 dependencies = [
  "include_dir",
  "itertools 0.10.5",
- "proc-macro-error",
+ "proc-macro-error2",
  "proc-macro2",
  "quote",
- "syn 2.0.79",
+ "syn 2.0.80",
 ]

 [[package]]
@@ -1032,9 +1033,9 @@ dependencies = [

 [[package]]
 name = "async-compression"
-version = "0.4.15"
+version = "0.4.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e26a9844c659a2a293d239c7910b752f8487fe122c6c8bd1659bf85a6507c302"
+checksum = "103db485efc3e41214fe4fda9f3dbeae2eb9082f48fd236e6095627a9422066e"
 dependencies = [
  "brotli",
  "flate2",
@@ -1079,7 +1080,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.79",
+ "syn 2.0.80",
 ]

 [[package]]
@@ -1090,7 +1091,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.79",
+ "syn 2.0.80",
 ]

 [[package]]
@@ -1128,7 +1129,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.79",
+ "syn 2.0.80",
 ]

 [[package]]
@@ -1234,7 +1235,7 @@ dependencies = [
  "regex",
  "rustc-hash 1.1.0",
  "shlex",
- "syn 2.0.79",
+ "syn 2.0.80",
 ]

 [[package]]
@@ -1416,7 +1417,7 @@ checksum = "240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.79",
+ "syn 2.0.80",
  "synstructure",
 ]

@@ -1538,7 +1539,7 @@ checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.79",
+ "syn 2.0.80",
 ]

 [[package]]
@@ -1626,9 +1627,9 @@ dependencies = [

 [[package]]
 name = "cc"
-version = "1.1.30"
+version = "1.1.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b16803a61b81d9eabb7eae2588776c4c1e584b738ede45fdbb4c972cec1e9945"
+checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f"
 dependencies = [
  "jobserver",
  "libc",
@@ -1750,7 +1751,7 @@ dependencies = [
  "heck",
"proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2038,9 +2039,9 @@ dependencies = [ [[package]] name = "critical-section" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f64009896348fc5af4222e9cf7d7d82a95a256c634ebcf61c53e4ea461422242" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" [[package]] name = "crossbeam-channel" @@ -2085,7 +2086,7 @@ dependencies = [ "bitflags 2.6.0", "crossterm_winapi", "libc", - "parking_lot 0.12.3", + "parking_lot", "winapi", ] @@ -2098,7 +2099,7 @@ dependencies = [ "bitflags 2.6.0", "crossterm_winapi", "mio 1.0.2", - "parking_lot 0.12.3", + "parking_lot", "rustix", "signal-hook", "signal-hook-mio", @@ -2207,7 +2208,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2231,7 +2232,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2242,7 +2243,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2255,7 +2256,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.10", + "parking_lot_core", ] [[package]] @@ -2269,7 +2270,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.10", + "parking_lot_core", "serde", ] @@ -2316,11 +2317,12 @@ dependencies = [ [[package]] name = "delay_map" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8" +checksum = "df941644b671f05f59433e481ba0d31ac10e3667de725236a4c0d587c496fba1" dependencies = [ "futures", + "tokio", "tokio-util", ] @@ -2363,7 +2365,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2374,7 +2376,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2395,7 +2397,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", "unicode-xid", ] @@ -2470,9 +2472,9 @@ dependencies = [ [[package]] name = "discv5" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f569b8c367554666c8652305621e8bae3634a2ff5c6378081d5bd8c399c99f23" +checksum = "23e6b70634e26c909d1edbb3142b3eaf3b89da0e52f284f00ca7c80d9901ad9e" dependencies = [ "aes", "aes-gcm", @@ -2491,13 +2493,13 @@ dependencies = [ "lru", "more-asserts", "multiaddr", - "parking_lot 0.11.2", + "parking_lot", "rand 0.8.5", "smallvec", - "socket2 0.4.10", + "socket2", "tokio", "tracing", - "uint", + "uint 0.10.0", "zeroize", ] @@ -2509,7 +2511,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2657,7 +2659,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2668,7 +2670,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2725,7 +2727,7 @@ 
dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2783,6 +2785,23 @@ dependencies = [ "tokio-stream", ] +[[package]] +name = "example-custom-beacon-withdrawals" +version = "0.0.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-sol-macro", + "alloy-sol-types", + "eyre", + "reth", + "reth-chainspec", + "reth-evm", + "reth-evm-ethereum", + "reth-node-ethereum", + "reth-primitives", +] + [[package]] name = "example-custom-dev-node" version = "0.0.0" @@ -3012,7 +3031,7 @@ dependencies = [ "alloy-genesis", "alloy-primitives", "eyre", - "parking_lot 0.12.3", + "parking_lot", "reth", "reth-chainspec", "reth-node-api", @@ -3263,7 +3282,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -3511,9 +3530,9 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.4" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ "hashbrown 0.14.5", ] @@ -3707,9 +3726,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", "futures-channel", @@ -3759,7 +3778,7 @@ dependencies = [ "http-body", "hyper", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", @@ -3789,7 +3808,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -3939,7 +3958,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -4107,7 +4126,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c" dependencies = [ "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -4149,7 +4168,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.7", + "socket2", "widestring", "windows-sys 0.48.0", "winreg", @@ -4252,9 +4271,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02f01f48e04e0d7da72280ab787c9943695699c9b32b99158ece105e8ad0afea" +checksum = "c5c71d8c1a731cc4227c2f698d377e7848ca12c8a48866fc5e6951c43a4db843" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4270,9 +4289,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d80eccbd47a7b9f1e67663fd846928e941cb49c65236e297dd11c9ea3c5e3387" +checksum = "548125b159ba1314104f5bb5f38519e03a41862786aa3925cf349aae9cdd546e" dependencies = [ "base64 0.22.1", "futures-channel", @@ -4295,9 +4314,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.24.6" +version = "0.24.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c2709a32915d816a6e8f625bf72cf74523ebe5d8829f895d6b041b1d3137818" +checksum = "f2882f6f8acb9fdaec7cefc4fd607119a9bd709831df7d7672a1d3b644628280" dependencies = [ "async-trait", "bytes", @@ -4307,7 +4326,7 @@ dependencies = [ "http-body", "http-body-util", "jsonrpsee-types", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "rand 0.8.5", "rustc-hash 2.0.0", @@ -4322,9 +4341,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc54db939002b030e794fbfc9d5a925aa2854889c5a2f0352b0bffa54681707e" +checksum = "b3638bc4617f96675973253b3a45006933bde93c2fd8a6170b33c777cc389e5b" dependencies = [ "async-trait", "base64 0.22.1", @@ -4347,22 +4366,22 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a9a4b2eaba8cc928f49c4ccf4fcfa65b690a73997682da99ed08f3393b51f07" +checksum = "c06c01ae0007548e73412c08e2285ffe5d723195bf268bce67b1b77c3bb2a14d" dependencies = [ "heck", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] name = "jsonrpsee-server" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e30110d0f2d7866c8cc6c86483bdab2eb9f4d2f0e20db55518b2bca84651ba8e" +checksum = "82ad8ddc14be1d4290cd68046e7d1d37acd408efed6d3ca08aefcc3ad6da069c" dependencies = [ "futures-util", "http", @@ -4387,9 +4406,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca331cd7b3fe95b33432825c2d4c9f5a43963e207fdc01ae67f9fd80ab0930f" +checksum = "a178c60086f24cc35bb82f57c651d0d25d99c4742b4d335de04e97fa1f08a8a1" dependencies = [ "http", "serde", @@ -4399,9 +4418,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c603d97578071dc44d79d3cfaf0775437638fd5adc33c6b622dfe4fa2ec812d" +checksum = "1a01cd500915d24ab28ca17527e23901ef1be6d659a2322451e1045532516c25" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4410,9 +4429,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "755ca3da1c67671f1fae01cd1a47f41dfb2233a8f19a643e587ab0a663942044" +checksum = "0fe322e0896d0955a3ebdd5bf813571c53fea29edd713bc315b76620b327e86d" dependencies = [ "http", "jsonrpsee-client-transport", @@ -4515,9 +4534,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.159" +version = "0.2.161" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" [[package]] name = "libloading" @@ -4573,7 +4592,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags 2.6.0", "libc", - "redox_syscall 0.5.7", + "redox_syscall", ] [[package]] @@ -4773,7 +4792,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -4920,7 +4939,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", 
- "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -5168,7 +5187,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -5416,17 +5435,6 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" -[[package]] -name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", -] - [[package]] name = "parking_lot" version = "0.12.3" @@ -5434,21 +5442,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", - "parking_lot_core 0.9.10", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" -dependencies = [ - "cfg-if", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", + "parking_lot_core", ] [[package]] @@ -5459,7 +5453,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.7", + "redox_syscall", "smallvec", "windows-targets 0.52.6", ] @@ -5547,7 +5541,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -5576,7 +5570,7 @@ checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -5689,7 +5683,7 @@ dependencies = [ "log", "nix", "once_cell", - "parking_lot 0.12.3", + "parking_lot", "smallvec", "symbolic-demangle", "tempfile", @@ -5743,12 +5737,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.22" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" +checksum = "904afd36257cdb6ce0bee88b7981847bd7b955e5e216bb32f466b302923ad446" dependencies = [ "proc-macro2", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -5768,7 +5762,7 @@ checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash", "impl-codec", - "uint", + "uint 0.9.5", ] [[package]] @@ -5780,30 +5774,6 @@ dependencies = [ "toml_edit", ] -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - [[package]] name = "proc-macro-error-attr2" version = "2.0.0" @@ -5823,14 +5793,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] name = "proc-macro2" -version = "1.0.87" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" +checksum = "7c3a7fc5db1e57d5a779a352c8cdb57b29aa4c40cc69c3a68a7fedc815fbf2f9" dependencies = [ "unicode-ident", ] @@ -5921,7 +5891,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -5975,7 +5945,7 @@ dependencies = [ "quinn-udp", "rustc-hash 2.0.0", "rustls", - "socket2 0.5.7", + "socket2", "thiserror", "tokio", "tracing", @@ -6006,7 +5976,7 @@ checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" dependencies = [ "libc", "once_cell", - "socket2 0.5.7", + "socket2", "tracing", "windows-sys 0.59.0", ] @@ -6163,15 +6133,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3edd4d5d42c92f0a659926464d4cce56b562761267ecf0f469d85b7de384175" -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "redox_syscall" version = "0.5.7" @@ -6409,6 +6370,7 @@ name = "reth-basic-payload-builder" version = "1.1.0" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "futures-core", @@ -6527,7 +6489,7 @@ dependencies = [ "assert_matches", "linked_hash_set", "metrics", - "parking_lot 0.12.3", + "parking_lot", "reth-blockchain-tree-api", "reth-chainspec", "reth-consensus", @@ -6577,7 +6539,7 @@ dependencies = [ "auto_impl", "derive_more 1.0.0", "metrics", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "rand 0.8.5", "reth-chainspec", @@ -6744,7 +6706,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -6826,7 +6788,7 @@ dependencies = [ "iai-callgrind", "metrics", "page_size", - "parking_lot 0.12.3", + "parking_lot", "paste", "pprof", "proptest", @@ -6934,7 +6896,7 @@ dependencies = [ "discv5", "enr", "generic-array", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "reth-ethereum-forks", "reth-net-banlist", @@ -6984,7 +6946,7 @@ dependencies = [ "data-encoding", "enr", "linked_hash_set", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "reth-chainspec", "reth-ethereum-forks", @@ -7424,12 +7386,13 @@ dependencies = [ name = "reth-evm" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "auto_impl", "futures-util", "metrics", - "parking_lot 0.12.3", + "parking_lot", "reth-chainspec", "reth-consensus", "reth-consensus-common", @@ -7513,7 +7476,7 @@ dependencies = [ "futures", "itertools 0.13.0", "metrics", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "reth-blockchain-tree", "reth-chain-state", @@ -7657,7 +7620,7 @@ dependencies = [ "dashmap 6.1.0", "derive_more 1.0.0", "indexmap 2.6.0", - "parking_lot 0.12.3", + "parking_lot", "pprof", "rand 0.8.5", "rand_xorshift", @@ -7727,7 +7690,7 @@ dependencies = [ "futures", "itertools 0.13.0", "metrics", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "pprof", "rand 0.8.5", @@ -7799,7 +7762,7 @@ dependencies = [ "auto_impl", "derive_more 1.0.0", "futures", - "parking_lot 0.12.3", + "parking_lot", "reth-consensus", "reth-eth-wire-types", "reth-network-peers", @@ -7949,6 +7912,7 @@ name = "reth-node-core" version = "1.1.0" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "clap", @@ -8036,6 
+8000,7 @@ dependencies = [ name = "reth-node-events" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types-engine", "futures", @@ -8071,7 +8036,7 @@ dependencies = [ "reth-metrics", "reth-provider", "reth-tasks", - "socket2 0.5.7", + "socket2", "tikv-jemalloc-ctl", "tokio", "tower 0.4.13", @@ -8096,6 +8061,7 @@ version = "1.1.0" dependencies = [ "alloy-chains", "alloy-consensus", + "alloy-eips", "alloy-genesis", "alloy-primitives", "derive_more 1.0.0", @@ -8219,7 +8185,7 @@ dependencies = [ "eyre", "op-alloy-consensus", "op-alloy-rpc-types-engine", - "parking_lot 0.12.3", + "parking_lot", "reth", "reth-auto-seal-consensus", "reth-basic-payload-builder", @@ -8308,7 +8274,7 @@ dependencies = [ "op-alloy-consensus", "op-alloy-network", "op-alloy-rpc-types", - "parking_lot 0.12.3", + "parking_lot", "reqwest", "reth-chainspec", "reth-evm", @@ -8391,7 +8357,6 @@ dependencies = [ name = "reth-payload-validator" version = "1.1.0" dependencies = [ - "alloy-eips", "alloy-rpc-types", "reth-chainspec", "reth-primitives", @@ -8429,7 +8394,6 @@ dependencies = [ "reth-chainspec", "reth-codecs", "reth-ethereum-forks", - "reth-optimism-chainspec", "reth-primitives-traits", "reth-static-file-types", "reth-testing-utils", @@ -8486,7 +8450,7 @@ dependencies = [ "itertools 0.13.0", "metrics", "notify", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "rayon", "reth-blockchain-tree-api", @@ -8573,6 +8537,7 @@ dependencies = [ name = "reth-revm" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "reth-ethereum-forks", @@ -8615,7 +8580,7 @@ dependencies = [ "jsonrpsee", "jsonrpsee-types", "jsonwebtoken", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "rand 0.8.5", "reth-chainspec", @@ -8802,7 +8767,7 @@ dependencies = [ "futures", "jsonrpsee", "jsonrpsee-types", - "parking_lot 0.12.3", + "parking_lot", "reth-chainspec", "reth-errors", "reth-evm", @@ -9015,7 +8980,7 @@ version = "1.1.0" dependencies = [ "alloy-primitives", "assert_matches", - "parking_lot 0.12.3", + "parking_lot", "rayon", "reth-db", "reth-db-api", @@ -9141,7 +9106,7 @@ dependencies = [ "criterion", "futures-util", "metrics", - "parking_lot 0.12.3", + "parking_lot", "paste", "pprof", "proptest", @@ -9301,9 +9266,9 @@ dependencies = [ [[package]] name = "revm" -version = "16.0.0" +version = "17.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34e44692d5736cc44c697a372e507890f8797f06d1541c5f4b9bec594d90fd8a" +checksum = "2eab16cb0a8cd5ac88b11230b20df588b7e8aae7dfab4b3f830e98aebeb4b365" dependencies = [ "auto_impl", "cfg-if", @@ -9316,9 +9281,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a64e2246ad480167548724eb9c9c66945241b867c7d50894de3ca860c9823a45" +checksum = "1e29c662f7887f3b659d4b0fd234673419a8fcbeaa1ecc29bf7034c0a75cc8ea" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -9335,9 +9300,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "12.0.0" +version = "13.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f89940d17d5d077570de1977f52f69049595322e237cb6c754c3d47f668f023" +checksum = "fac2034454f8bc69dc7d3c94cdb1b57559e27f5ef0518771f1787de543d7d6a1" dependencies = [ "revm-primitives", "serde", @@ -9345,9 +9310,9 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "13.0.0" +version = "14.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8f816aaea3245cbdbe7fdd84955df33597f9322c7912c3e3ba7bc855e03211f" +checksum = "7a88c8c7c5f9b988a9e65fc0990c6ce859cdb74114db705bd118a96d22d08027" dependencies = [ "aurora-engine-modexp", "blst", @@ -9365,9 +9330,9 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "12.0.0" +version = "13.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532411bbde45a46707c1d434dcdc29866cf261c1b748fb01b303ce3b4310b361" +checksum = "0d11fa1e195b0bebaf3fb18596f314a13ba3a4cb1fdd16d3465934d812fd921e" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -9589,9 +9554,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.14" +version = "0.23.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" +checksum = "5fbb44d7acc4e873d613422379f69f237a1b141928c02f6bc6ccfddddc2d7993" dependencies = [ "log", "once_cell", @@ -9881,14 +9846,14 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "indexmap 2.6.0", "itoa", @@ -9916,7 +9881,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -9967,7 +9932,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -9977,7 +9942,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" dependencies = [ "once_cell", - "parking_lot 0.12.3", + "parking_lot", "scc", "serial_test_derive", ] @@ -9990,7 +9955,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -10182,16 +10147,6 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.7" @@ -10286,7 +10241,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -10344,9 +10299,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.79" +version = "2.0.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "e6e185e337f816bc8da115b8afcb3324006ccc82eeaddf35113888d3bd8e44ac" dependencies = [ "proc-macro2", "quote", @@ -10355,14 +10310,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebfc1bfd06acc78f16d8fd3ef846bc222ee7002468d10a7dce8d703d6eab89a3" +checksum = 
"9d5e0c2ea8db64b2898b62ea2fbd60204ca95e0b2c6bdf53ff768bbe916fbe4d" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -10388,7 +10343,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -10465,7 +10420,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -10504,7 +10459,7 @@ checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -10665,10 +10620,10 @@ dependencies = [ "bytes", "libc", "mio 1.0.2", - "parking_lot 0.12.3", + "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2", "tokio-macros", "windows-sys 0.52.0", ] @@ -10681,7 +10636,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -10882,7 +10837,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -11037,7 +10992,7 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "resolv-conf", "serde", @@ -11098,6 +11053,18 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "uint" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + [[package]] name = "unarray" version = "0.1.4" @@ -11106,12 +11073,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicase" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" -dependencies = [ - "version_check", -] +checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" [[package]] name = "unicode-bidi" @@ -11229,9 +11193,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" dependencies = [ "getrandom 0.2.15", ] @@ -11330,7 +11294,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", "wasm-bindgen-shared", ] @@ -11364,7 +11328,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11396,7 +11360,7 @@ checksum = "c7ed9d8b15c7fb594d72bfb4b5a276f3d2029333cd93a932f376f5937f6f80ee" dependencies = [ "futures", "js-sys", - "parking_lot 0.12.3", + "parking_lot", "pin-utils", "slab", "wasm-bindgen", @@ -11520,7 +11484,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -11531,7 +11495,7 @@ checksum = 
"2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -11542,7 +11506,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -11553,7 +11517,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -11828,7 +11792,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", "synstructure", ] @@ -11850,7 +11814,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -11870,7 +11834,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", "synstructure", ] @@ -11891,7 +11855,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -11913,7 +11877,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 5b6912c33f27..22a78979dfcb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -145,6 +145,7 @@ members = [ "examples/rpc-db/", "examples/stateful-precompile/", "examples/txpool-tracing/", + "examples/custom-beacon-withdrawals", "testing/ef-tests/", "testing/testing-utils", ] @@ -287,6 +288,13 @@ codegen-units = 1 inherits = "release" lto = "fat" +[profile.reproducible] +inherits = "release" +debug = false +panic = "abort" +codegen-units = 1 +overflow-checks = true + [workspace.dependencies] # reth op-reth = { path = "crates/optimism/bin" } @@ -410,9 +418,9 @@ reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } # revm -revm = { version = "16.0.0", features = ["std"], default-features = false } -revm-inspectors = "0.9.0" -revm-primitives = { version = "12.0.0", features = [ +revm = { version = "17.0.0", features = ["std"], default-features = false } +revm-inspectors = "0.10.0" +revm-primitives = { version = "13.0.0", features = [ "std", ], default-features = false } @@ -424,39 +432,39 @@ alloy-rlp = "0.3.4" alloy-sol-types = "0.8.0" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.5.2", default-features = false } -alloy-eips = { version = "0.5.2", default-features = false } -alloy-genesis = { version = "0.5.2", default-features = false } -alloy-json-rpc = { version = "0.5.2", default-features = false } -alloy-network = { version = "0.5.2", default-features = false } -alloy-network-primitives = { version = "0.5.2", default-features = false } -alloy-node-bindings = { version = "0.5.2", default-features = false } -alloy-provider = { version = "0.5.2", features = [ +alloy-consensus = { version = "0.5.4", default-features = false } +alloy-eips = { version = "0.5.4", default-features = false } +alloy-genesis = { version = "0.5.4", default-features = false } +alloy-json-rpc = { version = "0.5.4", default-features = false } +alloy-network = { version = "0.5.4", default-features = false } +alloy-network-primitives = { 
version = "0.5.4", default-features = false } +alloy-node-bindings = { version = "0.5.4", default-features = false } +alloy-provider = { version = "0.5.4", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.5.2", default-features = false } -alloy-rpc-client = { version = "0.5.2", default-features = false } -alloy-rpc-types = { version = "0.5.2", features = [ +alloy-pubsub = { version = "0.5.4", default-features = false } +alloy-rpc-client = { version = "0.5.4", default-features = false } +alloy-rpc-types = { version = "0.5.4", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.5.2", default-features = false } -alloy-rpc-types-anvil = { version = "0.5.2", default-features = false } -alloy-rpc-types-beacon = { version = "0.5.2", default-features = false } -alloy-rpc-types-debug = { version = "0.5.2", default-features = false } -alloy-rpc-types-engine = { version = "0.5.2", default-features = false } -alloy-rpc-types-eth = { version = "0.5.2", default-features = false } -alloy-rpc-types-mev = { version = "0.5.2", default-features = false } -alloy-rpc-types-trace = { version = "0.5.2", default-features = false } -alloy-rpc-types-txpool = { version = "0.5.2", default-features = false } -alloy-serde = { version = "0.5.2", default-features = false } -alloy-signer = { version = "0.5.2", default-features = false } -alloy-signer-local = { version = "0.5.2", default-features = false } -alloy-transport = { version = "0.5.2" } -alloy-transport-http = { version = "0.5.2", features = [ +alloy-rpc-types-admin = { version = "0.5.4", default-features = false } +alloy-rpc-types-anvil = { version = "0.5.4", default-features = false } +alloy-rpc-types-beacon = { version = "0.5.4", default-features = false } +alloy-rpc-types-debug = { version = "0.5.4", default-features = false } +alloy-rpc-types-engine = { version = "0.5.4", default-features = false } +alloy-rpc-types-eth = { version = "0.5.4", default-features = false } +alloy-rpc-types-mev = { version = "0.5.4", default-features = false } +alloy-rpc-types-trace = { version = "0.5.4", default-features = false } +alloy-rpc-types-txpool = { version = "0.5.4", default-features = false } +alloy-serde = { version = "0.5.4", default-features = false } +alloy-signer = { version = "0.5.4", default-features = false } +alloy-signer-local = { version = "0.5.4", default-features = false } +alloy-transport = { version = "0.5.4" } +alloy-transport-http = { version = "0.5.4", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.5.2", default-features = false } -alloy-transport-ws = { version = "0.5.2", default-features = false } +alloy-transport-ipc = { version = "0.5.4", default-features = false } +alloy-transport-ws = { version = "0.5.4", default-features = false } # op op-alloy-rpc-types = "0.5" @@ -465,7 +473,7 @@ op-alloy-network = "0.5" op-alloy-consensus = "0.5" # misc -aquamarine = "0.5" +aquamarine = "0.6" auto_impl = "1" backon = { version = "1.2", default-features = false, features = [ "std-blocking-sleep", @@ -547,7 +555,7 @@ tower = "0.4" tower-http = "0.5" # p2p -discv5 = "0.7.0" +discv5 = "0.8.0" if-addrs = "0.13" # rpc diff --git a/Dockerfile.reproducible b/Dockerfile.reproducible new file mode 100644 index 000000000000..12c12dd7c7d4 --- /dev/null +++ b/Dockerfile.reproducible @@ -0,0 +1,37 @@ +# Use the Rust 1.82 image based on Debian Bullseye +FROM 
rust:1.82-bullseye@sha256:c42c8ca762560c182ba30edda0e0d71a8604040af2672370559d7e854653c66d AS builder + +# Install specific version of libclang-dev +RUN apt-get update && apt-get install -y libclang-dev=1:11.0-51+nmu5 + +# Clone the repository at the specific branch +RUN git clone https://github.com/paradigmxyz/reth /app +WORKDIR /app + +# Checkout the reproducible-build branch +RUN git checkout reproducible-build + +# Get the latest commit timestamp and set SOURCE_DATE_EPOCH +RUN SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) && \ + echo "SOURCE_DATE_EPOCH=$SOURCE_DATE_EPOCH" >> /etc/environment + +# Set environment variables for reproducibility +ARG RUSTFLAGS="-C target-feature=+crt-static -C link-arg=-Wl,--build-id=none -Clink-arg=-static-libgcc -C metadata='' --remap-path-prefix $(pwd)=." +ENV SOURCE_DATE_EPOCH=$SOURCE_DATE_EPOCH \ + CARGO_INCREMENTAL=0 \ + LC_ALL=C \ + TZ=UTC \ + RUSTFLAGS="${RUSTFLAGS}" + +# Set the default features if not provided +ARG FEATURES="jemalloc asm-keccak" + +# Build the project with the reproducible settings +RUN . /etc/environment && \ + cargo build --bin reth --features "${FEATURES}" --profile "reproducible" --locked --target x86_64-unknown-linux-gnu + +# Create a minimal final image with just the binary +FROM scratch AS binaries + +# Copy the compiled binary from the builder stage +COPY --from=builder /app/target/x86_64-unknown-linux-gnu/reproducible/reth /reth diff --git a/Makefile b/Makefile index 908f1ef24daa..5ad7abac6755 100644 --- a/Makefile +++ b/Makefile @@ -62,6 +62,16 @@ install-op: ## Build and install the op-reth binary under `~/.cargo/bin`. build: ## Build the reth binary into `target` directory. cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)" +SOURCE_DATE_EPOCH := $(shell git log -1 --pretty=%ct) +.PHONY: reproducible +reproducible: ## Build the reth binary into `target` directory with reproducible builds. Only works for x86_64-unknown-linux-gnu currently + SOURCE_DATE_EPOCH=$(SOURCE_DATE_EPOCH) \ + CARGO_INCREMENTAL=0 \ + LC_ALL=C \ + TZ=UTC \ + RUSTFLAGS="-C target-feature=+crt-static -C link-arg=-Wl,--build-id=none -Clink-arg=-static-libgcc -C metadata='' --remap-path-prefix $$(pwd)=." \ + cargo build --bin reth --features "$(FEATURES)" --profile "reproducible" --locked --target x86_64-unknown-linux-gnu + .PHONY: build-debug build-debug: ## Build the reth binary into `target/debug` directory. 
	cargo build --bin reth --features "$(FEATURES)"
diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml
index 8380915d463e..ffd1998b24ea 100644
--- a/bin/reth/Cargo.toml
+++ b/bin/reth/Cargo.toml
@@ -101,7 +101,7 @@ tempfile.workspace = true
 
 [features]
 default = ["jemalloc"]
-dev = ["reth-cli-commands/dev"]
+dev = ["reth-cli-commands/arbitrary"]
 
 asm-keccak = [
 	"reth-node-core/asm-keccak",
diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs
index 01f8f73e7b1b..01662eb4dcb6 100644
--- a/bin/reth/src/cli/mod.rs
+++ b/bin/reth/src/cli/mod.rs
@@ -146,8 +146,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>, Ext: clap::Args + fmt::Debug> Cl
         let _guard = self.init_tracing()?;
         info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory);
 
-        // Install the prometheus recorder to be sure to record task
-        // executor's metrics
+        // Install the prometheus recorder to be sure to record all metrics
         let _ = install_prometheus_recorder();
 
         let runner = CliRunner::default();
diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs
index de497cbe0070..e7b3de6b6c13 100644
--- a/bin/reth/src/commands/debug_cmd/replay_engine.rs
+++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs
@@ -170,10 +170,8 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
                     beacon_engine_handle.fork_choice_updated(state, payload_attrs).await?;
                 debug!(target: "reth::cli", ?response, "Received for forkchoice updated");
             }
-            StoredEngineApiMessage::NewPayload { payload, cancun_fields } => {
-                // todo: prague (last arg)
-                let response =
-                    beacon_engine_handle.new_payload(payload, cancun_fields, None).await?;
+            StoredEngineApiMessage::NewPayload { payload, sidecar } => {
+                let response = beacon_engine_handle.new_payload(payload, sidecar).await?;
                 debug!(target: "reth::cli", ?response, "Received for new payload");
             }
         };
diff --git a/bin/reth/src/main.rs b/bin/reth/src/main.rs
index f424163a24fe..e146912c06f9 100644
--- a/bin/reth/src/main.rs
+++ b/bin/reth/src/main.rs
@@ -33,11 +33,11 @@ pub struct EngineArgs {
     pub legacy: bool,
 
     /// Configure persistence threshold for engine experimental.
-    #[arg(long = "engine.persistence-threshold", requires = "experimental", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)]
+    #[arg(long = "engine.persistence-threshold", conflicts_with = "legacy", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)]
     pub persistence_threshold: u64,
 
     /// Configure the target number of blocks to keep in memory.
-    #[arg(long = "engine.memory-block-buffer-target", requires = "experimental", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)]
+    #[arg(long = "engine.memory-block-buffer-target", conflicts_with = "legacy", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)]
     pub memory_block_buffer_target: u64,
 }
diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md
index 9e3b32cc0b39..f9a8a158adc8 100644
--- a/book/cli/reth/db.md
+++ b/book/cli/reth/db.md
@@ -81,6 +81,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
 Logging:
       --log.stdout.format <FORMAT>
           The format to use for logs written to stdout
diff --git a/book/cli/reth/db/diff.md b/book/cli/reth/db/diff.md
index ea4c29612ff7..f57c6ac364fe 100644
--- a/book/cli/reth/db/diff.md
+++ b/book/cli/reth/db/diff.md
@@ -45,6 +45,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
       --table <TABLE>
           The table name to diff. If not specified, all tables are diffed.
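Editor's aside: the EngineArgs change above swaps clap's `requires = "experimental"` for `conflicts_with = "legacy"`, so the engine tuning flags are now valid on their own and are instead rejected when the legacy engine is explicitly selected. A minimal, self-contained sketch of the same pattern (hypothetical reduced struct and defaults, not the real reth definition):

    use clap::Parser;

    /// Hypothetical reduced version of the flag relationship used above.
    #[derive(Debug, Parser)]
    struct Args {
        /// Opt into the legacy engine.
        #[arg(long)]
        legacy: bool,
        /// Tuning flag that only makes sense for the new engine,
        /// so it is rejected when --legacy is passed.
        #[arg(long, conflicts_with = "legacy", default_value_t = 2)]
        persistence_threshold: u64,
    }

    fn main() {
        // `--legacy --persistence-threshold 5` would fail to parse;
        // `--persistence-threshold 5` alone is accepted.
        let args = Args::parse();
        println!("{args:?}");
    }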
diff --git a/book/cli/reth/debug/build-block.md b/book/cli/reth/debug/build-block.md
index 76ddac306ce2..2e6d637d52c2 100644
--- a/book/cli/reth/debug/build-block.md
+++ b/book/cli/reth/debug/build-block.md
@@ -69,6 +69,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
       --trusted-setup-file <PATH>
           Overrides the KZG trusted setup by reading from the supplied file
diff --git a/book/cli/reth/debug/execution.md b/book/cli/reth/debug/execution.md
index 202e1452a8ae..9ca74897c5ea 100644
--- a/book/cli/reth/debug/execution.md
+++ b/book/cli/reth/debug/execution.md
@@ -69,6 +69,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
 Networking:
   -d, --disable-discovery
           Disable the discovery service
diff --git a/book/cli/reth/debug/in-memory-merkle.md b/book/cli/reth/debug/in-memory-merkle.md
index 534e6d46c69d..3e322a6913dd 100644
--- a/book/cli/reth/debug/in-memory-merkle.md
+++ b/book/cli/reth/debug/in-memory-merkle.md
@@ -69,6 +69,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
 Networking:
   -d, --disable-discovery
           Disable the discovery service
diff --git a/book/cli/reth/debug/merkle.md b/book/cli/reth/debug/merkle.md
index 19bc38acceb9..d701803b81ca 100644
--- a/book/cli/reth/debug/merkle.md
+++ b/book/cli/reth/debug/merkle.md
@@ -69,6 +69,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
 Networking:
   -d, --disable-discovery
           Disable the discovery service
diff --git a/book/cli/reth/debug/replay-engine.md b/book/cli/reth/debug/replay-engine.md
index 7a14b9cf09d4..dd587620a868 100644
--- a/book/cli/reth/debug/replay-engine.md
+++ b/book/cli/reth/debug/replay-engine.md
@@ -69,6 +69,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
 Networking:
   -d, --disable-discovery
           Disable the discovery service
diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md
index 7bd8a0079ec8..28e085bda718 100644
--- a/book/cli/reth/import.md
+++ b/book/cli/reth/import.md
@@ -69,6 +69,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
       --no-state
           Disables stages that require state.
diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md
index cb221634c40e..ddcd3cece378 100644
--- a/book/cli/reth/init-state.md
+++ b/book/cli/reth/init-state.md
@@ -69,6 +69,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
       <STATE>
           JSONL file with state dump.
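Editor's aside: every database-backed subcommand above gains the same `--db.read-transaction-timeout` flag, where `0` disables the timeout entirely. As a rough sketch of how such a flag is typically wired up with clap — hypothetical field names and type, not reth's actual `DatabaseArgs`:

    use clap::Parser;
    use std::time::Duration;

    #[derive(Debug, Parser)]
    struct DbArgs {
        /// Read transaction timeout in seconds, 0 means no timeout.
        #[arg(long = "db.read-transaction-timeout")]
        read_transaction_timeout: Option<u64>,
    }

    impl DbArgs {
        /// None = keep the default, Some(None) = disabled, Some(Some(d)) = explicit timeout.
        fn timeout(&self) -> Option<Option<Duration>> {
            match self.read_transaction_timeout {
                None => None,                     // flag not given: keep the default
                Some(0) => Some(None),            // 0 disables the timeout
                Some(secs) => Some(Some(Duration::from_secs(secs))),
            }
        }
    }

    fn main() {
        let args = DbArgs::parse_from(["app", "--db.read-transaction-timeout", "0"]);
        assert_eq!(args.timeout(), Some(None)); // timeout disabled
    }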
diff --git a/book/cli/reth/init.md b/book/cli/reth/init.md
index cc889e5e35aa..cd01accc0471 100644
--- a/book/cli/reth/init.md
+++ b/book/cli/reth/init.md
@@ -69,6 +69,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
 Logging:
       --log.stdout.format <FORMAT>
           The format to use for logs written to stdout
diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md
index 34d32209ada9..4cd55db1fe08 100644
--- a/book/cli/reth/node.md
+++ b/book/cli/reth/node.md
@@ -590,6 +590,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
 Dev testnet:
       --dev
           Start the node in dev mode
diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md
index 01253705b233..603b451d9405 100644
--- a/book/cli/reth/p2p.md
+++ b/book/cli/reth/p2p.md
@@ -247,6 +247,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
 Logging:
       --log.stdout.format <FORMAT>
          The format to use for logs written to stdout
diff --git a/book/cli/reth/prune.md b/book/cli/reth/prune.md
index e0641256f1c9..ed16197a76c3 100644
--- a/book/cli/reth/prune.md
+++ b/book/cli/reth/prune.md
@@ -69,6 +69,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
 Logging:
       --log.stdout.format <FORMAT>
           The format to use for logs written to stdout
diff --git a/book/cli/reth/recover/storage-tries.md b/book/cli/reth/recover/storage-tries.md
index 1f639cb095ae..ecdaabe77817 100644
--- a/book/cli/reth/recover/storage-tries.md
+++ b/book/cli/reth/recover/storage-tries.md
@@ -69,6 +69,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
 Logging:
       --log.stdout.format <FORMAT>
           The format to use for logs written to stdout
diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md
index ae21a8918306..399b3818c28f 100644
--- a/book/cli/reth/stage/drop.md
+++ b/book/cli/reth/stage/drop.md
@@ -69,6 +69,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
           Possible values:
           - headers: The headers stage within the pipeline
diff --git a/book/cli/reth/stage/dump.md b/book/cli/reth/stage/dump.md
index 291d896902d3..4b3de3fb1cb5 100644
--- a/book/cli/reth/stage/dump.md
+++ b/book/cli/reth/stage/dump.md
@@ -76,6 +76,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
 Logging:
       --log.stdout.format <FORMAT>
           The format to use for logs written to stdout
diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md
index bfe5ff9d6c63..9da3ce0deb6d 100644
--- a/book/cli/reth/stage/run.md
+++ b/book/cli/reth/stage/run.md
@@ -69,6 +69,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
       --metrics <SOCKET>
           Enable Prometheus metrics.
diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md
index d181b3bcade6..700ab3d7e7ce 100644
--- a/book/cli/reth/stage/unwind.md
+++ b/book/cli/reth/stage/unwind.md
@@ -74,6 +74,9 @@ Database:
 
           [possible values: true, false]
 
+      --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT>
+          Read transaction timeout in seconds, 0 means no timeout
+
 Networking:
   -d, --disable-discovery
           Disable the discovery service
diff --git a/book/sources/Cargo.toml b/book/sources/Cargo.toml
index 1529af952b92..b374ad798b5d 100644
--- a/book/sources/Cargo.toml
+++ b/book/sources/Cargo.toml
@@ -1,11 +1,13 @@
 [workspace]
-members = [
-    "exex/hello-world",
-    "exex/remote",
-    "exex/tracking-state",
-]
+members = ["exex/hello-world", "exex/remote", "exex/tracking-state"]
 
 # Explicitly set the resolver to version 2, which is the default for packages with edition >= 2021
 # https://doc.rust-lang.org/edition-guide/rust-2021/default-cargo-resolver.html
 resolver = "2"
+
+[patch.'https://github.com/paradigmxyz/reth']
+reth = { path = "../../bin/reth" }
+reth-exex = { path = "../../crates/exex/exex" }
+reth-node-ethereum = { path = "../../crates/ethereum/node" }
+reth-tracing = { path = "../../crates/tracing" }
+reth-node-api = { path = "../../crates/node/api" }
diff --git a/book/sources/exex/hello-world/Cargo.toml b/book/sources/exex/hello-world/Cargo.toml
index e5d32a140549..c466018c667a 100644
--- a/book/sources/exex/hello-world/Cargo.toml
+++ b/book/sources/exex/hello-world/Cargo.toml
@@ -4,10 +4,10 @@ version = "0.1.0"
 edition = "2021"
 
 [dependencies]
-reth = { git = "https://github.com/paradigmxyz/reth.git" } # Reth
-reth-exex = { git = "https://github.com/paradigmxyz/reth.git" } # Execution Extensions
+reth = { git = "https://github.com/paradigmxyz/reth.git" }               # Reth
+reth-exex = { git = "https://github.com/paradigmxyz/reth.git" }          # Execution Extensions
 reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" } # Ethereum Node implementation
-reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } # Logging
+reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" }       # Logging
 
-eyre = "0.6" # Easy error handling
+eyre = "0.6"         # Easy error handling
 futures-util = "0.3" # Stream utilities for consuming notifications
diff --git a/book/sources/exex/tracking-state/Cargo.toml b/book/sources/exex/tracking-state/Cargo.toml
index 3ce21b0c3403..a8e862d0a735 100644
--- a/book/sources/exex/tracking-state/Cargo.toml
+++ b/book/sources/exex/tracking-state/Cargo.toml
@@ -5,10 +5,12 @@ edition = "2021"
 
 [dependencies]
 reth = { git = "https://github.com/paradigmxyz/reth.git" }
-reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = ["serde"] }
-reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git"}
+reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = [
+    "serde",
+] }
+reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" }
 reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" }
 
-eyre = "0.6" # Easy error handling
-futures-util = "0.3" # Stream utilities for consuming notifications
+eyre = "0.6"         # Easy error handling
+futures-util = "0.3" # Stream utilities for consuming notifications
 alloy-primitives = "0.8.7"
diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml
index aa8fab16fa54..3fa6de2b402c 100644
--- a/crates/blockchain-tree/Cargo.toml
+++ b/crates/blockchain-tree/Cargo.toml
@@ -57,6 +57,7 @@ reth-consensus = { workspace = true, features = ["test-utils"] }
 reth-testing-utils.workspace = true
 reth-revm.workspace = true
 reth-evm-ethereum.workspace = true
+reth-execution-types.workspace = true
 parking_lot.workspace = true
 assert_matches.workspace = true
 alloy-genesis.workspace = true
diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs
index 1e2ed2a4a2ed..95c0361f31f5 100644
--- a/crates/blockchain-tree/src/blockchain_tree.rs
+++ b/crates/blockchain-tree/src/blockchain_tree.rs
@@ -113,9 +113,6 @@ where
     ///   is crucial for the correct execution of transactions.
     /// - `tree_config`: Configuration for the blockchain tree, including any parameters that affect
     ///   its structure or performance.
-    /// - `prune_modes`: Configuration for pruning old blockchain data. This helps in managing the
-    ///   storage space efficiently. It's important to validate this configuration to ensure it does
-    ///   not lead to unintended data loss.
     pub fn new(
         externals: TreeExternals<N, E>,
         config: BlockchainTreeConfig,
diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs
index 719852c12ac0..4e22fcb78b6b 100644
--- a/crates/blockchain-tree/src/externals.rs
+++ b/crates/blockchain-tree/src/externals.rs
@@ -21,7 +21,6 @@ use std::{collections::BTreeMap, sync::Arc};
 /// - A handle to the database
 /// - A handle to the consensus engine
 /// - The executor factory to execute blocks with
-/// - The chain spec
 #[derive(Debug)]
 pub struct TreeExternals<N: NodeTypesWithDB, E> {
     /// The provider factory, used to commit the canonical chain, or unwind it.
diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs
index b76db9e6a9ce..ca8af6f9b581 100644
--- a/crates/blockchain-tree/src/state.rs
+++ b/crates/blockchain-tree/src/state.rs
@@ -61,6 +61,7 @@ impl TreeState {
     pub(crate) fn block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> {
         self.block_with_senders_by_hash(block_hash).map(|block| &block.block)
     }
+
     /// Returns the block with matching hash from any side-chain.
     ///
     /// Caution: This will not return blocks from the canonical chain.
@@ -128,3 +129,302 @@ impl From<u64> for SidechainId {
         Self(value)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::canonical_chain::CanonicalChain;
+    use alloy_primitives::B256;
+    use reth_execution_types::Chain;
+    use reth_provider::ExecutionOutcome;
+
+    #[test]
+    fn test_tree_state_initialization() {
+        // Set up some dummy data for initialization
+        let last_finalized_block_number = 10u64;
+        let last_canonical_hashes = vec![(9u64, B256::random()), (10u64, B256::random())];
+        let buffer_limit = 5;
+
+        // Initialize the tree state
+        let tree_state = TreeState::new(
+            last_finalized_block_number,
+            last_canonical_hashes.clone(),
+            buffer_limit,
+        );
+
+        // Verify the tree state after initialization
+        assert_eq!(tree_state.block_chain_id_generator, 0);
+        assert_eq!(tree_state.block_indices().last_finalized_block(), last_finalized_block_number);
+        assert_eq!(
+            *tree_state.block_indices.canonical_chain().inner(),
+            *CanonicalChain::new(last_canonical_hashes.into_iter().collect()).inner()
+        );
+        assert!(tree_state.chains.is_empty());
+        assert!(tree_state.buffered_blocks.lru.is_empty());
+    }
+
+    #[test]
+    fn test_tree_state_next_id() {
+        // Initialize the tree state
+        let mut tree_state = TreeState::new(0, vec![], 5);
+
+        // Generate a few sidechain IDs
+        let first_id = tree_state.next_id();
+        let second_id = tree_state.next_id();
+
+        // Verify the generated sidechain IDs and the updated generator state
+        assert_eq!(first_id, SidechainId(0));
+        assert_eq!(second_id, SidechainId(1));
+        assert_eq!(tree_state.block_chain_id_generator, 2);
+    }
+
+    #[test]
+    fn test_tree_state_insert_chain() {
+        // Initialize tree state
+        let mut tree_state = TreeState::new(0, vec![], 5);
+
+        // Create a chain with two blocks
+        let block = SealedBlockWithSenders::default();
+        let block1_hash = B256::random();
+        let block2_hash = B256::random();
+
+        let mut block1 = block.clone();
+        let mut block2 = block;
+
+        block1.block.header.set_hash(block1_hash);
+        block1.block.header.set_block_number(9);
+        block2.block.header.set_hash(block2_hash);
+        block2.block.header.set_block_number(10);
+
+        let chain = AppendableChain::new(Chain::new(
+            [block1, block2],
+            Default::default(),
+            Default::default(),
+        ));
+
+        // Insert the chain into the TreeState
+        let chain_id = tree_state.insert_chain(chain).unwrap();
+
+        // Verify the chain ID and that it was added to the chains collection
+        assert_eq!(chain_id, SidechainId(0));
+        assert!(tree_state.chains.contains_key(&chain_id));
+
+        // Ensure that the block indices are updated
+        assert_eq!(
+            tree_state.block_indices.get_side_chain_id(&block1_hash).unwrap(),
+            SidechainId(0)
+        );
+        assert_eq!(
+            tree_state.block_indices.get_side_chain_id(&block2_hash).unwrap(),
+            SidechainId(0)
+        );
+
+        // Ensure that the block chain ID generator was updated
+        assert_eq!(tree_state.block_chain_id_generator, 1);
+
+        // Create an empty chain
+        let chain_empty = AppendableChain::new(Chain::default());
+
+        // Insert the empty chain into the tree state
+        let chain_id = tree_state.insert_chain(chain_empty);
+
+        // Ensure that the empty chain was not inserted
+        assert!(chain_id.is_none());
+
+        // Nothing should have changed and no new chain should have been added
+        assert!(tree_state.chains.contains_key(&SidechainId(0)));
+        assert!(!tree_state.chains.contains_key(&SidechainId(1)));
+        assert_eq!(
+            tree_state.block_indices.get_side_chain_id(&block1_hash).unwrap(),
+            SidechainId(0)
+        );
+        assert_eq!(
+            tree_state.block_indices.get_side_chain_id(&block2_hash).unwrap(),
+            SidechainId(0)
+        );
+        assert_eq!(tree_state.block_chain_id_generator, 1);
+    }
+
+    #[test]
+    fn test_block_by_hash_side_chain() {
+        // Initialize a tree state with some dummy data
+        let mut tree_state = TreeState::new(0, vec![], 5);
+
+        // Create two side-chain blocks with random hashes
+        let block1_hash = B256::random();
+        let block2_hash = B256::random();
+
+        let mut block1 = SealedBlockWithSenders::default();
+        let mut block2 = SealedBlockWithSenders::default();
+
+        block1.block.header.set_hash(block1_hash);
+        block1.block.header.set_block_number(9);
+        block2.block.header.set_hash(block2_hash);
+        block2.block.header.set_block_number(10);
+
+        // Create a chain with these blocks
+        let chain = AppendableChain::new(Chain::new(
+            vec![block1.clone(), block2.clone()],
+            Default::default(),
+            Default::default(),
+        ));
+
+        // Insert the side chain into the TreeState
+        tree_state.insert_chain(chain).unwrap();
+
+        // Retrieve the blocks by their hashes
+        let retrieved_block1 = tree_state.block_by_hash(block1_hash);
+        assert_eq!(*retrieved_block1.unwrap(), block1.block);
+
+        let retrieved_block2 = tree_state.block_by_hash(block2_hash);
+        assert_eq!(*retrieved_block2.unwrap(), block2.block);
+
+        // Test block_by_hash with a random hash that doesn't exist
+        let non_existent_hash = B256::random();
+        let result = tree_state.block_by_hash(non_existent_hash);
+
+        // Ensure that no block is found
+        assert!(result.is_none());
+    }
+
+    #[test]
+    fn test_block_with_senders_by_hash() {
+        // Initialize a tree state with some dummy data
+        let mut tree_state = TreeState::new(0, vec![], 5);
+
+        // Create two side-chain blocks with random hashes
+        let block1_hash = B256::random();
+        let block2_hash = B256::random();
+
+        let mut block1 = SealedBlockWithSenders::default();
+        let mut block2 = SealedBlockWithSenders::default();
+
+        block1.block.header.set_hash(block1_hash);
+        block1.block.header.set_block_number(9);
+        block2.block.header.set_hash(block2_hash);
+        block2.block.header.set_block_number(10);
+
+        // Create a chain with these blocks
+        let chain = AppendableChain::new(Chain::new(
+            vec![block1.clone(), block2.clone()],
+            Default::default(),
+            Default::default(),
+        ));
+
+        // Insert the side chain into the TreeState
+        tree_state.insert_chain(chain).unwrap();
+
+        // Test to retrieve the blocks with senders by their hashes
+        let retrieved_block1 = tree_state.block_with_senders_by_hash(block1_hash);
+        assert_eq!(*retrieved_block1.unwrap(), block1);
+
+        let retrieved_block2 = tree_state.block_with_senders_by_hash(block2_hash);
+        assert_eq!(*retrieved_block2.unwrap(), block2);
+
+        // Test block_with_senders_by_hash with a random hash that doesn't exist
+        let non_existent_hash = B256::random();
+        let result = tree_state.block_with_senders_by_hash(non_existent_hash);
+
+        // Ensure that no block is found
+        assert!(result.is_none());
+    }
+
+    #[test]
+    fn test_get_buffered_block() {
+        // Initialize a tree state with some dummy data
+        let mut tree_state = TreeState::new(0, vec![], 5);
+
+        // Create a block with a random hash and add it to the buffer
+        let block_hash = B256::random();
+        let mut block = SealedBlockWithSenders::default();
+        block.block.header.set_hash(block_hash);
+
+        // Add the block to the buffered blocks in the TreeState
+        tree_state.buffered_blocks.insert_block(block.clone());
+
+        // Test get_buffered_block to retrieve the block by its hash
+        let retrieved_block = tree_state.get_buffered_block(&block_hash);
+        assert_eq!(*retrieved_block.unwrap(), block);
+
+        // Test get_buffered_block with a non-existent hash
+        let non_existent_hash = B256::random();
+        let result = tree_state.get_buffered_block(&non_existent_hash);
+
+        // Ensure that no block is found
+        assert!(result.is_none());
+    }
+
+    #[test]
+    fn test_lowest_buffered_ancestor() {
+        // Initialize a tree state with some dummy data
+        let mut tree_state = TreeState::new(0, vec![], 5);
+
+        // Create blocks with random hashes and set up parent-child relationships
+        let ancestor_hash = B256::random();
+        let descendant_hash = B256::random();
+
+        let mut ancestor_block = SealedBlockWithSenders::default();
+        let mut descendant_block = SealedBlockWithSenders::default();
+
+        ancestor_block.block.header.set_hash(ancestor_hash);
+        descendant_block.block.header.set_hash(descendant_hash);
+        descendant_block.block.header.set_parent_hash(ancestor_hash);
+
+        // Insert the blocks into the buffer
+        tree_state.buffered_blocks.insert_block(ancestor_block.clone());
+        tree_state.buffered_blocks.insert_block(descendant_block.clone());
+
+        // Test lowest_buffered_ancestor for the descendant block
+        let lowest_ancestor = tree_state.lowest_buffered_ancestor(&descendant_hash);
+        assert!(lowest_ancestor.is_some());
+        assert_eq!(lowest_ancestor.unwrap().block.header.hash(), ancestor_hash);
+
+        // Test lowest_buffered_ancestor with a non-existent hash
+        let non_existent_hash = B256::random();
+        let result = tree_state.lowest_buffered_ancestor(&non_existent_hash);
+
+        // Ensure that no ancestor is found
+        assert!(result.is_none());
+    }
+
+    #[test]
+    fn test_receipts_by_block_hash() {
+        // Initialize a tree state with some dummy data
+        let mut tree_state = TreeState::new(0, vec![], 5);
+
+        // Create a block with a random hash and receipts
+        let block_hash = B256::random();
+        let receipt1 = Receipt::default();
+        let receipt2 = Receipt::default();
+
+        let mut block = SealedBlockWithSenders::default();
+        block.block.header.set_hash(block_hash);
+
+        let receipts = vec![receipt1, receipt2];
+
+        // Create a chain with the block and its receipts
+        let chain = AppendableChain::new(Chain::new(
+            vec![block.clone()],
+            ExecutionOutcome { receipts: receipts.clone().into(), ..Default::default() },
+            Default::default(),
+        ));
+
+        // Insert the chain into the TreeState
+        tree_state.insert_chain(chain).unwrap();
+
+        // Test receipts_by_block_hash for the inserted block
+        let retrieved_receipts = tree_state.receipts_by_block_hash(block_hash);
+        assert!(retrieved_receipts.is_some());
+
+        // Check if the correct receipts are returned
+        let receipts_ref: Vec<&Receipt> = receipts.iter().collect();
+        assert_eq!(retrieved_receipts.unwrap(), receipts_ref);
+
+        // Test receipts_by_block_hash with a non-existent block hash
+        let non_existent_hash = B256::random();
+        let result = tree_state.receipts_by_block_hash(non_existent_hash);
+
+        // Ensure that no receipts are found
+        assert!(result.is_none());
+    }
+}
diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs
index be33e1fd79a8..a850e66521a6 100644
--- a/crates/chain-state/src/in_memory.rs
+++ b/crates/chain-state/src/in_memory.rs
@@ -4,7 +4,7 @@ use crate::{
     CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications,
     ChainInfoTracker, MemoryOverlayStateProvider,
 };
-use alloy_eips::BlockNumHash;
+use alloy_eips::{BlockHashOrNumber, BlockNumHash};
 use alloy_primitives::{map::HashMap, Address, TxHash, B256};
 use parking_lot::RwLock;
 use reth_chainspec::ChainInfo;
@@ -514,7 +514,7 @@ impl CanonicalInMemoryState {
         historical: StateProviderBox,
     ) -> MemoryOverlayStateProvider {
         let in_memory = if let Some(state) = self.state_by_hash(hash) {
-            state.chain().into_iter().map(|block_state| block_state.block()).collect()
+            state.chain().map(|block_state| block_state.block()).collect()
         } else {
             Vec::new()
         };
@@ -692,10 +692,8 @@ impl BlockState {
     /// Returns a vector of `BlockStates` representing the entire in memory chain.
     /// The block state order in the output vector is newest to oldest (highest to lowest),
     /// including self as the first element.
-    pub fn chain(&self) -> Vec<&Self> {
-        let mut chain = vec![self];
-        self.append_parent_chain(&mut chain);
-        chain
+    pub fn chain(&self) -> impl Iterator<Item = &Self> {
+        std::iter::successors(Some(self), |state| state.parent.as_deref())
     }
 
     /// Appends the parent chain of this [`BlockState`] to the given vector.
@@ -715,10 +713,59 @@ impl BlockState {
     /// This merges the state of all blocks that are part of the chain that this block is
     /// the head of. This includes all blocks that connect back to the canonical block on disk.
     pub fn state_provider(&self, historical: StateProviderBox) -> MemoryOverlayStateProvider {
-        let in_memory = self.chain().into_iter().map(|block_state| block_state.block()).collect();
+        let in_memory = self.chain().map(|block_state| block_state.block()).collect();
 
         MemoryOverlayStateProvider::new(historical, in_memory)
     }
+
+    /// Tries to find a block by [`BlockHashOrNumber`] in the chain ending at this block.
+    pub fn block_on_chain(&self, hash_or_num: BlockHashOrNumber) -> Option<&Self> {
+        self.chain().find(|block| match hash_or_num {
+            BlockHashOrNumber::Hash(hash) => block.hash() == hash,
+            BlockHashOrNumber::Number(number) => block.number() == number,
+        })
+    }
+
+    /// Tries to find a transaction by [`TxHash`] in the chain ending at this block.
+    pub fn transaction_on_chain(&self, hash: TxHash) -> Option<TransactionSigned> {
+        self.chain().find_map(|block_state| {
+            block_state
+                .block_ref()
+                .block()
+                .body
+                .transactions()
+                .find(|tx| tx.hash() == hash)
+                .cloned()
+        })
+    }
+
+    /// Tries to find a transaction with meta by [`TxHash`] in the chain ending at this block.
+    pub fn transaction_meta_on_chain(
+        &self,
+        tx_hash: TxHash,
+    ) -> Option<(TransactionSigned, TransactionMeta)> {
+        self.chain().find_map(|block_state| {
+            block_state
+                .block_ref()
+                .block()
+                .body
+                .transactions()
+                .enumerate()
+                .find(|(_, tx)| tx.hash() == tx_hash)
+                .map(|(index, tx)| {
+                    let meta = TransactionMeta {
+                        tx_hash,
+                        index: index as u64,
+                        block_hash: block_state.hash(),
+                        block_number: block_state.block_ref().block.number,
+                        base_fee: block_state.block_ref().block.header.base_fee_per_gas,
+                        timestamp: block_state.block_ref().block.timestamp,
+                        excess_blob_gas: block_state.block_ref().block.excess_blob_gas,
+                    };
+                    (tx.clone(), meta)
+                })
+        })
+    }
 }
 
 /// Represents an executed block stored in-memory.
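// Editor's aside: the `chain()` rewrite above replaces an eagerly collected
// `Vec<&Self>` with a lazy parent-chain walk built on `std::iter::successors`.
// A minimal, self-contained sketch of the same pattern (toy `Node` type, not
// reth's `BlockState`):
//
//     struct Node {
//         number: u64,
//         parent: Option<Box<Node>>,
//     }
//
//     impl Node {
//         /// Walks from `self` (newest) back to the root (oldest), lazily.
//         fn chain(&self) -> impl Iterator<Item = &Self> {
//             std::iter::successors(Some(self), |node| node.parent.as_deref())
//         }
//     }
//
//     fn main() {
//         let genesis = Node { number: 1, parent: None };
//         let mid = Node { number: 2, parent: Some(Box::new(genesis)) };
//         let tip = Node { number: 3, parent: Some(Box::new(mid)) };
//
//         // Callers that still need a Vec collect explicitly, as the updated tests do.
//         let numbers: Vec<u64> = tip.chain().map(|n| n.number).collect();
//         assert_eq!(numbers, vec![3, 2, 1]);
//     }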
@@ -1382,7 +1429,7 @@ mod tests {
         let parents = single_block.parent_state_chain();
         assert_eq!(parents.len(), 0);
 
-        let block_state_chain = single_block.chain();
+        let block_state_chain = single_block.chain().collect::<Vec<_>>();
         assert_eq!(block_state_chain.len(), 1);
         assert_eq!(block_state_chain[0].block().block.number, single_block_number);
         assert_eq!(block_state_chain[0].block().block.hash(), single_block_hash);
@@ -1393,18 +1440,18 @@ mod tests {
         let mut test_block_builder = TestBlockBuilder::default();
         let chain = create_mock_state_chain(&mut test_block_builder, 3);
 
-        let block_state_chain = chain[2].chain();
+        let block_state_chain = chain[2].chain().collect::<Vec<_>>();
         assert_eq!(block_state_chain.len(), 3);
         assert_eq!(block_state_chain[0].block().block.number, 3);
         assert_eq!(block_state_chain[1].block().block.number, 2);
         assert_eq!(block_state_chain[2].block().block.number, 1);
 
-        let block_state_chain = chain[1].chain();
+        let block_state_chain = chain[1].chain().collect::<Vec<_>>();
         assert_eq!(block_state_chain.len(), 2);
         assert_eq!(block_state_chain[0].block().block.number, 2);
         assert_eq!(block_state_chain[1].block().block.number, 1);
 
-        let block_state_chain = chain[0].chain();
+        let block_state_chain = chain[0].chain().collect::<Vec<_>>();
         assert_eq!(block_state_chain.len(), 1);
         assert_eq!(block_state_chain[0].block().block.number, 1);
     }
diff --git a/crates/chain-state/src/lib.rs b/crates/chain-state/src/lib.rs
index 50a103111071..bd9b43a59eae 100644
--- a/crates/chain-state/src/lib.rs
+++ b/crates/chain-state/src/lib.rs
@@ -22,7 +22,7 @@ pub use notifications::{
 };
 
 mod memory_overlay;
-pub use memory_overlay::MemoryOverlayStateProvider;
+pub use memory_overlay::{MemoryOverlayStateProvider, MemoryOverlayStateProviderRef};
 
 #[cfg(any(test, feature = "test-utils"))]
 /// Common test helpers
diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs
index eb125dad115e..ada0faee4907 100644
--- a/crates/chain-state/src/memory_overlay.rs
+++ b/crates/chain-state/src/memory_overlay.rs
@@ -7,14 +7,26 @@ use alloy_primitives::{
 use reth_errors::ProviderResult;
 use reth_primitives::{Account, Bytecode};
 use reth_storage_api::{
-    AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateProviderBox,
-    StateRootProvider, StorageRootProvider,
+    AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider,
+    StorageRootProvider,
 };
 use reth_trie::{
     updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput,
 };
 use std::sync::OnceLock;
 
+/// A state provider that stores references to in-memory blocks along with their state as well as a
+/// reference of the historical state provider for fallback lookups.
+#[allow(missing_debug_implementations)]
+pub struct MemoryOverlayStateProviderRef<'a> {
+    /// Historical state provider for state lookups that are not found in in-memory blocks.
+    pub(crate) historical: Box<dyn StateProvider + 'a>,
+    /// The collection of executed parent blocks. Expected order is newest to oldest.
+    pub(crate) in_memory: Vec<ExecutedBlock>,
+    /// Lazy-loaded in-memory trie data.
+    pub(crate) trie_state: OnceLock<MemoryOverlayTrieState>,
+}
+
 /// A state provider that stores references to in-memory blocks along with their state as well as
 /// the historical state provider for fallback lookups.
 #[allow(missing_debug_implementations)]
@@ -27,193 +39,200 @@ pub struct MemoryOverlayStateProvider {
     pub(crate) trie_state: OnceLock<MemoryOverlayTrieState>,
 }
 
-impl MemoryOverlayStateProvider {
-    /// Create new memory overlay state provider.
-    ///
-    /// ## Arguments
-    ///
-    /// - `in_memory` - the collection of executed ancestor blocks in reverse.
-    /// - `historical` - a historical state provider for the latest ancestor block stored in the
-    ///   database.
-    pub fn new(historical: Box<dyn StateProvider>, in_memory: Vec<ExecutedBlock>) -> Self {
-        Self { historical, in_memory, trie_state: OnceLock::new() }
-    }
-
-    /// Turn this state provider into a [`StateProviderBox`]
-    pub fn boxed(self) -> StateProviderBox {
-        Box::new(self)
-    }
-
-    /// Return lazy-loaded trie state aggregated from in-memory blocks.
-    fn trie_state(&self) -> &MemoryOverlayTrieState {
-        self.trie_state.get_or_init(|| {
-            let mut trie_state = MemoryOverlayTrieState::default();
-            for block in self.in_memory.iter().rev() {
-                trie_state.state.extend_ref(block.hashed_state.as_ref());
-                trie_state.nodes.extend_ref(block.trie.as_ref());
-            }
-            trie_state
-        })
-    }
-}
+macro_rules! impl_state_provider {
+    ([$($tokens:tt)*],$type:ty, $historical_type:ty) => {
+        impl $($tokens)* $type {
+            /// Create new memory overlay state provider.
+            ///
+            /// ## Arguments
+            ///
+            /// - `in_memory` - the collection of executed ancestor blocks in reverse.
+            /// - `historical` - a historical state provider for the latest ancestor block stored in the
+            ///   database.
+            pub fn new(historical: $historical_type, in_memory: Vec<ExecutedBlock>) -> Self {
+                Self { historical, in_memory, trie_state: OnceLock::new() }
+            }
+
+            /// Turn this state provider into a state provider
+            pub fn boxed(self) -> $historical_type {
+                Box::new(self)
+            }
 
-impl BlockHashReader for MemoryOverlayStateProvider {
-    fn block_hash(&self, number: BlockNumber) -> ProviderResult<Option<B256>> {
-        for block in &self.in_memory {
-            if block.block.number == number {
-                return Ok(Some(block.block.hash()))
+            /// Return lazy-loaded trie state aggregated from in-memory blocks.
+            fn trie_state(&self) -> &MemoryOverlayTrieState {
+                self.trie_state.get_or_init(|| {
+                    let mut trie_state = MemoryOverlayTrieState::default();
+                    for block in self.in_memory.iter().rev() {
+                        trie_state.state.extend_ref(block.hashed_state.as_ref());
+                        trie_state.nodes.extend_ref(block.trie.as_ref());
+                    }
+                    trie_state
+                })
             }
         }
 
-        self.historical.block_hash(number)
-    }
-
-    fn canonical_hashes_range(
-        &self,
-        start: BlockNumber,
-        end: BlockNumber,
-    ) -> ProviderResult<Vec<B256>> {
-        let range = start..end;
-        let mut earliest_block_number = None;
-        let mut in_memory_hashes = Vec::new();
-        for block in &self.in_memory {
-            if range.contains(&block.block.number) {
-                in_memory_hashes.insert(0, block.block.hash());
-                earliest_block_number = Some(block.block.number);
+        impl $($tokens)* BlockHashReader for $type {
+            fn block_hash(&self, number: BlockNumber) -> ProviderResult<Option<B256>> {
+                for block in &self.in_memory {
+                    if block.block.number == number {
+                        return Ok(Some(block.block.hash()))
+                    }
+                }
+
+                self.historical.block_hash(number)
+            }
+
+            fn canonical_hashes_range(
+                &self,
+                start: BlockNumber,
+                end: BlockNumber,
+            ) -> ProviderResult<Vec<B256>> {
+                let range = start..end;
+                let mut earliest_block_number = None;
+                let mut in_memory_hashes = Vec::new();
+                for block in &self.in_memory {
+                    if range.contains(&block.block.number) {
+                        in_memory_hashes.insert(0, block.block.hash());
+                        earliest_block_number = Some(block.block.number);
+                    }
+                }
+
+                let mut hashes =
+                    self.historical.canonical_hashes_range(start, earliest_block_number.unwrap_or(end))?;
+                hashes.append(&mut in_memory_hashes);
+                Ok(hashes)
             }
         }
 
-        let mut hashes =
-            self.historical.canonical_hashes_range(start, earliest_block_number.unwrap_or(end))?;
-        hashes.append(&mut in_memory_hashes);
-        Ok(hashes)
-    }
-}
+        impl $($tokens)* AccountReader for $type {
+            fn basic_account(&self, address: Address) -> ProviderResult<Option<Account>> {
+                for block in &self.in_memory {
+                    if let Some(account) = block.execution_output.account(&address) {
+                        return Ok(account)
+                    }
+                }
 
-impl AccountReader for MemoryOverlayStateProvider {
-    fn basic_account(&self, address: Address) -> ProviderResult<Option<Account>> {
-        for block in &self.in_memory {
-            if let Some(account) = block.execution_output.account(&address) {
-                return Ok(account)
+                self.historical.basic_account(address)
             }
         }
 
-        self.historical.basic_account(address)
-    }
-}
+        impl $($tokens)* StateRootProvider for $type {
+            fn state_root(&self, state: HashedPostState) -> ProviderResult<B256> {
+                self.state_root_from_nodes(TrieInput::from_state(state))
+            }
 
-impl StateRootProvider for MemoryOverlayStateProvider {
-    fn state_root(&self, state: HashedPostState) -> ProviderResult<B256> {
-        self.state_root_from_nodes(TrieInput::from_state(state))
-    }
-
-    fn state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult<B256> {
-        let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone();
-        input.prepend_cached(nodes, state);
-        self.historical.state_root_from_nodes(input)
-    }
-
-    fn state_root_with_updates(
-        &self,
-        state: HashedPostState,
-    ) -> ProviderResult<(B256, TrieUpdates)> {
-        self.state_root_from_nodes_with_updates(TrieInput::from_state(state))
-    }
-
-    fn state_root_from_nodes_with_updates(
-        &self,
-        mut input: TrieInput,
-    ) -> ProviderResult<(B256, TrieUpdates)> {
-        let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone();
-        input.prepend_cached(nodes, state);
-        self.historical.state_root_from_nodes_with_updates(input)
-    }
-}
+            fn state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult<B256> {
+                let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone();
+                input.prepend_cached(nodes, state);
+                self.historical.state_root_from_nodes(input)
+            }
 
-impl StorageRootProvider for MemoryOverlayStateProvider {
-    // TODO: Currently this does not reuse available in-memory trie nodes.
-    fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult<B256> {
-        let state = &self.trie_state().state;
-        let mut hashed_storage =
-            state.storages.get(&keccak256(address)).cloned().unwrap_or_default();
-        hashed_storage.extend(&storage);
-        self.historical.storage_root(address, hashed_storage)
-    }
-
-    // TODO: Currently this does not reuse available in-memory trie nodes.
-    fn storage_proof(
-        &self,
-        address: Address,
-        slot: B256,
-        storage: HashedStorage,
-    ) -> ProviderResult<reth_trie::StorageProof> {
-        let state = &self.trie_state().state;
-        let mut hashed_storage =
-            state.storages.get(&keccak256(address)).cloned().unwrap_or_default();
-        hashed_storage.extend(&storage);
-        self.historical.storage_proof(address, slot, hashed_storage)
-    }
-}
+            fn state_root_with_updates(
+                &self,
+                state: HashedPostState,
+            ) -> ProviderResult<(B256, TrieUpdates)> {
+                self.state_root_from_nodes_with_updates(TrieInput::from_state(state))
+            }
 
-impl StateProofProvider for MemoryOverlayStateProvider {
-    fn proof(
-        &self,
-        mut input: TrieInput,
-        address: Address,
-        slots: &[B256],
-    ) -> ProviderResult<AccountProof> {
-        let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone();
-        input.prepend_cached(nodes, state);
-        self.historical.proof(input, address, slots)
-    }
-
-    fn multiproof(
-        &self,
-        mut input: TrieInput,
-        targets: HashMap<B256, HashSet<B256>>,
-    ) -> ProviderResult<MultiProof> {
-        let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone();
-        input.prepend_cached(nodes, state);
-        self.historical.multiproof(input, targets)
-    }
-
-    fn witness(
-        &self,
-        mut input: TrieInput,
-        target: HashedPostState,
-    ) -> ProviderResult<HashMap<B256, Bytes>> {
-        let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone();
-        input.prepend_cached(nodes, state);
-        self.historical.witness(input, target)
-    }
-}
+            fn state_root_from_nodes_with_updates(
+                &self,
+                mut input: TrieInput,
+            ) -> ProviderResult<(B256, TrieUpdates)> {
+                let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone();
+                input.prepend_cached(nodes, state);
+                self.historical.state_root_from_nodes_with_updates(input)
+            }
+        }
+
+        impl $($tokens)* StorageRootProvider for $type {
+            // TODO: Currently this does not reuse available in-memory trie nodes.
+            fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult<B256> {
+                let state = &self.trie_state().state;
+                let mut hashed_storage =
+                    state.storages.get(&keccak256(address)).cloned().unwrap_or_default();
+                hashed_storage.extend(&storage);
+                self.historical.storage_root(address, hashed_storage)
+            }
 
-impl StateProvider for MemoryOverlayStateProvider {
-    fn storage(
-        &self,
-        address: Address,
-        storage_key: StorageKey,
-    ) -> ProviderResult<Option<StorageValue>> {
-        for block in &self.in_memory {
-            if let Some(value) = block.execution_output.storage(&address, storage_key.into()) {
-                return Ok(Some(value))
+            // TODO: Currently this does not reuse available in-memory trie nodes.
+            fn storage_proof(
+                &self,
+                address: Address,
+                slot: B256,
+                storage: HashedStorage,
+            ) -> ProviderResult<reth_trie::StorageProof> {
+                let state = &self.trie_state().state;
+                let mut hashed_storage =
+                    state.storages.get(&keccak256(address)).cloned().unwrap_or_default();
+                hashed_storage.extend(&storage);
+                self.historical.storage_proof(address, slot, hashed_storage)
+            }
+        }
 
-        self.historical.storage(address, storage_key)
-    }
+        impl $($tokens)* StateProofProvider for $type {
+            fn proof(
+                &self,
+                mut input: TrieInput,
+                address: Address,
+                slots: &[B256],
+            ) -> ProviderResult<AccountProof> {
+                let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone();
+                input.prepend_cached(nodes, state);
+                self.historical.proof(input, address, slots)
+            }
+
+            fn multiproof(
+                &self,
+                mut input: TrieInput,
+                targets: HashMap<B256, HashSet<B256>>,
+            ) -> ProviderResult<MultiProof> {
+                let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone();
+                input.prepend_cached(nodes, state);
+                self.historical.multiproof(input, targets)
+            }
 
-    fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult<Option<Bytecode>> {
-        for block in &self.in_memory {
-            if let Some(contract) = block.execution_output.bytecode(&code_hash) {
-                return Ok(Some(contract))
+            fn witness(
+                &self,
+                mut input: TrieInput,
+                target: HashedPostState,
+            ) -> ProviderResult<HashMap<B256, Bytes>> {
+                let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone();
+                input.prepend_cached(nodes, state);
+                self.historical.witness(input, target)
+            }
+        }
 
-        self.historical.bytecode_by_hash(code_hash)
-    }
+        impl $($tokens)* StateProvider for $type {
+            fn storage(
+                &self,
+                address: Address,
+                storage_key: StorageKey,
+            ) -> ProviderResult<Option<StorageValue>> {
+                for block in &self.in_memory {
+                    if let Some(value) = block.execution_output.storage(&address, storage_key.into()) {
+                        return Ok(Some(value))
+                    }
+                }
+
+                self.historical.storage(address, storage_key)
+            }
+
+            fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult<Option<Bytecode>> {
+                for block in &self.in_memory {
+                    if let Some(contract) = block.execution_output.bytecode(&code_hash) {
+                        return Ok(Some(contract))
+                    }
+                }
+
+                self.historical.bytecode_by_hash(code_hash)
+            }
+        }
+    };
+}
+
+impl_state_provider!([], MemoryOverlayStateProvider, Box<dyn StateProvider>);
+impl_state_provider!([<'a>], MemoryOverlayStateProviderRef<'a>, Box<dyn StateProvider + 'a>);
+
 /// The collection of data necessary for trie-related operations for [`MemoryOverlayStateProvider`].
 #[derive(Clone, Default, Debug)]
 pub(crate) struct MemoryOverlayTrieState {
diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs
index a7f45727dd8e..bebf7ca26028 100644
--- a/crates/chainspec/src/spec.rs
+++ b/crates/chainspec/src/spec.rs
@@ -9,6 +9,7 @@ use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256};
 use derive_more::From;
 
 use alloy_consensus::constants::{DEV_GENESIS_HASH, MAINNET_GENESIS_HASH};
+use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT;
 use reth_ethereum_forks::{
     ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition,
     ForkFilter, ForkFilterKey, ForkHash, ForkId, Hardfork, Hardforks, Head, DEV_HARDFORKS,
@@ -18,10 +19,7 @@ use reth_network_peers::{
     sepolia_nodes, NodeRecord,
 };
 use reth_primitives_traits::{
-    constants::{
-        EIP1559_INITIAL_BASE_FEE, ETHEREUM_BLOCK_GAS_LIMIT, HOLESKY_GENESIS_HASH,
-        SEPOLIA_GENESIS_HASH,
-    },
+    constants::{EIP1559_INITIAL_BASE_FEE, HOLESKY_GENESIS_HASH, SEPOLIA_GENESIS_HASH},
     Header, SealedHeader,
 };
 use reth_trie_common::root::state_root_ref_unhashed;
@@ -617,6 +615,7 @@ impl From<Genesis> for ChainSpec {
             (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time),
             (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time),
             (EthereumHardfork::Prague.boxed(), genesis.config.prague_time),
+            (EthereumHardfork::Osaka.boxed(), genesis.config.osaka_time),
         ];
 
         let mut time_hardforks = time_hardfork_opts
@@ -864,6 +863,13 @@ impl ChainSpecBuilder {
         self
     }
 
+    /// Enable Osaka at genesis.
+    pub fn osaka_activated(mut self) -> Self {
+        self = self.prague_activated();
+        self.hardforks.insert(EthereumHardfork::Osaka, ForkCondition::Timestamp(0));
+        self
+    }
+
     /// Build the resulting [`ChainSpec`].
     ///
     /// # Panics
diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml
index e307859dfd86..6f4b1008f29f 100644
--- a/crates/cli/commands/Cargo.toml
+++ b/crates/cli/commands/Cargo.toml
@@ -88,10 +88,15 @@ reth-discv4.workspace = true
 
 [features]
 default = []
-dev = [
+arbitrary = [
 	"dep:proptest",
 	"dep:arbitrary",
 	"dep:proptest-arbitrary-interop",
 	"reth-primitives/arbitrary",
 	"reth-db-api/arbitrary",
+	"reth-eth-wire/arbitrary",
+	"reth-db/arbitrary",
+	"reth-chainspec/arbitrary",
+	"alloy-eips/arbitrary",
+	"alloy-primitives/arbitrary",
 ]
diff --git a/crates/cli/commands/src/lib.rs b/crates/cli/commands/src/lib.rs
index 33a38ddbc010..166ea438fb97 100644
--- a/crates/cli/commands/src/lib.rs
+++ b/crates/cli/commands/src/lib.rs
@@ -20,7 +20,7 @@ pub mod p2p;
 pub mod prune;
 pub mod recover;
 pub mod stage;
-#[cfg(feature = "dev")]
+#[cfg(feature = "arbitrary")]
 pub mod test_vectors;
 
 pub use node::NodeCommand;
diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml
index dd1e339319b6..716e4290f539 100644
--- a/crates/consensus/beacon/Cargo.toml
+++ b/crates/consensus/beacon/Cargo.toml
@@ -31,9 +31,9 @@ reth-node-types.workspace = true
 reth-chainspec = { workspace = true, optional = true }
 
 # ethereum
-alloy-eips.workspace = true
 alloy-primitives.workspace = true
 alloy-rpc-types-engine.workspace = true
+alloy-eips.workspace = true
 
 # async
 tokio = { workspace = true, features = ["sync"] }
diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs
index bb5c4dee174c..4aafc6e07c1c 100644
--- a/crates/consensus/beacon/src/engine/handle.rs
+++ b/crates/consensus/beacon/src/engine/handle.rs
@@ -4,9 +4,8 @@ use crate::{
     engine::message::OnForkChoiceUpdated, BeaconConsensusEngineEvent, BeaconEngineMessage,
     BeaconForkChoiceUpdateError, BeaconOnNewPayloadError,
 };
-use alloy_eips::eip7685::Requests;
 use alloy_rpc_types_engine::{
-    CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus,
+    ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus,
 };
 use futures::TryFutureExt;
 use reth_engine_primitives::EngineTypes;
@@ -47,18 +46,10 @@ where
     pub async fn new_payload(
         &self,
         payload: ExecutionPayload,
-        cancun_fields: Option<CancunPayloadFields>,
-        execution_requests: Option<Requests>,
+        sidecar: ExecutionPayloadSidecar,
     ) -> Result<PayloadStatus, BeaconOnNewPayloadError> {
         let (tx, rx) = oneshot::channel();
-        // HACK(onbjerg): We should have a pectra payload fields struct, this is just a temporary
-        // workaround.
-        let _ = self.to_engine.send(BeaconEngineMessage::NewPayload {
-            payload,
-            cancun_fields,
-            execution_requests,
-            tx,
-        });
+        let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, sidecar, tx });
 
         rx.await.map_err(|_| BeaconOnNewPayloadError::EngineUnavailable)?
     }
diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs
index 56328f03db0b..e33decbd848f 100644
--- a/crates/consensus/beacon/src/engine/message.rs
+++ b/crates/consensus/beacon/src/engine/message.rs
@@ -1,7 +1,6 @@
 use crate::engine::{error::BeaconOnNewPayloadError, forkchoice::ForkchoiceStatus};
-use alloy_eips::eip7685::Requests;
 use alloy_rpc_types_engine::{
-    CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceState,
+    ExecutionPayload, ExecutionPayloadSidecar, ForkChoiceUpdateResult, ForkchoiceState,
     ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum,
 };
 use futures::{future::Either, FutureExt};
@@ -145,12 +144,9 @@ pub enum BeaconEngineMessage<Engine: EngineTypes> {
     NewPayload {
         /// The execution payload received by Engine API.
         payload: ExecutionPayload,
-        /// The cancun-related newPayload fields, if any.
-        cancun_fields: Option<CancunPayloadFields>,
-        // HACK(onbjerg): We should have a pectra payload fields struct, this is just a temporary
-        // workaround.
-        /// The pectra EIP-7685 execution requests.
-        execution_requests: Option<Requests>,
+        /// The execution payload sidecar with additional version-specific fields received by
+        /// engine API.
+        sidecar: ExecutionPayloadSidecar,
         /// The sender for returning payload status result.
         tx: oneshot::Sender<Result<PayloadStatus, BeaconOnNewPayloadError>>,
     },
diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs
index cff648b2843b..2363b9078408 100644
--- a/crates/consensus/beacon/src/engine/mod.rs
+++ b/crates/consensus/beacon/src/engine/mod.rs
@@ -1,7 +1,7 @@
-use alloy_eips::eip7685::Requests;
+use alloy_eips::merge::EPOCH_SLOTS;
 use alloy_primitives::{BlockNumber, B256};
 use alloy_rpc_types_engine::{
-    CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum,
+    ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, PayloadStatusEnum,
     PayloadValidationError,
 };
 use futures::{stream::BoxStream, Future, StreamExt};
@@ -20,9 +20,7 @@ use reth_node_types::NodeTypesWithEngine;
 use reth_payload_builder::PayloadBuilderHandle;
 use reth_payload_primitives::{PayloadAttributes, PayloadBuilder, PayloadBuilderAttributes};
 use reth_payload_validator::ExecutionPayloadValidator;
-use reth_primitives::{
-    constants::EPOCH_SLOTS, BlockNumHash, Head, Header, SealedBlock, SealedHeader,
-};
+use reth_primitives::{BlockNumHash, Head, Header, SealedBlock, SealedHeader};
 use reth_provider::{
     providers::ProviderNodeTypes, BlockIdReader, BlockReader, BlockSource, CanonChainTracker,
     ChainSpecProvider, ProviderError, StageCheckpointReader,
@@ -1081,14 +1079,11 @@ where
     ///
     /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and
    /// returns an error if an internal error occurred.
-    #[instrument(level = "trace", skip(self, payload, cancun_fields), fields(block_hash = ?payload.block_hash(), block_number = %payload.block_number(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")]
+    #[instrument(level = "trace", skip(self, payload, sidecar), fields(block_hash = ?payload.block_hash(), block_number = %payload.block_number(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")]
     fn on_new_payload(
         &mut self,
         payload: ExecutionPayload,
-        cancun_fields: Option<CancunPayloadFields>,
-        // HACK(onbjerg): We should have a pectra payload fields struct, this is just a temporary
-        // workaround.
-        execution_requests: Option<Requests>,
+        sidecar: ExecutionPayloadSidecar,
     ) -> Result<Either<PayloadStatus, SealedBlock>, BeaconOnNewPayloadError> {
         self.metrics.new_payload_messages.increment(1);
 
@@ -1118,11 +1113,7 @@ where
         //
         // This validation **MUST** be instantly run in all cases even during active sync process.
let parent_hash = payload.parent_hash(); - let block = match self.payload_validator.ensure_well_formed_payload( - payload, - cancun_fields.into(), - execution_requests, - ) { + let block = match self.payload_validator.ensure_well_formed_payload(payload, sidecar) { Ok(block) => block, Err(error) => { error!(target: "consensus::engine", %error, "Invalid payload"); @@ -1867,13 +1858,8 @@ where BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { this.on_forkchoice_updated(state, payload_attrs, tx); } - BeaconEngineMessage::NewPayload { - payload, - cancun_fields, - execution_requests, - tx, - } => { - match this.on_new_payload(payload, cancun_fields, execution_requests) { + BeaconEngineMessage::NewPayload { payload, sidecar, tx } => { + match this.on_new_payload(payload, sidecar) { Ok(Either::Right(block)) => { this.set_blockchain_tree_action( BlockchainTreeAction::InsertNewPayload { block, tx }, @@ -2061,7 +2047,12 @@ mod tests { assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); // consensus engine is still idle because no FCUs were received - let _ = env.send_new_payload(block_to_payload_v1(SealedBlock::default()), None).await; + let _ = env + .send_new_payload( + block_to_payload_v1(SealedBlock::default()), + ExecutionPayloadSidecar::none(), + ) + .await; assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); @@ -2626,7 +2617,7 @@ mod tests { 0, BlockParams { ommers_count: Some(0), ..Default::default() }, )), - None, + ExecutionPayloadSidecar::none(), ) .await; @@ -2641,7 +2632,7 @@ mod tests { 1, BlockParams { ommers_count: Some(0), ..Default::default() }, )), - None, + ExecutionPayloadSidecar::none(), ) .await; @@ -2719,7 +2710,10 @@ mod tests { // Send new payload let result = env - .send_new_payload_retry_on_syncing(block_to_payload_v1(block2.clone()), None) + .send_new_payload_retry_on_syncing( + block_to_payload_v1(block2.clone()), + ExecutionPayloadSidecar::none(), + ) .await .unwrap(); @@ -2854,7 +2848,9 @@ mod tests { 2, BlockParams { parent: Some(parent), ommers_count: Some(0), ..Default::default() }, ); - let res = env.send_new_payload(block_to_payload_v1(block), None).await; + let res = env + .send_new_payload(block_to_payload_v1(block), ExecutionPayloadSidecar::none()) + .await; let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); @@ -2924,7 +2920,10 @@ mod tests { // Send new payload let result = env - .send_new_payload_retry_on_syncing(block_to_payload_v1(block2.clone()), None) + .send_new_payload_retry_on_syncing( + block_to_payload_v1(block2.clone()), + ExecutionPayloadSidecar::none(), + ) .await .unwrap(); diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 7e9e1ec6b26d..912f0a871bf2 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -6,7 +6,7 @@ use crate::{ }; use alloy_primitives::{BlockNumber, Sealable, B256}; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, + ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, @@ -68,9 +68,9 @@ impl TestEnv { pub async fn send_new_payload>( &self, payload: T, - cancun_fields: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result { - 
diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs
index 7e9e1ec6b26d..912f0a871bf2 100644
--- a/crates/consensus/beacon/src/engine/test_utils.rs
+++ b/crates/consensus/beacon/src/engine/test_utils.rs
@@ -6,7 +6,7 @@ use crate::{
 };
 use alloy_primitives::{BlockNumber, Sealable, B256};
 use alloy_rpc_types_engine::{
-    CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus,
+    ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus,
 };
 use reth_blockchain_tree::{
     config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree,
@@ -68,9 +68,9 @@ impl TestEnv {
     pub async fn send_new_payload<T: Into<ExecutionPayload>>(
         &self,
         payload: T,
-        cancun_fields: Option<CancunPayloadFields>,
+        sidecar: ExecutionPayloadSidecar,
     ) -> Result<PayloadStatus, BeaconOnNewPayloadError> {
-        self.engine_handle.new_payload(payload.into(), cancun_fields, None).await
+        self.engine_handle.new_payload(payload.into(), sidecar).await
     }

     /// Sends the `ExecutionPayload` message to the consensus engine and retries if the engine
@@ -78,11 +78,11 @@ impl TestEnv {
     pub async fn send_new_payload_retry_on_syncing<T: Into<ExecutionPayload>>(
         &self,
         payload: T,
-        cancun_fields: Option<CancunPayloadFields>,
+        sidecar: ExecutionPayloadSidecar,
     ) -> Result<PayloadStatus, BeaconOnNewPayloadError> {
         let payload: ExecutionPayload = payload.into();
         loop {
-            let result = self.send_new_payload(payload.clone(), cancun_fields.clone()).await?;
+            let result = self.send_new_payload(payload.clone(), sidecar.clone()).await?;
             if !result.is_syncing() {
                 return Ok(result)
             }
diff --git a/crates/consensus/common/src/calc.rs b/crates/consensus/common/src/calc.rs
index 3f519332fec0..e30c5b715f5a 100644
--- a/crates/consensus/common/src/calc.rs
+++ b/crates/consensus/common/src/calc.rs
@@ -1,6 +1,6 @@
+use alloy_consensus::constants::ETH_TO_WEI;
 use alloy_primitives::{BlockNumber, U256};
 use reth_chainspec::{EthereumHardfork, Hardforks};
-use reth_primitives::constants::ETH_TO_WEI;

 /// Calculates the base block reward.
 ///
@@ -57,7 +57,7 @@ pub fn base_block_reward_pre_merge(chain_spec: impl Hardforks, block_number: Blo
 /// ```
 /// # use reth_chainspec::MAINNET;
 /// # use reth_consensus_common::calc::{base_block_reward, block_reward};
-/// # use reth_primitives::constants::ETH_TO_WEI;
+/// # use alloy_consensus::constants::ETH_TO_WEI;
 /// # use alloy_primitives::U256;
 /// #
 /// // This is block 126 on mainnet.
diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs
index 946d9af57536..a5e4f56ac429 100644
--- a/crates/e2e-test-utils/src/payload.rs
+++ b/crates/e2e-test-utils/src/payload.rs
@@ -1,20 +1,20 @@
 use futures_util::StreamExt;
-use reth::api::{BuiltPayload, EngineTypes, PayloadBuilderAttributes};
+use reth::api::{BuiltPayload, PayloadBuilderAttributes};
 use reth_payload_builder::{PayloadBuilderHandle, PayloadId};
-use reth_payload_primitives::{Events, PayloadBuilder};
+use reth_payload_primitives::{Events, PayloadBuilder, PayloadTypes};
 use tokio_stream::wrappers::BroadcastStream;

 /// Helper for payload operations
 #[derive(Debug)]
-pub struct PayloadTestContext<E: EngineTypes> {
-    pub payload_event_stream: BroadcastStream<Events<E>>,
-    payload_builder: PayloadBuilderHandle<E>,
+pub struct PayloadTestContext<T: PayloadTypes> {
+    pub payload_event_stream: BroadcastStream<Events<T>>,
+    payload_builder: PayloadBuilderHandle<T>,
     pub timestamp: u64,
 }

-impl<E: EngineTypes> PayloadTestContext<E> {
+impl<T: PayloadTypes> PayloadTestContext<T> {
     /// Creates a new payload helper
-    pub async fn new(payload_builder: PayloadBuilderHandle<E>) -> eyre::Result<Self> {
+    pub async fn new(payload_builder: PayloadBuilderHandle<T>) -> eyre::Result<Self> {
         let payload_events = payload_builder.subscribe().await?;
         let payload_event_stream = payload_events.into_stream();
         // Cancun timestamp
@@ -24,10 +24,10 @@ impl PayloadTestContext {
     /// Creates a new payload job from static attributes
     pub async fn new_payload(
         &mut self,
-        attributes_generator: impl Fn(u64) -> E::PayloadBuilderAttributes,
-    ) -> eyre::Result<E::PayloadBuilderAttributes> {
+        attributes_generator: impl Fn(u64) -> T::PayloadBuilderAttributes,
+    ) -> eyre::Result<T::PayloadBuilderAttributes> {
         self.timestamp += 1;
-        let attributes: E::PayloadBuilderAttributes = attributes_generator(self.timestamp);
+        let attributes = attributes_generator(self.timestamp);
         self.payload_builder.send_new_payload(attributes.clone()).await.unwrap()?;
         Ok(attributes)
     }
@@ -35,10 +35,10 @@ impl PayloadTestContext {
     /// Asserts that the next event is a payload attributes event
     pub async fn expect_attr_event(
         &mut self,
-        attrs: E::PayloadBuilderAttributes,
+        attrs: T::PayloadBuilderAttributes,
     ) -> eyre::Result<()> {
         let first_event = self.payload_event_stream.next().await.unwrap()?;
-        if let reth::payload::Events::Attributes(attr) = first_event {
+        if let Events::Attributes(attr) = first_event {
             assert_eq!(attrs.timestamp(), attr.timestamp());
         } else {
             panic!("Expect first event as payload attributes.")
@@ -59,9 +59,9 @@ impl PayloadTestContext {
     }

     /// Expects the next event to be a built payload event or panics
-    pub async fn expect_built_payload(&mut self) -> eyre::Result<E::BuiltPayload> {
+    pub async fn expect_built_payload(&mut self) -> eyre::Result<T::BuiltPayload> {
         let second_event = self.payload_event_stream.next().await.unwrap()?;
-        if let reth::payload::Events::BuiltPayload(payload) = second_event {
+        if let Events::BuiltPayload(payload) = second_event {
             Ok(payload)
         } else {
             panic!("Expect a built payload event.");
diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs
index 552cbd047764..706ddc43de3f 100644
--- a/crates/engine/local/src/miner.rs
+++ b/crates/engine/local/src/miner.rs
@@ -1,7 +1,7 @@
 //! Contains the implementation of the mining mode for the local engine.

 use alloy_primitives::{TxHash, B256};
-use alloy_rpc_types_engine::{CancunPayloadFields, ForkchoiceState};
+use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar, ForkchoiceState};
 use eyre::OptionExt;
 use futures_util::{stream::Fuse, StreamExt};
 use reth_beacon_consensus::BeaconEngineMessage;
@@ -221,9 +221,10 @@ where
         let (tx, rx) = oneshot::channel();
         self.to_engine.send(BeaconEngineMessage::NewPayload {
             payload: block_to_payload(payload.block().clone()),
-            cancun_fields,
-            // todo: prague
-            execution_requests: None,
+            // todo: prague support
+            sidecar: cancun_fields
+                .map(ExecutionPayloadSidecar::v3)
+                .unwrap_or_else(ExecutionPayloadSidecar::none),
             tx,
         })?;
diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs
index 026476a8260f..198438d457f1 100644
--- a/crates/engine/service/src/service.rs
+++ b/crates/engine/service/src/service.rs
@@ -151,7 +151,9 @@ mod tests {
     use reth_exex_types::FinishedExExHeight;
     use reth_network_p2p::test_utils::TestFullBlockClient;
     use reth_primitives::SealedHeader;
-    use reth_provider::test_utils::create_test_provider_factory_with_chain_spec;
+    use reth_provider::{
+        providers::BlockchainProvider2, test_utils::create_test_provider_factory_with_chain_spec,
+    };
     use reth_prune::Pruner;
     use reth_tasks::TokioTaskExecutor;
     use std::sync::Arc;
diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs
index c1571ed82178..914121adce51 100644
--- a/crates/engine/tree/src/engine.rs
+++ b/crates/engine/tree/src/engine.rs
@@ -113,9 +113,11 @@ where
             }

             // advance the downloader
-            if let Poll::Ready(DownloadOutcome::Blocks(blocks)) = self.downloader.poll(cx) {
-                // delegate the downloaded blocks to the handler
-                self.handler.on_event(FromEngine::DownloadedBlocks(blocks));
+            if let Poll::Ready(outcome) = self.downloader.poll(cx) {
+                if let DownloadOutcome::Blocks(blocks) = outcome {
+                    // delegate the downloaded blocks to the handler
+                    self.handler.on_event(FromEngine::DownloadedBlocks(blocks));
+                }
                 continue
             }
diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs
index 25c1f0ed7030..f4650a047b4f 100644
--- a/crates/engine/tree/src/persistence.rs
+++ b/crates/engine/tree/src/persistence.rs
@@ -77,20 +77,22 @@ impl PersistenceService {
                 }
                 PersistenceAction::SaveBlocks(blocks, sender) => {
                     let result = self.on_save_blocks(blocks)?;
-                    if let Some(ref num_hash) = result {
+                    let result_number = result.map(|r| r.number);
+
+                    // we ignore the error because the caller may or may not care about the result
+                    let _ = sender.send(result);
+
+                    if let Some(block_number) = result_number {
                         // send new sync metrics based on saved blocks
                         let _ = self
                             .sync_metrics_tx
-                            .send(MetricEvent::SyncHeight { height: num_hash.number });
-                    }
-                    // we ignore the error because the caller may or may not care about the result
-                    let _ = sender.send(result);
-                }
-                PersistenceAction::PruneBefore(block_num, sender) => {
-                    let res = self.prune_before(block_num)?;
+                            .send(MetricEvent::SyncHeight { height: block_number });

-                    // we ignore the error because the caller may or may not care about the result
-                    let _ = sender.send(res);
+                        if self.pruner.is_pruning_needed(block_number) {
+                            // We log `PrunerOutput` inside the `Pruner`
+                            let _ = self.prune_before(block_number)?;
+                        }
+                    }
                 }
                 PersistenceAction::SaveFinalizedBlock(finalized_block) => {
                     let provider = self.provider.database_provider_rw()?;
@@ -175,10 +177,6 @@ pub enum PersistenceAction {
     /// static files.
     RemoveBlocksAbove(u64, oneshot::Sender<Option<BlockNumHash>>),

-    /// Prune associated block data before the given block number, according to already-configured
-    /// prune modes.
-    PruneBefore(u64, oneshot::Sender<PrunerOutput>),
-
     /// Update the persisted finalized block on disk
     SaveFinalizedBlock(u64),
@@ -279,18 +277,6 @@ impl PersistenceHandle {
     ) -> Result<(), SendError<PersistenceAction>> {
         self.send_action(PersistenceAction::RemoveBlocksAbove(block_num, tx))
     }
-
-    /// Tells the persistence service to remove block data before the given hash, according to the
-    /// configured prune config.
-    ///
-    /// The resulting [`PrunerOutput`] is returned in the receiver end of the sender argument.
-    pub fn prune_before(
-        &self,
-        block_num: u64,
-        tx: oneshot::Sender<PrunerOutput>,
-    ) -> Result<(), SendError<PersistenceAction>> {
-        self.send_action(PersistenceAction::PruneBefore(block_num, tx))
-    }
 }

 #[cfg(test)]
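With `PruneBefore` removed, pruning now rides along with `SaveBlocks`: the service answers the caller first, then prunes when the saved height crosses the pruner's threshold. A self-contained toy of that control flow (the `Pruner` here is a stand-in for the real one, which also exposes `is_pruning_needed`):

```rust
use std::sync::mpsc;

// Toy version of the `SaveBlocks` arm above: the caller is answered first,
// then metrics/pruning run off the saved block number.
struct Pruner {
    min_block_interval: u64,
    last_pruned: u64,
}

impl Pruner {
    fn is_pruning_needed(&self, tip: u64) -> bool {
        tip.saturating_sub(self.last_pruned) >= self.min_block_interval
    }
}

fn on_save_blocks(saved: Option<u64>, reply: mpsc::Sender<Option<u64>>, pruner: &mut Pruner) {
    // we ignore the error because the caller may or may not care about the result
    let _ = reply.send(saved);
    if let Some(block_number) = saved {
        // sync-height metrics update would go here
        if pruner.is_pruning_needed(block_number) {
            pruner.last_pruned = block_number; // stands in for prune_before(block_number)
        }
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let mut pruner = Pruner { min_block_interval: 5, last_pruned: 0 };
    on_save_blocks(Some(10), tx, &mut pruner);
    assert_eq!(rx.recv().unwrap(), Some(10));
    assert_eq!(pruner.last_pruned, 10);
}
```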
diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs
index a2abd3f531df..555cf89164f9 100644
--- a/crates/engine/tree/src/tree/mod.rs
+++ b/crates/engine/tree/src/tree/mod.rs
@@ -10,7 +10,7 @@ use alloy_primitives::{
     BlockNumber, B256, U256,
 };
 use alloy_rpc_types_engine::{
-    CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum,
+    ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, PayloadStatusEnum,
     PayloadValidationError,
 };
 use reth_beacon_consensus::{
@@ -70,7 +70,6 @@ use crate::{
     engine::{EngineApiKind, EngineApiRequest},
     tree::metrics::EngineApiMetrics,
 };
-use alloy_eips::eip7685::Requests;
 pub use config::TreeConfig;
 pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook};
 pub use persistence_state::PersistenceState;
@@ -259,6 +258,7 @@ impl TreeState {
                 }
             }
         }
+        debug!(target: "engine::tree", ?upper_bound, ?last_persisted_hash, "Removed canonical blocks from the tree");
     }

     /// Removes all blocks that are below the finalized block, as well as removing non-canonical
@@ -721,8 +721,7 @@ where
     fn on_new_payload(
         &mut self,
         payload: ExecutionPayload,
-        cancun_fields: Option<CancunPayloadFields>,
-        execution_requests: Option<Requests>,
+        sidecar: ExecutionPayloadSidecar,
     ) -> Result<TreeOutcome<PayloadStatus>, InsertBlockFatalError> {
         trace!(target: "engine::tree", "invoked new payload");
         self.metrics.engine.new_payload_messages.increment(1);
@@ -753,11 +752,7 @@ where
         //
         // This validation **MUST** be instantly run in all cases even during active sync process.
         let parent_hash = payload.parent_hash();
-        let block = match self.payload_validator.ensure_well_formed_payload(
-            payload,
-            cancun_fields.into(),
-            execution_requests,
-        ) {
+        let block = match self.payload_validator.ensure_well_formed_payload(payload, sidecar) {
             Ok(block) => block,
             Err(error) => {
                 error!(target: "engine::tree", %error, "Invalid payload");
@@ -1240,14 +1235,8 @@ where
                         error!(target: "engine::tree", "Failed to send event: {err:?}");
                     }
                 }
-                BeaconEngineMessage::NewPayload {
-                    payload,
-                    cancun_fields,
-                    execution_requests,
-                    tx,
-                } => {
-                    let output =
-                        self.on_new_payload(payload, cancun_fields, execution_requests);
+                BeaconEngineMessage::NewPayload { payload, sidecar, tx } => {
+                    let output = self.on_new_payload(payload, sidecar);
                     if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(|e| {
                         reth_beacon_consensus::BeaconOnNewPayloadError::Internal(
                             Box::new(e),
@@ -1593,7 +1582,7 @@ where
     /// Returns an error if we failed to fetch the state from the database.
     fn state_provider(&self, hash: B256) -> ProviderResult<Option<StateProviderBox>> {
         if let Some((historical, blocks)) = self.state.tree_state.blocks_by_hash(hash) {
-            trace!(target: "engine::tree", %hash, "found canonical state for block in memory");
+            debug!(target: "engine::tree", %hash, %historical, "found canonical state for block in memory");
             // the block leads back to the canonical chain
             let historical = self.provider.state_by_block_hash(historical)?;
             return Ok(Some(Box::new(MemoryOverlayStateProvider::new(historical, blocks))))
@@ -1601,13 +1590,13 @@ where

         // the hash could belong to an unknown block or a persisted block
         if let Some(header) = self.provider.header(&hash)? {
-            trace!(target: "engine::tree", %hash, number = %header.number, "found canonical state for block in database");
+            debug!(target: "engine::tree", %hash, number = %header.number, "found canonical state for block in database");
             // the block is known and persisted
             let historical = self.provider.state_by_block_hash(hash)?;
             return Ok(Some(historical))
         }

-        trace!(target: "engine::tree", %hash, "no canonical state found for block");
+        debug!(target: "engine::tree", %hash, "no canonical state found for block");
         Ok(None)
     }
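The `state_provider` lookup the logging change above instruments has a fixed order: in-memory tree state first (un-persisted blocks overlaid on a historical anchor), then the database, then "unknown". A self-contained toy of that decision, with hash maps standing in for the tree state and the database:

```rust
use std::collections::HashMap;

// Toy model of the two-tier lookup; the real code returns boxed state
// providers (a MemoryOverlayStateProvider or a plain historical provider).
#[derive(Debug, PartialEq)]
enum FoundState {
    InMemory { anchor: u64, overlay_len: usize },
    OnDisk,
}

fn state_provider(
    hash: u64,
    tree_state: &HashMap<u64, (u64, usize)>, // hash -> (historical anchor, in-memory chain length)
    database: &HashMap<u64, ()>,             // persisted blocks by hash
) -> Option<FoundState> {
    if let Some((historical, blocks)) = tree_state.get(&hash) {
        // the block leads back to the canonical chain through in-memory blocks
        return Some(FoundState::InMemory { anchor: *historical, overlay_len: *blocks });
    }
    if database.contains_key(&hash) {
        // the block is known and persisted
        return Some(FoundState::OnDisk);
    }
    None
}

fn main() {
    let tree_state = HashMap::from([(5, (3, 2))]);
    let database = HashMap::from([(3, ())]);
    assert_eq!(
        state_provider(5, &tree_state, &database),
        Some(FoundState::InMemory { anchor: 3, overlay_len: 2 })
    );
    assert_eq!(state_provider(3, &tree_state, &database), Some(FoundState::OnDisk));
    assert_eq!(state_provider(9, &tree_state, &database), None);
}
```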
@@ -2137,7 +2126,8 @@ where
         &mut self,
         block: SealedBlockWithSenders,
     ) -> Result<InsertPayloadOk2, InsertBlockErrorKind> {
-        debug!(target: "engine::tree", block=?block.num_hash(), "Inserting new block into tree");
+        debug!(target: "engine::tree", block=?block.num_hash(), parent = ?block.parent_hash, state_root = ?block.state_root, "Inserting new block into tree");
+
         if self.block_by_hash(block.hash())?.is_some() {
             return Ok(InsertPayloadOk2::AlreadySeen(BlockStatus2::Valid))
         }
@@ -2206,7 +2196,7 @@ where

         let hashed_state = HashedPostState::from_bundle_state(&output.state.state);

-        trace!(target: "engine::tree", block=?BlockNumHash::new(block_number, block_hash), "Calculating block state root");
+        trace!(target: "engine::tree", block=?sealed_block.num_hash(), "Calculating block state root");
         let root_time = Instant::now();
         let mut state_root_result = None;
@@ -2232,7 +2222,7 @@ where
         let (state_root, trie_output) = if let Some(result) = state_root_result {
             result
         } else {
-            debug!(target: "engine::tree", persistence_in_progress, "Failed to compute state root in parallel");
+            debug!(target: "engine::tree", block=?sealed_block.num_hash(), persistence_in_progress, "Failed to compute state root in parallel");
             state_provider.state_root_with_updates(hashed_state.clone())?
         };
@@ -2252,7 +2242,7 @@ where
         let root_elapsed = root_time.elapsed();
         self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64());
-        debug!(target: "engine::tree", ?root_elapsed, ?block_number, "Calculated state root");
+        debug!(target: "engine::tree", ?root_elapsed, block=?sealed_block.num_hash(), "Calculated state root");

         let executed = ExecutedBlock {
             block: sealed_block.clone(),
@@ -2301,6 +2291,7 @@ where
         let mut input = TrieInput::default();

         if let Some((historical, blocks)) = self.state.tree_state.blocks_by_hash(parent_hash) {
+            debug!(target: "engine::tree", %parent_hash, %historical, "Calculating state root in parallel, parent found in memory");
             // Retrieve revert state for historical block.
             let revert_state = consistent_view.revert_state(historical)?;
             input.append(revert_state);
@@ -2311,6 +2302,7 @@ where
             }
         } else {
             // The block attaches to canonical persisted parent.
+            debug!(target: "engine::tree", %parent_hash, "Calculating state root in parallel, parent found on disk");
             let revert_state = consistent_view.revert_state(parent_hash)?;
             input.append(revert_state);
         }
@@ -2581,6 +2573,7 @@ mod tests {
     use crate::persistence::PersistenceAction;
     use alloy_primitives::{Bytes, Sealable};
     use alloy_rlp::Decodable;
+    use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar};
     use assert_matches::assert_matches;
     use reth_beacon_consensus::{EthBeaconConsensus, ForkchoiceStatus};
     use reth_chain_state::{test_utils::TestBlockBuilder, BlockState};
@@ -2858,11 +2851,10 @@ mod tests {
             self.tree
                 .on_new_payload(
                     payload.into(),
-                    Some(CancunPayloadFields {
+                    ExecutionPayloadSidecar::v3(CancunPayloadFields {
                         parent_beacon_block_root: block.parent_beacon_block_root.unwrap(),
                         versioned_hashes: vec![],
                     }),
-                    None,
                 )
                 .unwrap();
         }
@@ -3125,7 +3117,10 @@ mod tests {
         let mut test_harness = TestHarness::new(HOLESKY.clone());

-        let outcome = test_harness.tree.on_new_payload(payload.into(), None, None).unwrap();
+        let outcome = test_harness
+            .tree
+            .on_new_payload(payload.into(), ExecutionPayloadSidecar::none())
+            .unwrap();
         assert!(outcome.outcome.is_syncing());

         // ensure block is buffered
@@ -3169,8 +3164,7 @@ mod tests {
             .on_engine_message(FromEngine::Request(
                 BeaconEngineMessage::NewPayload {
                     payload: payload.clone().into(),
-                    cancun_fields: None,
-                    execution_requests: None,
+                    sidecar: ExecutionPayloadSidecar::none(),
                     tx,
                 }
                 .into(),
diff --git a/crates/engine/util/src/engine_store.rs b/crates/engine/util/src/engine_store.rs
index de193bf3bbe0..85c5e126fa44 100644
--- a/crates/engine/util/src/engine_store.rs
+++ b/crates/engine/util/src/engine_store.rs
@@ -1,6 +1,6 @@
 //! Stores engine API messages to disk for later inspection and replay.

-use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayload, ForkchoiceState};
+use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState};
 use futures::{Stream, StreamExt};
 use reth_beacon_consensus::BeaconEngineMessage;
 use reth_engine_primitives::EngineTypes;
@@ -30,8 +30,9 @@ pub enum StoredEngineApiMessage {
     NewPayload {
         /// The [`ExecutionPayload`] sent in the persisted call.
         payload: ExecutionPayload,
-        /// The Cancun-specific fields sent in the persisted call, if any.
-        cancun_fields: Option<CancunPayloadFields>,
+        /// The execution payload sidecar with additional version-specific fields received by
+        /// the engine API.
+        sidecar: ExecutionPayloadSidecar,
     },
 }
@@ -73,20 +74,14 @@ impl EngineMessageStore {
                     })?,
                 )?;
             }
-            // todo(onbjerg): execution requests
-            BeaconEngineMessage::NewPayload {
-                payload,
-                cancun_fields,
-                execution_requests: _,
-                tx: _tx,
-            } => {
+            BeaconEngineMessage::NewPayload { payload, sidecar, tx: _tx } => {
                 let filename = format!("{}-new_payload-{}.json", timestamp, payload.block_hash());
                 fs::write(
                     self.path.join(filename),
                     serde_json::to_vec(
                         &StoredEngineApiMessage::<Engine::PayloadAttributes>::NewPayload {
                             payload: payload.clone(),
-                            cancun_fields: cancun_fields.clone(),
+                            sidecar: sidecar.clone(),
                         },
                     )?,
                 )?;
             }
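Since messages are written as one JSON file per call with a leading capture timestamp, reading them back for replay is mostly filename parsing. A minimal reader sketch, not part of this diff, relying only on the `{timestamp}-new_payload-{block_hash}.json` naming shown above:

```rust
use serde::de::DeserializeOwned;
use std::{fs, path::Path};

/// Hypothetical helper: collects stored engine messages and sorts them by the
/// leading timestamp so they can be replayed in capture order.
fn read_messages<T: DeserializeOwned>(dir: &Path) -> eyre::Result<Vec<(u64, T)>> {
    let mut out = Vec::new();
    for entry in fs::read_dir(dir)? {
        let path = entry?.path();
        let Some(name) = path.file_stem().and_then(|s| s.to_str()) else { continue };
        // the leading filename component is the capture timestamp
        let Some(ts) = name.split('-').next().and_then(|t| t.parse().ok()) else { continue };
        out.push((ts, serde_json::from_slice(&fs::read(&path)?)?));
    }
    out.sort_by_key(|(ts, _)| *ts);
    Ok(out)
}
```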
diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs
index 85216e32fad0..d109fb9e94ae 100644
--- a/crates/engine/util/src/reorg.rs
+++ b/crates/engine/util/src/reorg.rs
@@ -1,10 +1,9 @@
 //! Stream wrapper that simulates reorgs.

 use alloy_consensus::Transaction;
-use alloy_eips::eip7685::Requests;
 use alloy_primitives::U256;
 use alloy_rpc_types_engine::{
-    CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus,
+    CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus,
 };
 use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt};
 use itertools::Either;
@@ -150,12 +149,7 @@ where
         let next = ready!(this.stream.poll_next_unpin(cx));
         let item = match (next, &this.last_forkchoice_state) {
             (
-                Some(BeaconEngineMessage::NewPayload {
-                    payload,
-                    cancun_fields,
-                    execution_requests,
-                    tx,
-                }),
+                Some(BeaconEngineMessage::NewPayload { payload, sidecar, tx }),
                 Some(last_forkchoice_state),
             ) if this.forkchoice_states_forwarded > this.frequency &&
                     // Only enter reorg state if new payload attaches to current head.
@@ -170,29 +164,26 @@ where
                 // forkchoice state. We will rely on CL to reorg us back to canonical chain.
                 // TODO: This is an expensive blocking operation, ideally it's spawned as a task
                 // so that the stream could yield the control back.
-                let (reorg_payload, reorg_cancun_fields, reorg_execution_requests) =
-                    match create_reorg_head(
-                        this.provider,
-                        this.evm_config,
-                        this.payload_validator,
-                        *this.depth,
-                        payload.clone(),
-                        cancun_fields.clone(),
-                        execution_requests.clone(),
-                    ) {
-                        Ok(result) => result,
-                        Err(error) => {
-                            error!(target: "engine::stream::reorg", %error, "Error attempting to create reorg head");
-                            // Forward the payload and attempt to create reorg on top of
-                            // the next one
-                            return Poll::Ready(Some(BeaconEngineMessage::NewPayload {
-                                payload,
-                                cancun_fields,
-                                execution_requests,
-                                tx,
-                            }))
-                        }
-                    };
+                let (reorg_payload, reorg_sidecar) = match create_reorg_head(
+                    this.provider,
+                    this.evm_config,
+                    this.payload_validator,
+                    *this.depth,
+                    payload.clone(),
+                    sidecar.clone(),
+                ) {
+                    Ok(result) => result,
+                    Err(error) => {
+                        error!(target: "engine::stream::reorg", %error, "Error attempting to create reorg head");
+                        // Forward the payload and attempt to create reorg on top of
+                        // the next one
+                        return Poll::Ready(Some(BeaconEngineMessage::NewPayload {
+                            payload,
+                            sidecar,
+                            tx,
+                        }))
+                    }
+                };
                 let reorg_forkchoice_state = ForkchoiceState {
                     finalized_block_hash: last_forkchoice_state.finalized_block_hash,
                     safe_block_hash: last_forkchoice_state.safe_block_hash,
@@ -208,17 +199,11 @@ where

                 let queue = VecDeque::from([
                     // Current payload
-                    BeaconEngineMessage::NewPayload {
-                        payload,
-                        cancun_fields,
-                        execution_requests,
-                        tx,
-                    },
+                    BeaconEngineMessage::NewPayload { payload, sidecar, tx },
                     // Reorg payload
                     BeaconEngineMessage::NewPayload {
                         payload: reorg_payload,
-                        cancun_fields: reorg_cancun_fields,
-                        execution_requests: reorg_execution_requests,
+                        sidecar: reorg_sidecar,
                         tx: reorg_payload_tx,
                     },
                     // Reorg forkchoice state
@@ -252,9 +237,8 @@ fn create_reorg_head(
     payload_validator: &ExecutionPayloadValidator,
     mut depth: usize,
     next_payload: ExecutionPayload,
-    next_cancun_fields: Option<CancunPayloadFields>,
-    next_execution_requests: Option<Requests>,
-) -> RethResult<(ExecutionPayload, Option<CancunPayloadFields>, Option<Requests>)>
+    next_sidecar: ExecutionPayloadSidecar,
+) -> RethResult<(ExecutionPayload, ExecutionPayloadSidecar)>
 where
     Provider: BlockReader + StateProviderFactory,
     Evm: ConfigureEvm<Header = Header>,
@@ -264,11 +248,7 @@ where

     // Ensure next payload is valid.
     let next_block = payload_validator
-        .ensure_well_formed_payload(
-            next_payload,
-            next_cancun_fields.into(),
-            next_execution_requests,
-        )
+        .ensure_well_formed_payload(next_payload, next_sidecar)
         .map_err(RethError::msg)?;

     // Fetch reorg target block depending on its depth and its parent.
@@ -439,11 +419,16 @@ where
     Ok((
         block_to_payload(reorg_block),
+        // todo(onbjerg): how do we support execution requests?
         reorg_target
             .header
             .parent_beacon_block_root
-            .map(|root| CancunPayloadFields { parent_beacon_block_root: root, versioned_hashes }),
-        // todo(prague)
-        None,
+            .map(|root| {
+                ExecutionPayloadSidecar::v3(CancunPayloadFields {
+                    parent_beacon_block_root: root,
+                    versioned_hashes,
+                })
+            })
+            .unwrap_or_else(ExecutionPayloadSidecar::none),
     ))
 }
diff --git a/crates/engine/util/src/skip_new_payload.rs b/crates/engine/util/src/skip_new_payload.rs
index 47c48282eef6..16f2e98197c9 100644
--- a/crates/engine/util/src/skip_new_payload.rs
+++ b/crates/engine/util/src/skip_new_payload.rs
@@ -41,19 +41,14 @@ where
         loop {
             let next = ready!(this.stream.poll_next_unpin(cx));
             let item = match next {
-                Some(BeaconEngineMessage::NewPayload {
-                    payload,
-                    cancun_fields,
-                    execution_requests,
-                    tx,
-                }) => {
+                Some(BeaconEngineMessage::NewPayload { payload, sidecar, tx }) => {
                     if this.skipped < this.threshold {
                         *this.skipped += 1;
                         tracing::warn!(
                             target: "engine::stream::skip_new_payload",
                             block_number = payload.block_number(),
                             block_hash = %payload.block_hash(),
-                            ?cancun_fields,
+                            ?sidecar,
                             threshold=this.threshold,
                             skipped=this.skipped, "Skipping new payload"
                         );
                         continue
                     }
                     *this.skipped = 0;
-                    Some(BeaconEngineMessage::NewPayload {
-                        payload,
-                        cancun_fields,
-                        execution_requests,
-                        tx,
-                    })
+                    Some(BeaconEngineMessage::NewPayload { payload, sidecar, tx })
                 }
                 next => next,
             };
diff --git a/crates/ethereum-forks/src/hardfork/ethereum.rs b/crates/ethereum-forks/src/hardfork/ethereum.rs
index 3d85b54a9608..4e13b001786c 100644
--- a/crates/ethereum-forks/src/hardfork/ethereum.rs
+++ b/crates/ethereum-forks/src/hardfork/ethereum.rs
@@ -49,6 +49,8 @@ hardfork!(
         Cancun,
         /// Prague:
         Prague,
+        /// Osaka:
+        Osaka,
     }
 );
diff --git a/crates/ethereum-forks/src/hardforks/ethereum.rs b/crates/ethereum-forks/src/hardforks/ethereum.rs
index 3069367158fa..086d2d3b46ec 100644
--- a/crates/ethereum-forks/src/hardforks/ethereum.rs
+++ b/crates/ethereum-forks/src/hardforks/ethereum.rs
@@ -21,6 +21,11 @@ pub trait EthereumHardforks: Hardforks {
         self.is_fork_active_at_timestamp(EthereumHardfork::Prague, timestamp)
     }

+    /// Convenience method to check if [`EthereumHardfork::Osaka`] is active at a given timestamp.
+    fn is_osaka_active_at_timestamp(&self, timestamp: u64) -> bool {
+        self.is_fork_active_at_timestamp(EthereumHardfork::Osaka, timestamp)
+    }
+
     /// Convenience method to check if [`EthereumHardfork::Byzantium`] is active at a given block
     /// number.
     fn is_byzantium_active_at_block(&self, block_number: u64) -> bool {
diff --git a/crates/ethereum/cli/src/chainspec.rs b/crates/ethereum/cli/src/chainspec.rs
index cbcce9f69f61..a60d70179421 100644
--- a/crates/ethereum/cli/src/chainspec.rs
+++ b/crates/ethereum/cli/src/chainspec.rs
@@ -89,7 +89,8 @@ mod tests {
             "terminalTotalDifficulty": 0,
             "shanghaiTime": 0,
             "cancunTime": 0,
-            "pragueTime": 0
+            "pragueTime": 0,
+            "osakaTime": 0
         }
     }"#;

@@ -97,5 +98,6 @@ mod tests {
         assert!(spec.is_shanghai_active_at_timestamp(0));
         assert!(spec.is_cancun_active_at_timestamp(0));
         assert!(spec.is_prague_active_at_timestamp(0));
+        assert!(spec.is_osaka_active_at_timestamp(0));
     }
 }
diff --git a/crates/ethereum/evm/src/config.rs b/crates/ethereum/evm/src/config.rs
index e5253307b33b..9d6b6d8796ce 100644
--- a/crates/ethereum/evm/src/config.rs
+++ b/crates/ethereum/evm/src/config.rs
@@ -11,7 +11,9 @@ pub fn revm_spec_by_timestamp_after_merge(
     chain_spec: &ChainSpec,
     timestamp: u64,
 ) -> revm_primitives::SpecId {
-    if chain_spec.is_prague_active_at_timestamp(timestamp) {
+    if chain_spec.is_osaka_active_at_timestamp(timestamp) {
+        revm_primitives::OSAKA
+    } else if chain_spec.is_prague_active_at_timestamp(timestamp) {
        revm_primitives::PRAGUE
     } else if chain_spec.is_cancun_active_at_timestamp(timestamp) {
        revm_primitives::CANCUN
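The two changes above connect: a spec whose genesis sets `osakaTime` (as in the CLI test) should now short-circuit the Prague/Cancun branches. A hedged sketch, with the helper's module path assumed from this diff's file layout:

```rust
use reth_chainspec::ChainSpec;

// Hypothetical check mirroring the dispatch above: if Osaka is active at
// `timestamp`, the new first branch wins and `OSAKA` is returned.
fn resolves_to_osaka(chain_spec: &ChainSpec, timestamp: u64) -> bool {
    use reth_evm_ethereum::config::revm_spec_by_timestamp_after_merge;
    revm_spec_by_timestamp_after_merge(chain_spec, timestamp) == revm_primitives::OSAKA
}
```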
diff --git a/crates/ethereum/evm/src/eip6110.rs b/crates/ethereum/evm/src/eip6110.rs
index 4cf1c6ae9da5..d57002081955 100644
--- a/crates/ethereum/evm/src/eip6110.rs
+++ b/crates/ethereum/evm/src/eip6110.rs
@@ -1,12 +1,18 @@
 //! EIP-6110 deposit requests parsing
 use alloc::{string::ToString, vec::Vec};
 use alloy_eips::eip6110::MAINNET_DEPOSIT_CONTRACT_ADDRESS;
-use alloy_primitives::{Bytes, Log};
+use alloy_primitives::{Address, Bytes, Log};
 use alloy_sol_types::{sol, SolEvent};
-use reth_chainspec::ChainSpec;
+use reth_chainspec::{ChainSpec, EthChainSpec};
 use reth_evm::execute::BlockValidationError;
 use reth_primitives::Receipt;

+/// The size of a deposit request in bytes. While the event fields emit
+/// bytestrings, those bytestrings are fixed size. The fields are: 48-byte
+/// pubkey, 32-byte withdrawal credentials, 8-byte amount, 96-byte signature,
+/// and 8-byte index.
+const DEPOSIT_BYTES_SIZE: usize = 48 + 32 + 8 + 96 + 8;
+
 sol! {
     #[allow(missing_docs)]
     event DepositEvent(
@@ -18,53 +24,79 @@ sol! {
     );
 }

-/// Parse [deposit contract](https://etherscan.io/address/0x00000000219ab540356cbb839cbe05303d7705fa)
-/// (address is from the passed [`ChainSpec`]) deposits from receipts, and return them as a
-/// [vector](Vec) of (requests)[`alloy_eips::eip7685::Requests`].
-pub fn parse_deposits_from_receipts<'a, I>(
-    chain_spec: &ChainSpec,
-    receipts: I,
-) -> Result<Bytes, BlockValidationError>
-where
-    I: IntoIterator<Item = &'a Receipt>,
-{
-    let mut requests = Vec::new();
-    let deposit_contract_address = chain_spec
-        .deposit_contract
-        .as_ref()
-        .map_or(MAINNET_DEPOSIT_CONTRACT_ADDRESS, |contract| contract.address);
-    let logs: Vec<_> = receipts
-        .into_iter()
-        .flat_map(|receipt| &receipt.logs)
-        // No need to filter for topic because there's only one event and that's the Deposit
-        // event in the deposit contract.
-        .filter(|log| log.address == deposit_contract_address)
-        .collect();
+/// Accumulate a deposit request from a log containing a [`DepositEvent`].
+pub fn accumulate_deposit_from_log(log: &Log<DepositEvent>, out: &mut Vec<u8>) {
+    out.reserve(DEPOSIT_BYTES_SIZE);
+    out.extend_from_slice(log.pubkey.as_ref());
+    out.extend_from_slice(log.withdrawal_credentials.as_ref());
+    out.extend_from_slice(log.amount.as_ref());
+    out.extend_from_slice(log.signature.as_ref());
+    out.extend_from_slice(log.index.as_ref());
+}

-    for log in &logs {
+/// Accumulate deposits from an iterator of logs.
+pub fn accumulate_deposits_from_logs<'a>(
+    address: Address,
+    logs: impl IntoIterator<Item = &'a Log>,
+    out: &mut Vec<u8>,
+) -> Result<(), BlockValidationError> {
+    logs.into_iter().filter(|log| log.address == address).try_for_each(|log| {
+        // We assume that the log is valid because it was emitted by the
+        // deposit contract.
         let decoded_log =
             DepositEvent::decode_log(log, false).map_err(|err: alloy_sol_types::Error| {
                 BlockValidationError::DepositRequestDecode(err.to_string())
             })?;
-        requests.extend(parse_deposit_from_log(&decoded_log).as_ref())
-    }
+        accumulate_deposit_from_log(&decoded_log, out);
+        Ok(())
+    })
+}
+
+/// Accumulate deposits from a receipt. Iterates over the logs in the receipt
+/// and accumulates the deposit request bytestrings.
+pub fn accumulate_deposits_from_receipt(
+    address: Address,
+    receipt: &Receipt,
+    out: &mut Vec<u8>,
+) -> Result<(), BlockValidationError> {
+    accumulate_deposits_from_logs(address, &receipt.logs, out)
+}

-    Ok(requests.into())
+/// Accumulate deposits from a list of receipts. Iterates over the logs in the
+/// receipts and accumulates the deposit request bytestrings.
+pub fn accumulate_deposits_from_receipts<'a, I>(
+    address: Address,
+    receipts: I,
+    out: &mut Vec<u8>,
+) -> Result<(), BlockValidationError>
+where
+    I: IntoIterator<Item = &'a Receipt>,
+{
+    receipts
+        .into_iter()
+        .try_for_each(|receipt| accumulate_deposits_from_receipt(address, receipt, out))
 }

-fn parse_deposit_from_log(log: &Log<DepositEvent>) -> Bytes {
-    // SAFETY: These `expect` https://github.com/ethereum/consensus-specs/blob/5f48840f4d768bf0e0a8156a3ed06ec333589007/solidity_deposit_contract/deposit_contract.sol#L107-L110
-    // are safe because the `DepositEvent` is the only event in the deposit contract and the length
-    // checks are done there.
-    [
-        log.pubkey.as_ref(),
-        log.withdrawal_credentials.as_ref(),
-        log.amount.as_ref(),
-        log.signature.as_ref(),
-        log.index.as_ref(),
-    ]
-    .concat()
-    .into()
+/// Find deposit logs in a list of receipts, and return the concatenated
+/// deposit request bytestring.
+///
+/// The address of the deposit contract is taken from the chain spec, and
+/// defaults to [`MAINNET_DEPOSIT_CONTRACT_ADDRESS`] if not specified in
+/// the chain spec.
+pub fn parse_deposits_from_receipts<'a, I>(
+    chainspec: &ChainSpec,
+    receipts: I,
+) -> Result<Bytes, BlockValidationError>
+where
+    I: IntoIterator<Item = &'a Receipt>,
+{
+    let mut out = Vec::new();
+    accumulate_deposits_from_receipts(
+        chainspec.deposit_contract().map(|c| c.address).unwrap_or(MAINNET_DEPOSIT_CONTRACT_ADDRESS),
+        receipts,
+        &mut out,
+    )?;
+    Ok(out.into())
 }

 #[cfg(test)]
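Receipts with no deposit-contract logs contribute nothing, so the accumulated request bytestring comes back empty. A hedged usage sketch of the new API, with the function's module path assumed from this diff's file layout:

```rust
use reth_chainspec::MAINNET;
use reth_evm_ethereum::eip6110::parse_deposits_from_receipts;
use reth_primitives::Receipt;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Default receipts carry no logs, so no deposit data is accumulated.
    let receipts = vec![Receipt::default(), Receipt::default()];
    let data = parse_deposits_from_receipts(&MAINNET, receipts.iter())?;
    assert!(data.is_empty());
    Ok(())
}
```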
diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs
index 185f351dd9f3..2d84ccd3e381 100644
--- a/crates/ethereum/evm/src/execute.rs
+++ b/crates/ethereum/evm/src/execute.rs
@@ -317,6 +317,5 @@ mod tests {
-    use reth_primitives::{
-        constants::ETH_TO_WEI, public_key_to_address, Account, Block, BlockBody, Transaction,
-    };
+    use reth_execution_types::BlockExecutionOutput;
+    use reth_primitives::{public_key_to_address, Account, Block, BlockBody, Transaction};
     use reth_revm::{
         database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState,
     };
diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs
index 3df46b4856f6..8fa693abe626 100644
--- a/crates/ethereum/node/src/node.rs
+++ b/crates/ethereum/node/src/node.rs
@@ -314,7 +314,7 @@ where
     ) -> eyre::Result<Self::Network> {
         let network = ctx.network_builder().await?;
         let handle = ctx.start_network(network, pool);
-
+        info!(target: "reth::cli", enode=%handle.local_node_record(), "P2P networking initialized");
         Ok(handle)
     }
 }
diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml
index 6a1e1fe0d727..bb506f7a4fe3 100644
--- a/crates/evm/Cargo.toml
+++ b/crates/evm/Cargo.toml
@@ -39,6 +39,7 @@ parking_lot = { workspace = true, optional = true }
 [dev-dependencies]
 parking_lot.workspace = true
 reth-ethereum-forks.workspace = true
+alloy-consensus.workspace = true

 [features]
 default = ["std"]
diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs
index d3ed2913ea3b..65f96ff56387 100644
--- a/crates/evm/execution-types/src/chain.rs
+++ b/crates/evm/execution-types/src/chain.rs
@@ -228,11 +228,11 @@ impl Chain {
     ///
     /// Attachment includes block number, block hash, transaction hash and transaction index.
     pub fn receipts_with_attachment(&self) -> Vec<BlockReceipts> {
-        let mut receipt_attach = Vec::new();
+        let mut receipt_attach = Vec::with_capacity(self.blocks().len());
         for ((block_num, block), receipts) in
             self.blocks().iter().zip(self.execution_outcome.receipts().iter())
         {
-            let mut tx_receipts = Vec::new();
+            let mut tx_receipts = Vec::with_capacity(receipts.len());
             for (tx, receipt) in block.body.transactions().zip(receipts.iter()) {
                 tx_receipts.push((
                     tx.hash(),
diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs
index d7c8590eea85..677a15dfa1b4 100644
--- a/crates/evm/src/execute.rs
+++ b/crates/evm/src/execute.rs
@@ -6,6 +6,7 @@ pub use reth_execution_errors::{
 };
 pub use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome};
 pub use reth_storage_errors::provider::ProviderError;
+use revm::db::states::bundle_state::BundleRetention;

 use crate::system_calls::OnStateHook;
 use alloc::{boxed::Box, vec::Vec};
@@ -166,8 +167,20 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static {
         DB: Database<Error: Into<ProviderError> + Display>;
 }

+/// Helper type for the output of executing a block.
+#[derive(Debug, Clone)]
+pub struct ExecuteOutput {
+    /// Receipts obtained after executing a block.
+    pub receipts: Vec<Receipt>,
+    /// Cumulative gas used in the block execution.
+    pub gas_used: u64,
+}
+
 /// Defines the strategy for executing a single block.
-pub trait BlockExecutionStrategy<DB> {
+pub trait BlockExecutionStrategy<DB>
+where
+    DB: Database,
+{
     /// The error type returned by this strategy's methods.
     type Error: From<ProviderError> + core::error::Error;

     /// Applies any necessary changes before executing the block's transactions.
@@ -183,7 +196,7 @@ pub trait BlockExecutionStrategy {
         &mut self,
         block: &BlockWithSenders,
         total_difficulty: U256,
-    ) -> Result<(Vec<Receipt>, u64), Self::Error>;
+    ) -> Result<ExecuteOutput, Self::Error>;

     /// Applies any necessary changes after executing the block's transactions.
     fn apply_post_execution_changes(
@@ -200,18 +213,23 @@ pub trait BlockExecutionStrategy {
     fn state_mut(&mut self) -> &mut State<DB>;

     /// Sets a hook to be called after each state change during execution.
-    fn with_state_hook(&mut self, hook: Option<Box<dyn OnStateHook>>);
+    fn with_state_hook(&mut self, _hook: Option<Box<dyn OnStateHook>>) {}

     /// Returns the final bundle state.
-    fn finish(&mut self) -> BundleState;
+    fn finish(&mut self) -> BundleState {
+        self.state_mut().merge_transitions(BundleRetention::Reverts);
+        self.state_mut().take_bundle()
+    }

     /// Validate a block with regard to execution results.
     fn validate_block_post_execution(
         &self,
-        block: &BlockWithSenders,
-        receipts: &[Receipt],
-        requests: &Requests,
-    ) -> Result<(), ConsensusError>;
+        _block: &BlockWithSenders,
+        _receipts: &[Receipt],
+        _requests: &Requests,
+    ) -> Result<(), ConsensusError> {
+        Ok(())
+    }
 }
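The point of the defaulted `with_state_hook`, `finish`, and `validate_block_post_execution` is that simple strategies now only implement the execution phases and state accessors. A self-contained toy of that shape (toy types stand in for the reth ones; not the real trait):

```rust
// Toy illustration: provided methods mirror the defaults added above, so an
// implementor only supplies `execute_transactions` and `state_mut`.
struct State {
    changes: Vec<String>,
}

struct ExecuteOutput {
    receipts: Vec<String>,
    gas_used: u64,
}

trait BlockExecutionStrategy {
    fn execute_transactions(&mut self) -> ExecuteOutput;
    fn state_mut(&mut self) -> &mut State;

    // provided: default no-op validation
    fn validate_block_post_execution(&self, _receipts: &[String]) -> Result<(), String> {
        Ok(())
    }

    // provided: default finish that drains the accumulated state
    fn finish(&mut self) -> Vec<String> {
        std::mem::take(&mut self.state_mut().changes)
    }
}

struct Noop {
    state: State,
}

impl BlockExecutionStrategy for Noop {
    fn execute_transactions(&mut self) -> ExecuteOutput {
        self.state_mut().changes.push("tx".to_string());
        ExecuteOutput { receipts: vec!["receipt".to_string()], gas_used: 0 }
    }
    fn state_mut(&mut self) -> &mut State {
        &mut self.state
    }
}

fn main() {
    let mut s = Noop { state: State { changes: Vec::new() } };
    let out = s.execute_transactions();
    s.validate_block_post_execution(&out.receipts).unwrap();
    assert_eq!(out.gas_used, 0);
    assert_eq!(s.finish(), vec!["tx".to_string()]);
}
```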
 /// A strategy factory that can create block execution strategies.
@@ -284,6 +302,7 @@ where
 pub struct BasicBlockExecutor<S, DB>
 where
     S: BlockExecutionStrategy<DB>,
+    DB: Database,
 {
     /// Block execution strategy.
     pub(crate) strategy: S,
@@ -293,6 +312,7 @@ where
 impl<S, DB> BasicBlockExecutor<S, DB>
 where
     S: BlockExecutionStrategy<DB>,
+    DB: Database,
 {
     /// Creates a new `BasicBlockExecutor` with the given strategy.
     pub const fn new(strategy: S) -> Self {
@@ -313,7 +333,8 @@ where
         let BlockExecutionInput { block, total_difficulty } = input;

         self.strategy.apply_pre_execution_changes(block, total_difficulty)?;
-        let (receipts, gas_used) = self.strategy.execute_transactions(block, total_difficulty)?;
+        let ExecuteOutput { receipts, gas_used } =
+            self.strategy.execute_transactions(block, total_difficulty)?;
         let requests =
             self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?;
         let state = self.strategy.finish();
@@ -332,7 +353,8 @@ where
         let BlockExecutionInput { block, total_difficulty } = input;

         self.strategy.apply_pre_execution_changes(block, total_difficulty)?;
-        let (receipts, gas_used) = self.strategy.execute_transactions(block, total_difficulty)?;
+        let ExecuteOutput { receipts, gas_used } =
+            self.strategy.execute_transactions(block, total_difficulty)?;
         let requests =
             self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?;
@@ -356,7 +378,8 @@ where
         self.strategy.with_state_hook(Some(Box::new(state_hook)));

         self.strategy.apply_pre_execution_changes(block, total_difficulty)?;
-        let (receipts, gas_used) = self.strategy.execute_transactions(block, total_difficulty)?;
+        let ExecuteOutput { receipts, gas_used } =
+            self.strategy.execute_transactions(block, total_difficulty)?;
         let requests =
             self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?;
@@ -372,6 +395,7 @@ where
 pub struct BasicBatchExecutor<S, DB>
 where
     S: BlockExecutionStrategy<DB>,
+    DB: Database,
 {
     /// Batch execution strategy.
     pub(crate) strategy: S,
@@ -383,6 +407,7 @@ where
 impl<S, DB> BasicBatchExecutor<S, DB>
 where
     S: BlockExecutionStrategy<DB>,
+    DB: Database,
 {
     /// Creates a new `BasicBatchExecutor` with the given strategy.
     pub const fn new(strategy: S, batch_record: BlockBatchRecord) -> Self {
@@ -407,7 +432,8 @@ where
         }

         self.strategy.apply_pre_execution_changes(block, total_difficulty)?;
-        let (receipts, _gas_used) = self.strategy.execute_transactions(block, total_difficulty)?;
+        let ExecuteOutput { receipts, .. } =
+            self.strategy.execute_transactions(block, total_difficulty)?;
         let requests =
             self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?;
@@ -545,14 +571,14 @@ mod tests {
         _chain_spec: Arc<ChainSpec>,
         _evm_config: EvmConfig,
         state: State<DB>,
-        execute_transactions_result: (Vec<Receipt>, u64),
+        execute_transactions_result: ExecuteOutput,
         apply_post_execution_changes_result: Requests,
         finish_result: BundleState,
     }

     #[derive(Clone)]
     struct TestExecutorStrategyFactory {
-        execute_transactions_result: (Vec<Receipt>, u64),
+        execute_transactions_result: ExecuteOutput,
         apply_post_execution_changes_result: Requests,
         finish_result: BundleState,
     }
@@ -584,7 +610,10 @@ mod tests {
         }
     }

-    impl<DB> BlockExecutionStrategy<DB> for TestExecutorStrategy<DB, TestEvmConfig> {
+    impl<DB> BlockExecutionStrategy<DB> for TestExecutorStrategy<DB, TestEvmConfig>
+    where
+        DB: Database,
+    {
         type Error = BlockExecutionError;

         fn apply_pre_execution_changes(
@@ -599,7 +628,7 @@ mod tests {
             &mut self,
             _block: &BlockWithSenders,
             _total_difficulty: U256,
-        ) -> Result<(Vec<Receipt>, u64), Self::Error> {
+        ) -> Result<ExecuteOutput, Self::Error> {
             Ok(self.execute_transactions_result.clone())
         }
@@ -651,7 +680,8 @@ mod tests {
     fn test_strategy() {
         let expected_gas_used = 10;
         let expected_receipts = vec![Receipt::default()];
-        let expected_execute_transactions_result = (expected_receipts.clone(), expected_gas_used);
+        let expected_execute_transactions_result =
+            ExecuteOutput { receipts: expected_receipts.clone(), gas_used: expected_gas_used };
         let expected_apply_post_execution_changes_result = Requests::new(vec![bytes!("deadbeef")]);
         let expected_finish_result = BundleState::default();
diff --git a/crates/evm/src/state_change.rs b/crates/evm/src/state_change.rs
index 2d5209015271..2a3d93f94d9c 100644
--- a/crates/evm/src/state_change.rs
+++ b/crates/evm/src/state_change.rs
@@ -91,9 +91,9 @@ pub fn insert_post_block_withdrawals_balance_increments
diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs
--- a/crates/evm/src/test_utils.rs
+++ b/crates/evm/src/test_utils.rs
@@ ... @@ impl BatchExecutor for MockExecutorProvider {
 impl<S, DB> BasicBlockExecutor<S, DB>
 where
     S: BlockExecutionStrategy<DB>,
+    DB: Database,
 {
     /// Provides safe read access to the state
     pub fn with_state<F, R>(&self, f: F) -> R
@@ -140,6 +141,7 @@ where
 impl<S, DB> BasicBatchExecutor<S, DB>
 where
     S: BlockExecutionStrategy<DB>,
+    DB: Database,
 {
     /// Provides safe read access to the state
     pub fn with_state<F, R>(&self, f: F) -> R
diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs
index 5eaf92bfefc8..0a8bde242457 100644
--- a/crates/exex/exex/src/backfill/test_utils.rs
+++ b/crates/exex/exex/src/backfill/test_utils.rs
@@ -1,6 +1,6 @@
 use std::sync::Arc;

-use alloy_consensus::TxEip2930;
+use alloy_consensus::{constants::ETH_TO_WEI, TxEip2930};
 use alloy_genesis::{Genesis, GenesisAccount};
 use alloy_primitives::{b256, Address, TxKind, U256};
 use eyre::OptionExt;
@@ -10,8 +10,7 @@ use reth_evm::execute::{
 };
 use reth_evm_ethereum::execute::EthExecutorProvider;
 use reth_primitives::{
-    constants::ETH_TO_WEI, Block, BlockBody, BlockWithSenders, Header, Receipt,
-    SealedBlockWithSenders, Transaction,
+    Block, BlockBody, BlockWithSenders, Header, Receipt, SealedBlockWithSenders, Transaction,
 };
 use reth_provider::{
     providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef,
diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs
index 9b86da7c77a5..1f6ea75ce6d9 100644
--- a/crates/exex/test-utils/src/lib.rs
+++ b/crates/exex/test-utils/src/lib.rs
@@ -268,15 +268,16 @@ pub async fn test_exex_context_with_chain_spec(
     let network_manager = NetworkManager::new(
         NetworkConfigBuilder::new(SecretKey::new(&mut rand::thread_rng()))
             .with_unused_discovery_port()
+            .with_unused_listener_port()
             .build(provider_factory.clone()),
     )
     .await?;
     let network = network_manager.handle().clone();
-
-    let (_, payload_builder) = NoopPayloadBuilderService::<EthEngineTypes>::new();
-
     let tasks = TaskManager::current();
     let task_executor = tasks.executor();
+    tasks.executor().spawn(network_manager);
+
+    let (_, payload_builder) = NoopPayloadBuilderService::<EthEngineTypes>::new();

     let components = NodeAdapter::<FullNodeTypesAdapter<_, _>, _> {
         components: Components {
diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml
index f008d03b56fa..6fb669388851 100644
--- a/crates/net/discv4/Cargo.toml
+++ b/crates/net/discv4/Cargo.toml
@@ -46,7 +46,7 @@ serde = { workspace = true, optional = true }
 [dev-dependencies]
 assert_matches.workspace = true
 rand.workspace = true
-tokio = { workspace = true, features = ["macros"] }
+tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
 reth-tracing.workspace = true

 [features]
diff --git a/crates/net/discv4/src/config.rs b/crates/net/discv4/src/config.rs
index 4fae31f585ae..38467304db2c 100644
--- a/crates/net/discv4/src/config.rs
+++ b/crates/net/discv4/src/config.rs
@@ -8,8 +8,6 @@ use alloy_rlp::Encodable;
 use reth_net_banlist::BanList;
 use reth_net_nat::{NatResolver, ResolveNatInterval};
 use reth_network_peers::NodeRecord;
-#[cfg(feature = "serde")]
-use serde::{Deserialize, Serialize};
 use std::{
     collections::{HashMap, HashSet},
     time::Duration,
@@ -17,7 +15,7 @@ use std::{

 /// Configuration parameters that define the performance of the discovery network.
 #[derive(Clone, Debug)]
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
 pub struct Discv4Config {
     /// Whether to enable the incoming packet filter. Default: false.
     pub enable_packet_filter: bool,
@@ -25,7 +23,7 @@ pub struct Discv4Config {
     pub udp_egress_message_buffer: usize,
     /// Size of the channel buffer for incoming messages.
     pub udp_ingress_message_buffer: usize,
-    /// The number of allowed failures for `FindNode` requests. Default: 5.
+    /// The number of allowed consecutive failures for `FindNode` requests. Default: 5.
     pub max_find_node_failures: u8,
     /// The interval to use when checking for expired nodes that need to be re-pinged. Default:
     /// 10min.
@@ -118,7 +116,7 @@ impl Default for Discv4Config {
             // Every outgoing request will eventually lead to an incoming response
             udp_ingress_message_buffer: 1024,
             max_find_node_failures: 5,
-            ping_interval: Duration::from_secs(60 * 10),
+            ping_interval: Duration::from_secs(10),
             // Unified expiration and timeout durations, mirrors geth's `expiration` duration
             ping_expiration: Duration::from_secs(20),
             bond_expiration: Duration::from_secs(60 * 60),
@@ -144,7 +142,7 @@ impl Default for Discv4Config {

 /// Builder type for [`Discv4Config`]
 #[derive(Clone, Debug, Default)]
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
 pub struct Discv4ConfigBuilder {
     config: Discv4Config,
 }
diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs
index 7c14eac9b653..a99906bdf09b 100644
--- a/crates/net/discv4/src/lib.rs
+++ b/crates/net/discv4/src/lib.rs
@@ -743,7 +743,8 @@ impl Discv4Service {
         trace!(target: "discv4", ?target, "Starting lookup");
         let target_key = kad_key(target);

-        // Start a lookup context with the 16 (MAX_NODES_PER_BUCKET) closest nodes
+        // Start a lookup context with the 16 (MAX_NODES_PER_BUCKET) closest nodes to which we have
+        // a valid endpoint proof
         let ctx = LookupContext::new(
             target_key.clone(),
             self.kbuckets
@@ -772,7 +773,10 @@ impl Discv4Service {
         trace!(target: "discv4", ?target, num = closest.len(), "Start lookup closest nodes");

         for node in closest {
-            self.find_node(&node, ctx.clone());
+            // here we still want to check against previous request failures and if necessary
+            // re-establish a new endpoint proof because it can be the case that the other node lost
+            // our entry and no longer has an endpoint proof on their end
+            self.find_node_checked(&node, ctx.clone());
         }
     }
@@ -788,6 +792,22 @@ impl Discv4Service {
         self.pending_find_nodes.insert(node.id, FindNodeRequest::new(ctx));
     }

+    /// Sends a new `FindNode` packet to the node with `target` as the lookup target, but checks
+    /// whether we should send a new ping first to renew the endpoint proof by checking the
+    /// previously failed findNode requests. It could be that the node is no longer reachable or
+    /// lost our entry.
+    fn find_node_checked(&mut self, node: &NodeRecord, ctx: LookupContext) {
+        let max_failures = self.config.max_find_node_failures;
+        let needs_ping = self
+            .on_entry(node.id, |entry| entry.exceeds_find_node_failures(max_failures))
+            .unwrap_or(true);
+        if needs_ping {
+            self.try_ping(*node, PingReason::Lookup(*node, ctx))
+        } else {
+            self.find_node(node, ctx)
+        }
+    }
+
     /// Notifies all listeners.
     ///
     /// Removes all listeners that are closed.
@@ -829,6 +849,24 @@
     /// table. Returns `true` if the node was in the table and `false` otherwise.
     pub fn remove_node(&mut self, node_id: PeerId) -> bool {
         let key = kad_key(node_id);
+        self.remove_key(node_id, key)
+    }
+
+    /// Removes a `node_id` from the routing table but only if there are enough other nodes in the
+    /// bucket (bucket must be at least half full)
+    ///
+    /// Returns `true` if the node was removed
+    pub fn soft_remove_node(&mut self, node_id: PeerId) -> bool {
+        let key = kad_key(node_id);
+        let Some(bucket) = self.kbuckets.get_bucket(&key) else { return false };
+        if bucket.num_entries() < MAX_NODES_PER_BUCKET / 2 {
+            // skip half empty bucket
+            return false;
+        }
+        self.remove_key(node_id, key)
+    }
+
+    fn remove_key(&mut self, node_id: PeerId, key: discv5::Key<PeerId>) -> bool {
         let removed = self.kbuckets.remove(&key);
         if removed {
             trace!(target: "discv4", ?node_id, "removed node");
@@ -842,7 +880,7 @@ impl Discv4Service {
         self.kbuckets.buckets_iter().fold(0, |count, bucket| count + bucket.num_connected())
     }

-    /// Check if the peer has a bond
+    /// Check if the peer has an active bond.
     fn has_bond(&self, remote_id: PeerId, remote_ip: IpAddr) -> bool {
         if let Some(timestamp) = self.received_pongs.last_pong(remote_id, remote_ip) {
             if timestamp.elapsed() < self.config.bond_expiration {
@@ -852,7 +890,22 @@ impl Discv4Service {
         false
     }

-    /// Update the entry on RE-ping
+    /// Applies a closure on the pending or present [`NodeEntry`].
+    fn on_entry<F, R>(&mut self, peer_id: PeerId, f: F) -> Option<R>
+    where
+        F: FnOnce(&NodeEntry) -> R,
+    {
+        let key = kad_key(peer_id);
+        match self.kbuckets.entry(&key) {
+            BucketEntry::Present(entry, _) => Some(f(entry.value())),
+            BucketEntry::Pending(mut entry, _) => Some(f(entry.value())),
+            _ => None,
+        }
+    }
+
+    /// Update the entry on RE-ping.
+    ///
+    /// Invoked when we received the Pong to our [`PingReason::RePing`] ping.
     ///
     /// On re-ping we check for a changed `enr_seq` if eip868 is enabled and when it changed we sent
     /// a followup request to retrieve the updated ENR
@@ -909,7 +962,7 @@ impl Discv4Service {
         match self.kbuckets.entry(&key) {
             kbucket::Entry::Present(mut entry, old_status) => {
                 // endpoint is now proven
-                entry.value_mut().has_endpoint_proof = true;
+                entry.value_mut().establish_proof();
                 entry.value_mut().update_with_enr(last_enr_seq);

                 if !old_status.is_connected() {
@@ -925,7 +978,7 @@ impl Discv4Service {
             }
             kbucket::Entry::Pending(mut entry, mut status) => {
                 // endpoint is now proven
-                entry.value().has_endpoint_proof = true;
+                entry.value().establish_proof();
                 entry.value().update_with_enr(last_enr_seq);

                 if !status.is_connected() {
@@ -1028,11 +1081,23 @@ impl Discv4Service {

         let old_enr = match self.kbuckets.entry(&key) {
             kbucket::Entry::Present(mut entry, _) => {
-                is_proven = entry.value().has_endpoint_proof;
+                if entry.value().is_expired() {
+                    // If no communication with the sender has occurred within the last 12h, a ping
+                    // should be sent in addition to pong in order to receive an endpoint proof.
+                    needs_bond = true;
+                } else {
+                    is_proven = entry.value().has_endpoint_proof;
+                }
                 entry.value_mut().update_with_enr(ping.enr_sq)
             }
             kbucket::Entry::Pending(mut entry, _) => {
-                is_proven = entry.value().has_endpoint_proof;
+                if entry.value().is_expired() {
+                    // If no communication with the sender has occurred within the last 12h, a ping
+                    // should be sent in addition to pong in order to receive an endpoint proof.
+                    needs_bond = true;
+                } else {
+                    is_proven = entry.value().has_endpoint_proof;
+                }
                 entry.value().update_with_enr(ping.enr_sq)
             }
             kbucket::Entry::Absent(entry) => {
@@ -1097,6 +1162,8 @@ impl Discv4Service {
                 // try to send it
                 ctx.unmark_queried(record.id);
             } else {
+                // we just received a ping from that peer so we can send a find node request
+                // directly
                 self.find_node(&record, ctx);
             }
         }
@@ -1205,7 +1272,8 @@ impl Discv4Service {
                 self.update_on_pong(node, pong.enr_sq);
             }
             PingReason::EstablishBond => {
-                // nothing to do here
+                // same as `InitialInsert` which renews the bond if the peer is in the table
+                self.update_on_pong(node, pong.enr_sq);
             }
             PingReason::RePing => {
                 self.update_on_reping(node, pong.enr_sq);
@@ -1386,14 +1454,28 @@ impl Discv4Service {
             BucketEntry::SelfEntry => {
                 // we received our own node entry
             }
-            BucketEntry::Present(mut entry, _) => {
-                if entry.value_mut().has_endpoint_proof {
-                    self.find_node(&closest, ctx.clone());
+            BucketEntry::Present(entry, _) => {
+                if entry.value().has_endpoint_proof {
+                    if entry
+                        .value()
+                        .exceeds_find_node_failures(self.config.max_find_node_failures)
+                    {
+                        self.try_ping(closest, PingReason::Lookup(closest, ctx.clone()))
+                    } else {
+                        self.find_node(&closest, ctx.clone());
+                    }
                 }
             }
             BucketEntry::Pending(mut entry, _) => {
                 if entry.value().has_endpoint_proof {
-                    self.find_node(&closest, ctx.clone());
+                    if entry
+                        .value()
+                        .exceeds_find_node_failures(self.config.max_find_node_failures)
+                    {
+                        self.try_ping(closest, PingReason::Lookup(closest, ctx.clone()))
+                    } else {
+                        self.find_node(&closest, ctx.clone());
+                    }
                 }
             }
         }
@@ -1431,11 +1513,12 @@ impl Discv4Service {
             true
         });

-        trace!(target: "discv4", num=%failed_pings.len(), "evicting nodes due to failed pong");
-
-        // remove nodes that failed to pong
-        for node_id in failed_pings {
-            self.remove_node(node_id);
+        if !failed_pings.is_empty() {
+            // remove nodes that failed to pong
+            trace!(target: "discv4", num=%failed_pings.len(), "evicting nodes due to failed pong");
+            for node_id in failed_pings {
+                self.remove_node(node_id);
+            }
         }

         let mut failed_lookups = Vec::new();
@@ -1446,34 +1529,40 @@ impl Discv4Service {
             }
             true
         });
-        trace!(target: "discv4", num=%failed_lookups.len(), "evicting nodes due to failed lookup");

-        // remove nodes that failed the e2e lookup process, so we can restart it
-        for node_id in failed_lookups {
-            self.remove_node(node_id);
+        if !failed_lookups.is_empty() {
+            // remove nodes that failed the e2e lookup process, so we can restart it
+            trace!(target: "discv4", num=%failed_lookups.len(), "evicting nodes due to failed lookup");
+            for node_id in failed_lookups {
+                self.remove_node(node_id);
+            }
         }

-        self.evict_failed_neighbours(now);
+        self.evict_failed_find_nodes(now);
     }

     /// Handles failed responses to `FindNode`
-    fn evict_failed_neighbours(&mut self, now: Instant) {
-        let mut failed_neighbours = Vec::new();
+    fn evict_failed_find_nodes(&mut self, now: Instant) {
+        let mut failed_find_nodes = Vec::new();
         self.pending_find_nodes.retain(|node_id, find_node_request| {
             if now.duration_since(find_node_request.sent_at) > self.config.neighbours_expiration {
                 if !find_node_request.answered {
                     // node actually responded but with fewer entries than expected, but we don't
                     // treat this as a hard error since it responded.
-                    failed_neighbours.push(*node_id);
+                    failed_find_nodes.push(*node_id);
                 }
                 return false
             }
             true
         });

-        trace!(target: "discv4", num=%failed_neighbours.len(), "processing failed neighbours");
+        if failed_find_nodes.is_empty() {
+            return
+        }
+
+        trace!(target: "discv4", num=%failed_find_nodes.len(), "processing failed find nodes");

-        for node_id in failed_neighbours {
+        for node_id in failed_find_nodes {
             let key = kad_key(node_id);
             let failures = match self.kbuckets.entry(&key) {
                 kbucket::Entry::Present(mut entry, _) => {
@@ -1490,14 +1579,8 @@ impl Discv4Service {
             // if the node failed to respond anything useful multiple times, remove the node from
             // the table, but only if there are enough other nodes in the bucket (bucket must be at
             // least half full)
-            if failures > (self.config.max_find_node_failures as usize) {
-                if let Some(bucket) = self.kbuckets.get_bucket(&key) {
-                    if bucket.num_entries() < MAX_NODES_PER_BUCKET / 2 {
-                        // skip half empty bucket
-                        continue
-                    }
-                }
-                self.remove_node(node_id);
+            if failures > self.config.max_find_node_failures {
+                self.soft_remove_node(node_id);
             }
         }
     }
@@ -2189,8 +2272,8 @@ struct NodeEntry {
     last_enr_seq: Option<u64>,
     /// `ForkId` if retrieved via ENR requests.
     fork_id: Option<ForkId>,
-    /// Counter for failed findNode requests.
-    find_node_failures: usize,
+    /// Counter for failed _consecutive_ findNode requests.
+    find_node_failures: u8,
     /// Whether the endpoint of the peer is proven.
     has_endpoint_proof: bool,
 }
@@ -2217,6 +2300,17 @@ impl NodeEntry {
         node
     }

+    /// Marks the entry with an established proof and resets the consecutive failure counter.
+    fn establish_proof(&mut self) {
+        self.has_endpoint_proof = true;
+        self.find_node_failures = 0;
+    }
+
+    /// Returns true if the tracked find node failures exceed the max amount
+    const fn exceeds_find_node_failures(&self, max_failures: u8) -> bool {
+        self.find_node_failures >= max_failures
+    }
+
     /// Updates the last timestamp and sets the enr seq
     fn update_with_enr(&mut self, last_enr_seq: Option<u64>) -> Option<u64> {
         self.update_now(|s| std::mem::replace(&mut s.last_enr_seq, last_enr_seq))
@@ -2247,7 +2341,7 @@ impl NodeEntry {
 impl NodeEntry {
     /// Returns true if the node should be re-pinged.
     fn is_expired(&self) -> bool {
-        self.last_seen.elapsed() > ENDPOINT_PROOF_EXPIRATION
+        self.last_seen.elapsed() > (ENDPOINT_PROOF_EXPIRATION / 2)
     }
 }
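The core of the discv4 change is the failure bookkeeping on `NodeEntry`: failures are now a consecutive `u8` counter that a pong resets, and a lookup re-pings instead of sending `FindNode` once the counter reaches the configured maximum. A self-contained toy reproducing exactly that logic:

```rust
// Toy reproduction of the bookkeeping added above (fields simplified).
#[derive(Default)]
struct NodeEntry {
    find_node_failures: u8,
    has_endpoint_proof: bool,
}

impl NodeEntry {
    /// Marks the entry with an established proof and resets the failure counter.
    fn establish_proof(&mut self) {
        self.has_endpoint_proof = true;
        self.find_node_failures = 0;
    }

    /// Returns true if the tracked find node failures exceed the max amount.
    const fn exceeds_find_node_failures(&self, max_failures: u8) -> bool {
        self.find_node_failures >= max_failures
    }
}

fn main() {
    let max_failures = 5;
    let mut entry = NodeEntry::default();
    entry.find_node_failures = u8::MAX;
    // a lookup against this peer must renew the endpoint proof first (re-ping)
    assert!(entry.exceeds_find_node_failures(max_failures));
    // receiving the pong resets the counter, so FindNode can be sent again
    entry.establish_proof();
    assert!(!entry.exceeds_find_node_failures(max_failures));
}
```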
@@ -2256,8 +2350,7 @@ impl NodeEntry {
 enum PingReason {
     /// Initial ping to a previously unknown peer that was inserted into the table.
     InitialInsert,
-    /// Initial ping to a previously unknown peer that didn't fit into the table. But we still want
-    /// to establish a bond.
+    /// A ping to a peer to establish a bond (endpoint proof).
     EstablishBond,
     /// Re-ping a peer.
     RePing,
@@ -2324,9 +2417,9 @@ mod tests {
         let original = EnrForkIdEntry {
             fork_id: ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 0 },
         };
-        let mut encoded = Vec::new();
-        original.encode(&mut encoded);
         let expected: [u8; 8] = [0xc7, 0xc6, 0x84, 0xdc, 0xe9, 0x6c, 0x2d, 0x80];
+        let mut encoded = Vec::with_capacity(expected.len());
+        original.encode(&mut encoded);

         assert_eq!(&expected[..], encoded.as_slice());
     }
@@ -2634,6 +2727,45 @@ mod tests {
         assert_eq!(ctx.inner.closest_nodes.borrow().len(), 1);
     }

+    #[tokio::test]
+    async fn test_reping_on_find_node_failures() {
+        reth_tracing::init_test_tracing();
+
+        let config = Discv4Config::builder().build();
+        let (_discv4, mut service) = create_discv4_with_config(config).await;
+
+        let target = PeerId::random();
+
+        let id = PeerId::random();
+        let key = kad_key(id);
+        let record = NodeRecord::new("0.0.0.0:0".parse().unwrap(), id);
+
+        let mut entry = NodeEntry::new_proven(record);
+        entry.find_node_failures = u8::MAX;
+        let _ = service.kbuckets.insert_or_update(
+            &key,
+            entry,
+            NodeStatus {
+                direction: ConnectionDirection::Incoming,
+                state: ConnectionState::Connected,
+            },
+        );
+
+        service.lookup(target);
+        assert_eq!(service.pending_find_nodes.len(), 0);
+        assert_eq!(service.pending_pings.len(), 1);
+
+        service.update_on_pong(record, None);
+
+        service
+            .on_entry(record.id, |entry| {
+                // reset on pong
+                assert_eq!(entry.find_node_failures, 0);
+                assert!(entry.has_endpoint_proof);
+            })
+            .unwrap();
+    }
+
     #[tokio::test]
     async fn test_service_commands() {
         reth_tracing::init_test_tracing();
diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs
index d60c63fc1f6e..878b4573f2b6 100644
--- a/crates/net/eth-wire-types/src/blocks.rs
+++ b/crates/net/eth-wire-types/src/blocks.rs
@@ -497,4 +497,13 @@ mod tests {
         let result = RequestPair::decode(&mut &data[..]).unwrap();
         assert_eq!(result, expected);
     }
+
+    #[test]
+    fn empty_block_bodies_rlp() {
+        let body = BlockBodies::default();
+        let mut buf = Vec::new();
+        body.encode(&mut buf);
+        let decoded = BlockBodies::decode(&mut buf.as_slice()).unwrap();
+        assert_eq!(body, decoded);
+    }
 }
diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs
index 9ef8e6c71474..4afcb34e13bf 100644
--- a/crates/net/eth-wire-types/src/message.rs
+++ b/crates/net/eth-wire-types/src/message.rs
@@ -494,7 +494,8 @@ where
 mod tests {
     use super::MessageError;
     use crate::{
-        message::RequestPair, EthMessage, EthMessageID, GetNodeData, NodeData, ProtocolMessage,
+        message::RequestPair, EthMessage, EthMessageID, EthVersion, GetNodeData, NodeData,
+        ProtocolMessage,
     };
     use alloy_primitives::hex;
     use alloy_rlp::{Decodable, Encodable, Error};
@@ -566,4 +567,17 @@ mod tests {
         let result = RequestPair::<Vec<u8>>::decode(&mut &*raw_pair);
         assert!(matches!(result, Err(Error::UnexpectedLength)));
     }
+
+    #[test]
+    fn empty_block_bodies_protocol() {
+        let empty_block_bodies = ProtocolMessage::from(EthMessage::BlockBodies(RequestPair {
+            request_id: 0,
+            message: Default::default(),
+        }));
+        let mut buf = Vec::new();
+        empty_block_bodies.encode(&mut buf);
+        let decoded =
+            ProtocolMessage::decode_message(EthVersion::Eth68, &mut buf.as_slice()).unwrap();
+        assert_eq!(empty_block_bodies, decoded);
+    }
 }
true alloy-rpc-types-engine = { workspace = true, features = ["jwt"] } alloy-consensus.workspace = true +alloy-eips.workspace = true # misc eyre.workspace = true diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index da96deb70c1b..0eec6639a117 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -1,12 +1,14 @@ //! clap [Args](clap::Args) for database configuration +use std::time::Duration; + use crate::version::default_client_version; use clap::{ builder::{PossibleValue, TypedValueParser}, error::ErrorKind, Arg, Args, Command, Error, }; -use reth_db::ClientVersion; +use reth_db::{mdbx::MaxReadTransactionDuration, ClientVersion}; use reth_storage_errors::db::LogLevel; /// Parameters for database configuration @@ -20,6 +22,9 @@ pub struct DatabaseArgs { /// NFS volume. #[arg(long = "db.exclusive")] pub exclusive: Option, + /// Read transaction timeout in seconds, 0 means no timeout. + #[arg(long = "db.read-transaction-timeout")] + pub read_transaction_timeout: Option, } impl DatabaseArgs { @@ -33,9 +38,16 @@ impl DatabaseArgs { &self, client_version: ClientVersion, ) -> reth_db::mdbx::DatabaseArguments { + let max_read_transaction_duration = match self.read_transaction_timeout { + None => None, // if not specified, use default value + Some(0) => Some(MaxReadTransactionDuration::Unbounded), // if 0, disable timeout + Some(secs) => Some(MaxReadTransactionDuration::Set(Duration::from_secs(secs))), + }; + reth_db::mdbx::DatabaseArguments::new(client_version) .with_log_level(self.log_level) .with_exclusive(self.exclusive) + .with_max_read_transaction_duration(max_read_transaction_duration) } } diff --git a/crates/node/core/src/args/payload_builder.rs b/crates/node/core/src/args/payload_builder.rs index 4a18fd5b0b7d..524a93195de1 100644 --- a/crates/node/core/src/args/payload_builder.rs +++ b/crates/node/core/src/args/payload_builder.rs @@ -1,11 +1,11 @@ use crate::{cli::config::PayloadBuilderConfig, version::default_extradata}; use alloy_consensus::constants::MAXIMUM_EXTRA_DATA_SIZE; +use alloy_eips::{eip1559::ETHEREUM_BLOCK_GAS_LIMIT, merge::SLOT_DURATION}; use clap::{ builder::{RangedU64ValueParser, TypedValueParser}, Arg, Args, Command, }; use reth_cli_util::{parse_duration_from_secs, parse_duration_from_secs_or_ms}; -use reth_primitives::constants::{ETHEREUM_BLOCK_GAS_LIMIT, SLOT_DURATION}; use std::{borrow::Cow, ffi::OsStr, time::Duration}; /// Parameters for configuring the Payload Builder diff --git a/crates/node/core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs index 63f6c566ca2b..282313555f76 100644 --- a/crates/node/core/src/args/txpool.rs +++ b/crates/node/core/src/args/txpool.rs @@ -1,9 +1,10 @@ //! 
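
The database.rs hunk above wires a new `--db.read-transaction-timeout` flag into MDBX: unset keeps the database default, `0` disables the timeout entirely, and any other value sets an explicit bound in seconds. A self-contained sketch of that three-way mapping, with `MaxReadTransactionDuration` redefined locally as a stand-in for the `reth_db::mdbx` type:

```rust
use std::time::Duration;

// Local stand-in for reth_db::mdbx::MaxReadTransactionDuration.
#[derive(Debug, PartialEq)]
enum MaxReadTransactionDuration {
    Unbounded,
    Set(Duration),
}

fn map_timeout(read_transaction_timeout: Option<u64>) -> Option<MaxReadTransactionDuration> {
    match read_transaction_timeout {
        None => None, // not specified: fall back to the database default
        Some(0) => Some(MaxReadTransactionDuration::Unbounded), // 0 disables the timeout
        Some(secs) => Some(MaxReadTransactionDuration::Set(Duration::from_secs(secs))),
    }
}

fn main() {
    assert_eq!(map_timeout(None), None);
    assert_eq!(map_timeout(Some(0)), Some(MaxReadTransactionDuration::Unbounded));
    assert_eq!(
        map_timeout(Some(30)),
        Some(MaxReadTransactionDuration::Set(Duration::from_secs(30)))
    );
}
```
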
Transaction pool arguments use crate::cli::config::RethTransactionPoolConfig; +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::Address; use clap::Args; -use reth_primitives::constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; +use reth_primitives::constants::MIN_PROTOCOL_BASE_FEE; use reth_transaction_pool::{ blobstore::disk::DEFAULT_MAX_CACHED_BLOBS, pool::{NEW_TX_LISTENER_BUFFER_SIZE, PENDING_TX_LISTENER_BUFFER_SIZE}, diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index 9c56c2da9b88..3b515b8ab5e9 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -25,6 +25,7 @@ reth-primitives-traits.workspace = true # ethereum alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true # async tokio.workspace = true diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index e10caaee7c53..92f8cb5e0fe7 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -1,6 +1,7 @@ //! Support for handling events emitted by node components. use crate::cl::ConsensusLayerHealthEvent; +use alloy_consensus::constants::GWEI_TO_WEI; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::ForkchoiceState; use futures::Stream; @@ -263,7 +264,7 @@ impl NodeState { gas=%format_gas(block.header.gas_used), gas_throughput=%format_gas_throughput(block.header.gas_used, elapsed), full=%format!("{:.1}%", block.header.gas_used as f64 * 100.0 / block.header.gas_limit as f64), - base_fee=%format!("{:.2}gwei", block.header.base_fee_per_gas.unwrap_or(0) as f64 / constants::GWEI_TO_WEI as f64), + base_fee=%format!("{:.2}gwei", block.header.base_fee_per_gas.unwrap_or(0) as f64 / GWEI_TO_WEI as f64), blobs=block.header.blob_gas_used.unwrap_or(0) / constants::eip4844::DATA_GAS_PER_BLOB, excess_blobs=block.header.excess_blob_gas.unwrap_or(0) / constants::eip4844::DATA_GAS_PER_BLOB, ?elapsed, diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml index 6b068dabbf0c..a7631b621730 100644 --- a/crates/optimism/chainspec/Cargo.toml +++ b/crates/optimism/chainspec/Cargo.toml @@ -26,6 +26,7 @@ alloy-chains.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-eips.workspace = true # op op-alloy-rpc-types.workspace = true diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 98c6589d1ceb..83c499de5257 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -20,7 +20,7 @@ mod op_sepolia; use alloc::{vec, vec::Vec}; use alloy_chains::Chain; use alloy_genesis::Genesis; -use alloy_primitives::{Parity, Signature, B256, U256}; +use alloy_primitives::{B256, U256}; pub use base::BASE_MAINNET; pub use base_sepolia::BASE_SEPOLIA; use core::fmt::Display; @@ -178,12 +178,6 @@ pub struct OpChainSpec { pub inner: ChainSpec, } -/// Returns the signature for the optimism deposit transactions, which don't include a -/// signature. 
-pub fn optimism_deposit_tx_signature() -> Signature { - Signature::new(U256::ZERO, U256::ZERO, Parity::Parity(false)) -} - impl EthChainSpec for OpChainSpec { fn chain(&self) -> alloy_chains::Chain { self.inner.chain() diff --git a/crates/optimism/chainspec/src/op.rs b/crates/optimism/chainspec/src/op.rs index 8c0da5320f94..5afb236cd33e 100644 --- a/crates/optimism/chainspec/src/op.rs +++ b/crates/optimism/chainspec/src/op.rs @@ -3,11 +3,11 @@ use alloc::{sync::Arc, vec}; use alloy_chains::Chain; +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::EthereumHardfork; use reth_optimism_forks::OptimismHardfork; -use reth_primitives_traits::constants::ETHEREUM_BLOCK_GAS_LIMIT; use crate::{LazyLock, OpChainSpec}; diff --git a/crates/optimism/chainspec/src/op_sepolia.rs b/crates/optimism/chainspec/src/op_sepolia.rs index d3243ebd5346..31c9eda6bddd 100644 --- a/crates/optimism/chainspec/src/op_sepolia.rs +++ b/crates/optimism/chainspec/src/op_sepolia.rs @@ -3,11 +3,11 @@ use alloc::{sync::Arc, vec}; use alloy_chains::{Chain, NamedChain}; +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::EthereumHardfork; use reth_optimism_forks::OptimismHardfork; -use reth_primitives_traits::constants::ETHEREUM_BLOCK_GAS_LIMIT; use crate::{LazyLock, OpChainSpec}; diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index e6eed86bf7fc..235b44559696 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -54,6 +54,7 @@ use tracing::info; // This allows us to manually enable node metrics features, required for proper jemalloc metric // reporting use reth_node_metrics as _; +use reth_node_metrics::recorder::install_prometheus_recorder; /// The main op-reth cli interface. /// @@ -135,6 +136,9 @@ where let _guard = self.init_tracing()?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); + // Install the prometheus recorder to be sure to record all metrics + let _ = install_prometheus_recorder(); + let runner = CliRunner::default(); match self.command { Commands::Node(command) => { diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index d15cdee13d66..1cd92409847e 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,146 +1,143 @@ -//! Optimism block executor. +//! Optimism block execution strategy. 
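
The execute.rs rewrite that follows replaces the hand-rolled `Executor`/`BatchExecutor` pair with a `BlockExecutionStrategy` that a generic `BasicBlockExecutorProvider` drives through pre-execution, transaction-execution, and post-execution phases. A minimal sketch of that control flow, with simplified signatures rather than the actual reth trait definitions:

```rust
// Simplified model only: the real reth traits are generic over the database
// and EVM config and return richer types (receipts, requests, bundle state).
struct ExecuteOutput {
    gas_used: u64,
}

trait BlockExecutionStrategy {
    type Error;
    fn apply_pre_execution_changes(&mut self) -> Result<(), Self::Error>;
    fn execute_transactions(&mut self) -> Result<ExecuteOutput, Self::Error>;
    fn apply_post_execution_changes(&mut self) -> Result<(), Self::Error>;
}

// Stand-in for the generic executor that BasicBlockExecutorProvider hands out:
// it only sequences the phases; all chain-specific logic lives in the strategy.
fn execute_block<S: BlockExecutionStrategy>(s: &mut S) -> Result<ExecuteOutput, S::Error> {
    s.apply_pre_execution_changes()?;
    let output = s.execute_transactions()?;
    s.apply_post_execution_changes()?;
    Ok(output)
}

struct NoopStrategy;

impl BlockExecutionStrategy for NoopStrategy {
    type Error = ();
    fn apply_pre_execution_changes(&mut self) -> Result<(), ()> {
        Ok(())
    }
    fn execute_transactions(&mut self) -> Result<ExecuteOutput, ()> {
        Ok(ExecuteOutput { gas_used: 0 })
    }
    fn apply_post_execution_changes(&mut self) -> Result<(), ()> {
        Ok(())
    }
}

fn main() {
    let gas = execute_block(&mut NoopStrategy).unwrap().gas_used;
    assert_eq!(gas, 0);
}
```

The payoff, visible in the deletions below, is that batching, state hooks, and bundle-state handling live once in the generic executor instead of being duplicated in every chain's executor type.
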
-use crate::{ - l1::ensure_create2_deployer, OpChainSpec, OptimismBlockExecutionError, OptimismEvmConfig, -}; +use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::Transaction as _; use alloy_eips::eip7685::Requests; -use alloy_primitives::{BlockNumber, U256}; use core::fmt::Display; -use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_chainspec::EthereumHardforks; +use reth_consensus::ConsensusError; use reth_evm::{ execute::{ - BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, - BlockExecutorProvider, BlockValidationError, Executor, ProviderError, + BasicBlockExecutorProvider, BlockExecutionError, BlockExecutionStrategy, + BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, ProviderError, }, state_change::post_block_balance_increments, - system_calls::{NoopHook, OnStateHook, SystemCaller}, + system_calls::{OnStateHook, SystemCaller}, ConfigureEvm, }; -use reth_execution_types::ExecutionOutcome; +use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; use reth_optimism_forks::OptimismHardfork; -use reth_primitives::{BlockWithSenders, Header, Receipt, Receipts, TxType}; -use reth_prune_types::PruneModes; -use reth_revm::{batch::BlockBatchRecord, db::states::bundle_state::BundleRetention, Evm, State}; +use reth_primitives::{BlockWithSenders, Header, Receipt, TxType}; +use reth_revm::{Database, State}; use revm_primitives::{ - db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, + db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, }; use tracing::trace; -/// Provides executors to execute regular optimism blocks +/// Factory for [`OpExecutionStrategy`]. #[derive(Debug, Clone)] -pub struct OpExecutorProvider { +pub struct OpExecutionStrategyFactory { + /// The chainspec chain_spec: Arc, + /// How to create an EVM. evm_config: EvmConfig, } -impl OpExecutorProvider { - /// Creates a new default optimism executor provider. +impl OpExecutionStrategyFactory { + /// Creates a new default optimism executor strategy factory. pub fn optimism(chain_spec: Arc) -> Self { Self::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)) } } -impl OpExecutorProvider { - /// Creates a new executor provider. +impl OpExecutionStrategyFactory { + /// Creates a new executor strategy factory. pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { Self { chain_spec, evm_config } } } -impl OpExecutorProvider +impl BlockExecutionStrategyFactory for OpExecutionStrategyFactory where - EvmConfig: ConfigureEvm
, + EvmConfig: + Clone + Unpin + Sync + Send + 'static + ConfigureEvm<Header = Header>
, { - fn op_executor(&self, db: DB) -> OpBlockExecutor + type Strategy + Display>> = + OpExecutionStrategy; + + fn create_strategy(&self, db: DB) -> Self::Strategy where DB: Database + Display>, { - OpBlockExecutor::new( - self.chain_spec.clone(), - self.evm_config.clone(), - State::builder().with_database(db).with_bundle_update().without_state_clear().build(), - ) + let state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + OpExecutionStrategy::new(state, self.chain_spec.clone(), self.evm_config.clone()) } } -impl BlockExecutorProvider for OpExecutorProvider +/// Block execution strategy for Optimism. +#[allow(missing_debug_implementations)] +pub struct OpExecutionStrategy where - EvmConfig: ConfigureEvm
, + EvmConfig: Clone, { - type Executor + Display>> = - OpBlockExecutor; - - type BatchExecutor + Display>> = - OpBatchExecutor; - fn executor(&self, db: DB) -> Self::Executor - where - DB: Database + Display>, - { - self.op_executor(db) - } - - fn batch_executor(&self, db: DB) -> Self::BatchExecutor - where - DB: Database + Display>, - { - let executor = self.op_executor(db); - OpBatchExecutor { executor, batch_record: BlockBatchRecord::default() } - } -} - -/// Helper container type for EVM with chain spec. -#[derive(Debug, Clone)] -pub struct OpEvmExecutor { /// The chainspec chain_spec: Arc, /// How to create an EVM. evm_config: EvmConfig, + /// Current state for block execution. + state: State, + /// Utility to call system smart contracts. + system_caller: SystemCaller, } -impl OpEvmExecutor +impl OpExecutionStrategy where - EvmConfig: ConfigureEvm
, + EvmConfig: Clone, { - /// Executes the transactions in the block and returns the receipts. - /// - /// This applies the pre-execution changes, and executes the transactions. - /// - /// The optional `state_hook` will be executed with the state changes if present. - /// - /// # Note + /// Creates a new [`OpExecutionStrategy`] + pub fn new(state: State, chain_spec: Arc, evm_config: EvmConfig) -> Self { + let system_caller = SystemCaller::new(evm_config.clone(), (*chain_spec).clone()); + Self { state, chain_spec, evm_config, system_caller } + } +} + +impl OpExecutionStrategy +where + DB: Database + Display>, + EvmConfig: ConfigureEvm
, +{ + /// Configures a new evm configuration and block environment for the given block. /// - /// It does __not__ apply post-execution changes. - fn execute_pre_and_transactions( - &self, + /// Caution: this does not initialize the tx environment. + fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); + + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } +} + +impl BlockExecutionStrategy for OpExecutionStrategy +where + DB: Database + Display>, + EvmConfig: ConfigureEvm
, +{ + type Error = BlockExecutionError; + + fn apply_pre_execution_changes( + &mut self, block: &BlockWithSenders, - mut evm: Evm<'_, Ext, &mut State>, - state_hook: Option, - ) -> Result<(Vec, u64), BlockExecutionError> - where - DB: Database + Display>, - F: OnStateHook + 'static, - { - let mut system_caller = SystemCaller::new(self.evm_config.clone(), &self.chain_spec); - if let Some(hook) = state_hook { - system_caller.with_state_hook(Some(Box::new(hook) as Box)); - } + total_difficulty: U256, + ) -> Result<(), Self::Error> { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = + (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + self.state.set_state_clear_flag(state_clear_flag); + + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - // apply pre execution changes - system_caller.apply_beacon_root_contract_call( + self.system_caller.apply_beacon_root_contract_call( block.timestamp, block.number, block.parent_beacon_block_root, &mut evm, )?; - // execute transactions - let is_regolith = - self.chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(block.timestamp); - // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism // blocks will always have at least a single transaction in them (the L1 info transaction), // so we can safely assume that this will always be triggered upon the transition and that @@ -148,6 +145,20 @@ where ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()) .map_err(|_| OptimismBlockExecutionError::ForceCreate2DeployerFail)?; + Ok(()) + } + + fn execute_transactions( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result { + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + let is_regolith = + self.chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(block.timestamp); + let mut cumulative_gas_used = 0; let mut receipts = Vec::with_capacity(block.body.transactions.len()); for (sender, transaction) in block.transactions_with_sender() { @@ -200,7 +211,7 @@ where ?transaction, "Executed transaction" ); - system_caller.on_state(&result_and_state); + self.system_caller.on_state(&result_and_state); let ResultAndState { result, state } = result_and_state; evm.db_mut().commit(state); @@ -225,288 +236,58 @@ where .then_some(1), }); } - drop(evm); - - Ok((receipts, cumulative_gas_used)) - } -} - -/// A basic Optimism block executor. -/// -/// Expected usage: -/// - Create a new instance of the executor. -/// - Execute the block. -#[derive(Debug)] -pub struct OpBlockExecutor { - /// Chain specific evm config that's used to execute a block. - executor: OpEvmExecutor, - /// The state to use for execution - state: State, -} - -impl OpBlockExecutor { - /// Creates a new Optimism block executor. - pub const fn new( - chain_spec: Arc, - evm_config: EvmConfig, - state: State, - ) -> Self { - Self { executor: OpEvmExecutor { chain_spec, evm_config }, state } - } - - /// Returns the chain spec. - #[inline] - pub fn chain_spec(&self) -> &ChainSpec { - &self.executor.chain_spec - } - - /// Returns mutable reference to the state that wraps the underlying database. - pub fn state_mut(&mut self) -> &mut State { - &mut self.state - } -} - -impl OpBlockExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - /// Configures a new evm configuration and block environment for the given block. - /// - /// Caution: this does not initialize the tx environment. - fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.executor.evm_config.fill_cfg_and_block_env( - &mut cfg, - &mut block_env, - header, - total_difficulty, - ); - - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) - } - /// Convenience method to invoke `execute_without_verification_with_state_hook` setting the - /// state hook as `None`. - fn execute_without_verification( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { - self.execute_without_verification_with_state_hook(block, total_difficulty, None::) + Ok(ExecuteOutput { receipts, gas_used: cumulative_gas_used }) } - /// Execute a single block and apply the state changes to the internal state. - /// - /// Returns the receipts of the transactions in the block and the total gas used. - /// - /// Returns an error if execution fails. - fn execute_without_verification_with_state_hook( + fn apply_post_execution_changes( &mut self, block: &BlockWithSenders, total_difficulty: U256, - state_hook: Option, - ) -> Result<(Vec, u64), BlockExecutionError> - where - F: OnStateHook + 'static, - { - // 1. prepare state on new block - self.on_new_block(&block.header); - - // 2. configure the evm and execute - let env = self.evm_env_for_block(&block.header, total_difficulty); - - let (receipts, gas_used) = { - let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - self.executor.execute_pre_and_transactions(block, evm, state_hook) - }?; - - // 3. apply post execution changes - self.post_execution(block, total_difficulty)?; - - Ok((receipts, gas_used)) - } - - /// Apply settings before a new block is executed. - pub(crate) fn on_new_block(&mut self, header: &Header) { - // Set state clear flag if the block is after the Spurious Dragon hardfork. - let state_clear_flag = self.chain_spec().is_spurious_dragon_active_at_block(header.number); - self.state.set_state_clear_flag(state_clear_flag); - } - - /// Apply post execution state changes, including block rewards, withdrawals, and irregular DAO - /// hardfork state change. - pub fn post_execution( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { + _receipts: &[Receipt], + ) -> Result { let balance_increments = - post_block_balance_increments(self.chain_spec(), block, total_difficulty); + post_block_balance_increments(&self.chain_spec.clone(), block, total_difficulty); // increment balances self.state .increment_balances(balance_increments) .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - Ok(()) + Ok(Requests::default()) } -} - -impl Executor for OpBlockExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = BlockExecutionOutput; - type Error = BlockExecutionError; - /// Executes the block and commits the state changes. - /// - /// Returns the receipts of the transactions in the block. - /// - /// Returns an error if the block could not be executed or failed verification. - /// - /// State changes are committed to the database. - fn execute(mut self, input: Self::Input<'_>) -> Result { - let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_without_verification(block, total_difficulty)?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - - Ok(BlockExecutionOutput { - state: self.state.take_bundle(), - receipts, - requests: Requests::default(), - gas_used, - }) + fn state_ref(&self) -> &State { + &self.state } - fn execute_with_state_closure( - mut self, - input: Self::Input<'_>, - mut witness: F, - ) -> Result - where - F: FnMut(&State), - { - let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_without_verification(block, total_difficulty)?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - witness(&self.state); - - Ok(BlockExecutionOutput { - state: self.state.take_bundle(), - receipts, - requests: Requests::default(), - gas_used, - }) + fn state_mut(&mut self) -> &mut State { + &mut self.state } - fn execute_with_state_hook( - mut self, - input: Self::Input<'_>, - state_hook: F, - ) -> Result - where - F: OnStateHook + 'static, - { - let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_without_verification_with_state_hook( - block, - total_difficulty, - Some(state_hook), - )?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - - Ok(BlockExecutionOutput { - state: self.state.take_bundle(), - receipts, - requests: Requests::default(), - gas_used, - }) + fn with_state_hook(&mut self, hook: Option>) { + self.system_caller.with_state_hook(hook); } -} -/// An executor for a batch of blocks. -/// -/// State changes are tracked until the executor is finalized. -#[derive(Debug)] -pub struct OpBatchExecutor { - /// The executor used to execute blocks. - executor: OpBlockExecutor, - /// Keeps track of the batch and record receipts based on the configured prune mode - batch_record: BlockBatchRecord, -} - -impl OpBatchExecutor { - /// Returns the receipts of the executed blocks. - pub const fn receipts(&self) -> &Receipts { - self.batch_record.receipts() - } - - /// Returns mutable reference to the state that wraps the underlying database. - pub fn state_mut(&mut self) -> &mut State { - self.executor.state_mut() + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + _requests: &Requests, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec.clone(), receipts) } } -impl BatchExecutor for OpBatchExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = ExecutionOutcome; - type Error = BlockExecutionError; - - fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { - let BlockExecutionInput { block, total_difficulty } = input; - - if self.batch_record.first_block().is_none() { - self.batch_record.set_first_block(block.number); - } - - let (receipts, _gas_used) = - self.executor.execute_without_verification(block, total_difficulty)?; - - validate_block_post_execution(block, self.executor.chain_spec(), &receipts)?; - - // prepare the state according to the prune mode - let retention = self.batch_record.bundle_retention(block.number); - self.executor.state.merge_transitions(retention); - - // store receipts in the set - self.batch_record.save_receipts(receipts)?; - - Ok(()) - } - - fn finalize(mut self) -> Self::Output { - ExecutionOutcome::new( - self.executor.state.take_bundle(), - self.batch_record.take_receipts(), - self.batch_record.first_block().unwrap_or_default(), - self.batch_record.take_requests(), - ) - } - - fn set_tip(&mut self, tip: BlockNumber) { - self.batch_record.set_tip(tip); - } - - fn set_prune_modes(&mut self, prune_modes: PruneModes) { - self.batch_record.set_prune_modes(prune_modes); - } +/// Helper type with backwards compatible methods to obtain executor providers. +#[derive(Debug)] +pub struct OpExecutorProvider; - fn size_hint(&self) -> Option { - Some(self.executor.state.bundle_state.size_hint()) +impl OpExecutorProvider { + /// Creates a new default optimism executor strategy factory. + pub fn optimism( + chain_spec: Arc, + ) -> BasicBlockExecutorProvider { + BasicBlockExecutorProvider::new(OpExecutionStrategyFactory::optimism(chain_spec)) } } @@ -516,8 +297,10 @@ mod tests { use crate::OpChainSpec; use alloy_consensus::TxEip1559; use alloy_primitives::{b256, Address, StorageKey, StorageValue}; + use op_alloy_consensus::TxDeposit; use reth_chainspec::MIN_TRANSACTION_GAS; - use reth_optimism_chainspec::{optimism_deposit_tx_signature, OpChainSpecBuilder}; + use reth_evm::execute::{BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider}; + use reth_optimism_chainspec::OpChainSpecBuilder; use reth_primitives::{Account, Block, BlockBody, Signature, Transaction, TransactionSigned}; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, @@ -551,8 +334,13 @@ mod tests { db } - fn executor_provider(chain_spec: Arc) -> OpExecutorProvider { - OpExecutorProvider { evm_config: OptimismEvmConfig::new(chain_spec.clone()), chain_spec } + fn executor_provider( + chain_spec: Arc, + ) -> BasicBlockExecutorProvider { + let strategy_factory = + OpExecutionStrategyFactory::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)); + + BasicBlockExecutorProvider::new(strategy_factory) } #[test] @@ -600,7 +388,10 @@ mod tests { let provider = executor_provider(chain_spec); let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + // make sure the L1 block contract state is preloaded. 
+ executor.with_state_mut(|state| { + state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + }); // Attempt to execute a block with one deposit and one non-deposit transaction executor @@ -622,8 +413,9 @@ mod tests { ) .unwrap(); - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); + let receipts = executor.receipts(); + let tx_receipt = receipts[0][0].as_ref().unwrap(); + let deposit_receipt = receipts[0][1].as_ref().unwrap(); // deposit_receipt_version is not present in pre canyon transactions assert!(deposit_receipt.deposit_receipt_version.is_none()); @@ -674,13 +466,16 @@ mod tests { gas_limit: MIN_TRANSACTION_GAS, ..Default::default() }), - optimism_deposit_tx_signature(), + TxDeposit::signature(), ); let provider = executor_provider(chain_spec); let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + // make sure the L1 block contract state is preloaded. + executor.with_state_mut(|state| { + state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + }); // attempt to execute an empty block with parent beacon block root, this should not fail executor @@ -702,8 +497,9 @@ mod tests { ) .expect("Executing a block while canyon is active should not fail"); - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); + let receipts = executor.receipts(); + let tx_receipt = receipts[0][0].as_ref().unwrap(); + let deposit_receipt = receipts[0][1].as_ref().unwrap(); // deposit_receipt_version is set to 1 for post canyon deposit transactions assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index ffc82fde43cb..60aa9f7db083 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -36,8 +36,6 @@ use revm_primitives::{ BlobExcessGasAndPrice, BlockEnv, Bytes, CfgEnv, Env, HandlerCfg, OptimismFields, SpecId, TxKind, }; -pub mod strategy; - /// Optimism-related EVM configuration. #[derive(Debug, Clone)] pub struct OptimismEvmConfig { diff --git a/crates/optimism/evm/src/strategy.rs b/crates/optimism/evm/src/strategy.rs deleted file mode 100644 index c626bb66587c..000000000000 --- a/crates/optimism/evm/src/strategy.rs +++ /dev/null @@ -1,494 +0,0 @@ -//! 
Optimism block execution strategy, - -use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; -use alloc::{boxed::Box, sync::Arc, vec::Vec}; -use alloy_consensus::Transaction as _; -use alloy_eips::eip7685::Requests; -use core::fmt::Display; -use reth_chainspec::EthereumHardforks; -use reth_consensus::ConsensusError; -use reth_evm::{ - execute::{ - BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, - BlockValidationError, ProviderError, - }, - state_change::post_block_balance_increments, - system_calls::{OnStateHook, SystemCaller}, - ConfigureEvm, ConfigureEvmEnv, -}; -use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_consensus::validate_block_post_execution; -use reth_optimism_forks::OptimismHardfork; -use reth_primitives::{BlockWithSenders, Header, Receipt, TxType}; -use reth_revm::{ - db::{states::bundle_state::BundleRetention, BundleState}, - Database, State, -}; -use revm_primitives::{ - db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, -}; -use tracing::trace; - -/// Factory for [`OpExecutionStrategy`]. -#[derive(Debug, Clone)] -pub struct OpExecutionStrategyFactory { - /// The chainspec - chain_spec: Arc, - /// How to create an EVM. - evm_config: EvmConfig, -} - -impl OpExecutionStrategyFactory { - /// Creates a new default optimism executor strategy factory. - pub fn optimism(chain_spec: Arc) -> Self { - Self::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)) - } -} - -impl OpExecutionStrategyFactory { - /// Creates a new executor strategy factory. - pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config } - } -} - -impl BlockExecutionStrategyFactory for OpExecutionStrategyFactory { - type Strategy + Display>> = OpExecutionStrategy; - - fn create_strategy(&self, db: DB) -> Self::Strategy - where - DB: Database + Display>, - { - let state = - State::builder().with_database(db).with_bundle_update().without_state_clear().build(); - OpExecutionStrategy::new(state, self.chain_spec.clone(), self.evm_config.clone()) - } -} - -/// Block execution strategy for Optimism. -#[allow(missing_debug_implementations)] -pub struct OpExecutionStrategy { - /// The chainspec - chain_spec: Arc, - /// How to create an EVM. - evm_config: EvmConfig, - /// Current state for block execution. - state: State, - /// Utility to call system smart contracts. - system_caller: SystemCaller, -} - -impl OpExecutionStrategy { - /// Creates a new [`OpExecutionStrategy`] - pub fn new( - state: State, - chain_spec: Arc, - evm_config: OptimismEvmConfig, - ) -> Self { - let system_caller = SystemCaller::new(evm_config.clone(), (*chain_spec).clone()); - Self { state, chain_spec, evm_config, system_caller } - } -} - -impl OpExecutionStrategy { - /// Configures a new evm configuration and block environment for the given block. - /// - /// Caution: this does not initialize the tx environment. 
- fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); - - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) - } -} - -impl BlockExecutionStrategy for OpExecutionStrategy -where - DB: Database + Display>, -{ - type Error = BlockExecutionError; - - fn apply_pre_execution_changes( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), Self::Error> { - // Set state clear flag if the block is after the Spurious Dragon hardfork. - let state_clear_flag = - (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); - self.state.set_state_clear_flag(state_clear_flag); - - let env = self.evm_env_for_block(&block.header, total_difficulty); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - - self.system_caller.apply_beacon_root_contract_call( - block.timestamp, - block.number, - block.parent_beacon_block_root, - &mut evm, - )?; - - // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism - // blocks will always have at least a single transaction in them (the L1 info transaction), - // so we can safely assume that this will always be triggered upon the transition and that - // the above check for empty blocks will never be hit on OP chains. - ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()) - .map_err(|_| OptimismBlockExecutionError::ForceCreate2DeployerFail)?; - - Ok(()) - } - - fn execute_transactions( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), Self::Error> { - let env = self.evm_env_for_block(&block.header, total_difficulty); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - - let is_regolith = - self.chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(block.timestamp); - - let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body.transactions.len()); - for (sender, transaction) in block.transactions_with_sender() { - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. - let block_available_gas = block.header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas && - (is_regolith || !transaction.is_system_transaction()) - { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - - // An optimism block should never contain blob transactions. - if matches!(transaction.tx_type(), TxType::Eip4844) { - return Err(OptimismBlockExecutionError::BlobTransactionRejected.into()) - } - - // Cache the depositor account prior to the state transition for the deposit nonce. - // - // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces - // were not introduced in Bedrock. In addition, regular transactions don't have deposit - // nonces, so we don't need to touch the DB for those. 
- let depositor = (is_regolith && transaction.is_deposit()) - .then(|| { - evm.db_mut() - .load_cache_account(*sender) - .map(|acc| acc.account_info().unwrap_or_default()) - }) - .transpose() - .map_err(|_| OptimismBlockExecutionError::AccountLoadFailed(*sender))?; - - self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); - - // Execute transaction. - let result_and_state = evm.transact().map_err(move |err| { - let new_err = err.map_db_err(|e| e.into()); - // Ensure hash is calculated for error log, if not already done - BlockValidationError::EVM { - hash: transaction.recalculate_hash(), - error: Box::new(new_err), - } - })?; - - trace!( - target: "evm", - ?transaction, - "Executed transaction" - ); - self.system_caller.on_state(&result_and_state); - let ResultAndState { result, state } = result_and_state; - evm.db_mut().commit(state); - - // append gas used - cumulative_gas_used += result.gas_used(); - - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. - success: result.is_success(), - cumulative_gas_used, - logs: result.into_logs(), - deposit_nonce: depositor.map(|account| account.nonce), - // The deposit receipt version was introduced in Canyon to indicate an update to how - // receipt hashes should be computed when set. The state transition process ensures - // this is only set for post-Canyon deposit transactions. - deposit_receipt_version: (transaction.is_deposit() && - self.chain_spec - .is_fork_active_at_timestamp(OptimismHardfork::Canyon, block.timestamp)) - .then_some(1), - }); - } - - Ok((receipts, cumulative_gas_used)) - } - - fn apply_post_execution_changes( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - _receipts: &[Receipt], - ) -> Result { - let balance_increments = - post_block_balance_increments(&self.chain_spec.clone(), block, total_difficulty); - // increment balances - self.state - .increment_balances(balance_increments) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - - Ok(Requests::default()) - } - - fn state_ref(&self) -> &State { - &self.state - } - - fn state_mut(&mut self) -> &mut State { - &mut self.state - } - - fn with_state_hook(&mut self, hook: Option>) { - self.system_caller.with_state_hook(hook); - } - - fn finish(&mut self) -> BundleState { - self.state.merge_transitions(BundleRetention::Reverts); - self.state.take_bundle() - } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - receipts: &[Receipt], - _requests: &Requests, - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec.clone(), receipts) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::OpChainSpec; - use alloy_consensus::TxEip1559; - use alloy_primitives::{b256, Address, StorageKey, StorageValue}; - use reth_chainspec::MIN_TRANSACTION_GAS; - use reth_evm::execute::{BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider}; - use reth_optimism_chainspec::{optimism_deposit_tx_signature, OpChainSpecBuilder}; - use reth_primitives::{Account, Block, BlockBody, Signature, Transaction, TransactionSigned}; - use reth_revm::{ - database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, - }; - use std::{collections::HashMap, str::FromStr}; - - fn create_op_state_provider() -> StateProviderTest { - let mut db = StateProviderTest::default(); - - let 
l1_block_contract_account = - Account { balance: U256::ZERO, bytecode_hash: None, nonce: 1 }; - - let mut l1_block_storage = HashMap::default(); - // base fee - l1_block_storage.insert(StorageKey::with_last_byte(1), StorageValue::from(1000000000)); - // l1 fee overhead - l1_block_storage.insert(StorageKey::with_last_byte(5), StorageValue::from(188)); - // l1 fee scalar - l1_block_storage.insert(StorageKey::with_last_byte(6), StorageValue::from(684000)); - // l1 free scalars post ecotone - l1_block_storage.insert( - StorageKey::with_last_byte(3), - StorageValue::from_str( - "0x0000000000000000000000000000000000001db0000d27300000000000000005", - ) - .unwrap(), - ); - - db.insert_account(L1_BLOCK_CONTRACT, l1_block_contract_account, None, l1_block_storage); - - db - } - - fn executor_provider( - chain_spec: Arc, - ) -> BasicBlockExecutorProvider { - let strategy_factory = - OpExecutionStrategyFactory::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)); - - BasicBlockExecutorProvider::new(strategy_factory) - } - - #[test] - fn op_deposit_fields_pre_canyon() { - let header = Header { - timestamp: 1, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - "83465d1e7d01578c0d609be33570f91242f013e9e295b0879905346abbd63731" - ), - ..Default::default() - }; - - let mut db = create_op_state_provider(); - - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - db.insert_account(addr, account, None, HashMap::default()); - - let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().regolith_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: MIN_TRANSACTION_GAS, - to: addr.into(), - ..Default::default() - }), - Signature::test_signature(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(op_alloy_consensus::TxDeposit { - from: addr, - to: addr.into(), - gas_limit: MIN_TRANSACTION_GAS, - ..Default::default() - }), - Signature::test_signature(), - ); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // make sure the L1 block contract state is preloaded. 
- executor.with_state_mut(|state| { - state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); - }); - - // Attempt to execute a block with one deposit and one non-deposit transaction - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { - header, - body: BlockBody { - transactions: vec![tx, tx_deposit], - ..Default::default() - }, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .into(), - ) - .unwrap(); - - let receipts = executor.receipts(); - let tx_receipt = receipts[0][0].as_ref().unwrap(); - let deposit_receipt = receipts[0][1].as_ref().unwrap(); - - // deposit_receipt_version is not present in pre canyon transactions - assert!(deposit_receipt.deposit_receipt_version.is_none()); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } - - #[test] - fn op_deposit_fields_post_canyon() { - // ensure_create2_deployer will fail if timestamp is set to less then 2 - let header = Header { - timestamp: 2, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - "fffc85c4004fd03c7bfbe5491fae98a7473126c099ac11e8286fd0013f15f908" - ), - ..Default::default() - }; - - let mut db = create_op_state_provider(); - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - - db.insert_account(addr, account, None, HashMap::default()); - - let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().canyon_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: MIN_TRANSACTION_GAS, - to: addr.into(), - ..Default::default() - }), - Signature::test_signature(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(op_alloy_consensus::TxDeposit { - from: addr, - to: addr.into(), - gas_limit: MIN_TRANSACTION_GAS, - ..Default::default() - }), - optimism_deposit_tx_signature(), - ); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // make sure the L1 block contract state is preloaded. 
- executor.with_state_mut(|state| { - state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); - }); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { - header, - body: BlockBody { - transactions: vec![tx, tx_deposit], - ..Default::default() - }, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .into(), - ) - .expect("Executing a block while canyon is active should not fail"); - - let receipts = executor.receipts(); - let tx_receipt = receipts[0][0].as_ref().unwrap(); - let deposit_receipt = receipts[0][1].as_ref().unwrap(); - - // deposit_receipt_version is set to 1 for post canyon deposit transactions - assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } -} diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 175b2d4bf413..9492bb8c429a 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -4,8 +4,8 @@ use std::sync::Arc; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_chainspec::{EthChainSpec, Hardforks}; -use reth_evm::ConfigureEvm; -use reth_network::{NetworkConfig, NetworkHandle, NetworkManager}; +use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; +use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; use reth_node_api::{ AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives, }; @@ -20,7 +20,7 @@ use reth_node_builder::{ }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OptimismBeaconConsensus; -use reth_optimism_evm::{OpExecutorProvider, OptimismEvmConfig}; +use reth_optimism_evm::{OpExecutionStrategyFactory, OptimismEvmConfig}; use reth_optimism_rpc::OpEthApi; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::{Block, Header}; @@ -184,14 +184,16 @@ where Node: FullNodeTypes>, { type EVM = OptimismEvmConfig; - type Executor = OpExecutorProvider; + type Executor = BasicBlockExecutorProvider; async fn build_evm( self, ctx: &BuilderContext, ) -> eyre::Result<(Self::EVM, Self::Executor)> { let evm_config = OptimismEvmConfig::new(ctx.chain_spec()); - let executor = OpExecutorProvider::new(ctx.chain_spec(), evm_config.clone()); + let strategy_factory = + OpExecutionStrategyFactory::new(ctx.chain_spec(), evm_config.clone()); + let executor = BasicBlockExecutorProvider::new(strategy_factory); Ok((evm_config, executor)) } @@ -430,6 +432,7 @@ where let network_config = self.network_config(ctx)?; let network = NetworkManager::builder(network_config).await?; let handle = ctx.start_network(network, pool); + info!(target: "reth::cli", enode=%handle.local_node_record(), "P2P networking initialized"); Ok(handle) } diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index b6ab9b87956b..3ed00c49aec5 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -146,12 +146,12 @@ where } } -/// Constructs an Ethereum transaction payload from the transactions sent through the +/// Constructs an Optimism transaction payload from the transactions sent through the /// Payload attributes by the sequencer. 
If the `no_tx_pool` argument is passed in /// the payload attributes, the transaction pool will be ignored and the only transactions /// included in the payload will be those sent through the attributes. /// -/// Given build arguments including an Ethereum client, transaction pool, +/// Given build arguments including an Optimism client, transaction pool, /// and configuration, this function creates a transaction payload. Returns /// a result indicating success with the payload or an error in case of failure. #[inline] diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index 9047768892a9..88ab99272dbd 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -27,6 +27,7 @@ alloy-rlp.workspace = true alloy-primitives.workspace = true revm.workspace = true alloy-consensus.workspace = true +alloy-eips.workspace = true # async tokio = { workspace = true, features = ["sync", "time"] } diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index fcc8be9a88ed..4274d451e43f 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -10,6 +10,7 @@ use crate::metrics::PayloadBuilderMetrics; use alloy_consensus::constants::EMPTY_WITHDRAWALS; +use alloy_eips::merge::SLOT_DURATION; use alloy_primitives::{Bytes, B256, U256}; use futures_core::ready; use futures_util::FutureExt; @@ -22,8 +23,7 @@ use reth_payload_primitives::{ BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, }; use reth_primitives::{ - constants::{RETH_CLIENT_VERSION, SLOT_DURATION}, - proofs, BlockNumberOrTag, SealedBlock, Withdrawals, + constants::RETH_CLIENT_VERSION, proofs, BlockNumberOrTag, SealedBlock, Withdrawals, }; use reth_provider::{ BlockReaderIdExt, BlockSource, CanonStateNotification, ProviderError, StateProviderFactory, diff --git a/crates/payload/validator/Cargo.toml b/crates/payload/validator/Cargo.toml index 619b99f28de2..2662b987f889 100644 --- a/crates/payload/validator/Cargo.toml +++ b/crates/payload/validator/Cargo.toml @@ -18,5 +18,4 @@ reth-primitives.workspace = true reth-rpc-types-compat.workspace = true # alloy -alloy-eips.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index 3ec7b206a5b1..9952815fd982 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -8,8 +8,9 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use alloy_eips::eip7685::Requests; -use alloy_rpc_types::engine::{ExecutionPayload, MaybeCancunPayloadFields, PayloadError}; +use alloy_rpc_types::engine::{ + ExecutionPayload, ExecutionPayloadSidecar, MaybeCancunPayloadFields, PayloadError, +}; use reth_chainspec::EthereumHardforks; use reth_primitives::SealedBlock; use reth_rpc_types_compat::engine::payload::try_into_block; @@ -112,15 +113,12 @@ impl ExecutionPayloadValidator { pub fn ensure_well_formed_payload( &self, payload: ExecutionPayload, - cancun_fields: MaybeCancunPayloadFields, - execution_requests: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result { let expected_hash = payload.block_hash(); // First parse the block - let sealed_block = - try_into_block(payload, cancun_fields.parent_beacon_block_root(), execution_requests)? 
- .seal_slow(); + let sealed_block = try_into_block(payload, &sidecar)?.seal_slow(); // Ensure the hash included in the payload matches the block hash if expected_hash != sealed_block.hash() { @@ -139,7 +137,7 @@ impl ExecutionPayloadValidator { // cancun active but excess blob gas not present return Err(PayloadError::PostCancunBlockWithoutExcessBlobGas) } - if cancun_fields.as_ref().is_none() { + if sidecar.cancun().is_none() { // cancun active but cancun fields not present return Err(PayloadError::PostCancunWithoutCancunFields) } @@ -156,7 +154,7 @@ impl ExecutionPayloadValidator { // cancun not active but excess blob gas present return Err(PayloadError::PreCancunBlockWithExcessBlobGas) } - if cancun_fields.as_ref().is_some() { + if sidecar.cancun().is_some() { // cancun not active but cancun fields present return Err(PayloadError::PreCancunWithCancunFields) } @@ -175,7 +173,10 @@ impl ExecutionPayloadValidator { } // EIP-4844 checks - self.ensure_matching_blob_versioned_hashes(&sealed_block, &cancun_fields)?; + self.ensure_matching_blob_versioned_hashes( + &sealed_block, + &sidecar.cancun().cloned().into(), + )?; Ok(sealed_block) } diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index 482852bdccd4..86bd9349585e 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -1,7 +1,6 @@ //! Ethereum protocol-related constants use alloy_primitives::{address, b256, Address, B256, U256}; -use core::time::Duration; /// Gas units, for example [`GIGAGAS`]. pub mod gas_units; @@ -10,27 +9,9 @@ pub use gas_units::{GIGAGAS, KILOGAS, MEGAGAS}; /// The client version: `reth/v{major}.{minor}.{patch}` pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION")); -/// The first four bytes of the call data for a function call specifies the function to be called. -pub const SELECTOR_LEN: usize = 4; - -/// An EPOCH is a series of 32 slots. -pub const EPOCH_SLOTS: u64 = 32; - -/// The duration of a slot in seconds. -/// -/// This is the time period of 12 seconds in which a randomly chosen validator has time to propose a -/// block. -pub const SLOT_DURATION: Duration = Duration::from_secs(12); - -/// An EPOCH is a series of 32 slots (~6.4min). -pub const EPOCH_DURATION: Duration = Duration::from_secs(12 * EPOCH_SLOTS); - /// The default block nonce in the beacon consensus pub const BEACON_NONCE: u64 = 0u64; -/// The default Ethereum block gas limit. -pub const ETHEREUM_BLOCK_GAS_LIMIT: u64 = 30_000_000; - /// The minimum tx fee below which the txpool will reject the transaction. /// /// Configured to `7` WEI which is the lowest possible value of base fee under mainnet EIP-1559 @@ -85,15 +66,6 @@ pub const OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6; /// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. pub const BASE_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 10; -/// Multiplier for converting gwei to wei. -pub const GWEI_TO_WEI: u64 = 1_000_000_000; - -/// Multiplier for converting finney (milliether) to wei. -pub const FINNEY_TO_WEI: u128 = (GWEI_TO_WEI as u128) * 1_000_000; - -/// Multiplier for converting ether to wei. 
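
In the payload-validator hunk above, `ensure_well_formed_payload` now takes a single `ExecutionPayloadSidecar` in place of separate `MaybeCancunPayloadFields` and `Requests` arguments. A call-site sketch under the assumption that the alloy sidecar type exposes `none()` and `v3(...)` constructors; verify against the pinned alloy-rpc-types version:

```rust
use alloy_rpc_types::engine::{
    CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, PayloadError,
};
use reth_chainspec::EthereumHardforks;
use reth_payload_validator::ExecutionPayloadValidator;
use reth_primitives::SealedBlock;

// Pre-Cancun: no auxiliary fields travel with the payload.
fn validate_pre_cancun<Spec: EthereumHardforks>(
    validator: &ExecutionPayloadValidator<Spec>,
    payload: ExecutionPayload,
) -> Result<SealedBlock, PayloadError> {
    validator.ensure_well_formed_payload(payload, ExecutionPayloadSidecar::none())
}

// Post-Cancun: the versioned-hash fields are bundled into the sidecar,
// which the validator then inspects via `sidecar.cancun()`.
fn validate_post_cancun<Spec: EthereumHardforks>(
    validator: &ExecutionPayloadValidator<Spec>,
    payload: ExecutionPayload,
    cancun: CancunPayloadFields,
) -> Result<SealedBlock, PayloadError> {
    validator.ensure_well_formed_payload(payload, ExecutionPayloadSidecar::v3(cancun))
}
```
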
-pub const ETH_TO_WEI: u128 = FINNEY_TO_WEI * 1000; - /// Sepolia genesis hash: `0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9` pub const SEPOLIA_GENESIS_HASH: B256 = b256!("25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9"); diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index e2d19e4d4ff5..5c317dc49a23 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -1,7 +1,5 @@ //! Receipt abstraction -use alloc::fmt; - use alloy_consensus::TxReceipt; use reth_codecs::Compact; use serde::{Deserialize, Serialize}; @@ -14,10 +12,6 @@ impl FullReceipt for T where T: Receipt + Compact {} /// Abstraction of a receipt. pub trait Receipt: TxReceipt - + Clone - + fmt::Debug - + PartialEq - + Eq + Default + alloy_rlp::Encodable + alloy_rlp::Decodable diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 566a114bebf9..dd5c75310ee0 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -20,9 +20,6 @@ reth-trie-common.workspace = true revm-primitives = { workspace = true, features = ["serde"] } reth-codecs = { workspace = true, optional = true } -# op-reth -reth-optimism-chainspec = { workspace = true, optional = true } - # ethereum alloy-consensus.workspace = true alloy-primitives = { workspace = true, features = ["rand", "rlp"] } @@ -34,14 +31,15 @@ alloy-eips = { workspace = true, features = ["serde"] } # optimism op-alloy-rpc-types = { workspace = true, optional = true } op-alloy-consensus = { workspace = true, features = [ - "arbitrary", + "arbitrary", + "serde", ], optional = true } # crypto secp256k1 = { workspace = true, features = [ - "global-context", - "recovery", - "rand", + "global-context", + "recovery", + "rand", ], optional = true } k256.workspace = true # for eip-4844 @@ -83,9 +81,9 @@ test-fuzz.workspace = true criterion.workspace = true pprof = { workspace = true, features = [ - "flamegraph", - "frame-pointer", - "criterion", + "flamegraph", + "frame-pointer", + "criterion", ] } [features] @@ -128,27 +126,26 @@ arbitrary = [ ] secp256k1 = ["dep:secp256k1"] c-kzg = [ - "dep:c-kzg", - "alloy-consensus/kzg", - "alloy-eips/kzg", - "revm-primitives/c-kzg", + "dep:c-kzg", + "alloy-consensus/kzg", + "alloy-eips/kzg", + "revm-primitives/c-kzg", ] optimism = [ - "dep:op-alloy-consensus", - "dep:reth-optimism-chainspec", - "reth-codecs?/optimism", - "revm-primitives/optimism", + "dep:op-alloy-consensus", + "reth-codecs?/optimism", + "revm-primitives/optimism", ] alloy-compat = [ - "dep:alloy-rpc-types", - "dep:alloy-serde", - "dep:op-alloy-rpc-types", + "dep:alloy-rpc-types", + "dep:alloy-serde", + "dep:op-alloy-rpc-types", ] test-utils = [ "reth-primitives-traits/test-utils", "reth-chainspec/test-utils", "reth-codecs?/test-utils", - "reth-trie-common/test-utils" + "reth-trie-common/test-utils", ] serde-bincode-compat = [ "alloy-consensus/serde-bincode-compat", diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 717b0446beae..a06979300acd 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1090,4 +1090,13 @@ mod tests { let block = block.seal_slow(); assert_eq!(sealed, block.hash()); } + + #[test] + fn empty_block_rlp() { + let body = BlockBody::default(); + let mut buf = Vec::new(); + body.encode(&mut buf); + let decoded = BlockBody::decode(&mut buf.as_slice()).unwrap(); + assert_eq!(body, decoded); + } } diff --git 
a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 1d410da1ea8f..7798433d05dd 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -56,8 +56,6 @@ mod variant; #[cfg(feature = "optimism")] use op_alloy_consensus::TxDeposit; #[cfg(feature = "optimism")] -use reth_optimism_chainspec::optimism_deposit_tx_signature; -#[cfg(feature = "optimism")] pub use tx_type::DEPOSIT_TX_TYPE_ID; #[cfg(any(test, feature = "reth-codec"))] use tx_type::{ @@ -955,7 +953,7 @@ impl TransactionSignedNoHash { // transactions with an empty signature // // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock - if self.is_legacy() && self.signature == optimism_deposit_tx_signature() { + if self.is_legacy() && self.signature == TxDeposit::signature() { return Some(Address::ZERO) } } @@ -1530,7 +1528,7 @@ impl Decodable2718 for TransactionSigned { #[cfg(feature = "optimism")] TxType::Deposit => Ok(Self::from_transaction_and_signature( Transaction::Deposit(TxDeposit::decode(buf)?), - optimism_deposit_tx_signature(), + TxDeposit::signature(), )), } } @@ -1575,8 +1573,7 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { } #[cfg(feature = "optimism")] - let signature = - if transaction.is_deposit() { optimism_deposit_tx_signature() } else { signature }; + let signature = if transaction.is_deposit() { TxDeposit::signature() } else { signature }; Ok(Self::from_transaction_and_signature(transaction, signature)) } diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index edc1427d1fe8..e901cbfc08dc 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -1,11 +1,9 @@ #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] use crate::{Signature, Transaction, TransactionSigned}; -use alloy_consensus::{ - constants::EIP4844_TX_TYPE_ID, transaction::TxEip4844, TxEip4844WithSidecar, -}; -use alloy_primitives::{keccak256, TxHash}; -use alloy_rlp::{Decodable, Error as RlpError, Header}; +use alloy_consensus::{constants::EIP4844_TX_TYPE_ID, TxEip4844WithSidecar}; +use alloy_primitives::TxHash; +use alloy_rlp::Header; use serde::{Deserialize, Serialize}; #[doc(inline)] @@ -14,8 +12,6 @@ pub use alloy_eips::eip4844::BlobTransactionSidecar; #[cfg(feature = "c-kzg")] pub use alloy_eips::eip4844::BlobTransactionValidationError; -use alloc::vec::Vec; - /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their /// corresponding proofs. /// @@ -36,7 +32,7 @@ impl BlobTransaction { /// Constructs a new [`BlobTransaction`] from a [`TransactionSigned`] and a /// [`BlobTransactionSidecar`]. /// - /// Returns an error if the signed transaction is not [`TxEip4844`] + /// Returns an error if the signed transaction is not [`Transaction::Eip4844`] pub fn try_from_signed( tx: TransactionSigned, sidecar: BlobTransactionSidecar, @@ -57,7 +53,7 @@ impl BlobTransaction { /// Verifies that the transaction's blob data, commitments, and proofs are all valid. /// - /// See also [`TxEip4844::validate_blob`] + /// See also [`alloy_consensus::TxEip4844::validate_blob`] #[cfg(feature = "c-kzg")] pub fn validate( &self, @@ -163,7 +159,7 @@ impl BlobTransaction { // The payload length is the length of the `transaction_payload_body` list, plus the // length of the blobs, commitments, and proofs.
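The `payload_length` computation just below composes the inner signed-transaction length with the sidecar's RLP field lengths. A self-contained sketch of the same bookkeeping using plain `alloy_rlp` (the concrete lengths are made up for illustration):

```rust
use alloy_rlp::Header;

fn main() {
    // Hypothetical lengths: the RLP length of the signed tx body and the
    // combined RLP length of the blobs, commitments, and proofs.
    let tx_length = 120usize;
    let sidecar_fields_length = 131_204usize;

    // The outer list header spans both parts, mirroring `payload_length` below.
    let outer = Header { list: true, payload_length: tx_length + sidecar_fields_length };

    let mut buf = Vec::new();
    outer.encode(&mut buf);
    println!("outer header: {} bytes, total encoding: {} bytes", buf.len(), buf.len() + outer.payload_length);
}
```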
- let payload_length = tx_length + self.transaction.sidecar.fields_len(); + let payload_length = tx_length + self.transaction.sidecar.rlp_encoded_fields_length(); // We use the calculated payload len to construct the first list header, which encompasses // everything in the tx - the length of the second, inner list header is part of @@ -188,74 +184,17 @@ impl BlobTransaction { /// Note: this should be used only when implementing other RLP decoding methods, and does not /// represent the full RLP decoding of the `PooledTransactionsElement` type. pub(crate) fn decode_inner(data: &mut &[u8]) -> alloy_rlp::Result { - // decode the _first_ list header for the rest of the transaction - let outer_header = Header::decode(data)?; - if !outer_header.list { - return Err(RlpError::Custom("PooledTransactions blob tx must be encoded as a list")) - } - - let outer_remaining_len = data.len(); - - // Now we need to decode the inner 4844 transaction and its signature: - // - // `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]` - let inner_header = Header::decode(data)?; - if !inner_header.list { - return Err(RlpError::Custom( - "PooledTransactions inner blob tx must be encoded as a list", - )) - } - - let inner_remaining_len = data.len(); - - // inner transaction - let transaction = TxEip4844::decode_fields(data)?; - - // signature - let signature = Signature::decode_rlp_vrs(data)?; - - // the inner header only decodes the transaction and signature, so we check the length here - let inner_consumed = inner_remaining_len - data.len(); - if inner_consumed != inner_header.payload_length { - return Err(RlpError::UnexpectedLength) - } - - // All that's left are the blobs, commitments, and proofs - let sidecar = BlobTransactionSidecar::decode(data)?; - - // # Calculating the hash - // - // The full encoding of the `PooledTransaction` response is: - // `tx_type (0x03) || rlp([tx_payload_body, blobs, commitments, proofs])` - // - // The transaction hash however, is: - // `keccak256(tx_type (0x03) || rlp(tx_payload_body))` - // - // Note that this is `tx_payload_body`, not `[tx_payload_body]`, which would be - // `[[chain_id, nonce, max_priority_fee_per_gas, ...]]`, i.e. a list within a list. - // - // Because the pooled transaction encoding is different than the hash encoding for - // EIP-4844 transactions, we do not use the original buffer to calculate the hash. - // - // Instead, we use `encode_with_signature`, which RLP encodes the transaction with a - // signature for hashing without a header. We then hash the result. - let mut buf = Vec::new(); - transaction.encode_with_signature(&signature, &mut buf, false); - let hash = keccak256(&buf); - - // the outer header is for the entire transaction, so we check the length here - let outer_consumed = outer_remaining_len - data.len(); - if outer_consumed != outer_header.payload_length { - return Err(RlpError::UnexpectedLength) - } + let (transaction, signature, hash) = + TxEip4844WithSidecar::decode_signed_fields(data)?.into_parts(); - Ok(Self { transaction: TxEip4844WithSidecar { tx: transaction, sidecar }, hash, signature }) + Ok(Self { transaction, hash, signature }) } } /// Generates a [`BlobTransactionSidecar`] structure containing blobs, commitments, and proofs. 
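The long comment removed above encoded an invariant worth keeping in mind: the EIP-4844 transaction hash commits to `tx_type || rlp(tx_payload_body)` and deliberately excludes the blobs, commitments, and proofs that the pooled network encoding carries. A small self-contained restatement (the helper name is ours, not reth's):

```rust
use alloy_primitives::{keccak256, B256};

/// Illustrative only: hash = keccak256(tx_type || rlp(tx_payload_body)),
/// with no sidecar data included.
fn blob_tx_hash(rlp_tx_payload_body: &[u8]) -> B256 {
    const EIP4844_TX_TYPE_ID: u8 = 0x03;
    let mut buf = Vec::with_capacity(1 + rlp_tx_payload_body.len());
    buf.push(EIP4844_TX_TYPE_ID);
    buf.extend_from_slice(rlp_tx_payload_body);
    keccak256(&buf)
}

fn main() {
    // Dummy payload body bytes, purely to exercise the helper.
    println!("{}", blob_tx_hash(&[0xc0]));
}
```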
#[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar { + use alloc::vec::Vec; use alloy_eips::eip4844::env_settings::EnvKzgSettings; use c_kzg::{KzgCommitment, KzgProof}; @@ -285,12 +224,12 @@ pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar mod tests { use super::*; use crate::{kzg::Blob, PooledTransactionsElement}; + use alloc::vec::Vec; use alloy_eips::{ eip2718::{Decodable2718, Encodable2718}, eip4844::Bytes48, }; use alloy_primitives::hex; - use alloy_rlp::Encodable; use std::{fs, path::PathBuf, str::FromStr}; #[test] @@ -392,7 +331,7 @@ mod tests { let mut encoded_rlp = Vec::new(); // Encode the inner data of the BlobTransactionSidecar into RLP - sidecar.encode(&mut encoded_rlp); + sidecar.rlp_encode_fields(&mut encoded_rlp); // Assert the equality between the expected RLP from the JSON and the encoded RLP assert_eq!(json_value.get("rlp").unwrap().as_str().unwrap(), hex::encode(&encoded_rlp)); @@ -423,10 +362,11 @@ mod tests { let mut encoded_rlp = Vec::new(); // Encode the inner data of the BlobTransactionSidecar into RLP - sidecar.encode(&mut encoded_rlp); + sidecar.rlp_encode_fields(&mut encoded_rlp); // Decode the RLP-encoded data back into a BlobTransactionSidecar - let decoded_sidecar = BlobTransactionSidecar::decode(&mut encoded_rlp.as_slice()).unwrap(); + let decoded_sidecar = + BlobTransactionSidecar::rlp_decode_fields(&mut encoded_rlp.as_slice()).unwrap(); // Assert the equality between the original BlobTransactionSidecar and the decoded one assert_eq!(sidecar, decoded_sidecar); diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 39c0f92fda88..5bfdab8e68e9 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -4,9 +4,6 @@ use alloy_rlp::{Decodable, Error as RlpError}; pub use alloy_primitives::Signature; -#[cfg(feature = "optimism")] -use reth_optimism_chainspec::optimism_deposit_tx_signature; - /// The order of the secp256k1 curve, divided by two. Signatures that should be checked according /// to EIP-2 should have an S value less than or equal to this. 
/// @@ -82,7 +79,7 @@ pub fn legacy_parity(signature: &Signature, chain_id: Option) -> Parity { // transactions with an empty signature // // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock - if *signature == optimism_deposit_tx_signature() { + if *signature == op_alloy_consensus::TxDeposit::signature() { return Parity::Parity(false) } Parity::NonEip155(signature.v().y_parity()) diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 8f670d364b8a..9c1201668663 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -31,6 +31,7 @@ revm.workspace = true reth-trie.workspace = true reth-ethereum-forks.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true [features] default = ["std"] diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index ca055a77ea10..eb280408ecdb 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -5,8 +5,8 @@ use alloy_eips::{eip4844::BlobAndProofV1, eip7685::Requests}; use alloy_primitives::{BlockHash, BlockNumber, B256, U64}; use alloy_rpc_types_engine::{ CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, - ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceState, - ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, + ExecutionPayloadInputV2, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV3, + ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, }; use async_trait::async_trait; use jsonrpsee_core::RpcResult; @@ -140,7 +140,11 @@ where self.inner .validator .validate_version_specific_fields(EngineApiMessageVersion::V1, payload_or_attrs)?; - Ok(self.inner.beacon_consensus.new_payload(payload, None, None).await?) + Ok(self + .inner + .beacon_consensus + .new_payload(payload, ExecutionPayloadSidecar::none()) + .await?) } /// See also @@ -156,7 +160,11 @@ where self.inner .validator .validate_version_specific_fields(EngineApiMessageVersion::V2, payload_or_attrs)?; - Ok(self.inner.beacon_consensus.new_payload(payload, None, None).await?) + Ok(self + .inner + .beacon_consensus + .new_payload(payload, ExecutionPayloadSidecar::none()) + .await?) } /// See also @@ -176,9 +184,17 @@ where .validator .validate_version_specific_fields(EngineApiMessageVersion::V3, payload_or_attrs)?; - let cancun_fields = CancunPayloadFields { versioned_hashes, parent_beacon_block_root }; - - Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields), None).await?) + Ok(self + .inner + .beacon_consensus + .new_payload( + payload, + ExecutionPayloadSidecar::v3(CancunPayloadFields { + versioned_hashes, + parent_beacon_block_root, + }), + ) + .await?) } /// See also @@ -187,8 +203,6 @@ where payload: ExecutionPayloadV3, versioned_hashes: Vec, parent_beacon_block_root: B256, - // TODO(onbjerg): Figure out why we even get these here, since we'll check the requests - // from execution against the requests root in the header. execution_requests: Requests, ) -> EngineApiResult { let payload = ExecutionPayload::from(payload); @@ -201,14 +215,16 @@ where .validator .validate_version_specific_fields(EngineApiMessageVersion::V4, payload_or_attrs)?; - let cancun_fields = CancunPayloadFields { versioned_hashes, parent_beacon_block_root }; - - // HACK(onbjerg): We should have a pectra payload fields struct, this is just a temporary - // workaround. 
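For reference, the three `ExecutionPayloadSidecar` constructors that replace the old pair of `Option` arguments across the surrounding `new_payload` endpoints. A sketch based on the alloy-rpc-types-engine API as used in this diff; the values are placeholders:

```rust
use alloy_eips::eip7685::Requests;
use alloy_primitives::B256;
use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar};

fn main() {
    let cancun = CancunPayloadFields {
        versioned_hashes: vec![B256::ZERO], // illustrative values only
        parent_beacon_block_root: B256::ZERO,
    };

    // Pre-Cancun (V1/V2): no extra consensus data travels with the payload.
    let _v1 = ExecutionPayloadSidecar::none();
    // Cancun (V3): blob versioned hashes + parent beacon block root (EIP-4788).
    let _v3 = ExecutionPayloadSidecar::v3(cancun.clone());
    // Prague (V4): additionally carries the EIP-7685 execution requests.
    let _v4 = ExecutionPayloadSidecar::v4(cancun, Requests::default());
}
```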
Ok(self .inner .beacon_consensus - .new_payload(payload, Some(cancun_fields), Some(execution_requests)) + .new_payload( + payload, + ExecutionPayloadSidecar::v4( + CancunPayloadFields { versioned_hashes, parent_beacon_block_root }, + execution_requests, + ), + ) .await?) } diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 007a62db045b..febbc291e35e 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -3,7 +3,8 @@ use alloy_primitives::{Bytes, Sealable, U256}; use alloy_rlp::{Decodable, Error as RlpError}; use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadV1, PayloadError, + ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadSidecar, ExecutionPayloadV1, + PayloadError, }; use assert_matches::assert_matches; use reth_primitives::{proofs, Block, SealedBlock, SealedHeader, TransactionSigned, Withdrawals}; @@ -75,7 +76,10 @@ fn payload_validation() { b }); - assert_matches!(try_into_sealed_block(block_with_valid_extra_data, None, None), Ok(_)); + assert_matches!( + try_into_sealed_block(block_with_valid_extra_data, &ExecutionPayloadSidecar::none()), + Ok(_) + ); // Invalid extra data let block_with_invalid_extra_data = Bytes::from_static(&[0; 33]); @@ -84,7 +88,7 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(invalid_extra_data_block, None, None), + try_into_sealed_block(invalid_extra_data_block, &ExecutionPayloadSidecar::none()), Err(PayloadError::ExtraData(data)) if data == block_with_invalid_extra_data ); @@ -94,7 +98,7 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(block_with_zero_base_fee, None, None), + try_into_sealed_block(block_with_zero_base_fee, &ExecutionPayloadSidecar::none()), Err(PayloadError::BaseFee(val)) if val.is_zero() ); @@ -113,7 +117,7 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(block_with_ommers.clone(), None, None), + try_into_sealed_block(block_with_ommers.clone(), &ExecutionPayloadSidecar::none()), Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_ommers.block_hash() ); @@ -124,7 +128,7 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(block_with_difficulty.clone(), None, None), + try_into_sealed_block(block_with_difficulty.clone(), &ExecutionPayloadSidecar::none()), Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_difficulty.block_hash() ); @@ -134,9 +138,8 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(block_with_nonce.clone(), None, None), + try_into_sealed_block(block_with_nonce.clone(), &ExecutionPayloadSidecar::none()), Err(PayloadError::BlockHash { consensus, .. 
}) if consensus == block_with_nonce.block_hash() - ); // Valid block diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 9993b477a662..9bf35d850af6 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -219,17 +219,17 @@ pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching - let maybe_pending = LoadPendingBlock::provider(self) + if let Some(pending_block) = LoadPendingBlock::provider(self) .pending_block_with_senders() - .map_err(Self::Error::from_eth_err)?; - return if maybe_pending.is_some() { - Ok(maybe_pending.map(Arc::new)) - } else { - // If no pending block from provider, try to get local pending block - return match self.local_pending_block().await? { - Some((block, _)) => Ok(Some(Arc::new(block))), - None => Ok(None), - }; + .map_err(Self::Error::from_eth_err)? + { + return Ok(Some(Arc::new(pending_block))); + } + + // If no pending block from provider, try to get local pending block + return match self.local_pending_block().await? { + Some((block, _)) => Ok(Some(Arc::new(block))), + None => Ok(None), }; } diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index b43b34305bd9..0acf6646294f 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -544,6 +544,16 @@ pub trait Call: LoadState + SpawnBlocking { /// /// This returns the configured [`EnvWithHandlerCfg`] for the given [`TransactionRequest`] at /// the given [`BlockId`] and with configured call settings: `prepare_call_env`. + /// + /// This is primarily used by `eth_call`. + /// + /// # Blocking behaviour + /// + /// This assumes executing the call is relatively more expensive on IO than CPU because it + /// transacts a single transaction on an empty in memory database. Because `eth_call`s are + /// usually allowed to consume a lot of gas, this also allows a lot of memory operations, so + /// we assume this is not primarily CPU bound and instead spawn the call on a regular tokio task, + /// where blocking IO is less problematic. fn spawn_with_call_at( &self, request: TransactionRequest, @@ -561,7 +571,7 @@ pub trait Call: LoadState + SpawnBlocking { async move { let (cfg, block_env, at) = self.evm_env_at(at).await?; let this = self.clone(); - self.spawn_tracing(move |_| { + self.spawn_blocking_io(move |_| { let state = this.state_at_block_id(at)?; let mut db = CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); @@ -645,14 +655,14 @@ pub trait Call: LoadState + SpawnBlocking { /// Returns the index of the target transaction in the given iterator.
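The pending-block refactor in the `LoadBlock` hunk above flattens a nested `is_some`/`else` chain into an early return. The same control flow, restated as a tiny free-standing function with `u64` stand-ins for the block types:

```rust
fn resolve_pending(provider_pending: Option<u64>, local_pending: Option<u64>) -> Option<u64> {
    // Prefer the pending block the provider already tracks...
    if let Some(block) = provider_pending {
        return Some(block);
    }
    // ...and only fall back to building a local pending block otherwise.
    local_pending
}

fn main() {
    assert_eq!(resolve_pending(Some(1), Some(2)), Some(1));
    assert_eq!(resolve_pending(None, Some(2)), Some(2));
    assert_eq!(resolve_pending(None, None), None);
}
```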
fn replay_transactions_until<'a, DB, I>( &self, - db: &mut CacheDB, + db: &mut DB, cfg: CfgEnvWithHandlerCfg, block_env: BlockEnv, transactions: I, target_tx_hash: B256, ) -> Result where - DB: DatabaseRef, + DB: Database + DatabaseCommit, EthApiError: From, I: IntoIterator, { @@ -919,14 +929,15 @@ pub trait Call: LoadState + SpawnBlocking { /// Executes the requests again after an out of gas error to check if the error is gas related /// or not #[inline] - fn map_out_of_gas_err( + fn map_out_of_gas_err( &self, env_gas_limit: U256, mut env: EnvWithHandlerCfg, - db: &mut CacheDB>, + db: &mut DB, ) -> Self::Error where - S: StateProvider, + DB: Database, + EthApiError: From, { let req_gas_limit = env.tx.gas_limit; env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX); diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index f2fc13f5d03e..080d90dc3b00 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -272,6 +272,42 @@ pub trait LoadState: EthApiTypes { } } + /// Returns the next available nonce without gaps for the given address + /// Next available nonce is either the on chain nonce of the account or the highest consecutive + /// nonce in the pool + 1 + fn next_available_nonce( + &self, + address: Address, + ) -> impl Future> + Send + where + Self: SpawnBlocking, + { + self.spawn_blocking_io(move |this| { + // first fetch the on chain nonce of the account + let on_chain_account_nonce = this + .latest_state()? + .account_nonce(address) + .map_err(Self::Error::from_eth_err)? + .unwrap_or_default(); + + let mut next_nonce = on_chain_account_nonce; + // Retrieve the highest consecutive transaction for the sender from the transaction pool + if let Some(highest_tx) = this + .pool() + .get_highest_consecutive_transaction_by_sender(address, on_chain_account_nonce) + { + // Return the nonce of the highest consecutive transaction + 1 + next_nonce = highest_tx.nonce().checked_add(1).ok_or_else(|| { + Self::Error::from(EthApiError::InvalidTransaction( + RpcInvalidTransactionError::NonceMaxValue, + )) + })?; + } + + Ok(next_nonce) + }) + } + /// Returns the number of transactions sent from an address at the given block identifier. 
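The nonce-selection rule that `next_available_nonce` (added above) implements is worth stating on its own: take the on-chain account nonce, and if the pool holds a consecutive run of transactions starting there, take the highest such nonce plus one. A pure sketch with the pool lookup abstracted away:

```rust
fn next_available_nonce(on_chain: u64, highest_consecutive_in_pool: Option<u64>) -> Option<u64> {
    match highest_consecutive_in_pool {
        // `None` as a return value models the `NonceMaxValue` error path above.
        Some(highest) => highest.checked_add(1),
        None => Some(on_chain),
    }
}

fn main() {
    assert_eq!(next_available_nonce(5, None), Some(5));        // no pending txs in the pool
    assert_eq!(next_available_nonce(5, Some(7)), Some(8));     // pool holds nonces 5..=7
    assert_eq!(next_available_nonce(5, Some(u64::MAX)), None); // overflow is guarded
}
```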
/// /// If this is [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this will diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 981de8fa6c45..64056148cd38 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -117,7 +117,7 @@ pub trait Trace: LoadState { self.spawn_with_state_at_block(at, move |state| { let mut db = CacheDB::new(StateProviderDatabase::new(state)); let mut inspector = TracingInspector::new(config); - let (res, _) = this.inspect(StateCacheDbRefMutWrapper(&mut db), env, &mut inspector)?; + let (res, _) = this.inspect(&mut db, env, &mut inspector)?; f(inspector, res, db) }) } diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 54d60cb7abdf..d29787d7a23b 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -365,9 +365,8 @@ pub trait EthTransactions: LoadTransaction { // set nonce if not already set before if request.nonce.is_none() { - let nonce = self.transaction_count(from, Some(BlockId::pending())).await?; - // note: `.to()` can't panic because the nonce is constructed from a `u64` - request.nonce = Some(nonce.to()); + let nonce = self.next_available_nonce(from).await?; + request.nonce = Some(nonce); } let chain_id = self.chain_id(); diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 7422dcfb8a7b..627fd2b2df72 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -114,6 +114,13 @@ impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { self.0.block_hash(block_number) } + fn convert_block_hash( + &self, + hash_or_number: alloy_rpc_types::BlockHashOrNumber, + ) -> reth_errors::ProviderResult> { + self.0.convert_block_hash(hash_or_number) + } + fn canonical_hashes_range( &self, start: alloy_primitives::BlockNumber, @@ -121,21 +128,22 @@ impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { ) -> reth_errors::ProviderResult> { self.0.canonical_hashes_range(start, end) } +} - fn convert_block_hash( +impl StateProvider for StateProviderTraitObjWrapper<'_> { + fn storage( &self, - hash_or_number: alloy_rpc_types::BlockHashOrNumber, - ) -> reth_errors::ProviderResult> { - self.0.convert_block_hash(hash_or_number) + account: revm_primitives::Address, + storage_key: alloy_primitives::StorageKey, + ) -> reth_errors::ProviderResult> { + self.0.storage(account, storage_key) } -} -impl StateProvider for StateProviderTraitObjWrapper<'_> { - fn account_balance( + fn bytecode_by_hash( &self, - addr: revm_primitives::Address, - ) -> reth_errors::ProviderResult> { - self.0.account_balance(addr) + code_hash: B256, + ) -> reth_errors::ProviderResult> { + self.0.bytecode_by_hash(code_hash) } fn account_code( @@ -145,26 +153,18 @@ impl StateProvider for StateProviderTraitObjWrapper<'_> { self.0.account_code(addr) } - fn account_nonce( + fn account_balance( &self, addr: revm_primitives::Address, - ) -> reth_errors::ProviderResult> { - self.0.account_nonce(addr) - } - - fn bytecode_by_hash( - &self, - code_hash: B256, - ) -> reth_errors::ProviderResult> { - self.0.bytecode_by_hash(code_hash) + ) -> reth_errors::ProviderResult> { + self.0.account_balance(addr) } - fn storage( + fn account_nonce( &self, - account: revm_primitives::Address, - storage_key: alloy_primitives::StorageKey, - ) 
-> reth_errors::ProviderResult> { - self.0.storage(account, storage_key) + addr: revm_primitives::Address, + ) -> reth_errors::ProviderResult> { + self.0.account_nonce(addr) } } diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 57dd276e5cf4..c845d9683870 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -305,7 +305,7 @@ pub fn calculate_reward_percentiles_for_block( // the percentiles are monotonically increasing. let mut tx_index = 0; let mut cumulative_gas_used = transactions.first().map(|tx| tx.gas_used).unwrap_or_default(); - let mut rewards_in_block = Vec::new(); + let mut rewards_in_block = Vec::with_capacity(percentiles.len()); for percentile in percentiles { // Empty blocks should return in a zero row if transactions.is_empty() { diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 84e7ab8306df..065ac1acc204 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -1,11 +1,12 @@ //! An implementation of the eth gas price oracle, used for providing gas price estimates based on //! previous blocks. +use alloy_consensus::constants::GWEI_TO_WEI; use alloy_primitives::{B256, U256}; use alloy_rpc_types::BlockId; use derive_more::{Deref, DerefMut, From, Into}; use itertools::Itertools; -use reth_primitives::{constants::GWEI_TO_WEI, BlockNumberOrTag}; +use reth_primitives::BlockNumberOrTag; use reth_rpc_server_types::constants; use reth_storage_api::BlockReaderIdExt; use schnellru::{ByLength, LruMap}; diff --git a/crates/rpc/rpc-eth-types/src/revm_utils.rs b/crates/rpc/rpc-eth-types/src/revm_utils.rs index 25c54fd46777..7dc20c524219 100644 --- a/crates/rpc/rpc-eth-types/src/revm_utils.rs +++ b/crates/rpc/rpc-eth-types/src/revm_utils.rs @@ -315,7 +315,7 @@ where #[cfg(test)] mod tests { use super::*; - use reth_primitives::constants::GWEI_TO_WEI; + use alloy_consensus::constants::GWEI_TO_WEI; #[test] fn test_ensure_0_fallback() { diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index a673da967202..77db511e6251 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -183,7 +183,7 @@ pub fn build_block( ) -> Result>, EthApiError> { let mut calls: Vec = Vec::with_capacity(results.len()); let mut senders = Vec::with_capacity(results.len()); - let mut receipts = Vec::new(); + let mut receipts = Vec::with_capacity(results.len()); let mut log_index = 0; for (transaction_index, ((sender, result), tx)) in diff --git a/crates/rpc/rpc-server-types/src/constants.rs b/crates/rpc/rpc-server-types/src/constants.rs index 0bc441819323..48019745a34f 100644 --- a/crates/rpc/rpc-server-types/src/constants.rs +++ b/crates/rpc/rpc-server-types/src/constants.rs @@ -51,9 +51,9 @@ pub const DEFAULT_MAX_SIMULATE_BLOCKS: u64 = 256; /// The default eth historical proof window. pub const DEFAULT_ETH_PROOF_WINDOW: u64 = 0; -/// Maximum eth historical proof window. Equivalent to roughly one and a half months of data on a 12 -/// second block time, and a week on a 2 second block time. -pub const MAX_ETH_PROOF_WINDOW: u64 = 7 * 24 * 60 * 60 / 2; +/// Maximum eth historical proof window. Equivalent to roughly 6 months of data on a 12 +/// second block time, and a month on a 2 second block time. 
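Checking the arithmetic of the new doc comment against the constant defined just below (the "roughly 6 months" is 168 days exactly):

```rust
const MAX_ETH_PROOF_WINDOW: u64 = 28 * 24 * 60 * 60 / 2;

fn main() {
    assert_eq!(MAX_ETH_PROOF_WINDOW, 1_209_600); // window measured in blocks
    // At a 2-second block time: exactly 28 days, i.e. about a month.
    assert_eq!(MAX_ETH_PROOF_WINDOW * 2 / 86_400, 28);
    // At a 12-second block time: 168 days, which the comment rounds to 6 months.
    assert_eq!(MAX_ETH_PROOF_WINDOW * 12 / 86_400, 168);
}
```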
+pub const MAX_ETH_PROOF_WINDOW: u64 = 28 * 24 * 60 * 60 / 2; /// GPO specific constants pub mod gas_oracle { @@ -80,9 +80,8 @@ pub mod gas_oracle { /// The default gas limit for `eth_call` and adjacent calls. /// - /// This is different from the default to regular 30M block gas limit - /// [`ETHEREUM_BLOCK_GAS_LIMIT`](reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT) to allow - /// for more complex calls. + /// This is different from the regular 30M block gas limit `ETHEREUM_BLOCK_GAS_LIMIT` + /// to allow for more complex calls. pub const RPC_DEFAULT_GAS_CAP: u64 = 50_000_000; /// Allowed error ratio for gas estimation diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index b63b7453aeb0..b4c45a617812 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -9,7 +9,8 @@ use alloy_eips::{ use alloy_primitives::{B256, U256}; use alloy_rpc_types_engine::{ payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, - ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadError, + ExecutionPayload, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV2, + ExecutionPayloadV3, PayloadError, }; use reth_primitives::{ proofs::{self}, @@ -248,17 +249,18 @@ pub fn convert_block_to_payload_input_v2(value: SealedBlock) -> ExecutionPayload } } -/// Tries to create a new block (without a block hash) from the given payload and optional parent -/// beacon block root. +/// Tries to create a new unsealed block from the given payload and payload sidecar. +/// /// Performs additional validation of `extra_data` and `base_fee_per_gas` fields. /// -/// NOTE: The log bloom is assumed to be validated during serialization. +/// # Note +/// +/// The log bloom is assumed to be validated during serialization. /// /// See pub fn try_into_block( value: ExecutionPayload, - parent_beacon_block_root: Option, - execution_requests: Option, + sidecar: &ExecutionPayloadSidecar, ) -> Result { let mut base_payload = match value { ExecutionPayload::V1(payload) => try_payload_v1_to_block(payload)?, @@ -266,29 +268,30 @@ pub fn try_into_block( ExecutionPayload::V3(payload) => try_payload_v3_to_block(payload)?, }; - base_payload.header.parent_beacon_block_root = parent_beacon_block_root; - base_payload.header.requests_hash = execution_requests.map(|reqs| reqs.requests_hash()); + base_payload.header.parent_beacon_block_root = sidecar.parent_beacon_block_root(); + base_payload.header.requests_hash = sidecar.requests().map(Requests::requests_hash); Ok(base_payload) } -/// Tries to create a new block from the given payload and optional parent beacon block root. -/// -/// NOTE: Empty ommers, nonce and difficulty values are validated upon computing block hash and -/// comparing the value with `payload.block_hash`. +/// Tries to create a new sealed block from the given payload and payload sidecar. /// /// Uses [`try_into_block`] to convert from the [`ExecutionPayload`] to [`Block`] and seals the /// block with its hash. /// /// Uses [`validate_block_hash`] to validate the payload block hash and ultimately return the /// [`SealedBlock`]. +/// +/// # Note +/// +/// Empty ommers, nonce, difficulty, and execution request values are validated upon computing block +/// hash and comparing the value with `payload.block_hash`.
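Isolating what the sidecar contributes to the unsealed header in `try_into_block` above (the struct and function names here are ours; only `Requests::requests_hash` and the sidecar accessors are taken from the diff):

```rust
use alloy_eips::eip7685::Requests;
use alloy_primitives::B256;

struct SidecarHeaderFields {
    parent_beacon_block_root: Option<B256>,
    requests_hash: Option<B256>,
}

fn sidecar_header_fields(
    parent_beacon_block_root: Option<B256>, // from `sidecar.parent_beacon_block_root()`
    requests: Option<&Requests>,            // from `sidecar.requests()`
) -> SidecarHeaderFields {
    SidecarHeaderFields {
        // Present for V3+ payloads (EIP-4788), `None` pre-Cancun.
        parent_beacon_block_root,
        // Present for V4 payloads only: the EIP-7685 requests commitment.
        requests_hash: requests.map(Requests::requests_hash),
    }
}

fn main() {
    let fields = sidecar_header_fields(Some(B256::ZERO), None);
    assert_eq!(fields.parent_beacon_block_root, Some(B256::ZERO));
    assert!(fields.requests_hash.is_none()); // pre-Prague payloads carry no requests
}
```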
pub fn try_into_sealed_block( payload: ExecutionPayload, - parent_beacon_block_root: Option, - execution_requests: Option, + sidecar: &ExecutionPayloadSidecar, ) -> Result { let block_hash = payload.block_hash(); - let base_payload = try_into_block(payload, parent_beacon_block_root, execution_requests)?; + let base_payload = try_into_block(payload, sidecar)?; // validate block hash and return validate_block_hash(block_hash, base_payload) @@ -356,8 +359,8 @@ mod tests { }; use alloy_primitives::{b256, hex, Bytes, U256}; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, - ExecutionPayloadV3, + CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ExecutionPayloadV1, + ExecutionPayloadV2, ExecutionPayloadV3, }; #[test] @@ -575,8 +578,7 @@ mod tests { let cancun_fields = CancunPayloadFields { parent_beacon_block_root, versioned_hashes }; // convert into block - let block = - try_into_block(payload, Some(cancun_fields.parent_beacon_block_root), None).unwrap(); + let block = try_into_block(payload, &ExecutionPayloadSidecar::v3(cancun_fields)).unwrap(); // Ensure the actual hash is calculated if we set the fields to what they should be validate_block_hash(block_hash_with_blob_fee_fields, block).unwrap(); diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 66465ef474aa..2d9d6f7822e1 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -21,8 +21,8 @@ use reth_evm::{ }; use reth_primitives::{Block, BlockId, BlockNumberOrTag, TransactionSignedEcRecovered}; use reth_provider::{ - BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, HeaderProvider, StateProofProvider, - StateProviderFactory, TransactionVariant, + BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, StateProviderFactory, + TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::DebugApiServer; @@ -81,7 +81,6 @@ where + HeaderProvider + ChainSpecProvider + StateProviderFactory - + EvmEnvProvider + 'static, Eth: EthApiTypes + TraceExt + 'static, BlockExecutor: BlockExecutorProvider, @@ -842,7 +841,6 @@ where + HeaderProvider + ChainSpecProvider + StateProviderFactory - + EvmEnvProvider + 'static, Eth: EthApiSpec + EthTransactions + TraceExt + 'static, BlockExecutor: BlockExecutorProvider, @@ -979,15 +977,6 @@ where .map_err(Into::into) } - /// Handler for `debug_executionWitness` - async fn debug_execution_witness( - &self, - block: BlockNumberOrTag, - ) -> RpcResult { - let _permit = self.acquire_trace_permit().await; - Self::debug_execution_witness(self, block).await.map_err(Into::into) - } - /// Handler for `debug_traceCall` async fn debug_trace_call( &self, @@ -1011,6 +1000,15 @@ where Self::debug_trace_call_many(self, bundles, state_context, opts).await.map_err(Into::into) } + /// Handler for `debug_executionWitness` + async fn debug_execution_witness( + &self, + block: BlockNumberOrTag, + ) -> RpcResult { + let _permit = self.acquire_trace_permit().await; + Self::debug_execution_witness(self, block).await.map_err(Into::into) + } + async fn debug_backtrace_at(&self, _location: &str) -> RpcResult<()> { Ok(()) } diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 6da46804005c..5c7fbbd00239 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -421,8 +421,8 @@ mod tests { let mut rng = generators::rng(); // Build mock data - let mut gas_used_ratios = Vec::new(); - let mut base_fees_per_gas = Vec::new(); + 
let mut gas_used_ratios = Vec::with_capacity(block_count as usize); + let mut base_fees_per_gas = Vec::with_capacity(block_count as usize); let mut last_header = None; let mut parent_hash = B256::default(); @@ -444,8 +444,9 @@ mod tests { last_header = Some(header.clone()); parent_hash = hash; - let mut transactions = vec![]; - for _ in 0..100 { + const TOTAL_TRANSACTIONS: usize = 100; + let mut transactions = Vec::with_capacity(TOTAL_TRANSACTIONS); + for _ in 0..TOTAL_TRANSACTIONS { let random_fee: u128 = rng.gen(); if let Some(base_fee_per_gas) = header.base_fee_per_gas { diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index a5818aa494fd..e59be0ac2838 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -40,7 +40,7 @@ impl DevSigner { /// Generates provided number of random dev signers /// which satisfy [`EthSigner`] trait pub fn random_signers(num: u32) -> Vec> { - let mut signers = Vec::new(); + let mut signers = Vec::with_capacity(num as usize); for _ in 0..num { let sk = PrivateKeySigner::random_with(&mut rand::thread_rng()); diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 8a35842798bc..429a10333d1b 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -45,11 +45,11 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{Address, StorageKey, StorageValue, U256}; use reth_chainspec::MAINNET; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; - use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}; use reth_rpc_eth_api::helpers::EthState; use reth_rpc_eth_types::{ diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 8bd9997f6e8a..24a13cb8062c 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -55,11 +55,11 @@ where #[cfg(test)] mod tests { + use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{hex_literal::hex, Bytes}; use reth_chainspec::ChainSpecProvider; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; - use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT; use reth_provider::test_utils::NoopProvider; use reth_rpc_eth_api::helpers::EthTransactions; use reth_rpc_eth_types::{ diff --git a/crates/stages/api/src/pipeline/builder.rs b/crates/stages/api/src/pipeline/builder.rs index 68ca887fe790..79a4c477ee6c 100644 --- a/crates/stages/api/src/pipeline/builder.rs +++ b/crates/stages/api/src/pipeline/builder.rs @@ -34,7 +34,9 @@ impl PipelineBuilder { /// [`builder`][StageSet::builder] on the set which will convert it to a /// [`StageSetBuilder`][crate::StageSetBuilder]. 
pub fn add_stages>(mut self, set: Set) -> Self { - for stage in set.builder().build() { + let states = set.builder().build(); + self.stages.reserve_exact(states.len()); + for stage in states { self.stages.push(stage); } self diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 93d8a1229921..06a5250913ed 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -917,7 +917,8 @@ mod tests { return Poll::Ready(None) } - let mut response = Vec::default(); + let mut response = + Vec::with_capacity(std::cmp::min(this.headers.len(), this.batch_size as usize)); while let Some(header) = this.headers.pop_front() { if header.is_empty() { response.push(BlockResponse::Empty(header)) diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 380931138869..6e954a781b71 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -71,3 +71,81 @@ pub const fn find_fixed_range( let start = (block / blocks_per_static_file) * blocks_per_static_file; SegmentRangeInclusive::new(start, start + blocks_per_static_file - 1) } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_highest_static_files_highest() { + let files = + HighestStaticFiles { headers: Some(100), receipts: Some(200), transactions: None }; + + // Test for headers segment + assert_eq!(files.highest(StaticFileSegment::Headers), Some(100)); + + // Test for receipts segment + assert_eq!(files.highest(StaticFileSegment::Receipts), Some(200)); + + // Test for transactions segment + assert_eq!(files.highest(StaticFileSegment::Transactions), None); + } + + #[test] + fn test_highest_static_files_as_mut() { + let mut files = HighestStaticFiles::default(); + + // Modify headers value + *files.as_mut(StaticFileSegment::Headers) = Some(150); + assert_eq!(files.headers, Some(150)); + + // Modify receipts value + *files.as_mut(StaticFileSegment::Receipts) = Some(250); + assert_eq!(files.receipts, Some(250)); + + // Modify transactions value + *files.as_mut(StaticFileSegment::Transactions) = Some(350); + assert_eq!(files.transactions, Some(350)); + } + + #[test] + fn test_highest_static_files_min() { + let files = + HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: None }; + + // Minimum value among the available segments + assert_eq!(files.min(), Some(100)); + + let empty_files = HighestStaticFiles::default(); + // No values, should return None + assert_eq!(empty_files.min(), None); + } + + #[test] + fn test_highest_static_files_max() { + let files = + HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: Some(500) }; + + // Maximum value among the available segments + assert_eq!(files.max(), Some(500)); + + let empty_files = HighestStaticFiles::default(); + // No values, should return None + assert_eq!(empty_files.max(), None); + } + + #[test] + fn test_find_fixed_range() { + // Test with default block size + let block: BlockNumber = 600_000; + let range = find_fixed_range(block, DEFAULT_BLOCKS_PER_STATIC_FILE); + assert_eq!(range.start(), 500_000); + assert_eq!(range.end(), 999_999); + + // Test with a custom block size + let block: BlockNumber = 1_200_000; + let range = find_fixed_range(block, 1_000_000); + assert_eq!(range.start(), 1_000_000); + assert_eq!(range.end(), 1_999_999); + } +} diff --git a/crates/storage/codecs/src/alloy/authorization_list.rs b/crates/storage/codecs/src/alloy/authorization_list.rs index 
2b013c0d3c52..6dc36956d249 100644 --- a/crates/storage/codecs/src/alloy/authorization_list.rs +++ b/crates/storage/codecs/src/alloy/authorization_list.rs @@ -44,11 +44,9 @@ impl Compact for SignedAuthorization { where B: bytes::BufMut + AsMut<[u8]>, { - let signature = self.signature(); - let (v, r, s) = (signature.v(), signature.r(), signature.s()); - buf.put_u8(v.y_parity_byte()); - buf.put_slice(r.as_le_slice()); - buf.put_slice(s.as_le_slice()); + buf.put_u8(self.y_parity()); + buf.put_slice(self.r().as_le_slice()); + buf.put_slice(self.s().as_le_slice()); // to_compact doesn't write the len to buffer. // By placing it as last, we don't need to store it either. @@ -56,17 +54,15 @@ impl Compact for SignedAuthorization { } fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) { - let y = alloy_primitives::Parity::Parity(buf.get_u8() == 1); + let y_parity = buf.get_u8(); let r = U256::from_le_slice(&buf[0..32]); buf.advance(32); let s = U256::from_le_slice(&buf[0..32]); buf.advance(32); - let signature = alloy_primitives::Signature::from_rs_and_parity(r, s, y) - .expect("invalid authorization signature"); let (auth, buf) = AlloyAuthorization::from_compact(buf, len); - (auth.into_signed(signature), buf) + (Self::new_unchecked(auth, y_parity, r, s), buf) } } diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 54ca046cb71a..c432400a5766 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -48,6 +48,12 @@ pub mod test_utils; /// Regarding the `specialized_to/from_compact` methods: Mainly used as a workaround for not being /// able to specialize an impl over certain types like `Vec`/`Option` where `T` is a fixed /// size array like `Vec`. +/// +/// ## Caution +/// +/// Due to the bitfields, every type change on the rust type (e.g. `U256` to `u64`) is a breaking +/// change and will lead to a new, incompatible [`Compact`] implementation. Implementers must take +/// special care when changing or rearranging fields. pub trait Compact: Sized { /// Takes a buffer which can be written to. *Ideally*, it returns the length written to. fn to_compact(&self, buf: &mut B) -> usize @@ -499,7 +505,7 @@ mod tests { #[test] fn compact_address() { - let mut buf = vec![]; + let mut buf = Vec::with_capacity(21); assert_eq!(Address::ZERO.to_compact(&mut buf), 20); assert_eq!(buf, vec![0; 20]); diff --git a/crates/storage/nippy-jar/src/compression/mod.rs b/crates/storage/nippy-jar/src/compression/mod.rs index 28a92fe909f2..f9bf8110eebb 100644 --- a/crates/storage/nippy-jar/src/compression/mod.rs +++ b/crates/storage/nippy-jar/src/compression/mod.rs @@ -44,7 +44,9 @@ pub trait Compression: Serialize + for<'a> Deserialize<'a> { #[derive(Debug, Serialize, Deserialize)] #[cfg_attr(test, derive(PartialEq))] pub enum Compressors { + /// Zstandard compression algorithm with custom settings. Zstd(Zstd), + /// LZ4 compression algorithm with custom settings. Lz4(Lz4), } diff --git a/crates/storage/nippy-jar/src/compression/zstd.rs b/crates/storage/nippy-jar/src/compression/zstd.rs index 500247d17677..896a65bd7080 100644 --- a/crates/storage/nippy-jar/src/compression/zstd.rs +++ b/crates/storage/nippy-jar/src/compression/zstd.rs @@ -12,10 +12,13 @@ pub use zstd::{bulk::Decompressor, dict::DecoderDictionary}; type RawDictionary = Vec; +/// Represents the state of a Zstandard compression operation. #[derive(Debug, Default, PartialEq, Eq, Serialize, Deserialize)] pub enum ZstdState { + /// The compressor is pending a dictionary. 
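Stepping back to the `SignedAuthorization` codec at the top of this hunk: it writes a fixed 65-byte signature prefix, a parity byte followed by `r` and `s` as 32-byte little-endian limbs. That layout can be exercised in isolation; a self-contained sketch with helper names of our own:

```rust
use alloy_primitives::U256;

fn encode_sig(y_parity: u8, r: U256, s: U256, buf: &mut Vec<u8>) {
    buf.push(y_parity);
    buf.extend_from_slice(r.as_le_slice());
    buf.extend_from_slice(s.as_le_slice());
}

fn decode_sig(buf: &[u8]) -> (u8, U256, U256) {
    let y_parity = buf[0];
    let r = U256::from_le_slice(&buf[1..33]);
    let s = U256::from_le_slice(&buf[33..65]);
    (y_parity, r, s)
}

fn main() {
    let mut buf = Vec::with_capacity(65);
    encode_sig(1, U256::from(7u64), U256::from(9u64), &mut buf);
    assert_eq!(buf.len(), 65); // 1 parity byte + two 32-byte limbs
    assert_eq!(decode_sig(&buf), (1, U256::from(7u64), U256::from(9u64)));
}
```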
#[default] PendingDictionary, + /// The compressor is ready to perform compression. Ready, } @@ -51,6 +54,7 @@ impl Zstd { } } + /// Sets the compression level for the Zstd compression instance. pub const fn with_level(mut self, level: i32) -> Self { self.level = level; self @@ -209,7 +213,7 @@ impl Compression for Zstd { return Err(NippyJarError::ColumnLenMismatch(self.columns, columns.len())) } - let mut dictionaries = vec![]; + let mut dictionaries = Vec::with_capacity(columns.len()); for column in columns { // ZSTD requires all training data to be continuous in memory, alongside the size of // each entry diff --git a/crates/storage/nippy-jar/src/consistency.rs b/crates/storage/nippy-jar/src/consistency.rs index 1093fb5546ac..952980ef6eff 100644 --- a/crates/storage/nippy-jar/src/consistency.rs +++ b/crates/storage/nippy-jar/src/consistency.rs @@ -28,6 +28,11 @@ pub struct NippyJarChecker { } impl NippyJarChecker { + /// Creates a new instance of [`NippyJarChecker`] with the provided [`NippyJar`]. + /// + /// This method initializes the checker without any associated file handles for + /// the data or offsets files. The [`NippyJar`] passed in contains all necessary + /// configurations for handling data. pub const fn new(jar: NippyJar) -> Self { Self { jar, data_file: None, offsets_file: None } } diff --git a/crates/storage/nippy-jar/src/cursor.rs b/crates/storage/nippy-jar/src/cursor.rs index 267764827299..376411ac2656 100644 --- a/crates/storage/nippy-jar/src/cursor.rs +++ b/crates/storage/nippy-jar/src/cursor.rs @@ -25,9 +25,10 @@ impl std::fmt::Debug for NippyJarCursor<'_, H> { } impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { + /// Creates a new instance of [`NippyJarCursor`] for the given [`NippyJar`]. pub fn new(jar: &'a NippyJar) -> Result { let max_row_size = jar.max_row_size; - Ok(NippyJarCursor { + Ok(Self { jar, reader: Arc::new(jar.open_data_reader()?), // Makes sure that we have enough buffer capacity to decompress any row of data. @@ -36,12 +37,14 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { }) } + /// Creates a new instance of [`NippyJarCursor`] with the specified [`NippyJar`] and data + /// reader. pub fn with_reader( jar: &'a NippyJar, reader: Arc, ) -> Result { let max_row_size = jar.max_row_size; - Ok(NippyJarCursor { + Ok(Self { jar, reader, // Makes sure that we have enough buffer capacity to decompress any row of data. diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index fc096cf848c6..f69bb44a068b 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -4,53 +4,92 @@ use thiserror::Error; /// Errors associated with [`crate::NippyJar`]. #[derive(Error, Debug)] pub enum NippyJarError { + /// An internal error occurred, wrapping any type of error. #[error(transparent)] Internal(#[from] Box), + + /// An error occurred while disconnecting, wrapping a standard I/O error. #[error(transparent)] Disconnect(#[from] std::io::Error), + + /// An error related to the file system occurred, wrapping a file system path error. #[error(transparent)] FileSystem(#[from] reth_fs_util::FsPathError), + + /// A custom error message provided by the user. #[error("{0}")] Custom(String), + + /// An error occurred during serialization/deserialization with Bincode. #[error(transparent)] Bincode(#[from] Box), + + /// An error occurred with the Elias-Fano encoding/decoding process. 
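On the `with_level` setter above: it only stores a zstd compression level, and what that level trades off is easiest to see with the zstd crate's bulk API directly (the same crate this module wraps):

```rust
fn main() -> std::io::Result<()> {
    let data: Vec<u8> = (0..10_000u32).map(|i| (i % 7) as u8).collect();

    // Lower levels favour speed, higher levels favour compression ratio.
    let fast = zstd::bulk::compress(&data, 1)?;
    let dense = zstd::bulk::compress(&data, 19)?;
    println!("level 1: {} bytes, level 19: {} bytes", fast.len(), dense.len());

    // Round-trips regardless of level.
    assert_eq!(zstd::bulk::decompress(&dense, data.len())?, data);
    Ok(())
}
```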
#[error(transparent)] EliasFano(#[from] anyhow::Error), + + /// Compression was enabled, but the compressor is not ready yet. #[error("compression was enabled, but it's not ready yet")] CompressorNotReady, + + /// Decompression was enabled, but the decompressor is not ready yet. #[error("decompression was enabled, but it's not ready yet")] DecompressorNotReady, + + /// The number of columns does not match the expected length. #[error("number of columns does not match: {0} != {1}")] ColumnLenMismatch(usize, usize), + + /// An unexpected missing value was encountered at a specific row and column. #[error("unexpected missing value: row:col {0}:{1}")] UnexpectedMissingValue(u64, u64), + + /// The size of an offset exceeds the maximum allowed size of 8 bytes. #[error("the size of an offset must be at most 8 bytes, got {offset_size}")] OffsetSizeTooBig { /// The read offset size in number of bytes. offset_size: u8, }, + + /// The size of an offset is less than the minimum allowed size of 1 byte. #[error("the size of an offset must be at least 1 byte, got {offset_size}")] OffsetSizeTooSmall { /// The read offset size in number of bytes. offset_size: u8, }, + + /// An attempt was made to read an offset that is out of bounds. #[error("attempted to read an out of bounds offset: {index}")] OffsetOutOfBounds { /// The index of the offset that was being read. index: usize, }, + + /// The output buffer is too small for the compression or decompression operation. #[error("compression or decompression requires a bigger destination output")] OutputTooSmall, + + /// A dictionary is not loaded when it is required for operations. #[error("dictionary is not loaded.")] DictionaryNotLoaded, + + /// It's not possible to generate a compressor after loading a dictionary. #[error("it's not possible to generate a compressor after loading a dictionary.")] CompressorNotAllowed, + + /// The number of offsets is smaller than the requested prune size. #[error("number of offsets ({0}) is smaller than prune request ({1}).")] InvalidPruning(u64, u64), + + /// The jar has been frozen and cannot be modified. #[error("jar has been frozen and cannot be modified.")] FrozenJar, + + /// The file is in an inconsistent state. #[error("File is in an inconsistent state.")] InconsistentState, + + /// A specified file is missing. #[error("Missing file: {0}.")] MissingFile(PathBuf), } diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index bdc950aa38a7..b1d174feb2c3 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -10,7 +10,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![allow(missing_docs)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use memmap2::Mmap; @@ -28,6 +27,7 @@ use std::os::windows::prelude::OpenOptionsExt; use tracing::*; +/// Compression algorithms supported by `NippyJar`. pub mod compression; #[cfg(test)] use compression::Compression; @@ -55,10 +55,13 @@ pub use writer::NippyJarWriter; mod consistency; pub use consistency::NippyJarChecker; +/// The version number of the Nippy Jar format. const NIPPY_JAR_VERSION: usize = 1; - +/// The file extension used for index files. const INDEX_FILE_EXTENSION: &str = "idx"; +/// The file extension used for offsets files. const OFFSETS_FILE_EXTENSION: &str = "off"; +/// The file extension used for configuration files. 
pub const CONFIG_FILE_EXTENSION: &str = "conf"; /// A [`RefRow`] is a list of column value slices pointing to either an internal buffer or a diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs index 9bf9bf526448..3a1003bee764 100644 --- a/crates/storage/nippy-jar/src/writer.rs +++ b/crates/storage/nippy-jar/src/writer.rs @@ -354,6 +354,10 @@ impl NippyJarWriter { Ok(()) } + /// Commits changes to the data file and offsets without synchronizing all data to disk. + /// + /// This function flushes the buffered data to the data file and commits the offsets, + /// but it does not guarantee that all data is synchronized to persistent storage. #[cfg(feature = "test-utils")] pub fn commit_without_sync_all(&mut self) -> Result<(), NippyJarError> { self.data_file.flush()?; @@ -412,41 +416,49 @@ impl NippyJarWriter { Ok(()) } + /// Returns the maximum row size for the associated [`NippyJar`]. #[cfg(test)] pub const fn max_row_size(&self) -> usize { self.jar.max_row_size } + /// Returns the writer's current column index. #[cfg(test)] pub const fn column(&self) -> usize { self.column } + /// Returns a reference to the offsets vector. #[cfg(test)] pub fn offsets(&self) -> &[u64] { &self.offsets } + /// Returns a mutable reference to the offsets vector. #[cfg(test)] pub fn offsets_mut(&mut self) -> &mut Vec { &mut self.offsets } + /// Returns the path to the offsets file for the associated [`NippyJar`]. #[cfg(test)] pub fn offsets_path(&self) -> std::path::PathBuf { self.jar.offsets_path() } + /// Returns the path to the data file for the associated [`NippyJar`]. #[cfg(test)] pub fn data_path(&self) -> &Path { self.jar.data_path() } + /// Returns a mutable reference to the buffered writer for the data file. #[cfg(any(test, feature = "test-utils"))] pub fn data_file(&mut self) -> &mut BufWriter { &mut self.data_file } + /// Returns a reference to the associated [`NippyJar`] instance. #[cfg(any(test, feature = "test-utils"))] pub const fn jar(&self) -> &NippyJar { &self.jar
+ assert_eq!( + results, + vec![ + (B256::from_slice(&[4; 32]), U256::ZERO), // Revert slot previous value + (B256::from_slice(&[5; 32]), U256::from(40)), // Only revert present. + ] + ); + } + + #[test] + fn test_storage_reverts_iter_wiped_only() { + // Create sample data for only wiped entries. + let wiped = vec![ + (B256::from_slice(&[6; 32]), U256::from(50)), + (B256::from_slice(&[7; 32]), U256::from(60)), + ]; + + // Create the iterator with only wiped entries and no reverts. + let iter = StorageRevertsIter::new(vec![], wiped); + + // Iterate and collect results into a vector for verification. + let results: Vec<_> = iter.collect(); + + // Verify the output order and values. + assert_eq!( + results, + vec![ + (B256::from_slice(&[6; 32]), U256::from(50)), // Only wiped present. + (B256::from_slice(&[7; 32]), U256::from(60)), // Only wiped present. + ] + ); + } + + #[test] + fn test_storage_reverts_iter_interleaved() { + // Create sample data for interleaved reverts and wiped entries. + let reverts = vec![ + (B256::from_slice(&[8; 32]), RevertToSlot::Some(U256::from(70))), + (B256::from_slice(&[9; 32]), RevertToSlot::Some(U256::from(80))), + // Some higher key than wiped + (B256::from_slice(&[15; 32]), RevertToSlot::Some(U256::from(90))), + ]; + + let wiped = vec![ + (B256::from_slice(&[8; 32]), U256::from(75)), // Same key as revert + (B256::from_slice(&[10; 32]), U256::from(85)), // Wiped with new key + ]; + + // Create the iterator with the sample data. + let iter = StorageRevertsIter::new(reverts, wiped); + + // Iterate and collect results into a vector for verification. + let results: Vec<_> = iter.collect(); + + // Verify the output order and values. + assert_eq!( + results, + vec![ + (B256::from_slice(&[8; 32]), U256::from(70)), // Revert takes priority. + (B256::from_slice(&[9; 32]), U256::from(80)), // Only revert present. + (B256::from_slice(&[10; 32]), U256::from(85)), // Wiped entry. 
+ (B256::from_slice(&[15; 32]), U256::from(90)), // WGreater revert entry + ] + ); + } +} diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 1866610e3f28..64a8a204a329 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1,13 +1,15 @@ +#![allow(unused)] use crate::{ - providers::StaticFileProvider, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, - BlockReader, BlockReaderIdExt, BlockSource, CanonChainTracker, CanonStateNotifications, - CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, - DatabaseProviderFactory, DatabaseProviderRO, EvmEnvProvider, HeaderProvider, ProviderError, + providers::{ConsistentProvider, StaticFileProvider}, + AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, + BlockSource, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, + ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProvider, + DatabaseProviderFactory, EvmEnvProvider, FullProvider, HeaderProvider, ProviderError, ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber}; +use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; use alloy_rpc_types_engine::ForkchoiceState; use reth_chain_state::{ @@ -15,10 +17,10 @@ use reth_chain_state::{ MemoryOverlayStateProvider, }; use reth_chainspec::{ChainInfo, EthereumHardforks}; -use reth_db::models::BlockNumberAddress; +use reth_db::{models::BlockNumberAddress, transaction::DbTx, Database}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; +use reth_execution_types::ExecutionOutcome; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, @@ -27,21 +29,17 @@ use reth_primitives::{ }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::StorageChangeSetReader; +use reth_storage_api::{DBProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderResult; -use revm::{ - db::states::PlainStorageRevert, - primitives::{BlockEnv, CfgEnvWithHandlerCfg}, -}; +use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ - collections::{hash_map, HashMap}, - ops::{Add, Bound, RangeBounds, RangeInclusive, Sub}, + ops::{Add, RangeBounds, RangeInclusive, Sub}, sync::Arc, time::Instant, }; use tracing::trace; -use super::ProviderNodeTypes; +use crate::providers::ProviderNodeTypes; /// The main type for interacting with the blockchain. /// @@ -50,11 +48,11 @@ use super::ProviderNodeTypes; /// type that holds an instance of the database and the blockchain tree. #[derive(Debug)] pub struct BlockchainProvider2 { - /// Provider type used to access the database. - database: ProviderFactory, + /// Provider factory used to access the database. 
+ pub(crate) database: ProviderFactory, /// Tracks the chain info wrt forkchoice updates and in memory canonical /// state. - pub(super) canonical_in_memory_state: CanonicalInMemoryState, + pub(crate) canonical_in_memory_state: CanonicalInMemoryState, } impl Clone for BlockchainProvider2 { @@ -67,15 +65,15 @@ impl Clone for BlockchainProvider2 { } impl BlockchainProvider2 { - /// Create a new provider using only the database, fetching the latest header from - /// the database to initialize the provider. - pub fn new(database: ProviderFactory) -> ProviderResult { - let provider = database.provider()?; - let best: ChainInfo = provider.chain_info()?; + /// Create a new [`BlockchainProvider2`] using only the storage, fetching the latest + /// header from the database to initialize the provider. + pub fn new(storage: ProviderFactory) -> ProviderResult { + let provider = storage.provider()?; + let best = provider.chain_info()?; match provider.header_by_number(best.best_number)? { Some(header) => { drop(provider); - Ok(Self::with_latest(database, SealedHeader::new(header, best.best_hash))?) + Ok(Self::with_latest(storage, SealedHeader::new(header, best.best_hash))?) } None => Err(ProviderError::HeaderNotFound(best.best_number.into())), } @@ -86,8 +84,8 @@ impl BlockchainProvider2 { /// /// This returns a `ProviderResult` since it tries the retrieve the last finalized header from /// `database`. - pub fn with_latest(database: ProviderFactory, latest: SealedHeader) -> ProviderResult { - let provider = database.provider()?; + pub fn with_latest(storage: ProviderFactory, latest: SealedHeader) -> ProviderResult { + let provider = storage.provider()?; let finalized_header = provider .last_finalized_block_number()? .map(|num| provider.sealed_header(num)) @@ -104,7 +102,7 @@ impl BlockchainProvider2 { .transpose()? .flatten(); Ok(Self { - database, + database: storage, canonical_in_memory_state: CanonicalInMemoryState::with_head( latest, finalized_header, @@ -118,281 +116,12 @@ impl BlockchainProvider2 { self.canonical_in_memory_state.clone() } - // Helper function to convert range bounds - fn convert_range_bounds( - &self, - range: impl RangeBounds, - end_unbounded: impl FnOnce() -> T, - ) -> (T, T) - where - T: Copy + Add + Sub + From, - { - let start = match range.start_bound() { - Bound::Included(&n) => n, - Bound::Excluded(&n) => n + T::from(1u8), - Bound::Unbounded => T::from(0u8), - }; - - let end = match range.end_bound() { - Bound::Included(&n) => n, - Bound::Excluded(&n) => n - T::from(1u8), - Bound::Unbounded => end_unbounded(), - }; - - (start, end) - } - - /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. - /// - /// If the range is empty, or there are no blocks for the given range, then this returns `None`. - pub fn get_state( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - if range.is_empty() { - return Ok(None) - } - let start_block_number = *range.start(); - let end_block_number = *range.end(); - - // We are not removing block meta as it is used to get block changesets. - let mut block_bodies = Vec::new(); - for block_num in range.clone() { - let block_body = self - .block_body_indices(block_num)? 
- .ok_or(ProviderError::BlockBodyIndicesNotFound(block_num))?; - block_bodies.push((block_num, block_body)) - } - - // get transaction receipts - let Some(from_transaction_num) = block_bodies.first().map(|body| body.1.first_tx_num()) - else { - return Ok(None) - }; - let Some(to_transaction_num) = block_bodies.last().map(|body| body.1.last_tx_num()) else { - return Ok(None) - }; - - let mut account_changeset = Vec::new(); - for block_num in range.clone() { - let changeset = - self.account_block_changeset(block_num)?.into_iter().map(|elem| (block_num, elem)); - account_changeset.extend(changeset); - } - - let mut storage_changeset = Vec::new(); - for block_num in range { - let changeset = self.storage_changeset(block_num)?; - storage_changeset.extend(changeset); - } - - let (state, reverts) = - self.populate_bundle_state(account_changeset, storage_changeset, end_block_number)?; - - let mut receipt_iter = - self.receipts_by_tx_range(from_transaction_num..=to_transaction_num)?.into_iter(); - - let mut receipts = Vec::with_capacity(block_bodies.len()); - // loop break if we are at the end of the blocks. - for (_, block_body) in block_bodies { - let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); - for tx_num in block_body.tx_num_range() { - let receipt = receipt_iter - .next() - .ok_or_else(|| ProviderError::ReceiptNotFound(tx_num.into()))?; - block_receipts.push(Some(receipt)); - } - receipts.push(block_receipts); - } - - Ok(Some(ExecutionOutcome::new_init( - state, - reverts, - // We skip new contracts since we never delete them from the database - Vec::new(), - receipts.into(), - start_block_number, - Vec::new(), - ))) - } - - /// Populate a [`BundleStateInit`] and [`RevertsInit`] using cursors over the - /// [`reth_db::PlainAccountState`] and [`reth_db::PlainStorageState`] tables, based on the given - /// storage and account changesets. - fn populate_bundle_state( - &self, - account_changeset: Vec<(u64, AccountBeforeTx)>, - storage_changeset: Vec<(BlockNumberAddress, StorageEntry)>, - block_range_end: BlockNumber, - ) -> ProviderResult<(BundleStateInit, RevertsInit)> { - let mut state: BundleStateInit = HashMap::new(); - let mut reverts: RevertsInit = HashMap::new(); - let state_provider = self.state_by_block_number_or_tag(block_range_end.into())?; - - // add account changeset changes - for (block_number, account_before) in account_changeset.into_iter().rev() { - let AccountBeforeTx { info: old_info, address } = account_before; - match state.entry(address) { - hash_map::Entry::Vacant(entry) => { - let new_info = state_provider.basic_account(address)?; - entry.insert((old_info, new_info, HashMap::new())); - } - hash_map::Entry::Occupied(mut entry) => { - // overwrite old account state. - entry.get_mut().0 = old_info; - } - } - // insert old info into reverts. - reverts.entry(block_number).or_default().entry(address).or_default().0 = Some(old_info); - } - - // add storage changeset changes - for (block_and_address, old_storage) in storage_changeset.into_iter().rev() { - let BlockNumberAddress((block_number, address)) = block_and_address; - // get account state or insert from plain state. - let account_state = match state.entry(address) { - hash_map::Entry::Vacant(entry) => { - let present_info = state_provider.basic_account(address)?; - entry.insert((present_info, present_info, HashMap::new())) - } - hash_map::Entry::Occupied(entry) => entry.into_mut(), - }; - - // match storage. 
- match account_state.2.entry(old_storage.key) { - hash_map::Entry::Vacant(entry) => { - let new_storage_value = - state_provider.storage(address, old_storage.key)?.unwrap_or_default(); - entry.insert((old_storage.value, new_storage_value)); - } - hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().0 = old_storage.value; - } - }; - - reverts - .entry(block_number) - .or_default() - .entry(address) - .or_default() - .1 - .push(old_storage); - } - - Ok((state, reverts)) - } - - /// Fetches a range of data from both in-memory state and persistent storage while a predicate - /// is met. - /// - /// Creates a snapshot of the in-memory chain state and database provider to prevent - /// inconsistencies. Splits the range into in-memory and storage sections, prioritizing - /// recent in-memory blocks in case of overlaps. - /// - /// * `fetch_db_range` function (`F`) provides access to the database provider, allowing the - /// user to retrieve the required items from the database using [`RangeInclusive`]. - /// * `map_block_state_item` function (`G`) provides each block of the range in the in-memory - /// state, allowing for selection or filtering for the desired data. - fn get_in_memory_or_storage_by_block_range_while( - &self, - range: impl RangeBounds, - fetch_db_range: F, - map_block_state_item: G, - mut predicate: P, - ) -> ProviderResult> - where - F: FnOnce( - &DatabaseProviderRO, - RangeInclusive, - &mut P, - ) -> ProviderResult>, - G: Fn(Arc, &mut P) -> Option, - P: FnMut(&T) -> bool, - { - // Each one provides a snapshot at the time of instantiation, but its order matters. - // - // If we acquire first the database provider, it's possible that before the in-memory chain - // snapshot is instantiated, it will flush blocks to disk. This would - // mean that our database provider would not have access to the flushed blocks (since it's - // working under an older view), while the in-memory state may have deleted them - // entirely. Resulting in gaps on the range. - let mut in_memory_chain = - self.canonical_in_memory_state.canonical_chain().collect::>(); - let db_provider = self.database_provider_ro()?; - - let (start, end) = self.convert_range_bounds(range, || { - // the first block is the highest one. - in_memory_chain - .first() - .map(|b| b.number()) - .unwrap_or_else(|| db_provider.last_block_number().unwrap_or_default()) - }); - - if start > end { - return Ok(vec![]) - } - - // Split range into storage_range and in-memory range. If the in-memory range is not - // necessary drop it early. - // - // The last block of `in_memory_chain` is the lowest block number. - let (in_memory, storage_range) = match in_memory_chain.last().as_ref().map(|b| b.number()) { - Some(lowest_memory_block) if lowest_memory_block <= end => { - let highest_memory_block = - in_memory_chain.first().as_ref().map(|b| b.number()).expect("qed"); - - // Database will for a time overlap with in-memory-chain blocks. In - // case of a re-org, it can mean that the database blocks are of a forked chain, and - // so, we should prioritize the in-memory overlapped blocks. 
- let in_memory_range = - lowest_memory_block.max(start)..=end.min(highest_memory_block); - - // If requested range is in the middle of the in-memory range, remove the necessary - // lowest blocks - in_memory_chain.truncate( - in_memory_chain - .len() - .saturating_sub(start.saturating_sub(lowest_memory_block) as usize), - ); - - let storage_range = - (lowest_memory_block > start).then(|| start..=lowest_memory_block - 1); - - (Some((in_memory_chain, in_memory_range)), storage_range) - } - _ => { - // Drop the in-memory chain so we don't hold blocks in memory. - drop(in_memory_chain); - - (None, Some(start..=end)) - } - }; - - let mut items = Vec::with_capacity((end - start + 1) as usize); - - if let Some(storage_range) = storage_range { - let mut db_items = fetch_db_range(&db_provider, storage_range.clone(), &mut predicate)?; - items.append(&mut db_items); - - // The predicate was not met, if the number of items differs from the expected. So, we - // return what we have. - if items.len() as u64 != storage_range.end() - storage_range.start() + 1 { - return Ok(items) - } - } - - if let Some((in_memory_chain, in_memory_range)) = in_memory { - for (num, block) in in_memory_range.zip(in_memory_chain.into_iter().rev()) { - debug_assert!(num == block.number()); - if let Some(item) = map_block_state_item(block, &mut predicate) { - items.push(item); - } else { - break - } - } - } - - Ok(items) + /// Returns a provider with a created `DbTx` inside, which allows fetching data from the + /// database using different types of providers. Example: [`HeaderProvider`] + /// [`BlockHashReader`]. This may fail if the inner read database transaction fails to open. + #[track_caller] + pub fn consistent_provider(&self) -> ProviderResult> { + ConsistentProvider::new(self.database.clone(), self.canonical_in_memory_state()) } /// This uses a given [`BlockState`] to initialize a state provider for that block. @@ -405,222 +134,14 @@ impl BlockchainProvider2 { Ok(state.state_provider(latest_historical)) } - /// Fetches data from either in-memory state or persistent storage for a range of transactions. + /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. /// - /// * `fetch_from_db`: has a [`DatabaseProviderRO`] and the storage specific range. - /// * `fetch_from_block_state`: has a [`RangeInclusive`] of elements that should be fetched from - /// [`BlockState`]. [`RangeInclusive`] is necessary to handle partial look-ups of a block. - fn get_in_memory_or_storage_by_tx_range( - &self, - range: impl RangeBounds, - fetch_from_db: S, - fetch_from_block_state: M, - ) -> ProviderResult> - where - S: FnOnce( - DatabaseProviderRO, - RangeInclusive, - ) -> ProviderResult>, - M: Fn(RangeInclusive, Arc) -> ProviderResult>, - { - let in_mem_chain = self.canonical_in_memory_state.canonical_chain().collect::>(); - let provider = self.database.provider()?; - - // Get the last block number stored in the storage which does NOT overlap with in-memory - // chain. - let last_database_block_number = in_mem_chain - .last() - .map(|b| Ok(b.anchor().number)) - .unwrap_or_else(|| provider.last_block_number())?; - - // Get the next tx number for the last block stored in the storage, which marks the start of - // the in-memory state. - let last_block_body_index = provider - .block_body_indices(last_database_block_number)? 
- .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; - let mut in_memory_tx_num = last_block_body_index.next_tx_num(); - - let (start, end) = self.convert_range_bounds(range, || { - in_mem_chain - .iter() - .map(|b| b.block_ref().block().body.transactions.len() as u64) - .sum::() + - last_block_body_index.last_tx_num() - }); - - if start > end { - return Ok(vec![]) - } - - let mut tx_range = start..=end; - - // If the range is entirely before the first in-memory transaction number, fetch from - // storage - if *tx_range.end() < in_memory_tx_num { - return fetch_from_db(provider, tx_range); - } - - let mut items = Vec::with_capacity((tx_range.end() - tx_range.start() + 1) as usize); - - // If the range spans storage and memory, get elements from storage first. - if *tx_range.start() < in_memory_tx_num { - // Determine the range that needs to be fetched from storage. - let db_range = *tx_range.start()..=in_memory_tx_num.saturating_sub(1); - - // Set the remaining transaction range for in-memory - tx_range = in_memory_tx_num..=*tx_range.end(); - - items.extend(fetch_from_db(provider, db_range)?); - } - - // Iterate from the lowest block to the highest in-memory chain - for block_state in in_mem_chain.into_iter().rev() { - let block_tx_count = block_state.block_ref().block().body.transactions.len(); - let remaining = (tx_range.end() - tx_range.start() + 1) as usize; - - // If the transaction range start is equal or higher than the next block first - // transaction, advance - if *tx_range.start() >= in_memory_tx_num + block_tx_count as u64 { - in_memory_tx_num += block_tx_count as u64; - continue - } - - // This should only be more than 0 once, in case of a partial range inside a block. - let skip = (tx_range.start() - in_memory_tx_num) as usize; - - items.extend(fetch_from_block_state( - skip..=skip + (remaining.min(block_tx_count - skip) - 1), - block_state, - )?); - - in_memory_tx_num += block_tx_count as u64; - - // Break if the range has been fully processed - if in_memory_tx_num > *tx_range.end() { - break - } - - // Set updated range - tx_range = in_memory_tx_num..=*tx_range.end(); - } - - Ok(items) - } - - /// Fetches data from either in-memory state or persistent storage by transaction - /// [`HashOrNumber`]. - fn get_in_memory_or_storage_by_tx( - &self, - id: HashOrNumber, - fetch_from_db: S, - fetch_from_block_state: M, - ) -> ProviderResult> - where - S: FnOnce(DatabaseProviderRO) -> ProviderResult>, - M: Fn(usize, TxNumber, Arc) -> ProviderResult>, - { - // Order of instantiation matters. More information on: - // `get_in_memory_or_storage_by_block_range_while`. - let in_mem_chain = self.canonical_in_memory_state.canonical_chain().collect::>(); - let provider = self.database.provider()?; - - // Get the last block number stored in the database which does NOT overlap with in-memory - // chain. - let last_database_block_number = in_mem_chain - .last() - .map(|b| Ok(b.anchor().number)) - .unwrap_or_else(|| provider.last_block_number())?; - - // Get the next tx number for the last block stored in the database and consider it the - // first tx number of the in-memory state - let last_block_body_index = provider - .block_body_indices(last_database_block_number)? 
- .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; - let mut in_memory_tx_num = last_block_body_index.next_tx_num(); - - // If the transaction number is less than the first in-memory transaction number, make a - // database lookup - if let HashOrNumber::Number(id) = id { - if id < in_memory_tx_num { - return fetch_from_db(provider) - } - } - - // Iterate from the lowest block to the highest - for block_state in in_mem_chain.into_iter().rev() { - let executed_block = block_state.block_ref(); - let block = executed_block.block(); - - for tx_index in 0..block.body.transactions.len() { - match id { - HashOrNumber::Hash(tx_hash) => { - if tx_hash == block.body.transactions[tx_index].hash() { - return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) - } - } - HashOrNumber::Number(id) => { - if id == in_memory_tx_num { - return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) - } - } - } - - in_memory_tx_num += 1; - } - } - - // Not found in-memory, so check database. - if let HashOrNumber::Hash(_) = id { - return fetch_from_db(provider) - } - - Ok(None) - } - - /// Fetches data from either in-memory state or persistent storage by [`BlockHashOrNumber`]. - fn get_in_memory_or_storage_by_block( + /// If the range is empty, or there are no blocks for the given range, then this returns `None`. + pub fn get_state( &self, - id: BlockHashOrNumber, - fetch_from_db: S, - fetch_from_block_state: M, - ) -> ProviderResult - where - S: FnOnce(DatabaseProviderRO) -> ProviderResult, - M: Fn(Arc) -> ProviderResult, - { - let block_state = match id { - BlockHashOrNumber::Hash(block_hash) => { - self.canonical_in_memory_state.state_by_hash(block_hash) - } - BlockHashOrNumber::Number(block_number) => { - self.canonical_in_memory_state.state_by_number(block_number) - } - }; - - if let Some(block_state) = block_state { - return fetch_from_block_state(block_state) - } - fetch_from_db(self.database_provider_ro()?) - } -} - -impl BlockchainProvider2 { - /// Ensures that the given block number is canonical (synced) - /// - /// This is a helper for guarding the `HistoricalStateProvider` against block numbers that are - /// out of range and would lead to invalid results, mainly during initial sync. - /// - /// Verifying the `block_number` would be expensive since we need to lookup sync table - /// Instead, we ensure that the `block_number` is within the range of the - /// [`Self::best_block_number`] which is updated when a block is synced. 
- #[inline] - fn ensure_canonical_block(&self, block_number: BlockNumber) -> ProviderResult<()> { - let latest = self.best_block_number()?; - if block_number > latest { - Err(ProviderError::HeaderNotFound(block_number.into())) - } else { - Ok(()) - } + range: RangeInclusive, + ) -> ProviderResult> { + self.consistent_provider()?.get_state(range) } } @@ -646,78 +167,34 @@ impl StaticFileProviderFactory for BlockchainProvider2 impl HeaderProvider for BlockchainProvider2 { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - (*block_hash).into(), - |db_provider| db_provider.header(block_hash), - |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), - ) + self.consistent_provider()?.header(block_hash) } fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - num.into(), - |db_provider| db_provider.header_by_number(num), - |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), - ) + self.consistent_provider()?.header_by_number(num) } fn header_td(&self, hash: &BlockHash) -> ProviderResult> { - if let Some(num) = self.block_number(*hash)? { - self.header_td_by_number(num) - } else { - Ok(None) - } + self.consistent_provider()?.header_td(hash) } fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - let number = if self.canonical_in_memory_state.hash_by_number(number).is_some() { - // If the block exists in memory, we should return a TD for it. - // - // The canonical in memory state should only store post-merge blocks. Post-merge blocks - // have zero difficulty. This means we can use the total difficulty for the last - // finalized block number if present (so that we are not affected by reorgs), if not the - // last number in the database will be used. - if let Some(last_finalized_num_hash) = - self.canonical_in_memory_state.get_finalized_num_hash() - { - last_finalized_num_hash.number - } else { - self.last_block_number()? 
- } - } else { - // Otherwise, return what we have on disk for the input block - number - }; - self.database.header_td_by_number(number) + self.consistent_provider()?.header_td_by_number(number) } fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.headers_range(range), - |block_state, _| Some(block_state.block_ref().block().header.header().clone()), - |_| true, - ) + self.consistent_provider()?.headers_range(range) } fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - number.into(), - |db_provider| db_provider.sealed_header(number), - |block_state| Ok(Some(block_state.block_ref().block().header.clone())), - ) + self.consistent_provider()?.sealed_header(number) } fn sealed_headers_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.sealed_headers_range(range), - |block_state, _| Some(block_state.block_ref().block().header.clone()), - |_| true, - ) + self.consistent_provider()?.sealed_headers_range(range) } fn sealed_headers_while( @@ -725,25 +202,13 @@ impl HeaderProvider for BlockchainProvider2 { range: impl RangeBounds, predicate: impl FnMut(&SealedHeader) -> bool, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate), - |block_state, predicate| { - let header = &block_state.block_ref().block().header; - predicate(header).then(|| header.clone()) - }, - predicate, - ) + self.consistent_provider()?.sealed_headers_while(range, predicate) } } impl BlockHashReader for BlockchainProvider2 { fn block_hash(&self, number: u64) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - number.into(), - |db_provider| db_provider.block_hash(number), - |block_state| Ok(Some(block_state.hash())), - ) + self.consistent_provider()?.block_hash(number) } fn canonical_hashes_range( @@ -751,15 +216,7 @@ impl BlockHashReader for BlockchainProvider2 { start: BlockNumber, end: BlockNumber, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - start..end, - |db_provider, inclusive_range, _| { - db_provider - .canonical_hashes_range(*inclusive_range.start(), *inclusive_range.end() + 1) - }, - |block_state, _| Some(block_state.hash()), - |_| true, - ) + self.consistent_provider()?.canonical_hashes_range(start, end) } } @@ -777,11 +234,7 @@ impl BlockNumReader for BlockchainProvider2 { } fn block_number(&self, hash: B256) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - hash.into(), - |db_provider| db_provider.block_number(hash), - |block_state| Ok(Some(block_state.number())), - ) + self.consistent_provider()?.block_number(hash) } } @@ -801,28 +254,11 @@ impl BlockIdReader for BlockchainProvider2 { impl BlockReader for BlockchainProvider2 { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { - match source { - BlockSource::Any | BlockSource::Canonical => { - // Note: it's fine to return the unsealed block because the caller already has - // the hash - self.get_in_memory_or_storage_by_block( - hash.into(), - |db_provider| db_provider.find_block_by_hash(hash, source), - |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), - ) - } - BlockSource::Pending => { - Ok(self.canonical_in_memory_state.pending_block().map(|block| 
block.unseal())) - } - } + self.consistent_provider()?.find_block_by_hash(hash, source) } fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.block(id), - |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), - ) + self.consistent_provider()?.block(id) } fn pending_block(&self) -> ProviderResult> { @@ -838,51 +274,14 @@ impl BlockReader for BlockchainProvider2 { } fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.ommers(id), - |block_state| { - if self.chain_spec().final_paris_total_difficulty(block_state.number()).is_some() { - return Ok(Some(Vec::new())) - } - - Ok(Some(block_state.block_ref().block().body.ommers.clone())) - }, - ) + self.consistent_provider()?.ommers(id) } fn block_body_indices( &self, number: BlockNumber, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - number.into(), - |db_provider| db_provider.block_body_indices(number), - |block_state| { - // Find the last block indices on database - let last_storage_block_number = block_state.anchor().number; - let mut stored_indices = self - .database - .block_body_indices(last_storage_block_number)? - .ok_or(ProviderError::BlockBodyIndicesNotFound(last_storage_block_number))?; - - // Prepare our block indices - stored_indices.first_tx_num = stored_indices.next_tx_num(); - stored_indices.tx_count = 0; - - // Iterate from the lowest block in memory until our target block - for state in block_state.chain().into_iter().rev() { - let block_tx_count = state.block_ref().block.body.transactions.len() as u64; - if state.block_ref().block().number == number { - stored_indices.tx_count = block_tx_count; - } else { - stored_indices.first_tx_num += block_tx_count; - } - } - - Ok(Some(stored_indices)) - }, - ) + self.consistent_provider()?.block_body_indices(number) } /// Returns the block with senders with matching number or hash from database. 
@@ -896,11 +295,7 @@ impl BlockReader for BlockchainProvider2 { id: BlockHashOrNumber, transaction_kind: TransactionVariant, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.block_with_senders(id, transaction_kind), - |block_state| Ok(Some(block_state.block_with_senders())), - ) + self.consistent_provider()?.block_with_senders(id, transaction_kind) } fn sealed_block_with_senders( @@ -908,259 +303,116 @@ impl BlockReader for BlockchainProvider2 { id: BlockHashOrNumber, transaction_kind: TransactionVariant, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.sealed_block_with_senders(id, transaction_kind), - |block_state| Ok(Some(block_state.sealed_block_with_senders())), - ) + self.consistent_provider()?.sealed_block_with_senders(id, transaction_kind) } fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.block_range(range), - |block_state, _| Some(block_state.block_ref().block().clone().unseal()), - |_| true, - ) + self.consistent_provider()?.block_range(range) } fn block_with_senders_range( &self, range: RangeInclusive, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.block_with_senders_range(range), - |block_state, _| Some(block_state.block_with_senders()), - |_| true, - ) + self.consistent_provider()?.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.sealed_block_with_senders_range(range), - |block_state, _| Some(block_state.sealed_block_with_senders()), - |_| true, - ) + self.consistent_provider()?.sealed_block_with_senders_range(range) } } impl TransactionsProvider for BlockchainProvider2 { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - tx_hash.into(), - |db_provider| db_provider.transaction_id(tx_hash), - |_, tx_number, _| Ok(Some(tx_number)), - ) + self.consistent_provider()?.transaction_id(tx_hash) } fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_by_id(id), - |tx_index, _, block_state| { - Ok(block_state.block_ref().block().body.transactions.get(tx_index).cloned()) - }, - ) + self.consistent_provider()?.transaction_by_id(id) } fn transaction_by_id_no_hash( &self, id: TxNumber, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_by_id_no_hash(id), - |tx_index, _, block_state| { - Ok(block_state - .block_ref() - .block() - .body - .transactions - .get(tx_index) - .cloned() - .map(Into::into)) - }, - ) + self.consistent_provider()?.transaction_by_id_no_hash(id) } fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { - if let Some(tx) = self.canonical_in_memory_state.transaction_by_hash(hash) { - return Ok(Some(tx)) - } - - self.database.transaction_by_hash(hash) + self.consistent_provider()?.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, ) -> ProviderResult> { - if let Some((tx, meta)) = - self.canonical_in_memory_state.transaction_by_hash_with_meta(tx_hash) - { - return Ok(Some((tx, meta))) - } - - self.database.transaction_by_hash_with_meta(tx_hash) + 
self.consistent_provider()?.transaction_by_hash_with_meta(tx_hash) } fn transaction_block(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_block(id), - |_, _, block_state| Ok(Some(block_state.block_ref().block().number)), - ) + self.consistent_provider()?.transaction_block(id) } fn transactions_by_block( &self, id: BlockHashOrNumber, ) -> ProviderResult>> { - self.get_in_memory_or_storage_by_block( - id, - |provider| provider.transactions_by_block(id), - |block_state| Ok(Some(block_state.block_ref().block().body.transactions.clone())), - ) + self.consistent_provider()?.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, ) -> ProviderResult>> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.transactions_by_block_range(range), - |block_state, _| Some(block_state.block_ref().block().body.transactions.clone()), - |_| true, - ) + self.consistent_provider()?.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx_range( - range, - |db_provider, db_range| db_provider.transactions_by_tx_range(db_range), - |index_range, block_state| { - Ok(block_state.block_ref().block().body.transactions[index_range] - .iter() - .cloned() - .map(Into::into) - .collect()) - }, - ) + self.consistent_provider()?.transactions_by_tx_range(range) } fn senders_by_tx_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx_range( - range, - |db_provider, db_range| db_provider.senders_by_tx_range(db_range), - |index_range, block_state| Ok(block_state.block_ref().senders[index_range].to_vec()), - ) + self.consistent_provider()?.senders_by_tx_range(range) } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_sender(id), - |tx_index, _, block_state| Ok(block_state.block_ref().senders.get(tx_index).copied()), - ) + self.consistent_provider()?.transaction_sender(id) } } impl ReceiptProvider for BlockchainProvider2 { fn receipt(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.receipt(id), - |tx_index, _, block_state| { - Ok(block_state.executed_block_receipts().get(tx_index).cloned()) - }, - ) + self.consistent_provider()?.receipt(id) } fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { - for block_state in self.canonical_in_memory_state.canonical_chain() { - let executed_block = block_state.block_ref(); - let block = executed_block.block(); - let receipts = block_state.executed_block_receipts(); - - // assuming 1:1 correspondence between transactions and receipts - debug_assert_eq!( - block.body.transactions.len(), - receipts.len(), - "Mismatch between transaction and receipt count" - ); - - if let Some(tx_index) = block.body.transactions.iter().position(|tx| tx.hash() == hash) - { - // safe to use tx_index for receipts due to 1:1 correspondence - return Ok(receipts.get(tx_index).cloned()); - } - } - - self.database.receipt_by_hash(hash) + self.consistent_provider()?.receipt_by_hash(hash) } fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { - self.get_in_memory_or_storage_by_block( - block, - |db_provider| db_provider.receipts_by_block(block), - |block_state| Ok(Some(block_state.executed_block_receipts())), - ) + 
self.consistent_provider()?.receipts_by_block(block) } fn receipts_by_tx_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx_range( - range, - |db_provider, db_range| db_provider.receipts_by_tx_range(db_range), - |index_range, block_state| { - Ok(block_state.executed_block_receipts().drain(index_range).collect()) - }, - ) + self.consistent_provider()?.receipts_by_tx_range(range) } } impl ReceiptProviderIdExt for BlockchainProvider2 { fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { - match block { - BlockId::Hash(rpc_block_hash) => { - let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; - if receipts.is_none() && !rpc_block_hash.require_canonical.unwrap_or(false) { - let block_state = self - .canonical_in_memory_state - .state_by_hash(rpc_block_hash.block_hash) - .ok_or(ProviderError::StateForHashNotFound(rpc_block_hash.block_hash))?; - receipts = Some(block_state.executed_block_receipts()); - } - Ok(receipts) - } - BlockId::Number(num_tag) => match num_tag { - BlockNumberOrTag::Pending => Ok(self - .canonical_in_memory_state - .pending_state() - .map(|block_state| block_state.executed_block_receipts())), - _ => { - if let Some(num) = self.convert_block_number(num_tag)? { - self.receipts_by_block(num.into()) - } else { - Ok(None) - } - } - }, - } + self.consistent_provider()?.receipts_by_block_id(block) } } @@ -1170,47 +422,25 @@ impl WithdrawalsProvider for BlockchainProvider2 { id: BlockHashOrNumber, timestamp: u64, ) -> ProviderResult> { - if !self.chain_spec().is_shanghai_active_at_timestamp(timestamp) { - return Ok(None) - } - - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.withdrawals_by_block(id, timestamp), - |block_state| Ok(block_state.block_ref().block().body.withdrawals.clone()), - ) + self.consistent_provider()?.withdrawals_by_block(id, timestamp) } fn latest_withdrawal(&self) -> ProviderResult> { - let best_block_num = self.best_block_number()?; - - self.get_in_memory_or_storage_by_block( - best_block_num.into(), - |db_provider| db_provider.latest_withdrawal(), - |block_state| { - Ok(block_state - .block_ref() - .block() - .body - .withdrawals - .clone() - .and_then(|mut w| w.pop())) - }, - ) + self.consistent_provider()?.latest_withdrawal() } } impl StageCheckpointReader for BlockchainProvider2 { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { - self.database.provider()?.get_stage_checkpoint(id) + self.consistent_provider()?.get_stage_checkpoint(id) } fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { - self.database.provider()?.get_stage_checkpoint_progress(id) + self.consistent_provider()?.get_stage_checkpoint_progress(id) } fn get_all_checkpoints(&self) -> ProviderResult> { - self.database.provider()?.get_all_checkpoints() + self.consistent_provider()?.get_all_checkpoints() } } @@ -1225,9 +455,7 @@ impl EvmEnvProvider for BlockchainProvider2 { where EvmConfig: ConfigureEvmEnv
, { - let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; - let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; - self.fill_env_with_header(cfg, block_env, &header, evm_config) + self.consistent_provider()?.fill_env_at(cfg, block_env, at, evm_config) } fn fill_env_with_header( @@ -1240,11 +468,7 @@ impl EvmEnvProvider for BlockchainProvider2 { where EvmConfig: ConfigureEvmEnv
, { - let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty); - Ok(()) + self.consistent_provider()?.fill_env_with_header(cfg, block_env, header, evm_config) } fn fill_cfg_env_at( @@ -1256,9 +480,7 @@ impl EvmEnvProvider for BlockchainProvider2 { where EvmConfig: ConfigureEvmEnv
, { - let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; - let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; - self.fill_cfg_env_with_header(cfg, &header, evm_config) + self.consistent_provider()?.fill_cfg_env_at(cfg, at, evm_config) } fn fill_cfg_env_with_header( @@ -1270,11 +492,7 @@ impl EvmEnvProvider for BlockchainProvider2 { where EvmConfig: ConfigureEvmEnv
, { - let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - evm_config.fill_cfg_env(cfg, header, total_difficulty); - Ok(()) + self.consistent_provider()?.fill_cfg_env_with_header(cfg, header, evm_config) } } @@ -1283,11 +501,11 @@ impl PruneCheckpointReader for BlockchainProvider2 { &self, segment: PruneSegment, ) -> ProviderResult> { - self.database.provider()?.get_prune_checkpoint(segment) + self.consistent_provider()?.get_prune_checkpoint(segment) } fn get_prune_checkpoints(&self) -> ProviderResult> { - self.database.provider()?.get_prune_checkpoints() + self.consistent_provider()?.get_prune_checkpoints() } } @@ -1318,8 +536,9 @@ impl StateProviderFactory for BlockchainProvider2 { block_number: BlockNumber, ) -> ProviderResult { trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); - self.ensure_canonical_block(block_number)?; - let hash = self + let provider = self.consistent_provider()?; + provider.ensure_canonical_block(block_number)?; + let hash = provider .block_hash(block_number)? .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; self.history_by_block_hash(hash) @@ -1328,14 +547,11 @@ impl StateProviderFactory for BlockchainProvider2 { fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult { trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); - self.get_in_memory_or_storage_by_block( + self.consistent_provider()?.get_in_memory_or_storage_by_block( block_hash.into(), - |_| { - // TODO(joshie): port history_by_block_hash to DatabaseProvider and use db_provider - self.database.history_by_block_hash(block_hash) - }, + |_| self.database.history_by_block_hash(block_hash), |block_state| { - let state_provider = self.block_state_provider(&block_state)?; + let state_provider = self.block_state_provider(block_state)?; Ok(Box::new(state_provider)) }, ) @@ -1444,105 +660,35 @@ where } } -impl BlockReaderIdExt for BlockchainProvider2 +impl BlockReaderIdExt for BlockchainProvider2 where Self: BlockReader + ReceiptProviderIdExt, { fn block_by_id(&self, id: BlockId) -> ProviderResult> { - match id { - BlockId::Number(num) => self.block_by_number_or_tag(num), - BlockId::Hash(hash) => { - // TODO: should we only apply this for the RPCs that are listed in EIP-1898? - // so not at the provider level? 
- // if we decide to do this at a higher level, then we can make this an automatic - // trait impl - if Some(true) == hash.require_canonical { - // check the database, canonical blocks are only stored in the database - self.find_block_by_hash(hash.block_hash, BlockSource::Canonical) - } else { - self.block_by_hash(hash.block_hash) - } - } - } + self.consistent_provider()?.block_by_id(id) } fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { - Ok(match id { - BlockNumberOrTag::Latest => { - Some(self.canonical_in_memory_state.get_canonical_head().unseal()) - } - BlockNumberOrTag::Finalized => { - self.canonical_in_memory_state.get_finalized_header().map(|h| h.unseal()) - } - BlockNumberOrTag::Safe => { - self.canonical_in_memory_state.get_safe_header().map(|h| h.unseal()) - } - BlockNumberOrTag::Earliest => self.header_by_number(0)?, - BlockNumberOrTag::Pending => self.canonical_in_memory_state.pending_header(), - - BlockNumberOrTag::Number(num) => self.header_by_number(num)?, - }) + self.consistent_provider()?.header_by_number_or_tag(id) } fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, ) -> ProviderResult> { - match id { - BlockNumberOrTag::Latest => { - Ok(Some(self.canonical_in_memory_state.get_canonical_head())) - } - BlockNumberOrTag::Finalized => { - Ok(self.canonical_in_memory_state.get_finalized_header()) - } - BlockNumberOrTag::Safe => Ok(self.canonical_in_memory_state.get_safe_header()), - BlockNumberOrTag::Earliest => self.header_by_number(0)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), - BlockNumberOrTag::Pending => Ok(self.canonical_in_memory_state.pending_sealed_header()), - BlockNumberOrTag::Number(num) => self.header_by_number(num)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), - } + self.consistent_provider()?.sealed_header_by_number_or_tag(id) } fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { - Ok(match id { - BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }), - }) + self.consistent_provider()?.sealed_header_by_id(id) } fn header_by_id(&self, id: BlockId) -> ProviderResult> { - Ok(match id { - BlockId::Number(num) => self.header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?, - }) + self.consistent_provider()?.header_by_id(id) } fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { - match id { - BlockId::Number(num) => self.ommers_by_number_or_tag(num), - BlockId::Hash(hash) => { - // TODO: EIP-1898 question, see above - // here it is not handled - self.ommers(BlockHashOrNumber::Hash(hash.block_hash)) - } - } + self.consistent_provider()?.ommers_by_id(id) } } @@ -1569,49 +715,7 @@ impl StorageChangeSetReader for BlockchainProvider2 { &self, block_number: BlockNumber, ) -> ProviderResult> { - if let Some(state) = self.canonical_in_memory_state.state_by_number(block_number) { - let changesets = state - .block() - .execution_output - .bundle - .reverts - .clone() - .into_plain_state_reverts() - .storage - .into_iter() - .flatten() - .flat_map(|revert: PlainStorageRevert| { - revert.storage_revert.into_iter().map(move |(key, value)| { - ( - 
BlockNumberAddress((block_number, revert.address)), - StorageEntry { key: key.into(), value: value.to_previous_value() }, - ) - }) - }) - .collect(); - Ok(changesets) - } else { - // Perform checks on whether or not changesets exist for the block. - let provider = self.database.provider()?; - - // No prune checkpoint means history should exist and we should `unwrap_or(true)` - let storage_history_exists = provider - .get_prune_checkpoint(PruneSegment::StorageHistory)? - .and_then(|checkpoint| { - // return true if the block number is ahead of the prune checkpoint. - // - // The checkpoint stores the highest pruned block number, so we should make - // sure the block_number is strictly greater. - checkpoint.block_number.map(|checkpoint| block_number > checkpoint) - }) - .unwrap_or(true); - - if !storage_history_exists { - return Err(ProviderError::StateAtBlockPruned(block_number)) - } - - provider.storage_changeset(block_number) - } + self.consistent_provider()?.storage_changeset(block_number) } } @@ -1620,50 +724,14 @@ impl ChangeSetReader for BlockchainProvider2 { &self, block_number: BlockNumber, ) -> ProviderResult> { - if let Some(state) = self.canonical_in_memory_state.state_by_number(block_number) { - let changesets = state - .block_ref() - .execution_output - .bundle - .reverts - .clone() - .into_plain_state_reverts() - .accounts - .into_iter() - .flatten() - .map(|(address, info)| AccountBeforeTx { address, info: info.map(Into::into) }) - .collect(); - Ok(changesets) - } else { - // Perform checks on whether or not changesets exist for the block. - let provider = self.database.provider()?; - // No prune checkpoint means history should exist and we should `unwrap_or(true)` - let account_history_exists = provider - .get_prune_checkpoint(PruneSegment::AccountHistory)? - .and_then(|checkpoint| { - // return true if the block number is ahead of the prune checkpoint. - // - // The checkpoint stores the highest pruned block number, so we should make - // sure the block_number is strictly greater. - checkpoint.block_number.map(|checkpoint| block_number > checkpoint) - }) - .unwrap_or(true); - - if !account_history_exists { - return Err(ProviderError::StateAtBlockPruned(block_number)) - } - - provider.account_block_changeset(block_number) - } + self.consistent_provider()?.account_block_changeset(block_number) } } impl AccountReader for BlockchainProvider2 { /// Get basic account information. fn basic_account(&self, address: Address) -> ProviderResult> { - // use latest state provider - let state_provider = self.latest()?; - state_provider.basic_account(address) + self.consistent_provider()?.basic_account(address) } } @@ -1678,12 +746,7 @@ impl StateReader for BlockchainProvider2 { /// because the tree thread is responsible for modifying the [`CanonicalInMemoryState`] in the /// first place. 
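A subtlety in the body that follows: the delegation is written with fully qualified syntax because `ConsistentProvider` also gains an inherent `get_state(range)` method later in this diff, and Rust resolves inherent methods before trait methods of the same name. A small sketch of the distinction, with the function name being our own:

```rust
// Hypothetical caller illustrating the name clash:
fn outcome_at<N: ProviderNodeTypes>(
    provider: &BlockchainProvider2<N>,
    block: u64,
) -> ProviderResult<Option<ExecutionOutcome>> {
    let view = provider.consistent_provider()?;
    // `view.get_state(block)` would resolve to the inherent
    // `get_state(range: RangeInclusive<BlockNumber>)` and fail to type-check,
    // so the `StateReader` trait method is named explicitly:
    StateReader::get_state(&view, block)
}
```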
fn get_state(&self, block: BlockNumber) -> ProviderResult> { - if let Some(state) = self.canonical_in_memory_state.state_by_number(block) { - let state = state.block_ref().execution_outcome().clone(); - Ok(Some(state)) - } else { - self.get_state(block..=block) - } + StateReader::get_state(&self.consistent_provider()?, block) } } diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs new file mode 100644 index 000000000000..d6847fa1b8f6 --- /dev/null +++ b/crates/storage/provider/src/providers/consistent.rs @@ -0,0 +1,1871 @@ +use super::{DatabaseProviderRO, ProviderFactory, ProviderNodeTypes}; +use crate::{ + providers::StaticFileProvider, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, + BlockReader, BlockReaderIdExt, BlockSource, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, + HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, + StageCheckpointReader, StateReader, StaticFileProviderFactory, TransactionVariant, + TransactionsProvider, WithdrawalsProvider, +}; +use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber}; +use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; +use reth_chain_state::{BlockState, CanonicalInMemoryState, MemoryOverlayStateProviderRef}; +use reth_chainspec::{ChainInfo, EthereumHardforks}; +use reth_db::models::BlockNumberAddress; +use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; +use reth_evm::ConfigureEvmEnv; +use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; +use reth_primitives::{ + Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, + SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + Withdrawal, Withdrawals, +}; +use reth_prune_types::{PruneCheckpoint, PruneSegment}; +use reth_stages_types::{StageCheckpoint, StageId}; +use reth_storage_api::{DatabaseProviderFactory, StateProvider, StorageChangeSetReader}; +use reth_storage_errors::provider::ProviderResult; +use revm::{ + db::states::PlainStorageRevert, + primitives::{BlockEnv, CfgEnvWithHandlerCfg}, +}; +use std::{ + collections::{hash_map, HashMap}, + ops::{Add, Bound, RangeBounds, RangeInclusive, Sub}, + sync::Arc, +}; +use tracing::trace; + +/// Type that interacts with a snapshot view of the blockchain (storage and in-memory) at time of +/// instantiation, EXCEPT for the pending, safe and finalized blocks, which might change while +/// holding this provider. +/// +/// CAUTION: Avoid holding this provider for too long or the inner database transaction will +/// time out. +#[derive(Debug)] +pub struct ConsistentProvider { + /// Storage provider. + storage_provider: as DatabaseProviderFactory>::Provider, + /// Head block at the time of [`Self`] creation. + head_block: Option>, + /// In-memory canonical state. This is not a snapshot, and can change! Use with caution. + canonical_in_memory_state: CanonicalInMemoryState, +} + +impl ConsistentProvider { + /// Create a new provider using [`ProviderFactory`] and [`CanonicalInMemoryState`]. + /// + /// Underneath it will take a snapshot by fetching [`CanonicalInMemoryState::head_state`] and + /// [`ProviderFactory::database_provider_ro`], effectively maintaining a single snapshotted + /// view of memory and database.
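The ordering argument spelled out in the constructor below is easier to see with concrete numbers; the interleaving here is our own illustration, assuming a persistence task that moves blocks from the in-memory chain to disk:

```rust
// Invented scenario: a persistence task flushes blocks 101..=105 while we
// are constructing the provider.
//
// Wrong order (database view first):
//   1. open DB view     -> sees blocks ..=100
//   2. flush 101..=105 to disk, drop them from memory
//   3. take head_state  -> in-memory chain now starts at 106
//   => 101..=105 are visible in NEITHER view: gaps when reading ranges.
//
// Right order (as in `new` below):
//   1. take head_state  -> covers 95..=105
//   2. flush 101..=105 to disk, drop them from memory
//   3. open DB view     -> sees blocks ..=105
//   => the views overlap on 95..=105, and overlaps are resolved in favor of
//      the in-memory snapshot, so there is neither a gap nor stale fork data.
```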
+ pub fn new( + storage_provider_factory: ProviderFactory, + state: CanonicalInMemoryState, + ) -> ProviderResult { + // Each one provides a snapshot at the time of instantiation, but its order matters. + // + // If we acquire first the database provider, it's possible that before the in-memory chain + // snapshot is instantiated, it will flush blocks to disk. This would + // mean that our database provider would not have access to the flushed blocks (since it's + // working under an older view), while the in-memory state may have deleted them + // entirely. Resulting in gaps on the range. + let head_block = state.head_state(); + let storage_provider = storage_provider_factory.database_provider_ro()?; + Ok(Self { storage_provider, head_block, canonical_in_memory_state: state }) + } + + // Helper function to convert range bounds + fn convert_range_bounds( + &self, + range: impl RangeBounds, + end_unbounded: impl FnOnce() -> T, + ) -> (T, T) + where + T: Copy + Add + Sub + From, + { + let start = match range.start_bound() { + Bound::Included(&n) => n, + Bound::Excluded(&n) => n + T::from(1u8), + Bound::Unbounded => T::from(0u8), + }; + + let end = match range.end_bound() { + Bound::Included(&n) => n, + Bound::Excluded(&n) => n - T::from(1u8), + Bound::Unbounded => end_unbounded(), + }; + + (start, end) + } + + /// Storage provider for latest block + fn latest_ref<'a>(&'a self) -> ProviderResult> { + trace!(target: "providers::blockchain", "Getting latest block state provider"); + + // use latest state provider if the head state exists + if let Some(state) = &self.head_block { + trace!(target: "providers::blockchain", "Using head state for latest state provider"); + Ok(self.block_state_provider_ref(state)?.boxed()) + } else { + trace!(target: "providers::blockchain", "Using database state for latest state provider"); + self.storage_provider.latest() + } + } + + fn history_by_block_hash_ref<'a>( + &'a self, + block_hash: BlockHash, + ) -> ProviderResult> { + trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); + + self.get_in_memory_or_storage_by_block( + block_hash.into(), + |_| self.storage_provider.history_by_block_hash(block_hash), + |block_state| { + let state_provider = self.block_state_provider_ref(block_state)?; + Ok(Box::new(state_provider)) + }, + ) + } + + /// Returns a state provider indexed by the given block number or tag. + fn state_by_block_number_ref<'a>( + &'a self, + number: BlockNumber, + ) -> ProviderResult> { + let hash = + self.block_hash(number)?.ok_or_else(|| ProviderError::HeaderNotFound(number.into()))?; + self.history_by_block_hash_ref(hash) + } + + /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. + /// + /// If the range is empty, or there are no blocks for the given range, then this returns `None`. + pub fn get_state( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + if range.is_empty() { + return Ok(None) + } + let start_block_number = *range.start(); + let end_block_number = *range.end(); + + // We are not removing block meta as it is used to get block changesets. + let mut block_bodies = Vec::new(); + for block_num in range.clone() { + let block_body = self + .block_body_indices(block_num)? 
+ .ok_or(ProviderError::BlockBodyIndicesNotFound(block_num))?; + block_bodies.push((block_num, block_body)) + } + + // get transaction receipts + let Some(from_transaction_num) = block_bodies.first().map(|body| body.1.first_tx_num()) + else { + return Ok(None) + }; + let Some(to_transaction_num) = block_bodies.last().map(|body| body.1.last_tx_num()) else { + return Ok(None) + }; + + let mut account_changeset = Vec::new(); + for block_num in range.clone() { + let changeset = + self.account_block_changeset(block_num)?.into_iter().map(|elem| (block_num, elem)); + account_changeset.extend(changeset); + } + + let mut storage_changeset = Vec::new(); + for block_num in range { + let changeset = self.storage_changeset(block_num)?; + storage_changeset.extend(changeset); + } + + let (state, reverts) = + self.populate_bundle_state(account_changeset, storage_changeset, end_block_number)?; + + let mut receipt_iter = + self.receipts_by_tx_range(from_transaction_num..=to_transaction_num)?.into_iter(); + + let mut receipts = Vec::with_capacity(block_bodies.len()); + // loop break if we are at the end of the blocks. + for (_, block_body) in block_bodies { + let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); + for tx_num in block_body.tx_num_range() { + let receipt = receipt_iter + .next() + .ok_or_else(|| ProviderError::ReceiptNotFound(tx_num.into()))?; + block_receipts.push(Some(receipt)); + } + receipts.push(block_receipts); + } + + Ok(Some(ExecutionOutcome::new_init( + state, + reverts, + // We skip new contracts since we never delete them from the database + Vec::new(), + receipts.into(), + start_block_number, + Vec::new(), + ))) + } + + /// Populate a [`BundleStateInit`] and [`RevertsInit`] using cursors over the + /// [`reth_db::PlainAccountState`] and [`reth_db::PlainStorageState`] tables, based on the given + /// storage and account changesets. + fn populate_bundle_state( + &self, + account_changeset: Vec<(u64, AccountBeforeTx)>, + storage_changeset: Vec<(BlockNumberAddress, StorageEntry)>, + block_range_end: BlockNumber, + ) -> ProviderResult<(BundleStateInit, RevertsInit)> { + let mut state: BundleStateInit = HashMap::new(); + let mut reverts: RevertsInit = HashMap::new(); + let state_provider = self.state_by_block_number_ref(block_range_end)?; + + // add account changeset changes + for (block_number, account_before) in account_changeset.into_iter().rev() { + let AccountBeforeTx { info: old_info, address } = account_before; + match state.entry(address) { + hash_map::Entry::Vacant(entry) => { + let new_info = state_provider.basic_account(address)?; + entry.insert((old_info, new_info, HashMap::new())); + } + hash_map::Entry::Occupied(mut entry) => { + // overwrite old account state. + entry.get_mut().0 = old_info; + } + } + // insert old info into reverts. + reverts.entry(block_number).or_default().entry(address).or_default().0 = Some(old_info); + } + + // add storage changeset changes + for (block_and_address, old_storage) in storage_changeset.into_iter().rev() { + let BlockNumberAddress((block_number, address)) = block_and_address; + // get account state or insert from plain state. + let account_state = match state.entry(address) { + hash_map::Entry::Vacant(entry) => { + let present_info = state_provider.basic_account(address)?; + entry.insert((present_info, present_info, HashMap::new())) + } + hash_map::Entry::Occupied(entry) => entry.into_mut(), + }; + + // match storage. 
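The vacant/occupied handling below is easiest to follow with a worked example; all values here are invented:

```rust
// Slot `k` of account `A` changed twice in the requested range: the
// changesets record pre-values 10 (block 5) and 20 (block 7), and the
// present plain-state value is 30. Changesets are walked in reverse:
//   block 7: entry vacant   -> look up plain state, insert (20, 30)
//   block 5: entry occupied -> overwrite the old side only, leaving (10, 30)
// i.e. (value before the range, value after the range). Each pre-value is
// additionally pushed into `reverts` under its own block number.
```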
+ match account_state.2.entry(old_storage.key) { + hash_map::Entry::Vacant(entry) => { + let new_storage_value = + state_provider.storage(address, old_storage.key)?.unwrap_or_default(); + entry.insert((old_storage.value, new_storage_value)); + } + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().0 = old_storage.value; + } + }; + + reverts + .entry(block_number) + .or_default() + .entry(address) + .or_default() + .1 + .push(old_storage); + } + + Ok((state, reverts)) + } + + /// Fetches a range of data from both in-memory state and persistent storage while a predicate + /// is met. + /// + /// Creates a snapshot of the in-memory chain state and database provider to prevent + /// inconsistencies. Splits the range into in-memory and storage sections, prioritizing + /// recent in-memory blocks in case of overlaps. + /// + /// * `fetch_db_range` function (`F`) provides access to the database provider, allowing the + /// user to retrieve the required items from the database using [`RangeInclusive`]. + /// * `map_block_state_item` function (`G`) provides each block of the range in the in-memory + /// state, allowing for selection or filtering for the desired data. + fn get_in_memory_or_storage_by_block_range_while( + &self, + range: impl RangeBounds, + fetch_db_range: F, + map_block_state_item: G, + mut predicate: P, + ) -> ProviderResult> + where + F: FnOnce( + &DatabaseProviderRO, + RangeInclusive, + &mut P, + ) -> ProviderResult>, + G: Fn(&BlockState, &mut P) -> Option, + P: FnMut(&T) -> bool, + { + // Each one provides a snapshot at the time of instantiation, but its order matters. + // + // If we acquire first the database provider, it's possible that before the in-memory chain + // snapshot is instantiated, it will flush blocks to disk. This would + // mean that our database provider would not have access to the flushed blocks (since it's + // working under an older view), while the in-memory state may have deleted them + // entirely. Resulting in gaps on the range. + let mut in_memory_chain = + self.head_block.as_ref().map(|b| b.chain().collect::>()).unwrap_or_default(); + let db_provider = &self.storage_provider; + + let (start, end) = self.convert_range_bounds(range, || { + // the first block is the highest one. + in_memory_chain + .first() + .map(|b| b.number()) + .unwrap_or_else(|| db_provider.last_block_number().unwrap_or_default()) + }); + + if start > end { + return Ok(vec![]) + } + + // Split range into storage_range and in-memory range. If the in-memory range is not + // necessary drop it early. + // + // The last block of `in_memory_chain` is the lowest block number. + let (in_memory, storage_range) = match in_memory_chain.last().as_ref().map(|b| b.number()) { + Some(lowest_memory_block) if lowest_memory_block <= end => { + let highest_memory_block = + in_memory_chain.first().as_ref().map(|b| b.number()).expect("qed"); + + // Database will for a time overlap with in-memory-chain blocks. In + // case of a re-org, it can mean that the database blocks are of a forked chain, and + // so, we should prioritize the in-memory overlapped blocks. 
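Concrete numbers (ours, not from the diff) make the split below clearer:

```rust
// Storage holds blocks ..=100; the in-memory chain covers 95..=105.
//   request 90..=103  -> storage_range   = Some(90..=94)
//                        in_memory_range = 95..=103
//     (the overlap 95..=100 is served from memory, not from a database view
//      that may still contain blocks of a re-orged-away fork)
//   request 101..=103 -> storage_range   = None, everything from memory
```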
+ let in_memory_range = + lowest_memory_block.max(start)..=end.min(highest_memory_block); + + // If requested range is in the middle of the in-memory range, remove the necessary + // lowest blocks + in_memory_chain.truncate( + in_memory_chain + .len() + .saturating_sub(start.saturating_sub(lowest_memory_block) as usize), + ); + + let storage_range = + (lowest_memory_block > start).then(|| start..=lowest_memory_block - 1); + + (Some((in_memory_chain, in_memory_range)), storage_range) + } + _ => { + // Drop the in-memory chain so we don't hold blocks in memory. + drop(in_memory_chain); + + (None, Some(start..=end)) + } + }; + + let mut items = Vec::with_capacity((end - start + 1) as usize); + + if let Some(storage_range) = storage_range { + let mut db_items = fetch_db_range(db_provider, storage_range.clone(), &mut predicate)?; + items.append(&mut db_items); + + // The predicate was not met, if the number of items differs from the expected. So, we + // return what we have. + if items.len() as u64 != storage_range.end() - storage_range.start() + 1 { + return Ok(items) + } + } + + if let Some((in_memory_chain, in_memory_range)) = in_memory { + for (num, block) in in_memory_range.zip(in_memory_chain.into_iter().rev()) { + debug_assert!(num == block.number()); + if let Some(item) = map_block_state_item(block, &mut predicate) { + items.push(item); + } else { + break + } + } + } + + Ok(items) + } + + /// This uses a given [`BlockState`] to initialize a state provider for that block. + fn block_state_provider_ref( + &self, + state: &BlockState, + ) -> ProviderResult> { + let anchor_hash = state.anchor().hash; + let latest_historical = self.history_by_block_hash_ref(anchor_hash)?; + let in_memory = state.chain().map(|block_state| block_state.block()).collect(); + Ok(MemoryOverlayStateProviderRef::new(latest_historical, in_memory)) + } + + /// Fetches data from either in-memory state or persistent storage for a range of transactions. + /// + /// * `fetch_from_db`: has a `DatabaseProviderRO` and the storage specific range. + /// * `fetch_from_block_state`: has a [`RangeInclusive`] of elements that should be fetched from + /// [`BlockState`]. [`RangeInclusive`] is necessary to handle partial look-ups of a block. + fn get_in_memory_or_storage_by_tx_range( + &self, + range: impl RangeBounds, + fetch_from_db: S, + fetch_from_block_state: M, + ) -> ProviderResult> + where + S: FnOnce( + &DatabaseProviderRO, + RangeInclusive, + ) -> ProviderResult>, + M: Fn(RangeInclusive, &BlockState) -> ProviderResult>, + { + let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); + let provider = &self.storage_provider; + + // Get the last block number stored in the storage which does NOT overlap with in-memory + // chain. + let last_database_block_number = in_mem_chain + .last() + .map(|b| Ok(b.anchor().number)) + .unwrap_or_else(|| provider.last_block_number())?; + + // Get the next tx number for the last block stored in the storage, which marks the start of + // the in-memory state. + let last_block_body_index = provider + .block_body_indices(last_database_block_number)? 
+ .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; + let mut in_memory_tx_num = last_block_body_index.next_tx_num(); + + let (start, end) = self.convert_range_bounds(range, || { + in_mem_chain + .iter() + .map(|b| b.block_ref().block().body.transactions.len() as u64) + .sum::() + + last_block_body_index.last_tx_num() + }); + + if start > end { + return Ok(vec![]) + } + + let mut tx_range = start..=end; + + // If the range is entirely before the first in-memory transaction number, fetch from + // storage + if *tx_range.end() < in_memory_tx_num { + return fetch_from_db(provider, tx_range); + } + + let mut items = Vec::with_capacity((tx_range.end() - tx_range.start() + 1) as usize); + + // If the range spans storage and memory, get elements from storage first. + if *tx_range.start() < in_memory_tx_num { + // Determine the range that needs to be fetched from storage. + let db_range = *tx_range.start()..=in_memory_tx_num.saturating_sub(1); + + // Set the remaining transaction range for in-memory + tx_range = in_memory_tx_num..=*tx_range.end(); + + items.extend(fetch_from_db(provider, db_range)?); + } + + // Iterate from the lowest block to the highest in-memory chain + for block_state in in_mem_chain.iter().rev() { + let block_tx_count = block_state.block_ref().block().body.transactions.len(); + let remaining = (tx_range.end() - tx_range.start() + 1) as usize; + + // If the transaction range start is equal or higher than the next block first + // transaction, advance + if *tx_range.start() >= in_memory_tx_num + block_tx_count as u64 { + in_memory_tx_num += block_tx_count as u64; + continue + } + + // This should only be more than 0 once, in case of a partial range inside a block. + let skip = (tx_range.start() - in_memory_tx_num) as usize; + + items.extend(fetch_from_block_state( + skip..=skip + (remaining.min(block_tx_count - skip) - 1), + block_state, + )?); + + in_memory_tx_num += block_tx_count as u64; + + // Break if the range has been fully processed + if in_memory_tx_num > *tx_range.end() { + break + } + + // Set updated range + tx_range = in_memory_tx_num..=*tx_range.end(); + } + + Ok(items) + } + + /// Fetches data from either in-memory state or persistent storage by transaction + /// [`HashOrNumber`]. + fn get_in_memory_or_storage_by_tx( + &self, + id: HashOrNumber, + fetch_from_db: S, + fetch_from_block_state: M, + ) -> ProviderResult> + where + S: FnOnce(&DatabaseProviderRO) -> ProviderResult>, + M: Fn(usize, TxNumber, &BlockState) -> ProviderResult>, + { + let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); + let provider = &self.storage_provider; + + // Get the last block number stored in the database which does NOT overlap with in-memory + // chain. + let last_database_block_number = in_mem_chain + .last() + .map(|b| Ok(b.anchor().number)) + .unwrap_or_else(|| provider.last_block_number())?; + + // Get the next tx number for the last block stored in the database and consider it the + // first tx number of the in-memory state + let last_block_body_index = provider + .block_body_indices(last_database_block_number)? 
+ .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; + let mut in_memory_tx_num = last_block_body_index.next_tx_num(); + + // If the transaction number is less than the first in-memory transaction number, make a + // database lookup + if let HashOrNumber::Number(id) = id { + if id < in_memory_tx_num { + return fetch_from_db(provider) + } + } + + // Iterate from the lowest block to the highest + for block_state in in_mem_chain.iter().rev() { + let executed_block = block_state.block_ref(); + let block = executed_block.block(); + + for tx_index in 0..block.body.transactions.len() { + match id { + HashOrNumber::Hash(tx_hash) => { + if tx_hash == block.body.transactions[tx_index].hash() { + return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) + } + } + HashOrNumber::Number(id) => { + if id == in_memory_tx_num { + return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) + } + } + } + + in_memory_tx_num += 1; + } + } + + // Not found in-memory, so check database. + if let HashOrNumber::Hash(_) = id { + return fetch_from_db(provider) + } + + Ok(None) + } + + /// Fetches data from either in-memory state or persistent storage by [`BlockHashOrNumber`]. + pub(crate) fn get_in_memory_or_storage_by_block( + &self, + id: BlockHashOrNumber, + fetch_from_db: S, + fetch_from_block_state: M, + ) -> ProviderResult + where + S: FnOnce(&DatabaseProviderRO) -> ProviderResult, + M: Fn(&BlockState) -> ProviderResult, + { + if let Some(Some(block_state)) = self.head_block.as_ref().map(|b| b.block_on_chain(id)) { + return fetch_from_block_state(block_state) + } + fetch_from_db(&self.storage_provider) + } +} + +impl ConsistentProvider { + /// Ensures that the given block number is canonical (synced) + /// + /// This is a helper for guarding the `HistoricalStateProvider` against block numbers that are + /// out of range and would lead to invalid results, mainly during initial sync. + /// + /// Verifying the `block_number` would be expensive since we need to lookup sync table + /// Instead, we ensure that the `block_number` is within the range of the + /// [`Self::best_block_number`] which is updated when a block is synced. + #[inline] + pub(crate) fn ensure_canonical_block(&self, block_number: BlockNumber) -> ProviderResult<()> { + let latest = self.best_block_number()?; + if block_number > latest { + Err(ProviderError::HeaderNotFound(block_number.into())) + } else { + Ok(()) + } + } +} + +impl StaticFileProviderFactory for ConsistentProvider { + fn static_file_provider(&self) -> StaticFileProvider { + self.storage_provider.static_file_provider() + } +} + +impl HeaderProvider for ConsistentProvider { + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + (*block_hash).into(), + |db_provider| db_provider.header(block_hash), + |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), + ) + } + + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + num.into(), + |db_provider| db_provider.header_by_number(num), + |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), + ) + } + + fn header_td(&self, hash: &BlockHash) -> ProviderResult> { + if let Some(num) = self.block_number(*hash)? 
{ + self.header_td_by_number(num) + } else { + Ok(None) + } + } + + fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { + let number = if self.head_block.as_ref().map(|b| b.block_on_chain(number.into())).is_some() + { + // If the block exists in memory, we should return a TD for it. + // + // The canonical in memory state should only store post-merge blocks. Post-merge blocks + // have zero difficulty. This means we can use the total difficulty for the last + // finalized block number if present (so that we are not affected by reorgs), if not the + // last number in the database will be used. + if let Some(last_finalized_num_hash) = + self.canonical_in_memory_state.get_finalized_num_hash() + { + last_finalized_num_hash.number + } else { + self.last_block_number()? + } + } else { + // Otherwise, return what we have on disk for the input block + number + }; + self.storage_provider.header_td_by_number(number) + } + + fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.headers_range(range), + |block_state, _| Some(block_state.block_ref().block().header.header().clone()), + |_| true, + ) + } + + fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + number.into(), + |db_provider| db_provider.sealed_header(number), + |block_state| Ok(Some(block_state.block_ref().block().header.clone())), + ) + } + + fn sealed_headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.sealed_headers_range(range), + |block_state, _| Some(block_state.block_ref().block().header.clone()), + |_| true, + ) + } + + fn sealed_headers_while( + &self, + range: impl RangeBounds, + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate), + |block_state, predicate| { + let header = &block_state.block_ref().block().header; + predicate(header).then(|| header.clone()) + }, + predicate, + ) + } +} + +impl BlockHashReader for ConsistentProvider { + fn block_hash(&self, number: u64) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + number.into(), + |db_provider| db_provider.block_hash(number), + |block_state| Ok(Some(block_state.hash())), + ) + } + + fn canonical_hashes_range( + &self, + start: BlockNumber, + end: BlockNumber, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + start..end, + |db_provider, inclusive_range, _| { + db_provider + .canonical_hashes_range(*inclusive_range.start(), *inclusive_range.end() + 1) + }, + |block_state, _| Some(block_state.hash()), + |_| true, + ) + } +} + +impl BlockNumReader for ConsistentProvider { + fn chain_info(&self) -> ProviderResult { + let best_number = self.best_block_number()?; + Ok(ChainInfo { best_hash: self.block_hash(best_number)?.unwrap_or_default(), best_number }) + } + + fn best_block_number(&self) -> ProviderResult { + self.head_block.as_ref().map(|b| Ok(b.number())).unwrap_or_else(|| self.last_block_number()) + } + + fn last_block_number(&self) -> ProviderResult { + self.storage_provider.last_block_number() + } + + fn block_number(&self, hash: B256) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + hash.into(), + |db_provider| 
db_provider.block_number(hash), + |block_state| Ok(Some(block_state.number())), + ) + } +} + +impl BlockIdReader for ConsistentProvider { + fn pending_block_num_hash(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.pending_block_num_hash()) + } + + fn safe_block_num_hash(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.get_safe_num_hash()) + } + + fn finalized_block_num_hash(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.get_finalized_num_hash()) + } +} + +impl BlockReader for ConsistentProvider { + fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + match source { + BlockSource::Any | BlockSource::Canonical => { + // Note: it's fine to return the unsealed block because the caller already has + // the hash + self.get_in_memory_or_storage_by_block( + hash.into(), + |db_provider| db_provider.find_block_by_hash(hash, source), + |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), + ) + } + BlockSource::Pending => { + Ok(self.canonical_in_memory_state.pending_block().map(|block| block.unseal())) + } + } + } + + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.block(id), + |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), + ) + } + + fn pending_block(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.pending_block()) + } + + fn pending_block_with_senders(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.pending_block_with_senders()) + } + + fn pending_block_and_receipts(&self) -> ProviderResult)>> { + Ok(self.canonical_in_memory_state.pending_block_and_receipts()) + } + + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.ommers(id), + |block_state| { + if self.chain_spec().final_paris_total_difficulty(block_state.number()).is_some() { + return Ok(Some(Vec::new())) + } + + Ok(Some(block_state.block_ref().block().body.ommers.clone())) + }, + ) + } + + fn block_body_indices( + &self, + number: BlockNumber, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + number.into(), + |db_provider| db_provider.block_body_indices(number), + |block_state| { + // Find the last block indices on database + let last_storage_block_number = block_state.anchor().number; + let mut stored_indices = self + .storage_provider + .block_body_indices(last_storage_block_number)? + .ok_or(ProviderError::BlockBodyIndicesNotFound(last_storage_block_number))?; + + // Prepare our block indices + stored_indices.first_tx_num = stored_indices.next_tx_num(); + stored_indices.tx_count = 0; + + // Iterate from the lowest block in memory until our target block + for state in block_state.chain().collect::>().into_iter().rev() { + let block_tx_count = state.block_ref().block.body.transactions.len() as u64; + if state.block_ref().block().number == number { + stored_indices.tx_count = block_tx_count; + } else { + stored_indices.first_tx_num += block_tx_count; + } + } + + Ok(Some(stored_indices)) + }, + ) + } + + /// Returns the block with senders with matching number or hash from database. + /// + /// **NOTE: If [`TransactionVariant::NoHash`] is provided then the transactions have invalid + /// hashes, since they would need to be calculated on the spot, and we want fast querying.** + /// + /// Returns `None` if block is not found. 
+ fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.block_with_senders(id, transaction_kind), + |block_state| Ok(Some(block_state.block_with_senders())), + ) + } + + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.sealed_block_with_senders(id, transaction_kind), + |block_state| Ok(Some(block_state.sealed_block_with_senders())), + ) + } + + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.block_range(range), + |block_state, _| Some(block_state.block_ref().block().clone().unseal()), + |_| true, + ) + } + + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.block_with_senders_range(range), + |block_state, _| Some(block_state.block_with_senders()), + |_| true, + ) + } + + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.sealed_block_with_senders_range(range), + |block_state, _| Some(block_state.sealed_block_with_senders()), + |_| true, + ) + } +} + +impl TransactionsProvider for ConsistentProvider { + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + tx_hash.into(), + |db_provider| db_provider.transaction_id(tx_hash), + |_, tx_number, _| Ok(Some(tx_number)), + ) + } + + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_by_id(id), + |tx_index, _, block_state| { + Ok(block_state.block_ref().block().body.transactions.get(tx_index).cloned()) + }, + ) + } + + fn transaction_by_id_no_hash( + &self, + id: TxNumber, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_by_id_no_hash(id), + |tx_index, _, block_state| { + Ok(block_state + .block_ref() + .block() + .body + .transactions + .get(tx_index) + .cloned() + .map(Into::into)) + }, + ) + } + + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + if let Some(tx) = self.head_block.as_ref().and_then(|b| b.transaction_on_chain(hash)) { + return Ok(Some(tx)) + } + + self.storage_provider.transaction_by_hash(hash) + } + + fn transaction_by_hash_with_meta( + &self, + tx_hash: TxHash, + ) -> ProviderResult> { + if let Some((tx, meta)) = + self.head_block.as_ref().and_then(|b| b.transaction_meta_on_chain(tx_hash)) + { + return Ok(Some((tx, meta))) + } + + self.storage_provider.transaction_by_hash_with_meta(tx_hash) + } + + fn transaction_block(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_block(id), + |_, _, block_state| Ok(Some(block_state.block_ref().block().number)), + ) + } + + fn transactions_by_block( + &self, + id: BlockHashOrNumber, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block( + id, + |provider| provider.transactions_by_block(id), + |block_state| Ok(Some(block_state.block_ref().block().body.transactions.clone())), + ) + } + + fn 
transactions_by_block_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.transactions_by_block_range(range), + |block_state, _| Some(block_state.block_ref().block().body.transactions.clone()), + |_| true, + ) + } + + fn transactions_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx_range( + range, + |db_provider, db_range| db_provider.transactions_by_tx_range(db_range), + |index_range, block_state| { + Ok(block_state.block_ref().block().body.transactions[index_range] + .iter() + .cloned() + .map(Into::into) + .collect()) + }, + ) + } + + fn senders_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx_range( + range, + |db_provider, db_range| db_provider.senders_by_tx_range(db_range), + |index_range, block_state| Ok(block_state.block_ref().senders[index_range].to_vec()), + ) + } + + fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_sender(id), + |tx_index, _, block_state| Ok(block_state.block_ref().senders.get(tx_index).copied()), + ) + } +} + +impl ReceiptProvider for ConsistentProvider { + fn receipt(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.receipt(id), + |tx_index, _, block_state| { + Ok(block_state.executed_block_receipts().get(tx_index).cloned()) + }, + ) + } + + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + for block_state in self.head_block.iter().flat_map(|b| b.chain()) { + let executed_block = block_state.block_ref(); + let block = executed_block.block(); + let receipts = block_state.executed_block_receipts(); + + // assuming 1:1 correspondence between transactions and receipts + debug_assert_eq!( + block.body.transactions.len(), + receipts.len(), + "Mismatch between transaction and receipt count" + ); + + if let Some(tx_index) = block.body.transactions.iter().position(|tx| tx.hash() == hash) + { + // safe to use tx_index for receipts due to 1:1 correspondence + return Ok(receipts.get(tx_index).cloned()); + } + } + + self.storage_provider.receipt_by_hash(hash) + } + + fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block( + block, + |db_provider| db_provider.receipts_by_block(block), + |block_state| Ok(Some(block_state.executed_block_receipts())), + ) + } + + fn receipts_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx_range( + range, + |db_provider, db_range| db_provider.receipts_by_tx_range(db_range), + |index_range, block_state| { + Ok(block_state.executed_block_receipts().drain(index_range).collect()) + }, + ) + } +} + +impl ReceiptProviderIdExt for ConsistentProvider { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + match block { + BlockId::Hash(rpc_block_hash) => { + let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; + if receipts.is_none() && !rpc_block_hash.require_canonical.unwrap_or(false) { + if let Some(state) = self + .head_block + .as_ref() + .and_then(|b| b.block_on_chain(rpc_block_hash.block_hash.into())) + { + receipts = Some(state.executed_block_receipts()); + } + } + Ok(receipts) + } + BlockId::Number(num_tag) => match num_tag { + BlockNumberOrTag::Pending => 
Ok(self
+                    .canonical_in_memory_state
+                    .pending_state()
+                    .map(|block_state| block_state.executed_block_receipts())),
+                _ => {
+                    if let Some(num) = self.convert_block_number(num_tag)? {
+                        self.receipts_by_block(num.into())
+                    } else {
+                        Ok(None)
+                    }
+                }
+            },
+        }
+    }
+}
+
+impl<N: ProviderNodeTypes> WithdrawalsProvider for ConsistentProvider<N> {
+    fn withdrawals_by_block(
+        &self,
+        id: BlockHashOrNumber,
+        timestamp: u64,
+    ) -> ProviderResult<Option<Withdrawals>> {
+        if !self.chain_spec().is_shanghai_active_at_timestamp(timestamp) {
+            return Ok(None)
+        }
+
+        self.get_in_memory_or_storage_by_block(
+            id,
+            |db_provider| db_provider.withdrawals_by_block(id, timestamp),
+            |block_state| Ok(block_state.block_ref().block().body.withdrawals.clone()),
+        )
+    }
+
+    fn latest_withdrawal(&self) -> ProviderResult<Option<Withdrawal>> {
+        let best_block_num = self.best_block_number()?;
+
+        self.get_in_memory_or_storage_by_block(
+            best_block_num.into(),
+            |db_provider| db_provider.latest_withdrawal(),
+            |block_state| {
+                Ok(block_state
+                    .block_ref()
+                    .block()
+                    .body
+                    .withdrawals
+                    .clone()
+                    .and_then(|mut w| w.pop()))
+            },
+        )
+    }
+}
+
+impl<N: ProviderNodeTypes> StageCheckpointReader for ConsistentProvider<N> {
+    fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult<Option<StageCheckpoint>> {
+        self.storage_provider.get_stage_checkpoint(id)
+    }
+
+    fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult<Option<Vec<u8>>> {
+        self.storage_provider.get_stage_checkpoint_progress(id)
+    }
+
+    fn get_all_checkpoints(&self) -> ProviderResult<Vec<(String, StageCheckpoint)>> {
+        self.storage_provider.get_all_checkpoints()
+    }
+}
+
+impl<N: ProviderNodeTypes> EvmEnvProvider for ConsistentProvider<N> {
+    fn fill_env_at<EvmConfig>(
+        &self,
+        cfg: &mut CfgEnvWithHandlerCfg,
+        block_env: &mut BlockEnv,
+        at: BlockHashOrNumber,
+        evm_config: EvmConfig,
+    ) -> ProviderResult<()>
+    where
+        EvmConfig: ConfigureEvmEnv<Header = Header>,
+    {
+        let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?;
+        let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?;
+        self.fill_env_with_header(cfg, block_env, &header, evm_config)
+    }
+
+    fn fill_env_with_header<EvmConfig>(
+        &self,
+        cfg: &mut CfgEnvWithHandlerCfg,
+        block_env: &mut BlockEnv,
+        header: &Header,
+        evm_config: EvmConfig,
+    ) -> ProviderResult<()>
+    where
+        EvmConfig: ConfigureEvmEnv<Header = Header>,
+    {
+        let total_difficulty = self
+            .header_td_by_number(header.number)?
+            .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?;
+        evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty);
+        Ok(())
+    }
+
+    fn fill_cfg_env_at<EvmConfig>(
+        &self,
+        cfg: &mut CfgEnvWithHandlerCfg,
+        at: BlockHashOrNumber,
+        evm_config: EvmConfig,
+    ) -> ProviderResult<()>
+    where
+        EvmConfig: ConfigureEvmEnv<Header = Header>,
+    {
+        let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?;
+        let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?;
+        self.fill_cfg_env_with_header(cfg, &header, evm_config)
+    }
+
+    fn fill_cfg_env_with_header<EvmConfig>(
+        &self,
+        cfg: &mut CfgEnvWithHandlerCfg,
+        header: &Header,
+        evm_config: EvmConfig,
+    ) -> ProviderResult<()>
+    where
+        EvmConfig: ConfigureEvmEnv<Header = Header>,
+    {
+        let total_difficulty = self
+            .header_td_by_number(header.number)?
+            .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?;
+        evm_config.fill_cfg_env(cfg, header, total_difficulty);
+        Ok(())
+    }
+}
+
+impl<N: ProviderNodeTypes> PruneCheckpointReader for ConsistentProvider<N> {
+    fn get_prune_checkpoint(
+        &self,
+        segment: PruneSegment,
+    ) -> ProviderResult<Option<PruneCheckpoint>> {
+        self.storage_provider.get_prune_checkpoint(segment)
+    }
+
+    fn get_prune_checkpoints(&self) -> ProviderResult<Vec<(PruneSegment, PruneCheckpoint)>> {
+        self.storage_provider.get_prune_checkpoints()
+    }
+}
+
+impl<N: ProviderNodeTypes> ChainSpecProvider for ConsistentProvider<N> {
+    type ChainSpec = N::ChainSpec;
+
+    fn chain_spec(&self) -> Arc<N::ChainSpec> {
+        ChainSpecProvider::chain_spec(&self.storage_provider)
+    }
+}
+
+impl<N: ProviderNodeTypes> BlockReaderIdExt for ConsistentProvider<N> {
+    fn block_by_id(&self, id: BlockId) -> ProviderResult<Option<Block>> {
+        match id {
+            BlockId::Number(num) => self.block_by_number_or_tag(num),
+            BlockId::Hash(hash) => {
+                // TODO: should we only apply this for the RPCs that are listed in EIP-1898?
+                // so not at the provider level?
+                // if we decide to do this at a higher level, then we can make this an automatic
+                // trait impl
+                if Some(true) == hash.require_canonical {
+                    // check the database, canonical blocks are only stored in the database
+                    self.find_block_by_hash(hash.block_hash, BlockSource::Canonical)
+                } else {
+                    self.block_by_hash(hash.block_hash)
+                }
+            }
+        }
+    }
+
+    fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult<Option<Header>> {
+        Ok(match id {
+            BlockNumberOrTag::Latest => {
+                Some(self.canonical_in_memory_state.get_canonical_head().unseal())
+            }
+            BlockNumberOrTag::Finalized => {
+                self.canonical_in_memory_state.get_finalized_header().map(|h| h.unseal())
+            }
+            BlockNumberOrTag::Safe => {
+                self.canonical_in_memory_state.get_safe_header().map(|h| h.unseal())
+            }
+            BlockNumberOrTag::Earliest => self.header_by_number(0)?,
+            BlockNumberOrTag::Pending => self.canonical_in_memory_state.pending_header(),
+
+            BlockNumberOrTag::Number(num) => self.header_by_number(num)?,
+        })
+    }
+
+    fn sealed_header_by_number_or_tag(
+        &self,
+        id: BlockNumberOrTag,
+    ) -> ProviderResult<Option<SealedHeader>> {
+        match id {
+            BlockNumberOrTag::Latest => {
+                Ok(Some(self.canonical_in_memory_state.get_canonical_head()))
+            }
+            BlockNumberOrTag::Finalized => {
+                Ok(self.canonical_in_memory_state.get_finalized_header())
+            }
+            BlockNumberOrTag::Safe => Ok(self.canonical_in_memory_state.get_safe_header()),
+            BlockNumberOrTag::Earliest => self.header_by_number(0)?.map_or_else(
+                || Ok(None),
+                |h| {
+                    let sealed = h.seal_slow();
+                    let (header, seal) = sealed.into_parts();
+                    Ok(Some(SealedHeader::new(header, seal)))
+                },
+            ),
+            BlockNumberOrTag::Pending => Ok(self.canonical_in_memory_state.pending_sealed_header()),
+            BlockNumberOrTag::Number(num) => self.header_by_number(num)?.map_or_else(
+                || Ok(None),
+                |h| {
+                    let sealed = h.seal_slow();
+                    let (header, seal) = sealed.into_parts();
+                    Ok(Some(SealedHeader::new(header, seal)))
+                },
+            ),
+        }
+    }
+
+    fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult<Option<SealedHeader>> {
+        Ok(match id {
+            BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?,
+            BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| {
+                let sealed = h.seal_slow();
+                let (header, seal) = sealed.into_parts();
+                SealedHeader::new(header, seal)
+            }),
+        })
+    }
+
+    fn header_by_id(&self, id: BlockId) -> ProviderResult<Option<Header>> {
+        Ok(match id {
+            BlockId::Number(num) => self.header_by_number_or_tag(num)?,
+            BlockId::Hash(hash) => self.header(&hash.block_hash)?,
+        })
+    }
+
+    fn ommers_by_id(&self, id: BlockId) ->
ProviderResult>> { + match id { + BlockId::Number(num) => self.ommers_by_number_or_tag(num), + BlockId::Hash(hash) => { + // TODO: EIP-1898 question, see above + // here it is not handled + self.ommers(BlockHashOrNumber::Hash(hash.block_hash)) + } + } + } +} + +impl StorageChangeSetReader for ConsistentProvider { + fn storage_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult> { + if let Some(state) = + self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into())) + { + let changesets = state + .block() + .execution_output + .bundle + .reverts + .clone() + .into_plain_state_reverts() + .storage + .into_iter() + .flatten() + .flat_map(|revert: PlainStorageRevert| { + revert.storage_revert.into_iter().map(move |(key, value)| { + ( + BlockNumberAddress((block_number, revert.address)), + StorageEntry { key: key.into(), value: value.to_previous_value() }, + ) + }) + }) + .collect(); + Ok(changesets) + } else { + // Perform checks on whether or not changesets exist for the block. + + // No prune checkpoint means history should exist and we should `unwrap_or(true)` + let storage_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::StorageHistory)? + .and_then(|checkpoint| { + // return true if the block number is ahead of the prune checkpoint. + // + // The checkpoint stores the highest pruned block number, so we should make + // sure the block_number is strictly greater. + checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !storage_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + self.storage_provider.storage_changeset(block_number) + } + } +} + +impl ChangeSetReader for ConsistentProvider { + fn account_block_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult> { + if let Some(state) = + self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into())) + { + let changesets = state + .block_ref() + .execution_output + .bundle + .reverts + .clone() + .into_plain_state_reverts() + .accounts + .into_iter() + .flatten() + .map(|(address, info)| AccountBeforeTx { address, info: info.map(Into::into) }) + .collect(); + Ok(changesets) + } else { + // Perform checks on whether or not changesets exist for the block. + + // No prune checkpoint means history should exist and we should `unwrap_or(true)` + let account_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::AccountHistory)? + .and_then(|checkpoint| { + // return true if the block number is ahead of the prune checkpoint. + // + // The checkpoint stores the highest pruned block number, so we should make + // sure the block_number is strictly greater. + checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !account_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + self.storage_provider.account_block_changeset(block_number) + } + } +} + +impl AccountReader for ConsistentProvider { + /// Get basic account information. + fn basic_account(&self, address: Address) -> ProviderResult> { + // use latest state provider + let state_provider = self.latest_ref()?; + state_provider.basic_account(address) + } +} + +impl StateReader for ConsistentProvider { + /// Re-constructs the [`ExecutionOutcome`] from in-memory and database state, if necessary. + /// + /// If data for the block does not exist, this will return [`None`]. 
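+ ///
+ /// For a block that is still in memory this clones the block's cached execution
+ /// outcome; for a block that was already persisted the outcome is rebuilt from the
+ /// stored changesets and receipts via the range-based `get_state` above.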
+ /// + /// NOTE: This cannot be called safely in a loop outside of the blockchain tree thread. This is + /// because the [`CanonicalInMemoryState`] could change during a reorg, causing results to be + /// inconsistent. Currently this can safely be called within the blockchain tree thread, + /// because the tree thread is responsible for modifying the [`CanonicalInMemoryState`] in the + /// first place. + fn get_state(&self, block: BlockNumber) -> ProviderResult> { + if let Some(state) = self.head_block.as_ref().and_then(|b| b.block_on_chain(block.into())) { + let state = state.block_ref().execution_outcome().clone(); + Ok(Some(state)) + } else { + Self::get_state(self, block..=block) + } + } +} + +#[cfg(test)] +mod tests { + use crate::{ + providers::blockchain_provider::BlockchainProvider2, + test_utils::create_test_provider_factory, BlockWriter, + }; + use alloy_eips::BlockHashOrNumber; + use alloy_primitives::B256; + use itertools::Itertools; + use rand::Rng; + use reth_chain_state::{ExecutedBlock, NewCanonicalChain}; + use reth_db::models::AccountBeforeTx; + use reth_execution_types::ExecutionOutcome; + use reth_primitives::SealedBlock; + use reth_storage_api::{BlockReader, BlockSource, ChangeSetReader}; + use reth_testing_utils::generators::{ + self, random_block_range, random_changeset_range, random_eoa_accounts, BlockRangeParams, + }; + use revm::db::BundleState; + use std::{ + ops::{Bound, Range, RangeBounds}, + sync::Arc, + }; + + const TEST_BLOCKS_COUNT: usize = 5; + + fn random_blocks( + rng: &mut impl Rng, + database_blocks: usize, + in_memory_blocks: usize, + requests_count: Option>, + withdrawals_count: Option>, + tx_count: impl RangeBounds, + ) -> (Vec, Vec) { + let block_range = (database_blocks + in_memory_blocks - 1) as u64; + + let tx_start = match tx_count.start_bound() { + Bound::Included(&n) | Bound::Excluded(&n) => n, + Bound::Unbounded => u8::MIN, + }; + let tx_end = match tx_count.end_bound() { + Bound::Included(&n) | Bound::Excluded(&n) => n + 1, + Bound::Unbounded => u8::MAX, + }; + + let blocks = random_block_range( + rng, + 0..=block_range, + BlockRangeParams { + parent: Some(B256::ZERO), + tx_count: tx_start..tx_end, + requests_count, + withdrawals_count, + }, + ); + let (database_blocks, in_memory_blocks) = blocks.split_at(database_blocks); + (database_blocks.to_vec(), in_memory_blocks.to_vec()) + } + + #[test] + fn test_block_reader_find_block_by_hash() -> eyre::Result<()> { + // Initialize random number generator and provider factory + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks and split into database and in-memory blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, + ); + let (database_blocks, in_memory_blocks) = blocks.split_at(5); + + // Insert first 5 blocks into the database + let provider_rw = factory.provider_rw()?; + for block in database_blocks { + provider_rw.insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + )?; + } + provider_rw.commit()?; + + // Create a new provider + let provider = BlockchainProvider2::new(factory)?; + let consistent_provider = provider.consistent_provider()?; + + // Useful blocks + let first_db_block = database_blocks.first().unwrap(); + let first_in_mem_block = in_memory_blocks.first().unwrap(); + let last_in_mem_block = in_memory_blocks.last().unwrap(); + + // No block in memory before setting in 
memory state + assert_eq!( + consistent_provider.find_block_by_hash(first_in_mem_block.hash(), BlockSource::Any)?, + None + ); + assert_eq!( + consistent_provider + .find_block_by_hash(first_in_mem_block.hash(), BlockSource::Canonical)?, + None + ); + // No pending block in memory + assert_eq!( + consistent_provider + .find_block_by_hash(first_in_mem_block.hash(), BlockSource::Pending)?, + None + ); + + // Insert first block into the in-memory state + let in_memory_block_senders = + first_in_mem_block.senders().expect("failed to recover senders"); + let chain = NewCanonicalChain::Commit { + new: vec![ExecutedBlock::new( + Arc::new(first_in_mem_block.clone()), + Arc::new(in_memory_block_senders), + Default::default(), + Default::default(), + Default::default(), + )], + }; + consistent_provider.canonical_in_memory_state.update_chain(chain); + let consistent_provider = provider.consistent_provider()?; + + // Now the block should be found in memory + assert_eq!( + consistent_provider.find_block_by_hash(first_in_mem_block.hash(), BlockSource::Any)?, + Some(first_in_mem_block.clone().into()) + ); + assert_eq!( + consistent_provider + .find_block_by_hash(first_in_mem_block.hash(), BlockSource::Canonical)?, + Some(first_in_mem_block.clone().into()) + ); + + // Find the first block in database by hash + assert_eq!( + consistent_provider.find_block_by_hash(first_db_block.hash(), BlockSource::Any)?, + Some(first_db_block.clone().into()) + ); + assert_eq!( + consistent_provider + .find_block_by_hash(first_db_block.hash(), BlockSource::Canonical)?, + Some(first_db_block.clone().into()) + ); + + // No pending block in database + assert_eq!( + consistent_provider.find_block_by_hash(first_db_block.hash(), BlockSource::Pending)?, + None + ); + + // Insert the last block into the pending state + provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + block: Arc::new(last_in_mem_block.clone()), + senders: Default::default(), + execution_output: Default::default(), + hashed_state: Default::default(), + trie: Default::default(), + }); + + // Now the last block should be found in memory + assert_eq!( + consistent_provider + .find_block_by_hash(last_in_mem_block.hash(), BlockSource::Pending)?, + Some(last_in_mem_block.clone().into()) + ); + + Ok(()) + } + + #[test] + fn test_block_reader_block() -> eyre::Result<()> { + // Initialize random number generator and provider factory + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks and split into database and in-memory blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, + ); + let (database_blocks, in_memory_blocks) = blocks.split_at(5); + + // Insert first 5 blocks into the database + let provider_rw = factory.provider_rw()?; + for block in database_blocks { + provider_rw.insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + )?; + } + provider_rw.commit()?; + + // Create a new provider + let provider = BlockchainProvider2::new(factory)?; + let consistent_provider = provider.consistent_provider()?; + + // First in memory block + let first_in_mem_block = in_memory_blocks.first().unwrap(); + // First database block + let first_db_block = database_blocks.first().unwrap(); + + // First in memory block should not be found yet as not integrated to the in-memory state + assert_eq!( + 
consistent_provider.block(BlockHashOrNumber::Hash(first_in_mem_block.hash()))?, + None + ); + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Number(first_in_mem_block.number))?, + None + ); + + // Insert first block into the in-memory state + let in_memory_block_senders = + first_in_mem_block.senders().expect("failed to recover senders"); + let chain = NewCanonicalChain::Commit { + new: vec![ExecutedBlock::new( + Arc::new(first_in_mem_block.clone()), + Arc::new(in_memory_block_senders), + Default::default(), + Default::default(), + Default::default(), + )], + }; + consistent_provider.canonical_in_memory_state.update_chain(chain); + + let consistent_provider = provider.consistent_provider()?; + + // First in memory block should be found + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Hash(first_in_mem_block.hash()))?, + Some(first_in_mem_block.clone().into()) + ); + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Number(first_in_mem_block.number))?, + Some(first_in_mem_block.clone().into()) + ); + + // First database block should be found + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Hash(first_db_block.hash()))?, + Some(first_db_block.clone().into()) + ); + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Number(first_db_block.number))?, + Some(first_db_block.clone().into()) + ); + + Ok(()) + } + + #[test] + fn test_changeset_reader() -> eyre::Result<()> { + let mut rng = generators::rng(); + + let (database_blocks, in_memory_blocks) = + random_blocks(&mut rng, TEST_BLOCKS_COUNT, 1, None, None, 0..1); + + let first_database_block = database_blocks.first().map(|block| block.number).unwrap(); + let last_database_block = database_blocks.last().map(|block| block.number).unwrap(); + let first_in_memory_block = in_memory_blocks.first().map(|block| block.number).unwrap(); + + let accounts = random_eoa_accounts(&mut rng, 2); + + let (database_changesets, database_state) = random_changeset_range( + &mut rng, + &database_blocks, + accounts.into_iter().map(|(address, account)| (address, (account, Vec::new()))), + 0..0, + 0..0, + ); + let (in_memory_changesets, in_memory_state) = random_changeset_range( + &mut rng, + &in_memory_blocks, + database_state + .iter() + .map(|(address, (account, storage))| (*address, (*account, storage.clone()))), + 0..0, + 0..0, + ); + + let factory = create_test_provider_factory(); + + let provider_rw = factory.provider_rw()?; + provider_rw.append_blocks_with_state( + database_blocks + .into_iter() + .map(|b| b.seal_with_senders().expect("failed to seal block with senders")) + .collect(), + ExecutionOutcome { + bundle: BundleState::new( + database_state.into_iter().map(|(address, (account, _))| { + (address, None, Some(account.into()), Default::default()) + }), + database_changesets + .iter() + .map(|block_changesets| { + block_changesets.iter().map(|(address, account, _)| { + (*address, Some(Some((*account).into())), []) + }) + }) + .collect::>(), + Vec::new(), + ), + first_block: first_database_block, + ..Default::default() + }, + Default::default(), + Default::default(), + )?; + provider_rw.commit()?; + + let provider = BlockchainProvider2::new(factory)?; + + let in_memory_changesets = in_memory_changesets.into_iter().next().unwrap(); + let chain = NewCanonicalChain::Commit { + new: vec![in_memory_blocks + .first() + .map(|block| { + let senders = block.senders().expect("failed to recover senders"); + ExecutedBlock::new( + Arc::new(block.clone()), + Arc::new(senders), + Arc::new(ExecutionOutcome { + bundle: 
BundleState::new( + in_memory_state.into_iter().map(|(address, (account, _))| { + (address, None, Some(account.into()), Default::default()) + }), + [in_memory_changesets.iter().map(|(address, account, _)| { + (*address, Some(Some((*account).into())), Vec::new()) + })], + [], + ), + first_block: first_in_memory_block, + ..Default::default() + }), + Default::default(), + Default::default(), + ) + }) + .unwrap()], + }; + provider.canonical_in_memory_state.update_chain(chain); + + let consistent_provider = provider.consistent_provider()?; + + assert_eq!( + consistent_provider.account_block_changeset(last_database_block).unwrap(), + database_changesets + .into_iter() + .last() + .unwrap() + .into_iter() + .sorted_by_key(|(address, _, _)| *address) + .map(|(address, account, _)| AccountBeforeTx { address, info: Some(account) }) + .collect::>() + ); + assert_eq!( + consistent_provider.account_block_changeset(first_in_memory_block).unwrap(), + in_memory_changesets + .into_iter() + .sorted_by_key(|(address, _, _)| *address) + .map(|(address, account, _)| AccountBeforeTx { address, info: Some(account) }) + .collect::>() + ); + + Ok(()) + } +} diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index b98bbf5be472..c81ef05d2eaa 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -61,6 +61,9 @@ pub use consistent_view::{ConsistentDbView, ConsistentViewError}; mod blockchain_provider; pub use blockchain_provider::BlockchainProvider2; +mod consistent; +pub use consistent::ConsistentProvider; + /// Helper trait keeping common requirements of providers for [`NodeTypesWithDB`]. pub trait ProviderNodeTypes: NodeTypesWithDB {} @@ -118,7 +121,7 @@ impl BlockchainProvider { /// the database to initialize the provider. pub fn new(database: ProviderFactory, tree: Arc) -> ProviderResult { let provider = database.provider()?; - let best: ChainInfo = provider.chain_info()?; + let best = provider.chain_info()?; let latest_header = provider .header_by_number(best.best_number)? 
.ok_or_else(|| ProviderError::HeaderNotFound(best.best_number.into()))?; diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 640041e0801f..56a1d057e704 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -2,6 +2,7 @@ use crate::{ providers::{state::macros::delegate_provider_impls, StaticFileProvider}, AccountReader, BlockHashReader, ProviderError, StateProvider, StateRootProvider, }; +use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{ map::{HashMap, HashSet}, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, @@ -13,7 +14,7 @@ use reth_db_api::{ table::Table, transaction::DbTx, }; -use reth_primitives::{constants::EPOCH_SLOTS, Account, Bytecode, StaticFileSegment}; +use reth_primitives::{Account, Bytecode, StaticFileSegment}; use reth_storage_api::{StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 20d6a1b184b2..e81dc01f722c 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -1364,13 +1364,13 @@ impl TransactionsProviderExt for StaticFileProvider { // chunks are too big, there will be idle threads waiting for work. Choosing an // arbitrary smaller value to make sure it doesn't happen. let chunk_size = 100; - let mut channels = Vec::new(); // iterator over the chunks let chunks = tx_range .clone() .step_by(chunk_size) .map(|start| start..std::cmp::min(start + chunk_size as u64, tx_range.end)); + let mut channels = Vec::with_capacity(tx_range_size.div_ceil(chunk_size)); for chunk_range in chunks { let (channel_tx, channel_rx) = mpsc::channel(); diff --git a/crates/tokio-util/src/event_sender.rs b/crates/tokio-util/src/event_sender.rs index a4e9815388cd..16208ee19c0e 100644 --- a/crates/tokio-util/src/event_sender.rs +++ b/crates/tokio-util/src/event_sender.rs @@ -40,3 +40,96 @@ impl EventSender { EventStream::new(self.sender.subscribe()) } } + +#[cfg(test)] +mod tests { + use super::*; + use tokio::{ + task, + time::{timeout, Duration}, + }; + use tokio_stream::StreamExt; + + #[tokio::test] + async fn test_event_broadcast_to_listener() { + let sender = EventSender::default(); + + // Create a listener for the events + let mut listener = sender.new_listener(); + + // Broadcast an event + sender.notify("event1"); + + // Check if the listener receives the event + let received_event = listener.next().await; + assert_eq!(received_event, Some("event1")); + } + + #[tokio::test] + async fn test_event_no_listener() { + let sender = EventSender::default(); + + // Broadcast an event with no listeners + sender.notify("event2"); + + // Ensure it doesn't panic or fail when no listeners are present + // (this test passes if it runs without errors). 
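+ // This holds because `notify` presumably discards the error that
+ // `tokio::sync::broadcast::Sender::send` returns when no receivers are
+ // subscribed, instead of propagating it or panicking.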
+ } + + #[tokio::test] + async fn test_multiple_listeners_receive_event() { + let sender = EventSender::default(); + + // Create two listeners + let mut listener1 = sender.new_listener(); + let mut listener2 = sender.new_listener(); + + // Broadcast an event + sender.notify("event3"); + + // Both listeners should receive the same event + let event1 = listener1.next().await; + let event2 = listener2.next().await; + + assert_eq!(event1, Some("event3")); + assert_eq!(event2, Some("event3")); + } + + #[tokio::test] + async fn test_bounded_channel_size() { + // Create a channel with size 2 + let sender = EventSender::new(2); + + // Create a listener + let mut listener = sender.new_listener(); + + // Broadcast 3 events, which exceeds the channel size + sender.notify("event4"); + sender.notify("event5"); + sender.notify("event6"); + + // Only the last two should be received due to the size limit + let received_event1 = listener.next().await; + let received_event2 = listener.next().await; + + assert_eq!(received_event1, Some("event5")); + assert_eq!(received_event2, Some("event6")); + } + + #[tokio::test] + async fn test_event_listener_timeout() { + let sender = EventSender::default(); + let mut listener = sender.new_listener(); + + // Broadcast an event asynchronously + task::spawn(async move { + tokio::time::sleep(Duration::from_millis(50)).await; + sender.notify("delayed_event"); + }); + + // Use a timeout to ensure that the event is received within a certain time + let result = timeout(Duration::from_millis(100), listener.next()).await; + assert!(result.is_ok()); + assert_eq!(result.unwrap(), Some("delayed_event")); + } +} diff --git a/crates/transaction-pool/benches/truncate.rs b/crates/transaction-pool/benches/truncate.rs index 22e457630540..1ca6f98499cd 100644 --- a/crates/transaction-pool/benches/truncate.rs +++ b/crates/transaction-pool/benches/truncate.rs @@ -66,7 +66,7 @@ fn generate_many_transactions(senders: usize, max_depth: usize) -> Vec().new_tree(&mut runner).unwrap().current() % max_depth + 1; diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 96119a0f8170..787d4985ff1b 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -3,7 +3,6 @@ use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize}; use alloy_eips::eip4844::BlobAndProofV1; use alloy_primitives::{TxHash, B256}; -use alloy_rlp::{Decodable, Encodable}; use parking_lot::{Mutex, RwLock}; use reth_primitives::BlobTransactionSidecar; use schnellru::{ByLength, LruMap}; @@ -204,8 +203,8 @@ impl DiskFileBlobStoreInner { /// Ensures blob is in the blob cache and written to the disk. 
fn insert_one(&self, tx: B256, data: BlobTransactionSidecar) -> Result<(), BlobStoreError> { - let mut buf = Vec::with_capacity(data.fields_len()); - data.encode(&mut buf); + let mut buf = Vec::with_capacity(data.rlp_encoded_fields_length()); + data.rlp_encode_fields(&mut buf); self.blob_cache.lock().insert(tx, data); let size = self.write_one_encoded(tx, &buf)?; @@ -219,8 +218,8 @@ impl DiskFileBlobStoreInner { let raw = txs .iter() .map(|(tx, data)| { - let mut buf = Vec::with_capacity(data.fields_len()); - data.encode(&mut buf); + let mut buf = Vec::with_capacity(data.rlp_encoded_fields_length()); + data.rlp_encode_fields(&mut buf); (self.blob_disk_file(*tx), buf) }) .collect::>(); @@ -312,7 +311,7 @@ impl DiskFileBlobStoreInner { } } }; - BlobTransactionSidecar::decode(&mut data.as_slice()) + BlobTransactionSidecar::rlp_decode_fields(&mut data.as_slice()) .map(Some) .map_err(BlobStoreError::DecodeError) } @@ -322,7 +321,7 @@ impl DiskFileBlobStoreInner { self.read_many_raw(txs) .into_iter() .filter_map(|(tx, data)| { - BlobTransactionSidecar::decode(&mut data.as_slice()) + BlobTransactionSidecar::rlp_decode_fields(&mut data.as_slice()) .map(|sidecar| (tx, sidecar)) .ok() }) @@ -409,13 +408,9 @@ impl DiskFileBlobStoreInner { /// Returns an error if there are any missing blobs. #[inline] fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { - let mut res = Vec::with_capacity(txs.len()); - for tx in txs { - let blob = self.get_one(tx)?.ok_or_else(|| BlobStoreError::MissingSidecar(tx))?; - res.push(blob) - } - - Ok(res) + txs.into_iter() + .map(|tx| self.get_one(tx)?.ok_or(BlobStoreError::MissingSidecar(tx))) + .collect() } } diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index 30703f888c38..8fe49f47652d 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -3,8 +3,9 @@ use crate::{ PoolSize, TransactionOrigin, }; use alloy_consensus::constants::EIP4844_TX_TYPE_ID; +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::Address; -use reth_primitives::constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; +use reth_primitives::constants::MIN_PROTOCOL_BASE_FEE; use std::{collections::HashSet, ops::Mul}; /// Guarantees max transactions for one sender, compatible with geth/erigon diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 4464ae1fc8a5..11c5e7eea29b 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -16,10 +16,10 @@ use crate::{ PooledTransactionsElement, PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; -use alloy_eips::eip4844::BlobAndProofV1; +use alloy_eips::{eip1559::ETHEREUM_BLOCK_GAS_LIMIT, eip4844::BlobAndProofV1}; use alloy_primitives::{Address, TxHash, B256, U256}; use reth_eth_wire_types::HandleMempoolData; -use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, BlobTransactionSidecar}; +use reth_primitives::BlobTransactionSidecar; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use tokio::sync::{mpsc, mpsc::Receiver}; diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index b591fdb539a9..407f04fd5be3 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -35,8 +35,8 @@ pub struct ParkedPool { best: BTreeSet>, /// Keeps track of last submission id for 
each sender. /// - /// This are sorted in Reverse order, so the last (highest) submission id is first, and the - /// lowest(oldest) is the last. + /// This are sorted in reverse order, so the last (highest) submission id is first, and the + /// lowest (oldest) is the last. last_sender_submission: BTreeSet, /// Keeps track of the number of transactions in the pool by the sender and the last submission /// id. @@ -856,4 +856,64 @@ mod tests { assert_eq!(submission_info2.sender_id, sender2); assert_eq!(submission_info2.submission_id, 2); } + + #[test] + fn test_remove_sender_count() { + // Initialize a mock transaction factory + let mut f = MockTransactionFactory::default(); + // Create an empty transaction pool + let mut pool = ParkedPool::>::default(); + // Generate two validated transactions and add them to the pool + let tx1 = f.validated_arc(MockTransaction::eip1559().inc_price()); + let tx2 = f.validated_arc(MockTransaction::eip1559().inc_price()); + pool.add_transaction(tx1); + pool.add_transaction(tx2); + + // Define two different sender IDs and their corresponding submission IDs + let sender1: SenderId = 11.into(); + let sender2: SenderId = 22.into(); + + // Add the sender counts to the pool + pool.add_sender_count(sender1, 1); + + // We add sender 2 multiple times to test the removal of sender counts + pool.add_sender_count(sender2, 2); + pool.add_sender_count(sender2, 3); + + // Before removing the sender count we should have 4 sender transaction counts + assert_eq!(pool.sender_transaction_count.len(), 4); + assert!(pool.sender_transaction_count.contains_key(&sender1)); + + // We should have 1 sender transaction count for sender 1 before removing the sender count + assert_eq!(pool.sender_transaction_count.get(&sender1).unwrap().count, 1); + + // Remove the sender count for sender 1 + pool.remove_sender_count(sender1); + + // After removing the sender count we should have 3 sender transaction counts remaining + assert_eq!(pool.sender_transaction_count.len(), 3); + assert!(!pool.sender_transaction_count.contains_key(&sender1)); + + // Check the sender transaction count for sender 2 before removing the sender count + assert_eq!( + *pool.sender_transaction_count.get(&sender2).unwrap(), + SenderTransactionCount { count: 2, last_submission_id: 3 } + ); + + // Remove the sender count for sender 2 + pool.remove_sender_count(sender2); + + // After removing the sender count for sender 2, we still have 3 sender transaction counts + // remaining. + // + // This is because we added sender 2 multiple times and we only removed the last submission. 
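+ // Put differently, `remove_sender_count` appears to decrement the per-sender
+ // counter (2 -> 1 for sender 2 here) and only to drop the map entry once the
+ // counter reaches zero, which is what the assertions below check.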
+ assert_eq!(pool.sender_transaction_count.len(), 3); + assert!(pool.sender_transaction_count.contains_key(&sender2)); + + // Sender transaction count for sender 2 should be updated correctly + assert_eq!( + *pool.sender_transaction_count.get(&sender2).unwrap(), + SenderTransactionCount { count: 1, last_submission_id: 3 } + ); + } } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index a85a9a1856b9..a3f192992d1a 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -22,10 +22,9 @@ use alloy_consensus::constants::{ EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{Address, TxHash, B256}; -use reth_primitives::constants::{ - eip4844::BLOB_TX_MIN_BLOB_GASPRICE, ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE, -}; +use reth_primitives::constants::{eip4844::BLOB_TX_MIN_BLOB_GASPRICE, MIN_PROTOCOL_BASE_FEE}; use rustc_hash::FxHashMap; use smallvec::SmallVec; use std::{ @@ -1405,12 +1404,9 @@ impl<T: PoolTransaction> AllTransactions<T> { /// Caution: This assumes that mutually exclusive invariant is always true for the same sender. #[inline] fn contains_conflicting_transaction(&self, tx: &ValidPoolTransaction<T::Transaction>) -> bool { - let mut iter = self.txs_iter(tx.transaction_id.sender); - if let Some((_, existing)) = iter.next() { - return tx.tx_type_conflicts_with(&existing.transaction) - } - // no existing transaction for this sender - false + self.txs_iter(tx.transaction_id.sender) + .next() + .map_or(false, |(_, existing)| tx.tx_type_conflicts_with(&existing.transaction)) } /// Additional checks for a new transaction. @@ -1424,11 +1420,15 @@ impl<T: PoolTransaction> AllTransactions<T> { fn ensure_valid( &self, transaction: ValidPoolTransaction<T::Transaction>, + on_chain_nonce: u64, ) -> Result<ValidPoolTransaction<T::Transaction>, InsertErr> { if !self.local_transactions_config.is_local(transaction.origin, transaction.sender()) { let current_txs = self.tx_counter.get(&transaction.sender_id()).copied().unwrap_or_default(); + + // Reject the transaction if the sender's slot capacity is exceeded. + // If the transaction's nonce matches the on-chain nonce, always let it through. + if current_txs >= self.max_account_slots && transaction.nonce() > on_chain_nonce { return Err(InsertErr::ExceededSenderTransactionsCapacity { transaction: Arc::new(transaction), })
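The new `on_chain_nonce` guard means a sender that is over the slot cap can still submit the next executable transaction. A minimal sketch of just that predicate; the names `max_slots` and `current_txs` are illustrative, not reth API:

```rust
// Sketch of the capacity rule added in `ensure_valid` above.
fn exceeds_capacity(current_txs: usize, max_slots: usize, tx_nonce: u64, on_chain_nonce: u64) -> bool {
    // Reject only when the cap is hit AND the tx is not the next executable one:
    // a tx whose nonce equals the on-chain nonce is always admitted.
    current_txs >= max_slots && tx_nonce > on_chain_nonce
}

fn main() {
    assert!(exceeds_capacity(16, 16, 5, 3));  // over cap, future nonce: rejected
    assert!(!exceeds_capacity(16, 16, 3, 3)); // over cap, next executable nonce: admitted
    assert!(!exceeds_capacity(2, 16, 9, 3));  // under cap: admitted
}
```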
@@ -1510,52 +1510,6 @@ impl<T: PoolTransaction> AllTransactions<T> { Ok(new_blob_tx) } - /// Returns true if the replacement candidate is underpriced and can't replace the existing - /// transaction. - #[inline] - fn is_underpriced( - existing_transaction: &ValidPoolTransaction<T::Transaction>, - maybe_replacement: &ValidPoolTransaction<T::Transaction>, - price_bumps: &PriceBumpConfig, - ) -> bool { - let price_bump = price_bumps.price_bump(existing_transaction.tx_type()); - - if maybe_replacement.max_fee_per_gas() <= - existing_transaction.max_fee_per_gas() * (100 + price_bump) / 100 - { - return true - } - - let existing_max_priority_fee_per_gas = - existing_transaction.transaction.max_priority_fee_per_gas().unwrap_or(0); - let replacement_max_priority_fee_per_gas = - maybe_replacement.transaction.max_priority_fee_per_gas().unwrap_or(0); - - if replacement_max_priority_fee_per_gas <= - existing_max_priority_fee_per_gas * (100 + price_bump) / 100 && - existing_max_priority_fee_per_gas != 0 && - replacement_max_priority_fee_per_gas != 0 - { - return true - } - - // check max blob fee per gas - if let Some(existing_max_blob_fee_per_gas) = - existing_transaction.transaction.max_fee_per_blob_gas() - { - // this enforces that blob txs can only be replaced by blob txs - let replacement_max_blob_fee_per_gas = - maybe_replacement.transaction.max_fee_per_blob_gas().unwrap_or(0); - if replacement_max_blob_fee_per_gas <= - existing_max_blob_fee_per_gas * (100 + price_bump) / 100 - { - return true - } - } - - false - } - /// Inserts a new _valid_ transaction into the pool. /// /// If the transaction already exists, it will be replaced if not underpriced. @@ -1595,7 +1549,7 @@ impl<T: PoolTransaction> AllTransactions<T> { ) -> InsertResult { assert!(on_chain_nonce <= transaction.nonce(), "Invalid transaction"); - let mut transaction = self.ensure_valid(transaction)?; + let mut transaction = self.ensure_valid(transaction, on_chain_nonce)?; let inserted_tx_id = *transaction.id(); let mut state = TxState::default(); @@ -1670,8 +1624,7 @@ impl<T: PoolTransaction> AllTransactions<T> { let maybe_replacement = transaction.as_ref(); // Ensure the new transaction is not underpriced - if Self::is_underpriced(existing_transaction, maybe_replacement, &self.price_bumps) - { + if existing_transaction.is_underpriced(maybe_replacement, &self.price_bumps) { return Err(InsertErr::Underpriced { transaction: pool_tx.transaction, existing: *entry.get().transaction.hash(), @@ -2634,6 +2587,7 @@ mod tests { let mut pool = AllTransactions::default(); let mut tx = MockTransaction::eip1559(); + let unblocked_tx = tx.clone(); for _ in 0..pool.max_account_slots { tx = tx.next(); pool.insert_tx(f.validated(tx.clone()), on_chain_balance, on_chain_nonce).unwrap(); @@ -2647,6 +2601,10 @@ mod tests { let err = pool.insert_tx(f.validated(tx.next()), on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::ExceededSenderTransactionsCapacity { .. })); + + assert!(pool + .insert_tx(f.validated(unblocked_tx), on_chain_balance, on_chain_nonce) + .is_ok()); } #[test]
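The replacement check now lives on `ValidPoolTransaction::is_underpriced` (see `validate/mod.rs` below), but the core rule is unchanged: the candidate must beat the existing fee by a configured percentage bump. A standalone sketch of that arithmetic, with an illustrative 10% bump in place of `PriceBumpConfig`:

```rust
// Sketch of the fee-bump rule; `price_bump` of 10 mirrors a common 10% default,
// but the real value comes from `PriceBumpConfig` per transaction type.
fn meets_bump(existing_fee: u128, replacement_fee: u128, price_bump: u128) -> bool {
    // The pool treats a replacement as underpriced when
    // replacement <= existing * (100 + bump) / 100, so strictly exceeding
    // that threshold is required.
    replacement_fee > existing_fee * (100 + price_bump) / 100
}

fn main() {
    assert!(!meets_bump(100, 100, 10)); // identical fee: underpriced
    assert!(!meets_bump(100, 110, 10)); // exactly +10% still fails the strict inequality
    assert!(meets_bump(100, 111, 10));  // above the bump threshold: acceptable
}
```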
diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index fcfdae4ed1b3..00cda8e1cbe5 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -2,7 +2,7 @@ use crate::{ blobstore::BlobStoreError, - error::PoolResult, + error::{InvalidPoolTransactionError, PoolResult}, pool::{state::SubPool, BestTransactionFilter, TransactionEvents}, validate::ValidPoolTransaction, AllTransactionsEvents, @@ -72,7 +72,6 @@ pub trait TransactionPool: Send + Sync + Clone { /// Imports all _external_ transactions /// - /// /// Consumer: Utility fn add_external_transactions( &self, @@ -83,7 +82,7 @@ pub trait TransactionPool: Send + Sync + Clone { /// Adds an _unvalidated_ transaction into the pool and subscribe to state changes. /// - /// This is the same as [TransactionPool::add_transaction] but returns an event stream for the + /// This is the same as [`TransactionPool::add_transaction`] but returns an event stream for the /// given transaction. /// /// Consumer: Custom @@ -962,6 +961,26 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// Returns `chain_id` fn chain_id(&self) -> Option<u64>; + + /// Ensures that the transaction's code size does not exceed the provided `max_init_code_size`. + /// + /// This is specifically relevant for contract creation transactions ([`TxKind::Create`]), + /// where the input data contains the initialization code. If the input code size exceeds + /// the configured limit, an [`InvalidPoolTransactionError::ExceedsMaxInitCodeSize`] error is + /// returned. + fn ensure_max_init_code_size( + &self, + max_init_code_size: usize, + ) -> Result<(), InvalidPoolTransactionError> { + if self.kind().is_create() && self.input().len() > max_init_code_size { + Err(InvalidPoolTransactionError::ExceedsMaxInitCodeSize( + self.size(), + max_init_code_size, + )) + } else { + Ok(()) + } + } } /// Super trait for transactions that can be converted to and from Eth transactions @@ -1575,4 +1594,27 @@ mod tests { assert_eq!(pooled_tx.blob_sidecar, EthBlobTransactionSidecar::None); assert_eq!(pooled_tx.cost, U256::from(100) + U256::from(10 * 1000)); } + + #[test] + fn test_pooled_transaction_limit() { + // No limit should never exceed + let limit_none = GetPooledTransactionLimit::None; + // Any size should return false + assert!(!limit_none.exceeds(1000)); + + // Size limit of 2MB (2 * 1024 * 1024 bytes) + let size_limit_2mb = GetPooledTransactionLimit::ResponseSizeSoftLimit(2 * 1024 * 1024); + + // Test with size below the limit + // 1MB is below 2MB, should return false + assert!(!size_limit_2mb.exceeds(1024 * 1024)); + + // Test with size exactly at the limit + // 2MB equals the limit, should return false + assert!(!size_limit_2mb.exceeds(2 * 1024 * 1024)); + + // Test with size exceeding the limit + // 3MB is above the 2MB limit, should return true + assert!(size_limit_2mb.exceeds(3 * 1024 * 1024)); + } } diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 22744c58a798..bf7749fb85c3 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -8,7 +8,7 @@ use crate::{ }, traits::TransactionOrigin, validate::{ValidTransaction, ValidationTask, MAX_INIT_CODE_BYTE_SIZE}, - EthBlobTransactionSidecar, EthPoolTransaction, LocalTransactionConfig, PoolTransaction, + EthBlobTransactionSidecar,
EthPoolTransaction, LocalTransactionConfig, TransactionValidationOutcome, TransactionValidationTaskExecutor, TransactionValidator, }; use alloy_consensus::constants::{ @@ -223,7 +223,7 @@ where // Check whether the init code size has been exceeded. if self.fork_tracker.is_shanghai_activated() { - if let Err(err) = ensure_max_init_code_size(&transaction, MAX_INIT_CODE_BYTE_SIZE) { + if let Err(err) = transaction.ensure_max_init_code_size(MAX_INIT_CODE_BYTE_SIZE) { return TransactionValidationOutcome::Invalid(transaction, err) } } @@ -711,7 +711,7 @@ impl EthTransactionValidatorBuilder { EthTransactionValidator { inner: Arc::new(inner) } } - /// Builds a the [`EthTransactionValidator`] and spawns validation tasks via the + /// Builds a [`EthTransactionValidator`] and spawns validation tasks via the /// [`TransactionValidationTaskExecutor`] /// /// The validator will spawn `additional_tasks` additional tasks for validation. @@ -783,22 +783,6 @@ impl ForkTracker { } } -/// Ensure that the code size is not greater than `max_init_code_size`. -/// `max_init_code_size` should be configurable so this will take it as an argument. -pub fn ensure_max_init_code_size( - transaction: &T, - max_init_code_size: usize, -) -> Result<(), InvalidPoolTransactionError> { - if transaction.kind().is_create() && transaction.input().len() > max_init_code_size { - Err(InvalidPoolTransactionError::ExceedsMaxInitCodeSize( - transaction.size(), - max_init_code_size, - )) - } else { - Ok(()) - } -} - /// Ensures that gas limit of the transaction exceeds the intrinsic gas of the transaction. /// /// Caution: This only checks past the Merge hardfork. @@ -833,8 +817,8 @@ pub fn ensure_intrinsic_gas( mod tests { use super::*; use crate::{ - blobstore::InMemoryBlobStore, error::PoolErrorKind, CoinbaseTipOrdering, - EthPooledTransaction, Pool, TransactionPool, + blobstore::InMemoryBlobStore, error::PoolErrorKind, traits::PoolTransaction, + CoinbaseTipOrdering, EthPooledTransaction, Pool, TransactionPool, }; use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{hex, U256}; diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 4395cc97908b..4a82a1a148ff 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -4,6 +4,7 @@ use crate::{ error::InvalidPoolTransactionError, identifier::{SenderId, TransactionId}, traits::{PoolTransaction, TransactionOrigin}, + PriceBumpConfig, }; use alloy_primitives::{Address, TxHash, B256, U256}; use futures_util::future::Either; @@ -372,6 +373,58 @@ impl ValidPoolTransaction { pub(crate) fn tx_type_conflicts_with(&self, other: &Self) -> bool { self.is_eip4844() != other.is_eip4844() } + + /// Determines whether a candidate transaction (`maybe_replacement`) is underpriced compared to + /// an existing transaction in the pool. + /// + /// A transaction is considered underpriced if it doesn't meet the required fee bump threshold. + /// This applies to both standard gas fees and, for blob-carrying transactions (EIP-4844), + /// the blob-specific fees. + #[inline] + pub(crate) fn is_underpriced( + &self, + maybe_replacement: &Self, + price_bumps: &PriceBumpConfig, + ) -> bool { + // Retrieve the required price bump percentage for this type of transaction. + // + // The bump is different for EIP-4844 and other transactions. See `PriceBumpConfig`. + let price_bump = price_bumps.price_bump(self.tx_type()); + + // Check if the max fee per gas is underpriced. 
+ if maybe_replacement.max_fee_per_gas() <= self.max_fee_per_gas() * (100 + price_bump) / 100 + { + return true + } + + let existing_max_priority_fee_per_gas = + self.transaction.max_priority_fee_per_gas().unwrap_or_default(); + let replacement_max_priority_fee_per_gas = + maybe_replacement.transaction.max_priority_fee_per_gas().unwrap_or_default(); + + // Check max priority fee per gas (relevant for EIP-1559 transactions only) + if existing_max_priority_fee_per_gas != 0 && + replacement_max_priority_fee_per_gas != 0 && + replacement_max_priority_fee_per_gas <= + existing_max_priority_fee_per_gas * (100 + price_bump) / 100 + { + return true + } + + // Check max blob fee per gas + if let Some(existing_max_blob_fee_per_gas) = self.transaction.max_fee_per_blob_gas() { + // This enforces that blob txs can only be replaced by blob txs + let replacement_max_blob_fee_per_gas = + maybe_replacement.transaction.max_fee_per_blob_gas().unwrap_or_default(); + if replacement_max_blob_fee_per_gas <= + existing_max_blob_fee_per_gas * (100 + price_bump) / 100 + { + return true + } + } + + false + } } impl>> ValidPoolTransaction { diff --git a/crates/transaction-pool/tests/it/evict.rs b/crates/transaction-pool/tests/it/evict.rs index c7438c9964e9..c1d0bbaa6421 100644 --- a/crates/transaction-pool/tests/it/evict.rs +++ b/crates/transaction-pool/tests/it/evict.rs @@ -1,8 +1,9 @@ //! Transaction pool eviction tests. +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{Address, B256}; use rand::distributions::Uniform; -use reth_primitives::constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; +use reth_primitives::constants::MIN_PROTOCOL_BASE_FEE; use reth_transaction_pool::{ error::PoolErrorKind, test_utils::{ diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs index 248e3caeeeee..bc221a8f8313 100644 --- a/crates/trie/sparse/benches/root.rs +++ b/crates/trie/sparse/benches/root.rs @@ -198,6 +198,8 @@ fn generate_test_data(size: usize) -> HashMap { .new_tree(&mut runner) .unwrap() .current() + .into_iter() + .collect() } criterion_group!(root, calculate_root_from_leaves, calculate_root_from_leaves_repeated); diff --git a/crates/trie/sparse/src/errors.rs b/crates/trie/sparse/src/errors.rs index f60d1736c06f..506b206fdd79 100644 --- a/crates/trie/sparse/src/errors.rs +++ b/crates/trie/sparse/src/errors.rs @@ -4,6 +4,8 @@ use alloy_primitives::{Bytes, B256}; use reth_trie::Nibbles; use thiserror::Error; +use crate::SparseNode; + /// Result type with [`SparseStateTrieError`] as error. pub type SparseStateTrieResult = Result; @@ -43,6 +45,14 @@ pub enum SparseTrieError { /// Node hash hash: B256, }, + /// Encountered unexpected node at path when revealing. + #[error("encountered an invalid node at path {path:?} when revealing: {node:?}")] + Reveal { + /// Path to the node. + path: Nibbles, + /// Node that was at the path when revealing. + node: Box, + }, /// RLP error. 
#[error(transparent)] Rlp(#[from] alloy_rlp::Error), diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 0b9ffb5c0ed4..4d195cbf34cf 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -142,21 +142,55 @@ impl RevealedSparseTrie { stack_ptr += 1; } } - self.nodes - .insert(path, SparseNode::Branch { state_mask: branch.state_mask, hash: None }); - } - TrieNode::Extension(ext) => { - let mut child_path = path.clone(); - child_path.extend_from_slice_unchecked(&ext.key); - self.reveal_node_or_hash(child_path, &ext.child)?; - self.nodes.insert(path, SparseNode::Extension { key: ext.key, hash: None }); - } - TrieNode::Leaf(leaf) => { - let mut full = path.clone(); - full.extend_from_slice_unchecked(&leaf.key); - self.values.insert(full, leaf.value); - self.nodes.insert(path, SparseNode::new_leaf(leaf.key)); + + match self.nodes.get(&path) { + // Blinded and non-existent nodes can be replaced. + Some(SparseNode::Hash(_)) | None => { + self.nodes.insert( + path, + SparseNode::Branch { state_mask: branch.state_mask, hash: None }, + ); + } + // Branch node already exists, or an extension node was placed where a + // branch node was before. + Some(SparseNode::Branch { .. } | SparseNode::Extension { .. }) => {} + // All other node types can't be handled. + Some(node @ (SparseNode::Empty | SparseNode::Leaf { .. })) => { + return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }) + } + } } + TrieNode::Extension(ext) => match self.nodes.get(&path) { + Some(SparseNode::Hash(_)) | None => { + let mut child_path = path.clone(); + child_path.extend_from_slice_unchecked(&ext.key); + self.reveal_node_or_hash(child_path, &ext.child)?; + self.nodes.insert(path, SparseNode::Extension { key: ext.key, hash: None }); + } + // Extension node already exists, or a branch node was placed where an + // extension node was before. + Some(SparseNode::Extension { .. } | SparseNode::Branch { .. }) => {} + // All other node types can't be handled. + Some(node @ (SparseNode::Empty | SparseNode::Leaf { .. })) => { + return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }) + } + }, + TrieNode::Leaf(leaf) => match self.nodes.get(&path) { + Some(SparseNode::Hash(_)) | None => { + let mut full = path.clone(); + full.extend_from_slice_unchecked(&leaf.key); + self.values.insert(full, leaf.value); + self.nodes.insert(path, SparseNode::new_leaf(leaf.key)); + } + // Leaf node already exists. + Some(SparseNode::Leaf { .. }) => {} + // All other node types can't be handled. + Some( + node @ (SparseNode::Empty | + SparseNode::Extension { .. } | + SparseNode::Branch { .. }), + ) => return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }), + }, } Ok(()) @@ -164,8 +198,18 @@ impl RevealedSparseTrie { fn reveal_node_or_hash(&mut self, path: Nibbles, child: &[u8]) -> SparseTrieResult<()> { if child.len() == B256::len_bytes() + 1 { - // TODO: revise insert to not overwrite existing entries - self.nodes.insert(path, SparseNode::Hash(B256::from_slice(&child[1..]))); + let hash = B256::from_slice(&child[1..]); + match self.nodes.get(&path) { + // Hash node with a different hash can't be handled. + Some(node @ SparseNode::Hash(previous_hash)) if previous_hash != &hash => { + return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }) + } + None => { + self.nodes.insert(path, SparseNode::Hash(hash)); + } + // All other node types mean that it has already been revealed. + Some(_) => {} + } + return Ok(()) }
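The reveal logic above reduces to a small precedence rule: blinded (`Hash`) or absent entries may be overwritten, revealing the same shape again is a no-op, and anything else is a `Reveal` error. A reduced sketch of that rule over a toy node enum, which is deliberately not reth's `SparseNode` type:

```rust
// Toy model of the reveal precedence rule; `Node` is illustrative only.
#[derive(Clone, Debug, PartialEq)]
enum Node {
    Hash([u8; 32]),
    Branch,
    Leaf,
}

/// Returns the node to store, `None` for a no-op, or an error on a conflict.
fn reveal(existing: Option<&Node>, incoming: Node) -> Result<Option<Node>, String> {
    match existing {
        // Blinded or absent entries can be replaced by the revealed node.
        None | Some(Node::Hash(_)) => Ok(Some(incoming)),
        // Revealing the same shape again is a no-op.
        Some(node) if *node == incoming => Ok(None),
        // Everything else is an unexpected node at this path.
        Some(node) => Err(format!("invalid node at path when revealing: {node:?}")),
    }
}

fn main() {
    assert_eq!(reveal(None, Node::Leaf), Ok(Some(Node::Leaf)));
    assert_eq!(reveal(Some(&Node::Hash([0; 32])), Node::Branch), Ok(Some(Node::Branch)));
    assert_eq!(reveal(Some(&Node::Branch), Node::Branch), Ok(None));
    assert!(reveal(Some(&Node::Leaf), Node::Branch).is_err());
}
```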
@@ -265,15 +309,19 @@ impl RevealedSparseTrie { } /// Remove leaf node from the trie. - pub fn remove_leaf(&mut self, path: Nibbles) -> SparseTrieResult<()> { + pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { self.prefix_set.insert(path.clone()); - let existing = self.values.remove(&path); - if existing.is_none() { - // trie structure unchanged, return immediately - return Ok(()) - } + self.values.remove(path); - let mut removed_nodes = self.take_nodes_for_path(&path)?; + // If the path wasn't present in `values`, we still need to walk the trie and ensure that + // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry + // in `nodes`, but not in the `values`. + + let mut removed_nodes = self.take_nodes_for_path(path)?; debug!(target: "trie::sparse", ?path, ?removed_nodes, "Removed nodes for path"); // Pop the first node from the stack which is the leaf node we want to remove. let mut child = removed_nodes.pop().expect("leaf exists"); @@ -282,7 +330,7 @@ impl RevealedSparseTrie { let mut child_path = child.path.clone(); let SparseNode::Leaf { key, .. } = &child.node else { panic!("expected leaf node") }; child_path.extend_from_slice_unchecked(key); - assert_eq!(child_path, path); + assert_eq!(&child_path, path); } // If we don't have any other removed nodes, insert an empty node at the root. @@ -518,34 +566,46 @@ impl RevealedSparseTrie { } } - /// Update node hashes only if their path exceeds the provided level. - pub fn update_rlp_node_level(&mut self, min_len: usize) { - let mut paths = Vec::from([Nibbles::default()]); + /// Update hashes of the nodes that are located at a level deeper than or equal to the provided + /// depth. The root node has a level of 0. + pub fn update_rlp_node_level(&mut self, depth: usize) { + let targets = self.get_nodes_at_depth(depth); + let mut prefix_set = self.prefix_set.clone().freeze(); + for target in targets { + self.rlp_node(target, &mut prefix_set); + } + } + + /// Returns a list of paths to the nodes that are located at the provided depth when counting + /// from the root node. If there's a leaf at a depth less than the provided depth, it will be + /// included in the result. + fn get_nodes_at_depth(&self, depth: usize) -> HashSet<Nibbles> { + let mut paths = Vec::from([(Nibbles::default(), 0)]); let mut targets = HashSet::<Nibbles>::default(); - while let Some(mut path) = paths.pop() { + while let Some((mut path, level)) = paths.pop() { match self.nodes.get(&path).unwrap() { SparseNode::Empty | SparseNode::Hash(_) => {} SparseNode::Leaf { .. } => { targets.insert(path); } SparseNode::Extension { key, .. } => { - if path.len() >= min_len { + if level >= depth { targets.insert(path); } else { path.extend_from_slice_unchecked(key); - paths.push(path); + paths.push((path, level + 1)); } } SparseNode::Branch { state_mask, ..
} => { - if path.len() >= min_len { + if level >= depth { targets.insert(path); } else { for bit in CHILD_INDEX_RANGE { if state_mask.is_bit_set(bit) { let mut child_path = path.clone(); child_path.push_unchecked(bit); - paths.push(child_path); + paths.push((child_path, level + 1)); } } } @@ -553,10 +613,7 @@ impl RevealedSparseTrie { } } - let mut prefix_set = self.prefix_set.clone().freeze(); - for target in targets { - self.rlp_node(target, &mut prefix_set); - } + targets } fn rlp_node(&mut self, path: Nibbles, prefix_set: &mut PrefixSet) -> RlpNode { @@ -726,7 +783,9 @@ mod tests { use super::*; use alloy_primitives::U256; + use assert_matches::assert_matches; use itertools::Itertools; + use prop::sample::SizeRange; use proptest::prelude::*; use rand::seq::IteratorRandom; use reth_testing_utils::generators; @@ -959,7 +1018,7 @@ mod tests { pretty_assertions::assert_eq!( sparse.nodes.clone().into_iter().collect::>(), BTreeMap::from_iter([ - (Nibbles::new(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1101.into())), ( Nibbles::from_nibbles([0x5, 0x0]), @@ -971,11 +1030,11 @@ mod tests { ), ( Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), - SparseNode::new_leaf(Nibbles::new()) + SparseNode::new_leaf(Nibbles::default()) ), ( Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), - SparseNode::new_leaf(Nibbles::new()) + SparseNode::new_leaf(Nibbles::default()) ), ( Nibbles::from_nibbles([0x5, 0x2]), @@ -998,7 +1057,7 @@ mod tests { ]) ); - sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3])).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -1014,7 +1073,7 @@ mod tests { pretty_assertions::assert_eq!( sparse.nodes.clone().into_iter().collect::>(), BTreeMap::from_iter([ - (Nibbles::new(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), ( Nibbles::from_nibbles([0x5, 0x0]), @@ -1026,11 +1085,11 @@ mod tests { ), ( Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), - SparseNode::new_leaf(Nibbles::new()) + SparseNode::new_leaf(Nibbles::default()) ), ( Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), - SparseNode::new_leaf(Nibbles::new()) + SparseNode::new_leaf(Nibbles::default()) ), (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())), ( @@ -1049,7 +1108,7 @@ mod tests { ]) ); - sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1])).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -1062,7 +1121,7 @@ mod tests { pretty_assertions::assert_eq!( sparse.nodes.clone().into_iter().collect::>(), BTreeMap::from_iter([ - (Nibbles::new(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), ( Nibbles::from_nibbles([0x5, 0x0]), @@ -1085,7 +1144,7 @@ mod tests { ]) ); - sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2])).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -1096,7 +1155,7 @@ mod tests { pretty_assertions::assert_eq!( 
sparse.nodes.clone().into_iter().collect::>(), BTreeMap::from_iter([ - (Nibbles::new(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), ( Nibbles::from_nibbles([0x5, 0x0]), @@ -1118,7 +1177,7 @@ mod tests { ]) ); - sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0])).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -1127,7 +1186,7 @@ mod tests { pretty_assertions::assert_eq!( sparse.nodes.clone().into_iter().collect::>(), BTreeMap::from_iter([ - (Nibbles::new(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), ( Nibbles::from_nibbles([0x5, 0x0]), @@ -1140,56 +1199,84 @@ mod tests { ]) ); - sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3])).unwrap(); // Leaf (Key = 53302) pretty_assertions::assert_eq!( sparse.nodes.clone().into_iter().collect::>(), BTreeMap::from_iter([( - Nibbles::new(), + Nibbles::default(), SparseNode::new_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2])) ),]) ); - sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2])).unwrap(); // Empty pretty_assertions::assert_eq!( sparse.nodes.clone().into_iter().collect::>(), - BTreeMap::from_iter([(Nibbles::new(), SparseNode::Empty),]) + BTreeMap::from_iter([(Nibbles::default(), SparseNode::Empty)]) + ); + } + + #[test] + fn sparse_trie_remove_leaf_blinded() { + let leaf = LeafNode::new( + Nibbles::default(), + alloy_rlp::encode_fixed_size(&U256::from(1)).to_vec(), + ); + let branch = TrieNode::Branch(BranchNode::new( + vec![ + RlpNode::word_rlp(&B256::repeat_byte(1)), + RlpNode::from_raw_rlp(&alloy_rlp::encode(leaf.clone())).unwrap(), + ], + TrieMask::new(0b11), + )); + + let mut sparse = RevealedSparseTrie::from_root(branch.clone()).unwrap(); + + // Reveal a branch node and one of its children + // + // Branch (Mask = 11) + // ├── 0 -> Hash (Path = 0) + // └── 1 -> Leaf (Path = 1) + sparse.reveal_node(Nibbles::default(), branch).unwrap(); + sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf)).unwrap(); + + // Removing a blinded leaf should result in an error + assert_matches!( + sparse.remove_leaf(&Nibbles::from_nibbles([0x0])), + Err(SparseTrieError::BlindedNode { path, hash }) if path == Nibbles::from_nibbles([0x0]) && hash == B256::repeat_byte(1) ); } #[test] fn sparse_trie_fuzz() { - proptest!(ProptestConfig::with_cases(10), |(updates: Vec>)| { + // Having only the first 3 nibbles set, we narrow down the range of keys + // to 4096 different hashes. It allows us to generate collisions more likely + // to test the sparse trie updates. 
+ const KEY_NIBBLES_LEN: usize = 3; + + fn test(updates: Vec<HashMap<Nibbles, Vec<u8>>>) { + let mut rng = generators::rng(); + let mut state = BTreeMap::default(); - let mut unpacked_state = BTreeMap::default(); let mut sparse = RevealedSparseTrie::default(); for update in updates { let keys_to_delete_len = update.len() / 2; - let unpacked_update = update.iter().map(|(key, value)| ( - Nibbles::unpack(key), - alloy_rlp::encode_fixed_size(value).to_vec() - )); - // Insert state updates into the sparse trie and calculate the root - for (key, value) in unpacked_update.clone() { + for (key, value) in update.clone() { sparse.update_leaf(key, value).unwrap(); } let sparse_root = sparse.root(); // Insert state updates into the hash builder and calculate the root - unpacked_state.extend(unpacked_update); state.extend(update); - let keys = state.keys().map(Nibbles::unpack).collect::<Vec<_>>(); let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - unpacked_state.clone(), - keys, + state.clone(), + state.keys().cloned().collect::<Vec<_>>(), ); // Assert that the sparse trie root matches the hash builder root @@ -1204,20 +1291,18 @@ mod tests { .keys() .choose_multiple(&mut rng, keys_to_delete_len) .into_iter() - .copied() + .cloned() .collect::<Vec<_>>(); for key in keys_to_delete { state.remove(&key).unwrap(); - unpacked_state.remove(&Nibbles::unpack(key)).unwrap(); - sparse.remove_leaf(Nibbles::unpack(key)).unwrap(); + sparse.remove_leaf(&key).unwrap(); } let sparse_root = sparse.root(); - let keys = state.keys().map(Nibbles::unpack).collect::<Vec<_>>(); let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - unpacked_state.clone(), - keys, + state.clone(), + state.keys().cloned().collect::<Vec<_>>(), ); // Assert that the sparse trie root matches the hash builder root @@ -1225,6 +1310,290 @@ mod tests { // Assert that the sparse trie nodes match the hash builder proof nodes assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } - }); + } + + /// Pad nibbles of length [`KEY_NIBBLES_LEN`] with zeros to the length of a B256 hash. + fn pad_nibbles(nibbles: Nibbles) -> Nibbles { + let mut base = + Nibbles::from_nibbles_unchecked([0; { B256::len_bytes() * 2 - KEY_NIBBLES_LEN }]); + base.extend_from_slice_unchecked(&nibbles); + base + } + + proptest!(ProptestConfig::with_cases(10), |( + updates in proptest::collection::vec( + proptest::collection::hash_map( + any_with::<Nibbles>(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles), + any::<Vec<u8>>(), + 1..100, + ), + 1..100, + ) + )| { test(updates.into_iter().collect()) }); + }
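The fuzz strategy constrains keys to 3 significant nibbles so that independently generated updates collide on shared prefixes. A self-contained sketch of the same padding idea, using plain `Vec<u8>` nibbles instead of the `Nibbles` type:

```rust
// Illustrative padding: 3 meaningful nibbles, left-padded with zeros to a
// fixed 64-nibble (B256-sized) key, mirroring `pad_nibbles` above.
const KEY_NIBBLES_LEN: usize = 3;
const FULL_KEY_NIBBLES: usize = 32 * 2; // a 32-byte hash is 64 nibbles

fn pad_nibbles(short: &[u8]) -> Vec<u8> {
    let mut key = vec![0u8; FULL_KEY_NIBBLES - short.len()];
    key.extend_from_slice(short);
    key
}

fn main() {
    let key = pad_nibbles(&[0xa, 0xb, 0xc]);
    assert_eq!(key.len(), FULL_KEY_NIBBLES);
    // Only 16^3 = 4096 distinct keys exist, so random updates frequently
    // collide and exercise node splitting and merging in the trie.
    assert_eq!(key[FULL_KEY_NIBBLES - KEY_NIBBLES_LEN..].to_vec(), vec![0xa, 0xb, 0xc]);
}
```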
+ + /// We have three leaves that share the same prefix: 0x00, 0x01 and 0x02. Hash builder trie has + /// only leaves 0x00 and 0x02, and we have proofs for them. Leaf 0x01 is new and inserted into + /// the sparse trie first. + /// + /// 1. Reveal the hash builder proof to leaf 0x00 in the sparse trie. + /// 2. Insert leaf 0x01 into the sparse trie. + /// 3. Reveal the hash builder proof to leaf 0x02 in the sparse trie. + /// + /// The hash builder proof to the leaf 0x02 didn't have the leaf 0x01 at the corresponding + /// nibble of the branch node, so we need to adjust the branch node instead of fully + /// replacing it. + #[test] + fn sparse_trie_reveal_node_1() { + let key1 = || Nibbles::from_nibbles_unchecked([0x00]); + let key2 = || Nibbles::from_nibbles_unchecked([0x01]); + let key3 = || Nibbles::from_nibbles_unchecked([0x02]); + let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + + // Generate the proof for the root node and initialize the sparse trie with it + let (_, proof_nodes) = hash_builder_root_with_proofs( + [(key1(), value()), (key3(), value())], + [Nibbles::default()], + ); + let mut sparse = RevealedSparseTrie::from_root( + TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + ) + .unwrap(); + + // Generate the proof for the first key and reveal it in the sparse trie + let (_, proof_nodes) = + hash_builder_root_with_proofs([(key1(), value()), (key3(), value())], [key1()]); + for (path, node) in proof_nodes.nodes_sorted() { + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + } + + // Check that the branch node exists with only two nibbles set + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b101.into())) + ); + + // Insert the leaf for the second key + sparse.update_leaf(key2(), value().to_vec()).unwrap(); + + // Check that the branch node was updated and another nibble was set + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b111.into())) + ); + + // Generate the proof for the third key and reveal it in the sparse trie + let (_, proof_nodes_3) = + hash_builder_root_with_proofs([(key1(), value()), (key3(), value())], [key3()]); + for (path, node) in proof_nodes_3.nodes_sorted() { + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + } + + // Check that nothing changed in the branch node + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b111.into())) + ); + + // Generate the nodes for the full trie with all three keys using the hash builder, and + // compare them to the sparse trie + let (_, proof_nodes) = hash_builder_root_with_proofs( + [(key1(), value()), (key2(), value()), (key3(), value())], + [key1(), key2(), key3()], + ); + + assert_eq_sparse_trie_proof_nodes(&sparse, proof_nodes); + } + + /// We have three leaves: 0x0000, 0x0101, and 0x0102. Hash builder trie has all nodes, and we + /// have proofs for them. + /// + /// 1. Reveal the hash builder proof to leaf 0x0000 in the sparse trie. + /// 2. Remove leaf 0x0000 from the sparse trie (that will remove the branch node and create an + /// extension node with the key 0x01). + /// 3. Reveal the hash builder proof to leaf 0x0101 in the sparse trie. + /// + /// The hash builder proof to the leaf 0x0101 had a branch node in the path, but we turned it + /// into an extension node, so it should ignore this node.
+ #[test] + fn sparse_trie_reveal_node_2() { + let key1 = || Nibbles::from_nibbles_unchecked([0x00, 0x00]); + let key2 = || Nibbles::from_nibbles_unchecked([0x01, 0x01]); + let key3 = || Nibbles::from_nibbles_unchecked([0x01, 0x02]); + let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + + // Generate the proof for the root node and initialize the sparse trie with it + let (_, proof_nodes) = hash_builder_root_with_proofs( + [(key1(), value()), (key2(), value()), (key3(), value())], + [Nibbles::default()], + ); + let mut sparse = RevealedSparseTrie::from_root( + TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + ) + .unwrap(); + + // Generate the proof for the children of the root branch node and reveal it in the sparse + // trie + let (_, proof_nodes) = hash_builder_root_with_proofs( + [(key1(), value()), (key2(), value()), (key3(), value())], + [key1(), Nibbles::from_nibbles_unchecked([0x01])], + ); + for (path, node) in proof_nodes.nodes_sorted() { + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + } + + // Check that the branch node exists + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b11.into())) + ); + + // Remove the leaf for the first key + sparse.remove_leaf(&key1()).unwrap(); + + // Check that the branch node was turned into an extension node + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x01]))) + ); + + // Generate the proof for the second key and reveal it in the sparse trie + let (_, proof_nodes) = hash_builder_root_with_proofs( + [(key1(), value()), (key2(), value()), (key3(), value())], + [key2()], + ); + for (path, node) in proof_nodes.nodes_sorted() { + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + } + + // Check that nothing changed in the extension node + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x01]))) + ); + } + + /// We have two leaves that share the same prefix: 0x0001 and 0x0002, and a leaf with a + /// different prefix: 0x0100. Hash builder trie has only the first two leaves, and we have + /// proofs for them. + /// + /// 1. Insert the leaf 0x0100 into the sparse trie, and check that the root extension node was + /// turned into a branch node. + /// 2. Reveal the leaf 0x0001 in the sparse trie, and check that the root branch node wasn't + /// overwritten with the extension node from the proof.
+ #[test] + fn sparse_trie_reveal_node_3() { + let key1 = || Nibbles::from_nibbles_unchecked([0x00, 0x01]); + let key2 = || Nibbles::from_nibbles_unchecked([0x00, 0x02]); + let key3 = || Nibbles::from_nibbles_unchecked([0x01, 0x00]); + let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + + // Generate the proof for the root node and initialize the sparse trie with it + let (_, proof_nodes) = hash_builder_root_with_proofs( + [(key1(), value()), (key2(), value())], + [Nibbles::default()], + ); + let mut sparse = RevealedSparseTrie::from_root( + TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + ) + .unwrap(); + + // Check that the root extension node exists + assert_matches!( + sparse.nodes.get(&Nibbles::default()), + Some(SparseNode::Extension { key, hash: None }) if *key == Nibbles::from_nibbles([0x00]) + ); + + // Insert the leaf with a different prefix + sparse.update_leaf(key3(), value().to_vec()).unwrap(); + + // Check that the extension node was turned into a branch node + assert_matches!( + sparse.nodes.get(&Nibbles::default()), + Some(SparseNode::Branch { state_mask, hash: None }) if *state_mask == TrieMask::new(0b11) + ); + + // Generate the proof for the first key and reveal it in the sparse trie + let (_, proof_nodes) = + hash_builder_root_with_proofs([(key1(), value()), (key2(), value())], [key1()]); + for (path, node) in proof_nodes.nodes_sorted() { + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + } + + // Check that the branch node wasn't overwritten by the extension node in the proof + assert_matches!( + sparse.nodes.get(&Nibbles::default()), + Some(SparseNode::Branch { state_mask, hash: None }) if *state_mask == TrieMask::new(0b11) + ); + } + + #[test] + fn sparse_trie_get_nodes_at_depth() { + let mut sparse = RevealedSparseTrie::default(); + + let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); + + // Extension (Key = 5) – Level 0 + // └── Branch (Mask = 1011) – Level 1 + // ├── 0 -> Extension (Key = 23) – Level 2 + // │ └── Branch (Mask = 0101) – Level 3 + // │ ├── 1 -> Leaf (Key = 1, Path = 50231) – Level 4 + // │ └── 3 -> Leaf (Key = 3, Path = 50233) – Level 4 + // ├── 2 -> Leaf (Key = 013, Path = 52013) – Level 2 + // └── 3 -> Branch (Mask = 0101) – Level 2 + // ├── 1 -> Leaf (Key = 3102, Path = 53102) – Level 3 + // └── 3 -> Branch (Mask = 1010) – Level 3 + // ├── 0 -> Leaf (Key = 3302, Path = 53302) – Level 4 + // └── 2 -> Leaf (Key = 3320, Path = 53320) – Level 4 + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); + + assert_eq!(sparse.get_nodes_at_depth(0), HashSet::from([Nibbles::default()])); + assert_eq!( + sparse.get_nodes_at_depth(1), + HashSet::from([Nibbles::from_nibbles_unchecked([0x5])]) + ); + assert_eq!( + sparse.get_nodes_at_depth(2), + HashSet::from([ + Nibbles::from_nibbles_unchecked([0x5, 0x0]), + Nibbles::from_nibbles_unchecked([0x5, 0x2]), + Nibbles::from_nibbles_unchecked([0x5, 0x3]) + ]) + ); + assert_eq!( + 
sparse.get_nodes_at_depth(3), + HashSet::from([ + Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3]), + Nibbles::from_nibbles_unchecked([0x5, 0x2]), + Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x1]), + Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3]) + ]) + ); + assert_eq!( + sparse.get_nodes_at_depth(4), + HashSet::from([ + Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3, 0x1]), + Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3, 0x3]), + Nibbles::from_nibbles_unchecked([0x5, 0x2]), + Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x1]), + Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3, 0x0]), + Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3, 0x2]) + ]) + ); } } diff --git a/crates/trie/trie/src/prefix_set.rs b/crates/trie/trie/src/prefix_set.rs index da912fbbdad9..0cf16f939d7b 100644 --- a/crates/trie/trie/src/prefix_set.rs +++ b/crates/trie/trie/src/prefix_set.rs @@ -168,8 +168,7 @@ pub struct PrefixSet { } impl PrefixSet { - /// Returns `true` if any of the keys in the set has the given prefix or - /// if the given prefix is a prefix of any key in the set. + /// Returns `true` if any of the keys in the set has the given prefix #[inline] pub fn contains(&mut self, prefix: &[u8]) -> bool { if self.all { diff --git a/etc/README.md b/etc/README.md index 28c71b04688b..fb36d15b89e1 100644 --- a/etc/README.md +++ b/etc/README.md @@ -68,6 +68,7 @@ To set up a new metric in Reth and its Grafana dashboard: - Click `Save to file` 9. Update dashboard file: + - Replace the content of the corresponding file in the [dashboards folder](./grafana/dashboards) with the exported JSON @@ -86,4 +87,4 @@ In order to import new Grafana dashboards or update a dashboard: 4. If updating an existing dashboard, you will need to change the name and UID of the imported dashboard in order to avoid conflict -5. Delete the old dashboard +5. 
Delete the old dashboard \ No newline at end of file diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index cc761aa98a61..1c53e4f41051 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -109,6 +109,7 @@ where match self.pool.get_all_blobs_exact(txs.iter().map(|(tx, _)| tx.hash()).collect()) { Ok(blobs) => { + actions_to_queue.reserve_exact(txs.len()); for ((tx, _), sidecar) in txs.iter().zip(blobs.iter()) { let transaction = BlobTransaction::try_from_signed(tx.clone(), sidecar.clone()) .expect("should not fail to convert blob tx if it is already eip4844"); diff --git a/examples/custom-beacon-withdrawals/Cargo.toml b/examples/custom-beacon-withdrawals/Cargo.toml new file mode 100644 index 000000000000..c396ca11df8b --- /dev/null +++ b/examples/custom-beacon-withdrawals/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "example-custom-beacon-withdrawals" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth.workspace = true +reth-node-ethereum.workspace = true +reth-evm-ethereum.workspace = true +reth-chainspec.workspace = true +reth-evm.workspace = true +reth-primitives.workspace = true + +alloy-sol-macro = "0.8.9" +alloy-sol-types.workspace = true +alloy-eips.workspace = true +alloy-consensus.workspace = true + +eyre.workspace = true + +[features] +optimism = [ + "reth-primitives/optimism" +] \ No newline at end of file diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs new file mode 100644 index 000000000000..09dad2f7007d --- /dev/null +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -0,0 +1,286 @@ +//! Example for how to modify a block post-execution step. It credits beacon withdrawals with a +//! 
custom mechanism instead of minting native tokens + +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +use alloy_eips::eip7685::Requests; +use alloy_sol_macro::sol; +use alloy_sol_types::SolCall; +#[cfg(feature = "optimism")] +use reth::revm::primitives::OptimismFields; +use reth::{ + api::{ConfigureEvm, ConfigureEvmEnv, NodeTypesWithEngine}, + builder::{components::ExecutorBuilder, BuilderContext, FullNodeTypes}, + cli::Cli, + providers::ProviderError, + revm::{ + interpreter::Host, + primitives::{Env, TransactTo, TxEnv}, + Database, DatabaseCommit, Evm, State, + }, +}; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_evm::execute::{ + BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, ExecuteOutput, + InternalBlockExecutionError, +}; +use reth_evm_ethereum::EthEvmConfig; +use reth_node_ethereum::{node::EthereumAddOns, BasicBlockExecutorProvider, EthereumNode}; +use reth_primitives::{ + revm_primitives::{ + address, Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, U256, + }, + BlockWithSenders, Receipt, Withdrawal, +}; +use std::{fmt::Display, sync::Arc}; + +pub const SYSTEM_ADDRESS: Address = address!("fffffffffffffffffffffffffffffffffffffffe"); +pub const WITHDRAWALS_ADDRESS: Address = address!("4200000000000000000000000000000000000000"); + +fn main() { + Cli::parse_args() + .run(|builder, _| async move { + let handle = builder + // use the default ethereum node types + .with_types::<EthereumNode>() + // Configure the components of the node + // use default ethereum components but use our custom executor + .with_components( + EthereumNode::components().executor(CustomExecutorBuilder::default()), + ) + .with_add_ons(EthereumAddOns::default()) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) + .unwrap(); +} + +/// A custom executor builder +#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct CustomExecutorBuilder; + +impl<Types, Node> ExecutorBuilder<Node> for CustomExecutorBuilder +where + Types: NodeTypesWithEngine<ChainSpec = ChainSpec>, + Node: FullNodeTypes<Types = Types>, +{ + type EVM = EthEvmConfig; + type Executor = BasicBlockExecutorProvider<CustomExecutorStrategyFactory>; + + async fn build_evm( + self, + ctx: &BuilderContext<Node>, + ) -> eyre::Result<(Self::EVM, Self::Executor)> { + let chain_spec = ctx.chain_spec(); + let evm_config = EthEvmConfig::new(ctx.chain_spec()); + let strategy_factory = + CustomExecutorStrategyFactory { chain_spec, evm_config: evm_config.clone() }; + let executor = BasicBlockExecutorProvider::new(strategy_factory); + + Ok((evm_config, executor)) + } +} + +#[derive(Clone)] +pub struct CustomExecutorStrategyFactory { + /// The chainspec + chain_spec: Arc<ChainSpec>, + /// How to create an EVM. + evm_config: EthEvmConfig, +} + +impl BlockExecutionStrategyFactory for CustomExecutorStrategyFactory { + type Strategy<DB: Database<Error: Into<ProviderError> + Display>> = CustomExecutorStrategy<DB>; + + fn create_strategy<DB>(&self, db: DB) -> Self::Strategy<DB> + where + DB: Database<Error: Into<ProviderError> + Display>, + { + let state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + CustomExecutorStrategy { + state, + chain_spec: self.chain_spec.clone(), + evm_config: self.evm_config.clone(), + } + } +} + +pub struct CustomExecutorStrategy<DB> +where + DB: Database<Error: Into<ProviderError> + Display>, +{ + /// The chainspec + chain_spec: Arc<ChainSpec>, + /// How to create an EVM. + evm_config: EthEvmConfig, + /// Current state for block execution. + state: State<DB>, +} + +impl<DB> CustomExecutorStrategy<DB> +where + DB: Database<Error: Into<ProviderError> + Display>, +{ + /// Configures a new evm configuration and block environment for the given block.
+ /// + /// # Caution + /// + /// This does not initialize the tx environment. + fn evm_env_for_block( + &self, + header: &alloy_consensus::Header, + total_difficulty: U256, + ) -> EnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); + + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } +} + +impl<DB> BlockExecutionStrategy<DB> for CustomExecutorStrategy<DB> +where + DB: Database<Error: Into<ProviderError> + Display>, +{ + type Error = BlockExecutionError; + + fn apply_pre_execution_changes( + &mut self, + block: &BlockWithSenders, + _total_difficulty: U256, + ) -> Result<(), Self::Error> { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = + (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + self.state.set_state_clear_flag(state_clear_flag); + + Ok(()) + } + + fn execute_transactions( + &mut self, + _block: &BlockWithSenders, + _total_difficulty: U256, + ) -> Result<ExecuteOutput, Self::Error> { + Ok(ExecuteOutput { receipts: vec![], gas_used: 0 }) + } + + fn apply_post_execution_changes( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + _receipts: &[Receipt], + ) -> Result<Requests, Self::Error> { + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + if let Some(withdrawals) = block.body.withdrawals.as_ref() { + apply_withdrawals_contract_call(withdrawals, &mut evm)?; + } + + Ok(Requests::default()) + } + + fn state_ref(&self) -> &State<DB> { + &self.state + } + + fn state_mut(&mut self) -> &mut State<DB> { + &mut self.state + } +} + +sol!( + function withdrawals( + uint64[] calldata amounts, + address[] calldata addresses + ); +); + +/// Applies the post-block call to the withdrawals contract, using the given withdrawals and +/// EVM.
+pub fn apply_withdrawals_contract_call<EXT, DB: Database + DatabaseCommit>( + withdrawals: &[Withdrawal], + evm: &mut Evm<'_, EXT, DB>, +) -> Result<(), BlockExecutionError> +where + DB::Error: std::fmt::Display, +{ + // get previous env + let previous_env = Box::new(evm.context.env().clone()); + + // modify env for pre block call + fill_tx_env_with_system_contract_call( + &mut evm.context.evm.env, + SYSTEM_ADDRESS, + WITHDRAWALS_ADDRESS, + withdrawalsCall { + amounts: withdrawals.iter().map(|w| w.amount).collect::<Vec<_>>(), + addresses: withdrawals.iter().map(|w| w.address).collect::<Vec<_>>(), + } + .abi_encode() + .into(), + ); + + let mut state = match evm.transact() { + Ok(res) => res.state, + Err(e) => { + evm.context.evm.env = previous_env; + return Err(BlockExecutionError::Internal(InternalBlockExecutionError::Other( + format!("withdrawal contract system call revert: {}", e).into(), + ))) + } + }; + + // Clean-up post system tx context + state.remove(&SYSTEM_ADDRESS); + state.remove(&evm.block().coinbase); + evm.context.evm.db.commit(state); + // re-set the previous env + evm.context.evm.env = previous_env; + + Ok(()) +} + +fn fill_tx_env_with_system_contract_call( + env: &mut Env, + caller: Address, + contract: Address, + data: Bytes, +) { + env.tx = TxEnv { + caller, + transact_to: TransactTo::Call(contract), + // Explicitly set nonce to None so revm does not do any nonce checks + nonce: None, + gas_limit: 30_000_000, + value: U256::ZERO, + data, + // Setting the gas price to zero enforces that no value is transferred as part of the call, + // and that the call will not count against the block's gas limit + gas_price: U256::ZERO, + // The chain ID check is not relevant here and is disabled if set to None + chain_id: None, + // Setting the gas priority fee to None ensures the effective gas price is derived from the + // `gas_price` field, which we need to be zero + gas_priority_fee: None, + access_list: Vec::new(), + // blob fields can be None for this tx + blob_hashes: Vec::new(), + max_fee_per_blob_gas: None, + authorization_list: None, + #[cfg(feature = "optimism")] + optimism: OptimismFields::default(), + }; + + // ensure the block gas limit is >= the tx + env.block.gas_limit = U256::from(env.tx.gas_limit); + + // disable the base fee check for this call by setting the base fee to zero + env.block.basefee = U256::ZERO; +}
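The env manipulation at the end of the example follows the usual system-call recipe: nonce and chain-id checks disabled, zero gas price, and a block env adjusted so it cannot interfere with the call. A condensed view of those invariants as a standalone check; `SystemCallEnv` is a hypothetical struct for illustration, not revm's `Env`:

```rust
// Hypothetical condensed view of the system-call env rules above.
struct SystemCallEnv {
    gas_limit: u64,
    gas_price: u64,
    nonce: Option<u64>,
    block_gas_limit: u64,
    block_basefee: u64,
}

impl SystemCallEnv {
    fn new(gas_limit: u64) -> Self {
        Self {
            gas_limit,
            gas_price: 0,               // nothing is paid for the call
            nonce: None,                // revm skips nonce checks
            block_gas_limit: gas_limit, // the block always fits the call
            block_basefee: 0,           // base-fee check disabled
        }
    }
}

fn main() {
    let env = SystemCallEnv::new(30_000_000);
    assert!(env.block_gas_limit >= env.gas_limit);
    assert_eq!((env.gas_price, env.block_basefee), (0, 0));
    assert!(env.nonce.is_none());
}
```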