diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 1596d90b30f..a0558be60fe 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,19 +1,19 @@
 * @gakonst
 bin/ @onbjerg
-crates/blockchain-tree/ @rakita @rkrasiuk @mattsse @Rjected
 crates/blockchain-tree-api/ @rakita @rkrasiuk @mattsse @Rjected
-crates/chainspec/ @Rjected @joshieDo @mattsse
+crates/blockchain-tree/ @rakita @rkrasiuk @mattsse @Rjected
 crates/chain-state/ @fgimenez @mattsse @rkrasiuk
+crates/chainspec/ @Rjected @joshieDo @mattsse
 crates/cli/ @onbjerg @mattsse
 crates/config/ @onbjerg
 crates/consensus/ @rkrasiuk @mattsse @Rjected
-crates/engine @rkrasiuk @mattsse @Rjected
 crates/e2e-test-utils/ @mattsse @Rjected
+crates/engine @rkrasiuk @mattsse @Rjected
 crates/engine/ @rkrasiuk @mattsse @Rjected @fgimenez
+crates/era/ @mattsse @RomanHodulak
 crates/errors/ @mattsse
-crates/era/ @mattsse
-crates/ethereum/ @mattsse @Rjected
 crates/ethereum-forks/ @mattsse @Rjected
+crates/ethereum/ @mattsse @Rjected
 crates/etl/ @joshieDo @shekhirin
 crates/evm/ @rakita @mattsse @Rjected
 crates/exex/ @onbjerg @shekhirin
@@ -24,17 +24,18 @@ crates/net/downloaders/ @onbjerg @rkrasiuk
 crates/node/ @mattsse @Rjected @onbjerg @klkvr
 crates/optimism/ @mattsse @Rjected @fgimenez
 crates/payload/ @mattsse @Rjected
+crates/primitives-traits/ @Rjected @RomanHodulak @mattsse @klkvr
 crates/primitives/ @Rjected @mattsse @klkvr
-crates/primitives-traits/ @Rjected @joshieDo @mattsse @klkvr
 crates/prune/ @shekhirin @joshieDo
+crates/ress @rkrasiuk
 crates/revm/ @mattsse @rakita
-crates/rpc/ @mattsse @Rjected
+crates/rpc/ @mattsse @Rjected @RomanHodulak
 crates/stages/ @onbjerg @rkrasiuk @shekhirin
 crates/static-file/ @joshieDo @shekhirin
 crates/storage/codecs/ @joshieDo
-crates/storage/db/ @joshieDo @rakita
 crates/storage/db-api/ @joshieDo @rakita
 crates/storage/db-common/ @Rjected @onbjerg
+crates/storage/db/ @joshieDo @rakita
 crates/storage/errors/ @rakita @onbjerg
 crates/storage/libmdbx-rs/ @rakita @shekhirin
 crates/storage/nippy-jar/ @joshieDo @shekhirin
@@ -44,7 +45,6 @@ crates/tasks/ @mattsse
 crates/tokio-util/ @fgimenez
 crates/tracing/ @onbjerg
 crates/transaction-pool/ @mattsse
-crates/trie/ @rkrasiuk @Rjected @shekhirin
-crates/ress @rkrasiuk
+crates/trie/ @rkrasiuk @Rjected @shekhirin @mediocregopher
 etc/ @Rjected @onbjerg @shekhirin
 .github/ @onbjerg @gakonst @DaniPopes
diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh
index e759a549283..1a94c0d4c33 100755
--- a/.github/assets/check_wasm.sh
+++ b/.github/assets/check_wasm.sh
@@ -58,6 +58,7 @@ exclude_crates=(
    reth-ress-provider
    # The following are not supposed to be working
    reth # all of the crates below
+   reth-alloy-provider
    reth-invalid-block-hooks # reth-provider
    reth-libmdbx # mdbx
    reth-mdbx-sys # mdbx
@@ -69,6 +70,7 @@ exclude_crates=(
    reth-transaction-pool # c-kzg
    reth-payload-util # reth-transaction-pool
    reth-trie-parallel # tokio
+   reth-trie-sparse-parallel # rayon
    reth-testing-utils
    reth-optimism-txpool # reth-transaction-pool
    reth-era-downloader # tokio
diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml
index c6444017ba2..f155a3478c6 100644
--- a/.github/assets/hive/expected_failures.yaml
+++ b/.github/assets/hive/expected_failures.yaml
@@ -16,6 +16,10 @@ rpc-compat:
   - eth_getTransactionReceipt/get-legacy-input (reth)
   - eth_getTransactionReceipt/get-legacy-receipt (reth)
+
+  # after https://github.com/paradigmxyz/reth/pull/16742 we start the node in
+  # syncing mode, but the test expects syncing to be false on start
+  - eth_syncing/check-syncing (reth)

 # no fix due to https://github.com/paradigmxyz/reth/issues/8732
 engine-withdrawals:
   - Withdrawals Fork On Genesis (Paris) (reth)
@@ -36,6 +40,9 @@ engine-api: []
 # no fix due to https://github.com/paradigmxyz/reth/issues/8732
 engine-cancun:
   - Invalid PayloadAttributes, Missing BeaconRoot, Syncing=True (Cancun) (reth)
+  # the test fails with older versions of the code for which it previously passed,
+  # probably related to changes in hive or its dependencies
+  - Blob Transaction Ordering, Multiple Clients (Cancun) (reth)

 sync: []
diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml
index ac6650624c3..ac21b08526b 100644
--- a/.github/workflows/book.yml
+++ b/.github/workflows/book.yml
@@ -7,115 +7,51 @@ on:
     branches: [main, scroll]
   pull_request:
     branches: [main, scroll]
+    types: [opened, reopened, synchronize, closed]
   merge_group:

 jobs:
-  test:
-    runs-on: ubuntu-latest
-    name: test
-    timeout-minutes: 60
-
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Install mdbook
-        run: |
-          mkdir mdbook
-          curl -sSL https://github.com/rust-lang/mdBook/releases/download/v0.4.14/mdbook-v0.4.14-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=./mdbook
-          echo $(pwd)/mdbook >> $GITHUB_PATH
-
-      - name: Install mdbook-template
-        run: |
-          mkdir mdbook-template
-          curl -sSL https://github.com/sgoudham/mdbook-template/releases/latest/download/mdbook-template-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=./mdbook-template
-          echo $(pwd)/mdbook-template >> $GITHUB_PATH
-
-      - name: Run tests
-        run: mdbook test
-
-  lint:
-    runs-on: ubuntu-latest
-    name: lint
-    timeout-minutes: 60
-
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Install mdbook-linkcheck
-        run: |
-          mkdir mdbook-linkcheck
-          curl -sSL -o mdbook-linkcheck.zip https://github.com/Michael-F-Bryan/mdbook-linkcheck/releases/latest/download/mdbook-linkcheck.x86_64-unknown-linux-gnu.zip
-          unzip mdbook-linkcheck.zip -d ./mdbook-linkcheck
-          chmod +x $(pwd)/mdbook-linkcheck/mdbook-linkcheck
-          echo $(pwd)/mdbook-linkcheck >> $GITHUB_PATH
-
-      - name: Run linkcheck
-        run: mdbook-linkcheck --standalone
-
   build:
     runs-on: ubuntu-latest
     timeout-minutes: 60
     steps:
-      - uses: actions/checkout@v4
-      - uses: rui314/setup-mold@v1
-      - uses: dtolnay/rust-toolchain@nightly
-      - name: Install mdbook
-        run: |
-          mkdir mdbook
-          curl -sSL https://github.com/rust-lang/mdBook/releases/download/v0.4.14/mdbook-v0.4.14-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=./mdbook
-          echo $(pwd)/mdbook >> $GITHUB_PATH
+      - name: Checkout
+        uses: actions/checkout@v4

-      - name: Install mdbook-template
-        run: |
-          mkdir mdbook-template
-          curl -sSL https://github.com/sgoudham/mdbook-template/releases/latest/download/mdbook-template-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=./mdbook-template
-          echo $(pwd)/mdbook-template >> $GITHUB_PATH
+      - name: Install bun
+        uses: oven-sh/setup-bun@v2

-      - uses: Swatinem/rust-cache@v2
-        with:
-          cache-on-failure: true
+      - name: Install Playwright browsers
+        # Required for rehype-mermaid to render Mermaid diagrams during build
+        run: |
+          cd docs/vocs/
+          bun i
+          npx playwright install --with-deps chromium

-      - name: Build book
-        run: mdbook build
+      - name: Install Rust nightly
+        uses: dtolnay/rust-toolchain@nightly

       - name: Build docs
-        run: cargo docs --exclude "example-*"
-        env:
-          # Keep in sync with ./ci.yml:jobs.docs
-          RUSTDOCFLAGS: --cfg docsrs --show-type-layout --generate-link-to-definition --enable-index-page -Zunstable-options
+        run: cd docs/vocs && bash scripts/build-cargo-docs.sh

-      - 
name: Move docs to book folder + - name: Build Vocs run: | - mv target/doc target/book/docs + cd docs/vocs/ && bun run build + echo "Vocs Build Complete" - - name: Archive artifact - shell: sh - run: | - chmod -c -R +rX "target/book" | - while read line; do - echo "::warning title=Invalid file permissions automatically fixed::$line" - done - tar \ - --dereference --hard-dereference \ - --directory "target/book" \ - -cvf "$RUNNER_TEMP/artifact.tar" \ - --exclude=.git \ - --exclude=.github \ - . + - name: Setup Pages + uses: actions/configure-pages@v5 - name: Upload artifact - uses: actions/upload-artifact@v4 + uses: actions/upload-pages-artifact@v3 with: - name: github-pages - path: ${{ runner.temp }}/artifact.tar - retention-days: 1 - if-no-files-found: error + path: "./docs/vocs/docs/dist" deploy: # Only deploy if a push to main if: github.ref_name == 'main' && github.event_name == 'push' runs-on: ubuntu-latest - needs: [test, lint, build] + needs: [build] # Grant GITHUB_TOKEN the permissions required to make a Pages deployment permissions: diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 7bab615fbc1..af8c5d5c2a1 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -31,7 +31,8 @@ jobs: uses: actions/checkout@v4 with: repository: ethereum/hive - ref: master + # TODO: unpin when https://github.com/ethereum/hive/issues/1306 is fixed + ref: edd9969338dd1798ba2e61f049c7e3a15cef53e6 path: hivetests - uses: actions/setup-go@v5 diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 3e9a8139dc7..b94701c8592 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -66,12 +66,12 @@ jobs: if: github.event_name == 'schedule' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 - - uses: rui314/setup-mold@v1 - - uses: dtolnay/rust-toolchain@stable - - uses: taiki-e/install-action@nextest - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - name: run era1 files integration tests - run: cargo nextest run --package reth-era --test it -- --ignored + - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 + - uses: dtolnay/rust-toolchain@stable + - uses: taiki-e/install-action@nextest + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: run era1 files integration tests + run: cargo nextest run --package reth-era --test it -- --ignored diff --git a/.github/workflows/kurtosis-op.yml b/.github/workflows/kurtosis-op.yml index 5fb26acefa9..e77d5528feb 100644 --- a/.github/workflows/kurtosis-op.yml +++ b/.github/workflows/kurtosis-op.yml @@ -62,7 +62,9 @@ jobs: sudo apt update sudo apt install kurtosis-cli kurtosis engine start - kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params.yaml + # TODO: unpin optimism-package when https://github.com/ethpandaops/optimism-package/issues/340 is fixed + # kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params.yaml + kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package@452133367b693e3ba22214a6615c86c60a1efd5e --args-file .github/assets/kurtosis_op_network_params.yaml ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]') GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2151908-1-op-geth-op-node-op-kurtosis".public_ports.rpc.number') RETH_PORT=$(curl 
"http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2151908-2-op-reth-op-node-op-kurtosis".public_ports.rpc.number') diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 93a7e633d09..961a435ea70 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -23,10 +23,6 @@ jobs: - type: scroll args: --bin scroll-reth --workspace --lib --examples --tests --benches --locked features: "asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" - # issue - # - type: book - # args: --manifest-path book/sources/Cargo.toml --workspace --bins - # features: "" steps: - uses: actions/checkout@v4 - uses: rui314/setup-mold@v1 @@ -163,8 +159,6 @@ jobs: components: rustfmt - name: Run fmt run: cargo fmt --all --check - - name: Run fmt on book sources - run: cargo fmt --manifest-path book/sources/Cargo.toml --all --check udeps: name: udeps @@ -179,6 +173,7 @@ jobs: cache-on-failure: true - uses: taiki-e/install-action@cargo-udeps - run: cargo udeps --workspace --lib --examples --tests --benches --all-features --locked + book: name: book runs-on: ubuntu-latest @@ -193,8 +188,8 @@ jobs: - run: cargo build --bin reth --workspace --features ethereum env: RUSTFLAGS: -D warnings - - run: ./book/cli/update.sh target/debug/reth - - name: Check book changes + - run: ./docs/cli/update.sh target/debug/reth + - name: Check docs changes run: git diff --exit-code codespell: @@ -213,7 +208,7 @@ jobs: - name: Checkout repository uses: actions/checkout@v4 - name: Run dprint - uses: dprint/check@v2.2 + uses: dprint/check@v2.3 with: config-path: dprint.json diff --git a/.github/workflows/pr-title.yml b/.github/workflows/pr-title.yml index 7977c11d8bb..1d422ad45cf 100644 --- a/.github/workflows/pr-title.yml +++ b/.github/workflows/pr-title.yml @@ -30,6 +30,7 @@ jobs: fix chore test + bench perf refactor docs @@ -55,23 +56,24 @@ jobs: - `fix`: Patches a bug - `chore`: General maintenance tasks or updates - `test`: Adding new tests or modifying existing tests + - `bench`: Adding new benchmarks or modifying existing benchmarks - `perf`: Performance improvements - `refactor`: Changes to improve code structure - `docs`: Documentation updates - `ci`: Changes to CI/CD configurations - `revert`: Reverts a previously merged PR - `deps`: Updates dependencies - + **Breaking Changes** Breaking changes are noted by using an exclamation mark. For example: - `feat!: changed the API` - `chore(node)!: Removed unused public function` - + **Help** For more information, follow the guidelines here: https://www.conventionalcommits.org/en/v1.0.0/ - + - name: Remove Comment for Valid Title if: steps.lint_pr_title.outcome == 'success' uses: marocchino/sticky-pull-request-comment@v2 diff --git a/.github/workflows/release-dist.yml b/.github/workflows/release-dist.yml index f7df80e81f9..57a6f311d0b 100644 --- a/.github/workflows/release-dist.yml +++ b/.github/workflows/release-dist.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Update Homebrew formula - uses: dawidd6/action-homebrew-bump-formula@v4 + uses: dawidd6/action-homebrew-bump-formula@v5 with: token: ${{ secrets.HOMEBREW }} no_fork: true diff --git a/.github/workflows/sync-era.yml b/.github/workflows/sync-era.yml new file mode 100644 index 00000000000..973dc5ec036 --- /dev/null +++ b/.github/workflows/sync-era.yml @@ -0,0 +1,67 @@ +# Runs sync tests with ERA stage enabled. 
+
+name: sync-era test
+
+on:
+  workflow_dispatch:
+  schedule:
+    - cron: "0 */6 * * *"
+
+env:
+  CARGO_TERM_COLOR: always
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  sync:
+    name: sync (${{ matrix.chain.bin }})
+    runs-on:
+      group: Reth
+    env:
+      RUST_LOG: info,sync=error
+      RUST_BACKTRACE: 1
+    timeout-minutes: 60
+    strategy:
+      matrix:
+        chain:
+          - build: install
+            bin: reth
+            chain: mainnet
+            tip: "0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4"
+            block: 100000
+            unwind-target: "0x52e0509d33a988ef807058e2980099ee3070187f7333aae12b64d4d675f34c5a"
+          - build: install-op
+            bin: op-reth
+            chain: base
+            tip: "0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7"
+            block: 10000
+            unwind-target: "0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de"
+    steps:
+      - uses: actions/checkout@v4
+      - uses: rui314/setup-mold@v1
+      - uses: dtolnay/rust-toolchain@stable
+      - uses: Swatinem/rust-cache@v2
+        with:
+          cache-on-failure: true
+      - name: Build ${{ matrix.chain.bin }}
+        run: make ${{ matrix.chain.build }}
+      - name: Run sync with ERA enabled
+        run: |
+          ${{ matrix.chain.bin }} node \
+            --chain ${{ matrix.chain.chain }} \
+            --debug.tip ${{ matrix.chain.tip }} \
+            --debug.max-block ${{ matrix.chain.block }} \
+            --debug.terminate \
+            --era.enable
+      - name: Verify the target block hash
+        run: |
+          ${{ matrix.chain.bin }} db --chain ${{ matrix.chain.chain }} get static-file headers ${{ matrix.chain.block }} \
+            | grep ${{ matrix.chain.tip }}
+      - name: Run stage unwind for 100 blocks
+        run: |
+          ${{ matrix.chain.bin }} stage unwind num-blocks 100 --chain ${{ matrix.chain.chain }}
+      - name: Run stage unwind to block hash
+        run: |
+          ${{ matrix.chain.bin }} stage unwind to-block ${{ matrix.chain.unwind-target }} --chain ${{ matrix.chain.chain }}
diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml
index 1cfacc4dbae..c7afa2eab09 100644
--- a/.github/workflows/unit.yml
+++ b/.github/workflows/unit.yml
@@ -46,11 +46,6 @@ jobs:
           args: -p "reth-scroll-*" -p "scroll-alloy-*" --locked
           partition: 1
           total_partitions: 1
-        # issue
-        # - type: book
-        #   args: --manifest-path book/sources/Cargo.toml
-        #   partition: 1
-        #   total_partitions: 1
     timeout-minutes: 30
     steps:
       - name: Free up disk space
diff --git a/.gitignore b/.gitignore
index 7ec2e506cf3..a057ce1c628 100644
--- a/.gitignore
+++ b/.gitignore
@@ -55,5 +55,8 @@ rustc-ice-*
 # Book sources should be able to build with the latest version
 book/sources/Cargo.lock

+# vocs node_modules
+docs/vocs/node_modules
+
 # Cargo chef recipe file
 recipe.json
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 100644
index 00000000000..99282fbf864
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1,314 @@
+# Reth Development Guide for AI Agents
+
+This guide provides comprehensive instructions for AI agents working on the Reth codebase. It covers the architecture, development workflows, and critical guidelines for effective contributions.
+
+## Project Overview
+
+Reth is a high-performance Ethereum execution client written in Rust, focusing on modularity, performance, and contributor-friendliness. The codebase is organized into well-defined crates with clear boundaries and responsibilities.
+
+## Architecture Overview
+
+### Core Components
+
+1. **Consensus (`crates/consensus/`)**: Validates blocks according to Ethereum consensus rules
+2. **Storage (`crates/storage/`)**: Hybrid database using MDBX + static files for optimal performance
+3. **Networking (`crates/net/`)**: P2P networking stack with discovery, sync, and transaction propagation
+4. **RPC (`crates/rpc/`)**: JSON-RPC server supporting all standard Ethereum APIs
+5. **Execution (`crates/evm/`, `crates/ethereum/`)**: Transaction execution and state transitions
+6. **Pipeline (`crates/stages/`)**: Staged sync architecture for blockchain synchronization
+7. **Trie (`crates/trie/`)**: Merkle Patricia Trie implementation with parallel state root computation
+8. **Node Builder (`crates/node/`)**: High-level node orchestration and configuration
+9. **The Consensus Engine (`crates/engine/`)**: Handles processing blocks received from the consensus layer with the Engine API (newPayload, forkchoiceUpdated)
+
+### Key Design Principles
+
+- **Modularity**: Each crate can be used as a standalone library
+- **Performance**: Extensive use of parallelism, memory-mapped I/O, and optimized data structures
+- **Extensibility**: Traits and generic types allow for different implementations (Ethereum, Optimism, etc.)
+- **Type Safety**: Strong typing throughout with minimal use of dynamic dispatch (see the sketch below)
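+
+As an illustration of the last two principles, here is a minimal sketch (hypothetical trait and type names, not actual reth APIs) of how trait bounds keep a component chain-agnostic while staying on static dispatch:
+
+```rust
+/// Hypothetical stand-in for a hardfork-schedule trait.
+trait HardforkSchedule {
+    /// Returns true if Shanghai is active at the given block number.
+    fn is_shanghai_active(&self, block: u64) -> bool;
+}
+
+/// Generic component: works for any chain that implements the trait,
+/// and every call is resolved at compile time (no vtable indirection).
+struct Validator<C: HardforkSchedule> {
+    spec: C,
+}
+
+impl<C: HardforkSchedule> Validator<C> {
+    fn max_init_code_size(&self, block: u64) -> Option<usize> {
+        // EIP-3860 caps initcode at 49152 bytes from Shanghai onward.
+        self.spec.is_shanghai_active(block).then_some(49152)
+    }
+}
+
+struct MainnetSchedule {
+    shanghai_block: u64,
+}
+
+impl HardforkSchedule for MainnetSchedule {
+    fn is_shanghai_active(&self, block: u64) -> bool {
+        block >= self.shanghai_block
+    }
+}
+
+fn main() {
+    // Mainnet activated Shanghai at block 17034870.
+    let v = Validator { spec: MainnetSchedule { shanghai_block: 17_034_870 } };
+    assert_eq!(v.max_init_code_size(18_000_000), Some(49152));
+}
+```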
+
+## Development Workflow
+
+### Code Style and Standards
+
+1. **Formatting**: Always use nightly rustfmt
+   ```bash
+   cargo +nightly fmt --all
+   ```
+
+2. **Linting**: Run clippy with all features
+   ```bash
+   RUSTFLAGS="-D warnings" cargo +nightly clippy --workspace --lib --examples --tests --benches --all-features --locked
+   ```
+
+3. **Testing**: Use nextest for faster test execution
+   ```bash
+   cargo nextest run --workspace
+   ```
+
+### Common Contribution Types
+
+Based on actual recent PRs, here are typical contribution patterns:
+
+#### 1. Small Bug Fixes (1-10 lines)
+Real example: Fixing beacon block root handling ([#16767](https://github.com/paradigmxyz/reth/pull/16767))
+```rust
+// Changed a single line to fix logic error
+- parent_beacon_block_root: parent.parent_beacon_block_root(),
++ parent_beacon_block_root: parent.parent_beacon_block_root().map(|_| B256::ZERO),
+```
+
+#### 2. Integration with Upstream Changes
+Real example: Integrating revm updates ([#16752](https://github.com/paradigmxyz/reth/pull/16752))
+```rust
+// Update code to use new APIs from dependencies
+- if self.fork_tracker.is_shanghai_activated() {
+-     if let Err(err) = transaction.ensure_max_init_code_size(MAX_INIT_CODE_BYTE_SIZE) {
++ if let Some(init_code_size_limit) = self.fork_tracker.max_initcode_size() {
++     if let Err(err) = transaction.ensure_max_init_code_size(init_code_size_limit) {
+```
+
+#### 3. Adding Comprehensive Tests
+Real example: ETH69 protocol tests ([#16759](https://github.com/paradigmxyz/reth/pull/16759))
+```rust
+#[tokio::test(flavor = "multi_thread")]
+async fn test_eth69_peers_can_connect() {
+    // Create test network with specific protocol versions
+    let p0 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth69.into()));
+    // Test connection and version negotiation
+}
+```
+
+#### 4. Making Components Generic
+Real example: Making EthEvmConfig generic over chainspec ([#16758](https://github.com/paradigmxyz/reth/pull/16758))
+```rust
+// Before: Hardcoded to ChainSpec
+- pub struct EthEvmConfig<EvmFactory = EthEvmFactory> {
+-     pub executor_factory: EthBlockExecutorFactory<RethReceiptBuilder, Arc<ChainSpec>, EvmFactory>,
+
+// After: Generic over any chain spec type
++ pub struct EthEvmConfig<C = ChainSpec, EvmFactory = EthEvmFactory>
++ where
++     C: EthereumHardforks,
++ {
++     pub executor_factory: EthBlockExecutorFactory<RethReceiptBuilder, Arc<C>, EvmFactory>,
+```
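+
+To make the gain from pattern 4 concrete, here is a compile-checked sketch (all names are hypothetical stand-ins, not the actual reth types) of what the call sites get: one config type that accepts any chain spec satisfying the bound:
+
+```rust
+use std::sync::Arc;
+
+// Stand-ins for the real trait and spec types.
+trait EthereumHardforks {}
+struct ChainSpec;   // mainnet-style spec
+struct CustomSpec;  // some other chain's spec
+impl EthereumHardforks for ChainSpec {}
+impl EthereumHardforks for CustomSpec {}
+
+struct EthEvmConfig<C: EthereumHardforks> {
+    chain_spec: Arc<C>,
+}
+
+impl<C: EthereumHardforks> EthEvmConfig<C> {
+    fn new(chain_spec: Arc<C>) -> Self {
+        Self { chain_spec }
+    }
+}
+
+fn main() {
+    // Both instantiations reuse the same implementation; no per-chain
+    // code paths are needed in the config itself.
+    let _mainnet = EthEvmConfig::new(Arc::new(ChainSpec));
+    let _custom = EthEvmConfig::new(Arc::new(CustomSpec));
+}
+```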
+
+#### 5. Resource Management Improvements
+Real example: ETL directory cleanup ([#16770](https://github.com/paradigmxyz/reth/pull/16770))
+```rust
+// Add cleanup logic on startup
++ if let Err(err) = fs::remove_dir_all(&etl_path) {
++     warn!(target: "reth::cli", ?etl_path, %err, "Failed to remove ETL path on launch");
++ }
+```
+
+#### 6. Feature Additions
+Real example: Sharded mempool support ([#16756](https://github.com/paradigmxyz/reth/pull/16756))
+```rust
+// Add new filtering policies for transaction announcements
+pub struct ShardedMempoolAnnouncementFilter<T> {
+    pub inner: T,
+    pub shard_bits: u8,
+    pub node_id: Option<PeerId>,
+}
+```
+
+### Testing Guidelines
+
+1. **Unit Tests**: Test individual functions and components
+2. **Integration Tests**: Test interactions between components
+3. **Benchmarks**: For performance-critical code
+4. **Fuzz Tests**: For parsing and serialization code
+5. **Property Tests**: For checking component correctness on a wide variety of inputs
+
+Example test structure:
+```rust
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_component_behavior() {
+        // Arrange
+        let component = Component::new();
+
+        // Act
+        let result = component.operation();
+
+        // Assert
+        assert_eq!(result, expected);
+    }
+}
+```
+
+### Performance Considerations
+
+1. **Avoid Allocations in Hot Paths**: Use references and borrowing
+2. **Parallel Processing**: Use rayon for CPU-bound parallel work
+3. **Async/Await**: Use tokio for I/O-bound operations
+4. **File Operations**: Use `reth_fs_util` instead of `std::fs` for better error handling
+
+### Common Pitfalls
+
+1. **Don't Block Async Tasks**: Use `spawn_blocking` for CPU-intensive work or for work that does a lot of blocking I/O
+2. **Handle Errors Properly**: Use the `?` operator and proper error types
+
+### What to Avoid
+
+Based on PR patterns, avoid:
+
+1. **Large, sweeping changes**: Keep PRs focused and reviewable
+2. **Mixing unrelated changes**: One logical change per PR
+3. **Ignoring CI failures**: All checks must pass
+4. **Incomplete implementations**: Finish features before submitting
+5. **Modifying libmdbx sources**: Never modify files in `crates/storage/libmdbx-rs/mdbx-sys/libmdbx/` - this is vendored third-party code
+
+### CI Requirements
+
+Before submitting changes, ensure:
+
+1. **Format Check**: `cargo +nightly fmt --all --check`
+2. **Clippy**: No warnings with `RUSTFLAGS="-D warnings"`
+3. **Tests Pass**: All unit and integration tests
+4. **Documentation**: Update relevant docs and add doc comments with `cargo docs --document-private-items`
+5. **Commit Messages**: Follow conventional format (feat:, fix:, chore:, etc.)
+
+### Opening PRs
+
+Label PRs appropriately: first check the available labels, then apply the relevant ones:
+* when changes are RPC related, add the A-rpc label
+* when changes are docs related, add the C-docs label
+* when changes are optimism related (e.g. a new feature or exclusive changes to crates/optimism), add the A-op-reth label
+* ... and so on; check the available labels for more options.
+* if tasked to open a PR, ensure that all changes are properly formatted: `cargo +nightly fmt --all`
+
+If changes in reth include changes to dependencies, run `zepter` and `make lint-toml` before finalizing the PR. Assume the `zepter` binary is installed.
+
+### Debugging Tips
+
+1. **Logging**: Use the `tracing` crate with appropriate levels
+   ```rust
+   tracing::debug!(target: "reth::component", ?value, "description");
+   ```
+
+2. **Metrics**: Add metrics for monitoring
+   ```rust
+   metrics::counter!("reth_component_operations").increment(1);
+   ```
+
+3. **Test Isolation**: Use separate test databases/directories (see the sketch after this list)
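+
+A minimal sketch of tip 3, assuming the `tempfile` crate (`open_db` is a hypothetical helper, not a reth API): each test gets its own throwaway directory, so concurrently running tests never share database state:
+
+```rust
+#[cfg(test)]
+mod tests {
+    use tempfile::TempDir;
+
+    /// Hypothetical helper that would open a database environment at `path`.
+    fn open_db(_path: &std::path::Path) { /* open an MDBX env here */ }
+
+    #[test]
+    fn isolated_database_per_test() {
+        // The directory is deleted automatically when `dir` is dropped.
+        let dir = TempDir::new().expect("create temp dir");
+        open_db(dir.path());
+    }
+}
+```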
+
+### Finding Where to Contribute
+
+1. **Check Issues**: Look for issues labeled `good-first-issue` or `help-wanted`
+2. **Review TODOs**: Search for `TODO` comments in the codebase
+3. **Improve Tests**: Areas with low test coverage are good targets
+4. **Documentation**: Improve code comments and documentation
+5. **Performance**: Profile and optimize hot paths (with benchmarks)
+
+### Common PR Patterns
+
+#### Small, Focused Changes
+Most PRs change only 1-5 files. Examples:
+- Single-line bug fixes
+- Adding a missing trait implementation
+- Updating error messages
+- Adding test cases for edge conditions
+
+#### Integration Work
+When dependencies update (especially revm), code needs updating:
+- Check for breaking API changes
+- Update to use new features (like EIP implementations)
+- Ensure compatibility with new versions
+
+#### Test Improvements
+Tests often need expansion for:
+- New protocol versions (ETH68, ETH69)
+- Edge cases in state transitions
+- Network behavior under specific conditions
+- Concurrent operations
+
+#### Making Code More Generic
+Common refactoring pattern:
+- Replace concrete types with generics
+- Add trait bounds for flexibility
+- Enable reuse across different chain types (Ethereum, Optimism)
+
+### Example Contribution Workflow
+
+Let's say you want to fix a bug where external IP resolution fails on startup:
+
+1. **Create a branch**:
+   ```bash
+   git checkout -b fix-external-ip-resolution
+   ```
+
+2. **Find the relevant code**:
+   ```bash
+   # Search for IP resolution code
+   rg "external.*ip" --type rust
+   ```
+
+3. **Reason about the problem; once it is identified, make the fix**:
+   ```rust
+   // In crates/net/discv4/src/lib.rs
+   pub fn resolve_external_ip() -> Option<IpAddr> {
+       // Add fallback mechanism
+       nat::external_ip()
+           .or_else(|| nat::external_ip_from_stun())
+           .or_else(|| Some(DEFAULT_IP))
+   }
+   ```
+
+4. **Add a test**:
+   ```rust
+   #[test]
+   fn test_external_ip_fallback() {
+       // Test that resolution has proper fallbacks
+   }
+   ```
+
+5. **Run checks**:
+   ```bash
+   cargo +nightly fmt --all
+   cargo clippy --all-features
+   cargo test -p reth-discv4
+   ```
+
+6. **Commit with clear message**:
+   ```bash
+   git commit -m "fix: add fallback for external IP resolution
+
+   Previously, node startup could fail if external IP resolution
+   failed. This adds fallback mechanisms to ensure the node can
+   always start with a reasonable default."
+   ```
+
+## Quick Reference
+
+### Essential Commands
+
+```bash
+# Format code
+cargo +nightly fmt --all
+
+# Run lints
+RUSTFLAGS="-D warnings" cargo +nightly clippy --workspace --all-features --locked
+
+# Run tests
+cargo nextest run --workspace
+
+# Run specific benchmark
+cargo bench --bench bench_name
+
+# Build optimized binary
+cargo build --release --features "jemalloc asm-keccak"
+
+# Check compilation for all features
+cargo check --workspace --all-features
+
+# Check documentation
+cargo docs --document-private-items
+```
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 53f5c9075bc..a5d3775cd0f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -55,7 +55,7 @@ If you have reviewed existing documentation and still have questions, or you are
 **opening a discussion**. This repository comes with a discussions board where you can also ask for help. Click the
 "Discussions" tab at the top.
-As Reth is still in heavy development, the documentation can be a bit scattered. The [Reth Book][reth-book] is our +As Reth is still in heavy development, the documentation can be a bit scattered. The [Reth Docs][reth-docs] is our current best-effort attempt at keeping up-to-date information. ### Submitting a bug report @@ -235,7 +235,7 @@ _Adapted from the [Foundry contributing guide][foundry-contributing]_. [dev-tg]: https://t.me/paradigm_reth -[reth-book]: https://github.com/paradigmxyz/reth/tree/main/book +[reth-docs]: https://github.com/paradigmxyz/reth/tree/main/docs [mcve]: https://stackoverflow.com/help/mcve diff --git a/Cargo.lock b/Cargo.lock index 6a580f4a03f..aa682bd2af5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,15 +112,16 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad451f9a70c341d951bca4e811d74dbe1e193897acd17e9dbac1353698cc430b" +checksum = "d8b77018eec2154eb158869f9f2914a3ea577adf87b11be2764d4795d5ccccf7" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-serde", "alloy-trie", + "alloy-tx-macros", "arbitrary", "auto_impl", "c-kzg", @@ -129,7 +130,7 @@ dependencies = [ "k256", "once_cell", "rand 0.8.5", - "secp256k1", + "secp256k1 0.30.0", "serde", "serde_with", "thiserror 2.0.12", @@ -152,9 +153,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebf25443920ecb9728cb087fe4dc04a0b290bd6ac85638c58fe94aba70f1a44e" +checksum = "049ed4836d368929d7c5e206bab2e8d92f00524222edc0026c6bf2a3cb8a02d5" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -168,14 +169,15 @@ dependencies = [ "alloy-transport", "futures", "futures-util", + "serde_json", "thiserror 2.0.12", ] [[package]] name = "alloy-dyn-abi" -version = "1.1.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18cc14d832bc3331ca22a1c7819de1ede99f58f61a7d123952af7dde8de124a6" +checksum = "7b95b3deca680efc7e9cba781f1a1db352fa1ea50e6384a514944dcf4419e652" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -234,9 +236,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3056872f6da48046913e76edb5ddced272861f6032f09461aea1a2497be5ae5d" +checksum = "33d134f3ac4926124eaf521a1031d11ea98816df3d39fc446fcfd6b36884603f" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -257,14 +259,15 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.10.0" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "394b09cf3a32773eedf11828987f9c72dfa74545040be0422e3f5f09a2a3fab9" +checksum = "ff5aae4c6dc600734b206b175f3200085ee82dcdaa388760358830a984ca9869" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-hardforks", "alloy-primitives", + "alloy-rpc-types-eth", "alloy-sol-types", "auto_impl", "derive_more", @@ -276,9 +279,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c98fb40f07997529235cc474de814cd7bd9de561e101716289095696c0e4639d" +checksum = "fb1c2792605e648bdd1fddcfed8ce0d39d3db495c71d2240cb53df8aee8aea1f" dependencies = [ "alloy-eips", "alloy-primitives", @@ -289,9 +292,9 @@ dependencies = [ [[package]] name = 
"alloy-hardforks" -version = "0.2.3" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6b8067561eb8f884b215ace4c962313c5467e47bde6b457c8c51e268fb5d99" +checksum = "4ce138b29a2f8e7ed97c064af8359dfa6559c12cba5e821ae4eb93081a56557e" dependencies = [ "alloy-chains", "alloy-eip2124", @@ -303,9 +306,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "1.1.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ccaa79753d7bf15f06399ea76922afbfaf8d18bebed9e8fc452984b4a90dcc9" +checksum = "15516116086325c157c18261d768a20677f0f699348000ed391d4ad0dcb82530" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -315,12 +318,13 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc08b31ebf9273839bd9a01f9333cbb7a3abb4e820c312ade349dd18bdc79581" +checksum = "31cfdacfeb6b6b40bf6becf92e69e575c68c9f80311c3961d019e29c0b8d6be2" dependencies = [ "alloy-primitives", "alloy-sol-types", + "http", "serde", "serde_json", "thiserror 2.0.12", @@ -329,9 +333,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed117b08f0cc190312bf0c38c34cf4f0dabfb4ea8f330071c587cd7160a88cb2" +checksum = "de68a3f09cd9ab029cf87d08630e1336ca9a530969689fd151d505fa888a2603" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -355,9 +359,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7162ff7be8649c0c391f4e248d1273e85c62076703a1f3ec7daf76b283d886d" +checksum = "fcc2689c8addfc43461544d07a6f5f3a3e1f5f4efae61206cb5783dc383cfc8f" dependencies = [ "alloy-consensus", "alloy-eips", @@ -368,9 +372,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.10.0" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f32538cc243ec5d4603da9845cc2f5254c6a3a78e82475beb1a2a1de6c0d36c" +checksum = "588a87b77b30452991151667522d2f2f724cec9c2ec6602e4187bc97f66d8095" dependencies = [ "alloy-consensus", "alloy-eips", @@ -396,9 +400,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "1.1.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18c35fc4b03ace65001676358ffbbaefe2a2b27ee50fe777c345082c7c888be8" +checksum = "6177ed26655d4e84e00b65cb494d4e0b8830e7cae7ef5d63087d445a2600fb55" dependencies = [ "alloy-rlp", "arbitrary", @@ -427,9 +431,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d84eba1fd8b6fe8b02f2acd5dd7033d0f179e304bd722d11e817db570d1fa6c4" +checksum = "8ced931220f547d30313530ad315654b7862ef52631c90ab857d792865f84a7d" dependencies = [ "alloy-chains", "alloy-consensus", @@ -455,6 +459,7 @@ dependencies = [ "either", "futures", "futures-utils-wasm", + "http", "lru 0.13.0", "parking_lot", "pin-project", @@ -470,9 +475,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8550f7306e0230fc835eb2ff4af0a96362db4b6fc3f25767d161e0ad0ac765bf" +checksum = 
"8e37d6cf286fd30bacac525ab1491f9d1030d39ecce237821f2a5d5922eb9a37" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -513,9 +518,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "518a699422a3eab800f3dac2130d8f2edba8e4fff267b27a9c7dc6a2b0d313ee" +checksum = "6d1d1eac6e48b772c7290f0f79211a0e822a38b057535b514cc119abd857d5b6" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -541,9 +546,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c000cab4ec26a4b3e29d144e999e1c539c2fa0abed871bf90311eb3466187ca8" +checksum = "8589c6ae318fcc9624d42e9166f7f82b630d9ad13e180c52addf20b93a8af266" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -554,9 +559,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ebdc864f573645c5288370c208912b85b5cacc8025b700c50c2b74d06ab9830" +checksum = "0182187bcbe47f3a737f5eced007b7788d4ed37aba19d43fd3df123169b3b05e" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -566,9 +571,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8abecc34549a208b5f91bc7f02df3205c36e2aa6586f1d9375c3382da1066b3b" +checksum = "754d5062b594ed300a3bb0df615acb7bacdbd7bd1cd1a6e5b59fb936c5025a13" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -589,9 +594,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "241aba7808bddc3ad1c6228e296a831f326f89118b1017012090709782a13334" +checksum = "32c1ddf8fb2e41fa49316185d7826ed034f55819e0017e65dc6715f911b8a1ee" dependencies = [ "alloy-eips", "alloy-primitives", @@ -607,9 +612,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c832f2e851801093928dbb4b7bd83cd22270faf76b2e080646b806a285c8757" +checksum = "7c81ae89a04859751bac72e5e73459bceb3e6a4d2541f2f1374e35be358fd171" dependencies = [ "alloy-primitives", "serde", @@ -617,9 +622,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cab52691970553d84879d777419fa7b6a2e92e9fe8641f9324cc071008c2f656" +checksum = "662b720c498883427ffb9f5e38c7f02b56ac5c0cdd60b457e88ce6b6a20b9ce9" dependencies = [ "alloy-consensus", "alloy-eips", @@ -638,9 +643,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcaf7dff0fdd756a714d58014f4f8354a1706ebf9fa2cf73431e0aeec3c9431e" +checksum = "bb082c325bdfd05a7c71f52cd1060e62491fbf6edf55962720bdc380847b0784" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -659,9 +664,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18bd1c5d7b9f3f1caeeaa1c082aa28ba7ce2d67127b12b2a9b462712c8f6e1c5" 
+checksum = "84c1b50012f55de4a6d58ee9512944089fa61a835e6fe3669844075bb6e0312e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -674,9 +679,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3507a04e868dd83219ad3cd6a8c58aefccb64d33f426b3934423a206343e84" +checksum = "eaf52c884c7114c5d1f1f2735634ba0f6579911427281fb02cbd5cb8147723ca" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -688,9 +693,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eec36272621c3ac82b47dd77f0508346687730b1c2e3e10d3715705c217c0a05" +checksum = "5e4fd0df1af2ed62d02e7acbc408a162a06f30cb91550c2ec34b11c760cdc0ba" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -700,9 +705,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "730e8f2edf2fc224cabd1c25d090e1655fa6137b2e409f92e5eec735903f1507" +checksum = "c7f26c17270c2ac1bd555c4304fe067639f0ddafdd3c8d07a200b2bb5a326e03" dependencies = [ "alloy-primitives", "arbitrary", @@ -712,9 +717,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b0d2428445ec13edc711909e023d7779618504c4800be055a5b940025dbafe3" +checksum = "5d9fd649d6ed5b8d7e5014e01758efb937e8407124b182a7f711bf487a1a2697" dependencies = [ "alloy-primitives", "async-trait", @@ -727,9 +732,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14fe6fedb7fe6e0dfae47fe020684f1d8e063274ef14bca387ddb7a6efa8ec1" +checksum = "c288c5b38be486bb84986701608f5d815183de990e884bb747f004622783e125" dependencies = [ "alloy-consensus", "alloy-network", @@ -745,9 +750,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "1.1.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8612e0658964d616344f199ab251a49d48113992d81b92dab93ed855faa66383" +checksum = "a14f21d053aea4c6630687c2f4ad614bed4c81e14737a9b904798b24f30ea849" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -759,9 +764,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "1.1.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a384edac7283bc4c010a355fb648082860c04b826bb7a814c45263c8f304c74" +checksum = "34d99282e7c9ef14eb62727981a985a01869e586d1dec729d3bb33679094c100" dependencies = [ "alloy-sol-macro-input", "const-hex", @@ -777,9 +782,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "1.1.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd588c2d516da7deb421b8c166dc60b7ae31bca5beea29ab6621fcfa53d6ca5" +checksum = "eda029f955b78e493360ee1d7bd11e1ab9f2a220a5715449babc79d6d0a01105" dependencies = [ "const-hex", "dunce", @@ -793,9 +798,9 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "1.1.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86ddeb70792c7ceaad23e57d52250107ebbb86733e52f4a25d8dc1abc931837" +checksum = 
"10db1bd7baa35bc8d4a1b07efbf734e73e5ba09f2580fb8cee3483a36087ceb2" dependencies = [ "serde", "winnow", @@ -803,9 +808,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "1.1.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "584cb97bfc5746cb9dcc4def77da11694b5d6d7339be91b7480a6a68dc129387" +checksum = "58377025a47d8b8426b3e4846a251f2c1991033b27f517aade368146f6ab1dfe" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -815,9 +820,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a712bdfeff42401a7dd9518f72f617574c36226a9b5414537fedc34350b73bf9" +checksum = "e1b790b89e31e183ae36ac0a1419942e21e94d745066f5281417c3e4299ea39e" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -838,9 +843,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ea5a76d7f2572174a382aedf36875bedf60bcc41116c9f031cf08040703a2dc" +checksum = "f643645a33a681d09ac1ca2112014c2ca09c68aad301da4400484d59c746bc70" dependencies = [ "alloy-json-rpc", "alloy-rpc-types-engine", @@ -859,9 +864,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "606af17a7e064d219746f6d2625676122c79d78bf73dfe746d6db9ecd7dbcb85" +checksum = "1c2d843199d0bdb4cbed8f1b6f2da7f68bcb9c5da7f57e789009e4e7e76d1bec" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -879,9 +884,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.0.9" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0c6f9b37cd8d44aab959613966cc9d4d7a9b429c575cec43b3e5b46ea109a79" +checksum = "3d27aae8c7a6403d3d3e874ad2eeeadbf46267b614bac2d4d82786b9b8496464" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -897,9 +902,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "983d99aa81f586cef9dae38443245e585840fcf0fc58b09aee0b1f27aed1d500" +checksum = "bada1fc392a33665de0dc50d401a3701b62583c655e3522a323490a5da016962" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -915,6 +920,19 @@ dependencies = [ "tracing", ] +[[package]] +name = "alloy-tx-macros" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4ef40a046b9bf141afc440cef596c79292708aade57c450dc74e843270fd8e7" +dependencies = [ + "alloy-primitives", + "darling", + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -1450,6 +1468,12 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "az" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b7e4c2464d97fe331d41de9d5db0def0a96f4d823b8b32a2efd503578988973" + [[package]] name = "backon" version = "1.5.0" @@ -1672,9 +1696,9 @@ dependencies = [ [[package]] name = "blst" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47c79a94619fade3c0b887670333513a67ac28a6a7e653eb260bf0d4103db38d" +checksum = 
"4fd49896f12ac9b6dcd7a5998466b9b58263a695a3dd1ecc1aaca2e12a90b080" dependencies = [ "cc", "glob", @@ -2276,7 +2300,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" dependencies = [ "lazy_static", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -2765,9 +2789,9 @@ dependencies = [ [[package]] name = "derive-where" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e73f2692d4bd3cac41dca28934a39894200c9fabf49586d77d0e5954af1d7902" +checksum = "510c292c8cf384b1a340b816a9a6cf2599eb8f566a44949024af88418000c50b" dependencies = [ "proc-macro2", "quote", @@ -3023,7 +3047,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3105,7 +3129,7 @@ dependencies = [ "k256", "log", "rand 0.8.5", - "secp256k1", + "secp256k1 0.30.0", "serde", "sha3", "zeroize", @@ -3292,7 +3316,7 @@ dependencies = [ "reth-primitives-traits", "reth-provider", "reth-tracing", - "secp256k1", + "secp256k1 0.30.0", "serde", "serde_json", "thiserror 2.0.12", @@ -3403,12 +3427,14 @@ dependencies = [ "reth-op", "reth-optimism-forks", "reth-payload-builder", + "reth-primitives-traits", "reth-rpc-api", "reth-rpc-engine-api", "revm", "revm-primitives", "serde", "test-fuzz", + "thiserror 2.0.12", ] [[package]] @@ -3516,7 +3542,7 @@ dependencies = [ "reth-ecies", "reth-ethereum", "reth-network-peers", - "secp256k1", + "secp256k1 0.30.0", "tokio", ] @@ -3576,7 +3602,7 @@ dependencies = [ "reth-discv4", "reth-ethereum", "reth-tracing", - "secp256k1", + "secp256k1 0.30.0", "serde_json", "tokio", "tokio-stream", @@ -3621,7 +3647,7 @@ dependencies = [ [[package]] name = "exex-subscription" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "clap", @@ -4064,6 +4090,16 @@ dependencies = [ "web-sys", ] +[[package]] +name = "gmp-mpfr-sys" +version = "1.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c66d61197a68f6323b9afa616cf83d55d69191e1bf364d4eb7d35ae18defe776" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + [[package]] name = "group" version = "0.13.0" @@ -4446,7 +4482,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.58.0", + "windows-core 0.61.2", ] [[package]] @@ -5956,14 +5992,14 @@ dependencies = [ [[package]] name = "nybbles" -version = "0.3.4" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +checksum = "11d51b0175c49668a033fe7cc69080110d9833b291566cdf332905f3ad9c68a0" dependencies = [ "alloy-rlp", "arbitrary", - "const-hex", "proptest", + "ruint", "serde", "smallvec", ] @@ -6001,9 +6037,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "op-alloy-consensus" -version = "0.17.2" +version = "0.18.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2423a125ef2daa0d15dacc361805a0b6f76d6acfc6e24a1ff6473582087fe75" +checksum = "a8719d9b783b29cfa1cf8d591b894805786b9ab4940adc700a57fd0d5b721cf5" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6027,9 +6063,9 @@ checksum = "a79f352fc3893dcd670172e615afef993a41798a1d3fc0db88a3e60ef2e70ecc" [[package]] name = "op-alloy-network" -version = "0.17.2" +version = "0.18.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8bac5140ed9a01112a1c63866da3c38c74eb387b95917d0f304a4bd4ee825986" +checksum = "839a7a1826dc1d38fdf9c6d30d1f4ed8182c63816c97054e5815206f1ebf08c7" dependencies = [ "alloy-consensus", "alloy-network", @@ -6043,9 +6079,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.17.2" +version = "0.18.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64cb0771602eb2b25e38817d64cd0f841ff07ef9df1e9ce96a53c1742776e874" +checksum = "6b9d3de5348e2b34366413412f1f1534dc6b10d2cf6e8e1d97c451749c0c81c0" dependencies = [ "alloy-primitives", "jsonrpsee", @@ -6053,9 +6089,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.17.2" +version = "0.18.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f82a315004b6720fbf756afdcfdc97ea7ddbcdccfec86ea7df7562bb0da29a3f" +checksum = "9640f9e78751e13963762a4a44c846e9ec7974b130c29a51706f40503fe49152" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6072,9 +6108,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.17.2" +version = "0.18.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47aea08d8ad3f533df0c5082d3e93428a4c57898b7ade1be928fa03918f22e71" +checksum = "6a4559d84f079b3fdfd01e4ee0bb118025e92105fbb89736f5d77ab3ca261698" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6085,6 +6121,7 @@ dependencies = [ "arbitrary", "derive_more", "ethereum_ssz", + "ethereum_ssz_derive", "op-alloy-consensus", "serde", "snap", @@ -6093,7 +6130,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.4.8" +version = "1.5.0" dependencies = [ "clap", "reth-cli-util", @@ -6111,8 +6148,8 @@ dependencies = [ [[package]] name = "op-revm" -version = "5.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "7.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "auto_impl", "once_cell", @@ -6690,17 +6727,17 @@ dependencies = [ [[package]] name = "proptest" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" +checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f" dependencies = [ "bit-set", "bit-vec", "bitflags 2.9.1", "lazy_static", "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand 0.9.1", + "rand_chacha 0.9.0", "rand_xorshift", "regex-syntax 0.8.5", "rusty-fork", @@ -6974,11 +7011,11 @@ dependencies = [ [[package]] name = "rand_xorshift" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" dependencies = [ - "rand_core 0.6.4", + "rand_core 0.9.3", ] [[package]] @@ -7192,7 +7229,7 @@ checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3" [[package]] name = "reth" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-rpc-types", "aquamarine", @@ -7226,9 +7263,9 @@ dependencies = [ "reth-rpc", "reth-rpc-api", "reth-rpc-builder", + "reth-rpc-convert", "reth-rpc-eth-types", "reth-rpc-server-types", - "reth-rpc-types-compat", "reth-tasks", "reth-tokio-util", "reth-transaction-pool", @@ -7238,9 +7275,38 @@ dependencies = [ "tracing", ] +[[package]] +name = 
"reth-alloy-provider" +version = "1.5.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-network", + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types", + "alloy-rpc-types-engine", + "reth-chainspec", + "reth-db-api", + "reth-errors", + "reth-execution-types", + "reth-node-types", + "reth-primitives", + "reth-provider", + "reth-prune-types", + "reth-rpc-convert", + "reth-stages-types", + "reth-storage-api", + "reth-trie", + "revm", + "revm-primitives", + "tokio", + "tracing", +] + [[package]] name = "reth-basic-payload-builder" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7263,7 +7329,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -7301,7 +7367,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7332,7 +7398,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7352,7 +7418,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-genesis", "clap", @@ -7365,7 +7431,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.4.8" +version = "1.5.0" dependencies = [ "ahash", "alloy-chains", @@ -7431,7 +7497,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "reth-trie-db", - "secp256k1", + "secp256k1 0.30.0", "serde", "serde_json", "tar", @@ -7443,7 +7509,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.4.8" +version = "1.5.0" dependencies = [ "reth-tasks", "tokio", @@ -7452,7 +7518,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7462,7 +7528,7 @@ dependencies = [ "rand 0.8.5", "rand 0.9.1", "reth-fs-util", - "secp256k1", + "secp256k1 0.30.0", "serde", "snmalloc-rs", "thiserror 2.0.12", @@ -7472,7 +7538,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7496,7 +7562,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.4.8" +version = "1.5.0" dependencies = [ "convert_case", "proc-macro2", @@ -7507,7 +7573,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "eyre", @@ -7524,7 +7590,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7536,7 +7602,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7550,7 +7616,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7573,7 +7639,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7606,7 +7672,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7637,7 +7703,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", 
"alloy-genesis", @@ -7666,7 +7732,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7683,7 +7749,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7700,7 +7766,7 @@ dependencies = [ "reth-network-peers", "reth-tracing", "schnellru", - "secp256k1", + "secp256k1 0.30.0", "serde", "thiserror 2.0.12", "tokio", @@ -7710,7 +7776,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7727,7 +7793,7 @@ dependencies = [ "reth-metrics", "reth-network-peers", "reth-tracing", - "secp256k1", + "secp256k1 0.30.0", "thiserror 2.0.12", "tokio", "tracing", @@ -7735,7 +7801,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7752,7 +7818,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "schnellru", - "secp256k1", + "secp256k1 0.30.0", "serde", "serde_with", "thiserror 2.0.12", @@ -7763,7 +7829,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7802,7 +7868,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7849,7 +7915,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.4.8" +version = "1.5.0" dependencies = [ "aes", "alloy-primitives", @@ -7866,7 +7932,7 @@ dependencies = [ "pin-project", "rand 0.8.5", "reth-network-peers", - "secp256k1", + "secp256k1 0.30.0", "sha2 0.10.9", "sha3", "thiserror 2.0.12", @@ -7879,7 +7945,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7903,7 +7969,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7927,7 +7993,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.4.8" +version = "1.5.0" dependencies = [ "futures", "pin-project", @@ -7957,7 +8023,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8003,7 +8069,7 @@ dependencies = [ "reth-prune", "reth-prune-types", "reth-revm", - "reth-rpc-types-compat", + "reth-rpc-convert", "reth-stages", "reth-stages-api", "reth-static-file", @@ -8026,7 +8092,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -8052,7 +8118,7 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8074,7 +8140,7 @@ dependencies = [ [[package]] name = "reth-era-downloader" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "bytes", @@ -8091,9 +8157,11 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.4.8" +version = "1.5.0" dependencies = [ + "alloy-consensus", "alloy-primitives", + "alloy-rlp", "bytes", "eyre", "futures", @@ -8103,19 +8171,22 @@ dependencies = [ "reth-db-common", "reth-era", "reth-era-downloader", + "reth-ethereum-primitives", "reth-etl", "reth-fs-util", 
"reth-primitives-traits", "reth-provider", + "reth-stages-types", "reth-storage-api", "tempfile", "tokio", + "tokio-util", "tracing", ] [[package]] name = "reth-errors" -version = "1.4.8" +version = "1.5.0" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -8125,7 +8196,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8150,7 +8221,7 @@ dependencies = [ "reth-network-peers", "reth-primitives-traits", "reth-tracing", - "secp256k1", + "secp256k1 0.30.0", "serde", "snap", "test-fuzz", @@ -8163,7 +8234,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8188,12 +8259,13 @@ dependencies = [ [[package]] name = "reth-ethereum" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", "reth-chainspec", "reth-cli-util", + "reth-codecs", "reth-consensus", "reth-consensus-common", "reth-db", @@ -8221,11 +8293,12 @@ dependencies = [ "reth-tasks", "reth-transaction-pool", "reth-trie", + "reth-trie-db", ] [[package]] name = "reth-ethereum-cli" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8284,7 +8357,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8300,7 +8373,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8318,7 +8391,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -8331,7 +8404,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8357,7 +8430,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8374,7 +8447,7 @@ dependencies = [ "reth-codecs", "reth-primitives-traits", "reth-zstd-compressors", - "secp256k1", + "secp256k1 0.30.0", "serde", "serde_with", "test-fuzz", @@ -8382,7 +8455,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "rayon", @@ -8392,7 +8465,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8418,7 +8491,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8435,12 +8508,12 @@ dependencies = [ "reth-primitives-traits", "reth-testing-utils", "revm", - "secp256k1", + "secp256k1 0.30.0", ] [[package]] name = "reth-execution-errors" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-evm", "alloy-primitives", @@ -8452,7 +8525,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8472,7 +8545,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8506,7 +8579,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "rmp-serde", - "secp256k1", + "secp256k1 0.30.0", 
"tempfile", "thiserror 2.0.12", "tokio", @@ -8516,7 +8589,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "eyre", @@ -8548,7 +8621,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8565,7 +8638,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.4.8" +version = "1.5.0" dependencies = [ "serde", "serde_json", @@ -8574,7 +8647,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8601,7 +8674,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.4.8" +version = "1.5.0" dependencies = [ "bytes", "futures", @@ -8623,7 +8696,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.4.8" +version = "1.5.0" dependencies = [ "bitflags 2.9.1", "byteorder", @@ -8642,7 +8715,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.4.8" +version = "1.5.0" dependencies = [ "bindgen", "cc", @@ -8650,7 +8723,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.4.8" +version = "1.5.0" dependencies = [ "futures", "metrics", @@ -8661,14 +8734,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.4.8" +version = "1.5.0" dependencies = [ "futures-util", "if-addrs", @@ -8682,7 +8755,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8729,7 +8802,7 @@ dependencies = [ "reth-transaction-pool", "rustc-hash 2.1.1", "schnellru", - "secp256k1", + "secp256k1 0.30.0", "serde", "smallvec", "tempfile", @@ -8743,7 +8816,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -8767,7 +8840,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8789,14 +8862,14 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "alloy-rlp", "enr", "rand 0.8.5", "rand 0.9.1", - "secp256k1", + "secp256k1 0.30.0", "serde_json", "serde_with", "thiserror 2.0.12", @@ -8806,7 +8879,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -8819,7 +8892,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.4.8" +version = "1.5.0" dependencies = [ "anyhow", "bincode 1.3.3", @@ -8837,7 +8910,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -8860,7 +8933,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8915,7 +8988,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "reth-transaction-pool", - "secp256k1", + "secp256k1 0.30.0", "serde_json", "tempfile", "tokio", @@ -8925,7 +8998,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8954,15 +9027,15 @@ dependencies = 
[ "reth-network-peers", "reth-primitives-traits", "reth-prune-types", + "reth-rpc-convert", "reth-rpc-eth-types", "reth-rpc-server-types", - "reth-rpc-types-compat", "reth-stages-types", "reth-storage-api", "reth-storage-errors", "reth-tracing", "reth-transaction-pool", - "secp256k1", + "secp256k1 0.30.0", "serde", "shellexpand", "strum 0.27.1", @@ -8977,7 +9050,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-contract", @@ -9030,7 +9103,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9053,7 +9126,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.4.8" +version = "1.5.0" dependencies = [ "eyre", "http", @@ -9075,7 +9148,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "reth-chainspec", "reth-db-api", @@ -9087,10 +9160,11 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.4.8" +version = "1.5.0" dependencies = [ "reth-chainspec", "reth-cli-util", + "reth-codecs", "reth-consensus", "reth-consensus-common", "reth-db", @@ -9120,11 +9194,12 @@ dependencies = [ "reth-tasks", "reth-transaction-pool", "reth-trie", + "reth-trie-db", ] [[package]] name = "reth-optimism-chainspec" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9150,7 +9225,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9198,7 +9273,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9230,7 +9305,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9256,7 +9331,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -9266,7 +9341,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9326,7 +9401,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9364,7 +9439,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9383,7 +9458,7 @@ dependencies = [ "reth-primitives-traits", "reth-zstd-compressors", "rstest", - "secp256k1", + "secp256k1 0.30.0", "serde", "serde_json", "serde_with", @@ -9391,7 +9466,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9442,14 +9517,16 @@ dependencies = [ "reth-tasks", "reth-transaction-pool", "revm", + "serde_json", "thiserror 2.0.12", "tokio", + "tower", "tracing", ] [[package]] name = "reth-optimism-storage" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9467,7 +9544,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9504,7 +9581,7 @@ dependencies = [ [[package]] name = 
"reth-payload-builder" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9524,7 +9601,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.4.8" +version = "1.5.0" dependencies = [ "pin-project", "reth-payload-primitives", @@ -9535,7 +9612,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9555,7 +9632,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9564,7 +9641,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -9573,7 +9650,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9595,13 +9672,14 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-genesis", "alloy-primitives", "alloy-rlp", + "alloy-rpc-types-eth", "alloy-trie", "arbitrary", "auto_impl", @@ -9623,7 +9701,7 @@ dependencies = [ "revm-primitives", "revm-state", "scroll-alloy-consensus", - "secp256k1", + "secp256k1 0.30.0", "serde", "serde_json", "serde_with", @@ -9633,7 +9711,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9682,7 +9760,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9714,7 +9792,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -9733,7 +9811,7 @@ dependencies = [ [[package]] name = "reth-ress-protocol" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9759,7 +9837,7 @@ dependencies = [ [[package]] name = "reth-ress-provider" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9785,7 +9863,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9799,7 +9877,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -9851,11 +9929,11 @@ dependencies = [ "reth-provider", "reth-revm", "reth-rpc-api", + "reth-rpc-convert", "reth-rpc-engine-api", "reth-rpc-eth-api", "reth-rpc-eth-types", "reth-rpc-server-types", - "reth-rpc-types-compat", "reth-storage-api", "reth-tasks", "reth-testing-utils", @@ -9877,7 +9955,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "alloy-genesis", @@ -9904,7 +9982,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9923,7 +10001,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "alloy-network", @@ -9956,12 +10034,12 @@ dependencies = [ "reth-provider", "reth-rpc", "reth-rpc-api", + "reth-rpc-convert", "reth-rpc-engine-api", "reth-rpc-eth-api", 
"reth-rpc-eth-types", "reth-rpc-layer", "reth-rpc-server-types", - "reth-rpc-types-compat", "reth-storage-api", "reth-tasks", "reth-tracing", @@ -9976,9 +10054,35 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-rpc-convert" +version = "1.5.0" +dependencies = [ + "alloy-consensus", + "alloy-json-rpc", + "alloy-network", + "alloy-primitives", + "alloy-rpc-types-eth", + "jsonrpsee-types", + "op-alloy-consensus", + "op-alloy-rpc-types", + "op-revm", + "reth-evm", + "reth-optimism-primitives", + "reth-primitives-traits", + "reth-scroll-primitives", + "reth-storage-api", + "revm-context", + "revm-scroll", + "scroll-alloy-consensus", + "scroll-alloy-evm", + "scroll-alloy-rpc-types", + "thiserror 2.0.12", +] + [[package]] name = "reth-rpc-engine-api" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10014,11 +10118,12 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", "alloy-eips", + "alloy-evm", "alloy-json-rpc", "alloy-network", "alloy-primitives", @@ -10033,6 +10138,7 @@ dependencies = [ "jsonrpsee", "jsonrpsee-types", "parking_lot", + "reth-chain-state", "reth-chainspec", "reth-errors", "reth-evm", @@ -10041,9 +10147,9 @@ dependencies = [ "reth-payload-builder", "reth-primitives-traits", "reth-revm", + "reth-rpc-convert", "reth-rpc-eth-types", "reth-rpc-server-types", - "reth-rpc-types-compat", "reth-storage-api", "reth-tasks", "reth-transaction-pool", @@ -10056,10 +10162,11 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", + "alloy-evm", "alloy-primitives", "alloy-rpc-types-eth", "alloy-sol-types", @@ -10079,8 +10186,8 @@ dependencies = [ "reth-metrics", "reth-primitives-traits", "reth-revm", + "reth-rpc-convert", "reth-rpc-server-types", - "reth-rpc-types-compat", "reth-storage-api", "reth-tasks", "reth-transaction-pool", @@ -10098,7 +10205,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-rpc-types-engine", "http", @@ -10115,7 +10222,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10128,30 +10235,9 @@ dependencies = [ "strum 0.27.1", ] -[[package]] -name = "reth-rpc-types-compat" -version = "1.4.8" -dependencies = [ - "alloy-consensus", - "alloy-network", - "alloy-primitives", - "alloy-rpc-types-eth", - "jsonrpsee-types", - "op-alloy-consensus", - "op-alloy-rpc-types", - "reth-optimism-primitives", - "reth-primitives-traits", - "reth-scroll-primitives", - "reth-storage-api", - "scroll-alloy-consensus", - "scroll-alloy-rpc-types", - "serde", - "thiserror 2.0.12", -] - [[package]] name = "reth-scroll-chainspec" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -10174,7 +10260,7 @@ dependencies = [ [[package]] name = "reth-scroll-cli" -version = "1.4.8" +version = "1.5.0" dependencies = [ "clap", "eyre", @@ -10189,7 +10275,6 @@ dependencies = [ "reth-node-metrics", "reth-scroll-chainspec", "reth-scroll-evm", - "reth-scroll-node", "reth-scroll-primitives", "reth-tracing", "scroll-alloy-consensus", @@ -10198,7 +10283,7 @@ dependencies = [ [[package]] name = "reth-scroll-consensus" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10216,7 +10301,7 @@ dependencies = [ [[package]] name 
= "reth-scroll-engine-primitives" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10243,7 +10328,7 @@ dependencies = [ [[package]] name = "reth-scroll-evm" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10271,7 +10356,7 @@ dependencies = [ [[package]] name = "reth-scroll-forks" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -10284,7 +10369,7 @@ dependencies = [ [[package]] name = "reth-scroll-node" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -10335,7 +10420,7 @@ dependencies = [ [[package]] name = "reth-scroll-payload" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10365,7 +10450,7 @@ dependencies = [ [[package]] name = "reth-scroll-primitives" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10386,7 +10471,7 @@ dependencies = [ [[package]] name = "reth-scroll-rpc" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10403,6 +10488,7 @@ dependencies = [ "reth-primitives-traits", "reth-provider", "reth-rpc", + "reth-rpc-convert", "reth-rpc-eth-api", "reth-rpc-eth-types", "reth-scroll-chainspec", @@ -10422,7 +10508,7 @@ dependencies = [ [[package]] name = "reth-scroll-txpool" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10451,7 +10537,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10512,7 +10598,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10541,7 +10627,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -10558,7 +10644,7 @@ dependencies = [ [[package]] name = "reth-stateless" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10583,7 +10669,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "assert_matches", @@ -10607,7 +10693,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "clap", @@ -10619,7 +10705,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10642,7 +10728,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10657,7 +10743,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.4.8" +version = "1.5.0" dependencies = [ "auto_impl", "dyn-clone", @@ -10674,7 +10760,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10684,12 +10770,12 @@ dependencies = [ "rand 0.9.1", "reth-ethereum-primitives", "reth-primitives-traits", - "secp256k1", + "secp256k1 0.30.0", ] [[package]] name = "reth-tokio-util" -version = "1.4.8" +version = "1.5.0" dependencies = [ "tokio", "tokio-stream", @@ -10698,7 +10784,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.4.8" +version = "1.5.0" 
dependencies = [ "clap", "eyre", @@ -10712,7 +10798,7 @@ dependencies = [ [[package]] name = "reth-tracing-otlp" -version = "1.4.8" +version = "1.5.0" dependencies = [ "opentelemetry", "opentelemetry-otlp", @@ -10725,7 +10811,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10771,7 +10857,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10803,7 +10889,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -10835,7 +10921,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10861,7 +10947,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10890,7 +10976,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10920,17 +11006,40 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-trie-sparse-parallel" +version = "1.5.0" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "alloy-trie", + "arbitrary", + "assert_matches", + "itertools 0.14.0", + "proptest", + "proptest-arbitrary-interop", + "rand 0.8.5", + "rand 0.9.1", + "reth-execution-errors", + "reth-primitives-traits", + "reth-trie", + "reth-trie-common", + "reth-trie-sparse", + "smallvec", + "tracing", +] + [[package]] name = "reth-zstd-compressors" -version = "1.4.8" +version = "1.5.0" dependencies = [ "zstd", ] [[package]] name = "revm" -version = "24.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "26.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "revm-bytecode", "revm-context", @@ -10947,8 +11056,8 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "4.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "5.0.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "bitvec", "once_cell", @@ -10959,8 +11068,8 @@ dependencies = [ [[package]] name = "revm-context" -version = "5.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "7.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "cfg-if", "derive-where", @@ -10974,8 +11083,8 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "5.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "7.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -10989,8 +11098,8 @@ dependencies = [ [[package]] name = "revm-database" -version = "4.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = 
"6.0.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "alloy-eips", "revm-bytecode", @@ -11002,8 +11111,8 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "4.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "6.0.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "auto_impl", "revm-primitives", @@ -11013,10 +11122,11 @@ dependencies = [ [[package]] name = "revm-handler" -version = "5.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "7.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "auto_impl", + "derive-where", "revm-bytecode", "revm-context", "revm-context-interface", @@ -11030,10 +11140,11 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "5.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "7.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "auto_impl", + "either", "revm-context", "revm-database-interface", "revm-handler", @@ -11046,9 +11157,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.23.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b50ef375dbacefecfdacf8f02afc31df98acc5d8859a6f2b24d121ff2a740a8" +checksum = "2aabdffc06bdb434d9163e2d63b6fae843559afd300ea3fbeb113b8a0d8ec728" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -11066,8 +11177,8 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "20.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "22.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -11077,8 +11188,8 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "21.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "23.0.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -11095,14 +11206,15 @@ dependencies = [ "p256", "revm-primitives", "ripemd", - "secp256k1", + "rug", + "secp256k1 0.31.1", "sha2 0.10.9", ] [[package]] name = "revm-primitives" -version = "19.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "20.0.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "alloy-primitives", "num_enum", @@ -11112,7 +11224,7 @@ dependencies = [ [[package]] name = "revm-scroll" version = "0.1.0" -source = "git+https://github.com/scroll-tech/scroll-revm?rev=6c1942f9a8eaf7aae1807654c4ee99d771150fbd#6c1942f9a8eaf7aae1807654c4ee99d771150fbd" +source = "git+https://github.com/scroll-tech/scroll-revm?branch=feat%2Fv78#c0609bc9e8cb23aba8f560a82e040a49726cf760" 
dependencies = [ "auto_impl", "enumn", @@ -11125,8 +11237,8 @@ dependencies = [ [[package]] name = "revm-state" -version = "4.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "6.0.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "bitflags 2.9.1", "revm-bytecode", @@ -11269,6 +11381,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "rug" +version = "1.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4207e8d668e5b8eb574bda8322088ccd0d7782d3d03c7e8d562e82ed82bdcbc3" +dependencies = [ + "az", + "gmp-mpfr-sys", + "libc", + "libm", +] + [[package]] name = "ruint" version = "1.15.0" @@ -11531,7 +11655,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "scroll-alloy-consensus" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11555,7 +11679,7 @@ dependencies = [ [[package]] name = "scroll-alloy-evm" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11577,7 +11701,7 @@ dependencies = [ [[package]] name = "scroll-alloy-hardforks" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-hardforks", "auto_impl", @@ -11586,7 +11710,7 @@ dependencies = [ [[package]] name = "scroll-alloy-network" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-network", @@ -11600,7 +11724,7 @@ dependencies = [ [[package]] name = "scroll-alloy-provider" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "alloy-provider", @@ -11640,7 +11764,7 @@ dependencies = [ [[package]] name = "scroll-alloy-rpc-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11658,7 +11782,7 @@ dependencies = [ [[package]] name = "scroll-alloy-rpc-types-engine" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -11669,7 +11793,7 @@ dependencies = [ [[package]] name = "scroll-reth" -version = "1.4.8" +version = "1.5.0" dependencies = [ "clap", "reth-cli-util", @@ -11701,10 +11825,21 @@ checksum = "b50c5943d326858130af85e049f2661ba3c78b26589b8ab98e65e80ae44a1252" dependencies = [ "bitcoin_hashes", "rand 0.8.5", - "secp256k1-sys", + "secp256k1-sys 0.10.1", "serde", ] +[[package]] +name = "secp256k1" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c3c81b43dc2d8877c216a3fccf76677ee1ebccd429566d3e67447290d0c42b2" +dependencies = [ + "bitcoin_hashes", + "rand 0.9.1", + "secp256k1-sys 0.11.0", +] + [[package]] name = "secp256k1-sys" version = "0.10.1" @@ -11714,6 +11849,15 @@ dependencies = [ "cc", ] +[[package]] +name = "secp256k1-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcb913707158fadaf0d8702c2db0e857de66eb003ccfdda5924b5f5ac98efb38" +dependencies = [ + "cc", +] + [[package]] name = "security-framework" version = "2.11.1" @@ -12260,9 +12404,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "1.1.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5d879005cc1b5ba4e18665be9e9501d9da3a9b95f625497c4cb7ee082b532e" +checksum = "b9ac494e7266fcdd2ad80bf4375d55d27a117ea5c866c26d0e97fe5b3caeeb75" dependencies = [ "paste", "proc-macro2", @@ -13005,7 +13149,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "69fff37da548239c3bf9e64a12193d261e8b22b660991c6fd2df057c168f435f" dependencies = [ "cc", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -13551,7 +13695,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index e6c452b2ce2..72714f1fc2b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.4.8" +version = "1.5.0" edition = "2021" rust-version = "1.86" license = "MIT OR Apache-2.0" @@ -11,6 +11,7 @@ exclude = [".github/"] members = [ "bin/reth-bench/", "bin/reth/", + "crates/alloy-provider/", "crates/chain-state/", "crates/chainspec/", "crates/cli/cli/", @@ -104,7 +105,7 @@ members = [ "crates/rpc/rpc-layer", "crates/rpc/rpc-server-types/", "crates/rpc/rpc-testing-util/", - "crates/rpc/rpc-types-compat/", + "crates/rpc/rpc-convert/", "crates/rpc/rpc/", "crates/scroll/alloy/consensus", "crates/scroll/alloy/evm", @@ -152,6 +153,7 @@ members = [ "crates/trie/db", "crates/trie/parallel/", "crates/trie/sparse", + "crates/trie/sparse-parallel/", "crates/trie/trie", "examples/beacon-api-sidecar-fetcher/", "examples/beacon-api-sse/", @@ -338,6 +340,7 @@ codegen-units = 1 # reth op-reth = { path = "crates/optimism/bin" } reth = { path = "bin/reth" } +reth-alloy-provider = { path = "crates/alloy-provider" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-bench = { path = "bin/reth-bench" } reth-chain-state = { path = "crates/chain-state" } @@ -441,7 +444,7 @@ reth-rpc-eth-api = { path = "crates/rpc/rpc-eth-api" } reth-rpc-eth-types = { path = "crates/rpc/rpc-eth-types", default-features = false } reth-rpc-layer = { path = "crates/rpc/rpc-layer" } reth-rpc-server-types = { path = "crates/rpc/rpc-server-types" } -reth-rpc-types-compat = { path = "crates/rpc/rpc-types-compat" } +reth-rpc-convert = { path = "crates/rpc/rpc-convert" } reth-stages = { path = "crates/stages/stages" } reth-stages-api = { path = "crates/stages/api" } reth-stages-types = { path = "crates/stages/types", default-features = false } @@ -460,69 +463,70 @@ reth-trie-common = { path = "crates/trie/common", default-features = false } reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } reth-trie-sparse = { path = "crates/trie/sparse", default-features = false } +reth-trie-sparse-parallel = { path = "crates/trie/sparse-parallel" } reth-zstd-compressors = { path = "crates/storage/zstd-compressors", default-features = false } reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74", default-features = false, features = ["secp256r1", "enable_eip7702"] } -revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74", default-features = false } -revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74", default-features = false } -revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74", default-features = false } -revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74", default-features = false } -revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = 
"feat/reth-v74", default-features = false } -revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74", default-features = false } -revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74", default-features = false } -revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74", default-features = false } -revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74", default-features = false } -op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74", default-features = false } -revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", rev = "6c1942f9a8eaf7aae1807654c4ee99d771150fbd", default-features = false } -revm-inspectors = "0.23.0" +revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false, features = ["enable_eip7702"] } +revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } +revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } +revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } +revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } +revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } +revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } +revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } +revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } +revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } +op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } +revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", branch = "feat/v78", default-features = false } +revm-inspectors = "0.25.0" # eth alloy-chains = { version = "0.2.0", default-features = false } -alloy-dyn-abi = "1.1.0" +alloy-dyn-abi = "1.2.0" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-evm = { version = "0.10", default-features = false } -alloy-primitives = { version = "1.1.0", default-features = false, features = ["map-foldhash"] } +alloy-evm = { version = "0.12", default-features = false } +alloy-primitives = { version = "1.2.0", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } -alloy-sol-macro = "1.1.0" -alloy-sol-types = { version = "1.1.0", default-features = false } -alloy-trie = { version = "0.8.1", default-features = false } - -alloy-hardforks = "0.2.2" - -alloy-consensus = { version = "1.0.9", default-features = false } -alloy-contract = { version = "1.0.9", default-features = false } -alloy-eips = { version = "1.0.9", default-features = false } -alloy-genesis = { version = "1.0.9, <1.0.13", default-features = false } # FIXME: when upstream upgrade this -alloy-json-rpc = { version = "1.0.9", default-features = false } -alloy-network = { version = "1.0.9, <=1.0.13", default-features = false } # FIXME: when upstream upgrade this 
-alloy-network-primitives = { version = "1.0.9", default-features = false } -alloy-provider = { version = "1.0.9", features = ["reqwest"], default-features = false } -alloy-pubsub = { version = "1.0.9", default-features = false } -alloy-rpc-client = { version = "1.0.9", default-features = false } -alloy-rpc-types = { version = "1.0.9", features = ["eth"], default-features = false } -alloy-rpc-types-admin = { version = "1.0.9", default-features = false } -alloy-rpc-types-anvil = { version = "1.0.9", default-features = false } -alloy-rpc-types-beacon = { version = "1.0.9", default-features = false } -alloy-rpc-types-debug = { version = "1.0.9", default-features = false } -alloy-rpc-types-engine = { version = "1.0.9", default-features = false } -alloy-rpc-types-eth = { version = "1.0.9", default-features = false } -alloy-rpc-types-mev = { version = "1.0.9", default-features = false } -alloy-rpc-types-trace = { version = "1.0.9", default-features = false } -alloy-rpc-types-txpool = { version = "1.0.9", default-features = false } -alloy-serde = { version = "1.0.9", default-features = false } -alloy-signer = { version = "1.0.9", default-features = false } -alloy-signer-local = { version = "1.0.9", default-features = false } -alloy-transport = { version = "1.0.9" } -alloy-transport-http = { version = "1.0.9", features = ["reqwest-rustls-tls"], default-features = false } -alloy-transport-ipc = { version = "1.0.9", default-features = false } -alloy-transport-ws = { version = "1.0.9", default-features = false } +alloy-sol-macro = "1.2.0" +alloy-sol-types = { version = "1.2.0", default-features = false } +alloy-trie = { version = "0.9.0", default-features = false } + +alloy-hardforks = "0.2.7" + +alloy-consensus = { version = "1.0.13", default-features = false } +alloy-contract = { version = "1.0.13", default-features = false } +alloy-eips = { version = "1.0.13", default-features = false } +alloy-genesis = { version = "1.0.13", default-features = false } +alloy-json-rpc = { version = "1.0.13", default-features = false } +alloy-network = { version = "1.0.13", default-features = false } +alloy-network-primitives = { version = "1.0.13", default-features = false } +alloy-provider = { version = "1.0.13", features = ["reqwest"], default-features = false } +alloy-pubsub = { version = "1.0.13", default-features = false } +alloy-rpc-client = { version = "1.0.13", default-features = false } +alloy-rpc-types = { version = "1.0.13", features = ["eth"], default-features = false } +alloy-rpc-types-admin = { version = "1.0.13", default-features = false } +alloy-rpc-types-anvil = { version = "1.0.13", default-features = false } +alloy-rpc-types-beacon = { version = "1.0.13", default-features = false } +alloy-rpc-types-debug = { version = "1.0.13", default-features = false } +alloy-rpc-types-engine = { version = "1.0.13", default-features = false } +alloy-rpc-types-eth = { version = "1.0.13", default-features = false } +alloy-rpc-types-mev = { version = "1.0.13", default-features = false } +alloy-rpc-types-trace = { version = "1.0.13", default-features = false } +alloy-rpc-types-txpool = { version = "1.0.13", default-features = false } +alloy-serde = { version = "1.0.13", default-features = false } +alloy-signer = { version = "1.0.13", default-features = false } +alloy-signer-local = { version = "1.0.13", default-features = false } +alloy-transport = { version = "1.0.13" } +alloy-transport-http = { version = "1.0.13", features = ["reqwest-rustls-tls"], default-features = false } +alloy-transport-ipc = { version = 
"1.0.13", default-features = false } +alloy-transport-ws = { version = "1.0.13", default-features = false } # scroll scroll-alloy-consensus = { path = "crates/scroll/alloy/consensus", default-features = false } -scroll-alloy-evm = { path = "crates/scroll/alloy/evm" } +scroll-alloy-evm = { path = "crates/scroll/alloy/evm", default-features = false } scroll-alloy-hardforks = { path = "crates/scroll/alloy/hardforks", default-features = false } scroll-alloy-network = { path = "crates/scroll/alloy/network", default-features = false } scroll-alloy-rpc-types = { path = "crates/scroll/alloy/rpc-types", default-features = false } @@ -544,13 +548,13 @@ reth-scroll-txpool = { path = "crates/scroll/txpool" } poseidon-bn254 = { git = "https://github.com/scroll-tech/poseidon-bn254", rev = "526a64a", features = ["bn254"] } # op -alloy-op-evm = { version = "0.10.0", default-features = false } +alloy-op-evm = { version = "0.12", default-features = false } alloy-op-hardforks = "0.2.2" -op-alloy-rpc-types = { version = "0.17.2", default-features = false } -op-alloy-rpc-types-engine = { version = "0.17.2", default-features = false } -op-alloy-network = { version = "0.17.2", default-features = false } -op-alloy-consensus = { version = "0.17.2", default-features = false } -op-alloy-rpc-jsonrpsee = { version = "0.17.2", default-features = false } +op-alloy-rpc-types = { version = "0.18.7", default-features = false } +op-alloy-rpc-types-engine = { version = "0.18.7", default-features = false } +op-alloy-network = { version = "0.18.7", default-features = false } +op-alloy-consensus = { version = "0.18.7", default-features = false } +op-alloy-rpc-jsonrpsee = { version = "0.18.7", default-features = false } op-alloy-flz = { version = "0.13.1", default-features = false } # misc @@ -578,7 +582,7 @@ linked_hash_set = "0.1" lz4 = "1.28.1" modular-bitfield = "0.11.2" notify = { version = "8.0.0", default-features = false, features = ["macos_fsevent"] } -nybbles = { version = "0.3.0", default-features = false } +nybbles = { version = "0.4.0", default-features = false } once_cell = { version = "1.19", default-features = false, features = ["critical-section"] } parking_lot = "0.12" paste = "1.0" @@ -671,7 +675,7 @@ toml = "0.8" arbitrary = "1.3" assert_matches = "1.5.0" criterion = { package = "codspeed-criterion-compat", version = "2.7" } -proptest = "1.4" +proptest = "1.7" proptest-derive = "0.5" similar-asserts = { version = "1.5.0", features = ["serde"] } tempfile = "3.20" @@ -745,8 +749,8 @@ walkdir = "2.3.3" vergen-git2 = "1.0.5" [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" } -op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" } +revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" } +op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" } # alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } diff --git a/HARDFORK-CHECKLIST.md b/HARDFORK-CHECKLIST.md index fa69107a2c1..3e3628f0b4c 100644 --- a/HARDFORK-CHECKLIST.md +++ b/HARDFORK-CHECKLIST.md @@ -35,7 +35,7 @@ Adding a new versioned endpoint requires the same changes as for L1 just for the ### Hardforks -Opstack has dedicated hardkfors (e.g. Isthmus), that can be entirely opstack specific (e.g. 
Holocene) or can be an L1 +Opstack has dedicated hardforks (e.g. Isthmus) that can be entirely opstack-specific (e.g. Holocene) or can be an L1 equivalent hardfork. Since opstack sticks to the L1 header primitive, a new L1 equivalent hardfork also requires new equivalent consensus checks. For this reason these `OpHardfork` must be mapped to L1 `EthereumHardfork`, for example: -`OpHardfork::Isthmus` corresponds to `EthereumHardfork::Prague`. These mappings must be defined in the `ChainSpec`. \ No newline at end of file +`OpHardfork::Isthmus` corresponds to `EthereumHardfork::Prague`. These mappings must be defined in the `ChainSpec`. diff --git a/Makefile b/Makefile index 59d5e0e63b5..e7c73d44837 100644 --- a/Makefile +++ b/Makefile @@ -374,7 +374,7 @@ db-tools: ## Compile MDBX debugging tools. .PHONY: update-book-cli update-book-cli: build-debug ## Update book cli documentation. @echo "Updating book cli doc..." - @./book/cli/update.sh $(CARGO_TARGET_DIR)/debug/reth + @./docs/cli/update.sh $(CARGO_TARGET_DIR)/debug/reth .PHONY: profiling profiling: ## Builds `reth` with optimisations, but also symbols. diff --git a/README.md b/README.md index abac066fd16..390868e3976 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ ![](./assets/reth-prod.png) **[Install](https://paradigmxyz.github.io/reth/installation/installation.html)** -| [User Book](https://reth.rs) +| [User Docs](https://reth.rs) | [Developer Docs](./docs) | [Crate Docs](https://reth.rs/docs) @@ -40,13 +40,14 @@ More concretely, our goals are: Reth is production ready, and suitable for usage in mission-critical environments such as staking or high-uptime services. We also actively recommend professional node operators to switch to Reth in production for performance and cost reasons in use cases where high performance with great margins is required such as RPC, MEV, Indexing, Simulations, and P2P activities. More historical context below: -* We released 1.0 "production-ready" stable Reth in June 2024. - * Reth completed an audit with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implementation. Find it [here](./audit/sigma_prime_audit_v2.pdf). - * Revm (the EVM used in Reth) underwent an audit with [Guido Vranken](https://twitter.com/guidovranken) (#1 [Ethereum Bug Bounty](https://ethereum.org/en/bug-bounty)). We will publish the results soon. -* We released multiple iterative beta versions, up to [beta.9](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.9) on Monday June 3rd 2024 the last beta release. -* We released [beta](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.1) on Monday March 4th 2024, our first breaking change to the database model, providing faster query speed, smaller database footprint, and allowing "history" to be mounted on separate drives. -* We shipped iterative improvements until the last alpha release on February 28th 2024, [0.1.0-alpha.21](https://github.com/paradigmxyz/reth/releases/tag/v0.1.0-alpha.21). -* We [initially announced](https://www.paradigm.xyz/2023/06/reth-alpha) [0.1.0-alpha.1](https://github.com/paradigmxyz/reth/releases/tag/v0.1.0-alpha.1) in June 20th 2023. + +- We released 1.0 "production-ready" stable Reth in June 2024. + - Reth completed an audit with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implementation. Find it [here](./audit/sigma_prime_audit_v2.pdf). 
+ - Revm (the EVM used in Reth) underwent an audit with [Guido Vranken](https://twitter.com/guidovranken) (#1 [Ethereum Bug Bounty](https://ethereum.org/en/bug-bounty)). We will publish the results soon. +- We released multiple iterative beta versions, up to [beta.9](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.9) on Monday June 3rd 2024, the last beta release. +- We released [beta](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.1) on Monday March 4th 2024, our first breaking change to the database model, providing faster query speed, smaller database footprint, and allowing "history" to be mounted on separate drives. +- We shipped iterative improvements until the last alpha release on February 28th 2024, [0.1.0-alpha.21](https://github.com/paradigmxyz/reth/releases/tag/v0.1.0-alpha.21). +- We [initially announced](https://www.paradigm.xyz/2023/06/reth-alpha) [0.1.0-alpha.1](https://github.com/paradigmxyz/reth/releases/tag/v0.1.0-alpha.1) on June 20th 2023. ### Database compatibility @@ -60,7 +61,7 @@ If you had a database produced by alpha versions of Reth, you need to drop it wi ## For Users -See the [Reth Book](https://paradigmxyz.github.io/reth) for instructions on how to install and run Reth. +See the [Reth documentation](https://paradigmxyz.github.io/reth) for instructions on how to install and run Reth. ## For Developers @@ -76,8 +77,8 @@ For a general overview of the crates, see [Project Layout](./docs/repo/layout.md If you want to contribute, or follow along with contributor discussion, you can use our [main telegram](https://t.me/paradigm_reth) to chat with us about the development of Reth! -- Our contributor guidelines can be found in [`CONTRIBUTING.md`](./CONTRIBUTING.md). -- See our [contributor docs](./docs) for more information on the project. A good starting point is [Project Layout](./docs/repo/layout.md). +- Our contributor guidelines can be found in [`CONTRIBUTING.md`](./CONTRIBUTING.md). +- See our [contributor docs](./docs) for more information on the project. A good starting point is [Project Layout](./docs/repo/layout.md). ### Building and testing @@ -90,7 +91,7 @@ When updating this, also update: The Minimum Supported Rust Version (MSRV) of this project is [1.86.0](https://blog.rust-lang.org/2025/04/03/Rust-1.86.0/). -See the book for detailed instructions on how to [build from source](https://paradigmxyz.github.io/reth/installation/source.html). +See the docs for detailed instructions on how to [build from source](https://paradigmxyz.github.io/reth/installation/source). To fully test Reth, you will need to have [Geth installed](https://geth.ethereum.org/docs/getting-started/installing-geth), but it is possible to run a subset of tests without Geth. @@ -119,13 +120,13 @@ Using `cargo test` to run tests may work fine, but this is not tested and does n ## Getting Help -If you have any questions, first see if the answer to your question can be found in the [book][book]. +If you have any questions, first see if the answer to your question can be found in the [docs][book]. 
If the answer is not there: -- Join the [Telegram][tg-url] to get help, or -- Open a [discussion](https://github.com/paradigmxyz/reth/discussions/new) with your question, or -- Open an issue with [the bug](https://github.com/paradigmxyz/reth/issues/new?assignees=&labels=C-bug%2CS-needs-triage&projects=&template=bug.yml) +- Join the [Telegram][tg-url] to get help, or +- Open a [discussion](https://github.com/paradigmxyz/reth/discussions/new) with your question, or +- Open an issue with [the bug](https://github.com/paradigmxyz/reth/issues/new?assignees=&labels=C-bug%2CS-needs-triage&projects=&template=bug.yml) ## Security @@ -137,9 +138,9 @@ Reth is a new implementation of the Ethereum protocol. In the process of develop None of this would have been possible without them, so big shoutout to the teams below: -- [Geth](https://github.com/ethereum/go-ethereum/): We would like to express our heartfelt gratitude to the go-ethereum team for their outstanding contributions to Ethereum over the years. Their tireless efforts and dedication have helped to shape the Ethereum ecosystem and make it the vibrant and innovative community it is today. Thank you for your hard work and commitment to the project. -- [Erigon](https://github.com/ledgerwatch/erigon) (fka Turbo-Geth): Erigon pioneered the ["Staged Sync" architecture](https://erigon.substack.com/p/erigon-stage-sync-and-control-flows) that Reth is using, as well as [introduced MDBX](https://github.com/ledgerwatch/erigon/wiki/Choice-of-storage-engine) as the database of choice. We thank Erigon for pushing the state of the art research on the performance limits of Ethereum nodes. -- [Akula](https://github.com/akula-bft/akula/): Reth uses forks of the Apache versions of Akula's [MDBX Bindings](https://github.com/paradigmxyz/reth/pull/132), [FastRLP](https://github.com/paradigmxyz/reth/pull/63) and [ECIES](https://github.com/paradigmxyz/reth/pull/80) . Given that these packages were already released under the Apache License, and they implement standardized solutions, we decided not to reimplement them to iterate faster. We thank the Akula team for their contributions to the Rust Ethereum ecosystem and for publishing these packages. +- [Geth](https://github.com/ethereum/go-ethereum/): We would like to express our heartfelt gratitude to the go-ethereum team for their outstanding contributions to Ethereum over the years. Their tireless efforts and dedication have helped to shape the Ethereum ecosystem and make it the vibrant and innovative community it is today. Thank you for your hard work and commitment to the project. +- [Erigon](https://github.com/ledgerwatch/erigon) (fka Turbo-Geth): Erigon pioneered the ["Staged Sync" architecture](https://erigon.substack.com/p/erigon-stage-sync-and-control-flows) that Reth is using, as well as [introduced MDBX](https://github.com/ledgerwatch/erigon/wiki/Choice-of-storage-engine) as the database of choice. We thank Erigon for pushing the state of the art in research on the performance limits of Ethereum nodes. +- [Akula](https://github.com/akula-bft/akula/): Reth uses forks of the Apache versions of Akula's [MDBX Bindings](https://github.com/paradigmxyz/reth/pull/132), [FastRLP](https://github.com/paradigmxyz/reth/pull/63), and [ECIES](https://github.com/paradigmxyz/reth/pull/80). Given that these packages were already released under the Apache License, and they implement standardized solutions, we decided not to reimplement them to iterate faster. 
We thank the Akula team for their contributions to the Rust Ethereum ecosystem and for publishing these packages. ## Warning diff --git a/bin/reth-bench/scripts/compare_newpayload_latency.py b/bin/reth-bench/scripts/compare_newpayload_latency.py new file mode 100755 index 00000000000..ff9cdad5262 --- /dev/null +++ b/bin/reth-bench/scripts/compare_newpayload_latency.py @@ -0,0 +1,130 @@ +#!/usr/bin/env -S uv run +# /// script +# requires-python = ">=3.8" +# dependencies = [ +# "pandas", +# "matplotlib", +# "numpy", +# ] +# /// + +# A simple script which plots graphs comparing two combined_latency.csv files +# output by reth-bench. The graphs which are plotted are: +# +# - A histogram of the percent change between latencies, bucketed by 1% +# increments. +# +# - A simple line graph plotting the latencies of the two files against each +# other. + + +import argparse +import pandas as pd +import matplotlib.pyplot as plt +import numpy as np +import sys + +def main(): + parser = argparse.ArgumentParser(description='Generate histogram of total_latency percent differences between two CSV files') + parser.add_argument('baseline_csv', help='First CSV file, used as the baseline/control') + parser.add_argument('comparison_csv', help='Second CSV file, which is being compared to the baseline') + parser.add_argument('-o', '--output', default='latency.png', help='Output image file (default: latency.png)') + + args = parser.parse_args() + + try: + df1 = pd.read_csv(args.baseline_csv) + df2 = pd.read_csv(args.comparison_csv) + except FileNotFoundError as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"Error reading CSV files: {e}", file=sys.stderr) + sys.exit(1) + + if 'total_latency' not in df1.columns: + print(f"Error: 'total_latency' column not found in {args.baseline_csv}", file=sys.stderr) + sys.exit(1) + + if 'total_latency' not in df2.columns: + print(f"Error: 'total_latency' column not found in {args.comparison_csv}", file=sys.stderr) + sys.exit(1) + + if len(df1) != len(df2): + print("Warning: CSV files have different number of rows. 
Using minimum length.", file=sys.stderr) + min_len = min(len(df1), len(df2)) + df1 = df1.head(min_len) + df2 = df2.head(min_len) + + latency1 = df1['total_latency'].values + latency2 = df2['total_latency'].values + + # Handle division by zero + with np.errstate(divide='ignore', invalid='ignore'): + percent_diff = ((latency2 - latency1) / latency1) * 100 + + # Remove infinite and NaN values + percent_diff = percent_diff[np.isfinite(percent_diff)] + + if len(percent_diff) == 0: + print("Error: No valid percent differences could be calculated", file=sys.stderr) + sys.exit(1) + + # Create histogram with 1% buckets + min_diff = np.floor(percent_diff.min()) + max_diff = np.ceil(percent_diff.max()) + + bins = np.arange(min_diff, max_diff + 1, 1) + + # Create figure with two subplots + fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 12)) + + # Top subplot: Histogram + ax1.hist(percent_diff, bins=bins, edgecolor='black', alpha=0.7) + ax1.set_xlabel('Percent Difference (%)') + ax1.set_ylabel('Number of Blocks') + ax1.set_title(f'Total Latency Percent Difference Histogram\n({args.baseline_csv} vs {args.comparison_csv})') + ax1.grid(True, alpha=0.3) + + # Add statistics to the histogram + mean_diff = np.mean(percent_diff) + median_diff = np.median(percent_diff) + ax1.axvline(mean_diff, color='red', linestyle='--', label=f'Mean: {mean_diff:.2f}%') + ax1.axvline(median_diff, color='orange', linestyle='--', label=f'Median: {median_diff:.2f}%') + ax1.legend() + + # Bottom subplot: Latency vs Block Number + if 'block_number' in df1.columns and 'block_number' in df2.columns: + block_numbers = df1['block_number'].values[:len(percent_diff)] + ax2.plot(block_numbers, latency1[:len(percent_diff)], 'b-', alpha=0.7, label=f'Baseline ({args.baseline_csv})') + ax2.plot(block_numbers, latency2[:len(percent_diff)], 'r-', alpha=0.7, label=f'Comparison ({args.comparison_csv})') + ax2.set_xlabel('Block Number') + ax2.set_ylabel('Total Latency (ms)') + ax2.set_title('Total Latency vs Block Number') + ax2.grid(True, alpha=0.3) + ax2.legend() + else: + # If no block_number column, use index + indices = np.arange(len(percent_diff)) + ax2.plot(indices, latency1[:len(percent_diff)], 'b-', alpha=0.7, label=f'Baseline ({args.baseline_csv})') + ax2.plot(indices, latency2[:len(percent_diff)], 'r-', alpha=0.7, label=f'Comparison ({args.comparison_csv})') + ax2.set_xlabel('Block Index') + ax2.set_ylabel('Total Latency (ms)') + ax2.set_title('Total Latency vs Block Index') + ax2.grid(True, alpha=0.3) + ax2.legend() + + plt.tight_layout() + plt.savefig(args.output, dpi=300, bbox_inches='tight') + print(f"Histogram and latency graph saved to {args.output}") + + print(f"\nStatistics:") + print(f"Mean percent difference: {mean_diff:.2f}%") + print(f"Median percent difference: {median_diff:.2f}%") + print(f"Standard deviation: {np.std(percent_diff):.2f}%") + print(f"Min: {percent_diff.min():.2f}%") + print(f"Max: {percent_diff.max():.2f}%") + print(f"Total blocks analyzed: {len(percent_diff)}") + +if __name__ == '__main__': + main() diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 4d93ca5d73c..fb940250033 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -27,7 +27,7 @@ reth-cli-util.workspace = true reth-consensus-common.workspace = true reth-rpc-builder.workspace = true reth-rpc.workspace = true -reth-rpc-types-compat.workspace = true +reth-rpc-convert.workspace = true reth-rpc-api = { workspace = true, features = ["client"] } reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true diff --git 
a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 11a50acd3a7..ae07f9f3567 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -175,9 +175,9 @@ pub mod rpc { pub use reth_rpc_server_types::result::*; } - /// Re-exported from `reth_rpc_types_compat`. + /// Re-exported from `reth_rpc_convert`. pub mod compat { - pub use reth_rpc_types_compat::*; + pub use reth_rpc_convert::*; } } diff --git a/book/SUMMARY.md b/book/SUMMARY.md deleted file mode 100644 index 310eebb0285..00000000000 --- a/book/SUMMARY.md +++ /dev/null @@ -1,84 +0,0 @@ -# Reth Book - -- [Introduction](./intro.md) -- [Installation](./installation/installation.md) - - [Pre-Built Binaries](./installation/binaries.md) - - [Docker](./installation/docker.md) - - [Build from Source](./installation/source.md) - - [Build for ARM devices](./installation/build-for-arm-devices.md) - - [Update Priorities](./installation/priorities.md) -- [Run a Node](./run/run-a-node.md) - - [Mainnet or official testnets](./run/mainnet.md) - - [OP Stack](./run/optimism.md) - - [Run an OP Mainnet Node](./run/sync-op-mainnet.md) - - [Private testnet](./run/private-testnet.md) - - [Metrics](./run/observability.md) - - [Configuring Reth](./run/config.md) - - [Transaction types](./run/transactions.md) - - [Pruning & Full Node](./run/pruning.md) - - [Ports](./run/ports.md) - - [Troubleshooting](./run/troubleshooting.md) -- [Interacting with Reth over JSON-RPC](./jsonrpc/intro.md) - - [eth](./jsonrpc/eth.md) - - [web3](./jsonrpc/web3.md) - - [net](./jsonrpc/net.md) - - [txpool](./jsonrpc/txpool.md) - - [debug](./jsonrpc/debug.md) - - [trace](./jsonrpc/trace.md) - - [admin](./jsonrpc/admin.md) - - [rpc](./jsonrpc/rpc.md) -- [CLI Reference](./cli/cli.md) - - [`reth`](./cli/reth.md) - - [`reth node`](./cli/reth/node.md) - - [`reth init`](./cli/reth/init.md) - - [`reth init-state`](./cli/reth/init-state.md) - - [`reth import`](./cli/reth/import.md) - - [`reth import-era`](./cli/reth/import-era.md) - - [`reth dump-genesis`](./cli/reth/dump-genesis.md) - - [`reth db`](./cli/reth/db.md) - - [`reth db stats`](./cli/reth/db/stats.md) - - [`reth db list`](./cli/reth/db/list.md) - - [`reth db checksum`](./cli/reth/db/checksum.md) - - [`reth db diff`](./cli/reth/db/diff.md) - - [`reth db get`](./cli/reth/db/get.md) - - [`reth db get mdbx`](./cli/reth/db/get/mdbx.md) - - [`reth db get static-file`](./cli/reth/db/get/static-file.md) - - [`reth db drop`](./cli/reth/db/drop.md) - - [`reth db clear`](./cli/reth/db/clear.md) - - [`reth db clear mdbx`](./cli/reth/db/clear/mdbx.md) - - [`reth db clear static-file`](./cli/reth/db/clear/static-file.md) - - [`reth db version`](./cli/reth/db/version.md) - - [`reth db path`](./cli/reth/db/path.md) - - [`reth download`](./cli/reth/download.md) - - [`reth stage`](./cli/reth/stage.md) - - [`reth stage run`](./cli/reth/stage/run.md) - - [`reth stage drop`](./cli/reth/stage/drop.md) - - [`reth stage dump`](./cli/reth/stage/dump.md) - - [`reth stage dump execution`](./cli/reth/stage/dump/execution.md) - - [`reth stage dump storage-hashing`](./cli/reth/stage/dump/storage-hashing.md) - - [`reth stage dump account-hashing`](./cli/reth/stage/dump/account-hashing.md) - - [`reth stage dump merkle`](./cli/reth/stage/dump/merkle.md) - - [`reth stage unwind`](./cli/reth/stage/unwind.md) - - [`reth stage unwind to-block`](./cli/reth/stage/unwind/to-block.md) - - [`reth stage unwind num-blocks`](./cli/reth/stage/unwind/num-blocks.md) - - [`reth p2p`](./cli/reth/p2p.md) - - [`reth p2p header`](./cli/reth/p2p/header.md) - - [`reth p2p 
body`](./cli/reth/p2p/body.md) - - [`reth p2p rlpx`](./cli/reth/p2p/rlpx.md) - - [`reth p2p rlpx ping`](./cli/reth/p2p/rlpx/ping.md) - - [`reth config`](./cli/reth/config.md) - - [`reth debug`](./cli/reth/debug.md) - - [`reth debug execution`](./cli/reth/debug/execution.md) - - [`reth debug merkle`](./cli/reth/debug/merkle.md) - - [`reth debug in-memory-merkle`](./cli/reth/debug/in-memory-merkle.md) - - [`reth debug build-block`](./cli/reth/debug/build-block.md) - - [`reth recover`](./cli/reth/recover.md) - - [`reth recover storage-tries`](./cli/reth/recover/storage-tries.md) - - [`reth prune`](./cli/reth/prune.md) -- [Developers](./developers/developers.md) - - [Execution Extensions](./developers/exex/exex.md) - - [How do ExExes work?](./developers/exex/how-it-works.md) - - [Hello World](./developers/exex/hello-world.md) - - [Tracking State](./developers/exex/tracking-state.md) - - [Remote](./developers/exex/remote.md) - - [Contribute](./developers/contribute.md) diff --git a/book/cli/SUMMARY.md b/book/cli/SUMMARY.md deleted file mode 100644 index aa625298590..00000000000 --- a/book/cli/SUMMARY.md +++ /dev/null @@ -1,47 +0,0 @@ -- [`reth`](./reth.md) - - [`reth node`](./reth/node.md) - - [`reth init`](./reth/init.md) - - [`reth init-state`](./reth/init-state.md) - - [`reth import`](./reth/import.md) - - [`reth import-era`](./reth/import-era.md) - - [`reth dump-genesis`](./reth/dump-genesis.md) - - [`reth db`](./reth/db.md) - - [`reth db stats`](./reth/db/stats.md) - - [`reth db list`](./reth/db/list.md) - - [`reth db checksum`](./reth/db/checksum.md) - - [`reth db diff`](./reth/db/diff.md) - - [`reth db get`](./reth/db/get.md) - - [`reth db get mdbx`](./reth/db/get/mdbx.md) - - [`reth db get static-file`](./reth/db/get/static-file.md) - - [`reth db drop`](./reth/db/drop.md) - - [`reth db clear`](./reth/db/clear.md) - - [`reth db clear mdbx`](./reth/db/clear/mdbx.md) - - [`reth db clear static-file`](./reth/db/clear/static-file.md) - - [`reth db version`](./reth/db/version.md) - - [`reth db path`](./reth/db/path.md) - - [`reth download`](./reth/download.md) - - [`reth stage`](./reth/stage.md) - - [`reth stage run`](./reth/stage/run.md) - - [`reth stage drop`](./reth/stage/drop.md) - - [`reth stage dump`](./reth/stage/dump.md) - - [`reth stage dump execution`](./reth/stage/dump/execution.md) - - [`reth stage dump storage-hashing`](./reth/stage/dump/storage-hashing.md) - - [`reth stage dump account-hashing`](./reth/stage/dump/account-hashing.md) - - [`reth stage dump merkle`](./reth/stage/dump/merkle.md) - - [`reth stage unwind`](./reth/stage/unwind.md) - - [`reth stage unwind to-block`](./reth/stage/unwind/to-block.md) - - [`reth stage unwind num-blocks`](./reth/stage/unwind/num-blocks.md) - - [`reth p2p`](./reth/p2p.md) - - [`reth p2p header`](./reth/p2p/header.md) - - [`reth p2p body`](./reth/p2p/body.md) - - [`reth p2p rlpx`](./reth/p2p/rlpx.md) - - [`reth p2p rlpx ping`](./reth/p2p/rlpx/ping.md) - - [`reth config`](./reth/config.md) - - [`reth debug`](./reth/debug.md) - - [`reth debug execution`](./reth/debug/execution.md) - - [`reth debug merkle`](./reth/debug/merkle.md) - - [`reth debug in-memory-merkle`](./reth/debug/in-memory-merkle.md) - - [`reth debug build-block`](./reth/debug/build-block.md) - - [`reth recover`](./reth/recover.md) - - [`reth recover storage-tries`](./reth/recover/storage-tries.md) - - [`reth prune`](./reth/prune.md) diff --git a/book/cli/reth/debug/replay-engine.md b/book/cli/reth/debug/replay-engine.md deleted file mode 100644 index da36f11cc0e..00000000000 --- 
a/book/cli/reth/debug/replay-engine.md +++ /dev/null @@ -1,332 +0,0 @@ -# reth debug replay-engine - -Debug engine API by replaying stored messages - -```bash -$ reth debug replay-engine --help -``` -```txt -Usage: reth debug replay-engine [OPTIONS] --engine-api-store - -Options: - --instance - Add a new instance of a node. - - Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - - Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - - Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 - - [default: 1] - - -h, --help - Print help (see a summary with '-h') - -Datadir: - --datadir - The path to the data dir for all reth files and subdirectories. - - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - --datadir.static-files - The absolute path to store static files in. - - --config - The path to the configuration file to use - - --chain - The chain this node is running. - Possible values are either a built-in chain or the path to a chain specification file. - - Built-in chains: - mainnet, sepolia, holesky, dev - - [default: mainnet] - -Database: - --db.log-level - Database logging level. Levels higher than "notice" require a debug build - - Possible values: - - fatal: Enables logging for critical conditions, i.e. assertion failures - - error: Enables logging for error conditions - - warn: Enables logging for warning conditions - - notice: Enables logging for normal but significant condition - - verbose: Enables logging for verbose informational - - debug: Enables logging for debug-level messages - - trace: Enables logging for trace debug-level messages - - extra: Enables logging for extra debug-level messages - - --db.exclusive - Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - - [possible values: true, false] - - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - - --db.read-transaction-timeout - Read transaction timeout in seconds, 0 means no timeout - -Networking: - -d, --disable-discovery - Disable the discovery service - - --disable-dns-discovery - Disable the DNS discovery - - --disable-discv4-discovery - Disable Discv4 discovery - - --enable-discv5-discovery - Enable Discv5 discovery - - --disable-nat - Disable Nat discovery - - --discovery.addr - The UDP address to use for devp2p peer discovery version 4 - - [default: 0.0.0.0] - - --discovery.port - The UDP port to use for devp2p peer discovery version 4 - - [default: 30303] - - --discovery.v5.addr - The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 - - --discovery.v5.addr.ipv6 - The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 - - --discovery.v5.port - The UDP IPv4 port to use for devp2p peer discovery version 5. 
Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set - - [default: 9200] - - --discovery.v5.port.ipv6 - The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set - - [default: 9200] - - --discovery.v5.lookup-interval - The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program - - [default: 20] - - --discovery.v5.bootstrap.lookup-interval - The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap - - [default: 5] - - --discovery.v5.bootstrap.lookup-countdown - The number of times to carry out boost lookup queries at bootstrap - - [default: 200] - - --trusted-peers - Comma separated enode URLs of trusted peers for P2P connections. - - --trusted-peers enode://abcd@192.168.0.1:30303 - - --trusted-only - Connect to or accept from trusted peers only - - --bootnodes - Comma separated enode URLs for P2P discovery bootstrap. - - Will fall back to a network-specific default if not specified. - - --dns-retries - Amount of DNS resolution requests retries to perform when peering - - [default: 0] - - --peers-file - The path to the known peers file. Connected peers are dumped to this file on nodes - shutdown, and read on startup. Cannot be used with `--no-persist-peers`. - - --identity - Custom node identity - - [default: reth/-/] - - --p2p-secret-key - Secret key to use for this node. - - This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. - - --no-persist-peers - Do not persist peers. - - --nat - NAT resolution method (any|none|upnp|publicip|extip:\) - - [default: any] - - --addr - Network listening address - - [default: 0.0.0.0] - - --port - Network listening port - - [default: 30303] - - --max-outbound-peers - Maximum number of outbound requests. default: 100 - - --max-inbound-peers - Maximum number of inbound requests. default: 30 - - --max-tx-reqs - Max concurrent `GetPooledTransactions` requests. - - [default: 130] - - --max-tx-reqs-peer - Max concurrent `GetPooledTransactions` requests per peer. - - [default: 1] - - --max-seen-tx-history - Max number of seen transactions to remember per peer. - - Default is 320 transaction hashes. - - [default: 320] - - --max-pending-imports - Max number of transactions to import concurrently. - - [default: 4096] - - --pooled-tx-response-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions - to pack in one response. - Spec'd at 2MiB. - - [default: 2097152] - - --pooled-tx-pack-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions to - request in one request. - - Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a - transaction announcement (see `RLPx` specs). This allows a node to request a specific size - response. - - By default, nodes request only 128 KiB worth of transactions, but should a peer request - more, up to 2 MiB, a node will answer with more than 128 KiB. - - Default is 128 KiB. - - [default: 131072] - - --max-tx-pending-fetch - Max capacity of cache of hashes for transactions pending fetch. - - [default: 25600] - - --net-if.experimental - Name of network interface used to communicate with peers. - - If flag is set, but no value is passed, the default interface for docker `eth0` is tried. 
- - --engine-api-store - The path to read engine API messages from - - --interval - The number of milliseconds between Engine API messages - - [default: 1000] - -Logging: - --log.stdout.format - The format to use for logs written to stdout - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.stdout.filter - The filter to use for logs written to stdout - - [default: ] - - --log.file.format - The format to use for logs written to the log file - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.file.filter - The filter to use for logs written to the log file - - [default: debug] - - --log.file.directory - The path to put log files in - - [default: /logs] - - --log.file.max-size - The maximum size (in MB) of one log file - - [default: 200] - - --log.file.max-files - The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - - [default: 5] - - --log.journald - Write logs to journald - - --log.journald.filter - The filter to use for logs written to journald - - [default: error] - - --color - Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - - [default: always] - - Possible values: - - always: Colors on - - auto: Colors on - - never: Colors off - -Display: - -v, --verbosity... - Set the minimum log level. - - -v Errors - -vv Warnings - -vvv Info - -vvvv Debug - -vvvvv Traces (warning: very verbose!) - - -q, --quiet - Silence all log output -``` \ No newline at end of file diff --git a/book/cli/reth/import-op.md b/book/cli/reth/import-op.md deleted file mode 100644 index d2d81980ce3..00000000000 --- a/book/cli/reth/import-op.md +++ /dev/null @@ -1,134 +0,0 @@ -# op-reth import - -This syncs RLP encoded blocks from a file. Supports import of OVM blocks -from the Bedrock datadir. Requires blocks, up to same height as receipts -file, to already be imported. - -```bash -$ op-reth import-op --help -Usage: op-reth import-op [OPTIONS] - -Options: - --config - The path to the configuration file to use. - - --datadir - The path to the data dir for all reth files and subdirectories. - - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - --chunk-len - Chunk byte length to read from file. - - [default: 1GB] - - -h, --help - Print help (see a summary with '-h') - -Database: - --db.log-level - Database logging level. Levels higher than "notice" require a debug build - - Possible values: - - fatal: Enables logging for critical conditions, i.e. 
assertion failures - - error: Enables logging for error conditions - - warn: Enables logging for warning conditions - - notice: Enables logging for normal but significant condition - - verbose: Enables logging for verbose informational - - debug: Enables logging for debug-level messages - - trace: Enables logging for trace debug-level messages - - extra: Enables logging for extra debug-level messages - - --db.exclusive - Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - - [possible values: true, false] - - - The path to a `.rlp` block file for import. - - The online sync pipeline stages (headers and bodies) are replaced by a file import. Skips block execution since blocks below Bedrock are built on OVM. - -Logging: - --log.stdout.format - The format to use for logs written to stdout - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.stdout.filter - The filter to use for logs written to stdout - - [default: ] - - --log.file.format - The format to use for logs written to the log file - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.file.filter - The filter to use for logs written to the log file - - [default: debug] - - --log.file.directory - The path to put log files in - - [default: /logs] - - --log.file.max-size - The maximum size (in MB) of one log file - - [default: 200] - - --log.file.max-files - The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - - [default: 5] - - --log.journald - Write logs to journald - - --log.journald.filter - The filter to use for logs written to journald - - [default: error] - - --color - Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - - [default: always] - - Possible values: - - always: Colors on - - auto: Colors on - - never: Colors off - -Display: - -v, --verbosity... - Set the minimum log level. - - -v Errors - -vv Warnings - -vvv Info - -vvvv Debug - -vvvvv Traces (warning: very verbose!) - - -q, --quiet - Silence all log output -``` \ No newline at end of file diff --git a/book/cli/reth/import-receipts-op.md b/book/cli/reth/import-receipts-op.md deleted file mode 100644 index 0b7135e1d7a..00000000000 --- a/book/cli/reth/import-receipts-op.md +++ /dev/null @@ -1,133 +0,0 @@ -# op-reth import-receipts-op - -This imports non-standard RLP encoded receipts from a file. -The supported RLP encoding, is the non-standard encoding used -for receipt export in . -Supports import of OVM receipts from the Bedrock datadir. - -```bash -$ op-reth import-receipts-op --help -Usage: op-reth import-receipts-op [OPTIONS] - -Options: - --datadir - The path to the data dir for all reth files and subdirectories. 
- - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - --chunk-len - Chunk byte length to read from file. - - [default: 1GB] - - -h, --help - Print help (see a summary with '-h') - -Database: - --db.log-level - Database logging level. Levels higher than "notice" require a debug build - - Possible values: - - fatal: Enables logging for critical conditions, i.e. assertion failures - - error: Enables logging for error conditions - - warn: Enables logging for warning conditions - - notice: Enables logging for normal but significant condition - - verbose: Enables logging for verbose informational - - debug: Enables logging for debug-level messages - - trace: Enables logging for trace debug-level messages - - extra: Enables logging for extra debug-level messages - - --db.exclusive - Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - - [possible values: true, false] - - - The path to a receipts file for import. File must use `OpGethReceiptFileCodec` (used for - exporting OP chain segment below Bedrock block via testinprod/op-geth). - - - -Logging: - --log.stdout.format - The format to use for logs written to stdout - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.stdout.filter - The filter to use for logs written to stdout - - [default: ] - - --log.file.format - The format to use for logs written to the log file - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.file.filter - The filter to use for logs written to the log file - - [default: debug] - - --log.file.directory - The path to put log files in - - [default: /logs] - - --log.file.max-size - The maximum size (in MB) of one log file - - [default: 200] - - --log.file.max-files - The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - - [default: 5] - - --log.journald - Write logs to journald - - --log.journald.filter - The filter to use for logs written to journald - - [default: error] - - --color - Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - - [default: always] - - Possible values: - - always: Colors on - - auto: Colors on - - never: Colors off - -Display: - -v, --verbosity... - Set the minimum log level. - - -v Errors - -vv Warnings - -vvv Info - -vvvv Debug - -vvvvv Traces (warning: very verbose!) 
- - -q, --quiet - Silence all log output -``` \ No newline at end of file diff --git a/book/cli/reth/test-vectors.md b/book/cli/reth/test-vectors.md deleted file mode 100644 index 844c5ed8455..00000000000 --- a/book/cli/reth/test-vectors.md +++ /dev/null @@ -1,113 +0,0 @@ -# reth test-vectors - -Generate Test Vectors - -```bash -$ reth test-vectors --help -Usage: reth test-vectors [OPTIONS] - -Commands: - tables Generates test vectors for specified tables. If no table is specified, generate for all - help Print this message or the help of the given subcommand(s) - -Options: - --chain - The chain this node is running. - Possible values are either a built-in chain or the path to a chain specification file. - - Built-in chains: - mainnet, sepolia, holesky, dev - - [default: mainnet] - - --instance - Add a new instance of a node. - - Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - - Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - - Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 - - [default: 1] - - -h, --help - Print help (see a summary with '-h') - -Logging: - --log.stdout.format - The format to use for logs written to stdout - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.stdout.filter - The filter to use for logs written to stdout - - [default: ] - - --log.file.format - The format to use for logs written to the log file - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.file.filter - The filter to use for logs written to the log file - - [default: debug] - - --log.file.directory - The path to put log files in - - [default: /logs] - - --log.file.max-size - The maximum size (in MB) of one log file - - [default: 200] - - --log.file.max-files - The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - - [default: 5] - - --log.journald - Write logs to journald - - --log.journald.filter - The filter to use for logs written to journald - - [default: error] - - --color - Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - - [default: always] - - Possible values: - - always: Colors on - - auto: Colors on - - never: Colors off - -Display: - -v, --verbosity... - Set the minimum log level. - - -v Errors - -vv Warnings - -vvv Info - -vvvv Debug - -vvvvv Traces (warning: very verbose!) 
- - -q, --quiet - Silence all log output -``` \ No newline at end of file diff --git a/book/cli/update.sh b/book/cli/update.sh deleted file mode 100755 index 6e792df0f2b..00000000000 --- a/book/cli/update.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -BOOK_ROOT="$(dirname "$(dirname "$0")")" -RETH=${1:-"$(dirname "$BOOK_ROOT")/target/debug/reth"} - -cmd=( - "$(dirname "$0")/help.rs" - --root-dir "$BOOK_ROOT/" - --root-indentation 2 - --root-summary - --out-dir "$BOOK_ROOT/cli/" - "$RETH" -) -echo "Running: $" "${cmd[*]}" -"${cmd[@]}" diff --git a/book/developers/contribute.md b/book/developers/contribute.md deleted file mode 100644 index 74f00e69a1a..00000000000 --- a/book/developers/contribute.md +++ /dev/null @@ -1,9 +0,0 @@ -# Contribute - - - -Reth has docs specifically geared for developers and contributors, including documentation on the structure and architecture of reth, the general workflow we employ, and other useful tips. - -You can find these docs [here](https://github.com/paradigmxyz/reth/tree/main/docs). - -Check out our contributing guidelines [here](https://github.com/paradigmxyz/reth/blob/main/CONTRIBUTING.md). diff --git a/book/developers/developers.md b/book/developers/developers.md deleted file mode 100644 index 9d8c5a9c673..00000000000 --- a/book/developers/developers.md +++ /dev/null @@ -1,3 +0,0 @@ -# Developers - -Reth is composed of several crates that can be used in standalone projects. If you are interested in using one or more of the crates, you can get an overview of them in the [developer docs](https://github.com/paradigmxyz/reth/tree/main/docs), or take a look at the [crate docs](https://paradigmxyz.github.io/reth/docs). diff --git a/book/installation/priorities.md b/book/installation/priorities.md deleted file mode 100644 index f7444e79d63..00000000000 --- a/book/installation/priorities.md +++ /dev/null @@ -1,18 +0,0 @@ -# Update Priorities - -When publishing releases, reth will include an "Update Priority" section in the release notes, in the same manner Lighthouse does. - -The "Update Priority" section will include a table which may appear like so: - -| User Class | Priority | -|----------------------|-----------------| -| Payload Builders | Medium Priority | -| Non-Payload Builders | Low Priority | - -To understand this table, the following terms are important: - -- *Payload builders* are those who use reth to build and validate payloads. -- *Non-payload builders* are those who run reth for other purposes (e.g., data analysis, RPC or applications). -- *High priority* updates should be completed as soon as possible (e.g., hours or days). -- *Medium priority* updates should be completed at the next convenience (e.g., days or a week). -- *Low priority* updates should be completed in the next routine update cycle (e.g., two weeks). diff --git a/book/run/ports.md b/book/run/ports.md deleted file mode 100644 index 5239a5262c4..00000000000 --- a/book/run/ports.md +++ /dev/null @@ -1,38 +0,0 @@ -# Ports - -This section provides essential information about the ports used by the system, their primary purposes, and recommendations for exposure settings. - -## Peering Ports - -- **Port:** 30303 -- **Protocol:** TCP and UDP -- **Purpose:** Peering with other nodes for synchronization of blockchain data. Nodes communicate through this port to maintain network consensus and share updated information. -- **Exposure Recommendation:** This port should be exposed to enable seamless interaction and synchronization with other nodes in the network. 
- -## Metrics Port - -- **Port:** 9001 -- **Protocol:** TCP -- **Purpose:** This port is designated for serving metrics related to the system's performance and operation. It allows internal monitoring and data collection for analysis. -- **Exposure Recommendation:** By default, this port should not be exposed to the public. It is intended for internal monitoring and analysis purposes. - -## HTTP RPC Port - -- **Port:** 8545 -- **Protocol:** TCP -- **Purpose:** Port 8545 provides an HTTP-based Remote Procedure Call (RPC) interface. It enables external applications to interact with the blockchain by sending requests over HTTP. -- **Exposure Recommendation:** Similar to the metrics port, exposing this port to the public is not recommended by default due to security considerations. - -## WS RPC Port - -- **Port:** 8546 -- **Protocol:** TCP -- **Purpose:** Port 8546 offers a WebSocket-based Remote Procedure Call (RPC) interface. It allows real-time communication between external applications and the blockchain. -- **Exposure Recommendation:** As with the HTTP RPC port, the WS RPC port should not be exposed by default for security reasons. - -## Engine API Port - -- **Port:** 8551 -- **Protocol:** TCP -- **Purpose:** Port 8551 facilitates communication between specific components, such as "reth" and "CL" (assuming their definitions are understood within the context of the system). It enables essential internal processes. -- **Exposure Recommendation:** This port is not meant to be exposed to the public by default. It should be reserved for internal communication between vital components of the system. diff --git a/book/run/run-a-node.md b/book/run/run-a-node.md deleted file mode 100644 index d8981e15522..00000000000 --- a/book/run/run-a-node.md +++ /dev/null @@ -1,15 +0,0 @@ -# Run a Node - -Congratulations, now that you have installed Reth, it's time to run it! - -In this chapter we'll go through a few different topics you'll encounter when running Reth, including: -1. [Running on mainnet or official testnets](./mainnet.md) -1. [Running on OP Stack chains](./optimism.md) -1. [Logs and Observability](./observability.md) -1. [Configuring reth.toml](./config.md) -1. [Transaction types](./transactions.md) -1. [Pruning & Full Node](./pruning.md) -1. [Ports](./ports.md) -1. [Troubleshooting](./troubleshooting.md) - -In the future, we also intend to support the [OP Stack](https://docs.optimism.io/get-started/superchain), which will allow you to run Reth as a Layer 2 client. More there soon! 
diff --git a/book/sources/Cargo.toml b/book/sources/Cargo.toml deleted file mode 100644 index c98694dc5c3..00000000000 --- a/book/sources/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[workspace] -members = ["exex/hello-world", "exex/remote", "exex/tracking-state"] - -# Explicitly set the resolver to version 2, which is the default for packages with edition >= 2021 -# https://doc.rust-lang.org/edition-guide/rust-2021/default-cargo-resolver.html -resolver = "2" - -[patch.'https://github.com/paradigmxyz/reth'] -reth = { path = "../../bin/reth" } -reth-exex = { path = "../../crates/exex/exex" } -reth-node-ethereum = { path = "../../crates/ethereum/node" } -reth-tracing = { path = "../../crates/tracing" } -reth-node-api = { path = "../../crates/node/api" } - -[patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" } diff --git a/book/templates/source_and_github.md b/book/templates/source_and_github.md deleted file mode 100644 index c4abbaa3894..00000000000 --- a/book/templates/source_and_github.md +++ /dev/null @@ -1,4 +0,0 @@ -[File: [[ #path ]]](https://github.com/paradigmxyz/reth/blob/main/[[ #path ]]) -```rust,no_run,noplayground -{{#include [[ #path_to_root ]][[ #path ]]:[[ #anchor ]]}} -``` \ No newline at end of file diff --git a/book/theme/head.hbs b/book/theme/head.hbs deleted file mode 100644 index 37667d80f6e..00000000000 --- a/book/theme/head.hbs +++ /dev/null @@ -1,5 +0,0 @@ - - - - - diff --git a/crates/alloy-provider/Cargo.toml b/crates/alloy-provider/Cargo.toml new file mode 100644 index 00000000000..22a8e724890 --- /dev/null +++ b/crates/alloy-provider/Cargo.toml @@ -0,0 +1,49 @@ +[package] +name = "reth-alloy-provider" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Alloy provider implementation for reth that fetches state via RPC" + +[lints] +workspace = true + +[dependencies] +# reth +reth-storage-api.workspace = true +reth-chainspec.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-errors.workspace = true +reth-execution-types.workspace = true +reth-prune-types.workspace = true +reth-node-types.workspace = true +reth-trie.workspace = true +reth-stages-types.workspace = true +reth-db-api.workspace = true +reth-rpc-convert.workspace = true + +# alloy +alloy-provider.workspace = true +alloy-network.workspace = true +alloy-primitives.workspace = true +alloy-consensus.workspace = true +alloy-rpc-types.workspace = true +alloy-rpc-types-engine.workspace = true +alloy-eips.workspace = true + +# async +tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } + +# other +tracing.workspace = true + +# revm +revm.workspace = true +revm-primitives.workspace = true + +[dev-dependencies] +tokio = { workspace = true, features = ["rt", "macros"] } diff --git a/crates/alloy-provider/README.md b/crates/alloy-provider/README.md new file mode 100644 index 00000000000..37a75f1b328 --- /dev/null +++ b/crates/alloy-provider/README.md @@ -0,0 +1,60 @@ +# Alloy Provider for Reth + +This crate provides an implementation of reth's `StateProviderFactory` and related traits that fetches state data via RPC instead of from a local database. + +Originally created by [cakevm](https://github.com/cakevm/alloy-reth-provider). 
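+
+A quick smoke-test sketch (hypothetical test name; the endpoint and the
+`<_, EthereumNode>` parameter order follow the usage example below). The
+provider bridges reth's synchronous provider traits onto async RPC calls via
+`tokio::task::block_in_place`, which panics on a current-thread runtime, so
+anything driving it needs the multi-threaded Tokio flavor:
+
+```rust
+use alloy_primitives::Address;
+use alloy_provider::ProviderBuilder;
+use reth_alloy_provider::AlloyRethProvider;
+use reth_node_ethereum::EthereumNode;
+use reth_provider::StateProviderFactory;
+
+// The default single-threaded `#[tokio::test]` would panic inside
+// `block_in_place`; the multi-thread flavor is required.
+#[tokio::test(flavor = "multi_thread")]
+async fn fetches_remote_state() {
+    let rpc = ProviderBuilder::new().connect("https://eth.merkle.io").await.unwrap();
+    let factory = AlloyRethProvider::<_, EthereumNode>::new(rpc);
+    // `latest()` resolves the current block number over RPC, then pins a
+    // state provider to that block.
+    let state = factory.latest().unwrap();
+    assert!(state.account_balance(&Address::ZERO).is_ok());
+}
+```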
+
+## Features
+
+- Implements `StateProviderFactory` for remote RPC state access
+- Supports Ethereum networks
+- Useful for testing without requiring a full database
+- Can be used with reth ExEx (Execution Extensions) for testing
+
+## Usage
+
+```rust
+use alloy_provider::ProviderBuilder;
+use alloy_rpc_types::BlockId;
+use reth_alloy_provider::AlloyRethProvider;
+use reth_node_ethereum::EthereumNode;
+use reth_provider::StateProviderFactory;
+
+// Initialize the RPC provider
+let provider = ProviderBuilder::new()
+    .connect("https://eth.merkle.io")
+    .await
+    .unwrap();
+
+// Create the provider factory; the node type configuration is a type
+// parameter, with the network parameter left to its default
+let db_provider = AlloyRethProvider::<_, EthereumNode>::new(provider);
+
+// Get state at a specific block
+let state = db_provider.state_by_block_id(BlockId::number(16148323)).unwrap();
+```
+
+## Configuration
+
+The provider can be configured with custom settings:
+
+```rust
+use reth_alloy_provider::{AlloyRethProvider, AlloyRethProviderConfig};
+use reth_node_ethereum::EthereumNode;
+
+let config = AlloyRethProviderConfig {
+    compute_state_root: true, // Enable state root computation
+};
+
+let db_provider = AlloyRethProvider::<_, EthereumNode>::new_with_config(provider, config);
+```
+
+## Technical Details
+
+The provider uses `alloy_network::AnyNetwork` for network operations, providing compatibility with various Ethereum-based networks while maintaining the expected block structure with headers.
+
+## License
+
+Licensed under either of:
+
+- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
\ No newline at end of file
diff --git a/crates/alloy-provider/src/lib.rs b/crates/alloy-provider/src/lib.rs
new file mode 100644
index 00000000000..ba4767006a4
--- /dev/null
+++ b/crates/alloy-provider/src/lib.rs
@@ -0,0 +1,1769 @@
+//! # Alloy Provider for Reth
+//!
+//! This crate provides an implementation of reth's `StateProviderFactory` and related traits
+//! that fetches state data via RPC instead of from a local database.
+//!
+//! Originally created by [cakevm](https://github.com/cakevm/alloy-reth-provider).
+//!
+//! ## Features
+//!
+//! - Implements `StateProviderFactory` for remote RPC state access
+//! - Supports Ethereum and Optimism networks
+//! - Useful for testing without requiring a full database
+//! - Can be used with reth ExEx (Execution Extensions) for testing
+
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
+    html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
+    issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
+)]
+#![cfg_attr(not(test), warn(unused_crate_dependencies))]
+#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+
+use alloy_consensus::BlockHeader;
+use alloy_eips::BlockHashOrNumber;
+use alloy_network::{primitives::HeaderResponse, BlockResponse};
+use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, TxHash, TxNumber, B256, U256};
+use alloy_provider::{network::Network, Provider};
+use alloy_rpc_types::BlockId;
+use alloy_rpc_types_engine::ForkchoiceState;
+use reth_chainspec::{ChainInfo, ChainSpecProvider};
+use reth_db_api::{
+    mock::{DatabaseMock, TxMock},
+    models::StoredBlockBodyIndices,
+};
+use reth_errors::{ProviderError, ProviderResult};
+use reth_node_types::{BlockTy, HeaderTy, NodeTypes, PrimitivesTy, ReceiptTy, TxTy};
+use reth_primitives::{
+    Account, Bytecode, RecoveredBlock, SealedBlock, SealedHeader, TransactionMeta,
+};
+use reth_provider::{
+    AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BytecodeReader,
+    CanonChainTracker, CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions,
+    ChainStateBlockReader, ChainStateBlockWriter, ChangeSetReader, DatabaseProviderFactory,
+    HeaderProvider, PruneCheckpointReader, ReceiptProvider, StageCheckpointReader, StateProvider,
+    StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, StorageReader,
+    TransactionVariant, TransactionsProvider,
+};
+use reth_prune_types::{PruneCheckpoint, PruneSegment};
+use reth_rpc_convert::TryFromBlockResponse;
+use reth_stages_types::{StageCheckpoint, StageId};
+use reth_storage_api::{
+    BlockBodyIndicesProvider, BlockReaderIdExt, BlockSource, DBProvider, NodePrimitivesProvider,
+    ReceiptProviderIdExt, StatsReader,
+};
+use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState, MultiProof, TrieInput};
+use std::{
+    collections::BTreeMap,
+    future::Future,
+    ops::{RangeBounds, RangeInclusive},
+    sync::Arc,
+};
+use tokio::{runtime::Handle, sync::broadcast};
+use tracing::trace;
+
+/// Configuration for `AlloyRethProvider`
+#[derive(Debug, Clone, Default)]
+pub struct AlloyRethProviderConfig {
+    /// Whether to compute state root when creating execution outcomes
+    pub compute_state_root: bool,
+}
+
+impl AlloyRethProviderConfig {
+    /// Sets whether to compute state root when creating execution outcomes
+    pub const fn with_compute_state_root(mut self, compute: bool) -> Self {
+        self.compute_state_root = compute;
+        self
+    }
+}
+
+/// A provider implementation that uses Alloy RPC to fetch state data
+///
+/// This provider implements reth's `StateProviderFactory` and related traits,
+/// allowing it to be used as a drop-in replacement for database-backed providers
+/// in scenarios where RPC access is preferred (e.g., testing).
+///
+/// The provider type is generic over the network type N (defaulting to `AnyNetwork`),
+/// but the current implementation is specialized for `alloy_network::AnyNetwork`
+/// as it needs to access block header fields directly.
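+///
+/// # Example
+///
+/// A minimal usage sketch (marked `ignore` since it needs a live RPC endpoint;
+/// it assumes an async context on a multi-threaded Tokio runtime, `EthereumNode`
+/// from `reth-node-ethereum`, and the imports shown in the crate README):
+///
+/// ```ignore
+/// let rpc = ProviderBuilder::new().connect("https://eth.merkle.io").await?;
+/// let factory = AlloyRethProvider::<_, EthereumNode>::new(rpc);
+/// let state = factory.state_by_block_id(BlockId::number(16148323))?;
+/// let balance = state.account_balance(&Address::ZERO)?;
+/// ```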
+#[derive(Clone)]
+pub struct AlloyRethProvider<P, Node, N = alloy_network::AnyNetwork>
+where
+    Node: NodeTypes,
+{
+    /// The underlying Alloy provider
+    provider: P,
+    /// Node types marker
+    node_types: std::marker::PhantomData<Node>,
+    /// Network marker
+    network: std::marker::PhantomData<N>,
+    /// Broadcast channel for canon state notifications
+    canon_state_notification: broadcast::Sender<CanonStateNotification<PrimitivesTy<Node>>>,
+    /// Configuration for the provider
+    config: AlloyRethProviderConfig,
+    /// Cached chain spec
+    chain_spec: Arc<Node::ChainSpec>,
+}
+
+impl<P, Node: NodeTypes, N> std::fmt::Debug for AlloyRethProvider<P, Node, N> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("AlloyRethProvider").field("config", &self.config).finish()
+    }
+}
+
+impl<P, Node: NodeTypes, N> AlloyRethProvider<P, Node, N> {
+    /// Creates a new `AlloyRethProvider` with default configuration
+    pub fn new(provider: P) -> Self
+    where
+        Node::ChainSpec: Default,
+    {
+        Self::new_with_config(provider, AlloyRethProviderConfig::default())
+    }
+
+    /// Creates a new `AlloyRethProvider` with custom configuration
+    pub fn new_with_config(provider: P, config: AlloyRethProviderConfig) -> Self
+    where
+        Node::ChainSpec: Default,
+    {
+        let (canon_state_notification, _) = broadcast::channel(1);
+        Self {
+            provider,
+            node_types: std::marker::PhantomData,
+            network: std::marker::PhantomData,
+            canon_state_notification,
+            config,
+            chain_spec: Arc::new(Node::ChainSpec::default()),
+        }
+    }
+
+    /// Helper function to execute async operations in a blocking context
+    fn block_on_async<F, T>(&self, fut: F) -> T
+    where
+        F: Future<Output = T>,
+    {
+        tokio::task::block_in_place(move || Handle::current().block_on(fut))
+    }
+
+    /// Get a reference to the canon state notification sender
+    pub const fn canon_state_notification(
+        &self,
+    ) -> &broadcast::Sender<CanonStateNotification<PrimitivesTy<Node>>> {
+        &self.canon_state_notification
+    }
+}
+
+impl<P, Node, N> AlloyRethProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+    /// Helper function to create a state provider for a given block ID
+    fn create_state_provider(&self, block_id: BlockId) -> AlloyRethStateProvider<P, Node, N> {
+        AlloyRethStateProvider::with_chain_spec(
+            self.provider.clone(),
+            block_id,
+            self.chain_spec.clone(),
+        )
+    }
+
+    /// Helper function to get state provider by block number
+    fn state_by_block_number(
+        &self,
+        block_number: BlockNumber,
+    ) -> Result<StateProviderBox, ProviderError> {
+        Ok(Box::new(self.create_state_provider(BlockId::number(block_number))))
+    }
+}
+
+// Implementation note: While the types are generic over Network N, the trait implementations
+// are specialized for AnyNetwork because they need to access block header fields.
+// This allows the types to be instantiated with any network while the actual functionality
+// requires AnyNetwork. Future improvements could add trait bounds for networks with
+// compatible block structures.
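+
+// For example, the `BlockHashReader` impl below resolves a block hash with a
+// single `eth_getBlockByNumber` round trip; the `HeaderResponse`/`BlockResponse`
+// bounds on the network types are what make the generic `b.header().hash()`
+// projection possible. Shape of the call, for orientation:
+//
+//     let block = provider.get_block_by_number(number.into()).await?; // one RPC
+//     let hash = block.map(|b| b.header().hash());                    // Option<B256>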
+impl<P, Node, N> BlockHashReader for AlloyRethProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+    fn block_hash(&self, number: BlockNumber) -> Result<Option<B256>, ProviderError> {
+        let block = self.block_on_async(async {
+            self.provider.get_block_by_number(number.into()).await.map_err(ProviderError::other)
+        })?;
+        Ok(block.map(|b| b.header().hash()))
+    }
+
+    fn canonical_hashes_range(
+        &self,
+        _start: BlockNumber,
+        _end: BlockNumber,
+    ) -> Result<Vec<B256>, ProviderError> {
+        // Would need to make multiple RPC calls
+        Err(ProviderError::UnsupportedProvider)
+    }
+}
+
+impl<P, Node, N> BlockNumReader for AlloyRethProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+    fn chain_info(&self) -> Result<ChainInfo, ProviderError> {
+        // For RPC provider, we can't get full chain info
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn best_block_number(&self) -> Result<BlockNumber, ProviderError> {
+        self.block_on_async(async {
+            self.provider.get_block_number().await.map_err(ProviderError::other)
+        })
+    }
+
+    fn last_block_number(&self) -> Result<BlockNumber, ProviderError> {
+        self.best_block_number()
+    }
+
+    fn block_number(&self, hash: B256) -> Result<Option<BlockNumber>, ProviderError> {
+        let block = self.block_on_async(async {
+            self.provider.get_block_by_hash(hash).await.map_err(ProviderError::other)
+        })?;
+        Ok(block.map(|b| b.header().number()))
+    }
+}
+
+impl<P, Node, N> BlockIdReader for AlloyRethProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+    fn block_number_for_id(&self, block_id: BlockId) -> Result<Option<BlockNumber>, ProviderError> {
+        match block_id {
+            BlockId::Hash(hash) => {
+                let block = self.block_on_async(async {
+                    self.provider
+                        .get_block_by_hash(hash.block_hash)
+                        .await
+                        .map_err(ProviderError::other)
+                })?;
+                Ok(block.map(|b| b.header().number()))
+            }
+            BlockId::Number(number_or_tag) => match number_or_tag {
+                alloy_rpc_types::BlockNumberOrTag::Number(num) => Ok(Some(num)),
+                alloy_rpc_types::BlockNumberOrTag::Latest => self.block_on_async(async {
+                    self.provider.get_block_number().await.map(Some).map_err(ProviderError::other)
+                }),
+                _ => Ok(None),
+            },
+        }
+    }
+
+    fn pending_block_num_hash(&self) -> Result<Option<alloy_eips::BlockNumHash>, ProviderError> {
+        // RPC doesn't provide pending block number and hash together
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn safe_block_num_hash(&self) -> Result<Option<alloy_eips::BlockNumHash>, ProviderError> {
+        // RPC doesn't provide safe block number and hash
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn finalized_block_num_hash(&self) -> Result<Option<alloy_eips::BlockNumHash>, ProviderError> {
+        // RPC doesn't provide finalized block number and hash
+        Err(ProviderError::UnsupportedProvider)
+    }
+}
+
+impl<P, Node, N> HeaderProvider for AlloyRethProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+    type Header = HeaderTy<Node>;
+
+    fn header(&self, _block_hash: &BlockHash) -> ProviderResult<Option<Self::Header>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn header_by_number(&self, _num: u64) -> ProviderResult<Option<Self::Header>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn header_td(&self, _hash: &BlockHash) -> ProviderResult<Option<U256>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn header_td_by_number(&self, _number: BlockNumber) -> ProviderResult<Option<U256>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn headers_range(
+        &self,
+        _range: impl RangeBounds<BlockNumber>,
+    ) -> ProviderResult<Vec<Self::Header>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn sealed_header(
+        &self,
+        _number: BlockNumber,
+    ) -> ProviderResult<Option<SealedHeader<Self::Header>>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn sealed_headers_while(
+        &self,
+        _range: impl RangeBounds<BlockNumber>,
+        _predicate: impl FnMut(&SealedHeader<Self::Header>) -> bool,
+    ) -> ProviderResult<Vec<SealedHeader<Self::Header>>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+}
+
+impl<P, Node, N> BlockBodyIndicesProvider for AlloyRethProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+    fn block_body_indices(&self, _num: u64) -> ProviderResult<Option<StoredBlockBodyIndices>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn block_body_indices_range(
+        &self,
+        _range: RangeInclusive<BlockNumber>,
+    ) -> ProviderResult<Vec<StoredBlockBodyIndices>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+}
+
+impl<P, Node, N> BlockReader for AlloyRethProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+    BlockTy<Node>: TryFromBlockResponse<N>,
+{
+    type Block = BlockTy<Node>;
+
+    fn find_block_by_hash(
+        &self,
+        _hash: B256,
+        _source: BlockSource,
+    ) -> ProviderResult<Option<Self::Block>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn block(&self, id: BlockHashOrNumber) -> ProviderResult<Option<Self::Block>> {
+        let block_response = self.block_on_async(async {
+            self.provider.get_block(id.into()).full().await.map_err(ProviderError::other)
+        })?;
+
+        let Some(block_response) = block_response else {
+            // If the block was not found, return None
+            return Ok(None);
+        };
+
+        // Convert the network block response to primitive block
+        let block = <BlockTy<Node> as TryFromBlockResponse<N>>::from_block_response(block_response)
+            .map_err(ProviderError::other)?;
+
+        Ok(Some(block))
+    }
+
+    fn pending_block(&self) -> ProviderResult<Option<RecoveredBlock<Self::Block>>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn pending_block_and_receipts(
+        &self,
+    ) -> ProviderResult<Option<(RecoveredBlock<Self::Block>, Vec<Self::Receipt>)>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn recovered_block(
+        &self,
+        _id: BlockHashOrNumber,
+        _transaction_kind: TransactionVariant,
+    ) -> ProviderResult<Option<RecoveredBlock<Self::Block>>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn sealed_block_with_senders(
+        &self,
+        _id: BlockHashOrNumber,
+        _transaction_kind: TransactionVariant,
+    ) -> ProviderResult<Option<RecoveredBlock<Self::Block>>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn block_range(&self, _range: RangeInclusive<BlockNumber>) -> ProviderResult<Vec<Self::Block>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn block_with_senders_range(
+        &self,
+        _range: RangeInclusive<BlockNumber>,
+    ) -> ProviderResult<Vec<RecoveredBlock<Self::Block>>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn recovered_block_range(
+        &self,
+        _range: RangeInclusive<BlockNumber>,
+    ) -> ProviderResult<Vec<RecoveredBlock<Self::Block>>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+}
+
+impl<P, Node, N> BlockReaderIdExt for AlloyRethProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+    BlockTy<Node>: TryFromBlockResponse<N>,
+{
+    fn block_by_id(&self, id: BlockId) -> ProviderResult<Option<Self::Block>> {
+        match id {
+            BlockId::Number(number_or_tag) => self.block_by_number_or_tag(number_or_tag),
+            BlockId::Hash(hash) => self.block_by_hash(hash.block_hash),
+        }
+    }
+
+    fn sealed_header_by_id(
+        &self,
+        _id: BlockId,
+    ) -> ProviderResult<Option<SealedHeader<Self::Header>>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn header_by_id(&self, _id: BlockId) -> ProviderResult<Option<Self::Header>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+}
+
+impl<P, Node, N> ReceiptProvider for AlloyRethProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+    type Receipt = ReceiptTy<Node>;
+
+    fn receipt(&self, _id: TxNumber) -> ProviderResult<Option<Self::Receipt>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn receipt_by_hash(&self, _hash: TxHash) -> ProviderResult<Option<Self::Receipt>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn receipts_by_block(
+        &self,
+        _block: BlockHashOrNumber,
+    ) -> ProviderResult<Option<Vec<Self::Receipt>>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn receipts_by_tx_range(
+        &self,
+        _range: impl RangeBounds<TxNumber>,
+    ) -> ProviderResult<Vec<Self::Receipt>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn receipts_by_block_range(
+        &self,
+        _block_range: RangeInclusive<BlockNumber>,
+    ) -> ProviderResult<Vec<Vec<Self::Receipt>>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+}
+
+impl<P, Node, N> ReceiptProviderIdExt for AlloyRethProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+}
+
+impl<P, Node, N> TransactionsProvider for AlloyRethProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+    type Transaction = TxTy<Node>;
+
+    fn transaction_id(&self, _tx_hash: TxHash) -> ProviderResult<Option<TxNumber>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn transaction_by_id(&self, _id: TxNumber) -> ProviderResult<Option<Self::Transaction>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn transaction_by_id_unhashed(
+        &self,
+        _id: TxNumber,
+    ) -> ProviderResult<Option<Self::Transaction>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult<Option<Self::Transaction>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn transaction_by_hash_with_meta(
+        &self,
+        _hash: TxHash,
+    ) -> ProviderResult<Option<(Self::Transaction, TransactionMeta)>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn transaction_block(&self, _id: TxNumber) -> ProviderResult<Option<BlockNumber>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn transactions_by_block(
+        &self,
+        _block: BlockHashOrNumber,
+    ) -> ProviderResult<Option<Vec<Self::Transaction>>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn transactions_by_block_range(
+        &self,
+        _range: impl RangeBounds<BlockNumber>,
+    ) -> ProviderResult<Vec<Vec<Self::Transaction>>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn transactions_by_tx_range(
+        &self,
+        _range: impl RangeBounds<TxNumber>,
+    ) -> ProviderResult<Vec<Self::Transaction>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn senders_by_tx_range(
+        &self,
+        _range: impl RangeBounds<TxNumber>,
+    ) -> ProviderResult<Vec<Address>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+
+    fn transaction_sender(&self, _id: TxNumber) -> ProviderResult<Option<Address>> {
+        Err(ProviderError::UnsupportedProvider)
+    }
+}
+
+impl<P, Node, N> StateProviderFactory for AlloyRethProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+    fn latest(&self) -> Result<StateProviderBox, ProviderError> {
+        trace!(target: "alloy-provider", "Getting latest state provider");
+
+        let block_number = self.block_on_async(async {
+            self.provider.get_block_number().await.map_err(ProviderError::other)
+        })?;
+
+        self.state_by_block_number(block_number)
+    }
+
+    fn state_by_block_id(&self, block_id: BlockId) -> Result<StateProviderBox, ProviderError> {
+        Ok(Box::new(self.create_state_provider(block_id)))
+    }
+
+    fn state_by_block_number_or_tag(
+        &self,
+        number_or_tag: alloy_rpc_types::BlockNumberOrTag,
+    ) -> Result<StateProviderBox, ProviderError> {
+        match number_or_tag {
+            alloy_rpc_types::BlockNumberOrTag::Latest => self.latest(),
+            alloy_rpc_types::BlockNumberOrTag::Pending => self.pending(),
+            alloy_rpc_types::BlockNumberOrTag::Number(num) => self.state_by_block_number(num),
+            _ => Err(ProviderError::UnsupportedProvider),
+        }
+    }
+
+    fn history_by_block_number(
+        &self,
+        block_number: BlockNumber,
+    ) -> Result<StateProviderBox, ProviderError> {
+        self.state_by_block_number(block_number)
+    }
+
+    fn history_by_block_hash(
+        &self,
+        block_hash: BlockHash,
+    ) -> Result<StateProviderBox, ProviderError> {
+        self.state_by_block_hash(block_hash)
+    }
+
+    fn state_by_block_hash(
+        &self,
+        block_hash: BlockHash,
+    ) -> Result<StateProviderBox, ProviderError> {
+        trace!(target: "alloy-provider", ?block_hash, "Getting state provider by block hash");
+
+        let block = self.block_on_async(async {
+            self.provider
+                .get_block_by_hash(block_hash)
+                .await
+                .map_err(ProviderError::other)?
+                .ok_or(ProviderError::BlockHashNotFound(block_hash))
+        })?;
+
+        let block_number = block.header().number();
+        Ok(Box::new(self.create_state_provider(BlockId::number(block_number))))
+    }
+
+    fn pending(&self) -> Result<StateProviderBox, ProviderError> {
+        trace!(target: "alloy-provider", "Getting pending state provider");
+        self.latest()
+    }
+
+    fn pending_state_by_hash(
+        &self,
+        _block_hash: B256,
+    ) -> Result<Option<StateProviderBox>, ProviderError> {
+        // RPC provider doesn't support pending state by hash
+        Err(ProviderError::UnsupportedProvider)
+    }
+}
+
+impl<P, Node, N> DatabaseProviderFactory for AlloyRethProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+    type DB = DatabaseMock;
+    type ProviderRW = AlloyRethStateProvider<P, Node, N>;
+    type Provider = AlloyRethStateProvider<P, Node, N>;
+
+    fn database_provider_ro(&self) -> Result<Self::Provider, ProviderError> {
+        // RPC provider returns a new state provider
+        let block_number = self.block_on_async(async {
+            self.provider.get_block_number().await.map_err(ProviderError::other)
+        })?;
+
+        Ok(self.create_state_provider(BlockId::number(block_number)))
+    }
+
+    fn database_provider_rw(&self) -> Result<Self::ProviderRW, ProviderError> {
+        // RPC provider returns a new state provider
+        let block_number = self.block_on_async(async {
+            self.provider.get_block_number().await.map_err(ProviderError::other)
+        })?;
+
+        Ok(self.create_state_provider(BlockId::number(block_number)))
+    }
+}
+
+impl<P, Node, N> CanonChainTracker for AlloyRethProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+    type Header = alloy_consensus::Header;
+
+    fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) {
+        // No-op for RPC provider
+    }
+
+    fn last_received_update_timestamp(&self) -> Option<std::time::Instant> {
+        None
+    }
+
+    fn set_canonical_head(&self, _header: SealedHeader) {
+        // No-op for RPC provider
+    }
+
+    fn set_safe(&self, _header: SealedHeader) {
+        // No-op for RPC provider
+    }
+
+    fn set_finalized(&self, _header: SealedHeader) {
+        // No-op for RPC provider
+    }
+}
+
+impl<P, Node, N> NodePrimitivesProvider for AlloyRethProvider<P, Node, N>
+where
+    P: Send + Sync,
+    N: Send + Sync,
+    Node: NodeTypes,
+{
+    type Primitives = PrimitivesTy<Node>;
+}
+
+impl<P, Node, N> CanonStateSubscriptions for AlloyRethProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+    fn subscribe_to_canonical_state(&self) -> CanonStateNotifications<PrimitivesTy<Node>> {
+        trace!(target: "alloy-provider", "Subscribing to canonical state notifications");
+        self.canon_state_notification.subscribe()
+    }
+}
+
+impl<P, Node, N> ChainSpecProvider for AlloyRethProvider<P, Node, N>
+where
+    P: Send + Sync,
+    N: Send + Sync,
+    Node: NodeTypes,
+    Node::ChainSpec: Default,
+{
+    type ChainSpec = Node::ChainSpec;
+
+    fn chain_spec(&self) -> Arc<Self::ChainSpec> {
+        self.chain_spec.clone()
+    }
+}
+
+/// State provider implementation that fetches state via RPC
+#[derive(Clone)]
+pub struct AlloyRethStateProvider<P, Node, N>
+where
+    Node: NodeTypes,
+{
+    /// The underlying Alloy provider
+    provider: P,
+    /// The block ID to fetch state at
+    block_id: BlockId,
+    /// Node types marker
+    node_types: std::marker::PhantomData<Node>,
+    /// Network marker
+    network: std::marker::PhantomData<N>,
+    /// Cached chain spec (shared with parent provider)
+    chain_spec: Option<Arc<Node::ChainSpec>>,
+}
+
+impl<P: std::fmt::Debug, Node: NodeTypes, N> std::fmt::Debug
+    for AlloyRethStateProvider<P, Node, N>
+{
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("AlloyRethStateProvider")
+            .field("provider", &self.provider)
+            .field("block_id", &self.block_id)
+            .finish()
+    }
+}
+
+impl<P, Node: NodeTypes, N> AlloyRethStateProvider<P, Node, N> {
+    /// Creates a new state provider for the given block
+    pub const fn new(
+        provider: P,
+        block_id: BlockId,
+        _primitives: std::marker::PhantomData<Node>,
+    ) -> Self {
+        Self {
+            provider,
+            block_id,
+            node_types: std::marker::PhantomData,
+            network: std::marker::PhantomData,
+            chain_spec: None,
+        }
+    }
+
+    /// Creates a new state provider with a cached chain spec
+    pub const fn with_chain_spec(
+        provider: P,
+        block_id: BlockId,
+        chain_spec: Arc<Node::ChainSpec>,
+    ) -> Self {
+        Self {
+            provider,
+            block_id,
+            node_types: std::marker::PhantomData,
+            network: std::marker::PhantomData,
+            chain_spec: Some(chain_spec),
+        }
+    }
+
+    /// Helper function to execute async operations in a blocking context
+    fn block_on_async<F, T>(&self, fut: F) -> T
+    where
+        F: Future<Output = T>,
+    {
+        tokio::task::block_in_place(move || Handle::current().block_on(fut))
+    }
+
+    /// Helper function to create a new state provider with a different block ID
+    fn with_block_id(&self, block_id: BlockId) -> Self
+    where
+        P: Clone,
+    {
+        Self {
+            provider: self.provider.clone(),
+            block_id,
+            node_types: self.node_types,
+            network: self.network,
+            chain_spec: self.chain_spec.clone(),
+        }
+    }
+
+    /// Get account information from RPC
+    fn get_account(&self, address: Address) -> Result<Option<Account>, ProviderError>
+    where
+        P: Provider<N> + Clone + 'static,
+        N: Network,
+    {
+        self.block_on_async(async {
+            // Get account info in a single RPC call
+            let account_info = self
+                .provider
+                .get_account_info(address)
+                .block_id(self.block_id)
+                .await
+                .map_err(ProviderError::other)?;
+
+            // Only return account if it exists (has balance, nonce, or code)
+            if account_info.balance.is_zero() &&
+                account_info.nonce == 0 &&
+                account_info.code.is_empty()
+            {
+                Ok(None)
+            } else {
+                let bytecode = if account_info.code.is_empty() {
+                    None
+                } else {
+                    Some(Bytecode::new_raw(account_info.code))
+                };
+
+                Ok(Some(Account {
+                    balance: account_info.balance,
+                    nonce: account_info.nonce,
+                    bytecode_hash: bytecode.as_ref().map(|b| b.hash_slow()),
+                }))
+            }
+        })
+    }
+}
+
+impl<P, Node, N> StateProvider for AlloyRethStateProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+    fn storage(
+        &self,
+        address: Address,
+        storage_key: StorageKey,
+    ) -> Result<Option<U256>, ProviderError> {
+        self.block_on_async(async {
+            let value = self
+                .provider
+                .get_storage_at(address, storage_key.into())
+                .block_id(self.block_id)
+                .await
+                .map_err(ProviderError::other)?;
+
+            if value.is_zero() {
+                Ok(None)
+            } else {
+                Ok(Some(value))
+            }
+        })
+    }
+
+    fn account_code(&self, addr: &Address) -> Result<Option<Bytecode>, ProviderError> {
+        self.block_on_async(async {
+            let code = self
+                .provider
+                .get_code_at(*addr)
+                .block_id(self.block_id)
+                .await
+                .map_err(ProviderError::other)?;
+
+            if code.is_empty() {
+                Ok(None)
+            } else {
+                Ok(Some(Bytecode::new_raw(code)))
+            }
+        })
+    }
+
+    fn account_balance(&self, addr: &Address) -> Result<Option<U256>, ProviderError> {
+        self.get_account(*addr).map(|acc| acc.map(|a| a.balance))
+    }
+
+    fn account_nonce(&self, addr: &Address) -> Result<Option<u64>, ProviderError> {
+        self.get_account(*addr).map(|acc| acc.map(|a| a.nonce))
+    }
+}
+
+impl<P, Node, N> BytecodeReader for AlloyRethStateProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+    fn bytecode_by_hash(&self, _code_hash: &B256) -> Result<Option<Bytecode>, ProviderError> {
+        // Cannot fetch bytecode by hash via RPC
+        Err(ProviderError::UnsupportedProvider)
+    }
+}
+
+impl<P, Node, N> AccountReader for AlloyRethStateProvider<P, Node, N>
+where
+    P: Provider<N> + Clone + 'static,
+    N: Network,
+    Node: NodeTypes,
+{
+    fn basic_account(&self, address: &Address) -> Result<Option<Account>, ProviderError> {
+        self.get_account(*address)
+    }
+}
+
+impl<P, Node, N> StateRootProvider for AlloyRethStateProvider<P, Node, N>
+where
+    P: Provider<N> + Clone +
'static, + N: Network, + Node: NodeTypes, +{ + fn state_root(&self, _state: HashedPostState) -> Result { + // Return the state root from the block + self.block_on_async(async { + let block = self + .provider + .get_block(self.block_id) + .await + .map_err(ProviderError::other)? + .ok_or(ProviderError::HeaderNotFound(0.into()))?; + + Ok(block.header().state_root()) + }) + } + + fn state_root_from_nodes(&self, _input: TrieInput) -> Result { + Err(ProviderError::UnsupportedProvider) + } + + fn state_root_with_updates( + &self, + _state: HashedPostState, + ) -> Result<(B256, TrieUpdates), ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn state_root_from_nodes_with_updates( + &self, + _input: TrieInput, + ) -> Result<(B256, TrieUpdates), ProviderError> { + Err(ProviderError::UnsupportedProvider) + } +} + +impl StorageReader for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + fn plain_state_storages( + &self, + addresses_with_keys: impl IntoIterator)>, + ) -> Result)>, ProviderError> { + let mut results = Vec::new(); + + for (address, keys) in addresses_with_keys { + let mut values = Vec::new(); + for key in keys { + let value = self.storage(address, key)?.unwrap_or_default(); + values.push(reth_primitives::StorageEntry::new(key, value)); + } + results.push((address, values)); + } + + Ok(results) + } + + fn changed_storages_with_range( + &self, + _range: RangeInclusive, + ) -> Result>, ProviderError> { + Ok(BTreeMap::new()) + } + + fn changed_storages_and_blocks_with_range( + &self, + _range: RangeInclusive, + ) -> Result>, ProviderError> { + Ok(BTreeMap::new()) + } +} + +impl reth_storage_api::StorageRootProvider for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + fn storage_root( + &self, + _address: Address, + _hashed_storage: reth_trie::HashedStorage, + ) -> Result { + // RPC doesn't provide storage root computation + Err(ProviderError::UnsupportedProvider) + } + + fn storage_proof( + &self, + _address: Address, + _slot: B256, + _hashed_storage: reth_trie::HashedStorage, + ) -> Result { + Err(ProviderError::UnsupportedProvider) + } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: reth_trie::HashedStorage, + ) -> Result { + Err(ProviderError::UnsupportedProvider) + } +} + +impl reth_storage_api::StateProofProvider for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + fn proof( + &self, + _input: TrieInput, + _address: Address, + _slots: &[B256], + ) -> Result { + Err(ProviderError::UnsupportedProvider) + } + + fn multiproof( + &self, + _input: TrieInput, + _targets: reth_trie::MultiProofTargets, + ) -> Result { + Err(ProviderError::UnsupportedProvider) + } + + fn witness( + &self, + _input: TrieInput, + _target: HashedPostState, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } +} + +impl reth_storage_api::HashedPostStateProvider for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + fn hashed_post_state(&self, _bundle_state: &revm::database::BundleState) -> HashedPostState { + // Return empty hashed post state for RPC provider + HashedPostState::default() + } +} + +impl StateReader for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + type Receipt = ReceiptTy; + + fn get_state( + &self, + _block: BlockNumber, + ) -> Result>, ProviderError> { + // 
RPC doesn't provide execution outcomes + Err(ProviderError::UnsupportedProvider) + } +} + +impl DBProvider for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + type Tx = TxMock; + + fn tx_ref(&self) -> &Self::Tx { + // We can't use a static here since TxMock doesn't allow direct construction + // This is fine since we're just returning a mock transaction + unimplemented!("tx_ref not supported for RPC provider") + } + + fn tx_mut(&mut self) -> &mut Self::Tx { + unimplemented!("tx_mut not supported for RPC provider") + } + + fn into_tx(self) -> Self::Tx { + TxMock::default() + } + + fn prune_modes_ref(&self) -> &reth_prune_types::PruneModes { + unimplemented!("prune modes not supported for RPC provider") + } + + fn disable_long_read_transaction_safety(self) -> Self { + // No-op for RPC provider + self + } +} + +impl BlockNumReader for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + fn chain_info(&self) -> Result { + self.block_on_async(async { + let block = self + .provider + .get_block(self.block_id) + .await + .map_err(ProviderError::other)? + .ok_or(ProviderError::HeaderNotFound(0.into()))?; + + Ok(ChainInfo { best_hash: block.header().hash(), best_number: block.header().number() }) + }) + } + + fn best_block_number(&self) -> Result { + self.block_on_async(async { + self.provider.get_block_number().await.map_err(ProviderError::other) + }) + } + + fn last_block_number(&self) -> Result { + self.best_block_number() + } + + fn block_number(&self, hash: B256) -> Result, ProviderError> { + self.block_on_async(async { + let block = + self.provider.get_block_by_hash(hash).await.map_err(ProviderError::other)?; + + Ok(block.map(|b| b.header().number())) + }) + } +} + +impl BlockHashReader for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + fn block_hash(&self, number: u64) -> Result, ProviderError> { + self.block_on_async(async { + let block = self + .provider + .get_block_by_number(number.into()) + .await + .map_err(ProviderError::other)?; + + Ok(block.map(|b| b.header().hash())) + }) + } + + fn canonical_hashes_range( + &self, + _start: BlockNumber, + _end: BlockNumber, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } +} + +impl BlockIdReader for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + fn block_number_for_id( + &self, + _block_id: BlockId, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn pending_block_num_hash(&self) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn safe_block_num_hash(&self) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn finalized_block_num_hash(&self) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } +} + +impl BlockReader for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + type Block = BlockTy; + + fn find_block_by_hash( + &self, + _hash: B256, + _source: reth_provider::BlockSource, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn block( + &self, + _id: alloy_rpc_types::BlockHashOrNumber, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn pending_block(&self) -> Result>, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn pending_block_and_receipts( + &self, + ) -> Result, 
Vec)>, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn recovered_block( + &self, + _id: alloy_rpc_types::BlockHashOrNumber, + _transaction_kind: TransactionVariant, + ) -> Result>, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn sealed_block_with_senders( + &self, + _id: alloy_rpc_types::BlockHashOrNumber, + _transaction_kind: TransactionVariant, + ) -> Result>>, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn block_range( + &self, + _range: RangeInclusive, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn block_with_senders_range( + &self, + _range: RangeInclusive, + ) -> Result>>, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn recovered_block_range( + &self, + _range: RangeInclusive, + ) -> Result>, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } +} + +impl TransactionsProvider for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + type Transaction = TxTy; + + fn transaction_id(&self, _tx_hash: B256) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn transaction_by_id(&self, _id: TxNumber) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn transaction_by_id_unhashed( + &self, + _id: TxNumber, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn transaction_by_hash(&self, _hash: B256) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn transaction_by_hash_with_meta( + &self, + _hash: B256, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn transaction_block(&self, _id: TxNumber) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn transactions_by_block( + &self, + _block: alloy_rpc_types::BlockHashOrNumber, + ) -> Result>, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn transactions_by_block_range( + &self, + _range: impl RangeBounds, + ) -> Result>, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn transactions_by_tx_range( + &self, + _range: impl RangeBounds, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn senders_by_tx_range( + &self, + _range: impl RangeBounds, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn transaction_sender(&self, _id: TxNumber) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } +} + +impl ReceiptProvider for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + type Receipt = ReceiptTy; + + fn receipt(&self, _id: TxNumber) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn receipt_by_hash(&self, _hash: B256) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn receipts_by_block( + &self, + _block: alloy_rpc_types::BlockHashOrNumber, + ) -> Result>, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn receipts_by_tx_range( + &self, + _range: impl RangeBounds, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn receipts_by_block_range( + &self, + _range: RangeInclusive, + ) -> Result>, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } +} + +impl HeaderProvider for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + type Header = HeaderTy; + + fn 
header(&self, _block_hash: &BlockHash) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn header_by_number(&self, _num: BlockNumber) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn header_td(&self, _hash: &BlockHash) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn header_td_by_number(&self, _number: BlockNumber) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn headers_range( + &self, + _range: impl RangeBounds, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn sealed_header( + &self, + _number: BlockNumber, + ) -> Result>>, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn sealed_headers_range( + &self, + _range: impl RangeBounds, + ) -> Result>>, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn sealed_headers_while( + &self, + _range: impl RangeBounds, + _predicate: impl FnMut(&SealedHeader>) -> bool, + ) -> Result>>, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } +} + +impl PruneCheckpointReader for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + fn get_prune_checkpoint( + &self, + _segment: PruneSegment, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn get_prune_checkpoints(&self) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } +} + +impl StageCheckpointReader for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + fn get_stage_checkpoint(&self, _id: StageId) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn get_stage_checkpoint_progress( + &self, + _id: StageId, + ) -> Result>, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn get_all_checkpoints(&self) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } +} + +impl ChangeSetReader for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + fn account_block_changeset( + &self, + _block_number: BlockNumber, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } +} + +impl StateProviderFactory for AlloyRethStateProvider +where + P: Provider + Clone + 'static + Send + Sync, + Node: NodeTypes + 'static, + Node::ChainSpec: Send + Sync, + N: Network, + Self: Clone + 'static, +{ + fn latest(&self) -> Result { + Ok(Box::new(self.clone()) as StateProviderBox) + } + + fn state_by_block_id(&self, block_id: BlockId) -> Result { + Ok(Box::new(self.with_block_id(block_id))) + } + + fn state_by_block_number_or_tag( + &self, + number_or_tag: alloy_rpc_types::BlockNumberOrTag, + ) -> Result { + match number_or_tag { + alloy_rpc_types::BlockNumberOrTag::Latest => self.latest(), + alloy_rpc_types::BlockNumberOrTag::Pending => self.pending(), + alloy_rpc_types::BlockNumberOrTag::Number(num) => self.history_by_block_number(num), + _ => Err(ProviderError::UnsupportedProvider), + } + } + + fn history_by_block_number( + &self, + block_number: BlockNumber, + ) -> Result { + Ok(Box::new(Self::new( + self.provider.clone(), + BlockId::number(block_number), + self.node_types, + ))) + } + + fn history_by_block_hash( + &self, + block_hash: BlockHash, + ) -> Result { + Ok(Box::new(self.with_block_id(BlockId::hash(block_hash)))) + } + + fn state_by_block_hash( + &self, + block_hash: BlockHash, + ) -> Result { + self.history_by_block_hash(block_hash) + } 
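    // A minimal usage sketch for the StateProviderFactory impl above,
    // illustrative only and not part of this diff: `StateProviderFactory`
    // and `account_nonce` are the real reth traits/methods, while the
    // caller itself is hypothetical.
    fn nonce_at_height<F: StateProviderFactory>(
        factory: &F,
        address: Address,
        height: BlockNumber,
    ) -> Result<Option<u64>, ProviderError> {
        // `history_by_block_number` pins the returned provider to `height`,
        // so the nonce read below resolves against that block's state,
        // fetched lazily over RPC by `AlloyRethStateProvider`.
        let state = factory.history_by_block_number(height)?;
        state.account_nonce(&address)
    }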
+ + fn pending(&self) -> Result { + Ok(Box::new(self.clone())) + } + + fn pending_state_by_hash( + &self, + _block_hash: B256, + ) -> Result, ProviderError> { + // RPC provider doesn't support pending state by hash + Err(ProviderError::UnsupportedProvider) + } +} + +impl ChainSpecProvider for AlloyRethStateProvider +where + P: Send + Sync + std::fmt::Debug, + N: Send + Sync, + Node: NodeTypes, + Node::ChainSpec: Default, +{ + type ChainSpec = Node::ChainSpec; + + fn chain_spec(&self) -> Arc { + if let Some(chain_spec) = &self.chain_spec { + chain_spec.clone() + } else { + // Fallback for when chain_spec is not provided + Arc::new(Node::ChainSpec::default()) + } + } +} + +// Note: FullExecutionDataProvider is already implemented via the blanket implementation +// for types that implement both ExecutionDataProvider and BlockExecutionForkProvider + +impl StatsReader for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + fn count_entries(&self) -> Result { + Ok(0) + } +} + +impl BlockBodyIndicesProvider for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + fn block_body_indices( + &self, + _num: u64, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn block_body_indices_range( + &self, + _range: RangeInclusive, + ) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } +} + +impl NodePrimitivesProvider for AlloyRethStateProvider +where + P: Send + Sync + std::fmt::Debug, + N: Send + Sync, + Node: NodeTypes, +{ + type Primitives = PrimitivesTy; +} + +impl ChainStateBlockReader for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + fn last_finalized_block_number(&self) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn last_safe_block_number(&self) -> Result, ProviderError> { + Err(ProviderError::UnsupportedProvider) + } +} + +impl ChainStateBlockWriter for AlloyRethStateProvider +where + P: Provider + Clone + 'static, + N: Network, + Node: NodeTypes, +{ + fn save_finalized_block_number(&self, _block_number: BlockNumber) -> Result<(), ProviderError> { + Err(ProviderError::UnsupportedProvider) + } + + fn save_safe_block_number(&self, _block_number: BlockNumber) -> Result<(), ProviderError> { + Err(ProviderError::UnsupportedProvider) + } +} + +// Async database wrapper for revm compatibility +#[allow(dead_code)] +#[derive(Debug, Clone)] +struct AsyncDbWrapper { + provider: P, + block_id: BlockId, + network: std::marker::PhantomData, +} + +#[allow(dead_code)] +impl AsyncDbWrapper { + const fn new(provider: P, block_id: BlockId) -> Self { + Self { provider, block_id, network: std::marker::PhantomData } + } + + /// Helper function to execute async operations in a blocking context + fn block_on_async(&self, fut: F) -> T + where + F: Future, + { + tokio::task::block_in_place(move || Handle::current().block_on(fut)) + } +} + +impl revm::Database for AsyncDbWrapper +where + P: Provider + Clone + 'static, + N: Network, +{ + type Error = ProviderError; + + fn basic(&mut self, address: Address) -> Result, Self::Error> { + self.block_on_async(async { + let account_info = self + .provider + .get_account_info(address) + .block_id(self.block_id) + .await + .map_err(ProviderError::other)?; + + // Only return account if it exists + if account_info.balance.is_zero() && + account_info.nonce == 0 && + account_info.code.is_empty() + { + Ok(None) + } else { + let code_hash = if 
account_info.code.is_empty() { + revm_primitives::KECCAK_EMPTY + } else { + revm_primitives::keccak256(&account_info.code) + }; + + Ok(Some(revm::state::AccountInfo { + balance: account_info.balance, + nonce: account_info.nonce, + code_hash, + code: if account_info.code.is_empty() { + None + } else { + Some(revm::bytecode::Bytecode::new_raw(account_info.code)) + }, + })) + } + }) + } + + fn code_by_hash(&mut self, _code_hash: B256) -> Result { + // Cannot fetch bytecode by hash via RPC + Ok(revm::bytecode::Bytecode::default()) + } + + fn storage(&mut self, address: Address, index: U256) -> Result { + let index = B256::from(index); + + self.block_on_async(async { + self.provider + .get_storage_at(address, index.into()) + .block_id(self.block_id) + .await + .map_err(ProviderError::other) + }) + } + + fn block_hash(&mut self, number: u64) -> Result { + self.block_on_async(async { + let block = self + .provider + .get_block_by_number(number.into()) + .await + .map_err(ProviderError::other)? + .ok_or(ProviderError::HeaderNotFound(number.into()))?; + + Ok(block.header().hash()) + }) + } +} diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 7e8e3e7027a..20f2a2a4c21 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -996,8 +996,8 @@ mod tests { use reth_ethereum_primitives::{EthPrimitives, Receipt}; use reth_primitives_traits::{Account, Bytecode}; use reth_storage_api::{ - AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, - StateRootProvider, StorageRootProvider, + AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, + StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; use reth_trie::{ AccountProof, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, @@ -1045,7 +1045,9 @@ mod tests { ) -> ProviderResult> { Ok(None) } + } + impl BytecodeReader for MockStateProvider { fn bytecode_by_hash(&self, _code_hash: &B256) -> ProviderResult> { Ok(None) } diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index e454b84b700..dfb76d0e583 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -4,8 +4,8 @@ use alloy_primitives::{keccak256, Address, BlockNumber, Bytes, StorageKey, Stora use reth_errors::ProviderResult; use reth_primitives_traits::{Account, Bytecode, NodePrimitives}; use reth_storage_api::{ - AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, - StateRootProvider, StorageRootProvider, + AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider, + StateProvider, StateRootProvider, StorageRootProvider, }; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, @@ -222,7 +222,9 @@ impl StateProvider for MemoryOverlayStateProviderRef<'_, N> { self.historical.storage(address, storage_key) } +} +impl BytecodeReader for MemoryOverlayStateProviderRef<'_, N> { fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult> { for block in &self.in_memory { if let Some(contract) = block.execution_output.bytecode(code_hash) { diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index d1c7b3e5a3a..afbc5c827c1 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -1,8 +1,8 @@ use crate::{ChainSpec, DepositContract}; use alloc::{boxed::Box, vec::Vec}; use alloy_chains::Chain; -use 
alloy_consensus::Header; -use alloy_eips::{eip1559::BaseFeeParams, eip7840::BlobParams}; +use alloy_consensus::{BlockHeader, Header}; +use alloy_eips::{calc_next_block_base_fee, eip1559::BaseFeeParams, eip7840::BlobParams}; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; use core::fmt::{Debug, Display}; @@ -13,7 +13,7 @@ use reth_network_peers::NodeRecord; #[auto_impl::auto_impl(&, Arc)] pub trait EthChainSpec: Send + Sync + Unpin + Debug { /// The header type of the network. - type Header; + type Header: BlockHeader; /// Returns the [`Chain`] object this spec targets. fn chain(&self) -> Chain; @@ -65,6 +65,16 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { /// Returns the final total difficulty if the Paris hardfork is known. fn final_paris_total_difficulty(&self) -> Option; + + /// See [`calc_next_block_base_fee`]. + fn next_block_base_fee(&self, parent: &Self::Header, target_timestamp: u64) -> Option { + Some(calc_next_block_base_fee( + parent.gas_used(), + parent.gas_limit(), + parent.base_fee_per_gas()?, + self.base_fee_params_at_timestamp(target_timestamp), + )) + } } impl EthChainSpec for ChainSpec { diff --git a/crates/chainspec/src/lib.rs b/crates/chainspec/src/lib.rs index 3c59008496c..6c3654a8edd 100644 --- a/crates/chainspec/src/lib.rs +++ b/crates/chainspec/src/lib.rs @@ -145,4 +145,34 @@ mod tests { let chain: Chain = NamedChain::Holesky.into(); assert_eq!(s, chain.public_dns_network_protocol().unwrap().as_str()); } + + #[test] + fn test_centralized_base_fee_calculation() { + use crate::{ChainSpec, EthChainSpec}; + use alloy_consensus::Header; + use alloy_eips::eip1559::INITIAL_BASE_FEE; + + fn parent_header() -> Header { + Header { + gas_used: 15_000_000, + gas_limit: 30_000_000, + base_fee_per_gas: Some(INITIAL_BASE_FEE), + timestamp: 1_000, + ..Default::default() + } + } + + let spec = ChainSpec::default(); + let parent = parent_header(); + + // For testing, assume next block has timestamp 12 seconds later + let next_timestamp = parent.timestamp + 12; + + let expected = parent + .next_block_base_fee(spec.base_fee_params_at_timestamp(next_timestamp)) + .unwrap_or_default(); + + let got = spec.next_block_base_fee(&parent, next_timestamp).unwrap_or_default(); + assert_eq!(expected, got, "Base fee calculation does not match expected value"); + } } diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 151eb084463..7accf96fa3b 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -1042,11 +1042,7 @@ mod tests { use alloy_trie::{TrieAccount, EMPTY_ROOT_HASH}; use core::ops::Deref; use reth_ethereum_forks::{ForkCondition, ForkHash, ForkId, Head}; - use std::{ - collections::{BTreeMap, HashMap}, - str::FromStr, - string::String, - }; + use std::{collections::HashMap, str::FromStr}; fn test_hardfork_fork_ids(spec: &ChainSpec, cases: &[(EthereumHardfork, ForkId)]) { for (hardfork, expected_id) in cases { @@ -2512,31 +2508,35 @@ Post-merge hard forks (timestamp based): #[test] fn blob_params_from_genesis() { let s = r#"{ - "cancun":{ - "baseFeeUpdateFraction":3338477, - "max":6, - "target":3 - }, - "prague":{ - "baseFeeUpdateFraction":3338477, - "max":6, - "target":3 - } - }"#; - let schedule: BTreeMap = serde_json::from_str(s).unwrap(); - let hardfork_params = BlobScheduleBlobParams::from_schedule(&schedule); + "blobSchedule": { + "cancun":{ + "baseFeeUpdateFraction":3338477, + "max":6, + "target":3 + }, + "prague":{ + "baseFeeUpdateFraction":3338477, + "max":6, + "target":3 + } + } + }"#; + let config: 
ChainConfig = serde_json::from_str(s).unwrap(); + let hardfork_params = config.blob_schedule_blob_params(); let expected = BlobScheduleBlobParams { cancun: BlobParams { target_blob_count: 3, max_blob_count: 6, update_fraction: 3338477, min_blob_fee: BLOB_TX_MIN_BLOB_GASPRICE, + max_blobs_per_tx: 6, }, prague: BlobParams { target_blob_count: 3, max_blob_count: 6, update_fraction: 3338477, min_blob_fee: BLOB_TX_MIN_BLOB_GASPRICE, + max_blobs_per_tx: 6, }, ..Default::default() }; diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index cb005ae6ff4..be3bcec5a17 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -9,7 +9,9 @@ use reth_consensus::{noop::NoopConsensus, ConsensusError, FullConsensus}; use reth_db::{init_db, open_db_read_only, DatabaseEnv}; use reth_db_common::init::init_genesis; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; +use reth_eth_wire::NetPrimitivesFor; use reth_evm::{noop::NoopEvmConfig, ConfigureEvm}; +use reth_network::NetworkEventListenerProvider; use reth_node_api::FullNodeTypesAdapter; use reth_node_builder::{ Node, NodeComponents, NodeComponentsBuilder, NodeTypes, NodeTypesWithDBAdapter, @@ -218,6 +220,7 @@ type FullTypesAdapter = FullNodeTypesAdapter< /// [`NodeTypes`] in CLI. pub trait CliNodeTypes: NodeTypesForProvider { type Evm: ConfigureEvm; + type NetworkPrimitives: NetPrimitivesFor; } impl CliNodeTypes for N @@ -225,6 +228,7 @@ where N: Node> + NodeTypesForProvider, { type Evm = <>>::Components as NodeComponents>>::Evm; + type NetworkPrimitives = <<>>::Components as NodeComponents>>::Network as NetworkEventListenerProvider>::Primitives; } /// Helper trait aggregating components required for the CLI. diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index d821570901b..eef67117063 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -56,11 +56,13 @@ pub struct ImportCommand { impl> ImportCommand { /// Execute `import` command - pub async fn execute(self, components: F) -> eyre::Result<()> + pub async fn execute( + self, + components: impl FnOnce(Arc) -> Comp, + ) -> eyre::Result<()> where N: CliNodeTypes, Comp: CliNodeComponents, - F: FnOnce(Arc) -> Comp, { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); diff --git a/crates/cli/commands/src/import_era.rs b/crates/cli/commands/src/import_era.rs index fbd8d23bd56..7920fda3131 100644 --- a/crates/cli/commands/src/import_era.rs +++ b/crates/cli/commands/src/import_era.rs @@ -2,14 +2,17 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_chains::{ChainKind, NamedChain}; use clap::{Args, Parser}; -use eyre::{eyre, OptionExt}; +use eyre::eyre; use reqwest::{Client, Url}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_era_downloader::{read_dir, EraClient, EraStream, EraStreamConfig}; use reth_era_utils as era; use reth_etl::Collector; -use reth_node_core::{dirs::data_dir, version::SHORT_VERSION}; +use reth_fs_util as fs; +use reth_node_core::version::SHORT_VERSION; +use reth_provider::StaticFileProviderFactory; +use reth_static_file_types::StaticFileSegment; use std::{path::PathBuf, sync::Arc}; use tracing::info; @@ -70,23 +73,32 @@ impl> ImportEraC let Environment { provider_factory, config, .. 
} = self.env.init::(AccessRights::RW)?; let mut hash_collector = Collector::new(config.stages.etl.file_size, config.stages.etl.dir); - let provider_factory = &provider_factory.provider_rw()?.0; + + let next_block = provider_factory + .static_file_provider() + .get_highest_static_file_block(StaticFileSegment::Headers) + .unwrap_or_default() + + 1; if let Some(path) = self.import.path { - let stream = read_dir(path, 0)?; + let stream = read_dir(path, next_block)?; - era::import(stream, provider_factory, &mut hash_collector)?; + era::import(stream, &provider_factory, &mut hash_collector)?; } else { let url = match self.import.url { Some(url) => url, None => self.env.chain.chain().kind().try_to_url()?, }; - let folder = data_dir().ok_or_eyre("Missing data directory")?.join("era"); - let folder = folder.into_boxed_path(); + let folder = + self.env.datadir.resolve_datadir(self.env.chain.chain()).data_dir().join("era"); + + fs::create_dir_all(&folder)?; + + let config = EraStreamConfig::default().start_from(next_block); let client = EraClient::new(Client::new(), url, folder); - let stream = EraStream::new(client, EraStreamConfig::default()); + let stream = EraStream::new(client, config); - era::import(stream, provider_factory, &mut hash_collector)?; + era::import(stream, &provider_factory, &mut hash_collector)?; } Ok(()) diff --git a/crates/cli/commands/src/p2p/bootnode.rs b/crates/cli/commands/src/p2p/bootnode.rs index 9be60aca658..c27586b243f 100644 --- a/crates/cli/commands/src/p2p/bootnode.rs +++ b/crates/cli/commands/src/p2p/bootnode.rs @@ -10,7 +10,7 @@ use tokio::select; use tokio_stream::StreamExt; use tracing::info; -/// Satrt a discovery only bootnode. +/// Start a discovery only bootnode. #[derive(Parser, Debug)] pub struct Command { /// Listen address for the bootnode (default: ":30301"). 
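A minimal sketch of the resume logic in the `import_era` change above,
assuming only that `highest` stands in for the value returned by
`get_highest_static_file_block(StaticFileSegment::Headers)`:

fn next_era_block(highest: Option<u64>) -> u64 {
    // A fresh datadir (`None`) and one holding only the genesis header
    // (`Some(0)`) both resolve to 1, so the ERA stream never re-imports
    // genesis and otherwise continues right after the highest header.
    highest.unwrap_or_default() + 1
}

// After a previous run imported headers up to block 4_999_999:
// assert_eq!(next_era_block(Some(4_999_999)), 5_000_000);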
diff --git a/crates/cli/commands/src/p2p/mod.rs b/crates/cli/commands/src/p2p/mod.rs index 5e4d31464b1..ab07a553c19 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ b/crates/cli/commands/src/p2p/mod.rs @@ -2,6 +2,7 @@ use std::{path::PathBuf, sync::Arc}; +use crate::common::CliNodeTypes; use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::{Parser, Subcommand}; @@ -9,7 +10,7 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_util::{get_secret_key, hash_or_num_value_parser}; use reth_config::Config; -use reth_network::{BlockDownloaderProvider, NetworkConfigBuilder, NetworkPrimitives}; +use reth_network::{BlockDownloaderProvider, NetworkConfigBuilder}; use reth_network_p2p::bodies::client::BodiesClient; use reth_node_core::{ args::{DatabaseArgs, DatadirArgs, NetworkArgs}, @@ -76,7 +77,7 @@ pub enum Subcommands { impl> Command { /// Execute `p2p` command - pub async fn execute(self) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain()); let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); @@ -100,7 +101,7 @@ impl let rlpx_socket = (self.network.addr, self.network.port).into(); let boot_nodes = self.chain.bootnodes().unwrap_or_default(); - let net = NetworkConfigBuilder::::new(p2p_secret_key) + let net = NetworkConfigBuilder::::new(p2p_secret_key) .peer_config(config.peers_config_with_basic_nodes_from_file(None)) .external_ip_resolver(self.network.nat) .disable_discv4_discovery_if(self.chain.chain().is_optimism()) diff --git a/crates/cli/commands/src/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs index 8e82c3e1403..cc21c0fc29f 100644 --- a/crates/cli/commands/src/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -18,7 +18,7 @@ use reth_provider::{ use reth_stages::{ stages::{ AccountHashingStage, ExecutionStage, MerkleStage, StorageHashingStage, - MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, + MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD, }, ExecutionStageThresholds, Stage, StageCheckpoint, UnwindInput, }; @@ -108,7 +108,7 @@ fn unwind_and_copy( max_cumulative_gas: None, max_duration: None, }, - MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, + MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD, ExExManagerHandle::empty(), ); @@ -161,7 +161,8 @@ where let mut stage = MerkleStage::::Execution { // Forces updating the root instead of calculating from scratch - clean_threshold: u64::MAX, + rebuild_threshold: u64::MAX, + incremental_threshold: u64::MAX, consensus: NoopConsensus::arc(), }; diff --git a/crates/cli/commands/src/stage/mod.rs b/crates/cli/commands/src/stage/mod.rs index 09d6ea9c091..0401d06cd8c 100644 --- a/crates/cli/commands/src/stage/mod.rs +++ b/crates/cli/commands/src/stage/mod.rs @@ -7,7 +7,6 @@ use clap::{Parser, Subcommand}; use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; -use reth_eth_wire::NetPrimitivesFor; pub mod drop; pub mod dump; @@ -41,15 +40,17 @@ pub enum Subcommands { impl> Command { /// Execute `stage` command - pub async fn execute(self, ctx: CliContext, components: F) -> eyre::Result<()> + pub async fn execute( + self, + ctx: CliContext, + components: impl FnOnce(Arc) -> Comp, + ) -> eyre::Result<()> where N: CliNodeTypes, Comp: CliNodeComponents, - F: FnOnce(Arc) -> Comp, - P: NetPrimitivesFor, { match self.command { - 
Subcommands::Run(command) => command.execute::(ctx, components).await, + Subcommands::Run(command) => command.execute::(ctx, components).await, Subcommands::Drop(command) => command.execute::().await, Subcommands::Dump(command) => command.execute::(components).await, Subcommands::Unwind(command) => command.execute::(components).await, diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index f30d452b583..bba9858f212 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -16,7 +16,6 @@ use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; -use reth_eth_wire::NetPrimitivesFor; use reth_exex::ExExManagerHandle; use reth_network::BlockDownloaderProvider; use reth_network_p2p::HeadersClient; @@ -103,12 +102,11 @@ pub struct Command { impl> Command { /// Execute `stage` command - pub async fn execute(self, ctx: CliContext, components: F) -> eyre::Result<()> + pub async fn execute(self, ctx: CliContext, components: F) -> eyre::Result<()> where N: CliNodeTypes, Comp: CliNodeComponents, F: FnOnce(Arc) -> Comp, - P: NetPrimitivesFor, { // Raise the fd limit of the process. // Does not do anything on windows. @@ -174,7 +172,7 @@ impl let network = self .network - .network_config::
<P>(
+            .network_config::<N::NetworkPrimitives>
( + .network_config::( &config, provider_factory.chain_spec(), p2p_secret_key, @@ -186,7 +184,7 @@ impl let fetch_client = Arc::new(network.fetch_client().await?); // Use `to` as the tip for the stage - let tip: P::BlockHeader = loop { + let tip = loop { match fetch_client.get_header(BlockHashOrNumber::Number(self.to)).await { Ok(header) => { if let Some(header) = header.into_data() { @@ -229,7 +227,7 @@ impl let network = self .network - .network_config::
<P>(
+            .network_config::<N::NetworkPrimitives>
( + .network_config::( &config, provider_factory.chain_spec(), p2p_secret_key, @@ -271,7 +269,7 @@ impl max_cumulative_gas: None, max_duration: None, }, - config.stages.merkle.clean_threshold, + config.stages.merkle.incremental_threshold, ExExManagerHandle::empty(), )), None, @@ -302,7 +300,8 @@ impl let consensus = Arc::new(components.consensus().clone()); ( Box::new(MerkleStage::::new_execution( - config.stages.merkle.clean_threshold, + config.stages.merkle.rebuild_threshold, + config.stages.merkle.incremental_threshold, consensus.clone(), )), Some(Box::new(MerkleStage::::new_unwind(consensus))), diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 55883d04e8d..c1c5ef96075 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -136,7 +136,7 @@ impl StageConfig { /// `ExecutionStage` pub fn execution_external_clean_threshold(&self) -> u64 { self.merkle - .clean_threshold + .incremental_threshold .max(self.account_hashing.clean_threshold) .max(self.storage_hashing.clean_threshold) } @@ -342,14 +342,22 @@ impl Default for HashingConfig { #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "serde", serde(default))] pub struct MerkleConfig { + /// The number of blocks we will run the incremental root method for when we are catching up on + /// the merkle stage for a large number of blocks. + /// + /// When we are catching up for a large number of blocks, we can only run the incremental root + /// for a limited number of blocks, otherwise the incremental root method may cause the node to + /// OOM. This number determines how many blocks in a row we will run the incremental root + /// method for. + pub incremental_threshold: u64, /// The threshold (in number of blocks) for switching from incremental trie building of changes /// to whole rebuild. 
- pub clean_threshold: u64, + pub rebuild_threshold: u64, } impl Default for MerkleConfig { fn default() -> Self { - Self { clean_threshold: 5_000 } + Self { incremental_threshold: 7_000, rebuild_threshold: 100_000 } } } @@ -455,6 +463,7 @@ impl PruneConfig { receipts, account_history, storage_history, + bodies_history, receipts_log_filter, }, } = other; @@ -470,6 +479,7 @@ impl PruneConfig { self.segments.receipts = self.segments.receipts.or(receipts); self.segments.account_history = self.segments.account_history.or(account_history); self.segments.storage_history = self.segments.storage_history.or(storage_history); + self.segments.bodies_history = self.segments.bodies_history.or(bodies_history); if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() { self.segments.receipts_log_filter = receipts_log_filter; @@ -990,6 +1000,7 @@ receipts = 'full' receipts: Some(PruneMode::Distance(1000)), account_history: None, storage_history: Some(PruneMode::Before(5000)), + bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([( Address::random(), PruneMode::Full, @@ -1005,6 +1016,7 @@ receipts = 'full' receipts: Some(PruneMode::Full), account_history: Some(PruneMode::Distance(2000)), storage_history: Some(PruneMode::Distance(3000)), + bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([ (Address::random(), PruneMode::Distance(1000)), (Address::random(), PruneMode::Before(2000)), diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index b3e75677b1f..a682bc2f910 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -3,7 +3,7 @@ use alloy_consensus::{ constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader as _, EMPTY_OMMER_ROOT_HASH, }; -use alloy_eips::{calc_next_block_base_fee, eip4844::DATA_GAS_PER_BLOB, eip7840::BlobParams}; +use alloy_eips::{eip4844::DATA_GAS_PER_BLOB, eip7840::BlobParams}; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_consensus::ConsensusError; use reth_primitives_traits::{ @@ -249,12 +249,9 @@ pub fn validate_against_parent_hash_number( /// Validates the base fee against the parent and EIP-1559 rules. #[inline] -pub fn validate_against_parent_eip1559_base_fee< - H: BlockHeader, - ChainSpec: EthChainSpec + EthereumHardforks, ->( - header: &H, - parent: &H, +pub fn validate_against_parent_eip1559_base_fee( + header: &ChainSpec::Header, + parent: &ChainSpec::Header, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { if chain_spec.is_london_active_at_block(header.number()) { @@ -266,15 +263,9 @@ pub fn validate_against_parent_eip1559_base_fee< { alloy_eips::eip1559::INITIAL_BASE_FEE } else { - // This BaseFeeMissing will not happen as previous blocks are checked to have - // them. - let base_fee = parent.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; - calc_next_block_base_fee( - parent.gas_used(), - parent.gas_limit(), - base_fee, - chain_spec.base_fee_params_at_timestamp(header.timestamp()), - ) + chain_spec + .next_block_base_fee(parent, header.timestamp()) + .ok_or(ConsensusError::BaseFeeMissing)? 
}; if expected_base_fee != base_fee { return Err(ConsensusError::BaseFeeDiff(GotExpected { diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 08c5895e02a..2953e752009 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -116,6 +116,36 @@ pub async fn setup_engine( TaskManager, Wallet, )> +where + N: NodeBuilderHelper, + LocalPayloadAttributesBuilder: + PayloadAttributesBuilder<::PayloadAttributes>, + TmpNodeAddOnsHandle: RpcHandleProvider, TmpNodeEthApi>, +{ + setup_engine_with_connection::( + num_nodes, + chain_spec, + is_dev, + tree_config, + attributes_generator, + true, + ) + .await +} + +/// Creates the initial setup with `num_nodes` started and optionally interconnected. +pub async fn setup_engine_with_connection( + num_nodes: usize, + chain_spec: Arc, + is_dev: bool, + tree_config: reth_node_api::TreeConfig, + attributes_generator: impl Fn(u64) -> <::Payload as PayloadTypes>::PayloadBuilderAttributes + Send + Sync + Copy + 'static, + connect_nodes: bool, +) -> eyre::Result<( + Vec>>>, + TaskManager, + Wallet, +)> where N: NodeBuilderHelper, LocalPayloadAttributesBuilder: @@ -168,15 +198,17 @@ where let genesis = node.block_hash(0); node.update_forkchoice(genesis, genesis).await?; - // Connect each node in a chain. - if let Some(previous_node) = nodes.last_mut() { - previous_node.connect(&mut node).await; - } + // Connect each node in a chain if requested. + if connect_nodes { + if let Some(previous_node) = nodes.last_mut() { + previous_node.connect(&mut node).await; + } - // Connect last node with the first if there are more than two - if idx + 1 == num_nodes && num_nodes > 2 { - if let Some(first_node) = nodes.first_mut() { - node.connect(first_node).await; + // Connect last node with the first if there are more than two + if idx + 1 == num_nodes && num_nodes > 2 { + if let Some(first_node) = nodes.first_mut() { + node.connect(first_node).await; + } } } diff --git a/crates/e2e-test-utils/src/testsuite/actions/engine_api.rs b/crates/e2e-test-utils/src/testsuite/actions/engine_api.rs new file mode 100644 index 00000000000..6548fc951c6 --- /dev/null +++ b/crates/e2e-test-utils/src/testsuite/actions/engine_api.rs @@ -0,0 +1,350 @@ +//! Engine API specific actions for testing. + +use crate::testsuite::{Action, Environment}; +use alloy_primitives::B256; +use alloy_rpc_types_engine::{ + ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadStatusEnum, +}; +use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction, TransactionRequest}; +use eyre::Result; +use futures_util::future::BoxFuture; +use reth_node_api::{EngineTypes, PayloadTypes}; +use reth_rpc_api::clients::{EngineApiClient, EthApiClient}; +use std::marker::PhantomData; +use tracing::debug; + +/// Action that sends a newPayload request to a specific node. 
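// A worked sketch of the EIP-1559 update that the base-fee validation above
// now delegates to via `EthChainSpec::next_block_base_fee` /
// `alloy_eips::calc_next_block_base_fee`. It mirrors the spec formula with
// mainnet parameters (elasticity 2, change denominator 8); an illustration,
// not the PR's implementation.
fn sketch_next_base_fee(gas_used: u64, gas_limit: u64, base_fee: u64) -> u64 {
    let target = gas_limit / 2; // elasticity multiplier of 2
    if gas_used == target {
        base_fee
    } else if gas_used > target {
        // increase by base_fee * excess / target / 8, but by at least 1
        let delta =
            (base_fee as u128 * (gas_used - target) as u128 / target as u128 / 8) as u64;
        base_fee + delta.max(1)
    } else {
        let delta =
            (base_fee as u128 * (target - gas_used) as u128 / target as u128 / 8) as u64;
        base_fee.saturating_sub(delta)
    }
}
// The chainspec test earlier uses 15M gas at a 30M limit, i.e. exactly at
// target, so the next base fee stays at the parent's INITIAL_BASE_FEE.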
+#[derive(Debug)]
+pub struct SendNewPayload<Engine>
+where
+    Engine: EngineTypes,
+{
+    /// The node index to send to
+    pub node_idx: usize,
+    /// The block number to send
+    pub block_number: u64,
+    /// The source node to get the block from
+    pub source_node_idx: usize,
+    /// Expected payload status
+    pub expected_status: ExpectedPayloadStatus,
+    _phantom: PhantomData<Engine>,
+}
+
+/// Expected status for a payload
+#[derive(Debug, Clone)]
+pub enum ExpectedPayloadStatus {
+    /// Expect the payload to be valid
+    Valid,
+    /// Expect the payload to be invalid
+    Invalid,
+    /// Expect the payload to be syncing or accepted (buffered)
+    SyncingOrAccepted,
+}
+
+impl<Engine> SendNewPayload<Engine>
+where
+    Engine: EngineTypes,
+{
+    /// Create a new `SendNewPayload` action
+    pub fn new(
+        node_idx: usize,
+        block_number: u64,
+        source_node_idx: usize,
+        expected_status: ExpectedPayloadStatus,
+    ) -> Self {
+        Self {
+            node_idx,
+            block_number,
+            source_node_idx,
+            expected_status,
+            _phantom: Default::default(),
+        }
+    }
+}
+
+impl<Engine> Action<Engine> for SendNewPayload<Engine>
+where
+    Engine: EngineTypes + PayloadTypes,
+{
+    fn execute<'a>(&'a mut self, env: &'a mut Environment<Engine>) -> BoxFuture<'a, Result<()>> {
+        Box::pin(async move {
+            if self.node_idx >= env.node_clients.len() {
+                return Err(eyre::eyre!("Target node index out of bounds: {}", self.node_idx));
+            }
+            if self.source_node_idx >= env.node_clients.len() {
+                return Err(eyre::eyre!(
+                    "Source node index out of bounds: {}",
+                    self.source_node_idx
+                ));
+            }
+
+            // Get the block from the source node with retries
+            let source_rpc = &env.node_clients[self.source_node_idx].rpc;
+            let mut block = None;
+            let mut retries = 0;
+            const MAX_RETRIES: u32 = 5;
+
+            while retries < MAX_RETRIES {
+                match EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::block_by_number(
+                    source_rpc,
+                    alloy_eips::BlockNumberOrTag::Number(self.block_number),
+                    true, // include transactions
+                )
+                .await
+                {
+                    Ok(Some(b)) => {
+                        block = Some(b);
+                        break;
+                    }
+                    Ok(None) => {
+                        debug!(
+                            "Block {} not found on source node {} (attempt {}/{})",
+                            self.block_number,
+                            self.source_node_idx,
+                            retries + 1,
+                            MAX_RETRIES
+                        );
+                        retries += 1;
+                        if retries < MAX_RETRIES {
+                            tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
+                        }
+                    }
+                    Err(e) => return Err(e.into()),
+                }
+            }
+
+            let block = block.ok_or_else(|| {
+                eyre::eyre!(
+                    "Block {} not found on source node {} after {} retries",
+                    self.block_number,
+                    self.source_node_idx,
+                    MAX_RETRIES
+                )
+            })?;
+
+            // Convert block to ExecutionPayloadV3
+            let payload = block_to_payload_v3(block.clone());
+
+            // Send the payload to the target node
+            let target_engine = env.node_clients[self.node_idx].engine.http_client();
+            let result = EngineApiClient::<Engine>::new_payload_v3(
+                &target_engine,
+                payload,
+                vec![],
+                B256::ZERO, // parent_beacon_block_root
+            )
+            .await?;
+
+            debug!(
+                "Node {}: new_payload for block {} response - status: {:?}, latest_valid_hash: {:?}",
+                self.node_idx, self.block_number, result.status, result.latest_valid_hash
+            );
+
+            // Validate the response based on expectations
+            match (&result.status, &self.expected_status) {
+                (PayloadStatusEnum::Valid, ExpectedPayloadStatus::Valid) => {
+                    debug!(
+                        "Node {}: Block {} marked as VALID as expected",
+                        self.node_idx, self.block_number
+                    );
+                    Ok(())
+                }
+                (
+                    PayloadStatusEnum::Invalid { validation_error },
+                    ExpectedPayloadStatus::Invalid,
+                ) => {
+                    debug!(
+                        "Node {}: Block {} marked as INVALID as expected: {:?}",
+                        self.node_idx, self.block_number, validation_error
+                    );
+                    Ok(())
+                }
+                (
+                    PayloadStatusEnum::Syncing | PayloadStatusEnum::Accepted,
+                    ExpectedPayloadStatus::SyncingOrAccepted,
+                ) => {
+                    debug!(
+                        "Node {}: Block {} marked as SYNCING/ACCEPTED as expected (buffered)",
+                        self.node_idx, self.block_number
+                    );
+                    Ok(())
+                }
+                (status, expected) => Err(eyre::eyre!(
+                    "Node {}: Unexpected payload status for block {}. Got {:?}, expected {:?}",
+                    self.node_idx,
+                    self.block_number,
+                    status,
+                    expected
+                )),
+            }
+        })
+    }
+}
+
+/// Action that sends multiple blocks to a node in a specific order.
+#[derive(Debug)]
+pub struct SendNewPayloads<Engine>
+where
+    Engine: EngineTypes,
+{
+    /// The node index to send to
+    target_node: Option<usize>,
+    /// The source node to get the blocks from
+    source_node: Option<usize>,
+    /// The starting block number
+    start_block: Option<u64>,
+    /// The total number of blocks to send
+    total_blocks: Option<u64>,
+    /// Whether to send in reverse order
+    reverse_order: bool,
+    /// Custom block numbers to send (if not using `start_block` + `total_blocks`)
+    custom_block_numbers: Option<Vec<u64>>,
+    _phantom: PhantomData<Engine>,
+}
+
+impl<Engine> SendNewPayloads<Engine>
+where
+    Engine: EngineTypes,
+{
+    /// Create a new `SendNewPayloads` action builder
+    pub fn new() -> Self {
+        Self {
+            target_node: None,
+            source_node: None,
+            start_block: None,
+            total_blocks: None,
+            reverse_order: false,
+            custom_block_numbers: None,
+            _phantom: Default::default(),
+        }
+    }
+
+    /// Set the target node index
+    pub const fn with_target_node(mut self, node_idx: usize) -> Self {
+        self.target_node = Some(node_idx);
+        self
+    }
+
+    /// Set the source node index
+    pub const fn with_source_node(mut self, node_idx: usize) -> Self {
+        self.source_node = Some(node_idx);
+        self
+    }
+
+    /// Set the starting block number
+    pub const fn with_start_block(mut self, block_num: u64) -> Self {
+        self.start_block = Some(block_num);
+        self
+    }
+
+    /// Set the total number of blocks to send
+    pub const fn with_total_blocks(mut self, count: u64) -> Self {
+        self.total_blocks = Some(count);
+        self
+    }
+
+    /// Send blocks in reverse order (useful for testing buffering)
+    pub const fn in_reverse_order(mut self) -> Self {
+        self.reverse_order = true;
+        self
+    }
+
+    /// Set custom block numbers to send
+    pub fn with_block_numbers(mut self, block_numbers: Vec<u64>) -> Self {
+        self.custom_block_numbers = Some(block_numbers);
+        self
+    }
+}
+
+impl<Engine> Default for SendNewPayloads<Engine>
+where
+    Engine: EngineTypes,
+{
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<Engine> Action<Engine> for SendNewPayloads<Engine>
+where
+    Engine: EngineTypes + PayloadTypes,
+{
+    fn execute<'a>(&'a mut self, env: &'a mut Environment<Engine>) -> BoxFuture<'a, Result<()>> {
+        Box::pin(async move {
+            // Validate required fields
+            let target_node =
+                self.target_node.ok_or_else(|| eyre::eyre!("Target node not specified"))?;
+            let source_node =
+                self.source_node.ok_or_else(|| eyre::eyre!("Source node not specified"))?;
+
+            // Determine block numbers to send
+            let block_numbers = if let Some(custom_numbers) = &self.custom_block_numbers {
+                custom_numbers.clone()
+            } else {
+                let start =
+                    self.start_block.ok_or_else(|| eyre::eyre!("Start block not specified"))?;
+                let count =
+                    self.total_blocks.ok_or_else(|| eyre::eyre!("Total blocks not specified"))?;
+
+                if self.reverse_order {
+                    // Send blocks in reverse order (e.g., for count=2, start=1: [2, 1])
+                    (0..count).map(|i| start + count - 1 - i).collect()
+                } else {
+                    // Send blocks in normal order
+                    (0..count).map(|i| start + i).collect()
+                }
+            };
+
+            for &block_number in &block_numbers {
+                // For the first block in reverse order, expect buffering
+                // For subsequent blocks, they might connect immediately
+                let expected_status =
+                    if self.reverse_order && block_number == *block_numbers.first().unwrap() {
+                        ExpectedPayloadStatus::SyncingOrAccepted
+                    } else {
+                        ExpectedPayloadStatus::Valid
+                    };
+
+                let mut action = SendNewPayload::<Engine>::new(
+                    target_node,
+                    block_number,
+                    source_node,
+                    expected_status,
+                );
+
+                action.execute(env).await?;
+            }
+
+            Ok(())
+        })
+    }
+}
+
+/// Helper function to convert a block to `ExecutionPayloadV3`
+fn block_to_payload_v3(block: Block) -> ExecutionPayloadV3 {
+    use alloy_primitives::U256;
+
+    ExecutionPayloadV3 {
+        payload_inner: ExecutionPayloadV2 {
+            payload_inner: ExecutionPayloadV1 {
+                parent_hash: block.header.inner.parent_hash,
+                fee_recipient: block.header.inner.beneficiary,
+                state_root: block.header.inner.state_root,
+                receipts_root: block.header.inner.receipts_root,
+                logs_bloom: block.header.inner.logs_bloom,
+                prev_randao: block.header.inner.mix_hash,
+                block_number: block.header.inner.number,
+                gas_limit: block.header.inner.gas_limit,
+                gas_used: block.header.inner.gas_used,
+                timestamp: block.header.inner.timestamp,
+                extra_data: block.header.inner.extra_data.clone(),
+                base_fee_per_gas: U256::from(block.header.inner.base_fee_per_gas.unwrap_or(0)),
+                block_hash: block.header.hash,
+                transactions: vec![], // No transactions needed for buffering tests
+            },
+            withdrawals: block.withdrawals.unwrap_or_default().to_vec(),
+        },
+        blob_gas_used: block.header.inner.blob_gas_used.unwrap_or(0),
+        excess_blob_gas: block.header.inner.excess_blob_gas.unwrap_or(0),
+    }
+}
diff --git a/crates/e2e-test-utils/src/testsuite/actions/fork.rs b/crates/e2e-test-utils/src/testsuite/actions/fork.rs
index a0be6bdd8d0..1511d90fa59 100644
--- a/crates/e2e-test-utils/src/testsuite/actions/fork.rs
+++ b/crates/e2e-test-utils/src/testsuite/actions/fork.rs
@@ -5,7 +5,7 @@ use crate::testsuite::{
     Action, BlockInfo, Environment,
 };
 use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes};
-use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction};
+use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction, TransactionRequest};
 use eyre::Result;
 use futures_util::future::BoxFuture;
 use reth_node_api::{EngineTypes, PayloadTypes};
@@ -130,14 +130,19 @@ where
         // get the block at the fork base number to establish the fork point
         let rpc_client = &env.node_clients[0].rpc;
-        let fork_base_block =
-            EthApiClient::<Transaction, Block, Receipt, Header>::block_by_number(
-                rpc_client,
-                alloy_eips::BlockNumberOrTag::Number(self.fork_base_block),
-                false,
-            )
-            .await?
-            .ok_or_else(|| eyre::eyre!("Fork base block {} not found", self.fork_base_block))?;
+        let fork_base_block = EthApiClient::<
+            TransactionRequest,
+            Transaction,
+            Block,
+            Receipt,
+            Header,
+        >::block_by_number(
+            rpc_client,
+            alloy_eips::BlockNumberOrTag::Number(self.fork_base_block),
+            false,
+        )
+        .await?
+        .ok_or_else(|| eyre::eyre!("Fork base block {} not found", self.fork_base_block))?;
 
         // update active node state to point to the fork base block
         let active_node_state = env.active_node_state_mut()?;
@@ -243,7 +248,7 @@ where
             // walk backwards through the chain until we reach the fork base
             while current_number > self.fork_base_number {
-                let block = EthApiClient::<Transaction, Block, Receipt, Header>::block_by_hash(
+                let block = EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::block_by_hash(
                     rpc_client,
                     current_hash,
                     false,
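Editor's note: the two actions above are the building blocks for the buffering tests later in this diff. A minimal sketch of how they compose, assuming `EthEngineTypes` is the engine type used elsewhere in this test suite:

```rust
// Sketch: replay blocks 1..=2 from node 0 onto node 1, highest block first.
// With `in_reverse_order`, the first payload sent (block 2) has an unknown
// parent on node 1, so the action expects SYNCING/ACCEPTED (buffered);
// every later payload is expected to return VALID once its parent arrives.
let action = SendNewPayloads::<EthEngineTypes>::new()
    .with_target_node(1)
    .with_source_node(0)
    .with_start_block(1)
    .with_total_blocks(2)
    .in_reverse_order();
```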
diff --git a/crates/e2e-test-utils/src/testsuite/actions/mod.rs b/crates/e2e-test-utils/src/testsuite/actions/mod.rs
index 7f09c283568..205eb9ac48e 100644
--- a/crates/e2e-test-utils/src/testsuite/actions/mod.rs
+++ b/crates/e2e-test-utils/src/testsuite/actions/mod.rs
@@ -1,24 +1,29 @@
 //! Actions that can be performed in tests.
 
 use crate::testsuite::Environment;
-use alloy_rpc_types_engine::{ForkchoiceUpdated, PayloadStatusEnum};
+use alloy_rpc_types_engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatusEnum};
 use eyre::Result;
 use futures_util::future::BoxFuture;
 use reth_node_api::EngineTypes;
+use reth_rpc_api::clients::EngineApiClient;
 use std::future::Future;
 use tracing::debug;
 
+pub mod engine_api;
 pub mod fork;
 pub mod node_ops;
 pub mod produce_blocks;
 pub mod reorg;
 
+pub use engine_api::{ExpectedPayloadStatus, SendNewPayload, SendNewPayloads};
 pub use fork::{CreateFork, ForkBase, SetForkBase, SetForkBaseFromBlockInfo, ValidateFork};
-pub use node_ops::{CaptureBlockOnNode, CompareNodeChainTips, SelectActiveNode, ValidateBlockTag};
+pub use node_ops::{
+    CaptureBlockOnNode, CompareNodeChainTips, SelectActiveNode, ValidateBlockTag, WaitForSync,
+};
 pub use produce_blocks::{
     AssertMineBlock, BroadcastLatestForkchoice, BroadcastNextNewPayload, CheckPayloadAccepted,
     ExpectFcuStatus, GenerateNextPayload, GeneratePayloadAttributes, PickNextBlockProducer,
-    ProduceBlocks, ProduceInvalidBlocks, TestFcuToTag, UpdateBlockInfo,
+    ProduceBlocks, ProduceBlocksLocally, ProduceInvalidBlocks, TestFcuToTag, UpdateBlockInfo,
     UpdateBlockInfoToLatestPayload, ValidateCanonicalTag,
 };
 pub use reorg::{ReorgTarget, ReorgTo, SetReorgTarget};
@@ -102,12 +107,20 @@ where
 
 /// Action that makes the current latest block canonical by broadcasting a forkchoice update
 #[derive(Debug, Default)]
-pub struct MakeCanonical {}
+pub struct MakeCanonical {
+    /// If true, only send to the active node. If false, broadcast to all nodes.
+    active_node_only: bool,
+}
 
 impl MakeCanonical {
     /// Create a new `MakeCanonical` action
     pub const fn new() -> Self {
-        Self {}
+        Self { active_node_only: false }
+    }
+
+    /// Create a new `MakeCanonical` action that only applies to the active node
+    pub const fn with_active_node() -> Self {
+        Self { active_node_only: true }
     }
 }
 
@@ -120,23 +133,59 @@ where
 {
     fn execute<'a>(&'a mut self, env: &'a mut Environment<Engine>) -> BoxFuture<'a, Result<()>> {
         Box::pin(async move {
-            let mut actions: Vec<Box<dyn Action<Engine>>> = vec![
-                Box::new(BroadcastLatestForkchoice::default()),
-                Box::new(UpdateBlockInfo::default()),
-            ];
-
-            // if we're on a fork, validate it now that it's canonical
-            if let Ok(active_state) = env.active_node_state() {
-                if let Some(fork_base) = active_state.current_fork_base {
-                    debug!("MakeCanonical: Adding fork validation from base block {}", fork_base);
-                    actions.push(Box::new(ValidateFork::new(fork_base)));
-                    // clear the fork base since we're now canonical
-                    env.active_node_state_mut()?.current_fork_base = None;
+            if self.active_node_only {
+                // Only update the active node
+                let latest_block = env
+                    .current_block_info()
+                    .ok_or_else(|| eyre::eyre!("No latest block information available"))?;
+
+                let fork_choice_state = ForkchoiceState {
+                    head_block_hash: latest_block.hash,
+                    safe_block_hash: latest_block.hash,
+                    finalized_block_hash: latest_block.hash,
+                };
+
+                let active_idx = env.active_node_idx;
+                let engine = env.node_clients[active_idx].engine.http_client();
+
+                let fcu_response = EngineApiClient::<Engine>::fork_choice_updated_v3(
+                    &engine,
+                    fork_choice_state,
+                    None,
+                )
+                .await?;
+
+                debug!(
+                    "Active node {}: Forkchoice update status: {:?}",
+                    active_idx, fcu_response.payload_status.status
+                );
+
+                validate_fcu_response(&fcu_response, &format!("Active node {active_idx}"))?;
+
+                Ok(())
+            } else {
+                // Original broadcast behavior
+                let mut actions: Vec<Box<dyn Action<Engine>>> = vec![
+                    Box::new(BroadcastLatestForkchoice::default()),
+                    Box::new(UpdateBlockInfo::default()),
+                ];
+
+                // if we're on a fork, validate it now that it's canonical
+                if let Ok(active_state) = env.active_node_state() {
+                    if let Some(fork_base) = active_state.current_fork_base {
+                        debug!(
+                            "MakeCanonical: Adding fork validation from base block {}",
+                            fork_base
+                        );
+                        actions.push(Box::new(ValidateFork::new(fork_base)));
+                        // clear the fork base since we're now canonical
+                        env.active_node_state_mut()?.current_fork_base = None;
+                    }
                 }
-            }
 
-            let mut sequence = Sequence::new(actions);
-            sequence.execute(env).await
+                let mut sequence = Sequence::new(actions);
+                sequence.execute(env).await
+            }
        })
    }
 }
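Editor's note: the active-node-only path collapses head, safe, and finalized onto one hash and sends a single FCU with no payload attributes, so the node only advances its canonical head and does not start building. A short usage sketch of the two constructors added above:

```rust
// Broadcast to every node (default): used after a block all nodes have seen.
let make_all = MakeCanonical::new();
// Only move the active node's head, leaving peers untouched: used below when
// node 0 builds a private chain that node 1 must later sync to.
let local_only = MakeCanonical::with_active_node();
```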
diff --git a/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs b/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs
index 3a240d8f644..2b3914339f8 100644
--- a/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs
+++ b/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs
@@ -1,11 +1,13 @@
 //! Node-specific operations for multi-node testing.
 
 use crate::testsuite::{Action, Environment};
-use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction};
+use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction, TransactionRequest};
 use eyre::Result;
 use futures_util::future::BoxFuture;
 use reth_node_api::EngineTypes;
 use reth_rpc_api::clients::EthApiClient;
+use std::time::Duration;
+use tokio::time::{sleep, timeout};
 use tracing::debug;
 
 /// Action to select which node should be active for subsequent single-node operations.
@@ -72,7 +74,7 @@ where
             let node_b_client = &env.node_clients[self.node_b];
 
             // Get latest block from each node
-            let block_a = EthApiClient::<Transaction, Block, Receipt, Header>::block_by_number(
+            let block_a = EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::block_by_number(
                 &node_a_client.rpc,
                 alloy_eips::BlockNumberOrTag::Latest,
                 false,
@@ -80,7 +82,7 @@ where
             .await?
             .ok_or_else(|| eyre::eyre!("Failed to get latest block from node {}", self.node_a))?;
 
-            let block_b = EthApiClient::<Transaction, Block, Receipt, Header>::block_by_number(
+            let block_b = EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::block_by_number(
                 &node_b_client.rpc,
                 alloy_eips::BlockNumberOrTag::Latest,
                 false,
@@ -213,3 +215,126 @@ where
         })
     }
 }
+
+/// Action that waits for two nodes to sync and have the same chain tip.
+#[derive(Debug)]
+pub struct WaitForSync {
+    /// First node index
+    pub node_a: usize,
+    /// Second node index
+    pub node_b: usize,
+    /// Maximum time to wait for sync (default: 30 seconds)
+    pub timeout_secs: u64,
+    /// Polling interval (default: 1 second)
+    pub poll_interval_secs: u64,
+}
+
+impl WaitForSync {
+    /// Create a new `WaitForSync` action with default timeouts
+    pub const fn new(node_a: usize, node_b: usize) -> Self {
+        Self { node_a, node_b, timeout_secs: 30, poll_interval_secs: 1 }
+    }
+
+    /// Set custom timeout
+    pub const fn with_timeout(mut self, timeout_secs: u64) -> Self {
+        self.timeout_secs = timeout_secs;
+        self
+    }
+
+    /// Set custom poll interval
+    pub const fn with_poll_interval(mut self, poll_interval_secs: u64) -> Self {
+        self.poll_interval_secs = poll_interval_secs;
+        self
+    }
+}
+
+impl<Engine> Action<Engine> for WaitForSync
+where
+    Engine: EngineTypes,
+{
+    fn execute<'a>(&'a mut self, env: &'a mut Environment<Engine>) -> BoxFuture<'a, Result<()>> {
+        Box::pin(async move {
+            if self.node_a >= env.node_count() || self.node_b >= env.node_count() {
+                return Err(eyre::eyre!("Node index out of bounds"));
+            }
+
+            let timeout_duration = Duration::from_secs(self.timeout_secs);
+            let poll_interval = Duration::from_secs(self.poll_interval_secs);
+
+            debug!(
+                "Waiting for nodes {} and {} to sync (timeout: {}s, poll interval: {}s)",
+                self.node_a, self.node_b, self.timeout_secs, self.poll_interval_secs
+            );
+
+            let sync_check = async {
+                loop {
+                    let node_a_client = &env.node_clients[self.node_a];
+                    let node_b_client = &env.node_clients[self.node_b];
+
+                    // Get latest block from each node
+                    let block_a = EthApiClient::<
+                        TransactionRequest,
+                        Transaction,
+                        Block,
+                        Receipt,
+                        Header,
+                    >::block_by_number(
+                        &node_a_client.rpc,
+                        alloy_eips::BlockNumberOrTag::Latest,
+                        false,
+                    )
+                    .await?
+                    .ok_or_else(|| {
+                        eyre::eyre!("Failed to get latest block from node {}", self.node_a)
+                    })?;
+
+                    let block_b = EthApiClient::<
+                        TransactionRequest,
+                        Transaction,
+                        Block,
+                        Receipt,
+                        Header,
+                    >::block_by_number(
+                        &node_b_client.rpc,
+                        alloy_eips::BlockNumberOrTag::Latest,
+                        false,
+                    )
+                    .await?
+                    .ok_or_else(|| {
+                        eyre::eyre!("Failed to get latest block from node {}", self.node_b)
+                    })?;
+
+                    debug!(
+                        "Sync check: Node {} tip: {} (block {}), Node {} tip: {} (block {})",
+                        self.node_a,
+                        block_a.header.hash,
+                        block_a.header.number,
+                        self.node_b,
+                        block_b.header.hash,
+                        block_b.header.number
+                    );
+
+                    if block_a.header.hash == block_b.header.hash {
+                        debug!(
+                            "Nodes {} and {} successfully synced to block {} (hash: {})",
+                            self.node_a, self.node_b, block_a.header.number, block_a.header.hash
+                        );
+                        return Ok(());
+                    }
+
+                    sleep(poll_interval).await;
+                }
+            };
+
+            match timeout(timeout_duration, sync_check).await {
+                Ok(result) => result,
+                Err(_) => Err(eyre::eyre!(
+                    "Timeout waiting for nodes {} and {} to sync after {}s",
+                    self.node_a,
+                    self.node_b,
+                    self.timeout_secs
+                )),
+            }
+        })
+    }
+}
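Editor's note: `WaitForSync` simply polls `eth_getBlockByNumber(latest)` on both nodes until the tip hashes match, bounded by `tokio::time::timeout`. Typical usage, grounded in the builder methods above:

```rust
// Give two freshly connected nodes up to 60s to converge, checking every 2s
// instead of the 1s default. On timeout the whole test action fails.
let wait = WaitForSync::new(0, 1)
    .with_timeout(60)
    .with_poll_interval(2);
```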
diff --git a/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs b/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs
index 02f7155b66a..c20b79d9ae4 100644
--- a/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs
+++ b/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs
@@ -1,14 +1,14 @@
 //! Block production actions for the e2e testing framework.
 
 use crate::testsuite::{
-    actions::{validate_fcu_response, Action, Sequence},
+    actions::{expect_fcu_not_syncing_or_accepted, validate_fcu_response, Action, Sequence},
     BlockInfo, Environment,
 };
 use alloy_primitives::{Bytes, B256};
 use alloy_rpc_types_engine::{
     payload::ExecutionPayloadEnvelopeV3, ForkchoiceState, PayloadAttributes, PayloadStatusEnum,
 };
-use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction};
+use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction, TransactionRequest};
 use eyre::Result;
 use futures_util::future::BoxFuture;
 use reth_node_api::{EngineTypes, PayloadTypes};
@@ -73,13 +73,16 @@ where
             let engine_client = node_client.engine.http_client();
 
             // get the latest block to use as parent
-            let latest_block =
-                EthApiClient::<Transaction, Block, Receipt, Header>::block_by_number(
-                    rpc_client,
-                    alloy_eips::BlockNumberOrTag::Latest,
-                    false,
-                )
-                .await?;
+            let latest_block = EthApiClient::<
+                TransactionRequest,
+                Transaction,
+                Block,
+                Receipt,
+                Header,
+            >::block_by_number(
+                rpc_client, alloy_eips::BlockNumberOrTag::Latest, false
+            )
+            .await?;
 
             let latest_block = latest_block.ok_or_else(|| eyre::eyre!("Latest block not found"))?;
             let parent_hash = latest_block.header.hash;
@@ -242,7 +245,9 @@ where
             debug!("FCU result: {:?}", fcu_result);
 
             // validate the FCU status before proceeding
-            validate_fcu_response(&fcu_result, "GenerateNextPayload")?;
+            // Note: In the context of GenerateNextPayload, Syncing usually means the engine
+            // doesn't have the requested head block, which should be an error
+            expect_fcu_not_syncing_or_accepted(&fcu_result, "GenerateNextPayload")?;
 
             let payload_id = if let Some(payload_id) = fcu_result.payload_id {
                 debug!("Received new payload ID: {:?}", payload_id);
@@ -269,7 +274,10 @@ where
                 debug!("Fresh FCU result: {:?}", fresh_fcu_result);
 
                 // validate the fresh FCU status
-                validate_fcu_response(&fresh_fcu_result, "GenerateNextPayload (fresh)")?;
+                expect_fcu_not_syncing_or_accepted(
+                    &fresh_fcu_result,
+                    "GenerateNextPayload (fresh)",
+                )?;
 
                 if let Some(payload_id) = fresh_fcu_result.payload_id {
                     payload_id
@@ -334,14 +342,17 @@ where
             } else {
                 // fallback to RPC query
                 let rpc_client = &env.node_clients[0].rpc;
-                let current_head_block =
-                    EthApiClient::<Transaction, Block, Receipt, Header>::block_by_number(
-                        rpc_client,
-                        alloy_eips::BlockNumberOrTag::Latest,
-                        false,
-                    )
-                    .await?
-                    .ok_or_else(|| eyre::eyre!("No latest block found from RPC"))?;
+                let current_head_block = EthApiClient::<
+                    TransactionRequest,
+                    Transaction,
+                    Block,
+                    Receipt,
+                    Header,
+                >::block_by_number(
+                    rpc_client, alloy_eips::BlockNumberOrTag::Latest, false
+                )
+                .await?
+                .ok_or_else(|| eyre::eyre!("No latest block found from RPC"))?;
                 debug!("Using RPC latest block hash as head: {}", current_head_block.header.hash);
                 current_head_block.header.hash
             };
@@ -404,14 +415,17 @@ where
         Box::pin(async move {
             // get the latest block from the first client to update environment state
             let rpc_client = &env.node_clients[0].rpc;
-            let latest_block =
-                EthApiClient::<Transaction, Block, Receipt, Header>::block_by_number(
-                    rpc_client,
-                    alloy_eips::BlockNumberOrTag::Latest,
-                    false,
-                )
-                .await?
-                .ok_or_else(|| eyre::eyre!("No latest block found from RPC"))?;
+            let latest_block = EthApiClient::<
+                TransactionRequest,
+                Transaction,
+                Block,
+                Receipt,
+                Header,
+            >::block_by_number(
+                rpc_client, alloy_eips::BlockNumberOrTag::Latest, false
+            )
+            .await?
+            .ok_or_else(|| eyre::eyre!("No latest block found from RPC"))?;
 
             // update environment with the new block information
             env.set_current_block_info(BlockInfo {
@@ -511,13 +525,17 @@ where
             let rpc_client = &client.rpc;
 
             // get the last header by number using latest_head_number
-            let rpc_latest_header =
-                EthApiClient::<Transaction, Block, Receipt, Header>::header_by_number(
-                    rpc_client,
-                    alloy_eips::BlockNumberOrTag::Latest,
-                )
-                .await?
-                .ok_or_else(|| eyre::eyre!("No latest header found from rpc"))?;
+            let rpc_latest_header = EthApiClient::<
+                TransactionRequest,
+                Transaction,
+                Block,
+                Receipt,
+                Header,
+            >::header_by_number(
+                rpc_client, alloy_eips::BlockNumberOrTag::Latest
+            )
+            .await?
+            .ok_or_else(|| eyre::eyre!("No latest header found from rpc"))?;
 
             // perform several checks
             let next_new_payload = env
@@ -594,7 +612,17 @@ where
 
 /// Action that broadcasts the next new payload
 #[derive(Debug, Default)]
-pub struct BroadcastNextNewPayload {}
+pub struct BroadcastNextNewPayload {
+    /// If true, only send to the active node. If false, broadcast to all nodes.
+    active_node_only: bool,
+}
+
+impl BroadcastNextNewPayload {
+    /// Create a new `BroadcastNextNewPayload` action that only sends to the active node
+    pub const fn with_active_node() -> Self {
+        Self { active_node_only: true }
+    }
+}
 
 impl<Engine> Action<Engine> for BroadcastNextNewPayload
 where
@@ -625,14 +653,11 @@ where
             let execution_payload_envelope: ExecutionPayloadEnvelopeV3 = payload_envelope.into();
             let execution_payload = execution_payload_envelope.execution_payload;
 
-            // Loop through all clients and broadcast the next new payload
-            let mut broadcast_results = Vec::new();
-            let mut first_valid_seen = false;
-
-            for (idx, client) in env.node_clients.iter().enumerate() {
-                let engine = client.engine.http_client();
+            if self.active_node_only {
+                // Send only to the active node
+                let active_idx = env.active_node_idx;
+                let engine = env.node_clients[active_idx].engine.http_client();
 
-                // Broadcast the execution payload
                 let result = EngineApiClient::<Engine>::new_payload_v3(
                     &engine,
                     execution_payload.clone(),
@@ -641,35 +666,70 @@ where
                 )
                 .await?;
 
-                broadcast_results.push((idx, result.status.clone()));
-                debug!("Node {}: new_payload broadcast status: {:?}", idx, result.status);
+                debug!("Active node {}: new_payload status: {:?}", active_idx, result.status);
 
-                // Check if this node accepted the payload
-                if result.status == PayloadStatusEnum::Valid && !first_valid_seen {
-                    first_valid_seen = true;
-                } else if let PayloadStatusEnum::Invalid { validation_error } = result.status {
-                    debug!(
-                        "Node {}: Invalid payload status returned from broadcast: {:?}",
-                        idx, validation_error
-                    );
+                // Validate the response
+                match result.status {
+                    PayloadStatusEnum::Valid => {
+                        env.active_node_state_mut()?.latest_payload_executed =
+                            Some(next_new_payload);
+                        Ok(())
+                    }
+                    other => Err(eyre::eyre!(
+                        "Active node {}: Unexpected payload status: {:?}",
+                        active_idx,
+                        other
+                    )),
                 }
-            }
+            } else {
+                // Loop through all clients and broadcast the next new payload
+                let mut broadcast_results = Vec::new();
+                let mut first_valid_seen = false;
+
+                for (idx, client) in env.node_clients.iter().enumerate() {
+                    let engine = client.engine.http_client();
+
+                    // Broadcast the execution payload
+                    let result = EngineApiClient::<Engine>::new_payload_v3(
+                        &engine,
+                        execution_payload.clone(),
+                        vec![],
+                        parent_beacon_block_root,
+                    )
+                    .await?;
 
-            // Update the executed payload state after broadcasting to all nodes
-            if first_valid_seen {
-                env.active_node_state_mut()?.latest_payload_executed = Some(next_new_payload);
-            }
+                    broadcast_results.push((idx, result.status.clone()));
+                    debug!("Node {}: new_payload broadcast status: {:?}", idx, result.status);
 
-            // Check if at least one node accepted the payload
-            let any_valid =
-                broadcast_results.iter().any(|(_, status)| *status == PayloadStatusEnum::Valid);
-            if !any_valid {
-                return Err(eyre::eyre!("Failed to successfully broadcast payload to any client"));
-            }
+                    // Check if this node accepted the payload
+                    if result.status == PayloadStatusEnum::Valid && !first_valid_seen {
+                        first_valid_seen = true;
+                    } else if let PayloadStatusEnum::Invalid { validation_error } = result.status {
+                        debug!(
+                            "Node {}: Invalid payload status returned from broadcast: {:?}",
+                            idx, validation_error
+                        );
+                    }
+                }
+
+                // Update the executed payload state after broadcasting to all nodes
+                if first_valid_seen {
+                    env.active_node_state_mut()?.latest_payload_executed = Some(next_new_payload);
+                }
 
-            debug!("Broadcast complete. Results: {:?}", broadcast_results);
+                // Check if at least one node accepted the payload
+                let any_valid =
+                    broadcast_results.iter().any(|(_, status)| *status == PayloadStatusEnum::Valid);
+                if !any_valid {
+                    return Err(eyre::eyre!(
+                        "Failed to successfully broadcast payload to any client"
+                    ));
+                }
 
-            Ok(())
+                debug!("Broadcast complete. Results: {:?}", broadcast_results);
+
+                Ok(())
+            }
         })
     }
 }
@@ -868,6 +928,60 @@ where
     }
 }
 
+/// Action that produces blocks locally without broadcasting to other nodes
+/// This sends the payload only to the active node to ensure it's available locally
+#[derive(Debug)]
+pub struct ProduceBlocksLocally<Engine> {
+    /// Number of blocks to produce
+    pub num_blocks: u64,
+    /// Tracks engine type
+    _phantom: PhantomData<Engine>,
+}
+
+impl<Engine> ProduceBlocksLocally<Engine> {
+    /// Create a new `ProduceBlocksLocally` action
+    pub fn new(num_blocks: u64) -> Self {
+        Self { num_blocks, _phantom: Default::default() }
+    }
+}
+
+impl<Engine> Default for ProduceBlocksLocally<Engine> {
+    fn default() -> Self {
+        Self::new(0)
+    }
+}
+
+impl<Engine> Action<Engine> for ProduceBlocksLocally<Engine>
+where
+    Engine: EngineTypes + PayloadTypes,
+    Engine::PayloadAttributes: From<PayloadAttributes> + Clone,
+    Engine::ExecutionPayloadEnvelopeV3: Into<ExecutionPayloadEnvelopeV3>,
+{
+    fn execute<'a>(&'a mut self, env: &'a mut Environment<Engine>) -> BoxFuture<'a, Result<()>> {
+        Box::pin(async move {
+            // Remember the active node to ensure all blocks are produced on the same node
+            let producer_idx = env.active_node_idx;
+
+            for _ in 0..self.num_blocks {
+                // Ensure we always use the same producer
+                env.last_producer_idx = Some(producer_idx);
+
+                // create a sequence that produces blocks and sends only to active node
+                let mut sequence = Sequence::new(vec![
+                    // Skip PickNextBlockProducer to maintain the same producer
+                    Box::new(GeneratePayloadAttributes::default()),
+                    Box::new(GenerateNextPayload::default()),
+                    // Send payload only to the active node to make it available
+                    Box::new(BroadcastNextNewPayload::with_active_node()),
+                    Box::new(UpdateBlockInfoToLatestPayload::default()),
+                ]);
+                sequence.execute(env).await?;
+            }
+            Ok(())
+        })
+    }
+}
+
 /// Action that produces a sequence of blocks where some blocks are intentionally invalid
 #[derive(Debug)]
 pub struct ProduceInvalidBlocks<Engine> {
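Editor's note: `ProduceBlocksLocally` pins the producer index so payload attributes stay consistent across iterations (which is why `PickNextBlockProducer` is skipped). A sketch of how the tests below compose it to build a private chain, assuming the action names exported above:

```rust
// Build a private two-block chain on node 0 without gossiping it to peers.
let setup_private_chain: Vec<Box<dyn Action<EthEngineTypes>>> = vec![
    Box::new(SelectActiveNode::new(0)),
    Box::new(ProduceBlocksLocally::<EthEngineTypes>::new(2)),
    // FCU only on node 0, so only its canonical head advances.
    Box::new(MakeCanonical::with_active_node()),
];
```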
diff --git a/crates/e2e-test-utils/src/testsuite/setup.rs b/crates/e2e-test-utils/src/testsuite/setup.rs
index 0894c208203..0970451526b 100644
--- a/crates/e2e-test-utils/src/testsuite/setup.rs
+++ b/crates/e2e-test-utils/src/testsuite/setup.rs
@@ -1,13 +1,13 @@
 //! Test setup utilities for configuring the initial state.
 
 use crate::{
-    setup_engine, testsuite::Environment, Adapter, NodeBuilderHelper, PayloadAttributesBuilder,
-    RpcHandleProvider, TmpNodeAddOnsHandle, TmpNodeEthApi,
+    setup_engine_with_connection, testsuite::Environment, Adapter, NodeBuilderHelper,
+    PayloadAttributesBuilder, RpcHandleProvider, TmpNodeAddOnsHandle, TmpNodeEthApi,
 };
 use alloy_eips::BlockNumberOrTag;
 use alloy_primitives::B256;
 use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes};
-use alloy_rpc_types_eth::{Block as RpcBlock, Header, Receipt, Transaction};
+use alloy_rpc_types_eth::{Block as RpcBlock, Header, Receipt, Transaction, TransactionRequest};
 use eyre::{eyre, Result};
 use reth_chainspec::ChainSpec;
 use reth_engine_local::LocalPayloadAttributesBuilder;
@@ -24,7 +24,7 @@ use tokio::{
 };
 use tracing::{debug, error};
 
-/// Configuration for setting upa test environment
+/// Configuration for setting up a test environment
 #[derive(Debug)]
 pub struct Setup<I> {
     /// Chain specification to use
@@ -161,12 +161,13 @@ where
             )
         };
 
-        let result = setup_engine::<N>(
+        let result = setup_engine_with_connection::<N>(
             node_count,
             Arc::<N::ChainSpec>::new((*chain_spec).clone().into()),
             is_dev,
             self.tree_config.clone(),
             attributes_generator,
+            self.network.connect_nodes,
         )
        .await;
 
@@ -210,7 +211,7 @@ where
         let mut last_error = None;
 
         while retry_count < MAX_RETRIES {
-            match EthApiClient::<Transaction, RpcBlock, Receipt, Header>::block_by_number(
+            match EthApiClient::<TransactionRequest, Transaction, RpcBlock, Receipt, Header>::block_by_number(
                 &client.rpc,
                 BlockNumberOrTag::Latest,
                 false,
@@ -244,14 +245,17 @@ where
         // Initialize each node's state with genesis block information
         let genesis_block_info = {
             let first_client = &env.node_clients[0];
-            let genesis_block =
-                EthApiClient::<Transaction, RpcBlock, Receipt, Header>::block_by_number(
-                    &first_client.rpc,
-                    BlockNumberOrTag::Number(0),
-                    false,
-                )
-                .await?
-                .ok_or_else(|| eyre!("Genesis block not found"))?;
+            let genesis_block = EthApiClient::<
+                TransactionRequest,
+                Transaction,
+                RpcBlock,
+                Receipt,
+                Header,
+            >::block_by_number(
+                &first_client.rpc, BlockNumberOrTag::Number(0), false
+            )
+            .await?
+ .ok_or_else(|| eyre!("Genesis block not found"))?; crate::testsuite::BlockInfo { hash: genesis_block.header.hash, @@ -296,16 +300,23 @@ pub struct Genesis {} pub struct NetworkSetup { /// Number of nodes to create pub node_count: usize, + /// Whether nodes should be connected to each other + pub connect_nodes: bool, } impl NetworkSetup { /// Create a new network setup with a single node pub const fn single_node() -> Self { - Self { node_count: 1 } + Self { node_count: 1, connect_nodes: true } } - /// Create a new network setup with multiple nodes + /// Create a new network setup with multiple nodes (connected) pub const fn multi_node(count: usize) -> Self { - Self { node_count: count } + Self { node_count: count, connect_nodes: true } + } + + /// Create a new network setup with multiple nodes (disconnected) + pub const fn multi_node_unconnected(count: usize) -> Self { + Self { node_count: count, connect_nodes: false } } } diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 7ddf593d337..54e18c07a70 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -281,7 +281,7 @@ where let filename = format!("{}_{}.bundle_state.diff", block.number(), block.hash()); // Convert bundle state to sorted struct which has BTreeMap instead of HashMap to - // have deterministric ordering + // have deterministic ordering let bundle_state_sorted = BundleStateSorted::from_bundle_state(&bundle_state); let output_state_sorted = BundleStateSorted::from_bundle_state(&output.state); @@ -317,13 +317,15 @@ where if &trie_output != original_updates { // Trie updates are too big to diff, so we just save the original and re-executed + let trie_output_sorted = &trie_output.into_sorted_ref(); + let original_updates_sorted = &original_updates.into_sorted_ref(); let original_path = self.save_file( format!("{}_{}.trie_updates.original.json", block.number(), block.hash()), - original_updates, + original_updates_sorted, )?; let re_executed_path = self.save_file( format!("{}_{}.trie_updates.re_executed.json", block.number(), block.hash()), - &trie_output, + trie_output_sorted, )?; warn!( target: "engine::invalid_block_hooks::witness", diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 639b227679d..9794caf4473 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -11,7 +11,7 @@ pub const DEFAULT_MAX_PROOF_TASK_CONCURRENCY: u64 = 256; /// Default number of reserved CPU cores for non-reth processes. /// -/// This will be deducated from the thread count of main reth global threadpool. +/// This will be deducted from the thread count of main reth global threadpool. pub const DEFAULT_RESERVED_CPU_CORES: usize = 1; const DEFAULT_BLOCK_BUFFER_LIMIT: u32 = 256; diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index d9093aed30a..f634d2a3264 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -51,7 +51,7 @@ type EngineServiceType = ChainOrchestrator< /// The type that drives the chain forward and communicates progress. 
 #[pin_project]
 #[expect(missing_debug_implementations)]
-// TODO(mattsse): remove hidde once fixed :
+// TODO(mattsse): remove hidden once fixed :
 // otherwise rustdoc fails to resolve the alias
 #[doc(hidden)]
 pub struct EngineService
diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml
index 8b17a4a8a75..b5515142cad 100644
--- a/crates/engine/tree/Cargo.toml
+++ b/crates/engine/tree/Cargo.toml
@@ -82,7 +82,7 @@ reth-exex-types.workspace = true
 reth-network-p2p = { workspace = true, features = ["test-utils"] }
 reth-prune-types.workspace = true
 reth-prune.workspace = true
-reth-rpc-types-compat.workspace = true
+reth-rpc-convert.workspace = true
 reth-stages = { workspace = true, features = ["test-utils"] }
 reth-static-file.workspace = true
 reth-testing-utils.workspace = true
diff --git a/crates/engine/tree/benches/channel_perf.rs b/crates/engine/tree/benches/channel_perf.rs
index 448a25a05f1..41dd651c890 100644
--- a/crates/engine/tree/benches/channel_perf.rs
+++ b/crates/engine/tree/benches/channel_perf.rs
@@ -5,7 +5,7 @@
 use alloy_primitives::{B256, U256};
 use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion};
 use proptest::test_runner::TestRunner;
-use rand_08::Rng;
+use rand::Rng;
 use revm_primitives::{Address, HashMap};
 use revm_state::{Account, AccountInfo, AccountStatus, EvmState, EvmStorage, EvmStorageSlot};
 use std::{hint::black_box, thread};
@@ -18,17 +18,18 @@ fn create_bench_state(num_accounts: usize) -> EvmState {
 
     for i in 0..num_accounts {
         let storage =
-            EvmStorage::from_iter([(U256::from(i), EvmStorageSlot::new(U256::from(i + 1)))]);
+            EvmStorage::from_iter([(U256::from(i), EvmStorageSlot::new(U256::from(i + 1), 0))]);
 
         let account = Account {
             info: AccountInfo {
                 balance: U256::from(100),
                 nonce: 10,
-                code_hash: B256::from_slice(&rng.r#gen::<[u8; 32]>()),
-                ..Default::default()
+                code_hash: B256::from_slice(&rng.random::<[u8; 32]>()),
+                code: Default::default(),
             },
             storage,
-            status: AccountStatus::Loaded,
+            status: AccountStatus::empty(),
+            transaction_id: 0,
         };
 
         let address = Address::with_last_byte(i as u8);
diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs
index 75ddad00bcf..0d19bad0b14 100644
--- a/crates/engine/tree/benches/state_root_task.rs
+++ b/crates/engine/tree/benches/state_root_task.rs
@@ -8,7 +8,7 @@ use alloy_evm::block::StateChangeSource;
 use alloy_primitives::{Address, B256};
 use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
 use proptest::test_runner::TestRunner;
-use rand_08::Rng;
+use rand::Rng;
 use reth_chain_state::EthPrimitives;
 use reth_chainspec::ChainSpec;
 use reth_db_common::init::init_genesis;
@@ -52,43 +52,46 @@ fn create_bench_state_updates(params: &BenchParams) -> Vec<EvmState> {
 
     for _ in 0..params.updates_per_account {
         let mut state_update = EvmState::default();
-        let num_accounts_in_update = rng.gen_range(1..=params.num_accounts);
+        let num_accounts_in_update = rng.random_range(1..=params.num_accounts);
 
         // regular updates for randomly selected accounts
         for &address in &all_addresses[0..num_accounts_in_update] {
             // randomly choose to self-destruct with probability
             // (selfdestructs/accounts)
-            let is_selfdestruct =
-                rng.gen_bool(params.selfdestructs_per_update as f64 / params.num_accounts as f64);
+            let is_selfdestruct = rng
+                .random_bool(params.selfdestructs_per_update as f64 / params.num_accounts as f64);
 
             let account = if is_selfdestruct {
                 RevmAccount {
                     info: AccountInfo::default(),
                     storage: HashMap::default(),
                     status: AccountStatus::SelfDestructed,
+                    transaction_id: 0,
                 }
             } else {
                 RevmAccount {
                     #[allow(clippy::needless_update)]
                     info: AccountInfo {
-                        balance: U256::from(rng.r#gen::<u64>()),
-                        nonce: rng.r#gen::<u64>(),
+                        balance: U256::from(rng.random::<u64>()),
+                        nonce: rng.random::<u64>(),
                         code_hash: KECCAK_EMPTY,
                         code: Some(Default::default()),
                         ..Default::default()
                     },
-                    storage: (0..rng.gen_range(0..=params.storage_slots_per_account))
+                    storage: (0..rng.random_range(0..=params.storage_slots_per_account))
                         .map(|_| {
                             (
-                                U256::from(rng.r#gen::<u64>()),
+                                U256::from(rng.random::<u64>()),
                                 EvmStorageSlot::new_changed(
                                     U256::ZERO,
-                                    U256::from(rng.r#gen::<u64>()),
+                                    U256::from(rng.random::<u64>()),
+                                    0,
                                 ),
                             )
                         })
                        .collect(),
                     status: AccountStatus::Touched,
+                    transaction_id: 0,
                 }
             };
 
             state_update.insert(address, account);
@@ -229,7 +232,7 @@ fn bench_state_root(c: &mut Criterion) {
                     (genesis_hash, payload_processor, provider, state_updates)
                 },
-                |(genesis_hash, payload_processor, provider, state_updates)| {
+                |(genesis_hash, mut payload_processor, provider, state_updates)| {
                     black_box({
                         let mut handle = payload_processor.spawn(
                             Default::default(),
diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs
index a6e16a7503a..bce9949564f 100644
--- a/crates/engine/tree/src/tree/cached_state.rs
+++ b/crates/engine/tree/src/tree/cached_state.rs
@@ -6,8 +6,8 @@ use reth_errors::ProviderResult;
 use reth_metrics::Metrics;
 use reth_primitives_traits::{Account, Bytecode};
 use reth_provider::{
-    AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider,
-    StateRootProvider, StorageRootProvider,
+    AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider,
+    StateProvider, StateRootProvider, StorageRootProvider,
 };
 use reth_revm::db::BundleState;
 use reth_trie::{
@@ -162,7 +162,9 @@ impl<S: StateProvider> StateProvider for CachedStateProvider<S> {
             }
         }
     }
+}
 
+impl<S: StateProvider> BytecodeReader for CachedStateProvider<S> {
     fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult<Option<Bytecode>> {
         if let Some(res) = self.caches.code_cache.get(code_hash) {
             self.metrics.code_cache_hits.increment(1);
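Editor's note: both state-provider wrappers in this diff move `bytecode_by_hash` out of the `StateProvider` impl into the new `BytecodeReader` trait, so any other wrapper follows the same two-impl split. A hedged sketch (`MyProvider` is hypothetical; paths follow the imports above, and the exact trait bounds are assumed):

```rust
use alloy_primitives::B256;
use reth_errors::ProviderResult;
use reth_primitives_traits::Bytecode;
use reth_provider::{BytecodeReader, StateProvider};

/// Hypothetical wrapper, mirroring the split applied to `CachedStateProvider`
/// and `InstrumentedStateProvider` in this diff.
struct MyProvider<S> {
    inner: S,
}

impl<S: StateProvider> BytecodeReader for MyProvider<S> {
    fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult<Option<Bytecode>> {
        // Delegate to the wrapped provider; caching or metrics hooks go here.
        self.inner.bytecode_by_hash(code_hash)
    }
}
```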
diff --git a/crates/engine/tree/src/tree/e2e_tests.rs b/crates/engine/tree/src/tree/e2e_tests.rs
index fbadae28698..9eb6a64c885 100644
--- a/crates/engine/tree/src/tree/e2e_tests.rs
+++ b/crates/engine/tree/src/tree/e2e_tests.rs
@@ -5,8 +5,9 @@ use eyre::Result;
 use reth_chainspec::{ChainSpecBuilder, MAINNET};
 use reth_e2e_test_utils::testsuite::{
     actions::{
-        CaptureBlock, CreateFork, ExpectFcuStatus, MakeCanonical, ProduceBlocks,
-        ProduceInvalidBlocks, ReorgTo, ValidateCanonicalTag,
+        CaptureBlock, CompareNodeChainTips, CreateFork, ExpectFcuStatus, MakeCanonical,
+        ProduceBlocks, ProduceBlocksLocally, ProduceInvalidBlocks, ReorgTo, SelectActiveNode,
+        SendNewPayloads, UpdateBlockInfo, ValidateCanonicalTag, WaitForSync,
     },
     setup::{NetworkSetup, Setup},
     TestBuilder,
@@ -151,3 +152,183 @@
     Ok(())
 }
+
+/// Test that verifies engine tree behavior when handling invalid blocks.
+/// This test demonstrates that invalid blocks are correctly rejected and that
+/// attempts to build on top of them fail appropriately.
+#[tokio::test]
+async fn test_engine_tree_reorg_with_missing_ancestor_expecting_valid_e2e() -> Result<()> {
+    reth_tracing::init_test_tracing();
+
+    let test = TestBuilder::new()
+        .with_setup(default_engine_tree_setup())
+        // build main chain (blocks 1-6)
+        .with_action(ProduceBlocks::<EthEngineTypes>::new(6))
+        .with_action(MakeCanonical::new())
+        .with_action(CaptureBlock::new("main_chain_tip"))
+        // create a valid fork first
+        .with_action(CreateFork::<EthEngineTypes>::new_from_tag("main_chain_tip", 5))
+        .with_action(CaptureBlock::new("valid_fork_tip"))
+        // FCU to the valid fork should work
+        .with_action(ExpectFcuStatus::valid("valid_fork_tip"));
+
+    test.run::<EthereumNode>().await?;
+
+    // attempting to build invalid chains fails properly
+    let invalid_test = TestBuilder::new()
+        .with_setup(default_engine_tree_setup())
+        .with_action(ProduceBlocks::<EthEngineTypes>::new(3))
+        .with_action(MakeCanonical::new())
+        // This should fail when trying to build subsequent blocks on the invalid block
+        .with_action(ProduceInvalidBlocks::<EthEngineTypes>::with_invalid_at(2, 0));
+
+    assert!(invalid_test.run::<EthereumNode>().await.is_err());
+
+    Ok(())
+}
+
+/// Test that verifies buffered blocks are eventually connected when sent in reverse order.
+#[tokio::test]
+async fn test_engine_tree_buffered_blocks_are_eventually_connected_e2e() -> Result<()> {
+    reth_tracing::init_test_tracing();
+
+    let test = TestBuilder::new()
+        .with_setup(
+            Setup::default()
+                .with_chain_spec(Arc::new(
+                    ChainSpecBuilder::default()
+                        .chain(MAINNET.chain)
+                        .genesis(
+                            serde_json::from_str(include_str!(
+                                "../../../../e2e-test-utils/src/testsuite/assets/genesis.json"
+                            ))
+                            .unwrap(),
+                        )
+                        .cancun_activated()
+                        .build(),
+                ))
+                .with_network(NetworkSetup::multi_node_unconnected(2)) // Need 2 disconnected nodes
+                .with_tree_config(
+                    TreeConfig::default()
+                        .with_legacy_state_root(false)
+                        .with_has_enough_parallelism(true),
+                ),
+        )
+        // node 0 produces blocks 1 and 2 locally without broadcasting
+        .with_action(SelectActiveNode::new(0))
+        .with_action(ProduceBlocksLocally::<EthEngineTypes>::new(2))
+        // make the blocks canonical on node 0 so they're available via RPC
+        .with_action(MakeCanonical::with_active_node())
+        // send blocks in reverse order (2, then 1) from node 0 to node 1
+        .with_action(
+            SendNewPayloads::<EthEngineTypes>::new()
+                .with_target_node(1)
+                .with_source_node(0)
+                .with_start_block(1)
+                .with_total_blocks(2)
+                .in_reverse_order(),
+        )
+        // update node 1's view to recognize the new blocks
+        .with_action(SelectActiveNode::new(1))
+        // get the latest block from node 1's RPC and update environment
+        .with_action(UpdateBlockInfo::default())
+        // make block 2 canonical on node 1 with a forkchoice update
+        .with_action(MakeCanonical::with_active_node())
+        // verify both nodes eventually have the same chain tip
+        .with_action(CompareNodeChainTips::expect_same(0, 1));
+
+    test.run::<EthereumNode>().await?;
+
+    Ok(())
+}
+
+/// Test that verifies forkchoice updates can extend the canonical chain progressively.
+///
+/// This test creates a longer chain of blocks, then uses forkchoice updates to make
+/// different parts of the chain canonical in sequence, verifying that FCU properly
+/// advances the canonical head when all blocks are already available.
+#[tokio::test]
+async fn test_engine_tree_fcu_extends_canon_chain_e2e() -> Result<()> {
+    reth_tracing::init_test_tracing();
+
+    let test = TestBuilder::new()
+        .with_setup(default_engine_tree_setup())
+        // create and make canonical a base chain with 1 block
+        .with_action(ProduceBlocks::<EthEngineTypes>::new(1))
+        .with_action(MakeCanonical::new())
+        // extend the chain with 10 more blocks (total 11 blocks: 0-10)
+        .with_action(ProduceBlocks::<EthEngineTypes>::new(10))
+        // capture block 6 as our intermediate target (from 0-indexed, this is block 6)
+        .with_action(CaptureBlock::new("target_block"))
+        // make the intermediate target canonical via FCU
+        .with_action(ReorgTo::<EthEngineTypes>::new_from_tag("target_block"))
+        // now make the chain tip canonical via FCU
+        .with_action(MakeCanonical::new());
+
+    test.run::<EthereumNode>().await?;
+
+    Ok(())
+}
+
+/// Test that verifies live sync transition where a long chain eventually becomes canonical.
+///
+/// This test simulates a scenario where:
+/// 1. Both nodes start with the same short base chain
+/// 2. Node 0 builds a long chain locally (no broadcast, becomes its canonical tip)
+/// 3. Node 1 still has only the short base chain as its canonical tip
+/// 4. Node 1 receives FCU pointing to Node 0's long chain tip and must sync
+/// 5. Both nodes end up with the same canonical chain through real P2P sync
+#[tokio::test]
+async fn test_engine_tree_live_sync_transition_eventually_canonical_e2e() -> Result<()> {
+    reth_tracing::init_test_tracing();
+
+    const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = 32; // EPOCH_SLOTS from alloy-eips
+
+    let test = TestBuilder::new()
+        .with_setup(
+            Setup::default()
+                .with_chain_spec(Arc::new(
+                    ChainSpecBuilder::default()
+                        .chain(MAINNET.chain)
+                        .genesis(
+                            serde_json::from_str(include_str!(
+                                "../../../../e2e-test-utils/src/testsuite/assets/genesis.json"
+                            ))
+                            .unwrap(),
+                        )
+                        .cancun_activated()
+                        .build(),
+                ))
+                .with_network(NetworkSetup::multi_node(2)) // Two connected nodes
+                .with_tree_config(
+                    TreeConfig::default()
+                        .with_legacy_state_root(false)
+                        .with_has_enough_parallelism(true),
+                ),
+        )
+        // Both nodes start with the same base chain (1 block)
+        .with_action(SelectActiveNode::new(0))
+        .with_action(ProduceBlocks::<EthEngineTypes>::new(1))
+        .with_action(MakeCanonical::new()) // Both nodes have the same base chain
+        .with_action(CaptureBlock::new("base_chain_tip"))
+        // Node 0: Build a much longer chain but don't broadcast it yet
+        .with_action(ProduceBlocksLocally::<EthEngineTypes>::new(MIN_BLOCKS_FOR_PIPELINE_RUN + 10))
+        .with_action(MakeCanonical::with_active_node()) // Only make it canonical on Node 0
+        .with_action(CaptureBlock::new("long_chain_tip"))
+        // Verify Node 0's canonical tip is the long chain tip
+        .with_action(ValidateCanonicalTag::new("long_chain_tip"))
+        // Verify Node 1's canonical tip is still the base chain tip
+        .with_action(SelectActiveNode::new(1))
+        .with_action(ValidateCanonicalTag::new("base_chain_tip"))
+        // Node 1: Send FCU pointing to Node 0's long chain tip
+        // This should trigger Node 1 to sync the missing blocks from Node 0
+        .with_action(ReorgTo::<EthEngineTypes>::new_from_tag("long_chain_tip"))
+        // Wait for Node 1 to sync with Node 0
+        .with_action(WaitForSync::new(0, 1).with_timeout(60))
+        // Verify both nodes end up with the same canonical chain
+        .with_action(CompareNodeChainTips::expect_same(0, 1));
+
+    test.run::<EthereumNode>().await?;
+
+    Ok(())
+}
diff --git a/crates/engine/tree/src/tree/instrumented_state.rs b/crates/engine/tree/src/tree/instrumented_state.rs
index ab6707972ec..9d96aca3a2e 100644
--- a/crates/engine/tree/src/tree/instrumented_state.rs
+++ b/crates/engine/tree/src/tree/instrumented_state.rs
@@ -5,8 +5,8 @@ use reth_errors::ProviderResult;
 use reth_metrics::Metrics;
 use reth_primitives_traits::{Account, Bytecode};
 use reth_provider::{
-    AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider,
-    StateRootProvider, StorageRootProvider,
+    AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider,
+    StateProvider, StateRootProvider, StorageRootProvider,
 };
 use reth_trie::{
     updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof,
@@ -191,7 +191,9 @@ impl<S: StateProvider> StateProvider for InstrumentedStateProvider<S> {
         self.record_storage_fetch(start.elapsed());
         res
     }
+}
 
+impl<S: StateProvider> BytecodeReader for InstrumentedStateProvider<S> {
     fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult<Option<Bytecode>> {
         let start = Instant::now();
         let res = self.state_provider.bytecode_by_hash(code_hash);
diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs
index d783ba282d9..fd214e88781 100644
--- a/crates/engine/tree/src/tree/mod.rs
+++ b/crates/engine/tree/src/tree/mod.rs
@@ -10,7 +10,7 @@ use crate::{
 use alloy_consensus::BlockHeader;
 use alloy_eips::{merge::EPOCH_SLOTS, BlockNumHash, NumHash};
 use alloy_evm::block::BlockExecutor;
-use alloy_primitives::B256;
+use alloy_primitives::{Address, B256};
 use alloy_rpc_types_engine::{
     ForkchoiceState, PayloadStatus, PayloadStatusEnum, PayloadValidationError,
 };
@@ -18,7 +18,7 @@ use error::{InsertBlockError, InsertBlockErrorKind, InsertBlockFatalError};
 use instrumented_state::InstrumentedStateProvider;
 use payload_processor::sparse_trie::StateRootComputeOutcome;
 use persistence_state::CurrentPersistenceAction;
-use precompile_cache::{CachedPrecompile, PrecompileCacheMap};
+use precompile_cache::{CachedPrecompile, CachedPrecompileMetrics, PrecompileCacheMap};
 use reth_chain_state::{
     CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates,
     MemoryOverlayStateProvider, NewCanonicalChain,
@@ -50,6 +50,7 @@ use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError};
 use state::TreeState;
 use std::{
     borrow::Cow,
+    collections::HashMap,
     fmt::Debug,
     sync::{
         mpsc::{Receiver, RecvError, RecvTimeoutError, Sender},
@@ -276,6 +277,8 @@ where
     evm_config: C,
     /// Precompile cache map.
     precompile_cache_map: PrecompileCacheMap<SpecFor<C>>,
+    /// Metrics for precompile cache, stored per address to avoid re-allocation.
+    precompile_cache_metrics: HashMap<Address, CachedPrecompileMetrics>,
 }
 
 impl std::fmt::Debug
@@ -370,6 +373,7 @@ where
             payload_processor,
             evm_config,
             precompile_cache_map,
+            precompile_cache_metrics: HashMap::new(),
         }
     }
 
@@ -1505,8 +1509,8 @@ where
     fn canonical_block_by_hash(&self, hash: B256) -> ProviderResult<Option<ExecutedBlock<N>>> {
         trace!(target: "engine::tree", ?hash, "Fetching executed block by hash");
         // check memory first
-        if let Some(block) = self.state.tree_state.executed_block_by_hash(hash).cloned() {
-            return Ok(Some(block.block))
+        if let Some(block) = self.state.tree_state.executed_block_by_hash(hash) {
+            return Ok(Some(block.block.clone()))
         }
 
         let (block, senders) = self
@@ -1914,12 +1918,13 @@ where
             let old = old
                 .iter()
                 .filter_map(|block| {
-                    let (_, trie) = self
+                    let trie = self
                         .state
                         .tree_state
                         .persisted_trie_updates
-                        .get(&block.recovered_block.hash())
-                        .cloned()?;
+                        .get(&block.recovered_block.hash())?
+ .1 + .clone(); Some(ExecutedBlockWithTrieUpdates { block: block.clone(), trie: ExecutedTrieUpdates::Present(trie), @@ -2183,9 +2188,21 @@ where // root task proof calculation will include a lot of unrelated paths in the prefix sets. // It's cheaper to run a parallel state root that does one walk over trie tables while // accounting for the prefix sets. + let has_ancestors_with_missing_trie_updates = + self.has_ancestors_with_missing_trie_updates(block.sealed_header()); let mut use_state_root_task = run_parallel_state_root && self.config.use_state_root_task() && - !self.has_ancestors_with_missing_trie_updates(block.sealed_header()); + !has_ancestors_with_missing_trie_updates; + + debug!( + target: "engine::tree", + block=?block_num_hash, + run_parallel_state_root, + has_ancestors_with_missing_trie_updates, + use_state_root_task, + config_allows_state_root_task=self.config.use_state_root_task(), + "Deciding which state root algorithm to run" + ); // use prewarming background task let header = block.clone_sealed_header(); @@ -2225,6 +2242,7 @@ where &self.config, ) } else { + debug!(target: "engine::tree", block=?block_num_hash, "Disabling state root task due to non-empty prefix sets"); use_state_root_task = false; self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder) } @@ -2282,8 +2300,9 @@ where // if we new payload extends the current canonical change we attempt to use the // background task or try to compute it in parallel if use_state_root_task { + debug!(target: "engine::tree", block=?block_num_hash, "Using sparse trie state root algorithm"); match handle.state_root() { - Ok(StateRootComputeOutcome { state_root, trie_updates }) => { + Ok(StateRootComputeOutcome { state_root, trie_updates, trie }) => { let elapsed = execution_finish.elapsed(); info!(target: "engine::tree", ?state_root, ?elapsed, "State root task finished"); // we double check the state root here for good measure @@ -2297,12 +2316,16 @@ where "State root task returned incorrect state root" ); } + + // hold on to the sparse trie for the next payload + self.payload_processor.set_sparse_trie(trie); } Err(error) => { debug!(target: "engine::tree", %error, "Background parallel state root computation failed"); } } } else { + debug!(target: "engine::tree", block=?block_num_hash, "Using parallel state root algorithm"); match self.compute_state_root_parallel( persisting_kind, block.header().parent_hash(), @@ -2420,10 +2443,16 @@ where if !self.config.precompile_cache_disabled() { executor.evm_mut().precompiles_mut().map_precompiles(|address, precompile| { + let metrics = self + .precompile_cache_metrics + .entry(*address) + .or_insert_with(|| CachedPrecompileMetrics::new_with_address(*address)) + .clone(); CachedPrecompile::wrap( precompile, self.precompile_cache_map.cache_for_address(*address), *self.evm_config.evm_env(block.header()).spec_id(), + Some(metrics), ) }); } diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 5c782fbd4bb..055d4622d1e 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -28,6 +28,7 @@ use reth_trie_parallel::{ proof_task::{ProofTaskCtx, ProofTaskManager}, root::ParallelStateRootError, }; +use reth_trie_sparse::SparseTrieState; use std::{ collections::VecDeque, sync::{ @@ -67,6 +68,9 @@ where precompile_cache_disabled: bool, /// Precompile cache map. 
     precompile_cache_map: PrecompileCacheMap<SpecFor<C>>,
+    /// A sparse trie, kept around to be used for the state root computation so that allocations
+    /// can be minimized.
+    sparse_trie: Option<SparseTrieState>,
     _marker: std::marker::PhantomData<N>,
 }
 
@@ -91,6 +95,7 @@ where
             evm_config,
             precompile_cache_disabled: config.precompile_cache_disabled(),
             precompile_cache_map,
+            sparse_trie: None,
             _marker: Default::default(),
         }
     }
@@ -134,7 +139,7 @@ where
     /// This returns a handle to await the final state root and to interact with the tasks (e.g.
     /// canceling)
     pub fn spawn<P>(
-        &self,
+        &mut self,
         header: SealedHeaderFor<N>,
        transactions: VecDeque>,
         provider_builder: StateProviderBuilder<N, P>,
@@ -191,11 +196,15 @@ where
             multi_proof_task.run();
         });
 
-        let mut sparse_trie_task = SparseTrieTask::new(
+        // take the sparse trie if it was set
+        let sparse_trie = self.sparse_trie.take();
+
+        let mut sparse_trie_task = SparseTrieTask::new_with_stored_trie(
             self.executor.clone(),
             sparse_trie_rx,
             proof_task.handle(),
             self.trie_metrics.clone(),
+            sparse_trie,
         );
 
         // wire the sparse trie to the state root response receiver
@@ -241,6 +250,11 @@ where
         PayloadHandle { to_multi_proof: None, prewarm_handle, state_root: None }
     }
 
+    /// Sets the sparse trie to be kept around for the state root computation.
+    pub(super) fn set_sparse_trie(&mut self, sparse_trie: SparseTrieState) {
+        self.sparse_trie = Some(sparse_trie);
+    }
+
     /// Spawn prewarming optionally wired to the multiproof task for target updates.
     fn spawn_caching_with<P>(
         &self,
@@ -489,6 +503,7 @@ mod tests {
                     EvmStorageSlot::new_changed(
                         U256::ZERO,
                         U256::from(rng.random::<u64>()),
+                        0,
                     ),
                 );
             }
@@ -503,6 +518,7 @@ mod tests {
                 },
                 storage,
                 status: AccountStatus::Touched,
+                transaction_id: 0,
             };
 
             state_update.insert(address, account);
@@ -566,7 +582,7 @@ mod tests {
         }
     }
 
-    let payload_processor = PayloadProcessor::<EthPrimitives, _>::new(
+    let mut payload_processor = PayloadProcessor::<EthPrimitives, _>::new(
         WorkloadExecutor::default(),
         EthEvmConfig::new(factory.chain_spec()),
         &TreeConfig::default(),
diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs
index 2153f6ee753..85e9e803305 100644
--- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs
+++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs
@@ -275,6 +275,7 @@ where
                     precompile,
                     precompile_cache_map.cache_for_address(*address),
                     spec_id,
+                    None, // No metrics for prewarm
                 )
             });
         }
diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs
index 93f00491090..c8de07c1ec5 100644
--- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs
+++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs
@@ -11,7 +11,7 @@ use reth_trie_parallel::root::ParallelStateRootError;
 use reth_trie_sparse::{
     blinded::{BlindedProvider, BlindedProviderFactory},
     errors::{SparseStateTrieResult, SparseTrieErrorKind},
-    SparseStateTrie,
+    SparseStateTrie, SparseTrieState,
 };
 use std::{
     sync::mpsc,
@@ -63,6 +63,43 @@ where
         }
     }
 
+    /// Creates a new sparse trie, populating the accounts trie with the given cleared
+    /// `SparseTrieState` if it exists.
+    pub(super) fn new_with_stored_trie(
+        executor: WorkloadExecutor,
+        updates: mpsc::Receiver<SparseTrieUpdate>,
+        blinded_provider_factory: BPF,
+        trie_metrics: MultiProofTaskMetrics,
+        sparse_trie_state: Option<SparseTrieState>,
+    ) -> Self {
+        if let Some(sparse_trie_state) = sparse_trie_state {
+            Self::with_accounts_trie(
+                executor,
+                updates,
+                blinded_provider_factory,
+                trie_metrics,
+                sparse_trie_state,
+            )
+        } else {
+            Self::new(executor, updates, blinded_provider_factory, trie_metrics)
+        }
+    }
+
+    /// Creates a new sparse trie task, using the given cleared `SparseTrieState` for the accounts
+    /// trie.
+    pub(super) fn with_accounts_trie(
+        executor: WorkloadExecutor,
+        updates: mpsc::Receiver<SparseTrieUpdate>,
+        blinded_provider_factory: BPF,
+        metrics: MultiProofTaskMetrics,
+        sparse_trie_state: SparseTrieState,
+    ) -> Self {
+        let mut trie = SparseStateTrie::new(blinded_provider_factory).with_updates(true);
+        trie.populate_from(sparse_trie_state);
+
+        Self { executor, updates, metrics, trie }
+    }
+
     /// Runs the sparse trie task to completion.
     ///
     /// This waits for new incoming [`SparseTrieUpdate`].
@@ -109,7 +146,10 @@ where
         self.metrics.sparse_trie_final_update_duration_histogram.record(start.elapsed());
         self.metrics.sparse_trie_total_duration_histogram.record(now.elapsed());
 
-        Ok(StateRootComputeOutcome { state_root, trie_updates })
+        // take the account trie
+        let trie = self.trie.take_cleared_account_trie_state();
+
+        Ok(StateRootComputeOutcome { state_root, trie_updates, trie })
     }
 }
 
@@ -121,6 +161,8 @@ pub struct StateRootComputeOutcome {
     pub state_root: B256,
     /// The trie updates.
     pub trie_updates: TrieUpdates,
+    /// The account state trie.
+    pub trie: SparseTrieState,
 }
 
 /// Updates the sparse trie with the given proofs and state, and returns the elapsed time.
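Editor's note: the changes above form an allocation-reuse round trip: the finished sparse-trie task clears the accounts trie but keeps its allocations, hands them back through the outcome, and the next payload's task is seeded with them. A hedged sketch of the lifecycle, using only the names added in this diff (`handle` and `payload_processor` stand in for the surrounding tree code):

```rust
// 1. When a payload's state root task finishes, the outcome now carries the
//    cleared-but-still-allocated accounts trie alongside the root and updates.
let StateRootComputeOutcome { state_root, trie_updates, trie } = handle.state_root()?;
// 2. The tree stores it on the processor...
payload_processor.set_sparse_trie(trie);
// 3. ...and the next `spawn` call does `self.sparse_trie.take()` and passes it
//    to `SparseTrieTask::new_with_stored_trie`, so node maps are reused rather
//    than reallocated for every payload.
```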
diff --git a/crates/engine/tree/src/tree/precompile_cache.rs b/crates/engine/tree/src/tree/precompile_cache.rs
index 47d985a9296..a3eb3a5ba2b 100644
--- a/crates/engine/tree/src/tree/precompile_cache.rs
+++ b/crates/engine/tree/src/tree/precompile_cache.rs
@@ -2,7 +2,7 @@
 
 use alloy_primitives::Bytes;
 use parking_lot::Mutex;
-use reth_evm::precompiles::{DynPrecompile, Precompile};
+use reth_evm::precompiles::{DynPrecompile, Precompile, PrecompileInput};
 use revm::precompile::{PrecompileOutput, PrecompileResult};
 use revm_primitives::Address;
 use schnellru::LruMap;
@@ -123,7 +123,7 @@ where
     /// The precompile.
     precompile: DynPrecompile,
     /// Cache metrics.
-    metrics: CachedPrecompileMetrics,
+    metrics: Option<CachedPrecompileMetrics>,
     /// Spec id associated to the EVM from which this cached precompile was created.
     spec_id: S,
 }
 
@@ -133,30 +133,47 @@ where
     S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static,
 {
     /// `CachedPrecompile` constructor.
-    pub(crate) fn new(precompile: DynPrecompile, cache: PrecompileCache<S>, spec_id: S) -> Self {
-        Self { precompile, cache, spec_id, metrics: Default::default() }
+    pub(crate) const fn new(
+        precompile: DynPrecompile,
+        cache: PrecompileCache<S>,
+        spec_id: S,
+        metrics: Option<CachedPrecompileMetrics>,
+    ) -> Self {
+        Self { precompile, cache, spec_id, metrics }
     }
 
     pub(crate) fn wrap(
         precompile: DynPrecompile,
         cache: PrecompileCache<S>,
         spec_id: S,
+        metrics: Option<CachedPrecompileMetrics>,
     ) -> DynPrecompile {
-        let wrapped = Self::new(precompile, cache, spec_id);
-        move |data: &[u8], gas_limit: u64| -> PrecompileResult { wrapped.call(data, gas_limit) }
-            .into()
+        let wrapped = Self::new(precompile, cache, spec_id, metrics);
+        move |input: PrecompileInput<'_>| -> PrecompileResult { wrapped.call(input) }.into()
     }
 
     fn increment_by_one_precompile_cache_hits(&self) {
-        self.metrics.precompile_cache_hits.increment(1);
+        if let Some(metrics) = &self.metrics {
+            metrics.precompile_cache_hits.increment(1);
+        }
     }
 
     fn increment_by_one_precompile_cache_misses(&self) {
-        self.metrics.precompile_cache_misses.increment(1);
+        if let Some(metrics) = &self.metrics {
+            metrics.precompile_cache_misses.increment(1);
+        }
+    }
+
+    fn set_precompile_cache_size_metric(&self, to: f64) {
+        if let Some(metrics) = &self.metrics {
+            metrics.precompile_cache_size.set(to);
+        }
     }
 
     fn increment_by_one_precompile_errors(&self) {
-        self.metrics.precompile_errors.increment(1);
+        if let Some(metrics) = &self.metrics {
+            metrics.precompile_errors.increment(1);
+        }
     }
 }
 
@@ -164,23 +181,23 @@ impl<S> Precompile for CachedPrecompile<S>
 where
     S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static,
 {
-    fn call(&self, data: &[u8], gas_limit: u64) -> PrecompileResult {
-        let key = CacheKeyRef::new(self.spec_id.clone(), data);
+    fn call(&self, input: PrecompileInput<'_>) -> PrecompileResult {
+        let key = CacheKeyRef::new(self.spec_id.clone(), input.data);
 
         if let Some(entry) = &self.cache.get(&key) {
             self.increment_by_one_precompile_cache_hits();
-            if gas_limit >= entry.gas_used() {
+            if input.gas >= entry.gas_used() {
                 return entry.to_precompile_result()
             }
         }
 
-        let result = self.precompile.call(data, gas_limit);
+        let result = self.precompile.call(input);
 
         match &result {
             Ok(output) => {
-                let key = CacheKey::new(self.spec_id.clone(), Bytes::copy_from_slice(data));
+                let key = CacheKey::new(self.spec_id.clone(), Bytes::copy_from_slice(input.data));
                 let size = self.cache.insert(key, CacheEntry(output.clone()));
-                self.metrics.precompile_cache_size.set(size as f64);
+                self.set_precompile_cache_size_metric(size as f64);
                 self.increment_by_one_precompile_cache_misses();
             }
            _
=> { @@ -208,13 +225,23 @@ pub(crate) struct CachedPrecompileMetrics { precompile_errors: metrics::Counter, } +impl CachedPrecompileMetrics { + /// Creates a new instance of [`CachedPrecompileMetrics`] with the given address. + /// + /// Adds address as an `address` label padded with zeros to at least two hex symbols, prefixed + /// by `0x`. + pub(crate) fn new_with_address(address: Address) -> Self { + Self::new_with_labels(&[("address", format!("0x{address:02x}"))]) + } +} + #[cfg(test)] mod tests { use std::hash::DefaultHasher; use super::*; use revm::precompile::PrecompileOutput; - use revm_primitives::hardfork::SpecId; + use revm_primitives::{hardfork::SpecId, U256}; #[test] fn test_cache_key_ref_hash() { @@ -235,13 +262,13 @@ mod tests { #[test] fn test_precompile_cache_basic() { - let dyn_precompile: DynPrecompile = |_input: &[u8], _gas: u64| -> PrecompileResult { + let dyn_precompile: DynPrecompile = |_input: PrecompileInput<'_>| -> PrecompileResult { Ok(PrecompileOutput { gas_used: 0, bytes: Bytes::default() }) } .into(); let cache = - CachedPrecompile::new(dyn_precompile, PrecompileCache::default(), SpecId::PRAGUE); + CachedPrecompile::new(dyn_precompile, PrecompileCache::default(), SpecId::PRAGUE, None); let output = PrecompileOutput { gas_used: 50, @@ -270,8 +297,8 @@ mod tests { // create the first precompile with a specific output let precompile1: DynPrecompile = { - move |data: &[u8], _gas: u64| -> PrecompileResult { - assert_eq!(data, input_data); + move |input: PrecompileInput<'_>| -> PrecompileResult { + assert_eq!(input.data, input_data); Ok(PrecompileOutput { gas_used: 5000, @@ -283,8 +310,8 @@ mod tests { // create the second precompile with a different output let precompile2: DynPrecompile = { - move |data: &[u8], _gas: u64| -> PrecompileResult { - assert_eq!(data, input_data); + move |input: PrecompileInput<'_>| -> PrecompileResult { + assert_eq!(input.data, input_data); Ok(PrecompileOutput { gas_used: 7000, @@ -298,24 +325,47 @@ mod tests { precompile1, cache_map.cache_for_address(address1), SpecId::PRAGUE, + None, ); let wrapped_precompile2 = CachedPrecompile::wrap( precompile2, cache_map.cache_for_address(address2), SpecId::PRAGUE, + None, ); // first invocation of precompile1 (cache miss) - let result1 = wrapped_precompile1.call(input_data, gas_limit).unwrap(); + let result1 = wrapped_precompile1 + .call(PrecompileInput { + data: input_data, + gas: gas_limit, + caller: Address::ZERO, + value: U256::ZERO, + }) + .unwrap(); assert_eq!(result1.bytes.as_ref(), b"output_from_precompile_1"); // first invocation of precompile2 with the same input (should be a cache miss) // if cache was incorrectly shared, we'd get precompile1's result - let result2 = wrapped_precompile2.call(input_data, gas_limit).unwrap(); + let result2 = wrapped_precompile2 + .call(PrecompileInput { + data: input_data, + gas: gas_limit, + caller: Address::ZERO, + value: U256::ZERO, + }) + .unwrap(); assert_eq!(result2.bytes.as_ref(), b"output_from_precompile_2"); // second invocation of precompile1 (should be a cache hit) - let result3 = wrapped_precompile1.call(input_data, gas_limit).unwrap(); + let result3 = wrapped_precompile1 + .call(PrecompileInput { + data: input_data, + gas: gas_limit, + caller: Address::ZERO, + value: U256::ZERO, + }) + .unwrap(); assert_eq!(result3.bytes.as_ref(), b"output_from_precompile_1"); } } diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index 9fa1d960486..9922d29ff1d 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ 
b/crates/engine/tree/src/tree/tests.rs @@ -6,10 +6,7 @@ use alloy_primitives::{ Bytes, B256, }; use alloy_rlp::Decodable; -use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionData, ExecutionPayloadSidecar, ExecutionPayloadV1, - ExecutionPayloadV3, -}; +use alloy_rpc_types_engine::{ExecutionData, ExecutionPayloadSidecar, ExecutionPayloadV1}; use assert_matches::assert_matches; use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; @@ -93,7 +90,6 @@ struct TestHarness { from_tree_rx: UnboundedReceiver, blocks: Vec, action_rx: Receiver, - evm_config: MockEvmConfig, block_builder: TestBlockBuilder, provider: MockEthProvider, } @@ -146,11 +142,10 @@ impl TestHarness { persistence_handle, PersistenceState::default(), payload_builder, - // TODO: fix tests for state root task https://github.com/paradigmxyz/reth/issues/14376 // always assume enough parallelism for tests - TreeConfig::default().with_legacy_state_root(true).with_has_enough_parallelism(true), + TreeConfig::default().with_legacy_state_root(false).with_has_enough_parallelism(true), EngineApiKind::Ethereum, - evm_config.clone(), + evm_config, ); let block_builder = TestBlockBuilder::default().with_chain_spec((*chain_spec).clone()); @@ -160,7 +155,6 @@ impl TestHarness { from_tree_rx, blocks: vec![], action_rx, - evm_config, block_builder, provider, } @@ -215,23 +209,6 @@ impl TestHarness { self } - fn extend_execution_outcome( - &self, - execution_outcomes: impl IntoIterator>, - ) { - self.evm_config.extend(execution_outcomes); - } - - fn insert_block( - &mut self, - block: RecoveredBlock, - ) -> Result> { - let execution_outcome = self.block_builder.get_execution_outcome(block.clone()); - self.extend_execution_outcome([execution_outcome]); - self.tree.provider.add_state_root(block.state_root); - self.tree.insert_block(block) - } - async fn fcu_to(&mut self, block_hash: B256, fcu_status: impl Into) { let fcu_status = fcu_status.into(); @@ -289,78 +266,6 @@ impl TestHarness { } } - async fn send_new_payload(&mut self, block: RecoveredBlock) { - let payload = ExecutionPayloadV3::from_block_unchecked( - block.hash(), - &block.clone_sealed_block().into_block(), - ); - self.tree - .on_new_payload(ExecutionData { - payload: payload.into(), - sidecar: ExecutionPayloadSidecar::v3(CancunPayloadFields { - parent_beacon_block_root: block.parent_beacon_block_root.unwrap(), - versioned_hashes: vec![], - }), - }) - .unwrap(); - } - - async fn insert_chain( - &mut self, - chain: impl IntoIterator> + Clone, - ) { - for block in chain.clone() { - self.insert_block(block.clone()).unwrap(); - } - self.check_canon_chain_insertion(chain).await; - } - - async fn check_canon_commit(&mut self, hash: B256) { - let event = self.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::BeaconConsensus( - BeaconConsensusEngineEvent::CanonicalChainCommitted(header, _), - ) => { - assert_eq!(header.hash(), hash); - } - _ => panic!("Unexpected event: {event:#?}"), - } - } - - async fn check_canon_chain_insertion( - &mut self, - chain: impl IntoIterator> + Clone, - ) { - for block in chain.clone() { - self.check_canon_block_added(block.hash()).await; - } - } - - async fn check_canon_block_added(&mut self, expected_hash: B256) { - let event = self.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::BeaconConsensus(BeaconConsensusEngineEvent::CanonicalBlockAdded( - executed, - _, - )) => { - assert_eq!(executed.recovered_block.hash(), expected_hash); - } - _ => 
panic!("Unexpected event: {event:#?}"), - } - } - - async fn check_block_received(&mut self, hash: B256) { - let event = self.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::BeaconConsensus(BeaconConsensusEngineEvent::BlockReceived( - num_hash, - )) => { - assert_eq!(num_hash.hash, hash); - } - _ => panic!("Unexpected event: {event:#?}"), - } - } - fn persist_blocks(&self, blocks: Vec>) { let mut block_data: Vec<(B256, Block)> = Vec::with_capacity(blocks.len()); let mut headers_data: Vec<(B256, Header)> = Vec::with_capacity(blocks.len()); @@ -373,49 +278,6 @@ impl TestHarness { self.provider.extend_blocks(block_data); self.provider.extend_headers(headers_data); } - - fn setup_range_insertion_for_valid_chain( - &mut self, - chain: Vec>, - ) { - self.setup_range_insertion_for_chain(chain, None) - } - - fn setup_range_insertion_for_invalid_chain( - &mut self, - chain: Vec>, - index: usize, - ) { - self.setup_range_insertion_for_chain(chain, Some(index)) - } - - fn setup_range_insertion_for_chain( - &mut self, - chain: Vec>, - invalid_index: Option, - ) { - // setting up execution outcomes for the chain, the blocks will be - // executed starting from the oldest, so we need to reverse. - let mut chain_rev = chain; - chain_rev.reverse(); - - let mut execution_outcomes = Vec::with_capacity(chain_rev.len()); - for (index, block) in chain_rev.iter().enumerate() { - let execution_outcome = self.block_builder.get_execution_outcome(block.clone()); - let state_root = if invalid_index.is_some() && invalid_index.unwrap() == index { - B256::random() - } else { - block.state_root - }; - self.tree.provider.add_state_root(state_root); - execution_outcomes.push(execution_outcome); - } - self.extend_execution_outcome(execution_outcomes); - } - - fn check_canon_head(&self, head_hash: B256) { - assert_eq!(self.tree.state.tree_state.canonical_head().hash, head_hash); - } } #[test] @@ -951,236 +813,3 @@ async fn test_engine_tree_live_sync_transition_required_blocks_requested() { _ => panic!("Unexpected event: {event:#?}"), } } - -#[tokio::test] -async fn test_engine_tree_live_sync_transition_eventually_canonical() { - reth_tracing::init_test_tracing(); - - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec.clone()); - test_harness.tree.config = test_harness.tree.config.with_max_execute_block_batch_size(100); - - // create base chain and setup test harness with it - let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect(); - test_harness = test_harness.with_blocks(base_chain.clone()); - - // fcu to the tip of base chain - test_harness - .fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid) - .await; - - // create main chain, extension of base chain, with enough blocks to - // trigger backfill sync - let main_chain = test_harness - .block_builder - .create_fork(base_chain[0].recovered_block(), MIN_BLOCKS_FOR_PIPELINE_RUN + 10); - - let main_chain_last = main_chain.last().unwrap(); - let main_chain_last_hash = main_chain_last.hash(); - let main_chain_backfill_target = main_chain.get(MIN_BLOCKS_FOR_PIPELINE_RUN as usize).unwrap(); - let main_chain_backfill_target_hash = main_chain_backfill_target.hash(); - - // fcu to the element of main chain that should trigger backfill sync - test_harness.send_fcu(main_chain_backfill_target_hash, ForkchoiceStatus::Syncing).await; - test_harness.check_fcu(main_chain_backfill_target_hash, ForkchoiceStatus::Syncing).await; - - // check download request for target - 
let event = test_harness.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::Download(DownloadRequest::BlockSet(hash_set)) => { - assert_eq!(hash_set, HashSet::from_iter([main_chain_backfill_target_hash])); - } - _ => panic!("Unexpected event: {event:#?}"), - } - - // send message to tell the engine the requested block was downloaded - test_harness - .tree - .on_engine_message(FromEngine::DownloadedBlocks(vec![main_chain_backfill_target.clone()])) - .unwrap(); - - // check that backfill is triggered - let event = test_harness.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::BackfillAction(BackfillAction::Start( - reth_stages::PipelineTarget::Sync(target_hash), - )) => { - assert_eq!(target_hash, main_chain_backfill_target_hash); - } - _ => panic!("Unexpected event: {event:#?}"), - } - - // persist blocks of main chain, same as the backfill operation would do - let backfilled_chain: Vec<_> = - main_chain.clone().drain(0..(MIN_BLOCKS_FOR_PIPELINE_RUN + 1) as usize).collect(); - test_harness.persist_blocks(backfilled_chain.clone()); - - test_harness.setup_range_insertion_for_valid_chain(backfilled_chain); - - // send message to mark backfill finished - test_harness - .tree - .on_engine_message(FromEngine::Event(FromOrchestrator::BackfillSyncFinished( - ControlFlow::Continue { block_number: main_chain_backfill_target.number }, - ))) - .unwrap(); - - // send fcu to the tip of main - test_harness.fcu_to(main_chain_last_hash, ForkchoiceStatus::Syncing).await; - - let event = test_harness.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::Download(DownloadRequest::BlockSet(target_hash)) => { - assert_eq!(target_hash, HashSet::from_iter([main_chain_last_hash])); - } - _ => panic!("Unexpected event: {event:#?}"), - } - - // tell engine main chain tip downloaded - test_harness - .tree - .on_engine_message(FromEngine::DownloadedBlocks(vec![main_chain_last.clone()])) - .unwrap(); - - // check download range request - let event = test_harness.from_tree_rx.recv().await.unwrap(); - match event { - EngineApiEvent::Download(DownloadRequest::BlockRange(initial_hash, total_blocks)) => { - assert_eq!( - total_blocks, - (main_chain.len() - MIN_BLOCKS_FOR_PIPELINE_RUN as usize - 2) as u64 - ); - assert_eq!(initial_hash, main_chain_last.parent_hash); - } - _ => panic!("Unexpected event: {event:#?}"), - } - - let remaining: Vec<_> = main_chain - .clone() - .drain((MIN_BLOCKS_FOR_PIPELINE_RUN + 1) as usize..main_chain.len()) - .collect(); - - test_harness.setup_range_insertion_for_valid_chain(remaining.clone()); - - // tell engine block range downloaded - test_harness.tree.on_engine_message(FromEngine::DownloadedBlocks(remaining.clone())).unwrap(); - - test_harness.check_canon_chain_insertion(remaining).await; - - // check canonical chain committed event with the hash of the latest block - test_harness.check_canon_commit(main_chain_last_hash).await; - - // new head is the tip of the main chain - test_harness.check_canon_head(main_chain_last_hash); -} - -#[tokio::test] -async fn test_engine_tree_live_sync_fcu_extends_canon_chain() { - reth_tracing::init_test_tracing(); - - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec.clone()); - - // create base chain and setup test harness with it - let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect(); - test_harness = test_harness.with_blocks(base_chain.clone()); - - // fcu to the tip of base chain - test_harness - 
.fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid) - .await; - - // create main chain, extension of base chain - let main_chain = test_harness.block_builder.create_fork(base_chain[0].recovered_block(), 10); - // determine target in the middle of main hain - let target = main_chain.get(5).unwrap(); - let target_hash = target.hash(); - let main_last = main_chain.last().unwrap(); - let main_last_hash = main_last.hash(); - - // insert main chain - test_harness.insert_chain(main_chain).await; - - // send fcu to target - test_harness.send_fcu(target_hash, ForkchoiceStatus::Valid).await; - - test_harness.check_canon_commit(target_hash).await; - test_harness.check_fcu(target_hash, ForkchoiceStatus::Valid).await; - - // send fcu to main tip - test_harness.send_fcu(main_last_hash, ForkchoiceStatus::Valid).await; - - test_harness.check_canon_commit(main_last_hash).await; - test_harness.check_fcu(main_last_hash, ForkchoiceStatus::Valid).await; - test_harness.check_canon_head(main_last_hash); -} - -#[tokio::test] -async fn test_engine_tree_buffered_blocks_are_eventually_connected() { - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec.clone()); - - let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect(); - test_harness = test_harness.with_blocks(base_chain.clone()); - - // side chain consisting of two blocks, the last will be inserted first - // so that we force it to be buffered - let side_chain = - test_harness.block_builder.create_fork(base_chain.last().unwrap().recovered_block(), 2); - - // buffer last block of side chain - let buffered_block = side_chain.last().unwrap(); - let buffered_block_hash = buffered_block.hash(); - - test_harness.setup_range_insertion_for_valid_chain(vec![buffered_block.clone()]); - test_harness.send_new_payload(buffered_block.clone()).await; - - assert!(test_harness.tree.state.buffer.block(&buffered_block_hash).is_some()); - - let non_buffered_block = side_chain.first().unwrap(); - let non_buffered_block_hash = non_buffered_block.hash(); - - // insert block that continues the canon chain, should not be buffered - test_harness.setup_range_insertion_for_valid_chain(vec![non_buffered_block.clone()]); - test_harness.send_new_payload(non_buffered_block.clone()).await; - assert!(test_harness.tree.state.buffer.block(&non_buffered_block_hash).is_none()); - - // the previously buffered block should be connected now - assert!(test_harness.tree.state.buffer.block(&buffered_block_hash).is_none()); - - // both blocks are added to the canon chain in order - // note that the buffered block is received first, but added last - test_harness.check_block_received(buffered_block_hash).await; - test_harness.check_block_received(non_buffered_block_hash).await; - test_harness.check_canon_block_added(non_buffered_block_hash).await; - test_harness.check_canon_block_added(buffered_block_hash).await; -} - -#[tokio::test] -async fn test_engine_tree_reorg_with_missing_ancestor_expecting_valid() { - reth_tracing::init_test_tracing(); - let chain_spec = MAINNET.clone(); - let mut test_harness = TestHarness::new(chain_spec.clone()); - - let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..6).collect(); - test_harness = test_harness.with_blocks(base_chain.clone()); - - // create a side chain with an invalid block - let side_chain = - test_harness.block_builder.create_fork(base_chain.last().unwrap().recovered_block(), 15); - let invalid_index = 9; - - 
test_harness.setup_range_insertion_for_invalid_chain(side_chain.clone(), invalid_index); - - for (index, block) in side_chain.iter().enumerate() { - test_harness.send_new_payload(block.clone()).await; - - if index < side_chain.len() - invalid_index - 1 { - test_harness.send_fcu(block.hash(), ForkchoiceStatus::Valid).await; - } - } - - // Try to do a forkchoice update to a block after the invalid one - let fork_tip_hash = side_chain.last().unwrap().hash(); - test_harness.send_fcu(fork_tip_hash, ForkchoiceStatus::Invalid).await; -} diff --git a/crates/engine/tree/src/tree/trie_updates.rs b/crates/engine/tree/src/tree/trie_updates.rs index 4f2e3c40eb1..ba8f7fc16a9 100644 --- a/crates/engine/tree/src/tree/trie_updates.rs +++ b/crates/engine/tree/src/tree/trie_updates.rs @@ -114,11 +114,11 @@ pub(super) fn compare_trie_updates( .account_nodes .keys() .chain(regular.account_nodes.keys()) - .cloned() + .copied() .collect::<HashSet<_>>() { let (task, regular) = (task.account_nodes.remove(&key), regular.account_nodes.remove(&key)); - let database = account_trie_cursor.seek_exact(key.clone())?.map(|x| x.1); + let database = account_trie_cursor.seek_exact(key)?.map(|x| x.1); if !branch_nodes_equal(task.as_ref(), regular.as_ref(), database.as_ref())? { diff.account_nodes.insert(key, EntryDiff { task, regular, database }); @@ -131,12 +131,12 @@ pub(super) fn compare_trie_updates( .removed_nodes .iter() .chain(regular.removed_nodes.iter()) - .cloned() + .copied() .collect::<HashSet<_>>() { let (task_removed, regular_removed) = (task.removed_nodes.contains(&key), regular.removed_nodes.contains(&key)); - let database_not_exists = account_trie_cursor.seek_exact(key.clone())?.is_none(); + let database_not_exists = account_trie_cursor.seek_exact(key)?.is_none(); // If the deletion is a no-op, meaning that the entry is not in the // database, do not add it to the diff. if task_removed != regular_removed && !database_not_exists { @@ -206,11 +206,11 @@ fn compare_storage_trie_updates( .storage_nodes .keys() .chain(regular.storage_nodes.keys()) - .cloned() + .copied() .collect::<HashSet<_>>() { let (task, regular) = (task.storage_nodes.remove(&key), regular.storage_nodes.remove(&key)); - let database = storage_trie_cursor.seek_exact(key.clone())?.map(|x| x.1); + let database = storage_trie_cursor.seek_exact(key)?.map(|x| x.1); if !branch_nodes_equal(task.as_ref(), regular.as_ref(), database.as_ref())? { diff.storage_nodes.insert(key, EntryDiff { task, regular, database }); } @@ -218,22 +218,20 @@ fn compare_storage_trie_updates( // compare removed nodes let mut storage_trie_cursor = trie_cursor()?; - for key in task - .removed_nodes - .iter() - .chain(regular.removed_nodes.iter()) - .cloned() - .collect::<HashSet<_>>() + for key in + task.removed_nodes.iter().chain(regular.removed_nodes.iter()).collect::<HashSet<_>>() { let (task_removed, regular_removed) = - (task.removed_nodes.contains(&key), regular.removed_nodes.contains(&key)); - let database_not_exists = - storage_trie_cursor.seek_exact(key.clone())?.map(|x| x.1).is_none(); + (task.removed_nodes.contains(key), regular.removed_nodes.contains(key)); + if task_removed == regular_removed { + continue; + } + let database_not_exists = storage_trie_cursor.seek_exact(*key)?.map(|x| x.1).is_none(); // If the deletion is a no-op, meaning that the entry is not in the // database, do not add it to the diff.
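// Editor's note: the comparison above dedupes keys coming from both the task and
// the regular path, then skips removals that the database never contained. A
// minimal, self-contained sketch of that pattern, using plain std collections in
// place of reth's trie cursor API (all names here are illustrative, not reth's):
use std::collections::HashSet;

fn removed_node_diffs<'a>(
    task: &HashSet<&'a str>,
    regular: &HashSet<&'a str>,
    database: &HashSet<&'a str>, // stand-in for the cursor's `seek_exact` existence check
) -> Vec<&'a str> {
    task.iter()
        .chain(regular.iter())
        .copied()
        .collect::<HashSet<_>>() // dedupe keys seen on either side
        .into_iter()
        // a removal only counts as a diff when the two sides disagree...
        .filter(|key| task.contains(key) != regular.contains(key))
        // ...and the deletion is not a no-op, i.e. the entry exists in the database
        .filter(|key| database.contains(key))
        .collect()
}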
- if task_removed != regular_removed && !database_not_exists { + if !database_not_exists { diff.removed_nodes.insert( - key, + *key, EntryDiff { task: task_removed, regular: regular_removed, diff --git a/crates/era-downloader/src/client.rs b/crates/era-downloader/src/client.rs index 752523c262f..ea4894cadbd 100644 --- a/crates/era-downloader/src/client.rs +++ b/crates/era-downloader/src/client.rs @@ -1,5 +1,4 @@ -use crate::BLOCKS_PER_FILE; -use alloy_primitives::{hex, hex::ToHexExt, BlockNumber}; +use alloy_primitives::{hex, hex::ToHexExt}; use bytes::Bytes; use eyre::{eyre, OptionExt}; use futures_util::{stream::StreamExt, Stream, TryStreamExt}; @@ -8,7 +7,7 @@ use sha2::{Digest, Sha256}; use std::{future::Future, path::Path, str::FromStr}; use tokio::{ fs::{self, File}, - io::{self, AsyncBufReadExt, AsyncWriteExt}, + io::{self, AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncWriteExt}, join, try_join, }; @@ -42,23 +41,14 @@ pub struct EraClient<Http> { client: Http, url: Url, folder: Box<Path>, - start_from: Option<u64>, } impl EraClient { const CHECKSUMS: &'static str = "checksums.txt"; /// Constructs [`EraClient`] using `client` to download from `url` into `folder`. - pub const fn new(client: Http, url: Url, folder: Box<Path>) -> Self { - Self { client, url, folder, start_from: None } - } - - /// Overrides the starting ERA file based on `block_number`. - /// - /// The normal behavior is that the index is recovered based on files contained in the `folder`. - pub const fn start_from(mut self, block_number: BlockNumber) -> Self { - self.start_from.replace(block_number / BLOCKS_PER_FILE); - self + pub fn new(client: Http, url: Url, folder: impl Into<Box<Path>>) -> Self { + Self { client, url, folder: folder.into() } } /// Performs a GET request on `url` and stores the response body into a file located within @@ -75,61 +65,43 @@ impl EraClient { .ok_or_eyre("empty path segments")?; let path = path.join(file_name); - let number = - self.file_name_to_number(file_name).ok_or_eyre("Cannot parse number from file name")?; + if !self.is_downloaded(file_name, &path).await? { + let number = self + .file_name_to_number(file_name) + .ok_or_eyre("Cannot parse number from file name")?; + + let mut tries = 1..3; + let mut actual_checksum: eyre::Result<_>; + loop { + actual_checksum = async { + let mut file = File::create(&path).await?; + let mut stream = client.get(url.clone()).await?; + let mut hasher = Sha256::new(); + + while let Some(item) = stream.next().await.transpose()? { + io::copy(&mut item.as_ref(), &mut file).await?; + hasher.update(item); + } - let mut tries = 1..3; - let mut actual_checksum: eyre::Result<_>; - loop { - actual_checksum = async { - let mut file = File::create(&path).await?; - let mut stream = client.get(url.clone()).await?; - let mut hasher = Sha256::new(); - - while let Some(item) = stream.next().await.transpose()?
{ - io::copy(&mut item.as_ref(), &mut file).await?; - hasher.update(item); + Ok(hasher.finalize().to_vec()) } + .await; - Ok(hasher.finalize().to_vec()) + if actual_checksum.is_ok() || tries.next().is_none() { + break; + } } - .await; - if actual_checksum.is_ok() || tries.next().is_none() { - break; - } - } - - let actual_checksum = actual_checksum?; - - let file = File::open(self.folder.join(Self::CHECKSUMS)).await?; - let reader = io::BufReader::new(file); - let mut lines = reader.lines(); - - for _ in 0..number { - lines.next_line().await?; - } - let expected_checksum = - lines.next_line().await?.ok_or_else(|| eyre!("Missing hash for number {number}"))?; - let expected_checksum = hex::decode(expected_checksum)?; - - if actual_checksum != expected_checksum { - return Err(eyre!( - "Checksum mismatch, got: {}, expected: {}", - actual_checksum.encode_hex(), - expected_checksum.encode_hex() - )); + self.assert_checksum(number, actual_checksum?) + .await + .map_err(|e| eyre!("{e} for {file_name} at {}", path.display()))?; } Ok(path.into_boxed_path()) } /// Recovers index of file following the latest downloaded file from a different run. - pub async fn recover_index(&self) -> u64 { - if let Some(block_number) = self.start_from { - return block_number; - } - + pub async fn recover_index(&self) -> Option<usize> { let mut max = None; if let Ok(mut dir) = fs::read_dir(&self.folder).await { @@ -137,18 +109,37 @@ impl EraClient { if let Some(name) = entry.file_name().to_str() { if let Some(number) = self.file_name_to_number(name) { if max.is_none() || matches!(max, Some(max) if number > max) { - max.replace(number); + max.replace(number + 1); + } + } + } + } + } + + max + } + + /// Deletes files that are outside of the working range. + pub async fn delete_outside_range(&self, index: usize, max_files: usize) -> eyre::Result<()> { + let last = index + max_files; + + if let Ok(mut dir) = fs::read_dir(&self.folder).await { + while let Ok(Some(entry)) = dir.next_entry().await { + if let Some(name) = entry.file_name().to_str() { + if let Some(number) = self.file_name_to_number(name) { + if number < index || number >= last { + reth_fs_util::remove_file(entry.path())?; } } } } } - max.map(|v| v + 1).unwrap_or(0) + Ok(()) } /// Returns a download URL for the file corresponding to `number`. - pub async fn url(&self, number: u64) -> eyre::Result<Option<Url>> { + pub async fn url(&self, number: usize) -> eyre::Result<Option<Url>> { Ok(self.number_to_file_name(number).await?.map(|name| self.url.join(&name)).transpose()?) } @@ -229,7 +222,7 @@ impl EraClient { } /// Returns ERA1 file name that is ordered at `number`. - pub async fn number_to_file_name(&self, number: u64) -> eyre::Result<Option<String>> { + pub async fn number_to_file_name(&self, number: usize) -> eyre::Result<Option<String>> { let path = self.folder.to_path_buf().join("index"); let file = File::open(&path).await?; let reader = io::BufReader::new(file); @@ -241,9 +234,99 @@ impl EraClient { Ok(lines.next_line().await?)
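// Editor's note: `number_to_file_name` above (like `expected_checksum` below)
// resolves an index by skipping to the n-th line of a file. A minimal standalone
// sketch of that idiom, assuming only tokio and eyre (the function name is
// hypothetical):
use tokio::{fs::File, io::{self, AsyncBufReadExt}};

async fn nth_line(path: &str, n: usize) -> eyre::Result<Option<String>> {
    let file = File::open(path).await?;
    let mut lines = io::BufReader::new(file).lines();
    for _ in 0..n {
        // Discard the first `n` lines; `next_line` yields `Ok(None)` at EOF.
        if lines.next_line().await?.is_none() {
            return Ok(None);
        }
    }
    Ok(lines.next_line().await?)
}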
} - fn file_name_to_number(&self, file_name: &str) -> Option<u64> { - file_name.split('-').nth(1).and_then(|v| u64::from_str(v).ok()) + async fn is_downloaded(&self, name: &str, path: impl AsRef<Path>) -> eyre::Result<bool> { + let path = path.as_ref(); + + match File::open(path).await { + Ok(file) => { + let number = self + .file_name_to_number(name) + .ok_or_else(|| eyre!("Cannot parse ERA number from {name}"))?; + + let actual_checksum = checksum(file).await?; + let is_verified = self.verify_checksum(number, actual_checksum).await?; + + if !is_verified { + fs::remove_file(path).await?; + } + + Ok(is_verified) + } + Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(false), + Err(e) => Err(e)?, + } } + + /// Returns `true` if `actual_checksum` matches expected checksum of the ERA1 file indexed by + /// `number` based on the [file list]. + /// + /// [file list]: Self::fetch_file_list + async fn verify_checksum(&self, number: usize, actual_checksum: Vec<u8>) -> eyre::Result<bool> { + Ok(actual_checksum == self.expected_checksum(number).await?) + } + + /// Returns `Ok` if `actual_checksum` matches expected checksum of the ERA1 file indexed by + /// `number` based on the [file list]. + /// + /// [file list]: Self::fetch_file_list + async fn assert_checksum(&self, number: usize, actual_checksum: Vec<u8>) -> eyre::Result<()> { + let expected_checksum = self.expected_checksum(number).await?; + + if actual_checksum == expected_checksum { + Ok(()) + } else { + Err(eyre!( + "Checksum mismatch, got: {}, expected: {}", + actual_checksum.encode_hex(), + expected_checksum.encode_hex() + )) + } + } + + /// Returns SHA-256 checksum for ERA1 file indexed by `number` based on the [file list]. + /// + /// [file list]: Self::fetch_file_list + async fn expected_checksum(&self, number: usize) -> eyre::Result<Vec<u8>> { + let file = File::open(self.folder.join(Self::CHECKSUMS)).await?; + let reader = io::BufReader::new(file); + let mut lines = reader.lines(); + + for _ in 0..number { + lines.next_line().await?; + } + let expected_checksum = + lines.next_line().await?.ok_or_else(|| eyre!("Missing hash for number {number}"))?; + let expected_checksum = hex::decode(expected_checksum)?; + + Ok(expected_checksum) + } + + fn file_name_to_number(&self, file_name: &str) -> Option<usize> { + file_name.split('-').nth(1).and_then(|v| usize::from_str(v).ok()) + } +} + +async fn checksum(mut reader: impl AsyncRead + Unpin) -> eyre::Result<Vec<u8>> { + let mut hasher = Sha256::new(); + + // Create a buffer to read data into, sized for performance. + let mut data = vec![0; 64 * 1024]; + + loop { + // Read data from the reader into the buffer. + let len = reader.read(&mut data).await?; + if len == 0 { + break; + } // Exit loop if no more data. + + // Update the hash with the data read. + hasher.update(&data[..len]); + } + + // Finalize the hash after all data has been processed.
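// Editor's note: `Sha256` is the streaming hasher from the `sha2` crate:
// `update` folds in one chunk at a time, so the whole file never has to sit in
// memory, and `finalize` consumes the hasher and returns the fixed 32-byte
// digest, hence the `to_vec()` below to obtain a `Vec<u8>` that can be compared
// against the hex-decoded entries of checksums.txt.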
+ let hash = hasher.finalize().to_vec(); + + Ok(hash) } #[cfg(test)] mod tests { @@ -254,11 +337,7 @@ mod tests { impl EraClient<Client> { fn empty() -> Self { - Self::new( - Client::new(), - Url::from_str("file:///").unwrap(), - PathBuf::new().into_boxed_path(), - ) + Self::new(Client::new(), Url::from_str("file:///").unwrap(), PathBuf::new()) } } @@ -266,7 +345,7 @@ mod tests { #[test_case("mainnet-00000-a81ae85f.era1", Some(0))] #[test_case("00000-a81ae85f.era1", None)] #[test_case("", None)] - fn test_file_name_to_number(file_name: &str, expected_number: Option<u64>) { + fn test_file_name_to_number(file_name: &str, expected_number: Option<usize>) { let client = EraClient::empty(); let actual_number = client.file_name_to_number(file_name); diff --git a/crates/era-downloader/src/fs.rs b/crates/era-downloader/src/fs.rs index 2fe40e86e7d..17a2d46d26a 100644 --- a/crates/era-downloader/src/fs.rs +++ b/crates/era-downloader/src/fs.rs @@ -45,7 +45,7 @@ pub fn read_dir( entries.sort_by(|(left, _), (right, _)| left.cmp(right)); - Ok(stream::iter(entries.into_iter().skip((start_from / BLOCKS_PER_FILE) as usize).map( + Ok(stream::iter(entries.into_iter().skip(start_from as usize / BLOCKS_PER_FILE).map( move |(_, path)| { let expected_checksum = checksums.next().transpose()?.ok_or_eyre("Got less checksums than ERA files")?; diff --git a/crates/era-downloader/src/lib.rs b/crates/era-downloader/src/lib.rs index 01147e41e1c..88afaa7af4e 100644 --- a/crates/era-downloader/src/lib.rs +++ b/crates/era-downloader/src/lib.rs @@ -12,7 +12,7 @@ //! let url = Url::from_str("file:///")?; //! //! // Directory where the ERA1 files will be downloaded to -//! let folder = PathBuf::new().into_boxed_path(); +//! let folder = PathBuf::new(); //! //! let client = EraClient::new(Client::new(), url, folder); //! @@ -42,4 +42,4 @@ pub use client::{EraClient, HttpClient}; pub use fs::read_dir; pub use stream::{EraMeta, EraStream, EraStreamConfig}; -pub(crate) const BLOCKS_PER_FILE: u64 = 8192; +pub(crate) const BLOCKS_PER_FILE: usize = 8192; diff --git a/crates/era-downloader/src/stream.rs b/crates/era-downloader/src/stream.rs index 336085a2682..a488e098ab0 100644 --- a/crates/era-downloader/src/stream.rs +++ b/crates/era-downloader/src/stream.rs @@ -1,4 +1,4 @@ -use crate::{client::HttpClient, EraClient}; +use crate::{client::HttpClient, EraClient, BLOCKS_PER_FILE}; use alloy_primitives::BlockNumber; use futures_util::{stream::FuturesOrdered, FutureExt, Stream, StreamExt}; use reqwest::Url; @@ -24,7 +24,7 @@ use std::{ pub struct EraStreamConfig { max_files: usize, max_concurrent_downloads: usize, - start_from: Option<BlockNumber>, + start_from: Option<usize>, } impl Default for EraStreamConfig { @@ -47,11 +47,8 @@ impl EraStreamConfig { } /// Overrides the starting ERA file index to be the first one that contains `block_number`. - /// - /// The normal behavior is that the ERA file index is recovered from the last file inside the - /// download folder.
pub const fn start_from(mut self, block_number: BlockNumber) -> Self { - self.start_from.replace(block_number); + self.start_from.replace(block_number as usize / BLOCKS_PER_FILE); self } } @@ -93,11 +90,13 @@ impl EraStream { client, files_count: Box::pin(async move { usize::MAX }), next_url: Box::pin(async move { Ok(None) }), - recover_index: Box::pin(async move { 0 }), + delete_outside_range: Box::pin(async move { Ok(()) }), + recover_index: Box::pin(async move { None }), fetch_file_list: Box::pin(async move { Ok(()) }), state: Default::default(), max_files: config.max_files, - index: 0, + index: config.start_from.unwrap_or_default(), + last: None, downloading: 0, }, } @@ -223,11 +222,13 @@ struct StartingStream<Http> { client: EraClient<Http>, files_count: Pin<Box<dyn Future<Output = usize> + Send + Sync + 'static>>, next_url: Pin<Box<dyn Future<Output = eyre::Result<Option<Url>>> + Send + Sync + 'static>>, - recover_index: Pin<Box<dyn Future<Output = u64> + Send + Sync + 'static>>, + delete_outside_range: Pin<Box<dyn Future<Output = eyre::Result<()>> + Send + Sync + 'static>>, + recover_index: Pin<Box<dyn Future<Output = Option<usize>> + Send + Sync + 'static>>, fetch_file_list: Pin<Box<dyn Future<Output = eyre::Result<()>> + Send + Sync + 'static>>, state: State, max_files: usize, - index: u64, + index: usize, + last: Option<usize>, downloading: usize, } @@ -246,6 +247,7 @@ enum State { #[default] Initial, FetchFileList, + DeleteOutsideRange, RecoverIndex, CountFiles, Missing(usize), @@ -262,23 +264,43 @@ impl Stream for StartingStream if self.state == State::FetchFileList { if let Poll::Ready(result) = self.fetch_file_list.poll_unpin(cx) { + match result { + Ok(_) => self.delete_outside_range(), + Err(e) => { + self.fetch_file_list(); + + return Poll::Ready(Some(Box::pin(async move { Err(e) }))); + } + } + } + } + + if self.state == State::DeleteOutsideRange { + if let Poll::Ready(result) = self.delete_outside_range.poll_unpin(cx) { match result { Ok(_) => self.recover_index(), - Err(e) => return Poll::Ready(Some(Box::pin(async move { Err(e) }))), + Err(e) => { + self.delete_outside_range(); + + return Poll::Ready(Some(Box::pin(async move { Err(e) }))); + } } } } if self.state == State::RecoverIndex { - if let Poll::Ready(index) = self.recover_index.poll_unpin(cx) { - self.index = index; + if let Poll::Ready(last) = self.recover_index.poll_unpin(cx) { + self.last = last; self.count_files(); } } if self.state == State::CountFiles { if let Poll::Ready(downloaded) = self.files_count.poll_unpin(cx) { - let max_missing = self.max_files.saturating_sub(downloaded + self.downloading); + let max_missing = self + .max_files + .saturating_sub(downloaded + self.downloading) + .max(self.last.unwrap_or_default().saturating_sub(self.index)); self.state = State::Missing(max_missing); } } @@ -328,6 +350,17 @@ impl StartingStream { self.state = State::FetchFileList; } + fn delete_outside_range(&mut self) { + let index = self.index; + let max_files = self.max_files; + let client = self.client.clone(); + + Pin::new(&mut self.delete_outside_range) + .set(Box::pin(async move { client.delete_outside_range(index, max_files).await })); + + self.state = State::DeleteOutsideRange; + } + fn recover_index(&mut self) { let client = self.client.clone(); @@ -345,7 +378,7 @@ impl StartingStream { self.state = State::CountFiles; } - fn next_url(&mut self, index: u64, max_missing: usize) { + fn next_url(&mut self, index: usize, max_missing: usize) { let client = self.client.clone(); Pin::new(&mut self.next_url).set(Box::pin(async move { client.url(index).await })); diff --git a/crates/era-downloader/tests/it/checksums.rs b/crates/era-downloader/tests/it/checksums.rs index 70a78345dbd..630cbece5d4 100644 --- a/crates/era-downloader/tests/it/checksums.rs +++ 
b/crates/era-downloader/tests/it/checksums.rs @@ -3,7 +3,7 @@ use futures::Stream; use futures_util::StreamExt; use reqwest::{IntoUrl, Url}; use reth_era_downloader::{EraClient, EraStream, EraStreamConfig, HttpClient}; -use std::{future::Future, str::FromStr}; +use std::str::FromStr; use tempfile::tempdir; use test_case::test_case; @@ -14,8 +14,8 @@ use test_case::test_case; async fn test_invalid_checksum_returns_error(url: &str) { let base_url = Url::from_str(url).unwrap(); let folder = tempdir().unwrap(); - let folder = folder.path().to_owned().into_boxed_path(); - let client = EraClient::new(FailingClient, base_url, folder.clone()); + let folder = folder.path(); + let client = EraClient::new(FailingClient, base_url, folder); let mut stream = EraStream::new( client, @@ -23,16 +23,24 @@ async fn test_invalid_checksum_returns_error(url: &str) { ); let actual_err = stream.next().await.unwrap().unwrap_err().to_string(); - let expected_err = "Checksum mismatch, \ + let expected_err = format!( + "Checksum mismatch, \ got: 87428fc522803d31065e7bce3cf03fe475096631e5e07bbd7a0fde60c4cf25c7, \ -expected: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; +expected: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \ +for mainnet-00000-5ec1ffb8.era1 at {}/mainnet-00000-5ec1ffb8.era1", + folder.display() + ); assert_eq!(actual_err, expected_err); let actual_err = stream.next().await.unwrap().unwrap_err().to_string(); - let expected_err = "Checksum mismatch, \ + let expected_err = format!( + "Checksum mismatch, \ got: 0263829989b6fd954f72baaf2fc64bc2e2f01d692d4de72986ea808f6e99813f, \ -expected: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"; +expected: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb \ +for mainnet-00001-a5364e9a.era1 at {}/mainnet-00001-a5364e9a.era1", + folder.display() + ); assert_eq!(actual_err, expected_err); } @@ -46,91 +54,30 @@ const CHECKSUMS: &[u8] = b"0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa struct FailingClient; impl HttpClient for FailingClient { - fn get( + async fn get( &self, url: U, - ) -> impl Future< - Output = eyre::Result> + Send + Sync + Unpin>, - > + Send - + Sync { + ) -> eyre::Result> + Send + Sync + Unpin> { let url = url.into_url().unwrap(); - async move { - match url.to_string().as_str() { - "https://mainnet.era1.nimbus.team/" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(crate::NIMBUS)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era1.ethportal.net/" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(crate::ETH_PORTAL)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era.ithaca.xyz/era1/index.html" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(crate::ITHACA)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://mainnet.era1.nimbus.team/checksums.txt" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(CHECKSUMS)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era1.ethportal.net/checksums.txt" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(CHECKSUMS)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era.ithaca.xyz/era1/checksums.txt" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(CHECKSUMS)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era1.ethportal.net/mainnet-00000-5ec1ffb8.era1" => { - 
Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(crate::MAINNET_0)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://mainnet.era1.nimbus.team/mainnet-00000-5ec1ffb8.era1" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(crate::MAINNET_0)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era.ithaca.xyz/era1/mainnet-00000-5ec1ffb8.era1" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(crate::MAINNET_0)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era1.ethportal.net/mainnet-00001-a5364e9a.era1" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(crate::MAINNET_1)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://mainnet.era1.nimbus.team/mainnet-00001-a5364e9a.era1" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(crate::MAINNET_1)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era.ithaca.xyz/era1/mainnet-00001-a5364e9a.era1" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(crate::MAINNET_1)) - }))) - as Box> + Send + Sync + Unpin>) - } - v => unimplemented!("Unexpected URL \"{v}\""), + Ok(futures::stream::iter(vec![Ok(match url.to_string().as_str() { + "https://mainnet.era1.nimbus.team/" => Bytes::from_static(crate::NIMBUS), + "https://era1.ethportal.net/" => Bytes::from_static(crate::ETH_PORTAL), + "https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(crate::ITHACA), + "https://mainnet.era1.nimbus.team/checksums.txt" | + "https://era1.ethportal.net/checksums.txt" | + "https://era.ithaca.xyz/era1/checksums.txt" => Bytes::from_static(CHECKSUMS), + "https://era1.ethportal.net/mainnet-00000-5ec1ffb8.era1" | + "https://mainnet.era1.nimbus.team/mainnet-00000-5ec1ffb8.era1" | + "https://era.ithaca.xyz/era1/mainnet-00000-5ec1ffb8.era1" => { + Bytes::from_static(crate::MAINNET_0) + } + "https://era1.ethportal.net/mainnet-00001-a5364e9a.era1" | + "https://mainnet.era1.nimbus.team/mainnet-00001-a5364e9a.era1" | + "https://era.ithaca.xyz/era1/mainnet-00001-a5364e9a.era1" => { + Bytes::from_static(crate::MAINNET_1) } - } + v => unimplemented!("Unexpected URL \"{v}\""), + })])) } } diff --git a/crates/era-downloader/tests/it/download.rs b/crates/era-downloader/tests/it/download.rs index 5502874fc1f..e7756bfede9 100644 --- a/crates/era-downloader/tests/it/download.rs +++ b/crates/era-downloader/tests/it/download.rs @@ -13,7 +13,7 @@ use test_case::test_case; async fn test_getting_file_url_after_fetching_file_list(url: &str) { let base_url = Url::from_str(url).unwrap(); let folder = tempdir().unwrap(); - let folder = folder.path().to_owned().into_boxed_path(); + let folder = folder.path(); let client = EraClient::new(StubClient, base_url.clone(), folder); client.fetch_file_list().await.unwrap(); @@ -31,7 +31,7 @@ async fn test_getting_file_url_after_fetching_file_list(url: &str) { async fn test_getting_file_after_fetching_file_list(url: &str) { let base_url = Url::from_str(url).unwrap(); let folder = tempdir().unwrap(); - let folder = folder.path().to_owned().into_boxed_path(); + let folder = folder.path(); let mut client = EraClient::new(StubClient, base_url, folder); client.fetch_file_list().await.unwrap(); diff --git a/crates/era-downloader/tests/it/list.rs b/crates/era-downloader/tests/it/list.rs index adc0df7e1cb..3940fa5d8be 100644 --- a/crates/era-downloader/tests/it/list.rs +++ b/crates/era-downloader/tests/it/list.rs @@ -13,7 +13,7 
@@ use test_case::test_case; async fn test_getting_file_name_after_fetching_file_list(url: &str) { let url = Url::from_str(url).unwrap(); let folder = tempdir().unwrap(); - let folder = folder.path().to_owned().into_boxed_path(); + let folder = folder.path(); let client = EraClient::new(StubClient, url, folder); client.fetch_file_list().await.unwrap(); diff --git a/crates/era-downloader/tests/it/main.rs b/crates/era-downloader/tests/it/main.rs index 26ba4e6143e..526d3885bff 100644 --- a/crates/era-downloader/tests/it/main.rs +++ b/crates/era-downloader/tests/it/main.rs @@ -9,10 +9,9 @@ mod stream; const fn main() {} use bytes::Bytes; -use futures_util::Stream; +use futures::Stream; use reqwest::IntoUrl; use reth_era_downloader::HttpClient; -use std::future::Future; pub(crate) const NIMBUS: &[u8] = include_bytes!("../res/nimbus.html"); pub(crate) const ETH_PORTAL: &[u8] = include_bytes!("../res/ethportal.html"); @@ -27,91 +26,30 @@ pub(crate) const MAINNET_1: &[u8] = include_bytes!("../res/mainnet-00001-a5364e9 struct StubClient; impl HttpClient for StubClient { - fn get( + async fn get( &self, url: U, - ) -> impl Future< - Output = eyre::Result> + Send + Sync + Unpin>, - > + Send - + Sync { + ) -> eyre::Result> + Send + Sync + Unpin> { let url = url.into_url().unwrap(); - async move { - match url.to_string().as_str() { - "https://mainnet.era1.nimbus.team/" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(NIMBUS)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era1.ethportal.net/" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(ETH_PORTAL)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era.ithaca.xyz/era1/index.html" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(ITHACA)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://mainnet.era1.nimbus.team/checksums.txt" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(CHECKSUMS)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era1.ethportal.net/checksums.txt" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(CHECKSUMS)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era.ithaca.xyz/era1/checksums.txt" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(CHECKSUMS)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era1.ethportal.net/mainnet-00000-5ec1ffb8.era1" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(MAINNET_0)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://mainnet.era1.nimbus.team/mainnet-00000-5ec1ffb8.era1" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(MAINNET_0)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era.ithaca.xyz/era1/mainnet-00000-5ec1ffb8.era1" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(MAINNET_0)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era1.ethportal.net/mainnet-00001-a5364e9a.era1" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(MAINNET_1)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://mainnet.era1.nimbus.team/mainnet-00001-a5364e9a.era1" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(MAINNET_1)) - }))) - as Box> + Send + Sync + Unpin>) - } - "https://era.ithaca.xyz/era1/mainnet-00001-a5364e9a.era1" => { - 
Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from(MAINNET_1)) - }))) - as Box> + Send + Sync + Unpin>) - } - v => unimplemented!("Unexpected URL \"{v}\""), + Ok(futures::stream::iter(vec![Ok(match url.to_string().as_str() { + "https://mainnet.era1.nimbus.team/" => Bytes::from_static(NIMBUS), + "https://era1.ethportal.net/" => Bytes::from_static(ETH_PORTAL), + "https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(ITHACA), + "https://mainnet.era1.nimbus.team/checksums.txt" | + "https://era1.ethportal.net/checksums.txt" | + "https://era.ithaca.xyz/era1/checksums.txt" => Bytes::from_static(CHECKSUMS), + "https://era1.ethportal.net/mainnet-00000-5ec1ffb8.era1" | + "https://mainnet.era1.nimbus.team/mainnet-00000-5ec1ffb8.era1" | + "https://era.ithaca.xyz/era1/mainnet-00000-5ec1ffb8.era1" => { + Bytes::from_static(MAINNET_0) } - } + "https://era1.ethportal.net/mainnet-00001-a5364e9a.era1" | + "https://mainnet.era1.nimbus.team/mainnet-00001-a5364e9a.era1" | + "https://era.ithaca.xyz/era1/mainnet-00001-a5364e9a.era1" => { + Bytes::from_static(MAINNET_1) + } + v => unimplemented!("Unexpected URL \"{v}\""), + })])) } } diff --git a/crates/era-downloader/tests/it/stream.rs b/crates/era-downloader/tests/it/stream.rs index 5c7b812b9d7..eb7dc2da727 100644 --- a/crates/era-downloader/tests/it/stream.rs +++ b/crates/era-downloader/tests/it/stream.rs @@ -14,8 +14,8 @@ use test_case::test_case; async fn test_streaming_files_after_fetching_file_list(url: &str) { let base_url = Url::from_str(url).unwrap(); let folder = tempdir().unwrap(); - let folder = folder.path().to_owned().into_boxed_path(); - let client = EraClient::new(StubClient, base_url, folder.clone()); + let folder = folder.path(); + let client = EraClient::new(StubClient, base_url, folder); let mut stream = EraStream::new( client, @@ -32,3 +32,20 @@ async fn test_streaming_files_after_fetching_file_list(url: &str) { assert_eq!(actual_file.as_ref(), expected_file.as_ref()); } + +#[tokio::test] +async fn test_streaming_files_after_fetching_file_list_into_missing_folder_fails() { + let base_url = Url::from_str("https://era.ithaca.xyz/era1/index.html").unwrap(); + let folder = tempdir().unwrap().path().to_owned(); + let client = EraClient::new(StubClient, base_url, folder); + + let mut stream = EraStream::new( + client, + EraStreamConfig::default().with_max_files(2).with_max_concurrent_downloads(1), + ); + + let actual_error = stream.next().await.unwrap().unwrap_err().to_string(); + let expected_error = "No such file or directory (os error 2)".to_owned(); + + assert_eq!(actual_error, expected_error); +} diff --git a/crates/era-utils/Cargo.toml b/crates/era-utils/Cargo.toml index 46764560a67..6d48e338386 100644 --- a/crates/era-utils/Cargo.toml +++ b/crates/era-utils/Cargo.toml @@ -11,15 +11,19 @@ exclude.workspace = true [dependencies] # alloy +alloy-consensus.workspace = true alloy-primitives.workspace = true +alloy-rlp.workspace = true # reth reth-db-api.workspace = true reth-era.workspace = true reth-era-downloader.workspace = true reth-etl.workspace = true +reth-ethereum-primitives.workspace = true reth-fs-util.workspace = true reth-provider.workspace = true +reth-stages-types.workspace = true reth-storage-api.workspace = true reth-primitives-traits.workspace = true @@ -41,6 +45,7 @@ reth-db-common.workspace = true # async tokio.workspace = true tokio.features = ["fs", "io-util", "macros", "rt-multi-thread"] +tokio-util.workspace = true futures.workspace = true bytes.workspace = true diff --git 
a/crates/era-utils/src/export.rs b/crates/era-utils/src/export.rs new file mode 100644 index 00000000000..2eba464e509 --- /dev/null +++ b/crates/era-utils/src/export.rs @@ -0,0 +1,342 @@ +//! Logic to export era1 block history from the database +//! and inject it into era1 files with `Era1Writer`. + +use alloy_consensus::{BlockBody, BlockHeader, Header}; +use alloy_primitives::{BlockNumber, B256, U256}; +use eyre::{eyre, Result}; +use reth_era::{ + era1_file::Era1Writer, + era1_types::{BlockIndex, Era1Id}, + execution_types::{ + Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, + TotalDifficulty, MAX_BLOCKS_PER_ERA1, + }, +}; +use reth_fs_util as fs; +use reth_storage_api::{BlockNumReader, BlockReader, HeaderProvider}; +use std::{ + path::PathBuf, + time::{Duration, Instant}, +}; +use tracing::{info, warn}; + +const REPORT_INTERVAL_SECS: u64 = 10; +const ENTRY_HEADER_SIZE: usize = 8; +const VERSION_ENTRY_SIZE: usize = ENTRY_HEADER_SIZE; + +/// Configuration to export block history +/// to era1 files. +#[derive(Clone, Debug)] +pub struct ExportConfig { + /// Directory to export era1 files to + pub dir: PathBuf, + /// First block to export + pub first_block_number: BlockNumber, + /// Last block to export + pub last_block_number: BlockNumber, + /// Number of blocks per era1 file. + /// It can never be larger than `MAX_BLOCKS_PER_ERA1 = 8192`. + /// See also <https://github.com/eth-clients/e2store-format-specs/blob/main/formats/era1.md> + pub max_blocks_per_file: u64, + /// Network name + pub network: String, +} + +impl Default for ExportConfig { + fn default() -> Self { + Self { + dir: PathBuf::new(), + first_block_number: 0, + last_block_number: (MAX_BLOCKS_PER_ERA1 - 1) as u64, + max_blocks_per_file: MAX_BLOCKS_PER_ERA1 as u64, + network: "mainnet".to_string(), + } + } +} + +impl ExportConfig { + /// Validates the export configuration parameters + pub fn validate(&self) -> Result<()> { + if self.max_blocks_per_file > MAX_BLOCKS_PER_ERA1 as u64 { + return Err(eyre!( + "Max blocks per file ({}) exceeds ERA1 limit ({})", + self.max_blocks_per_file, + MAX_BLOCKS_PER_ERA1 + )); + } + + if self.max_blocks_per_file == 0 { + return Err(eyre!("Max blocks per file cannot be zero")); + } + + Ok(()) + } +} + +/// Fetches block history data from the provider, +/// prepares it for export to era1 files for the given block range, +/// and writes them to disk. +pub fn export<P, B>(provider: &P, config: &ExportConfig) -> Result<Vec<PathBuf>> +where + P: BlockReader<Block = B>, + B: Into>, + P::Header: Into<Header>
, +{ + config.validate()?; + info!( + "Exporting blockchain history from block {} to {} with a maximum of {} blocks per file", + config.first_block_number, config.last_block_number, config.max_blocks_per_file + ); + + // Determine the actual last block to export; + // best_block_number() might be outdated, so check actual block availability + let last_block_number = determine_export_range(provider, config)?; + + info!( + target: "era::history::export", + first = config.first_block_number, + last = last_block_number, + max_blocks_per_file = config.max_blocks_per_file, + "Preparing era1 export data" + ); + + if !config.dir.exists() { + fs::create_dir_all(&config.dir) + .map_err(|e| eyre!("Failed to create output directory: {}", e))?; + } + + let start_time = Instant::now(); + let mut last_report_time = Instant::now(); + let report_interval = Duration::from_secs(REPORT_INTERVAL_SECS); + + let mut created_files = Vec::new(); + let mut total_blocks_processed = 0; + + let mut total_difficulty = if config.first_block_number > 0 { + let prev_block_number = config.first_block_number - 1; + provider + .header_td_by_number(prev_block_number)? + .ok_or_else(|| eyre!("Total difficulty not found for block {prev_block_number}"))? + } else { + U256::ZERO + }; + + // Process blocks in chunks according to `max_blocks_per_file` + for start_block in + (config.first_block_number..=last_block_number).step_by(config.max_blocks_per_file as usize) + { + let end_block = (start_block + config.max_blocks_per_file - 1).min(last_block_number); + let block_count = (end_block - start_block + 1) as usize; + + info!( + target: "era::history::export", + "Processing blocks {start_block} to {end_block} ({block_count} blocks)" + ); + + let headers = provider.headers_range(start_block..=end_block)?; + + let era1_id = Era1Id::new(&config.network, start_block, block_count as u32); + let file_path = config.dir.join(era1_id.to_file_name()); + let file = std::fs::File::create(&file_path)?; + let mut writer = Era1Writer::new(file); + writer.write_version()?; + + let mut offsets = Vec::with_capacity(block_count); + let mut position = VERSION_ENTRY_SIZE as i64; + let mut blocks_written = 0; + let mut final_header_data = Vec::new(); + + for (i, header) in headers.into_iter().enumerate() { + let expected_block_number = start_block + i as u64; + + let (compressed_header, compressed_body, compressed_receipts) = compress_block_data( + provider, + header, + expected_block_number, + &mut total_difficulty, + )?; + + // Save last block's header data for accumulator + if expected_block_number == end_block { + final_header_data = compressed_header.data.clone(); + } + + let difficulty = TotalDifficulty::new(total_difficulty); + + let header_size = compressed_header.data.len() + ENTRY_HEADER_SIZE; + let body_size = compressed_body.data.len() + ENTRY_HEADER_SIZE; + let receipts_size = compressed_receipts.data.len() + ENTRY_HEADER_SIZE; + let difficulty_size = 32 + ENTRY_HEADER_SIZE; // 32-byte U256 value + 8-byte entry header + let total_size = header_size + body_size + receipts_size + difficulty_size; + + let block_tuple = BlockTuple::new( + compressed_header, + compressed_body, + compressed_receipts, + difficulty, + ); + + offsets.push(position); + position += total_size as i64; + + writer.write_block(&block_tuple)?; + blocks_written += 1; + total_blocks_processed += 1; + + if last_report_time.elapsed() >= report_interval { + info!( + target: "era::history::export", + "Export progress: block {expected_block_number}/{last_block_number} ({:.2}%) - 
elapsed: {:?}", + (total_blocks_processed as f64) / + ((last_block_number - config.first_block_number + 1) as f64) * + 100.0, + start_time.elapsed() + ); + last_report_time = Instant::now(); + } + } + if blocks_written > 0 { + let accumulator_hash = + B256::from_slice(&final_header_data[0..32.min(final_header_data.len())]); + let accumulator = Accumulator::new(accumulator_hash); + let block_index = BlockIndex::new(start_block, offsets); + + writer.write_accumulator(&accumulator)?; + writer.write_block_index(&block_index)?; + writer.flush()?; + created_files.push(file_path.clone()); + + info!( + target: "era::history::export", + "Wrote ERA1 file: {file_path:?} with {blocks_written} blocks" + ); + } + } + + info!( + target: "era::history::export", + "Successfully wrote {} ERA1 files in {:?}", + created_files.len(), + start_time.elapsed() + ); + + Ok(created_files) +} + +// Determines the actual last block number that can be exported, +// Uses `headers_range` fallback when `best_block_number` is stale due to static file storage. +fn determine_export_range
+where
+    P: HeaderProvider + BlockNumReader,
+{
+    let best_block_number = provider.best_block_number()?;
+
+    let last_block_number = if best_block_number < config.last_block_number {
+        warn!(
+            "Last block {} is beyond current head {}, checking actual block availability",
+            config.last_block_number, best_block_number
+        );
+
+        // Check if more blocks are actually available beyond what `best_block_number()` reports
+        if let Ok(headers) = provider.headers_range(best_block_number..=config.last_block_number) {
+            if let Some(last_header) = headers.last() {
+                let highest_block = last_header.number();
+                info!("Found highest available block {} via headers_range", highest_block);
+                highest_block
+            } else {
+                warn!("No headers found in range, using best_block_number {}", best_block_number);
+                best_block_number
+            }
+        } else {
+            warn!("headers_range failed, using best_block_number {}", best_block_number);
+            best_block_number
+        }
+    } else {
+        config.last_block_number
+    };
+
+    Ok(last_block_number)
+}
+
+// Compresses block data and returns the compressed components with metadata.
+fn compress_block_data<P, B>(
+    provider: &P,
+    header: P::Header,
+    expected_block_number: BlockNumber,
+    total_difficulty: &mut U256,
+) -> Result<(CompressedHeader, CompressedBody, CompressedReceipts)>
+where
+    P: BlockReader<Block = B>,
+    B: Into<BlockBody<TransactionSigned>>,
+    P::Header: Into<Header>
, +{ + let actual_block_number = header.number(); + + if expected_block_number != actual_block_number { + return Err(eyre!("Expected block {expected_block_number}, got {actual_block_number}")); + } + + let body = provider + .block_by_number(actual_block_number)? + .ok_or_else(|| eyre!("Block body not found for block {}", actual_block_number))?; + + let receipts = provider + .receipts_by_block(actual_block_number.into())? + .ok_or_else(|| eyre!("Receipts not found for block {}", actual_block_number))?; + + *total_difficulty += header.difficulty(); + + let compressed_header = CompressedHeader::from_header(&header.into())?; + let compressed_body = CompressedBody::from_body(&body.into())?; + let compressed_receipts = CompressedReceipts::from_encodable_list(&receipts) + .map_err(|e| eyre!("Failed to compress receipts: {}", e))?; + + Ok((compressed_header, compressed_body, compressed_receipts)) +} + +#[cfg(test)] +mod tests { + use crate::ExportConfig; + use reth_era::execution_types::MAX_BLOCKS_PER_ERA1; + use tempfile::tempdir; + + #[test] + fn test_export_config_validation() { + let temp_dir = tempdir().unwrap(); + + // Default config should pass + let default_config = ExportConfig::default(); + assert!(default_config.validate().is_ok(), "Default config should be valid"); + + // Exactly at the limit should pass + let limit_config = + ExportConfig { max_blocks_per_file: MAX_BLOCKS_PER_ERA1 as u64, ..Default::default() }; + assert!(limit_config.validate().is_ok(), "Config at ERA1 limit should pass validation"); + + // Valid config should pass + let valid_config = ExportConfig { + dir: temp_dir.path().to_path_buf(), + max_blocks_per_file: 1000, + ..Default::default() + }; + assert!(valid_config.validate().is_ok(), "Valid config should pass validation"); + + // Zero blocks per file should fail + let zero_blocks_config = ExportConfig { + max_blocks_per_file: 0, // Invalid + ..Default::default() + }; + let result = zero_blocks_config.validate(); + assert!(result.is_err(), "Zero blocks per file should fail validation"); + assert!(result.unwrap_err().to_string().contains("cannot be zero")); + + // Exceeding era1 limit should fail + let oversized_config = ExportConfig { + max_blocks_per_file: MAX_BLOCKS_PER_ERA1 as u64 + 1, // Invalid + ..Default::default() + }; + let result = oversized_config.validate(); + assert!(result.is_err(), "Oversized blocks per file should fail validation"); + assert!(result.unwrap_err().to_string().contains("exceeds ERA1 limit")); + } +} diff --git a/crates/era-utils/src/history.rs b/crates/era-utils/src/history.rs index 029b310b820..75eaa4591cf 100644 --- a/crates/era-utils/src/history.rs +++ b/crates/era-utils/src/history.rs @@ -17,10 +17,16 @@ use reth_etl::Collector; use reth_fs_util as fs; use reth_primitives_traits::{Block, FullBlockBody, FullBlockHeader, NodePrimitives}; use reth_provider::{ - providers::StaticFileProviderRWRefMut, BlockWriter, ProviderError, StaticFileProviderFactory, - StaticFileSegment, StaticFileWriter, + providers::StaticFileProviderRWRefMut, writer::UnifiedStorageWriter, BlockWriter, + ProviderError, StaticFileProviderFactory, StaticFileSegment, StaticFileWriter, +}; +use reth_stages_types::{ + CheckpointBlockRange, EntitiesCheckpoint, HeadersCheckpoint, StageCheckpoint, StageId, +}; +use reth_storage_api::{ + errors::ProviderResult, DBProvider, DatabaseProviderFactory, HeaderProvider, + NodePrimitivesProvider, StageCheckpointWriter, StorageLocation, }; -use reth_storage_api::{DBProvider, HeaderProvider, NodePrimitivesProvider, StorageLocation}; 
 use std::{
     collections::Bound,
     error::Error,
@@ -35,22 +41,26 @@ use tracing::info;
 /// Imports blocks from `downloader` using `provider`.
 ///
 /// Returns current block height.
-pub fn import<Downloader, Era, B, BH, BB, P>(
+pub fn import<Downloader, Era, B, BH, BB, PF>(
     mut downloader: Downloader,
-    provider: &P,
+    provider_factory: &PF,
     hash_collector: &mut Collector<BlockHash, BlockNumber>,
 ) -> eyre::Result<BlockNumber>
 where
     B: Block<Header = BH, Body = BB>,
     BH: FullBlockHeader + Value,
     BB: FullBlockBody<
-        Transaction = <
<P as NodePrimitivesProvider>
::Primitives as NodePrimitives>::SignedTx,
+        Transaction = <<<PF as DatabaseProviderFactory>::ProviderRW as NodePrimitivesProvider>::Primitives as NodePrimitives>::SignedTx,
         OmmerHeader = BH,
     >,
     Downloader: Stream<Item = eyre::Result<Era>> + Send + 'static + Unpin,
     Era: EraMeta + Send + 'static,
-    P: DBProvider + StaticFileProviderFactory + BlockWriter,
-
<P as NodePrimitivesProvider>
::Primitives: NodePrimitives,
+    PF: DatabaseProviderFactory<
+            ProviderRW: BlockWriter<Block = B>
+                + DBProvider
+                + StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader = BH>>
+                + StageCheckpointWriter,
+        > + StaticFileProviderFactory<Primitives = <<PF as DatabaseProviderFactory>::ProviderRW as NodePrimitivesProvider>::Primitives>,
 {
     let (tx, rx) = mpsc::channel();
@@ -62,31 +72,74 @@ where
         tx.send(None)
     });
 
-    let static_file_provider = provider.static_file_provider();
+    let static_file_provider = provider_factory.static_file_provider();
 
     // Consistency check of expected headers in static files vs DB is done on provider::sync_gap
     // when poll_execute_ready is polled.
-    let mut last_header_number = static_file_provider
+    let mut height = static_file_provider
        .get_highest_static_file_block(StaticFileSegment::Headers)
        .unwrap_or_default();
 
     // Find the latest total difficulty
     let mut td = static_file_provider
-        .header_td_by_number(last_header_number)?
-        .ok_or(ProviderError::TotalDifficultyNotFound(last_header_number))?;
-
-    // Although headers were downloaded in reverse order, the collector iterates it in ascending
-    // order
-    let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?;
+        .header_td_by_number(height)?
+        .ok_or(ProviderError::TotalDifficultyNotFound(height))?;
 
     while let Some(meta) = rx.recv()? {
-        last_header_number =
-            process(&meta?, &mut writer, provider, hash_collector, &mut td, last_header_number..)?;
+        let from = height;
+        let provider = provider_factory.database_provider_rw()?;
+
+        height = process(
+            &meta?,
+            &mut static_file_provider.latest_writer(StaticFileSegment::Headers)?,
+            &provider,
+            hash_collector,
+            &mut td,
+            height..,
+        )?;
+
+        save_stage_checkpoints(&provider, from, height, height, height)?;
+
+        UnifiedStorageWriter::commit(provider)?;
    }
 
-    build_index(provider, hash_collector)?;
+    let provider = provider_factory.database_provider_rw()?;
 
-    Ok(last_header_number)
+    build_index(&provider, hash_collector)?;
+
+    UnifiedStorageWriter::commit(provider)?;
+
+    Ok(height)
+}
+
+/// Saves progress of ERA import into stages sync.
+///
+/// Since the ERA import does the same work as `HeaderStage` and `BodyStage`, it needs to inform
+/// these stages that this work has already been done. Otherwise, they would repeat the same work,
+/// which could conflict with database integrity.
+pub fn save_stage_checkpoints
<P>
( + provider: &P, + from: BlockNumber, + to: BlockNumber, + processed: u64, + total: u64, +) -> ProviderResult<()> +where + P: StageCheckpointWriter, +{ + provider.save_stage_checkpoint( + StageId::Headers, + StageCheckpoint::new(to).with_headers_stage_checkpoint(HeadersCheckpoint { + block_range: CheckpointBlockRange { from, to }, + progress: EntitiesCheckpoint { processed, total }, + }), + )?; + provider.save_stage_checkpoint( + StageId::Bodies, + StageCheckpoint::new(to) + .with_entities_stage_checkpoint(EntitiesCheckpoint { processed, total }), + )?; + Ok(()) } /// Extracts block headers and bodies from `meta` and appends them using `writer` and `provider`. @@ -116,7 +169,7 @@ where OmmerHeader = BH, >, Era: EraMeta + ?Sized, - P: DBProvider + StaticFileProviderFactory + BlockWriter, + P: DBProvider + NodePrimitivesProvider + BlockWriter,
<P as NodePrimitivesProvider>
::Primitives: NodePrimitives, { let reader = open(meta)?; @@ -226,7 +279,7 @@ where Transaction = <
<P as NodePrimitivesProvider>
::Primitives as NodePrimitives>::SignedTx, OmmerHeader = BH, >, - P: DBProvider + StaticFileProviderFactory + BlockWriter, + P: DBProvider + NodePrimitivesProvider + BlockWriter,
<P as NodePrimitivesProvider>
::Primitives: NodePrimitives, { let mut last_header_number = match block_numbers.start_bound() { @@ -287,7 +340,7 @@ where Transaction = <
<P as NodePrimitivesProvider>
::Primitives as NodePrimitives>::SignedTx, OmmerHeader = BH, >, - P: DBProvider + StaticFileProviderFactory + BlockWriter, + P: DBProvider + NodePrimitivesProvider + BlockWriter,
<P as NodePrimitivesProvider>
::Primitives: NodePrimitives, { let total_headers = hash_collector.len(); diff --git a/crates/era-utils/src/lib.rs b/crates/era-utils/src/lib.rs index ce3e70246e7..966709d2f21 100644 --- a/crates/era-utils/src/lib.rs +++ b/crates/era-utils/src/lib.rs @@ -1,8 +1,18 @@ //! Utilities to store history from downloaded ERA files with storage-api +//! and export it to recreate era1 files. //! //! The import is downloaded using [`reth_era_downloader`] and parsed using [`reth_era`]. mod history; +/// Export block history data from the database to recreate era1 files. +mod export; + +/// Export history from storage-api between 2 blocks +/// with parameters defined in [`ExportConfig`]. +pub use export::{export, ExportConfig}; + /// Imports history from ERA files. -pub use history::{build_index, decode, import, open, process, process_iter, ProcessIter}; +pub use history::{ + build_index, decode, import, open, process, process_iter, save_stage_checkpoints, ProcessIter, +}; diff --git a/crates/era-utils/tests/it/genesis.rs b/crates/era-utils/tests/it/genesis.rs new file mode 100644 index 00000000000..dacef15eeac --- /dev/null +++ b/crates/era-utils/tests/it/genesis.rs @@ -0,0 +1,30 @@ +use reth_db_common::init::init_genesis; +use reth_era_utils::{export, ExportConfig}; +use reth_fs_util as fs; +use reth_provider::{test_utils::create_test_provider_factory, BlockReader}; +use tempfile::tempdir; + +#[test] +fn test_export_with_genesis_only() { + let provider_factory = create_test_provider_factory(); + init_genesis(&provider_factory).unwrap(); + let provider = provider_factory.provider().unwrap(); + assert!(provider.block_by_number(0).unwrap().is_some(), "Genesis block should exist"); + assert!(provider.block_by_number(1).unwrap().is_none(), "Block 1 should not exist"); + + let export_dir = tempdir().unwrap(); + let export_config = ExportConfig { dir: export_dir.path().to_owned(), ..Default::default() }; + + let exported_files = + export(&provider_factory.provider_rw().unwrap().0, &export_config).unwrap(); + + assert_eq!(exported_files.len(), 1, "Should export exactly one file"); + + let file_path = &exported_files[0]; + assert!(file_path.exists(), "Exported file should exist on disk"); + let file_name = file_path.file_name().unwrap().to_str().unwrap(); + assert!(file_name.starts_with("mainnet-0-"), "File should have correct prefix"); + assert!(file_name.ends_with(".era1"), "File should have correct extension"); + let metadata = fs::metadata(file_path).unwrap(); + assert!(metadata.len() > 0, "Exported file should not be empty"); +} diff --git a/crates/era-utils/tests/it/history.rs b/crates/era-utils/tests/it/history.rs index d3d447615b9..4811e729539 100644 --- a/crates/era-utils/tests/it/history.rs +++ b/crates/era-utils/tests/it/history.rs @@ -1,21 +1,27 @@ -use alloy_primitives::bytes::Bytes; -use futures_util::{Stream, TryStreamExt}; -use reqwest::{Client, IntoUrl, Url}; +use crate::{ClientWithFakeIndex, ITHACA_ERA_INDEX_URL}; +use reqwest::{Client, Url}; use reth_db_common::init::init_genesis; -use reth_era_downloader::{EraClient, EraStream, EraStreamConfig, HttpClient}; +use reth_era_downloader::{EraClient, EraStream, EraStreamConfig}; +use reth_era_utils::{export, import, ExportConfig}; use reth_etl::Collector; -use reth_provider::test_utils::create_test_provider_factory; -use std::{future::Future, str::FromStr}; +use reth_fs_util as fs; +use reth_provider::{test_utils::create_test_provider_factory, BlockNumReader, BlockReader}; +use std::str::FromStr; use tempfile::tempdir; +const EXPORT_FIRST_BLOCK: 
u64 = 0; +const EXPORT_BLOCKS_PER_FILE: u64 = 250; +const EXPORT_TOTAL_BLOCKS: u64 = 900; +const EXPORT_LAST_BLOCK: u64 = EXPORT_FIRST_BLOCK + EXPORT_TOTAL_BLOCKS - 1; + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_history_imports_from_fresh_state_successfully() { // URL where the ERA1 files are hosted - let url = Url::from_str("https://era.ithaca.xyz/era1/index.html").unwrap(); + let url = Url::from_str(ITHACA_ERA_INDEX_URL).unwrap(); // Directory where the ERA1 files will be downloaded to let folder = tempdir().unwrap(); - let folder = folder.path().to_owned().into_boxed_path(); + let folder = folder.path(); let client = EraClient::new(ClientWithFakeIndex(Client::new()), url, folder); @@ -31,43 +37,102 @@ async fn test_history_imports_from_fresh_state_successfully() { let mut hash_collector = Collector::new(4096, folder); let expected_block_number = 8191; - let actual_block_number = - reth_era_utils::import(stream, &pf.provider_rw().unwrap().0, &mut hash_collector).unwrap(); + let actual_block_number = import(stream, &pf, &mut hash_collector).unwrap(); assert_eq!(actual_block_number, expected_block_number); } -/// An HTTP client pre-programmed with canned answer to index. -/// -/// Passes any other calls to a real HTTP client! -#[derive(Debug, Clone)] -struct ClientWithFakeIndex(Client); - -impl HttpClient for ClientWithFakeIndex { - fn get( - &self, - url: U, - ) -> impl Future< - Output = eyre::Result> + Send + Sync + Unpin>, - > + Send - + Sync { - let url = url.into_url().unwrap(); - - async move { - match url.to_string().as_str() { - "https://era.ithaca.xyz/era1/index.html" => { - Ok(Box::new(futures::stream::once(Box::pin(async move { - Ok(bytes::Bytes::from_static(b"mainnet-00000-5ec1ffb8.era1")) - }))) - as Box> + Send + Sync + Unpin>) - } - _ => { - let response = Client::get(&self.0, url).send().await?; - - Ok(Box::new(response.bytes_stream().map_err(|e| eyre::Error::new(e))) - as Box> + Send + Sync + Unpin>) - } - } - } +/// Test that verifies the complete roundtrip from importing to exporting era1 files. 
+/// It validates:
+/// - Downloads the first era1 file from Ithaca's URL and imports the file data into the database
+/// - Exports blocks from the database back to era1 format
+/// - Ensures exported files have the correct structure and naming
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn test_roundtrip_export_after_import() {
+    // URL where the ERA1 files are hosted
+    let url = Url::from_str(ITHACA_ERA_INDEX_URL).unwrap();
+    let download_folder = tempdir().unwrap();
+    let download_folder = download_folder.path().to_owned().into_boxed_path();
+
+    let client = EraClient::new(ClientWithFakeIndex(Client::new()), url, download_folder);
+    let config = EraStreamConfig::default().with_max_files(1).with_max_concurrent_downloads(1);
+
+    let stream = EraStream::new(client, config);
+    let pf = create_test_provider_factory();
+    init_genesis(&pf).unwrap();
+
+    let folder = tempdir().unwrap();
+    let folder = Some(folder.path().to_owned());
+    let mut hash_collector = Collector::new(4096, folder);
+
+    // Import blocks from one era1 file into the database
+    let last_imported_block_height = import(stream, &pf, &mut hash_collector).unwrap();
+
+    assert_eq!(last_imported_block_height, 8191);
+    let provider_ref = pf.provider_rw().unwrap().0;
+    let best_block = provider_ref.best_block_number().unwrap();
+
+    assert!(best_block <= 8191, "Best block {best_block} should not exceed imported count");
+
+    // Verify some blocks exist in the database
+    for &block_num in &[0, 1, 2, 10, 50, 100, 5000, 8190, 8191] {
+        let block_exists = provider_ref.block_by_number(block_num).unwrap().is_some();
+        assert!(block_exists, "Block {block_num} should exist after importing 8191 blocks");
+    }
+
+    // The import is verified, now start the export!
+
+    // 900 blocks will be exported, from 0 to 899.
+    // They should be split into 4 files: the first 3 with 250 blocks each, and a last file with
+    // 150 blocks
+    let export_folder = tempdir().unwrap();
+    let export_config = ExportConfig {
+        dir: export_folder.path().to_path_buf(),
+        first_block_number: EXPORT_FIRST_BLOCK,      // 0
+        last_block_number: EXPORT_LAST_BLOCK,        // 899
+        max_blocks_per_file: EXPORT_BLOCKS_PER_FILE, // 250 blocks per file
+        network: "mainnet".to_string(),
+    };
+
+    // Export blocks from the database to era1 files
+    let exported_files = export(&provider_ref, &export_config).expect("Export should succeed");
+
+    // Calculate how many files we expect based on the configuration
+    // We expect 4 files for 900 blocks: first 3 files with 250 blocks each,
+    // then 150 for the last file
+    let expected_files_number = EXPORT_TOTAL_BLOCKS.div_ceil(EXPORT_BLOCKS_PER_FILE);
+
+    assert_eq!(
+        exported_files.len(),
+        expected_files_number as usize,
+        "Should create {expected_files_number} files for {EXPORT_TOTAL_BLOCKS} blocks with {EXPORT_BLOCKS_PER_FILE} blocks per file"
+    );
+
+    for (i, file_path) in exported_files.iter().enumerate() {
+        // Verify the file exists and has content
+        assert!(file_path.exists(), "File {} should exist", i + 1);
+        let file_size = fs::metadata(file_path).unwrap().len();
+        assert!(file_size > 0, "File {} should not be empty", i + 1);
+
+        // Calculate expected file parameters
+        let file_start_block = EXPORT_FIRST_BLOCK + (i as u64 * EXPORT_BLOCKS_PER_FILE);
+        let remaining_blocks = EXPORT_TOTAL_BLOCKS - (i as u64 * EXPORT_BLOCKS_PER_FILE);
+        let blocks_numbers_per_file = std::cmp::min(EXPORT_BLOCKS_PER_FILE, remaining_blocks);
+
+        // Verify chunking: the first 3 files have 250 blocks, the last file has 150 blocks - 900 total
+        let expected_blocks = if i < 3 { 250 } else { 150
};
+        assert_eq!(
+            blocks_numbers_per_file,
+            expected_blocks,
+            "File {} should contain exactly {} blocks, got {}",
+            i + 1,
+            expected_blocks,
+            blocks_numbers_per_file
+        );
+
+        // Verify the exact ERA1 naming convention: `mainnet-{start_block}-{block_count}.era1`
+        let file_name = file_path.file_name().unwrap().to_str().unwrap();
+        let expected_filename =
+            format!("mainnet-{file_start_block}-{blocks_numbers_per_file}.era1");
+        assert_eq!(file_name, expected_filename, "File {} should have correct name", i + 1);
    }
}
diff --git a/crates/era-utils/tests/it/main.rs b/crates/era-utils/tests/it/main.rs
index 9a035cdf7da..94805c5b356 100644
--- a/crates/era-utils/tests/it/main.rs
+++ b/crates/era-utils/tests/it/main.rs
@@ -1,5 +1,49 @@
 //! Root module for test modules, so that the tests are built into a single binary.
 
+use alloy_primitives::bytes::Bytes;
+use futures_util::{stream, Stream, TryStreamExt};
+use reqwest::{Client, IntoUrl};
+use reth_era_downloader::HttpClient;
+use tokio_util::either::Either;
+
+// URL where the ERA1 files are hosted
+const ITHACA_ERA_INDEX_URL: &str = "https://era.ithaca.xyz/era1/index.html";
+
+// The response containing one file that the fake client will return when the index URL is requested
+const GENESIS_ITHACA_INDEX_RESPONSE: &[u8] = b"mainnet-00000-5ec1ffb8.era1";
+
+mod genesis;
 mod history;
 
 const fn main() {}
+
+/// An HTTP client that fakes the file list to always show one known file,
+/// but passes all other calls, including actual downloads, to a real HTTP client.
+///
+/// This way, only one file is used while downloads are still performed from the original source.
+#[derive(Debug, Clone)]
+struct ClientWithFakeIndex(Client);
+
+impl HttpClient for ClientWithFakeIndex {
+    async fn get<U: IntoUrl + Send + Sync>(
+        &self,
+        url: U,
+    ) -> eyre::Result<impl Stream<Item = eyre::Result<Bytes>> + Send + Sync + Unpin> {
+        let url = url.into_url()?;
+
+        match url.to_string().as_str() {
+            ITHACA_ERA_INDEX_URL => {
+                // Create a static stream without boxing
+                let stream =
+                    stream::iter(vec![Ok(Bytes::from_static(GENESIS_ITHACA_INDEX_RESPONSE))]);
+                Ok(Either::Left(stream))
+            }
+            _ => {
+                let response = Client::get(&self.0, url).send().await?;
+                let stream = response.bytes_stream().map_err(|e| eyre::Error::new(e));
+                Ok(Either::Right(stream))
+            }
+        }
+    }
+}
diff --git a/crates/era/Cargo.toml b/crates/era/Cargo.toml
index d8259ec813c..09d5b8b9180 100644
--- a/crates/era/Cargo.toml
+++ b/crates/era/Cargo.toml
@@ -1,5 +1,6 @@
 [package]
 name = "reth-era"
+description = "e2store and era1 files core logic"
 version.workspace = true
 edition.workspace = true
 rust-version.workspace = true
diff --git a/crates/era/src/execution_types.rs b/crates/era/src/execution_types.rs
index 4591abb281a..27030b112a1 100644
--- a/crates/era/src/execution_types.rs
+++ b/crates/era/src/execution_types.rs
@@ -333,6 +333,18 @@ impl CompressedReceipts {
         let compressed = encoder.encode(data)?;
         Ok(Self::new(compressed))
     }
+    /// Encode a list of receipts to RLP format
+    pub fn encode_receipts_to_rlp<T: Encodable>(receipts: &[T]) -> Result<Vec<u8>, E2sError> {
+        let mut rlp_data = Vec::new();
+        alloy_rlp::encode_list(receipts, &mut rlp_data);
+        Ok(rlp_data)
+    }
+
+    /// Encode and compress a list of receipts
+    pub fn from_encodable_list<T: Encodable>(receipts: &[T]) -> Result<Self, E2sError> {
+        let rlp_data = Self::encode_receipts_to_rlp(receipts)?;
+        Self::from_rlp(&rlp_data)
+    }
 }
 
 impl DecodeCompressed for CompressedReceipts {
diff --git a/crates/era/tests/it/main.rs b/crates/era/tests/it/main.rs
index e27e25e1658..fa939819189 100644
--- a/crates/era/tests/it/main.rs
+++ b/crates/era/tests/it/main.rs
@@ 
-115,7 +115,7 @@ impl Era1TestDownloader { let final_url = Url::from_str(url).map_err(|e| eyre!("Failed to parse URL: {}", e))?; - let folder = self.temp_dir.path().to_owned().into_boxed_path(); + let folder = self.temp_dir.path(); // set up the client let client = EraClient::new(Client::new(), final_url, folder); diff --git a/crates/ethereum/cli/src/interface.rs b/crates/ethereum/cli/src/interface.rs index 46fb8720238..91164489bdb 100644 --- a/crates/ethereum/cli/src/interface.rs +++ b/crates/ethereum/cli/src/interface.rs @@ -12,7 +12,6 @@ use reth_cli_commands::{ }; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; -use reth_network::EthNetworkPrimitives; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ args::LogArgs, @@ -162,7 +161,7 @@ impl, Ext: clap::Args + fmt::Debug> Cl runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::Import(command) => { - runner.run_blocking_until_ctrl_c(command.execute::(components)) + runner.run_blocking_until_ctrl_c(command.execute::(components)) } Commands::ImportEra(command) => { runner.run_blocking_until_ctrl_c(command.execute::()) @@ -174,12 +173,9 @@ impl, Ext: clap::Args + fmt::Debug> Cl Commands::Download(command) => { runner.run_blocking_until_ctrl_c(command.execute::()) } - Commands::Stage(command) => runner.run_command_until_exit(|ctx| { - command.execute::(ctx, components) - }), - Commands::P2P(command) => { - runner.run_until_ctrl_c(command.execute::()) - } + Commands::Stage(command) => runner + .run_command_until_exit(|ctx| command.execute::(ctx, components)), + Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::()), #[cfg(feature = "dev")] Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 70eb799311e..82a37b2386e 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -98,7 +98,7 @@ impl EthBeaconConsensus impl FullConsensus for EthBeaconConsensus where - ChainSpec: Send + Sync + EthChainSpec + EthereumHardforks + Debug, + ChainSpec: Send + Sync + EthChainSpec
<Header = N::BlockHeader>
+ EthereumHardforks + Debug,
     N: NodePrimitives,
 {
     fn validate_block_post_execution(
@@ -110,10 +110,10 @@
     }
 }
 
-impl Consensus
-    for EthBeaconConsensus
+impl<B, ChainSpec> Consensus<B> for EthBeaconConsensus<ChainSpec>
 where
     B: Block,
+    ChainSpec: EthChainSpec<Header = B::Header> + EthereumHardforks + Debug + Send + Sync,
 {
     type Error = ConsensusError;
@@ -130,10 +130,10 @@
     }
 }
 
-impl HeaderValidator
-    for EthBeaconConsensus
+impl<H, ChainSpec> HeaderValidator<H> for EthBeaconConsensus<ChainSpec>
 where
     H: BlockHeader,
+    ChainSpec: EthChainSpec<Header = H>
+ EthereumHardforks + Debug + Send + Sync, { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { let header = header.header(); diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index ceb3a3b0a84..dcd73232db6 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -12,7 +12,7 @@ extern crate alloc; mod payload; -pub use payload::{BlobSidecars, EthBuiltPayload, EthPayloadBuilderAttributes}; +pub use payload::{payload_id, BlobSidecars, EthBuiltPayload, EthPayloadBuilderAttributes}; mod error; pub use error::*; diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index ec8152dd805..444747716ee 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -404,7 +404,7 @@ impl PayloadBuilderAttributes for EthPayloadBuilderAttributes { /// Generates the payload id for the configured payload from the [`PayloadAttributes`]. /// /// Returns an 8-byte identifier by hashing the payload components with sha256 hash. -pub(crate) fn payload_id(parent: &B256, attributes: &PayloadAttributes) -> PayloadId { +pub fn payload_id(parent: &B256, attributes: &PayloadAttributes) -> PayloadId { use sha2::Digest; let mut hasher = sha2::Sha256::new(); hasher.update(parent.as_slice()); diff --git a/crates/ethereum/evm/src/build.rs b/crates/ethereum/evm/src/build.rs index 1762e951cd1..5e80ca9ba46 100644 --- a/crates/ethereum/evm/src/build.rs +++ b/crates/ethereum/evm/src/build.rs @@ -52,7 +52,7 @@ where .. } = input; - let timestamp = evm_env.block_env.timestamp; + let timestamp = evm_env.block_env.timestamp.saturating_to(); let transactions_root = proofs::calculate_transaction_root(&transactions); let receipts_root = Receipt::calculate_receipt_root_no_memo(receipts); @@ -101,7 +101,7 @@ where mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(), nonce: BEACON_NONCE.into(), base_fee_per_gas: Some(evm_env.block_env.basefee), - number: evm_env.block_env.number, + number: evm_env.block_env.number.saturating_to(), gas_limit: evm_env.block_env.gas_limit, difficulty: evm_env.block_env.difficulty, gas_used: *gas_used, diff --git a/crates/ethereum/evm/src/config.rs b/crates/ethereum/evm/src/config.rs index e94ca17fb37..676b790edb7 100644 --- a/crates/ethereum/evm/src/config.rs +++ b/crates/ethereum/evm/src/config.rs @@ -1,19 +1,25 @@ use alloy_consensus::Header; -use reth_chainspec::{ChainSpec, EthereumHardforks}; -use reth_ethereum_forks::EthereumHardfork; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_ethereum_forks::{EthereumHardfork, Hardforks}; use revm::primitives::hardfork::SpecId; /// Map the latest active hardfork at the given header to a revm [`SpecId`]. -pub fn revm_spec(chain_spec: &ChainSpec, header: &Header) -> SpecId { +pub fn revm_spec(chain_spec: &C, header: &Header) -> SpecId +where + C: EthereumHardforks + EthChainSpec + Hardforks, +{ revm_spec_by_timestamp_and_block_number(chain_spec, header.timestamp, header.number) } /// Map the latest active hardfork at the given timestamp or block number to a revm [`SpecId`]. 
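For orientation, a minimal sketch of how the generalized helper is meant to behave, reusing the mainnet activation values exercised by the tests in this diff (the `&*MAINNET` deref and the `revm_spec_by_timestamp_and_block_number` re-export are taken from the diff itself; the assertions are illustrative):

```rust,ignore
use reth_chainspec::MAINNET;
use reth_evm_ethereum::revm_spec_by_timestamp_and_block_number;
use revm::primitives::hardfork::SpecId;

// Timestamp-based forks are checked first: 1710338135 falls inside Cancun on mainnet.
assert_eq!(
    revm_spec_by_timestamp_and_block_number(&*MAINNET, 1710338135, 19_000_000),
    SpecId::CANCUN
);

// With a timestamp before any timestamp-based fork, selection falls through to the
// block-number forks: block 12_244_000 + 10 is just past the Berlin activation block.
assert_eq!(
    revm_spec_by_timestamp_and_block_number(&*MAINNET, 0, 12_244_000 + 10),
    SpecId::BERLIN
);
```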
-pub fn revm_spec_by_timestamp_and_block_number( - chain_spec: &ChainSpec, +pub fn revm_spec_by_timestamp_and_block_number( + chain_spec: &C, timestamp: u64, block_number: u64, -) -> SpecId { +) -> SpecId +where + C: EthereumHardforks + EthChainSpec + Hardforks, +{ if chain_spec .fork(EthereumHardfork::Osaka) .active_at_timestamp_or_number(timestamp, block_number) @@ -83,8 +89,8 @@ pub fn revm_spec_by_timestamp_and_block_number( SpecId::FRONTIER } else { panic!( - "invalid hardfork chainspec: expected at least one hardfork, got {:?}", - chain_spec.hardforks + "invalid hardfork chainspec: expected at least one hardfork, got {}", + chain_spec.display_hardforks() ) } } @@ -199,55 +205,55 @@ mod tests { #[test] fn test_eth_spec() { assert_eq!( - revm_spec(&MAINNET, &Header { timestamp: 1710338135, ..Default::default() }), + revm_spec(&*MAINNET, &Header { timestamp: 1710338135, ..Default::default() }), SpecId::CANCUN ); assert_eq!( - revm_spec(&MAINNET, &Header { timestamp: 1681338455, ..Default::default() }), + revm_spec(&*MAINNET, &Header { timestamp: 1681338455, ..Default::default() }), SpecId::SHANGHAI ); assert_eq!( revm_spec( - &MAINNET, + &*MAINNET, &Header { difficulty: U256::from(10_u128), number: 15537394, ..Default::default() } ), SpecId::MERGE ); assert_eq!( - revm_spec(&MAINNET, &Header { number: 15537394 - 10, ..Default::default() }), + revm_spec(&*MAINNET, &Header { number: 15537394 - 10, ..Default::default() }), SpecId::LONDON ); assert_eq!( - revm_spec(&MAINNET, &Header { number: 12244000 + 10, ..Default::default() }), + revm_spec(&*MAINNET, &Header { number: 12244000 + 10, ..Default::default() }), SpecId::BERLIN ); assert_eq!( - revm_spec(&MAINNET, &Header { number: 12244000 - 10, ..Default::default() }), + revm_spec(&*MAINNET, &Header { number: 12244000 - 10, ..Default::default() }), SpecId::ISTANBUL ); assert_eq!( - revm_spec(&MAINNET, &Header { number: 7280000 + 10, ..Default::default() }), + revm_spec(&*MAINNET, &Header { number: 7280000 + 10, ..Default::default() }), SpecId::PETERSBURG ); assert_eq!( - revm_spec(&MAINNET, &Header { number: 7280000 - 10, ..Default::default() }), + revm_spec(&*MAINNET, &Header { number: 7280000 - 10, ..Default::default() }), SpecId::BYZANTIUM ); assert_eq!( - revm_spec(&MAINNET, &Header { number: 2675000 + 10, ..Default::default() }), + revm_spec(&*MAINNET, &Header { number: 2675000 + 10, ..Default::default() }), SpecId::SPURIOUS_DRAGON ); assert_eq!( - revm_spec(&MAINNET, &Header { number: 2675000 - 10, ..Default::default() }), + revm_spec(&*MAINNET, &Header { number: 2675000 - 10, ..Default::default() }), SpecId::TANGERINE ); assert_eq!( - revm_spec(&MAINNET, &Header { number: 1150000 + 10, ..Default::default() }), + revm_spec(&*MAINNET, &Header { number: 1150000 + 10, ..Default::default() }), SpecId::HOMESTEAD ); assert_eq!( - revm_spec(&MAINNET, &Header { number: 1150000 - 10, ..Default::default() }), + revm_spec(&*MAINNET, &Header { number: 1150000 - 10, ..Default::default() }), SpecId::FRONTIER ); } diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index ad77ae74ea4..c91fe4cee79 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -41,8 +41,9 @@ use revm::{ mod config; use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip7840::BlobParams}; +use alloy_evm::eth::spec::EthExecutorSpec; pub use config::{revm_spec, revm_spec_by_timestamp_and_block_number}; -use reth_ethereum_forks::EthereumHardfork; +use reth_ethereum_forks::{EthereumHardfork, Hardforks}; /// Helper type with backwards 
compatible methods to obtain Ethereum executor /// providers. @@ -67,14 +68,21 @@ pub use test_utils::*; /// Ethereum-related EVM configuration. #[derive(Debug, Clone)] -pub struct EthEvmConfig { +pub struct EthEvmConfig { /// Inner [`EthBlockExecutorFactory`]. - pub executor_factory: EthBlockExecutorFactory, EvmFactory>, + pub executor_factory: EthBlockExecutorFactory, EvmFactory>, /// Ethereum block assembler. - pub block_assembler: EthBlockAssembler, + pub block_assembler: EthBlockAssembler, } impl EthEvmConfig { + /// Creates a new Ethereum EVM configuration for the ethereum mainnet. + pub fn mainnet() -> Self { + Self::ethereum(MAINNET.clone()) + } +} + +impl EthEvmConfig { /// Creates a new Ethereum EVM configuration with the given chain spec. pub fn new(chain_spec: Arc) -> Self { Self::ethereum(chain_spec) @@ -84,14 +92,9 @@ impl EthEvmConfig { pub fn ethereum(chain_spec: Arc) -> Self { Self::new_with_evm_factory(chain_spec, EthEvmFactory::default()) } - - /// Creates a new Ethereum EVM configuration for the ethereum mainnet. - pub fn mainnet() -> Self { - Self::ethereum(MAINNET.clone()) - } } -impl EthEvmConfig { +impl EthEvmConfig { /// Creates a new Ethereum EVM configuration with the given chain spec and EVM factory. pub fn new_with_evm_factory(chain_spec: Arc, evm_factory: EvmFactory) -> Self { Self { @@ -116,8 +119,9 @@ impl EthEvmConfig { } } -impl ConfigureEvm for EthEvmConfig +impl ConfigureEvm for EthEvmConfig where + ChainSpec: EthExecutorSpec + EthChainSpec
+ Hardforks + 'static, EvmF: EvmFactory< Tx: TransactionEnv + FromRecoveredTx @@ -154,7 +158,7 @@ where CfgEnv::new().with_chain_id(self.chain_spec().chain().id()).with_spec(spec); if let Some(blob_params) = &blob_params { - cfg_env.set_blob_max_count(blob_params.max_blob_count); + cfg_env.set_max_blobs_per_tx(blob_params.max_blobs_per_tx); } // derive the EIP-4844 blob fees from the header's `excess_blob_gas` and the current @@ -166,9 +170,9 @@ where }); let block_env = BlockEnv { - number: header.number(), + number: U256::from(header.number()), beneficiary: header.beneficiary(), - timestamp: header.timestamp(), + timestamp: U256::from(header.timestamp()), difficulty: if spec >= SpecId::MERGE { U256::ZERO } else { header.difficulty() }, prevrandao: if spec >= SpecId::MERGE { header.mix_hash() } else { None }, gas_limit: header.gas_limit(), @@ -198,7 +202,7 @@ where CfgEnv::new().with_chain_id(self.chain_spec().chain().id()).with_spec(spec_id); if let Some(blob_params) = &blob_params { - cfg.set_blob_max_count(blob_params.max_blob_count); + cfg.set_max_blobs_per_tx(blob_params.max_blobs_per_tx); } // if the parent block did not have excess blob gas (i.e. it was pre-cancun), but it is @@ -212,9 +216,7 @@ where BlobExcessGasAndPrice { excess_blob_gas, blob_gasprice } }); - let mut basefee = parent.next_block_base_fee( - self.chain_spec().base_fee_params_at_timestamp(attributes.timestamp), - ); + let mut basefee = chain_spec.next_block_base_fee(parent, attributes.timestamp); let mut gas_limit = attributes.gas_limit; @@ -235,9 +237,9 @@ where } let block_env = BlockEnv { - number: parent.number + 1, + number: U256::from(parent.number + 1), beneficiary: attributes.suggested_fee_recipient, - timestamp: attributes.timestamp, + timestamp: U256::from(attributes.timestamp), difficulty: U256::ZERO, prevrandao: Some(attributes.prev_randao), gas_limit, @@ -351,8 +353,12 @@ mod tests { let db = CacheDB::>::default(); // Create customs block and tx env - let block = - BlockEnv { basefee: 1000, gas_limit: 10_000_000, number: 42, ..Default::default() }; + let block = BlockEnv { + basefee: 1000, + gas_limit: 10_000_000, + number: U256::from(42), + ..Default::default() + }; let evm_env = EvmEnv { block_env: block, ..Default::default() }; @@ -418,8 +424,12 @@ mod tests { let db = CacheDB::>::default(); // Create custom block and tx environment - let block = - BlockEnv { basefee: 1000, gas_limit: 10_000_000, number: 42, ..Default::default() }; + let block = BlockEnv { + basefee: 1000, + gas_limit: 10_000_000, + number: U256::from(42), + ..Default::default() + }; let evm_env = EvmEnv { block_env: block, ..Default::default() }; let evm = evm_config.evm_with_env_and_inspector(db, evm_env.clone(), NoOpInspector {}); diff --git a/crates/ethereum/evm/tests/execute.rs b/crates/ethereum/evm/tests/execute.rs index c7f408f3f16..61e0c1c4b66 100644 --- a/crates/ethereum/evm/tests/execute.rs +++ b/crates/ethereum/evm/tests/execute.rs @@ -12,7 +12,10 @@ use alloy_evm::block::BlockValidationError; use alloy_primitives::{b256, fixed_bytes, keccak256, Bytes, TxKind, B256, U256}; use reth_chainspec::{ChainSpecBuilder, EthereumHardfork, ForkCondition, MAINNET}; use reth_ethereum_primitives::{Block, BlockBody, Transaction}; -use reth_evm::{execute::Executor, ConfigureEvm}; +use reth_evm::{ + execute::{BasicBlockExecutor, Executor}, + ConfigureEvm, +}; use reth_evm_ethereum::EthEvmConfig; use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::{ @@ -76,7 +79,7 @@ fn eip_4788_non_genesis_call() { let provider = 
EthEvmConfig::new(chain_spec); - let mut executor = provider.batch_executor(db); + let mut executor = BasicBlockExecutor::new(provider, db); // attempt to execute a block without parent beacon block root, expect err let err = executor @@ -197,7 +200,7 @@ fn eip_4788_empty_account_call() { ..Header::default() }; - let mut executor = provider.batch_executor(db); + let mut executor = BasicBlockExecutor::new(provider, db); // attempt to execute an empty block with parent beacon block root, this should not fail executor @@ -230,7 +233,7 @@ fn eip_4788_genesis_call() { let mut header = chain_spec.genesis_header().clone(); let provider = EthEvmConfig::new(chain_spec); - let mut executor = provider.batch_executor(db); + let mut executor = BasicBlockExecutor::new(provider, db); // attempt to execute the genesis block with non-zero parent beacon block root, expect err header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); @@ -291,7 +294,7 @@ fn eip_4788_high_base_fee() { let provider = EthEvmConfig::new(chain_spec); // execute header - let mut executor = provider.batch_executor(db); + let mut executor = BasicBlockExecutor::new(provider, db); // Now execute a block with the fixed header, ensure that it does not fail executor @@ -354,7 +357,7 @@ fn eip_2935_pre_fork() { ); let provider = EthEvmConfig::new(chain_spec); - let mut executor = provider.batch_executor(db); + let mut executor = BasicBlockExecutor::new(provider, db); // construct the header for block one let header = Header { timestamp: 1, number: 1, ..Header::default() }; @@ -392,7 +395,7 @@ fn eip_2935_fork_activation_genesis() { let header = chain_spec.genesis_header().clone(); let provider = EthEvmConfig::new(chain_spec); - let mut executor = provider.batch_executor(db); + let mut executor = BasicBlockExecutor::new(provider, db); // attempt to execute genesis block, this should not fail executor @@ -436,7 +439,7 @@ fn eip_2935_fork_activation_within_window_bounds() { ..Header::default() }; let provider = EthEvmConfig::new(chain_spec); - let mut executor = provider.batch_executor(db); + let mut executor = BasicBlockExecutor::new(provider, db); // attempt to execute the fork activation block, this should not fail executor @@ -478,7 +481,7 @@ fn eip_2935_fork_activation_outside_window_bounds() { ); let provider = EthEvmConfig::new(chain_spec); - let mut executor = provider.batch_executor(db); + let mut executor = BasicBlockExecutor::new(provider, db); let header = Header { parent_hash: B256::random(), @@ -520,7 +523,7 @@ fn eip_2935_state_transition_inside_fork() { let header_hash = header.hash_slow(); let provider = EthEvmConfig::new(chain_spec); - let mut executor = provider.batch_executor(db); + let mut executor = BasicBlockExecutor::new(provider, db); // attempt to execute the genesis block, this should not fail executor diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 74b5867bca2..d3266bbb21b 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -26,8 +26,8 @@ reth-evm.workspace = true reth-evm-ethereum.workspace = true reth-consensus.workspace = true reth-rpc.workspace = true -reth-rpc-builder.workspace = true reth-rpc-api.workspace = true +reth-rpc-builder.workspace = true reth-rpc-server-types.workspace = true reth-node-api.workspace = true reth-chainspec.workspace = true diff --git a/crates/ethereum/node/src/engine.rs b/crates/ethereum/node/src/engine.rs index f6c26dbfb25..14e1f4eff2a 100644 --- a/crates/ethereum/node/src/engine.rs +++ 
b/crates/ethereum/node/src/engine.rs @@ -5,7 +5,7 @@ pub use alloy_rpc_types_engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, ExecutionPayloadV1, PayloadAttributes as EthPayloadAttributes, }; -use reth_chainspec::ChainSpec; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_engine_primitives::{EngineValidator, PayloadValidator}; use reth_ethereum_payload_builder::EthereumExecutionPayloadValidator; use reth_ethereum_primitives::Block; @@ -19,11 +19,11 @@ use std::sync::Arc; /// Validator for the ethereum engine API. #[derive(Debug, Clone)] -pub struct EthereumEngineValidator { +pub struct EthereumEngineValidator { inner: EthereumExecutionPayloadValidator, } -impl EthereumEngineValidator { +impl EthereumEngineValidator { /// Instantiates a new validator. pub const fn new(chain_spec: Arc) -> Self { Self { inner: EthereumExecutionPayloadValidator::new(chain_spec) } @@ -36,7 +36,10 @@ impl EthereumEngineValidator { } } -impl PayloadValidator for EthereumEngineValidator { +impl PayloadValidator for EthereumEngineValidator +where + ChainSpec: EthChainSpec + EthereumHardforks + 'static, +{ type Block = Block; type ExecutionData = ExecutionData; @@ -49,8 +52,9 @@ impl PayloadValidator for EthereumEngineValidator { } } -impl EngineValidator for EthereumEngineValidator +impl EngineValidator for EthereumEngineValidator where + ChainSpec: EthChainSpec + EthereumHardforks + 'static, Types: PayloadTypes, { fn validate_version_specific_fields( diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index e8c9d002eb6..02ebacdb7d7 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -3,14 +3,18 @@ pub use crate::{payload::EthereumPayloadBuilder, EthereumEngineValidator}; use crate::{EthEngineTypes, EthEvmConfig}; use alloy_eips::{eip7840::BlobParams, merge::EPOCH_SLOTS}; +use alloy_rpc_types_engine::ExecutionData; use reth_chainspec::{ChainSpec, EthChainSpec, EthereumHardforks, Hardforks}; use reth_consensus::{ConsensusError, FullConsensus}; +use reth_engine_primitives::EngineTypes; use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::{ EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, }; use reth_ethereum_primitives::{EthPrimitives, TransactionSigned}; -use reth_evm::{ConfigureEvm, EvmFactory, EvmFactoryFor, NextBlockEnvAttributes}; +use reth_evm::{ + eth::spec::EthExecutorSpec, ConfigureEvm, EvmFactory, EvmFactoryFor, NextBlockEnvAttributes, +}; use reth_network::{primitives::BasicNetworkPrimitives, NetworkHandle, PeersInfo}; use reth_node_api::{ AddOnsContext, FullNodeComponents, NodeAddOns, NodePrimitives, PrimitivesTy, TxTy, @@ -22,8 +26,8 @@ use reth_node_builder::{ }, node::{FullNodeTypes, NodeTypes}, rpc::{ - EngineValidatorAddOn, EngineValidatorBuilder, EthApiBuilder, EthApiCtx, RethRpcAddOns, - RpcAddOns, RpcHandle, + BasicEngineApiBuilder, EngineApiBuilder, EngineValidatorAddOn, EngineValidatorBuilder, + EthApiBuilder, EthApiCtx, RethRpcAddOns, RpcAddOns, RpcHandle, }, BuilderContext, DebugNode, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, @@ -59,7 +63,12 @@ impl EthereumNode { EthereumConsensusBuilder, > where - Node: FullNodeTypes>, + Node: FullNodeTypes< + Types: NodeTypes< + ChainSpec: Hardforks + EthereumHardforks + EthExecutorSpec, + Primitives = EthPrimitives, + >, + >, ::Payload: PayloadTypes< BuiltPayload = EthBuiltPayload, PayloadAttributes = EthPayloadAttributes, @@ -152,36 +161,76 @@ 
where /// Add-ons w.r.t. l1 ethereum. #[derive(Debug)] -pub struct EthereumAddOns +pub struct EthereumAddOns< + N: FullNodeComponents, + EthB: EthApiBuilder, + EV, + EB = BasicEngineApiBuilder, +> { + inner: RpcAddOns, +} + +impl Default for EthereumAddOns where - EthApiFor: FullEthApiServer, + N: FullNodeComponents, + EthereumEthApiBuilder: EthApiBuilder, { - inner: RpcAddOns, + fn default() -> Self { + Self { + inner: RpcAddOns::new( + EthereumEthApiBuilder, + EthereumEngineValidatorBuilder::default(), + BasicEngineApiBuilder::default(), + Default::default(), + ), + } + } } -impl Default for EthereumAddOns +impl EthereumAddOns where - EthApiFor: FullEthApiServer, + N: FullNodeComponents, + EthB: EthApiBuilder, { - fn default() -> Self { - Self { inner: Default::default() } + /// Replace the engine API builder. + pub fn with_engine_api(self, engine_api_builder: T) -> EthereumAddOns + where + T: Send, + { + let Self { inner } = self; + EthereumAddOns { inner: inner.with_engine_api(engine_api_builder) } + } + + /// Replace the engine validator builder. + pub fn with_engine_validator( + self, + engine_validator_builder: T, + ) -> EthereumAddOns + where + T: Send, + { + let Self { inner } = self; + EthereumAddOns { inner: inner.with_engine_validator(engine_validator_builder) } } } -impl NodeAddOns for EthereumAddOns +impl NodeAddOns for EthereumAddOns where N: FullNodeComponents< Types: NodeTypes< - ChainSpec = ChainSpec, + ChainSpec: EthChainSpec + EthereumHardforks, Primitives = EthPrimitives, - Payload = EthEngineTypes, + Payload: EngineTypes, >, Evm: ConfigureEvm, >, + EthB: EthApiBuilder, + EV: EngineValidatorBuilder, + EB: EngineApiBuilder, EthApiError: FromEvmError, EvmFactoryFor: EvmFactory, { - type Handle = RpcHandle>; + type Handle = RpcHandle; async fn launch_add_ons( self, @@ -209,41 +258,49 @@ where } } -impl RethRpcAddOns for EthereumAddOns +impl RethRpcAddOns for EthereumAddOns where N: FullNodeComponents< Types: NodeTypes< - ChainSpec = ChainSpec, + ChainSpec: EthChainSpec + EthereumHardforks, Primitives = EthPrimitives, - Payload = EthEngineTypes, + Payload: EngineTypes, >, Evm: ConfigureEvm, >, + EthB: EthApiBuilder, + EV: EngineValidatorBuilder, + EB: EngineApiBuilder, EthApiError: FromEvmError, EvmFactoryFor: EvmFactory, { - type EthApi = EthApiFor; + type EthApi = EthB::EthApi; fn hooks_mut(&mut self) -> &mut reth_node_builder::rpc::RpcHooks { self.inner.hooks_mut() } } -impl EngineValidatorAddOn for EthereumAddOns +impl EngineValidatorAddOn for EthereumAddOns where N: FullNodeComponents< Types: NodeTypes< - ChainSpec = ChainSpec, + ChainSpec: EthChainSpec + EthereumHardforks, Primitives = EthPrimitives, - Payload = EthEngineTypes, + Payload: EngineTypes, >, + Evm: ConfigureEvm, >, - EthApiFor: FullEthApiServer, + EthB: EthApiBuilder, + EV: EngineValidatorBuilder, + EB: EngineApiBuilder, + EthApiError: FromEvmError, + EvmFactoryFor: EvmFactory, { - type Validator = EthereumEngineValidator; + type Validator = EV::Validator; async fn engine_validator(&self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { - EthereumEngineValidatorBuilder::default().build(ctx).await + self.inner.engine_validator(ctx).await } } @@ -262,6 +319,8 @@ where type AddOns = EthereumAddOns< NodeAdapter>::Components>, + EthereumEthApiBuilder, + EthereumEngineValidatorBuilder, >; fn components_builder(&self) -> Self::ComponentsBuilder { @@ -288,10 +347,13 @@ pub struct EthereumExecutorBuilder; impl ExecutorBuilder for EthereumExecutorBuilder where - Types: NodeTypes, + Types: NodeTypes< + ChainSpec: 
Hardforks + EthExecutorSpec + EthereumHardforks, + Primitives = EthPrimitives, + >, Node: FullNodeTypes, { - type EVM = EthEvmConfig; + type EVM = EthEvmConfig; async fn build_evm(self, ctx: &BuilderContext) -> eyre::Result { let evm_config = EthEvmConfig::new(ctx.chain_spec()) @@ -345,6 +407,7 @@ where let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone()) .with_head_timestamp(ctx.head().timestamp) + .with_max_tx_input_bytes(ctx.config().txpool.max_tx_input_bytes) .kzg_settings(ctx.kzg_settings()?) .with_local_transactions_config(pool_config.local_transactions_config.clone()) .set_tx_fee_cap(ctx.config().rpc.rpc_tx_fee_cap) @@ -398,7 +461,9 @@ pub struct EthereumConsensusBuilder { impl ConsensusBuilder for EthereumConsensusBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes< + Types: NodeTypes, + >, { type Consensus = Arc>; @@ -410,14 +475,23 @@ where /// Builder for [`EthereumEngineValidator`]. #[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct EthereumEngineValidatorBuilder; +pub struct EthereumEngineValidatorBuilder { + _phantom: std::marker::PhantomData, +} -impl EngineValidatorBuilder for EthereumEngineValidatorBuilder +impl EngineValidatorBuilder + for EthereumEngineValidatorBuilder where - Types: NodeTypes, + Types: NodeTypes< + ChainSpec = ChainSpec, + Payload: EngineTypes + + PayloadTypes, + Primitives = EthPrimitives, + >, Node: FullNodeComponents, + ChainSpec: EthChainSpec + EthereumHardforks + Clone + 'static, { - type Validator = EthereumEngineValidator; + type Validator = EthereumEngineValidator; async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result { Ok(EthereumEngineValidator::new(ctx.config.chain.clone())) diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs index 57462fbfc6d..ea49d8b3c8e 100644 --- a/crates/ethereum/node/tests/e2e/rpc.rs +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -1,5 +1,5 @@ use crate::utils::eth_payload_attributes; -use alloy_eips::{calc_next_block_base_fee, eip2718::Encodable2718}; +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{Address, B256, U256}; use alloy_provider::{network::EthereumWallet, Provider, ProviderBuilder, SendableTx}; use alloy_rpc_types_beacon::relay::{ @@ -9,7 +9,7 @@ use alloy_rpc_types_beacon::relay::{ use alloy_rpc_types_engine::{BlobsBundleV1, ExecutionPayloadV3}; use alloy_rpc_types_eth::TransactionRequest; use rand::{rngs::StdRng, Rng, SeedableRng}; -use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_chainspec::{ChainSpecBuilder, EthChainSpec, MAINNET}; use reth_e2e_test_utils::setup_engine; use reth_node_ethereum::EthereumNode; use reth_payload_primitives::BuiltPayload; @@ -98,14 +98,9 @@ async fn test_fee_history() -> eyre::Result<()> { .unwrap() .header; for block in (latest_block + 2 - block_count)..=latest_block { - let expected_base_fee = calc_next_block_base_fee( - prev_header.gas_used, - prev_header.gas_limit, - prev_header.base_fee_per_gas.unwrap(), - chain_spec.base_fee_params_at_block(block), - ); - let header = provider.get_block_by_number(block.into()).await?.unwrap().header; + let expected_base_fee = + chain_spec.next_block_base_fee(&prev_header, header.timestamp).unwrap(); assert_eq!(header.base_fee_per_gas.unwrap(), expected_base_fee); assert_eq!( diff --git a/crates/ethereum/node/tests/it/builder.rs b/crates/ethereum/node/tests/it/builder.rs index e3d78182ed5..91dfd683efe 100644 --- a/crates/ethereum/node/tests/it/builder.rs +++ b/crates/ethereum/node/tests/it/builder.rs @@ -50,15 
+50,20 @@ async fn test_eth_launcher() { let _builder = NodeBuilder::new(config) .with_database(db) + .with_launch_context(tasks.executor()) .with_types_and_provider::>>, >>() .with_components(EthereumNode::components()) .with_add_ons(EthereumAddOns::default()) + .apply(|builder| { + let _ = builder.db(); + builder + }) .launch_with_fn(|builder| { let launcher = EngineNodeLauncher::new( tasks.executor(), - builder.config.datadir(), + builder.config().datadir(), Default::default(), ); builder.launch_with(launcher) diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 9fd7145033f..603e7ab74e5 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -307,7 +307,7 @@ where } } - // update add to total fees + // update and add to total fees let miner_fee = tx.effective_tip_per_gas(base_fee).expect("fee is always valid; execution succeeded"); total_fees += U256::from(miner_fee) * U256::from(gas_used); diff --git a/crates/ethereum/primitives/src/receipt.rs b/crates/ethereum/primitives/src/receipt.rs index 4d947661f5c..ffc06c7fc82 100644 --- a/crates/ethereum/primitives/src/receipt.rs +++ b/crates/ethereum/primitives/src/receipt.rs @@ -283,8 +283,6 @@ impl InMemorySize for Receipt { } } -impl reth_primitives_traits::Receipt for Receipt {} - impl From> for Receipt where T: Into, @@ -438,13 +436,13 @@ pub(super) mod serde_bincode_compat { #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] struct Data { #[serde_as(as = "serde_bincode_compat::Receipt<'_>")] - reseipt: Receipt, + receipt: Receipt, } let mut bytes = [0u8; 1024]; rand::rng().fill(bytes.as_mut_slice()); let data = Data { - reseipt: Receipt::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(), + receipt: Receipt::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(), }; let encoded = bincode::serialize(&data).unwrap(); let decoded: Data = bincode::deserialize(&encoded).unwrap(); diff --git a/crates/ethereum/reth/Cargo.toml b/crates/ethereum/reth/Cargo.toml index f6f45922583..0522e6f84dc 100644 --- a/crates/ethereum/reth/Cargo.toml +++ b/crates/ethereum/reth/Cargo.toml @@ -19,6 +19,7 @@ reth-network-api = { workspace = true, optional = true } reth-eth-wire = { workspace = true, optional = true } reth-provider = { workspace = true, optional = true } reth-db = { workspace = true, optional = true, features = ["mdbx"] } +reth-codecs = { workspace = true, optional = true } reth-storage-api = { workspace = true, optional = true } reth-node-api = { workspace = true, optional = true } reth-node-core = { workspace = true, optional = true } @@ -33,6 +34,7 @@ reth-rpc-eth-types = { workspace = true, optional = true } reth-rpc-builder = { workspace = true, optional = true } reth-exex = { workspace = true, optional = true } reth-trie = { workspace = true, optional = true } +reth-trie-db = { workspace = true, optional = true } reth-node-builder = { workspace = true, optional = true } reth-tasks = { workspace = true, optional = true } reth-cli-util = { workspace = true, optional = true } @@ -74,6 +76,7 @@ arbitrary = [ "reth-transaction-pool?/arbitrary", "reth-eth-wire?/arbitrary", "alloy-rpc-types-engine?/arbitrary", + "reth-codecs?/arbitrary", ] test-utils = [ @@ -91,6 +94,8 @@ test-utils = [ "reth-transaction-pool?/test-utils", "reth-evm-ethereum?/test-utils", "reth-node-builder?/test-utils", + "reth-trie-db?/test-utils", + "reth-codecs?/test-utils", ] full = [ @@ -122,7 +127,7 @@ node = [ "dep:reth-node-ethereum", 
"dep:reth-node-builder", "rpc", - "trie", + "trie-db", ] pool = ["dep:reth-transaction-pool"] rpc = [ @@ -137,6 +142,7 @@ rpc = [ tasks = ["dep:reth-tasks"] js-tracer = ["rpc", "reth-rpc/js-tracer"] network = ["dep:reth-network", "tasks", "dep:reth-network-api", "dep:reth-eth-wire"] -provider = ["storage-api", "tasks", "dep:reth-provider", "dep:reth-db"] +provider = ["storage-api", "tasks", "dep:reth-provider", "dep:reth-db", "dep:reth-codecs"] storage-api = ["dep:reth-storage-api"] trie = ["dep:reth-trie"] +trie-db = ["trie", "dep:reth-trie-db"] diff --git a/crates/ethereum/reth/src/lib.rs b/crates/ethereum/reth/src/lib.rs index 421cabe9968..2a3a6135495 100644 --- a/crates/ethereum/reth/src/lib.rs +++ b/crates/ethereum/reth/src/lib.rs @@ -91,6 +91,10 @@ pub mod provider { pub use reth_db as db; } +/// Re-exported codec crate +#[cfg(feature = "provider")] +pub use reth_codecs as codec; + /// Re-exported reth storage api types #[cfg(feature = "storage-api")] pub mod storage { @@ -116,6 +120,10 @@ pub mod node { pub mod trie { #[doc(inline)] pub use reth_trie::*; + + #[cfg(feature = "trie-db")] + #[doc(inline)] + pub use reth_trie_db::*; } /// Re-exported rpc types diff --git a/crates/evm/evm/Cargo.toml b/crates/evm/evm/Cargo.toml index 00c29072c42..c1c830168a1 100644 --- a/crates/evm/evm/Cargo.toml +++ b/crates/evm/evm/Cargo.toml @@ -29,7 +29,7 @@ alloy-evm.workspace = true alloy-consensus.workspace = true # scroll -scroll-alloy-evm = { workspace = true, optional = true } +scroll-alloy-evm = { workspace = true, optional = true, default-features = false } auto_impl.workspace = true derive_more.workspace = true diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 5c9f22334d1..148cadf0cfc 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -142,6 +142,40 @@ pub struct ExecuteOutput { } /// Input for block building. Consumed by [`BlockAssembler`]. +/// +/// This struct contains all the data needed by the [`BlockAssembler`] to create +/// a complete block after transaction execution. +/// +/// # Fields Overview +/// +/// - `evm_env`: The EVM configuration used during execution (spec ID, block env, etc.) +/// - `execution_ctx`: Additional context like withdrawals and ommers +/// - `parent`: The parent block header this block builds on +/// - `transactions`: All transactions that were successfully executed +/// - `output`: Execution results including receipts and gas used +/// - `bundle_state`: Accumulated state changes from all transactions +/// - `state_provider`: Access to the current state for additional lookups +/// - `state_root`: The calculated state root after all changes +/// +/// # Usage +/// +/// This is typically created internally by [`BlockBuilder::finish`] after all +/// transactions have been executed: +/// +/// ```rust,ignore +/// let input = BlockAssemblerInput { +/// evm_env: builder.evm_env(), +/// execution_ctx: builder.context(), +/// parent: &parent_header, +/// transactions: executed_transactions, +/// output: &execution_result, +/// bundle_state: &state_changes, +/// state_provider: &state, +/// state_root: calculated_root, +/// }; +/// +/// let block = assembler.assemble_block(input)?; +/// ``` #[derive(derive_more::Debug)] #[non_exhaustive] pub struct BlockAssemblerInput<'a, 'b, F: BlockExecutorFactory, H = Header> { @@ -166,7 +200,48 @@ pub struct BlockAssemblerInput<'a, 'b, F: BlockExecutorFactory, H = Header> { pub state_root: B256, } -/// A type that knows how to assemble a block. 
+/// A type that knows how to assemble a block from execution results. +/// +/// The [`BlockAssembler`] is the final step in block production. After transactions +/// have been executed by the [`BlockExecutor`], the assembler takes all the execution +/// outputs and creates a properly formatted block. +/// +/// # Responsibilities +/// +/// The assembler is responsible for: +/// - Setting the correct block header fields (gas used, receipts root, logs bloom, etc.) +/// - Including the executed transactions in the correct order +/// - Setting the state root from the post-execution state +/// - Applying any chain-specific rules or adjustments +/// +/// # Example Flow +/// +/// ```rust,ignore +/// // 1. Execute transactions and get results +/// let execution_result = block_executor.finish()?; +/// +/// // 2. Calculate state root from changes +/// let state_root = state_provider.state_root(&bundle_state)?; +/// +/// // 3. Assemble the final block +/// let block = assembler.assemble_block(BlockAssemblerInput { +/// evm_env, // Environment used during execution +/// execution_ctx, // Context like withdrawals, ommers +/// parent, // Parent block header +/// transactions, // Executed transactions +/// output, // Execution results (receipts, gas) +/// bundle_state, // All state changes +/// state_provider, // For additional lookups if needed +/// state_root, // Computed state root +/// })?; +/// ``` +/// +/// # Relationship with Block Building +/// +/// The assembler works together with: +/// - `NextBlockEnvAttributes`: Provides the configuration for the new block +/// - [`BlockExecutor`]: Executes transactions and produces results +/// - [`BlockBuilder`]: Orchestrates the entire process and calls the assembler #[auto_impl::auto_impl(&, Arc)] pub trait BlockAssembler { /// The block type produced by the assembler. diff --git a/crates/evm/evm/src/lib.rs b/crates/evm/evm/src/lib.rs index bafd973adb0..ed8d6d1cde7 100644 --- a/crates/evm/evm/src/lib.rs +++ b/crates/evm/evm/src/lib.rs @@ -17,7 +17,7 @@ extern crate alloc; -use crate::execute::BasicBlockBuilder; +use crate::execute::{BasicBlockBuilder, Executor}; use alloc::vec::Vec; use alloy_eips::{ eip2718::{EIP2930_TX_TYPE_ID, LEGACY_TX_TYPE_ID}, @@ -31,6 +31,7 @@ use alloy_evm::{ use alloy_primitives::{Address, B256}; use core::{error::Error, fmt::Debug}; use execute::{BasicBlockExecutor, BlockAssembler, BlockBuilder}; +use reth_execution_errors::BlockExecutionError; use reth_primitives_traits::{ BlockTy, HeaderTy, NodePrimitives, ReceiptTy, SealedBlock, SealedHeader, TxTy, }; @@ -60,37 +61,118 @@ pub use alloy_evm::block::state_changes as state_change; /// A complete configuration of EVM for Reth. /// /// This trait encapsulates complete configuration required for transaction execution and block -/// execution/building. +/// execution/building, providing a unified interface for EVM operations. +/// +/// # Architecture Overview /// /// The EVM abstraction consists of the following layers: -/// - [`Evm`] produced by [`EvmFactory`]: The EVM implementation responsilble for executing -/// individual transactions and producing output for them including state changes, logs, gas -/// usage, etc. -/// - [`BlockExecutor`] produced by [`BlockExecutorFactory`]: Executor operates on top of -/// [`Evm`] and is responsible for executing entire blocks. This is different from simply -/// aggregating outputs of transactions execution as it also involves higher level state -/// changes such as receipt building, applying block rewards, system calls, etc. 
-/// - [`BlockAssembler`]: Encapsulates logic for assembling blocks. It operates on context and -/// output of [`BlockExecutor`], and is required to know how to assemble a next block to -/// include in the chain. -/// -/// All of the above components need configuration environment which we are abstracting over to -/// allow plugging EVM implementation into Reth SDK. -/// -/// The abstraction is designed to serve 2 codepaths: -/// 1. Externally provided complete block (e.g received while syncing). -/// 2. Block building when we know parent block and some additional context obtained from -/// payload attributes or alike. -/// -/// First case is handled by [`ConfigureEvm::evm_env`] and [`ConfigureEvm::context_for_block`] -/// which implement a conversion from [`NodePrimitives::Block`] to [`EvmEnv`] and [`ExecutionCtx`], -/// and allow configuring EVM and block execution environment at a given block. -/// -/// Second case is handled by similar [`ConfigureEvm::next_evm_env`] and -/// [`ConfigureEvm::context_for_next_block`] which take parent [`NodePrimitives::BlockHeader`] -/// along with [`NextBlockEnvCtx`]. [`NextBlockEnvCtx`] is very similar to payload attributes and -/// simply contains context for next block that is generally received from a CL node (timestamp, -/// beneficiary, withdrawals, etc.). +/// +/// 1. **[`Evm`] (produced by [`EvmFactory`])**: The core EVM implementation responsible for +/// executing individual transactions and producing outputs including state changes, logs, gas +/// usage, etc. +/// +/// 2. **[`BlockExecutor`] (produced by [`BlockExecutorFactory`])**: A higher-level component that +/// operates on top of [`Evm`] to execute entire blocks. This involves: +/// - Executing all transactions in sequence +/// - Building receipts from transaction outputs +/// - Applying block rewards to the beneficiary +/// - Executing system calls (e.g., EIP-4788 beacon root updates) +/// - Managing state changes and bundle accumulation +/// +/// 3. **[`BlockAssembler`]**: Responsible for assembling valid blocks from executed transactions. +/// It takes the output from [`BlockExecutor`] along with execution context and produces a +/// complete block ready for inclusion in the chain. +/// +/// # Usage Patterns +/// +/// The abstraction supports two primary use cases: +/// +/// ## 1. Executing Externally Provided Blocks (e.g., during sync) +/// +/// ```rust,ignore +/// use reth_evm::ConfigureEvm; +/// +/// // Execute a received block +/// let mut executor = evm_config.executor(state_db); +/// let output = executor.execute(&block)?; +/// +/// // Access the execution results +/// println!("Gas used: {}", output.result.gas_used); +/// println!("Receipts: {:?}", output.result.receipts); +/// ``` +/// +/// ## 2. Building New Blocks (e.g., payload building) +/// +/// Payload building is slightly different as it doesn't have the block's header yet, but rather +/// attributes for the block's environment, such as timestamp, fee recipient, and randomness value. +/// The block's header will be the outcome of the block building process. 
+/// +/// ```rust,ignore +/// use reth_evm::{ConfigureEvm, NextBlockEnvAttributes}; +/// +/// // Create attributes for the next block +/// let attributes = NextBlockEnvAttributes { +/// timestamp: current_time + 12, +/// suggested_fee_recipient: beneficiary_address, +/// prev_randao: randomness_value, +/// gas_limit: 30_000_000, +/// withdrawals: Some(withdrawals), +/// parent_beacon_block_root: Some(beacon_root), +/// }; +/// +/// // Build a new block on top of parent +/// let mut builder = evm_config.builder_for_next_block( +/// &mut state_db, +/// &parent_header, +/// attributes +/// )?; +/// +/// // Apply pre-execution changes (e.g., beacon root update) +/// builder.apply_pre_execution_changes()?; +/// +/// // Execute transactions +/// for tx in pending_transactions { +/// match builder.execute_transaction(tx) { +/// Ok(gas_used) => { +/// println!("Transaction executed, gas used: {}", gas_used); +/// } +/// Err(e) => { +/// println!("Transaction failed: {:?}", e); +/// } +/// } +/// } +/// +/// // Finish block building and get the outcome (block) +/// let outcome = builder.finish(state_provider)?; +/// let block = outcome.block; +/// ``` +/// +/// # Key Components +/// +/// ## [`NextBlockEnvCtx`] +/// +/// Contains attributes needed to configure the next block that cannot be derived from the +/// parent block alone. This includes data typically provided by the consensus layer: +/// - `timestamp`: Block timestamp +/// - `suggested_fee_recipient`: Beneficiary address +/// - `prev_randao`: Randomness value +/// - `gas_limit`: Block gas limit +/// - `withdrawals`: Consensus layer withdrawals +/// - `parent_beacon_block_root`: EIP-4788 beacon root +/// +/// ## [`BlockAssembler`] +/// +/// Takes the execution output and produces a complete block. It receives: +/// - Transaction execution results (receipts, gas used) +/// - Final state root after all executions +/// - Bundle state with all changes +/// - Execution context and environment +/// +/// The assembler is responsible for: +/// - Setting the correct block header fields +/// - Including executed transactions +/// - Setting gas used and receipts root +/// - Applying any chain-specific rules /// /// [`ExecutionCtx`]: BlockExecutorFactory::ExecutionCtx /// [`NextBlockEnvCtx`]: ConfigureEvm::NextBlockEnvCtx @@ -140,6 +222,16 @@ pub trait ConfigureEvm: Clone + Debug + Send + Sync + Unpin { /// This is intended for usage in block building after the merge and requires additional /// attributes that can't be derived from the parent block: attributes that are determined by /// the CL, such as the timestamp, suggested fee recipient, and randomness value. + /// + /// # Example + /// + /// ```rust,ignore + /// let evm_env = evm_config.next_evm_env(&parent_header, &attributes)?; + /// // evm_env now contains: + /// // - Correct spec ID based on timestamp and block number + /// // - Block environment with next block's parameters + /// // - Configuration like chain ID and blob parameters + /// ``` fn next_evm_env( &self, parent: &HeaderTy, @@ -243,6 +335,15 @@ pub trait ConfigureEvm: Clone + Debug + Send + Sync + Unpin { /// interface. Builder collects all of the executed transactions, and once /// [`BlockBuilder::finish`] is called, it invokes the configured [`BlockAssembler`] to /// create a block. 
+ /// + /// # Example + /// + /// ```rust,ignore + /// // Create a builder with specific EVM configuration + /// let evm = evm_config.evm_with_env(&mut state_db, evm_env); + /// let ctx = evm_config.context_for_next_block(&parent, attributes); + /// let builder = evm_config.create_block_builder(evm, &parent, ctx); + /// ``` fn create_block_builder<'a, DB, I>( &'a self, evm: EvmFor, I>, @@ -267,6 +368,33 @@ pub trait ConfigureEvm: Clone + Debug + Send + Sync + Unpin { /// Creates a [`BlockBuilder`] for building of a new block. This is a helper to invoke /// [`ConfigureEvm::create_block_builder`]. + /// + /// This is the primary method for building new blocks. It combines: + /// 1. Creating the EVM environment for the next block + /// 2. Setting up the execution context from attributes + /// 3. Initializing the block builder with proper configuration + /// + /// # Example + /// + /// ```rust,ignore + /// // Build a block with specific attributes + /// let mut builder = evm_config.builder_for_next_block( + /// &mut state_db, + /// &parent_header, + /// attributes + /// )?; + /// + /// // Execute system calls (e.g., beacon root update) + /// builder.apply_pre_execution_changes()?; + /// + /// // Execute transactions + /// for tx in transactions { + /// builder.execute_transaction(tx)?; + /// } + /// + /// // Complete block building + /// let outcome = builder.finish(state_provider)?; + /// ``` fn builder_for_next_block<'a, DB: Database>( &'a self, db: &'a mut State, @@ -279,23 +407,76 @@ pub trait ConfigureEvm: Clone + Debug + Send + Sync + Unpin { Ok(self.create_block_builder(evm, parent, ctx)) } - /// Returns a new [`BasicBlockExecutor`]. + /// Returns a new [`Executor`] for executing blocks. + /// + /// The executor processes complete blocks including: + /// - All transactions in order + /// - Block rewards and fees + /// - Block level system calls + /// - State transitions + /// + /// # Example + /// + /// ```rust,ignore + /// // Create an executor + /// let mut executor = evm_config.executor(state_db); + /// + /// // Execute a single block + /// let output = executor.execute(&block)?; + /// + /// // Execute multiple blocks + /// let batch_output = executor.execute_batch(&blocks)?; + /// ``` #[auto_impl(keep_default_for(&, Arc))] - fn executor(&self, db: DB) -> BasicBlockExecutor<&Self, DB> { + fn executor( + &self, + db: DB, + ) -> impl Executor { BasicBlockExecutor::new(self, db) } /// Returns a new [`BasicBlockExecutor`]. #[auto_impl(keep_default_for(&, Arc))] - fn batch_executor(&self, db: DB) -> BasicBlockExecutor<&Self, DB> { + fn batch_executor( + &self, + db: DB, + ) -> impl Executor { BasicBlockExecutor::new(self, db) } } /// Represents additional attributes required to configure the next block. -/// This is used to configure the next block's environment -/// [`ConfigureEvm::next_evm_env`] and contains fields that can't be derived from the -/// parent header alone (attributes that are determined by the CL.) +/// +/// This struct contains all the information needed to build a new block that cannot be +/// derived from the parent block header alone. These attributes are typically provided +/// by the consensus layer (CL) through the Engine API during payload building. +/// +/// # Relationship with [`ConfigureEvm`] and [`BlockAssembler`] +/// +/// The flow for building a new block involves: +/// +/// 1. 
**Receive attributes** from the consensus layer containing: +/// - Timestamp for the new block +/// - Fee recipient (coinbase/beneficiary) +/// - Randomness value (prevRandao) +/// - Withdrawals to process +/// - Parent beacon block root for EIP-4788 +/// +/// 2. **Configure EVM environment** using these attributes: +/// ```rust,ignore +/// let evm_env = evm_config.next_evm_env(&parent, &attributes)?; +/// ``` +/// +/// 3. **Build the block** with transactions: +/// ```rust,ignore +/// let mut builder = evm_config.builder_for_next_block(&mut state, &parent, attributes)?; +/// ``` +/// +/// 4. **Assemble the final block** using [`BlockAssembler`] which takes: +/// - Execution results from all transactions +/// - The attributes used during execution +/// - Final state root after all changes +/// +/// This design cleanly separates: +/// - **Configuration** (what parameters to use) - handled by `NextBlockEnvAttributes` +/// - **Execution** (running transactions) - handled by `BlockExecutor` +/// - **Assembly** (creating the final block) - handled by `BlockAssembler` #[derive(Debug, Clone, PartialEq, Eq)] pub struct NextBlockEnvAttributes { /// The timestamp of the next block. diff --git a/crates/evm/evm/src/metrics.rs b/crates/evm/evm/src/metrics.rs index b8f6d4f34ee..586c1c154d6 100644 --- a/crates/evm/evm/src/metrics.rs +++ b/crates/evm/evm/src/metrics.rs @@ -278,7 +278,7 @@ mod tests { let state = { let mut state = EvmState::default(); let storage = - EvmStorage::from_iter([(U256::from(1), EvmStorageSlot::new(U256::from(2)))]); + EvmStorage::from_iter([(U256::from(1), EvmStorageSlot::new(U256::from(2), 0))]); state.insert( Default::default(), Account { @@ -290,7 +290,8 @@ ..Default::default() }, storage, - status: AccountStatus::Loaded, + status: AccountStatus::default(), + transaction_id: 0, }, ); state diff --git a/crates/evm/execution-errors/src/trie.rs b/crates/evm/execution-errors/src/trie.rs index b8a1a3e9bd3..7c5ad72b1cb 100644 --- a/crates/evm/execution-errors/src/trie.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -170,6 +170,12 @@ pub enum SparseTrieErrorKind { /// RLP error. #[error(transparent)] Rlp(#[from] alloy_rlp::Error), + /// Node not found in provider during revealing. + #[error("node {path:?} not found in provider during removal")] + NodeNotFoundInProvider { + /// Path to the missing node. + path: Nibbles, + }, /// Other. #[error(transparent)] Other(#[from] Box), diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs index 66c528c14fa..b5537aa88fc 100644 --- a/crates/exex/exex/src/wal/mod.rs +++ b/crates/exex/exex/src/wal/mod.rs @@ -96,7 +96,7 @@ where N: NodePrimitives, { fn new(directory: impl AsRef) -> WalResult { - let mut wal = Self { + let wal = Self { next_file_id: AtomicU32::new(0), storage: Storage::new(directory)?, block_cache: RwLock::new(BlockCache::default()), @@ -112,7 +112,7 @@ where /// Fills the block cache with the notifications from the storage. #[instrument(skip(self))] - fn fill_block_cache(&mut self) -> WalResult<()> { + fn fill_block_cache(&self) -> WalResult<()> { let Some(files_range) = self.storage.files_range()?
else { return Ok(()) }; self.next_file_id.store(files_range.end() + 1, Ordering::Relaxed); diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index a04a9da767e..00bcdcbbf70 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -41,7 +41,10 @@ use reth_node_builder::{ }; use reth_node_core::node_config::NodeConfig; use reth_node_ethereum::{ - node::{EthereumAddOns, EthereumNetworkBuilder, EthereumPayloadBuilder}, + node::{ + EthereumAddOns, EthereumEngineValidatorBuilder, EthereumEthApiBuilder, + EthereumNetworkBuilder, EthereumPayloadBuilder, + }, EthEngineTypes, }; use reth_payload_builder::noop::NoopPayloadBuilderService; @@ -120,14 +123,7 @@ impl NodeTypes for TestNode { impl Node for TestNode where - N: FullNodeTypes< - Types: NodeTypes< - Payload = EthEngineTypes, - ChainSpec = ChainSpec, - Primitives = EthPrimitives, - Storage = EthStorage, - >, - >, + N: FullNodeTypes, { type ComponentsBuilder = ComponentsBuilder< N, @@ -139,6 +135,8 @@ where >; type AddOns = EthereumAddOns< NodeAdapter>::Components>, + EthereumEthApiBuilder, + EthereumEngineValidatorBuilder, >; fn components_builder(&self) -> Self::ComponentsBuilder { diff --git a/crates/metrics/src/common/mpsc.rs b/crates/metrics/src/common/mpsc.rs index 2de8ddf9d53..b347440203f 100644 --- a/crates/metrics/src/common/mpsc.rs +++ b/crates/metrics/src/common/mpsc.rs @@ -11,7 +11,6 @@ use std::{ use tokio::sync::mpsc::{ self, error::{SendError, TryRecvError, TrySendError}, - OwnedPermit, }; use tokio_util::sync::{PollSendError, PollSender}; @@ -144,11 +143,38 @@ impl MeteredSender { Self { sender, metrics: MeteredSenderMetrics::new(scope) } } - /// Tries to acquire a permit to send a message. + /// Tries to acquire a permit to send a message without waiting. /// /// See also [Sender](mpsc::Sender)'s `try_reserve_owned`. - pub fn try_reserve_owned(&self) -> Result, TrySendError>> { - self.sender.clone().try_reserve_owned() + pub fn try_reserve_owned(self) -> Result, TrySendError> { + let Self { sender, metrics } = self; + sender.try_reserve_owned().map(|permit| OwnedPermit::new(permit, metrics.clone())).map_err( + |err| match err { + TrySendError::Full(sender) => TrySendError::Full(Self { sender, metrics }), + TrySendError::Closed(sender) => TrySendError::Closed(Self { sender, metrics }), + }, + ) + } + + /// Waits to acquire a permit to send a message and return owned permit. + /// + /// See also [Sender](mpsc::Sender)'s `reserve_owned`. + pub async fn reserve_owned(self) -> Result, SendError<()>> { + self.sender.reserve_owned().await.map(|permit| OwnedPermit::new(permit, self.metrics)) + } + + /// Waits to acquire a permit to send a message. + /// + /// See also [Sender](mpsc::Sender)'s `reserve`. + pub async fn reserve(&self) -> Result, SendError<()>> { + self.sender.reserve().await.map(|permit| Permit::new(permit, &self.metrics)) + } + + /// Tries to acquire a permit to send a message without waiting. + /// + /// See also [Sender](mpsc::Sender)'s `try_reserve`. + pub fn try_reserve(&self) -> Result, TrySendError<()>> { + self.sender.try_reserve().map(|permit| Permit::new(permit, &self.metrics)) } /// Returns the underlying [Sender](mpsc::Sender). 
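The reserve APIs above hand out metric-aware permits (the `Permit` and `OwnedPermit` wrappers added in the next hunk), so a channel slot can be claimed before the value is produced while `messages_sent_total` is still incremented exactly once per send. A minimal usage sketch (illustrative only; assumes an existing `MeteredSender<u32>` named `sender` inside an async context):

```rust,ignore
// Borrowing variant: reserve capacity, then send without awaiting at the send site.
let permit = sender.reserve().await.expect("channel closed");
permit.send(1); // increments `messages_sent_total` once

// Owned variant: consumes the sender and hands it back after sending, so the
// permit can be moved into another task without borrowing issues.
let owned = sender.try_reserve_owned().expect("channel full or closed");
let sender = owned.send(2); // the MeteredSender comes back for reuse
```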
@@ -193,6 +219,51 @@ impl<T> Clone for MeteredSender<T> { } } +/// A wrapper type around [`OwnedPermit`](mpsc::OwnedPermit) that updates metrics accounting +/// when sending +#[derive(Debug)] +pub struct OwnedPermit<T> { + permit: mpsc::OwnedPermit<T>, + /// Holds metrics for this type + metrics: MeteredSenderMetrics, +} + +impl<T> OwnedPermit<T> { + /// Creates a new [`OwnedPermit`] wrapping the provided [`mpsc::OwnedPermit`] with given metrics + /// handle. + pub const fn new(permit: mpsc::OwnedPermit<T>, metrics: MeteredSenderMetrics) -> Self { + Self { permit, metrics } + } + + /// Sends a value using the reserved capacity and updates metrics accordingly. + pub fn send(self, value: T) -> MeteredSender<T> { + let Self { permit, metrics } = self; + metrics.messages_sent_total.increment(1); + MeteredSender { sender: permit.send(value), metrics } + } +} + +/// A wrapper type around [Permit](mpsc::Permit) that updates metrics accounting +/// when sending +#[derive(Debug)] +pub struct Permit<'a, T> { + permit: mpsc::Permit<'a, T>, + metrics_ref: &'a MeteredSenderMetrics, +} + +impl<'a, T> Permit<'a, T> { + /// Creates a new [`Permit`] wrapping the provided [`mpsc::Permit`] with given metrics ref. + pub const fn new(permit: mpsc::Permit<'a, T>, metrics_ref: &'a MeteredSenderMetrics) -> Self { + Self { permit, metrics_ref } + } + + /// Sends a value using the reserved capacity and updates metrics accordingly. + pub fn send(self, value: T) { + self.metrics_ref.messages_sent_total.increment(1); + self.permit.send(value); + } +} + /// A wrapper type around [Receiver](mpsc::Receiver) that updates metrics on receive. #[derive(Debug)] pub struct MeteredReceiver<T> { @@ -252,7 +323,7 @@ impl<T> Stream for MeteredReceiver<T> { /// Throughput metrics for [`MeteredSender`] #[derive(Clone, Metrics)] #[metrics(dynamic = true)] -struct MeteredSenderMetrics { +pub struct MeteredSenderMetrics { /// Number of messages sent messages_sent_total: Counter, /// Number of failed message deliveries diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 2379f71461e..976ade1728f 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -252,7 +252,12 @@ impl Discv4 { local_node_record.udp_port = local_addr.port(); trace!(target: "discv4", ?local_addr,"opened UDP socket"); - let service = Discv4Service::new(socket, local_addr, local_node_record, secret_key, config); + let mut service = + Discv4Service::new(socket, local_addr, local_node_record, secret_key, config); + + // resolve the external address immediately + service.resolve_external_ip(); + let discv4 = service.handle(); Ok((discv4, service)) } @@ -620,6 +625,15 @@ impl Discv4Service { self.lookup_interval = tokio::time::interval(duration); } + /// Sets the external IP to the configured external IP if the resolver is [`NatResolver::ExternalIp`].
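+ ///
+ /// A minimal sketch of the effect (illustrative; assumes `as_external_ip` yields
+ /// `Some(ip)` only for the [`NatResolver::ExternalIp`] variant, so any other
+ /// resolver is a no-op here and is left to the periodic resolution interval):
+ /// ```rust,ignore
+ /// if let Some(ip) = resolver.as_external_ip() {
+ ///     service.set_external_ip_addr(ip);
+ /// }
+ /// ```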
+ fn resolve_external_ip(&mut self) { + if let Some(r) = &self.resolve_external_ip_interval { + if let Some(external_ip) = r.resolver().as_external_ip() { + self.set_external_ip_addr(external_ip); + } + } + } + /// Sets the given ip address as the node's external IP in the node record announced in /// discovery pub fn set_external_ip_addr(&mut self, external_ip: IpAddr) { diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index fac61392711..c877b673c78 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -169,7 +169,7 @@ impl NewPooledTransactionHashes { matches!(version, EthVersion::Eth67 | EthVersion::Eth66) } Self::Eth68(_) => { - matches!(version, EthVersion::Eth68) + matches!(version, EthVersion::Eth68 | EthVersion::Eth69) } } } diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index 7c47618b5dc..0e54b86222f 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -16,7 +16,7 @@ use crate::{ status::StatusMessage, BlockRangeUpdate, EthNetworkPrimitives, EthVersion, NetworkPrimitives, RawCapabilityMessage, Receipts69, SharedTransactions, }; -use alloc::{boxed::Box, sync::Arc}; +use alloc::{boxed::Box, string::String, sync::Arc}; use alloy_primitives::{ bytes::{Buf, BufMut}, Bytes, @@ -37,6 +37,9 @@ pub enum MessageError { /// Thrown when rlp decoding a message failed. #[error("RLP error: {0}")] RlpError(#[from] alloy_rlp::Error), + /// Other message error with custom message + #[error("{0}")] + Other(String), } /// An `eth` protocol message, containing a message ID and payload. @@ -69,15 +72,9 @@ impl ProtocolMessage { StatusMessage::Eth69(StatusEth69::decode(buf)?) }), EthMessageID::NewBlockHashes => { - if version.is_eth69() { - return Err(MessageError::Invalid(version, EthMessageID::NewBlockHashes)); - } EthMessage::NewBlockHashes(NewBlockHashes::decode(buf)?) } EthMessageID::NewBlock => { - if version.is_eth69() { - return Err(MessageError::Invalid(version, EthMessageID::NewBlock)); - } EthMessage::NewBlock(Box::new(N::NewBlockPayload::decode(buf)?)) } EthMessageID::Transactions => EthMessage::Transactions(Transactions::decode(buf)?), @@ -328,6 +325,7 @@ impl EthMessage { self, Self::PooledTransactions(_) | Self::Receipts(_) | + Self::Receipts69(_) | Self::BlockHeaders(_) | Self::BlockBodies(_) | Self::NodeData(_) diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs index 7fc1000339d..25f08f35efc 100644 --- a/crates/net/eth-wire-types/src/primitives.rs +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -9,8 +9,23 @@ use reth_primitives_traits::{ Block, BlockBody, BlockHeader, BlockTy, NodePrimitives, SignedTransaction, }; -/// Abstraction over primitive types which might appear in network messages. See -/// [`crate::EthMessage`] for more context. +/// Abstraction over primitive types which might appear in network messages. +/// +/// This trait defines the types used in the Ethereum Wire Protocol (devp2p) for +/// peer-to-peer communication. While [`NodePrimitives`] defines the core types +/// used throughout the node (consensus format), `NetworkPrimitives` defines how +/// these types are represented when transmitted over the network. 
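+///
+/// For example, code that is generic over the wire format names these types
+/// through the trait. A minimal sketch (illustrative only; `on_transactions` is
+/// not part of this crate):
+/// ```rust,ignore
+/// fn on_transactions<N: NetworkPrimitives>(txs: Vec<N::BroadcastedTransaction>) {
+///     // decode and validate announced transactions for any primitive set
+/// }
+/// ```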
+/// +/// The key distinction is in transaction handling: +/// - [`NodePrimitives`] defines `SignedTx` - the consensus format stored in blocks +/// - `NetworkPrimitives` defines `BroadcastedTransaction` and `PooledTransaction` - the formats +/// used for network propagation with additional data like blob sidecars +/// +/// These traits work together through implementations like [`NetPrimitivesFor`], +/// which ensures type compatibility between a node's internal representation and +/// its network representation. +/// +/// See [`crate::EthMessage`] for more context. pub trait NetworkPrimitives: Send + Sync + Unpin + Clone + Debug + 'static { /// The block header type. type BlockHeader: BlockHeader + 'static; @@ -24,12 +39,20 @@ pub trait NetworkPrimitives: Send + Sync + Unpin + Clone + Debug + 'static { + Decodable + 'static; - /// The transaction type which peers announce in `Transactions` messages. It is different from - /// `PooledTransactions` to account for Ethereum case where EIP-4844 transactions are not being - /// announced and can only be explicitly requested from peers. + /// The transaction type which peers announce in `Transactions` messages. + /// + /// This is different from `PooledTransactions` to account for the Ethereum case where + /// EIP-4844 blob transactions are not announced over the network and can only be + /// explicitly requested from peers. This is because blob transactions can be quite + /// large and broadcasting them to all peers would cause + /// significant bandwidth usage. type BroadcastedTransaction: SignedTransaction + 'static; /// The transaction type which peers return in `PooledTransactions` messages. + /// + /// For EIP-4844 blob transactions, this includes the full blob sidecar with + /// KZG commitments and proofs that are needed for validation but are not + /// included in the consensus block format. type PooledTransaction: SignedTransaction + TryFrom + 'static; /// The transaction type which peers return in `GetReceipts` messages. diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index 0ef0358f77e..8f90058639c 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -111,7 +111,7 @@ impl UnifiedStatus { /// Convert this `UnifiedStatus` into the appropriate `StatusMessage` variant based on version. pub fn into_message(self) -> StatusMessage { - if self.version == EthVersion::Eth69 { + if self.version >= EthVersion::Eth69 { StatusMessage::Eth69(self.into_eth69()) } else { StatusMessage::Legacy(self.into_legacy()) diff --git a/crates/net/eth-wire-types/src/version.rs b/crates/net/eth-wire-types/src/version.rs index 172d2b1af45..7b461aec89d 100644 --- a/crates/net/eth-wire-types/src/version.rs +++ b/crates/net/eth-wire-types/src/version.rs @@ -33,6 +33,9 @@ impl EthVersion { /// The latest known eth version pub const LATEST: Self = Self::Eth68; + /// All known eth versions + pub const ALL_VERSIONS: &'static [Self] = &[Self::Eth69, Self::Eth68, Self::Eth67, Self::Eth66]; + /// Returns the total number of messages the protocol version supports. pub const fn total_messages(&self) -> u8 { match self { diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index 87345d80e96..415603c8c2b 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -64,6 +64,9 @@ where /// Consumes the [`UnauthedEthStream`] and returns an [`EthStream`] after the `Status` /// handshake is completed successfully. 
This also returns the `Status` message sent by the /// remote peer. + /// + /// Caution: This expects that the [`UnifiedStatus`] has the proper eth version configured, with + /// ETH69 the initial status message changed. pub async fn handshake( self, status: UnifiedStatus, diff --git a/crates/net/eth-wire/src/hello.rs b/crates/net/eth-wire/src/hello.rs index 3490f0b2e7a..49876a47fb7 100644 --- a/crates/net/eth-wire/src/hello.rs +++ b/crates/net/eth-wire/src/hello.rs @@ -206,6 +206,7 @@ impl HelloMessageBuilder { client_version: client_version.unwrap_or_else(|| RETH_CLIENT_VERSION.to_string()), protocols: protocols.unwrap_or_else(|| { vec![EthVersion::Eth68.into(), EthVersion::Eth67.into(), EthVersion::Eth66.into()] + // TODO: enable: EthVersion::ALL_VERSIONS.iter().copied().map(Into::into).collect() }), port: port.unwrap_or(DEFAULT_TCP_PORT), id, diff --git a/crates/net/nat/src/lib.rs b/crates/net/nat/src/lib.rs index 3bdb3afc902..c7466b44012 100644 --- a/crates/net/nat/src/lib.rs +++ b/crates/net/nat/src/lib.rs @@ -161,6 +161,11 @@ impl ResolveNatInterval { Self::with_interval(resolver, interval) } + /// Returns the resolver used by this interval + pub const fn resolver(&self) -> &NatResolver { + &self.resolver + } + /// Completes when the next [`IpAddr`] in the interval has been reached. pub async fn tick(&mut self) -> Option { poll_fn(|cx| self.poll_tick(cx)).await @@ -230,7 +235,8 @@ async fn resolve_external_ip_url_res(url: &str) -> Result { } async fn resolve_external_ip_url(url: &str) -> Option { - let response = reqwest::get(url).await.ok()?; + let client = reqwest::Client::builder().timeout(Duration::from_secs(10)).build().ok()?; + let response = client.get(url).send().await.ok()?; let response = response.error_for_status().ok()?; let text = response.text().await.ok()?; text.trim().parse().ok() diff --git a/crates/net/network-api/src/events.rs b/crates/net/network-api/src/events.rs index 642dd50f814..8a5c7541490 100644 --- a/crates/net/network-api/src/events.rs +++ b/crates/net/network-api/src/events.rs @@ -4,7 +4,7 @@ use reth_eth_wire_types::{ message::RequestPair, BlockBodies, BlockHeaders, Capabilities, DisconnectReason, EthMessage, EthNetworkPrimitives, EthVersion, GetBlockBodies, GetBlockHeaders, GetNodeData, GetPooledTransactions, GetReceipts, NetworkPrimitives, NodeData, PooledTransactions, Receipts, - UnifiedStatus, + Receipts69, UnifiedStatus, }; use reth_ethereum_forks::ForkId; use reth_network_p2p::error::{RequestError, RequestResult}; @@ -229,6 +229,15 @@ pub enum PeerRequest { /// The channel to send the response for receipts. response: oneshot::Sender>>, }, + /// Requests receipts from the peer without bloom filter. + /// + /// The response should be sent through the channel. + GetReceipts69 { + /// The request for receipts. + request: GetReceipts, + /// The channel to send the response for receipts. + response: oneshot::Sender>>, + }, } // === impl PeerRequest === @@ -247,6 +256,7 @@ impl PeerRequest { Self::GetPooledTransactions { response, .. } => response.send(Err(err)).ok(), Self::GetNodeData { response, .. } => response.send(Err(err)).ok(), Self::GetReceipts { response, .. } => response.send(Err(err)).ok(), + Self::GetReceipts69 { response, .. } => response.send(Err(err)).ok(), }; } @@ -268,7 +278,7 @@ impl PeerRequest { Self::GetNodeData { request, .. } => { EthMessage::GetNodeData(RequestPair { request_id, message: request.clone() }) } - Self::GetReceipts { request, .. } => { + Self::GetReceipts { request, .. } | Self::GetReceipts69 { request, .. 
} => { EthMessage::GetReceipts(RequestPair { request_id, message: request.clone() }) } } diff --git a/crates/net/network-types/src/peers/mod.rs b/crates/net/network-types/src/peers/mod.rs index 2f0bd6141b8..f3529875018 100644 --- a/crates/net/network-types/src/peers/mod.rs +++ b/crates/net/network-types/src/peers/mod.rs @@ -83,12 +83,16 @@ impl Peer { } /// Applies a reputation change to the peer and returns what action should be taken. - pub fn apply_reputation(&mut self, reputation: i32) -> ReputationChangeOutcome { + pub fn apply_reputation( + &mut self, + reputation: i32, + kind: ReputationChangeKind, + ) -> ReputationChangeOutcome { let previous = self.reputation; // we add reputation since negative reputation change decrease total reputation self.reputation = previous.saturating_add(reputation); - trace!(target: "net::peers", reputation=%self.reputation, banned=%self.is_banned(), "applied reputation change"); + trace!(target: "net::peers", reputation=%self.reputation, banned=%self.is_banned(), ?kind, "applied reputation change"); if self.state.is_connected() && self.is_banned() { self.state.disconnect(); diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 408937e4533..39e485318bb 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -10,7 +10,7 @@ use alloy_rlp::Encodable; use futures::StreamExt; use reth_eth_wire::{ BlockBodies, BlockHeaders, EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, GetNodeData, - GetReceipts, HeadersDirection, NetworkPrimitives, NodeData, Receipts, + GetReceipts, HeadersDirection, NetworkPrimitives, NodeData, Receipts, Receipts69, }; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::error::RequestResult; @@ -190,19 +190,45 @@ where ) { self.metrics.eth_receipts_requests_received_total.increment(1); - let mut receipts = Vec::new(); + let receipts = self.get_receipts_response(request, |receipts_by_block| { + receipts_by_block.into_iter().map(ReceiptWithBloom::from).collect::>() + }); + + let _ = response.send(Ok(Receipts(receipts))); + } + + fn on_receipts69_request( + &self, + _peer_id: PeerId, + request: GetReceipts, + response: oneshot::Sender>>, + ) { + self.metrics.eth_receipts_requests_received_total.increment(1); + + let receipts = self.get_receipts_response(request, |receipts_by_block| { + // skip bloom filter for eth69 + receipts_by_block + }); + + let _ = response.send(Ok(Receipts69(receipts))); + } + #[inline] + fn get_receipts_response(&self, request: GetReceipts, transform_fn: F) -> Vec> + where + F: Fn(Vec) -> Vec, + T: Encodable, + { + let mut receipts = Vec::new(); let mut total_bytes = 0; for hash in request.0 { if let Some(receipts_by_block) = self.client.receipts_by_block(BlockHashOrNumber::Hash(hash)).unwrap_or_default() { - let receipt = - receipts_by_block.into_iter().map(ReceiptWithBloom::from).collect::>(); - - total_bytes += receipt.length(); - receipts.push(receipt); + let transformed_receipts = transform_fn(receipts_by_block); + total_bytes += transformed_receipts.length(); + receipts.push(transformed_receipts); if receipts.len() >= MAX_RECEIPTS_SERVE || total_bytes > SOFT_RESPONSE_LIMIT { break @@ -212,7 +238,7 @@ where } } - let _ = response.send(Ok(Receipts(receipts))); + receipts } } @@ -252,6 +278,9 @@ where IncomingEthRequest::GetReceipts { peer_id, request, response } => { this.on_receipts_request(peer_id, request, response) } + IncomingEthRequest::GetReceipts69 { peer_id, request, response } => { + 
this.on_receipts69_request(peer_id, request, response) + } } }, ); @@ -315,4 +344,15 @@ pub enum IncomingEthRequest { /// The channel sender for the response containing receipts. response: oneshot::Sender>>, }, + /// Request Receipts from the peer without bloom filter. + /// + /// The response should be sent through the channel. + GetReceipts69 { + /// The ID of the peer to request receipts from. + peer_id: PeerId, + /// The specific receipts requested. + request: GetReceipts, + /// The channel sender for the response containing Receipts69. + response: oneshot::Sender>>, + }, } diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index 1d8e4d15be3..ee7d503af09 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -173,8 +173,11 @@ pub use swarm::NetworkConnectionState; /// re-export p2p interfaces pub use reth_network_p2p as p2p; -/// re-export types crate -pub use reth_eth_wire_types as types; +/// re-export types crates +pub mod types { + pub use reth_eth_wire_types::*; + pub use reth_network_types::*; +} use aquamarine as _; diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index d2ce957614e..c4dbbf75d15 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -516,6 +516,13 @@ impl NetworkManager { response, }) } + PeerRequest::GetReceipts69 { request, response } => { + self.delegate_eth_request(IncomingEthRequest::GetReceipts69 { + peer_id, + request, + response, + }) + } PeerRequest::GetPooledTransactions { request, response } => { self.notify_tx_manager(NetworkTransactionEvent::GetPooledTransactions { peer_id, diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index f1dd603fd22..7b489d2ffac 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -107,6 +107,11 @@ pub enum PeerResponse { /// The receiver channel for the response to a receipts request. response: oneshot::Receiver>>, }, + /// Represents a response to a request for receipts. + Receipts69 { + /// The receiver channel for the response to a receipts request. 
+ response: oneshot::Receiver>>, + }, } // === impl PeerResponse === @@ -139,6 +144,9 @@ impl PeerResponse { Self::Receipts { response } => { poll_request!(response, Receipts, cx) } + Self::Receipts69 { response } => { + poll_request!(response, Receipts69, cx) + } }; Poll::Ready(res) } diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index bb69d1adc76..c0694023ceb 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -480,7 +480,7 @@ impl PeersManager { reputation_change = MAX_TRUSTED_PEER_REPUTATION_CHANGE; } } - peer.apply_reputation(reputation_change) + peer.apply_reputation(reputation_change, rep) } } else { return diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 19f57f0f249..827c4bfb190 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -16,7 +16,7 @@ use crate::{ session::{ conn::EthRlpxConnection, handle::{ActiveSessionMessage, SessionCommand}, - BlockRangeInfo, SessionId, + BlockRangeInfo, EthVersion, SessionId, }, }; use alloy_primitives::Sealable; @@ -24,7 +24,7 @@ use futures::{stream::Fuse, SinkExt, StreamExt}; use metrics::Gauge; use reth_eth_wire::{ errors::{EthHandshakeError, EthStreamError}, - message::{EthBroadcastMessage, RequestPair}, + message::{EthBroadcastMessage, MessageError, RequestPair}, Capabilities, DisconnectP2P, DisconnectReason, EthMessage, NetworkPrimitives, NewBlockPayload, }; use reth_eth_wire_types::RawCapabilityMessage; @@ -43,10 +43,16 @@ use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::PollSender; use tracing::{debug, trace}; +/// The recommended interval at which a new range update should be sent to the remote peer. +/// +/// This is set to 120 seconds (2 minutes) as per the Ethereum specification for eth69. +pub(super) const RANGE_UPDATE_INTERVAL: Duration = Duration::from_secs(120); + // Constants for timeout updating. /// Minimum timeout value const MINIMUM_TIMEOUT: Duration = Duration::from_secs(2); + /// Maximum timeout value const MAXIMUM_TIMEOUT: Duration = INITIAL_REQUEST_TIMEOUT; /// How much the new measurements affect the current timeout (X percent) @@ -116,6 +122,12 @@ pub(crate) struct ActiveSession { Option<(PollSender>, ActiveSessionMessage)>, /// The eth69 range info for the remote peer. pub(crate) range_info: Option, + /// The eth69 range info for the local node (this node). + /// This represents the range of blocks that this node can serve to other peers. + pub(crate) local_range_info: BlockRangeInfo, + /// Optional interval for sending periodic range updates to the remote peer (eth69+) + /// Recommended frequency is ~2 minutes per spec + pub(crate) range_update_interval: Option, } impl ActiveSession { @@ -174,6 +186,7 @@ impl ActiveSession { if let Some(req) = self.inflight_requests.remove(&request_id) { match req.request { RequestState::Waiting(PeerRequest::$item { response, .. 
}) => { + trace!(peer_id=?self.remote_peer_id, ?request_id, "received response from peer"); let _ = response.send(Ok(message)); self.update_request_timeout(req.timestamp, Instant::now()); } @@ -186,6 +199,7 @@ impl ActiveSession { } } } else { + trace!(peer_id=?self.remote_peer_id, ?request_id, "received response to unknown request"); // we received a response to a request we never sent self.on_bad_message(); } @@ -253,7 +267,11 @@ impl ActiveSession { on_response!(resp, GetNodeData) } EthMessage::GetReceipts(req) => { - on_request!(req, Receipts, GetReceipts) + if self.conn.version() >= EthVersion::Eth69 { + on_request!(req, Receipts69, GetReceipts69) + } else { + on_request!(req, Receipts, GetReceipts) + } } EthMessage::Receipts(resp) => { on_response!(resp, GetReceipts) @@ -264,6 +282,17 @@ impl ActiveSession { on_response!(resp, GetReceipts) } EthMessage::BlockRangeUpdate(msg) => { + // Validate that earliest <= latest according to the spec + if msg.earliest > msg.latest { + return OnIncomingMessageOutcome::BadMessage { + error: EthStreamError::InvalidMessage(MessageError::Other(format!( + "invalid block range: earliest ({}) > latest ({})", + msg.earliest, msg.latest + ))), + message: EthMessage::BlockRangeUpdate(msg), + }; + } + if let Some(range_info) = self.range_info.as_ref() { range_info.update(msg.earliest, msg.latest, msg.latest_hash); } @@ -277,6 +306,8 @@ impl ActiveSession { /// Handle an internal peer request that will be sent to the remote. fn on_internal_peer_request(&mut self, request: PeerRequest, deadline: Instant) { let request_id = self.next_id(); + + trace!(?request, peer_id=?self.remote_peer_id, ?request_id, "sending request to peer"); let msg = request.create_request_message(request_id); self.queued_outgoing.push_back(msg.into()); let req = InflightRequest { @@ -299,6 +330,8 @@ impl ActiveSession { PeerMessage::PooledTransactions(msg) => { if msg.is_valid_for_version(self.conn.version()) { self.queued_outgoing.push_back(EthMessage::from(msg).into()); + } else { + debug!(target: "net", ?msg, version=?self.conn.version(), "Message is invalid for connection version, skipping"); } } PeerMessage::EthRequest(req) => { @@ -696,6 +729,15 @@ impl Future for ActiveSession { } } + if let Some(interval) = &mut this.range_update_interval { + // queue in new range updates if the interval is ready + while interval.poll_tick(cx).is_ready() { + this.queued_outgoing.push_back( + EthMessage::BlockRangeUpdate(this.local_range_info.to_message()).into(), + ); + } + } + while this.internal_request_timeout_interval.poll_tick(cx).is_ready() { // check for timed out requests if this.check_timed_out_requests(Instant::now()) { @@ -788,6 +830,7 @@ enum RequestState { } /// Outgoing messages that can be sent over the wire. +#[derive(Debug)] pub(crate) enum OutgoingMessage { /// A message that is owned. 
Eth(EthMessage), @@ -911,7 +954,7 @@ mod tests { F: FnOnce(EthStream>, N>) -> O + Send + 'static, O: Future + Send + Sync, { - let status = self.status; + let mut status = self.status; let fork_filter = self.fork_filter.clone(); let local_peer_id = self.local_peer_id; let mut hello = self.hello.clone(); @@ -923,6 +966,9 @@ mod tests { let (p2p_stream, _) = UnauthedP2PStream::new(sink).handshake(hello).await.unwrap(); + let eth_version = p2p_stream.shared_capabilities().eth_version().unwrap(); + status.set_eth_version(eth_version); + let (client_stream, _) = UnauthedEthStream::new(p2p_stream) .handshake(status, fork_filter) .await @@ -994,6 +1040,12 @@ mod tests { protocol_breach_request_timeout: PROTOCOL_BREACH_REQUEST_TIMEOUT, terminate_message: None, range_info: None, + local_range_info: BlockRangeInfo::new( + 0, + 1000, + alloy_primitives::B256::ZERO, + ), + range_update_interval: None, } } ev => { diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 1b73b87f8fd..5aad90cbb6f 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -48,6 +48,7 @@ use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::PollSender; use tracing::{debug, instrument, trace}; +use crate::session::active::RANGE_UPDATE_INTERVAL; pub use conn::EthRlpxConnection; pub use handle::{ ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, @@ -116,6 +117,9 @@ pub struct SessionManager { metrics: SessionManagerMetrics, /// The [`EthRlpxHandshake`] is used to perform the initial handshake with the peer. handshake: Arc, + /// Shared local range information that gets propagated to active sessions. + /// This represents the range of blocks that this node can serve to other peers. + local_range_info: BlockRangeInfo, } // === impl SessionManager === @@ -137,6 +141,13 @@ impl SessionManager { let (active_session_tx, active_session_rx) = mpsc::channel(config.session_event_buffer); let active_session_tx = PollSender::new(active_session_tx); + // Initialize local range info from the status + let local_range_info = BlockRangeInfo::new( + status.earliest_block.unwrap_or_default(), + status.latest_block.unwrap_or_default(), + status.blockhash, + ); + Self { next_id: 0, counter: SessionCounter::new(config.limits), @@ -159,6 +170,7 @@ impl SessionManager { disconnections_counter: Default::default(), metrics: Default::default(), handshake, + local_range_info, } } @@ -521,6 +533,14 @@ impl SessionManager { // negotiated version let version = conn.version(); + // Configure the interval at which the range information is updated, starting with + // ETH69 + let range_update_interval = (conn.version() >= EthVersion::Eth69).then(|| { + let mut interval = tokio::time::interval(RANGE_UPDATE_INTERVAL); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); + interval + }); + let session = ActiveSession { next_id: 0, remote_peer_id: peer_id, @@ -544,6 +564,8 @@ impl SessionManager { protocol_breach_request_timeout: self.protocol_breach_request_timeout, terminate_message: None, range_info: None, + local_range_info: self.local_range_info.clone(), + range_update_interval, }; self.spawn(session); @@ -653,13 +675,24 @@ impl SessionManager { } } - pub(crate) const fn update_advertised_block_range( - &mut self, - block_range_update: BlockRangeUpdate, - ) { + /// Updates the advertised block range that this node can serve to other peers starting with + /// Eth69. 
+ /// + /// This method updates both the local status message that gets sent to peers during handshake + /// and the shared local range information that gets propagated to active sessions (Eth69). + /// The range information is used in ETH69 protocol where peers announce the range of blocks + /// they can serve to optimize data synchronization. + pub(crate) fn update_advertised_block_range(&mut self, block_range_update: BlockRangeUpdate) { self.status.earliest_block = Some(block_range_update.earliest); self.status.latest_block = Some(block_range_update.latest); self.status.blockhash = block_range_update.latest_hash; + + // Update the shared local range info that gets propagated to active sessions + self.local_range_info.update( + block_range_update.earliest, + block_range_update.latest, + block_range_update.latest_hash, + ); } } @@ -1071,12 +1104,12 @@ async fn authenticate_stream( } }; + // Before trying status handshake, set up the version to negotiated shared version + status.set_eth_version(eth_version); + let (conn, their_status) = if p2p_stream.shared_capabilities().len() == 1 { // if the shared caps are 1, we know both support the eth version // if the hello handshake was successful we can try status handshake - // - // Before trying status handshake, set up the version to negotiated shared version - status.set_eth_version(eth_version); // perform the eth protocol handshake match handshake diff --git a/crates/net/network/src/session/types.rs b/crates/net/network/src/session/types.rs index c8cd98c3cbc..b73bfe3b992 100644 --- a/crates/net/network/src/session/types.rs +++ b/crates/net/network/src/session/types.rs @@ -2,6 +2,7 @@ use alloy_primitives::B256; use parking_lot::RwLock; +use reth_eth_wire::BlockRangeUpdate; use std::{ ops::RangeInclusive, sync::{ @@ -13,7 +14,7 @@ use std::{ /// Information about the range of blocks available from a peer. /// /// This represents the announced `eth69` -/// [`BlockRangeUpdate`](reth_eth_wire_types::BlockRangeUpdate) of a peer. +/// [`BlockRangeUpdate`] of a peer. #[derive(Debug, Clone)] pub struct BlockRangeInfo { /// The inner range information. @@ -65,6 +66,15 @@ impl BlockRangeInfo { self.inner.latest.store(latest, Ordering::Relaxed); *self.inner.latest_hash.write() = latest_hash; } + + /// Converts the current range information to an Eth69 [`BlockRangeUpdate`] message. + pub fn to_message(&self) -> BlockRangeUpdate { + BlockRangeUpdate { + earliest: self.earliest(), + latest: self.latest(), + latest_hash: self.latest_hash(), + } + } } /// Inner structure containing the range information with atomic and thread-safe fields. diff --git a/crates/net/network/src/transactions/config.rs b/crates/net/network/src/transactions/config.rs index 85ea9e23589..910e35d37be 100644 --- a/crates/net/network/src/transactions/config.rs +++ b/crates/net/network/src/transactions/config.rs @@ -120,7 +120,7 @@ pub trait TransactionPropagationPolicy: Send + Sync + Unpin + 'static { pub enum TransactionPropagationKind { /// Propagate transactions to all peers. /// - /// No restructions + /// No restrictions #[default] All, /// Propagate transactions to only trusted peers. @@ -168,8 +168,8 @@ pub enum AnnouncementAcceptance { }, } -/// A policy that defines how to handle incoming transaction annoucements, -/// particularly concerning transaction types and other annoucement metadata. +/// A policy that defines how to handle incoming transaction announcements, +/// particularly concerning transaction types and other announcement metadata. 
pub trait AnnouncementFilteringPolicy: Send + Sync + Unpin + 'static { /// Decides how to handle a transaction announcement based on its type, hash, and size. fn decide_on_announcement(&self, ty: u8, hash: &B256, size: usize) -> AnnouncementAcceptance; diff --git a/crates/net/network/src/transactions/constants.rs b/crates/net/network/src/transactions/constants.rs index 4213c171e05..905c5931e9e 100644 --- a/crates/net/network/src/transactions/constants.rs +++ b/crates/net/network/src/transactions/constants.rs @@ -57,7 +57,6 @@ pub mod tx_manager { /// Constants used by [`TransactionFetcher`](super::TransactionFetcher). pub mod tx_fetcher { - use crate::transactions::fetcher::TransactionFetcherInfo; use reth_network_types::peers::config::{ DEFAULT_MAX_COUNT_PEERS_INBOUND, DEFAULT_MAX_COUNT_PEERS_OUTBOUND, }; @@ -202,14 +201,16 @@ pub mod tx_fetcher { /// Default divisor of the max inflight request when calculating search breadth of the search /// for any idle peer to which to send a request filled with hashes pending fetch. The max - /// inflight requests is configured in [`TransactionFetcherInfo`]. + /// inflight requests is configured in + /// [`TransactionFetcherInfo`](crate::transactions::fetcher::TransactionFetcherInfo). /// /// Default is 3 requests. pub const DEFAULT_DIVISOR_MAX_COUNT_INFLIGHT_REQUESTS_ON_FIND_IDLE_PEER: usize = 3; /// Default divisor of the max inflight request when calculating search breadth of the search /// for the intersection of hashes announced by a peer and hashes pending fetch. The max - /// inflight requests is configured in [`TransactionFetcherInfo`]. + /// inflight requests is configured in + /// [`TransactionFetcherInfo`](crate::transactions::fetcher::TransactionFetcherInfo). /// /// Default is 3 requests. pub const DEFAULT_DIVISOR_MAX_COUNT_INFLIGHT_REQUESTS_ON_FIND_INTERSECTION: usize = 3; @@ -256,26 +257,4 @@ pub mod tx_fetcher { /// /// Default is 8 hashes. pub const DEFAULT_MARGINAL_COUNT_HASHES_GET_POOLED_TRANSACTIONS_REQUEST: usize = 8; - - /// Returns the approx number of transaction hashes that a - /// [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) request will have capacity - /// for w.r.t. the [`Eth68`](reth_eth_wire::EthVersion::Eth68) protocol version. This is useful - /// for preallocating memory. - pub const fn approx_capacity_get_pooled_transactions_req_eth68( - info: &TransactionFetcherInfo, - ) -> usize { - let max_size_expected_response = - info.soft_limit_byte_size_pooled_transactions_response_on_pack_request; - - max_size_expected_response / MEDIAN_BYTE_SIZE_SMALL_LEGACY_TX_ENCODED + - DEFAULT_MARGINAL_COUNT_HASHES_GET_POOLED_TRANSACTIONS_REQUEST - } - - /// Returns the approx number of transactions that a - /// [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) request will - /// have capacity for w.r.t. the [`Eth66`](reth_eth_wire::EthVersion::Eth66) protocol version. - /// This is useful for preallocating memory. 
- pub const fn approx_capacity_get_pooled_transactions_req_eth66() -> usize { - SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST - } } diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index c1fdf0e1064..2656840128c 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -41,7 +41,7 @@ use derive_more::{Constructor, Deref}; use futures::{stream::FuturesUnordered, Future, FutureExt, Stream, StreamExt}; use pin_project::pin_project; use reth_eth_wire::{ - DedupPayload, EthVersion, GetPooledTransactions, HandleMempoolData, HandleVersionedMempoolData, + DedupPayload, GetPooledTransactions, HandleMempoolData, HandleVersionedMempoolData, PartiallyValidData, RequestTxHashes, ValidAnnouncementData, }; use reth_eth_wire_types::{EthNetworkPrimitives, NetworkPrimitives}; @@ -840,19 +840,6 @@ impl TransactionFetcher { } } - /// Returns the approx number of transactions that a [`GetPooledTransactions`] request will - /// have capacity for w.r.t. the given version of the protocol. - pub const fn approx_capacity_get_pooled_transactions_req( - &self, - announcement_version: EthVersion, - ) -> usize { - if announcement_version.is_eth68() { - approx_capacity_get_pooled_transactions_req_eth68(&self.info) - } else { - approx_capacity_get_pooled_transactions_req_eth66() - } - } - /// Processes a resolved [`GetPooledTransactions`] request. Queues the outcome as a /// [`FetchEvent`], which will then be streamed by /// [`TransactionsManager`](super::TransactionsManager). @@ -895,15 +882,19 @@ impl TransactionFetcher { if unsolicited > 0 { self.metrics.unsolicited_transactions.increment(unsolicited as u64); } - if verification_outcome == VerificationOutcome::ReportPeer { - // todo: report peer for sending hashes that weren't requested + + let report_peer = if verification_outcome == VerificationOutcome::ReportPeer { trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), unverified_len, verified_payload_len=verified_payload.len(), "received `PooledTransactions` response from peer with entries that didn't verify against request, filtered out transactions" ); - } + true + } else { + false + }; + // peer has only sent hashes that we didn't request if verified_payload.is_empty() { return FetchEvent::FetchError { peer_id, error: RequestError::BadResponse } @@ -965,7 +956,7 @@ impl TransactionFetcher { let transactions = valid_payload.into_data().into_values().collect(); - FetchEvent::TransactionsFetched { peer_id, transactions } + FetchEvent::TransactionsFetched { peer_id, transactions, report_peer } } Ok(Err(req_err)) => { self.try_buffer_hashes_for_retry(requested_hashes, &peer_id); @@ -1052,6 +1043,9 @@ pub enum FetchEvent { peer_id: PeerId, /// The transactions that were fetched, if available. transactions: PooledTransactions, + /// Whether the peer should be penalized for sending unsolicited transactions or for + /// misbehavior. + report_peer: bool, }, /// Triggered when there is an error in fetching transactions. 
FetchError { @@ -1298,6 +1292,7 @@ mod test { use alloy_primitives::{hex, B256}; use alloy_rlp::Decodable; use derive_more::IntoIterator; + use reth_eth_wire_types::EthVersion; use reth_ethereum_primitives::TransactionSigned; use std::{collections::HashSet, str::FromStr}; diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 0fdee4a915f..18233700e25 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -242,6 +242,40 @@ impl TransactionsHandle { /// /// It can be configured with different policies for transaction propagation and announcement /// filtering. See [`NetworkPolicies`] and [`TransactionPolicies`] for more details. +/// +/// ## Network Transaction Processing +/// +/// ### Message Types +/// +/// - **`Transactions`**: Full transaction broadcasts (rejects blob transactions) +/// - **`NewPooledTransactionHashes`**: Hash announcements +/// +/// ### Peer Tracking +/// +/// - Maintains per-peer transaction cache (default: 10,240 entries) +/// - Prevents duplicate imports and enables efficient propagation +/// +/// ### Bad Transaction Handling +/// +/// Caches and rejects transactions with consensus violations (gas, signature, chain ID). +/// Penalizes peers sending invalid transactions. +/// +/// ### Import Management +/// +/// Limits concurrent pool imports and backs off when approaching capacity. +/// +/// ### Transaction Fetching +/// +/// For announced transactions: filters known → queues unknown → fetches → imports +/// +/// ### Propagation Rules +/// +/// Based on: origin (Local/External/Private), peer capabilities, and network state. +/// Disabled during initial sync. +/// +/// ### Security +/// +/// Rate limiting via reputation, bad transaction isolation, peer scoring. #[derive(Debug)] #[must_use = "Manager does nothing unless polled."] pub struct TransactionsManager< @@ -1132,7 +1166,8 @@ where } TransactionsCommand::PropagateTransactions(txs) => self.propagate_all(txs), TransactionsCommand::BroadcastTransactions(txs) => { - self.propagate_transactions(txs, PropagationMode::Forced); + let propagated = self.propagate_transactions(txs, PropagationMode::Forced); + self.pool.on_propagated(propagated); } TransactionsCommand::GetTransactionHashes { peers, tx } => { let mut res = HashMap::with_capacity(peers.len()); @@ -1323,91 +1358,89 @@ where // tracks the quality of the given transactions let mut has_bad_transactions = false; - // 2. filter out transactions that are invalid or already pending import - if let Some(peer) = self.peers.get_mut(&peer_id) { - // pre-size to avoid reallocations - let mut new_txs = Vec::with_capacity(transactions.len()); - for tx in transactions { - // recover transaction - let tx = match tx.try_into_recovered() { - Ok(tx) => tx, - Err(badtx) => { + // 2. 
filter out transactions that are invalid or already pending import pre-size to avoid + // reallocations + let mut new_txs = Vec::with_capacity(transactions.len()); + for tx in transactions { + // recover transaction + let tx = match tx.try_into_recovered() { + Ok(tx) => tx, + Err(badtx) => { + trace!(target: "net::tx", + peer_id=format!("{peer_id:#}"), + hash=%badtx.tx_hash(), + client_version=%peer.client_version, + "failed ecrecovery for transaction" + ); + has_bad_transactions = true; + continue + } + }; + + match self.transactions_by_peers.entry(*tx.tx_hash()) { + Entry::Occupied(mut entry) => { + // transaction was already inserted + entry.get_mut().insert(peer_id); + } + Entry::Vacant(entry) => { + if self.bad_imports.contains(tx.tx_hash()) { trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), - hash=%badtx.tx_hash(), + hash=%tx.tx_hash(), client_version=%peer.client_version, - "failed ecrecovery for transaction" + "received a known bad transaction from peer" ); has_bad_transactions = true; - continue - } - }; + } else { + // this is a new transaction that should be imported into the pool - match self.transactions_by_peers.entry(*tx.tx_hash()) { - Entry::Occupied(mut entry) => { - // transaction was already inserted - entry.get_mut().insert(peer_id); - } - Entry::Vacant(entry) => { - if self.bad_imports.contains(tx.tx_hash()) { - trace!(target: "net::tx", - peer_id=format!("{peer_id:#}"), - hash=%tx.tx_hash(), - client_version=%peer.client_version, - "received a known bad transaction from peer" - ); - has_bad_transactions = true; - } else { - // this is a new transaction that should be imported into the pool - - let pool_transaction = Pool::Transaction::from_pooled(tx); - new_txs.push(pool_transaction); - - entry.insert(HashSet::from([peer_id])); - } + let pool_transaction = Pool::Transaction::from_pooled(tx); + new_txs.push(pool_transaction); + + entry.insert(HashSet::from([peer_id])); } } } - new_txs.shrink_to_fit(); + } + new_txs.shrink_to_fit(); - // 3. import new transactions as a batch to minimize lock contention on the underlying - // pool - if !new_txs.is_empty() { - let pool = self.pool.clone(); - // update metrics - let metric_pending_pool_imports = self.metrics.pending_pool_imports.clone(); - metric_pending_pool_imports.increment(new_txs.len() as f64); + // 3. 
import new transactions as a batch to minimize lock contention on the underlying + // pool + if !new_txs.is_empty() { + let pool = self.pool.clone(); + // update metrics + let metric_pending_pool_imports = self.metrics.pending_pool_imports.clone(); + metric_pending_pool_imports.increment(new_txs.len() as f64); + + // update self-monitoring info + self.pending_pool_imports_info + .pending_pool_imports + .fetch_add(new_txs.len(), Ordering::Relaxed); + let tx_manager_info_pending_pool_imports = + self.pending_pool_imports_info.pending_pool_imports.clone(); + + trace!(target: "net::tx::propagation", new_txs_len=?new_txs.len(), "Importing new transactions"); + let import = Box::pin(async move { + let added = new_txs.len(); + let res = pool.add_external_transactions(new_txs).await; + // update metrics + metric_pending_pool_imports.decrement(added as f64); // update self-monitoring info - self.pending_pool_imports_info - .pending_pool_imports - .fetch_add(new_txs.len(), Ordering::Relaxed); - let tx_manager_info_pending_pool_imports = - self.pending_pool_imports_info.pending_pool_imports.clone(); - - trace!(target: "net::tx::propagation", new_txs_len=?new_txs.len(), "Importing new transactions"); - let import = Box::pin(async move { - let added = new_txs.len(); - let res = pool.add_external_transactions(new_txs).await; - - // update metrics - metric_pending_pool_imports.decrement(added as f64); - // update self-monitoring info - tx_manager_info_pending_pool_imports.fetch_sub(added, Ordering::Relaxed); - - res - }); - - self.pool_imports.push(import); - } + tx_manager_info_pending_pool_imports.fetch_sub(added, Ordering::Relaxed); - if num_already_seen_by_peer > 0 { - self.metrics.messages_with_transactions_already_seen_by_peer.increment(1); - self.metrics - .occurrences_of_transaction_already_seen_by_peer - .increment(num_already_seen_by_peer); - trace!(target: "net::tx", num_txs=%num_already_seen_by_peer, ?peer_id, client=?peer.client_version, "Peer sent already seen transactions"); - } + res + }); + + self.pool_imports.push(import); + } + + if num_already_seen_by_peer > 0 { + self.metrics.messages_with_transactions_already_seen_by_peer.increment(1); + self.metrics + .occurrences_of_transaction_already_seen_by_peer + .increment(num_already_seen_by_peer); + trace!(target: "net::tx", num_txs=%num_already_seen_by_peer, ?peer_id, client=?peer.client_version, "Peer sent already seen transactions"); } if has_bad_transactions { @@ -1423,8 +1456,11 @@ where /// Processes a [`FetchEvent`]. 
fn on_fetch_event(&mut self, fetch_event: FetchEvent) { match fetch_event { - FetchEvent::TransactionsFetched { peer_id, transactions } => { + FetchEvent::TransactionsFetched { peer_id, transactions, report_peer } => { self.import_transactions(peer_id, transactions, TransactionSource::Response); + if report_peer { + self.report_peer(peer_id, ReputationChangeKind::BadTransactions); + } } FetchEvent::FetchError { peer_id, error } => { trace!(target: "net::tx", ?peer_id, %error, "requesting transactions from peer failed"); diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index ab6ddac7345..f4c4aa159b3 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -19,7 +19,9 @@ use reth_network_p2p::{ sync::{NetworkSyncUpdater, SyncState}, }; use reth_network_peers::{mainnet_nodes, NodeRecord, TrustedPeer}; +use reth_network_types::peers::config::PeerBackoffDurations; use reth_storage_api::noop::NoopProvider; +use reth_tracing::init_test_tracing; use reth_transaction_pool::test_utils::testing_pool; use secp256k1::SecretKey; use std::time::Duration; @@ -359,16 +361,28 @@ async fn test_shutdown() { #[tokio::test(flavor = "multi_thread")] async fn test_trusted_peer_only() { + init_test_tracing(); let net = Testnet::create(2).await; let mut handles = net.handles(); + + // handle0 is used to test that: + // * outgoing connections to untrusted peers are not allowed + // * outgoing connections to trusted peers are allowed and succeed let handle0 = handles.next().unwrap(); + + // handle1 is used to test that: + // * incoming connections from untrusted peers are not allowed + // * incoming connections from trusted peers are allowed and succeed let handle1 = handles.next().unwrap(); drop(handles); let _handle = net.spawn(); let secret_key = SecretKey::new(&mut rand_08::thread_rng()); - let peers_config = PeersConfig::default().with_trusted_nodes_only(true); + let peers_config = PeersConfig::default() + .with_backoff_durations(PeerBackoffDurations::test()) + .with_ban_duration(Duration::from_millis(200)) + .with_trusted_nodes_only(true); let config = NetworkConfigBuilder::eth(secret_key) .listener_port(0) @@ -390,8 +404,8 @@ async fn test_trusted_peer_only() { // connect to an untrusted peer should fail. handle.add_peer(*handle0.peer_id(), handle0.local_addr()); - // wait 2 seconds, the number of connection is still 0. - tokio::time::sleep(Duration::from_secs(2)).await; + // wait 1 second, the number of connections is still 0. + tokio::time::sleep(Duration::from_secs(1)).await; assert_eq!(handle.num_connected_peers(), 0); // add to trusted peer. @@ -402,18 +416,24 @@ async fn test_trusted_peer_only() { assert_eq!(handle.num_connected_peers(), 1); // only receive connections from trusted peers. + handle1.add_peer(*handle.peer_id(), handle.local_addr()); - handle1.add_peer(*handle.peer_id(), handle0.local_addr()); - - // wait 2 seconds, the number of connections is still 1, because peer1 is untrusted. - tokio::time::sleep(Duration::from_secs(2)).await; + // wait 1 second, the number of connections is still 1, because peer1 is untrusted. + tokio::time::sleep(Duration::from_secs(1)).await; assert_eq!(handle.num_connected_peers(), 1); - handle1.add_trusted_peer(*handle.peer_id(), handle.local_addr()); + handle.add_trusted_peer(*handle1.peer_id(), handle1.local_addr()); + // wait for the next session established event to check the handle1 incoming connection let outgoing_peer_id1 = event_stream.next_session_established().await.unwrap(); assert_eq!(outgoing_peer_id1, *handle1.peer_id()); + + tokio::time::sleep(Duration::from_secs(1)).await; assert_eq!(handle.num_connected_peers(), 2); + + // check that handle0 and handle1 both have peers. + assert_eq!(handle0.num_connected_peers(), 1); + assert_eq!(handle1.num_connected_peers(), 1); } #[tokio::test(flavor = "multi_thread")]
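The tightened `test_trusted_peer_only` above pairs `with_trusted_nodes_only(true)` with short test backoff and ban durations, so that dials rejected while a peer is still untrusted can be retried within the one-second sleeps. A minimal sketch of that configuration, using only the builder calls visible in this hunk (the import paths and the helper name are assumptions):

```rust
use std::time::Duration;

use reth_network::PeersConfig;
use reth_network_types::peers::config::PeerBackoffDurations;

/// Sketch: a peers config that only admits trusted peers, with timings
/// short enough for the one-second sleeps used in `test_trusted_peer_only`.
fn trusted_only_peers_config() -> PeersConfig {
    PeersConfig::default()
        // test-sized backoffs so a refused dial is retried quickly
        .with_backoff_durations(PeerBackoffDurations::test())
        // short ban so a rejected untrusted attempt doesn't block the whole test
        .with_ban_duration(Duration::from_millis(200))
        // drop any session that isn't from an explicitly trusted peer
        .with_trusted_nodes_only(true)
}
```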
diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index 3a9dcf6308a..aa6c1d9c107 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -1,14 +1,12 @@ #![allow(unreachable_pub)] //! Tests for eth related requests -use std::sync::Arc; - use alloy_consensus::Header; use rand::Rng; -use reth_eth_wire::HeadersDirection; +use reth_eth_wire::{EthVersion, HeadersDirection}; use reth_ethereum_primitives::Block; use reth_network::{ - test_utils::{NetworkEventStream, Testnet}, + test_utils::{NetworkEventStream, PeerConfig, Testnet}, BlockDownloaderProvider, NetworkEventListenerProvider, }; use reth_network_api::{NetworkInfo, Peers}; @@ -17,7 +15,9 @@ use reth_network_p2p::{ headers::client::{HeadersClient, HeadersRequest}, }; use reth_provider::test_utils::MockEthProvider; -use reth_transaction_pool::test_utils::TransactionGenerator; +use reth_transaction_pool::test_utils::{TestPool, TransactionGenerator}; +use std::sync::Arc; +use tokio::sync::oneshot; #[tokio::test(flavor = "multi_thread")] async fn test_get_body() { @@ -107,3 +107,251 @@ async fn test_get_header() { assert_eq!(headers[0], header); } } + +#[tokio::test(flavor = "multi_thread")] +async fn test_eth68_get_receipts() { + reth_tracing::init_test_tracing(); + let mut rng = rand::rng(); + let mock_provider = Arc::new(MockEthProvider::default()); + + let mut net: Testnet<Arc<MockEthProvider>, TestPool> = Testnet::default(); + + // Create peers with ETH68 protocol explicitly + let p0 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth68.into())); + net.add_peer_with_config(p0).await.unwrap(); + + let p1 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth68.into())); + net.add_peer_with_config(p1).await.unwrap(); + + // install request handlers + net.for_each_mut(|peer| peer.install_request_handler()); + + let handle0 = net.peers()[0].handle(); + let mut events0 = NetworkEventStream::new(handle0.event_listener()); + + let handle1 = net.peers()[1].handle(); + + let _handle = net.spawn(); + + handle0.add_peer(*handle1.peer_id(), handle1.local_addr()); + let connected = events0.next_session_established().await.unwrap(); + assert_eq!(connected, *handle1.peer_id()); + + // Create test receipts and add them to the mock provider + for block_num in 1..=10 { + let block_hash = rng.random(); + let header = Header { number: block_num, ..Default::default() }; + + // Create some test receipts + let receipts = vec![ + reth_ethereum_primitives::Receipt { + cumulative_gas_used: 21000, + success: true, + ..Default::default() + }, + reth_ethereum_primitives::Receipt { + cumulative_gas_used: 42000, + success: false, + ..Default::default() + }, + ]; + + mock_provider.add_header(block_hash, header.clone()); + mock_provider.add_receipts(header.number, receipts); + + // Test receipt request via low-level peer request + let (tx, rx) = oneshot::channel(); + handle0.send_request( + *handle1.peer_id(), + reth_network::PeerRequest::GetReceipts { + request: reth_eth_wire::GetReceipts(vec![block_hash]), + response: tx, + }, + ); + + let result = rx.await.unwrap(); + let receipts_response = result.unwrap(); + assert_eq!(receipts_response.0.len(), 1); + assert_eq!(receipts_response.0[0].len(), 2); + // Eth68 receipts should have bloom filters - verify the structure + assert_eq!(receipts_response.0[0][0].receipt.cumulative_gas_used, 21000); + assert_eq!(receipts_response.0[0][1].receipt.cumulative_gas_used, 42000); + } +}
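The test above drives the wire protocol through `send_request`, which pairs every `PeerRequest` with a oneshot channel for the response. Distilled to a helper (a sketch only; the helper name, the `NetworkHandle`/`PeerId`/`Receipts` paths, and the `eyre` error plumbing are assumptions beyond what the test itself shows):

```rust
use alloy_primitives::B256;
use tokio::sync::oneshot;

/// Sketch: request receipts for `hashes` from `peer` and await the answer.
async fn request_receipts(
    handle: &reth_network::NetworkHandle,
    peer: reth_network_api::PeerId,
    hashes: Vec<B256>,
) -> eyre::Result<reth_eth_wire::Receipts> {
    let (tx, rx) = oneshot::channel();
    // the transactions manager routes the request to the peer's active session
    handle.send_request(
        peer,
        reth_network::PeerRequest::GetReceipts {
            request: reth_eth_wire::GetReceipts(hashes),
            response: tx,
        },
    );
    // first `?`: the response sender was dropped; second `?`: the peer
    // answered with a `RequestError`
    Ok(rx.await??)
}
```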
+ +#[tokio::test(flavor = "multi_thread")] +async fn test_eth69_get_headers() { + reth_tracing::init_test_tracing(); + let mut rng = rand::rng(); + let mock_provider = Arc::new(MockEthProvider::default()); + + let mut net: Testnet<Arc<MockEthProvider>, TestPool> = Testnet::default(); + + // Create peers with ETH69 protocol + let p0 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth69.into())); + net.add_peer_with_config(p0).await.unwrap(); + + let p1 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth69.into())); + net.add_peer_with_config(p1).await.unwrap(); + + // install request handlers + net.for_each_mut(|peer| peer.install_request_handler()); + + let handle0 = net.peers()[0].handle(); + let mut events0 = NetworkEventStream::new(handle0.event_listener()); + + let handle1 = net.peers()[1].handle(); + + let _handle = net.spawn(); + + let fetch0 = handle0.fetch_client().await.unwrap(); + + handle0.add_peer(*handle1.peer_id(), handle1.local_addr()); + let connected = events0.next_session_established().await.unwrap(); + assert_eq!(connected, *handle1.peer_id()); + + let start: u64 = rng.random(); + let mut hash = rng.random(); + // request some headers via eth69 connection + for idx in 0..50 { + let header = Header { number: start + idx, parent_hash: hash, ..Default::default() }; + hash = rng.random(); + + mock_provider.add_header(hash, header.clone()); + + let req = + HeadersRequest { start: hash.into(), limit: 1, direction: HeadersDirection::Falling }; + + let res = fetch0.get_headers(req).await; + assert!(res.is_ok(), "{res:?}"); + + let headers = res.unwrap().1; + assert_eq!(headers.len(), 1); + assert_eq!(headers[0], header); + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_eth69_get_bodies() { + reth_tracing::init_test_tracing(); + let mut rng = rand::rng(); + let mock_provider = Arc::new(MockEthProvider::default()); + let mut tx_gen = TransactionGenerator::new(rand::rng()); + + let mut net: Testnet<Arc<MockEthProvider>, TestPool> = Testnet::default(); + + // Create peers with ETH69 protocol + let p0 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth69.into())); + net.add_peer_with_config(p0).await.unwrap(); + + let p1 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth69.into())); + net.add_peer_with_config(p1).await.unwrap(); + + // install request handlers + net.for_each_mut(|peer| peer.install_request_handler()); + + let handle0 = net.peers()[0].handle(); + let mut events0 = NetworkEventStream::new(handle0.event_listener()); + + let handle1 = net.peers()[1].handle(); + + let _handle = net.spawn(); + + let fetch0 = handle0.fetch_client().await.unwrap(); + + handle0.add_peer(*handle1.peer_id(), handle1.local_addr()); + let connected = events0.next_session_established().await.unwrap(); + assert_eq!(connected, *handle1.peer_id()); + + // request some blocks via eth69 connection + for _ in 0..50 { + let block_hash = rng.random(); + let mut block: Block = Block::default(); + block.body.transactions.push(tx_gen.gen_eip4844()); + + mock_provider.add_block(block_hash, block.clone()); + + let res = fetch0.get_block_bodies(vec![block_hash]).await; + assert!(res.is_ok(), "{res:?}"); + + let blocks = res.unwrap().1; + assert_eq!(blocks.len(), 1); + assert_eq!(blocks[0], block.body); + } +}
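Both eth69 tests fetch through a `FetchClient` obtained from the network handle, so the request API is identical regardless of which protocol version a session negotiated; only the wire encoding differs underneath. A condensed sketch of the header path (the helper name is hypothetical, and the concrete handle and header types are assumptions; the trait calls mirror the tests above):

```rust
use alloy_primitives::B256;
use reth_eth_wire::HeadersDirection;
use reth_network::BlockDownloaderProvider;
use reth_network_p2p::headers::client::{HeadersClient, HeadersRequest};

/// Sketch: fetch a single header by hash through the version-agnostic
/// fetch client, as the eth69 tests above do.
async fn fetch_header(
    handle: &reth_network::NetworkHandle,
    hash: B256,
) -> eyre::Result<Option<alloy_consensus::Header>> {
    // the fetch client multiplexes over whatever eth version each
    // session negotiated (eth/66 through eth/69)
    let client = handle.fetch_client().await?;
    let req =
        HeadersRequest { start: hash.into(), limit: 1, direction: HeadersDirection::Falling };
    // `.1` drops the id of the peer that served the response, mirroring
    // `res.unwrap().1` in the tests
    let headers = client.get_headers(req).await?.1;
    Ok(headers.into_iter().next())
}
```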
+ +#[tokio::test(flavor = "multi_thread")] +async fn test_eth69_get_receipts() { + reth_tracing::init_test_tracing(); + let mut rng = rand::rng(); + let mock_provider = Arc::new(MockEthProvider::default()); + + let mut net: Testnet<Arc<MockEthProvider>, TestPool> = Testnet::default(); + + // Create peers with ETH69 protocol + let p0 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth69.into())); + net.add_peer_with_config(p0).await.unwrap(); + + let p1 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth69.into())); + net.add_peer_with_config(p1).await.unwrap(); + + // install request handlers + net.for_each_mut(|peer| peer.install_request_handler()); + + let handle0 = net.peers()[0].handle(); + let mut events0 = NetworkEventStream::new(handle0.event_listener()); + + let handle1 = net.peers()[1].handle(); + + let _handle = net.spawn(); + + handle0.add_peer(*handle1.peer_id(), handle1.local_addr()); + + // Wait for the session to be established + let connected = events0.next_session_established().await.unwrap(); + assert_eq!(connected, *handle1.peer_id()); + + // Create test receipts and add them to the mock provider + for block_num in 1..=10 { + let block_hash = rng.random(); + let header = Header { number: block_num, ..Default::default() }; + + // Create some test receipts + let receipts = vec![ + reth_ethereum_primitives::Receipt { + cumulative_gas_used: 21000, + success: true, + ..Default::default() + }, + reth_ethereum_primitives::Receipt { + cumulative_gas_used: 42000, + success: false, + ..Default::default() + }, + ]; + + mock_provider.add_header(block_hash, header.clone()); + mock_provider.add_receipts(header.number, receipts); + + let (tx, rx) = oneshot::channel(); + handle0.send_request( + *handle1.peer_id(), + reth_network::PeerRequest::GetReceipts { + request: reth_eth_wire::GetReceipts(vec![block_hash]), + response: tx, + }, + ); + + let result = rx.await.unwrap(); + let receipts_response = match result { + Ok(resp) => resp, + Err(e) => panic!("Failed to get receipts response: {e:?}"), + }; + assert_eq!(receipts_response.0.len(), 1); + assert_eq!(receipts_response.0[0].len(), 2); + // When using GetReceipts request with ETH69 peers, the response should still include bloom + // filters. The protocol version handling is done at a lower level. + assert_eq!(receipts_response.0[0][0].receipt.cumulative_gas_used, 21000); + assert_eq!(receipts_response.0[0][1].receipt.cumulative_gas_used, 42000); + } +} diff --git a/crates/net/network/tests/it/session.rs b/crates/net/network/tests/it/session.rs index 5ab305e5746..24875a0f410 100644 --- a/crates/net/network/tests/it/session.rs +++ b/crates/net/network/tests/it/session.rs @@ -37,7 +37,7 @@ async fn test_session_established_with_highest_version() { NetworkEvent::ActivePeerSession { info, .. } => { let SessionInfo { peer_id, status, .. 
} = info; assert_eq!(handle1.peer_id(), &peer_id); - assert_eq!(status.version, EthVersion::Eth68); + assert_eq!(status.version, EthVersion::LATEST); } ev => { panic!("unexpected event {ev:?}") @@ -123,3 +123,365 @@ async fn test_capability_version_mismatch() { handle.terminate().await; } + +#[tokio::test(flavor = "multi_thread")] +async fn test_eth69_peers_can_connect() { + reth_tracing::init_test_tracing(); + + let mut net = Testnet::create(0).await; + + // Create two peers that only support ETH69 + let p0 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth69.into())); + net.add_peer_with_config(p0).await.unwrap(); + + let p1 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth69.into())); + net.add_peer_with_config(p1).await.unwrap(); + + net.for_each(|peer| assert_eq!(0, peer.num_peers())); + + let mut handles = net.handles(); + let handle0 = handles.next().unwrap(); + let handle1 = handles.next().unwrap(); + drop(handles); + + let handle = net.spawn(); + + let mut events = handle0.event_listener().take(2); + handle0.add_peer(*handle1.peer_id(), handle1.local_addr()); + + while let Some(event) = events.next().await { + match event { + NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => { + assert_eq!(handle1.peer_id(), &peer_id); + } + NetworkEvent::ActivePeerSession { info, .. } => { + let SessionInfo { peer_id, status, .. } = info; + assert_eq!(handle1.peer_id(), &peer_id); + // Both peers support only ETH69, so they should connect with ETH69 + assert_eq!(status.version, EthVersion::Eth69); + } + ev => { + panic!("unexpected event: {ev:?}") + } + } + } + + handle.terminate().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_peers_negotiate_highest_version_eth69() { + reth_tracing::init_test_tracing(); + + let mut net = Testnet::create(0).await; + + // Create one peer with multiple ETH versions including ETH69 + let p0 = PeerConfig::with_protocols( + NoopProvider::default(), + vec![ + EthVersion::Eth69.into(), + EthVersion::Eth68.into(), + EthVersion::Eth67.into(), + EthVersion::Eth66.into(), + ], + ); + net.add_peer_with_config(p0).await.unwrap(); + + // Create another peer with multiple ETH versions including ETH69 + let p1 = PeerConfig::with_protocols( + NoopProvider::default(), + vec![EthVersion::Eth69.into(), EthVersion::Eth68.into(), EthVersion::Eth67.into()], + ); + net.add_peer_with_config(p1).await.unwrap(); + + net.for_each(|peer| assert_eq!(0, peer.num_peers())); + + let mut handles = net.handles(); + let handle0 = handles.next().unwrap(); + let handle1 = handles.next().unwrap(); + drop(handles); + + let handle = net.spawn(); + + let mut events = handle0.event_listener().take(2); + handle0.add_peer(*handle1.peer_id(), handle1.local_addr()); + + while let Some(event) = events.next().await { + match event { + NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => { + assert_eq!(handle1.peer_id(), &peer_id); + } + NetworkEvent::ActivePeerSession { info, .. } => { + let SessionInfo { peer_id, status, .. 
} = info; + assert_eq!(handle1.peer_id(), &peer_id); + // Both peers support ETH69, so they should negotiate to the highest version: ETH69 + assert_eq!(status.version, EthVersion::Eth69); + } + ev => { + panic!("unexpected event: {ev:?}") + } + } + } + + handle.terminate().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_eth69_vs_eth68_incompatible() { + reth_tracing::init_test_tracing(); + + let mut net = Testnet::create(0).await; + + // Create one peer that only supports ETH69 + let p0 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth69.into())); + net.add_peer_with_config(p0).await.unwrap(); + + // Create another peer that only supports ETH68 + let p1 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth68.into())); + net.add_peer_with_config(p1).await.unwrap(); + + net.for_each(|peer| assert_eq!(0, peer.num_peers())); + + let mut handles = net.handles(); + let handle0 = handles.next().unwrap(); + let handle1 = handles.next().unwrap(); + drop(handles); + + let handle = net.spawn(); + + let events = handle0.event_listener(); + let mut event_stream = NetworkEventStream::new(events); + + handle0.add_peer(*handle1.peer_id(), handle1.local_addr()); + + let added_peer_id = event_stream.peer_added().await.unwrap(); + assert_eq!(added_peer_id, *handle1.peer_id()); + + // Peers with no shared ETH version should fail to connect and be removed. + let removed_peer_id = event_stream.peer_removed().await.unwrap(); + assert_eq!(removed_peer_id, *handle1.peer_id()); + + handle.terminate().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_eth69_mixed_version_negotiation() { + reth_tracing::init_test_tracing(); + + let mut net = Testnet::create(0).await; + + // Create one peer that supports ETH69 + ETH68 + let p0 = PeerConfig::with_protocols( + NoopProvider::default(), + vec![EthVersion::Eth69.into(), EthVersion::Eth68.into()], + ); + net.add_peer_with_config(p0).await.unwrap(); + + // Create another peer that only supports ETH68 + let p1 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth68.into())); + net.add_peer_with_config(p1).await.unwrap(); + + net.for_each(|peer| assert_eq!(0, peer.num_peers())); + + let mut handles = net.handles(); + let handle0 = handles.next().unwrap(); + let handle1 = handles.next().unwrap(); + drop(handles); + + let handle = net.spawn(); + + let mut events = handle0.event_listener().take(2); + handle0.add_peer(*handle1.peer_id(), handle1.local_addr()); + + while let Some(event) = events.next().await { + match event { + NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => { + assert_eq!(handle1.peer_id(), &peer_id); + } + NetworkEvent::ActivePeerSession { info, .. } => { + let SessionInfo { peer_id, status, .. 
} = info; + assert_eq!(handle1.peer_id(), &peer_id); + // Should negotiate to ETH68 (highest common version) + assert_eq!(status.version, EthVersion::Eth68); + } + ev => { + panic!("unexpected event: {ev:?}") + } + } + } + + handle.terminate().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_multiple_peers_different_eth_versions() { + reth_tracing::init_test_tracing(); + + let mut net = Testnet::create(0).await; + + // Create a peer that supports all versions (ETH66-ETH69) + let p0 = PeerConfig::with_protocols( + NoopProvider::default(), + vec![ + EthVersion::Eth69.into(), + EthVersion::Eth68.into(), + EthVersion::Eth67.into(), + EthVersion::Eth66.into(), + ], + ); + net.add_peer_with_config(p0).await.unwrap(); + + // Create a peer that only supports newer versions (ETH68-ETH69) + let p1 = PeerConfig::with_protocols( + NoopProvider::default(), + vec![EthVersion::Eth69.into(), EthVersion::Eth68.into()], + ); + net.add_peer_with_config(p1).await.unwrap(); + + // Create a peer that only supports older versions (ETH66-ETH67) + let p2 = PeerConfig::with_protocols( + NoopProvider::default(), + vec![EthVersion::Eth67.into(), EthVersion::Eth66.into()], + ); + net.add_peer_with_config(p2).await.unwrap(); + + net.for_each(|peer| assert_eq!(0, peer.num_peers())); + + let mut handles = net.handles(); + let handle0 = handles.next().unwrap(); // All versions peer + let handle1 = handles.next().unwrap(); // Newer versions peer + let handle2 = handles.next().unwrap(); // Older versions peer + drop(handles); + + let handle = net.spawn(); + + let events = handle0.event_listener(); + let mut event_stream = NetworkEventStream::new(events); + + // Connect peer0 (all versions) to peer1 (newer versions) - should negotiate ETH69 + handle0.add_peer(*handle1.peer_id(), handle1.local_addr()); + + let added_peer_id = event_stream.peer_added().await.unwrap(); + assert_eq!(added_peer_id, *handle1.peer_id()); + + let established_peer_id = event_stream.next_session_established().await.unwrap(); + assert_eq!(established_peer_id, *handle1.peer_id()); + + // Connect peer0 (all versions) to peer2 (older versions) - should negotiate ETH67 + handle0.add_peer(*handle2.peer_id(), handle2.local_addr()); + + let added_peer_id = event_stream.peer_added().await.unwrap(); + assert_eq!(added_peer_id, *handle2.peer_id()); + + let established_peer_id = event_stream.next_session_established().await.unwrap(); + assert_eq!(established_peer_id, *handle2.peer_id()); + + // Both connections should be established successfully + + handle.terminate().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_eth69_capability_negotiation_fallback() { + reth_tracing::init_test_tracing(); + + let mut net = Testnet::create(0).await; + + // Create a peer that prefers ETH69 but supports fallback to ETH67 + let p0 = PeerConfig::with_protocols( + NoopProvider::default(), + vec![EthVersion::Eth69.into(), EthVersion::Eth67.into()], + ); + net.add_peer_with_config(p0).await.unwrap(); + + // Create a peer that skips ETH68 and only supports ETH67/ETH66 + let p1 = PeerConfig::with_protocols( + NoopProvider::default(), + vec![EthVersion::Eth67.into(), EthVersion::Eth66.into()], + ); + net.add_peer_with_config(p1).await.unwrap(); + + net.for_each(|peer| assert_eq!(0, peer.num_peers())); + + let mut handles = net.handles(); + let handle0 = handles.next().unwrap(); + let handle1 = handles.next().unwrap(); + drop(handles); + + let handle = net.spawn(); + + let mut events = handle0.event_listener().take(2); + 
handle0.add_peer(*handle1.peer_id(), handle1.local_addr()); + + while let Some(event) = events.next().await { + match event { + NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => { + assert_eq!(handle1.peer_id(), &peer_id); + } + NetworkEvent::ActivePeerSession { info, .. } => { + let SessionInfo { peer_id, status, .. } = info; + assert_eq!(handle1.peer_id(), &peer_id); + // Should fallback to ETH67 (skipping ETH68 which neither supports) + assert_eq!(status.version, EthVersion::Eth67); + } + ev => { + panic!("unexpected event: {ev:?}") + } + } + } + + handle.terminate().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_overlapping_version_sets_negotiation() { + reth_tracing::init_test_tracing(); + + let mut net = Testnet::create(0).await; + + // Peer 0: supports ETH69, ETH67, ETH66 (skips ETH68) + let p0 = PeerConfig::with_protocols( + NoopProvider::default(), + vec![EthVersion::Eth69.into(), EthVersion::Eth67.into(), EthVersion::Eth66.into()], + ); + net.add_peer_with_config(p0).await.unwrap(); + + // Peer 1: supports ETH68, ETH67, ETH66 (skips ETH69) + let p1 = PeerConfig::with_protocols( + NoopProvider::default(), + vec![EthVersion::Eth68.into(), EthVersion::Eth67.into(), EthVersion::Eth66.into()], + ); + net.add_peer_with_config(p1).await.unwrap(); + + net.for_each(|peer| assert_eq!(0, peer.num_peers())); + + let mut handles = net.handles(); + let handle0 = handles.next().unwrap(); + let handle1 = handles.next().unwrap(); + drop(handles); + + let handle = net.spawn(); + + let mut events = handle0.event_listener().take(2); + handle0.add_peer(*handle1.peer_id(), handle1.local_addr()); + + while let Some(event) = events.next().await { + match event { + NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => { + assert_eq!(handle1.peer_id(), &peer_id); + } + NetworkEvent::ActivePeerSession { info, .. } => { + let SessionInfo { peer_id, status, .. } = info; + assert_eq!(handle1.peer_id(), &peer_id); + // Should negotiate to ETH67 (highest common version between ETH69,67,66 and + // ETH68,67,66) + assert_eq!(status.version, EthVersion::Eth67); + } + ev => { + panic!("unexpected event: {ev:?}") + } + } + } + + handle.terminate().await; +} diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index c5f7c57c441..540905960b5 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -168,6 +168,48 @@ impl NodeBuilder<(), ChainSpec> { pub const fn new(config: NodeConfig) -> Self { Self { config, database: () } } +} + +impl NodeBuilder { + /// Returns a reference to the node builder's config. + pub const fn config(&self) -> &NodeConfig { + &self.config + } + + /// Returns a mutable reference to the node builder's config. + pub const fn config_mut(&mut self) -> &mut NodeConfig { + &mut self.config + } + + /// Returns a reference to the node's database + pub const fn db(&self) -> &DB { + &self.database + } + + /// Returns a mutable reference to the node's database + pub const fn db_mut(&mut self) -> &mut DB { + &mut self.database + } + + /// Applies a fallible function to the builder. + pub fn try_apply(self, f: F) -> Result + where + F: FnOnce(Self) -> Result, + { + f(self) + } + + /// Applies a fallible function to the builder, if the condition is `true`. 
+ pub fn try_apply_if(self, cond: bool, f: F) -> Result + where + F: FnOnce(Self) -> Result, + { + if cond { + f(self) + } else { + Ok(self) + } + } /// Apply a function to the builder pub fn apply(self, f: F) -> Self @@ -190,18 +232,6 @@ impl NodeBuilder<(), ChainSpec> { } } -impl NodeBuilder { - /// Returns a reference to the node builder's config. - pub const fn config(&self) -> &NodeConfig { - &self.config - } - - /// Returns a mutable reference to the node builder's config. - pub const fn config_mut(&mut self) -> &mut NodeConfig { - &mut self.config - } -} - impl NodeBuilder { /// Configures the underlying database that the node will use. pub fn with_database(self, database: D) -> NodeBuilder { @@ -426,6 +456,36 @@ where &self.builder.config } + /// Returns a reference to node's database. + pub const fn db(&self) -> &T::DB { + &self.builder.adapter.database + } + + /// Returns a mutable reference to node's database. + pub const fn db_mut(&mut self) -> &mut T::DB { + &mut self.builder.adapter.database + } + + /// Applies a fallible function to the builder. + pub fn try_apply(self, f: F) -> Result + where + F: FnOnce(Self) -> Result, + { + f(self) + } + + /// Applies a fallible function to the builder, if the condition is `true`. + pub fn try_apply_if(self, cond: bool, f: F) -> Result + where + F: FnOnce(Self) -> Result, + { + if cond { + f(self) + } else { + Ok(self) + } + } + /// Apply a function to the builder pub fn apply(self, f: F) -> Self where diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index fb289886e36..f7696799e97 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -3,13 +3,14 @@ use crate::{ components::{NodeComponents, NodeComponentsBuilder}, hooks::OnComponentInitializedHook, - BuilderContext, NodeAdapter, + BuilderContext, ExExLauncher, NodeAdapter, PrimitivesTy, }; +use alloy_consensus::BlockHeader as _; use alloy_eips::eip2124::Head; use alloy_primitives::{BlockNumber, B256}; use eyre::{Context, OptionExt}; use rayon::ThreadPoolBuilder; -use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; +use reth_chainspec::{Chain, EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; use reth_consensus::noop::NoopConsensus; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; @@ -18,12 +19,13 @@ use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHe use reth_engine_local::MiningMode; use reth_engine_tree::tree::{InvalidBlockHook, InvalidBlockHooks, NoopInvalidBlockHook}; use reth_evm::{noop::NoopEvmConfig, ConfigureEvm}; +use reth_exex::ExExManagerHandle; use reth_fs_util as fs; use reth_invalid_block_hooks::InvalidBlockWitnessHook; use reth_network_p2p::headers::client::HeadersClient; use reth_node_api::{FullNodeTypes, NodeTypes, NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_node_core::{ - args::InvalidBlockHookType, + args::{DefaultEraHost, InvalidBlockHookType}, dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, primitives::BlockHeader, @@ -41,14 +43,18 @@ use reth_node_metrics::{ }; use reth_provider::{ providers::{NodeTypesForProvider, ProviderNodeTypes, StaticFileProvider}, - BlockHashReader, BlockNumReader, ChainSpecProvider, ProviderError, ProviderFactory, - ProviderResult, StageCheckpointReader, StateProviderFactory, StaticFileProviderFactory, + BlockHashReader, BlockNumReader, BlockReaderIdExt, ChainSpecProvider, ProviderError, + ProviderFactory, 
ProviderResult, StageCheckpointReader, StateProviderFactory, + StaticFileProviderFactory, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_api::clients::EthApiClient; use reth_rpc_builder::config::RethRpcServerConfig; use reth_rpc_layer::JwtSecret; -use reth_stages::{sets::DefaultStages, MetricEvent, PipelineBuilder, PipelineTarget, StageId}; +use reth_stages::{ + sets::DefaultStages, stages::EraImportSource, MetricEvent, PipelineBuilder, PipelineTarget, + StageId, +}; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, error, info, warn}; @@ -59,6 +65,9 @@ use tokio::sync::{ oneshot, watch, }; +use futures::{future::Either, stream, Stream, StreamExt}; +use reth_node_events::{cl::ConsensusLayerHealthEvents, node::NodeEvent}; + /// Reusable setup for launching a node. /// /// This provides commonly used boilerplate for launching a node. @@ -85,10 +94,13 @@ impl LaunchContext { /// `config`. /// /// Attaches both the `NodeConfig` and the loaded `reth.toml` config to the launch context. - pub fn with_loaded_toml_config( + pub fn with_loaded_toml_config( self, config: NodeConfig, - ) -> eyre::Result>> { + ) -> eyre::Result>> + where + ChainSpec: EthChainSpec + reth_chainspec::EthereumHardforks, + { let toml_config = self.load_toml_config(&config)?; Ok(self.with(WithConfigs { config, toml_config })) } @@ -97,10 +109,13 @@ impl LaunchContext { /// `config`. /// /// This is async because the trusted peers may have to be resolved. - pub fn load_toml_config( + pub fn load_toml_config( &self, config: &NodeConfig, - ) -> eyre::Result { + ) -> eyre::Result + where + ChainSpec: EthChainSpec + reth_chainspec::EthereumHardforks, + { let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config()); let mut toml_config = reth_config::Config::from_path(&config_path) @@ -117,11 +132,14 @@ impl LaunchContext { } /// Save prune config to the toml file if node is a full node. - fn save_pruning_config_if_full_node( + fn save_pruning_config_if_full_node( reth_config: &mut reth_config::Config, config: &NodeConfig, config_path: impl AsRef, - ) -> eyre::Result<()> { + ) -> eyre::Result<()> + where + ChainSpec: EthChainSpec + reth_chainspec::EthereumHardforks, + { if reth_config.prune.is_none() { if let Some(prune_config) = config.prune_config() { reth_config.update_prune_config(prune_config); @@ -273,8 +291,14 @@ impl LaunchContextWith Self { if self.toml_config_mut().stages.etl.dir.is_none() { - self.toml_config_mut().stages.etl.dir = - Some(EtlConfig::from_datadir(self.data_dir().data_dir())) + let etl_path = EtlConfig::from_datadir(self.data_dir().data_dir()); + if etl_path.exists() { + // Remove etl-path files on launch + if let Err(err) = fs::remove_dir_all(&etl_path) { + warn!(target: "reth::cli", ?etl_path, %err, "Failed to remove ETL path on launch"); + } + } + self.toml_config_mut().stages.etl.dir = Some(etl_path); } self @@ -334,7 +358,10 @@ impl LaunchContextWith Option { + pub fn prune_config(&self) -> Option + where + ChainSpec: reth_chainspec::EthereumHardforks, + { let Some(mut node_prune_config) = self.node_config().prune_config() else { // No CLI config is set, use the toml config. 
return self.toml_config().prune.clone(); @@ -346,12 +373,18 @@ impl LaunchContextWith PruneModes { + pub fn prune_modes(&self) -> PruneModes + where + ChainSpec: reth_chainspec::EthereumHardforks, + { self.prune_config().map(|config| config.segments).unwrap_or_default() } /// Returns an initialized [`PrunerBuilder`] based on the configured [`PruneConfig`] - pub fn pruner_builder(&self) -> PrunerBuilder { + pub fn pruner_builder(&self) -> PrunerBuilder + where + ChainSpec: reth_chainspec::EthereumHardforks, + { PrunerBuilder::new(self.prune_config().unwrap_or_default()) .delete_limit(self.chain_spec().prune_delete_limit()) .timeout(PrunerBuilder::DEFAULT_TIMEOUT) @@ -867,6 +900,36 @@ where Ok(None) } + /// Expire the pre-merge transactions if the node is configured to do so and the chain has a + /// merge block. + /// + /// If the node is configured to prune pre-merge transactions and it has synced past the merge + /// block, it will delete the pre-merge transaction static files if they still exist. + pub fn expire_pre_merge_transactions(&self) -> eyre::Result<()> + where + T: FullNodeTypes, + { + if self.node_config().pruning.bodies_pre_merge { + if let Some(merge_block) = + self.chain_spec().ethereum_fork_activation(EthereumHardfork::Paris).block_number() + { + // Ensure we only expire transactions after we synced past the merge block. + let Some(latest) = self.blockchain_db().latest_header()? else { return Ok(()) }; + if latest.number() > merge_block { + let provider = self.blockchain_db().static_file_provider(); + if provider.get_lowest_transaction_static_file_block() < Some(merge_block) { + info!(target: "reth::cli", merge_block, "Expiring pre-merge transactions"); + provider.delete_transactions_below(merge_block)?; + } else { + debug!(target: "reth::cli", merge_block, "No pre-merge transactions to expire"); + } + } + } + } + + Ok(()) + } + /// Returns the metrics sender. pub fn sync_metrics_tx(&self) -> UnboundedSender { self.right().db_provider_container.metrics_sender.clone() @@ -876,6 +939,65 @@ where pub const fn components(&self) -> &CB::Components { &self.node_adapter().components } + + /// Launches ExEx (Execution Extensions) and returns the ExEx manager handle. + #[allow(clippy::type_complexity)] + pub async fn launch_exex( + &self, + installed_exex: Vec<( + String, + Box>>, + )>, + ) -> eyre::Result>>> { + ExExLauncher::new( + self.head(), + self.node_adapter().clone(), + installed_exex, + self.configs().clone(), + ) + .launch() + .await + } + + /// Creates the ERA import source based on node configuration. + /// + /// Returns `Some(EraImportSource)` if ERA is enabled in the node config, otherwise `None`. + pub fn era_import_source(&self) -> Option { + let node_config = self.node_config(); + if !node_config.era.enabled { + return None; + } + + EraImportSource::maybe_new( + node_config.era.source.path.clone(), + node_config.era.source.url.clone(), + || node_config.chain.chain().kind().default_era_host(), + || node_config.datadir().data_dir().join("era").into(), + ) + } + + /// Creates consensus layer health events stream based on node configuration. + /// + /// Returns a stream that monitors consensus layer health if: + /// - No debug tip is configured + /// - Not running in dev mode + /// + /// Otherwise returns an empty stream. 
+ pub fn consensus_layer_events( + &self, + ) -> impl Stream>> + 'static + where + T::Provider: reth_provider::CanonChainTracker, + { + if self.node_config().debug.tip.is_none() && !self.is_dev() { + Either::Left( + ConsensusLayerHealthEvents::new(Box::new(self.blockchain_db().clone())) + .map(Into::into), + ) + } else { + Either::Right(stream::empty()) + } + } } impl @@ -890,13 +1012,13 @@ where CB: NodeComponentsBuilder, { /// Returns the [`InvalidBlockHook`] to use for the node. - pub fn invalid_block_hook( + pub async fn invalid_block_hook( &self, ) -> eyre::Result::Primitives>>> { let Some(ref hook) = self.node_config().debug.invalid_block_hook else { return Ok(Box::new(NoopInvalidBlockHook::default())) }; - let healthy_node_rpc_client = self.get_healthy_node_client()?; + let healthy_node_rpc_client = self.get_healthy_node_client().await?; let output_directory = self.data_dir().invalid_block_hooks(); let hooks = hook @@ -924,32 +1046,31 @@ where } /// Returns an RPC client for the healthy node, if configured in the node config. - fn get_healthy_node_client(&self) -> eyre::Result> { - self.node_config() - .debug - .healthy_node_rpc_url - .as_ref() - .map(|url| { - let client = jsonrpsee::http_client::HttpClientBuilder::default().build(url)?; - - // Verify that the healthy node is running the same chain as the current node. - let chain_id = futures::executor::block_on(async { - EthApiClient::< - alloy_rpc_types::Transaction, - alloy_rpc_types::Block, - alloy_rpc_types::Receipt, - alloy_rpc_types::Header, - >::chain_id(&client) - .await - })? - .ok_or_eyre("healthy node rpc client didn't return a chain id")?; - if chain_id.to::() != self.chain_id().id() { - eyre::bail!("invalid chain id for healthy node: {chain_id}") - } + async fn get_healthy_node_client( + &self, + ) -> eyre::Result> { + let Some(url) = self.node_config().debug.healthy_node_rpc_url.as_ref() else { + return Ok(None); + }; - Ok(client) - }) - .transpose() + let client = jsonrpsee::http_client::HttpClientBuilder::default().build(url)?; + + // Verify that the healthy node is running the same chain as the current node. + let chain_id = EthApiClient::< + alloy_rpc_types::TransactionRequest, + alloy_rpc_types::Transaction, + alloy_rpc_types::Block, + alloy_rpc_types::Receipt, + alloy_rpc_types::Header, + >::chain_id(&client) + .await? + .ok_or_eyre("healthy node rpc client didn't return a chain id")?; + + if chain_id.to::() != self.chain_id().id() { + eyre::bail!("invalid chain id for healthy node: {chain_id}") + } + + Ok(Some(client)) } } @@ -1088,7 +1209,10 @@ mod tests { storage_history_full: false, storage_history_distance: None, storage_history_before: None, + bodies_pre_merge: false, + bodies_distance: None, receipts_log_filter: None, + bodies_before: None, }, ..NodeConfig::test() }; diff --git a/crates/node/builder/src/launch/debug.rs b/crates/node/builder/src/launch/debug.rs index e299399d40a..fd85ba45206 100644 --- a/crates/node/builder/src/launch/debug.rs +++ b/crates/node/builder/src/launch/debug.rs @@ -10,18 +10,78 @@ use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, use reth_node_api::{BlockTy, FullNodeComponents}; use std::sync::Arc; use tracing::info; -/// [`Node`] extension with support for debugging utilities, see [`DebugNodeLauncher`] for more -/// context. + +/// [`Node`] extension with support for debugging utilities. +/// +/// This trait provides additional necessary conversion from RPC block type to the node's +/// primitive block type, e.g. 
`alloy_rpc_types_eth::Block` to the node's internal block +/// representation. +/// +/// This is used in conjunction with the [`DebugNodeLauncher`] to enable debugging features such as: +/// +/// - **Etherscan Integration**: Use Etherscan as a consensus client to follow the chain and submit +/// blocks to the local engine. +/// - **RPC Consensus Client**: Connect to an external RPC endpoint to fetch blocks and submit them +/// to the local engine to follow the chain. +/// +/// See [`DebugNodeLauncher`] for the launcher that enables these features. +/// +/// # Implementation +/// +/// To implement this trait, you need to: +/// 1. Define the RPC block type (typically `alloy_rpc_types_eth::Block`) +/// 2. Implement the conversion from RPC format to your primitive block type +/// +/// # Example +/// +/// ```ignore +/// impl> DebugNode for MyNode { +/// type RpcBlock = alloy_rpc_types_eth::Block; +/// +/// fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> BlockTy { +/// // Convert from RPC format to primitive format by converting the transactions +/// rpc_block.into_consensus().convert_transactions() +/// } +/// } +/// ``` pub trait DebugNode: Node { /// RPC block type. Used by [`DebugConsensusClient`] to fetch blocks and submit them to the - /// engine. + /// engine. This is intended to match the block format returned by the external RPC endpoint. type RpcBlock: Serialize + DeserializeOwned + 'static; /// Converts an RPC block to a primitive block. + /// + /// This method handles the conversion between the RPC block format and the internal primitive + /// block format used by the node's consensus engine. + /// + /// # Example + /// + /// For Ethereum nodes, this typically converts from `alloy_rpc_types_eth::Block` + /// to the node's internal block representation. fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> BlockTy; } /// Node launcher with support for launching various debugging utilities. +/// +/// This launcher wraps an existing launcher and adds debugging capabilities when +/// certain debug flags are enabled. It provides two main debugging features: +/// +/// ## RPC Consensus Client +/// +/// When `--debug.rpc-consensus-ws ` is provided, the launcher will: +/// - Connect to an external RPC `WebSocket` endpoint +/// - Fetch blocks from that endpoint +/// - Submit them to the local engine for execution +/// - Useful for testing engine behavior with real network data +/// +/// ## Etherscan Consensus Client +/// +/// When `--debug.etherscan [URL]` is provided, the launcher will: +/// - Use Etherscan API as a consensus client +/// - Fetch recent blocks from Etherscan +/// - Submit them to the local engine +/// - Requires `ETHERSCAN_API_KEY` environment variable +/// - Falls back to default Etherscan URL for the chain if URL not provided #[derive(Debug, Clone)] pub struct DebugNodeLauncher { inner: L, @@ -75,7 +135,6 @@ where }); } - // TODO: migrate to devmode with https://github.com/paradigmxyz/reth/issues/10104 if let Some(maybe_custom_etherscan_url) = config.debug.etherscan.clone() { info!(target: "reth::cli", "Using etherscan as consensus client"); diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 025aaacf8f8..a7d31623cd2 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -1,7 +1,15 @@ //! Engine node related functionality. 
+use crate::{ + common::{Attached, LaunchContextWith, WithConfigs}, + hooks::NodeHooks, + rpc::{EngineValidatorAddOn, RethRpcAddOns, RpcHandleProvider}, + setup::build_networked_pipeline, + AddOns, AddOnsContext, FullNode, LaunchContext, LaunchNode, NodeAdapter, + NodeBuilderWithComponents, NodeComponents, NodeComponentsBuilder, NodeHandle, NodeTypesAdapter, +}; use alloy_consensus::BlockHeader; -use futures::{future::Either, stream, stream_select, StreamExt}; +use futures::{stream_select, StreamExt}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_db_api::{database_metrics::DatabaseMetrics, Database}; use reth_engine_local::{LocalMiner, LocalPayloadAttributesBuilder}; @@ -12,21 +20,22 @@ use reth_engine_tree::{ }; use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; -use reth_network::{NetworkSyncUpdater, SyncState}; +use reth_network::{types::BlockRangeUpdate, NetworkSyncUpdater, SyncState}; use reth_network_api::BlockDownloaderProvider; use reth_node_api::{ BeaconConsensusEngineHandle, BuiltPayload, FullNodeTypes, NodeTypes, NodeTypesWithDBAdapter, PayloadAttributesBuilder, PayloadTypes, }; use reth_node_core::{ - args::DefaultEraHost, dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, primitives::Head, }; -use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_provider::providers::{BlockchainProvider, NodeTypesForProvider}; -use reth_stages::stages::EraImportSource; +use reth_node_events::node; +use reth_provider::{ + providers::{BlockchainProvider, NodeTypesForProvider}, + BlockNumReader, +}; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; use reth_tracing::tracing::{debug, error, info}; @@ -34,15 +43,6 @@ use std::sync::Arc; use tokio::sync::{mpsc::unbounded_channel, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; -use crate::{ - common::{Attached, LaunchContextWith, WithConfigs}, - hooks::NodeHooks, - rpc::{EngineValidatorAddOn, RethRpcAddOns, RpcHandleProvider}, - setup::build_networked_pipeline, - AddOns, AddOnsContext, ExExLauncher, FullNode, LaunchContext, LaunchNode, NodeAdapter, - NodeBuilderWithComponents, NodeComponents, NodeComponentsBuilder, NodeHandle, NodeTypesAdapter, -}; - /// The engine node launcher. #[derive(Debug)] pub struct EngineNodeLauncher { @@ -134,22 +134,22 @@ where })? 
.with_components(components_builder, on_component_initialized).await?; - // spawn exexs - let exex_manager_handle = ExExLauncher::new( - ctx.head(), - ctx.node_adapter().clone(), - installed_exex, - ctx.configs().clone(), - ) - .launch() - .await?; + // Try to expire pre-merge transaction history if configured + ctx.expire_pre_merge_transactions()?; + + // spawn exexs if any + let maybe_exex_manager_handle = ctx.launch_exex(installed_exex).await?; // create pipeline - let network_client = ctx.components().network().fetch_client().await?; + let network_handle = ctx.components().network().clone(); + let network_client = network_handle.fetch_client().await?; let (consensus_engine_tx, consensus_engine_rx) = unbounded_channel(); let node_config = ctx.node_config(); + // We always assume that node is syncing after a restart + network_handle.update_sync_state(SyncState::Syncing); + let max_block = ctx.max_block(network_client.clone()).await?; let static_file_producer = ctx.static_file_producer(); @@ -158,21 +158,6 @@ where let consensus = Arc::new(ctx.components().consensus().clone()); - // Configure the pipeline - let pipeline_exex_handle = - exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); - - let era_import_source = if node_config.era.enabled { - EraImportSource::maybe_new( - node_config.era.source.path.clone(), - node_config.era.source.url.clone(), - || node_config.chain.chain().kind().default_era_host(), - || node_config.datadir().data_dir().join("era").into(), - ) - } else { - None - }; - let pipeline = build_networked_pipeline( &ctx.toml_config().stages, network_client.clone(), @@ -184,8 +169,8 @@ where max_block, static_file_producer, ctx.components().evm_config().clone(), - pipeline_exex_handle, - era_import_source, + maybe_exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty), + ctx.era_import_source(), )?; // The new engine writes directly to static files. This ensures that they're up to the tip. @@ -194,7 +179,7 @@ where let pipeline_events = pipeline.events(); let mut pruner_builder = ctx.pruner_builder(); - if let Some(exex_manager_handle) = &exex_manager_handle { + if let Some(exex_manager_handle) = &maybe_exex_manager_handle { pruner_builder = pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); } @@ -246,7 +231,7 @@ where ctx.components().payload_builder_handle().clone(), engine_payload_validator, engine_tree_config, - ctx.invalid_block_hook()?, + ctx.invalid_block_hook().await?, ctx.sync_metrics_tx(), ctx.components().evm_config().clone(), ); @@ -270,14 +255,7 @@ where let events = stream_select!( event_sender.new_listener().map(Into::into), pipeline_events.map(Into::into), - if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { - Either::Left( - ConsensusLayerHealthEvents::new(Box::new(ctx.blockchain_db().clone())) - .map(Into::into), - ) - } else { - Either::Right(stream::empty()) - }, + ctx.consensus_layer_events(), pruner_events.map(Into::into), static_file_producer_events.map(Into::into), ); @@ -295,7 +273,6 @@ where // Run consensus engine to completion let initial_target = ctx.initial_backfill_target()?; - let network_handle = ctx.components().network().clone(); let mut built_payloads = ctx .components() .payload_builder_handle() @@ -304,7 +281,9 @@ where .map_err(|e| eyre::eyre!("Failed to subscribe to payload builder events: {:?}", e))? 
.into_built_payload_stream() .fuse(); + let chainspec = ctx.chain_spec(); + let provider = ctx.blockchain_db().clone(); let (exit, rx) = oneshot::channel(); let terminate_after_backfill = ctx.terminate_after_initial_backfill(); @@ -335,8 +314,6 @@ where debug!(target: "reth::cli", "Terminating after initial backfill"); break } - - network_handle.update_sync_state(SyncState::Idle); } ChainEvent::BackfillSyncStarted => { network_handle.update_sync_state(SyncState::Syncing); @@ -348,7 +325,9 @@ where } ChainEvent::Handler(ev) => { if let Some(head) = ev.canonical_header() { - let head_block = Head { + // Once we're progressing via live sync, we can consider that the node is no longer syncing + network_handle.update_sync_state(SyncState::Idle); + let head_block = Head { number: head.number(), hash: head.hash(), difficulty: head.difficulty(), @@ -356,6 +335,13 @@ where total_difficulty: chainspec.final_paris_total_difficulty().filter(|_| chainspec.is_paris_active_at_block(head.number())).unwrap_or_default(), }; network_handle.update_status(head_block); + + let updated = BlockRangeUpdate { + earliest: provider.earliest_block_number().unwrap_or_default(), + latest: head.number(), + latest_hash: head.hash() + }; + network_handle.update_block_range(updated); } event_sender.notify(ev); } diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 90d9d7b8ac5..70fbe4818b7 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -1,9 +1,12 @@ //! Builder support for rpc components. +pub use jsonrpsee::server::middleware::rpc::{RpcService, RpcServiceBuilder}; +pub use reth_rpc_builder::{middleware::RethRpcMiddleware, Identity}; + use crate::{BeaconConsensusEngineEvent, BeaconConsensusEngineHandle}; use alloy_rpc_types::engine::ClientVersionV1; use alloy_rpc_types_engine::ExecutionData; -use jsonrpsee::RpcModule; +use jsonrpsee::{core::middleware::layer::Either, RpcModule}; use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_node_api::{ @@ -20,7 +23,8 @@ use reth_rpc_api::{eth::helpers::AddDevSigners, IntoEngineApiRpcModule}; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerHandle}, config::RethRpcServerConfig, - RpcModuleBuilder, RpcRegistryInner, RpcServerConfig, RpcServerHandle, TransportRpcModules, + RpcModuleBuilder, RpcRegistryInner, RpcServerConfig, RpcServerHandle, Stack, + TransportRpcModules, }; use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_rpc_eth_types::{cache::cache_new_blocks_task, EthConfig, EthStateCache}; @@ -431,6 +435,7 @@ pub struct RpcAddOns< EthB: EthApiBuilder, EV, EB = BasicEngineApiBuilder, + RpcMiddleware = Identity, > { /// Additional RPC add-ons. pub hooks: RpcHooks, /// Builder for `EthApi` eth_api_builder: EthB, /// Builder for `EngineApi` engine_validator_builder: EV, /// Builder for `EngineApi` engine_api_builder: EB, + /// Configurable RPC middleware stack. + /// + /// This middleware is applied to all RPC requests across all transports (HTTP, WS, IPC). + /// See [`RpcAddOns::with_rpc_middleware`] for more details. 
+ rpc_middleware: RpcMiddleware, } -impl Debug for RpcAddOns +impl Debug for RpcAddOns where Node: FullNodeComponents, EthB: EthApiBuilder, @@ -455,11 +465,12 @@ where .field("eth_api_builder", &"...") .field("engine_validator_builder", &self.engine_validator_builder) .field("engine_api_builder", &self.engine_api_builder) + .field("rpc_middleware", &"...") .finish() } } -impl RpcAddOns +impl RpcAddOns where Node: FullNodeComponents, EthB: EthApiBuilder, @@ -469,28 +480,126 @@ where eth_api_builder: EthB, engine_validator_builder: EV, engine_api_builder: EB, + rpc_middleware: RpcMiddleware, ) -> Self { Self { hooks: RpcHooks::default(), eth_api_builder, engine_validator_builder, engine_api_builder, + rpc_middleware, } } /// Maps the [`EngineApiBuilder`] builder type. - pub fn with_engine_api(self, engine_api_builder: T) -> RpcAddOns { - let Self { hooks, eth_api_builder, engine_validator_builder, .. } = self; - RpcAddOns { hooks, eth_api_builder, engine_validator_builder, engine_api_builder } + pub fn with_engine_api( + self, + engine_api_builder: T, + ) -> RpcAddOns { + let Self { hooks, eth_api_builder, engine_validator_builder, rpc_middleware, .. } = self; + RpcAddOns { + hooks, + eth_api_builder, + engine_validator_builder, + engine_api_builder, + rpc_middleware, + } } /// Maps the [`EngineValidatorBuilder`] builder type. pub fn with_engine_validator( self, engine_validator_builder: T, - ) -> RpcAddOns { - let Self { hooks, eth_api_builder, engine_api_builder, .. } = self; - RpcAddOns { hooks, eth_api_builder, engine_validator_builder, engine_api_builder } + ) -> RpcAddOns { + let Self { hooks, eth_api_builder, engine_api_builder, rpc_middleware, .. } = self; + RpcAddOns { + hooks, + eth_api_builder, + engine_validator_builder, + engine_api_builder, + rpc_middleware, + } + } + + /// Sets the RPC middleware stack for processing RPC requests. + /// + /// This method configures a custom middleware stack that will be applied to all RPC requests + /// across HTTP, `WebSocket`, and IPC transports. The middleware is applied to the RPC service + /// layer, allowing you to intercept, modify, or enhance RPC request processing. + /// + /// + /// # How It Works + /// + /// The middleware uses the Tower ecosystem's `Layer` pattern. When an RPC server is started, + /// the configured middleware stack is applied to create a layered service that processes + /// requests in the order the layers were added. + /// + /// # Examples + /// + /// ```ignore + /// use reth_rpc_builder::{RpcServiceBuilder, RpcRequestMetrics}; + /// use tower::Layer; + /// + /// // Simple example with metrics + /// let metrics_layer = RpcRequestMetrics::new(metrics_recorder); + /// let with_metrics = rpc_addons.with_rpc_middleware( + /// RpcServiceBuilder::new().layer(metrics_layer) + /// ); + /// + /// // Composing multiple middleware layers + /// let middleware_stack = RpcServiceBuilder::new() + /// .layer(rate_limit_layer) + /// .layer(logging_layer) + /// .layer(metrics_layer); + /// let with_full_stack = rpc_addons.with_rpc_middleware(middleware_stack); + /// ``` + /// + /// # Notes + /// + /// - Middleware is applied to the RPC service layer, not the HTTP transport layer + /// - The default middleware is `Identity` (no-op), which passes through requests unchanged + /// - Middleware layers are applied in the order they are added via `.layer()` + pub fn with_rpc_middleware(self, rpc_middleware: T) -> RpcAddOns { + let Self { hooks, eth_api_builder, engine_validator_builder, engine_api_builder, .. 
} = + self; + RpcAddOns { + hooks, + eth_api_builder, + engine_validator_builder, + engine_api_builder, + rpc_middleware, + } + } + + /// Add a new layer `T` to the configured [`RpcServiceBuilder`]. + pub fn layer_rpc_middleware( + self, + layer: T, + ) -> RpcAddOns> { + let Self { + hooks, + eth_api_builder, + engine_validator_builder, + engine_api_builder, + rpc_middleware, + } = self; + let rpc_middleware = Stack::new(rpc_middleware, layer); + RpcAddOns { + hooks, + eth_api_builder, + engine_validator_builder, + engine_api_builder, + rpc_middleware, + } + } + + /// Optionally adds a new layer `T` to the configured [`RpcServiceBuilder`]. + pub fn option_layer_rpc_middleware( + self, + layer: Option, + ) -> RpcAddOns>> { + let layer = layer.map(Either::Left).unwrap_or(Either::Right(Identity::new())); + self.layer_rpc_middleware(layer) } /// Sets the hook that is run once the rpc server is started. @@ -514,7 +623,7 @@ where } } -impl Default for RpcAddOns +impl Default for RpcAddOns where Node: FullNodeComponents, EthB: EthApiBuilder, @@ -522,17 +631,18 @@ where EB: Default, { fn default() -> Self { - Self::new(EthB::default(), EV::default(), EB::default()) + Self::new(EthB::default(), EV::default(), EB::default(), Default::default()) } } -impl RpcAddOns +impl RpcAddOns where N: FullNodeComponents, N::Provider: ChainSpecProvider, EthB: EthApiBuilder, EV: EngineValidatorBuilder, EB: EngineApiBuilder, + RpcMiddleware: RethRpcMiddleware, { /// Launches only the regular RPC server (HTTP/WS/IPC), without the authenticated Engine API /// server. @@ -547,6 +657,7 @@ where where F: FnOnce(RpcModuleContainer<'_, N, EthB::EthApi>) -> eyre::Result<()>, { + let rpc_middleware = self.rpc_middleware.clone(); let setup_ctx = self.setup_rpc_components(ctx, ext).await?; let RpcSetupContext { node, @@ -560,7 +671,7 @@ where engine_handle, } = setup_ctx; - let server_config = config.rpc.rpc_server_config(); + let server_config = config.rpc.rpc_server_config().set_rpc_middleware(rpc_middleware); let rpc_server_handle = Self::launch_rpc_server_internal(server_config, &modules).await?; let handles = @@ -593,6 +704,7 @@ where where F: FnOnce(RpcModuleContainer<'_, N, EthB::EthApi>) -> eyre::Result<()>, { + let rpc_middleware = self.rpc_middleware.clone(); let setup_ctx = self.setup_rpc_components(ctx, ext).await?; let RpcSetupContext { node, @@ -606,7 +718,7 @@ where engine_handle, } = setup_ctx; - let server_config = config.rpc.rpc_server_config(); + let server_config = config.rpc.rpc_server_config().set_rpc_middleware(rpc_middleware); let auth_module_clone = auth_module.clone(); // launch servers concurrently @@ -720,10 +832,13 @@ where } /// Helper to launch the RPC server - async fn launch_rpc_server_internal( - server_config: RpcServerConfig, + async fn launch_rpc_server_internal( + server_config: RpcServerConfig, modules: &TransportRpcModules, - ) -> eyre::Result { + ) -> eyre::Result + where + M: RethRpcMiddleware, + { let handle = server_config.start(modules).await?; if let Some(path) = handle.ipc_endpoint() { @@ -774,13 +889,14 @@ where } } -impl NodeAddOns for RpcAddOns +impl NodeAddOns for RpcAddOns where N: FullNodeComponents, ::Provider: ChainSpecProvider, EthB: EthApiBuilder, EV: EngineValidatorBuilder, EB: EngineApiBuilder, + RpcMiddleware: RethRpcMiddleware, { type Handle = RpcHandle; @@ -802,7 +918,8 @@ where fn hooks_mut(&mut self) -> &mut RpcHooks; } -impl RethRpcAddOns for RpcAddOns +impl RethRpcAddOns + for RpcAddOns where Self: NodeAddOns>, EthB: EthApiBuilder, diff --git 
a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 3aff3175717..1a36c9af5ef 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -23,7 +23,7 @@ reth-network = { workspace = true, features = ["serde"] } reth-network-p2p.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true -reth-rpc-types-compat.workspace = true +reth-rpc-convert.workspace = true reth-transaction-pool.workspace = true reth-tracing.workspace = true reth-config = { workspace = true, features = ["serde"] } diff --git a/crates/node/core/src/args/benchmark_args.rs b/crates/node/core/src/args/benchmark_args.rs index 1ff49c9c84d..0f2a2b2d68c 100644 --- a/crates/node/core/src/args/benchmark_args.rs +++ b/crates/node/core/src/args/benchmark_args.rs @@ -21,7 +21,13 @@ pub struct BenchmarkArgs { /// /// If no path is provided, a secret will be generated and stored in the datadir under /// `//jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default. - #[arg(long = "jwtsecret", value_name = "PATH", global = true, required = false)] + #[arg( + long = "jwt-secret", + alias = "jwtsecret", + value_name = "PATH", + global = true, + required = false + )] pub auth_jwtsecret: Option<PathBuf>, /// The RPC url to use for sending engine requests. diff --git a/crates/node/core/src/args/debug.rs b/crates/node/core/src/args/debug.rs index 83c5c268d7d..d8b6d570384 100644 --- a/crates/node/core/src/args/debug.rs +++ b/crates/node/core/src/args/debug.rs @@ -80,6 +80,11 @@ pub struct DebugArgs { pub invalid_block_hook: Option, /// The RPC URL of a healthy node to use for comparing invalid block hook results against. + /// + /// Debug setting that enables execution witness comparison for troubleshooting bad blocks. + /// When enabled, the node will collect execution witnesses from the specified source and + /// compare them against local execution when a bad block is encountered, helping identify + /// discrepancies in state execution. #[arg( long = "debug.healthy-node-rpc-url", help_heading = "Debug", diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index b523191eeca..3f493a900a9 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -1,8 +1,9 @@ //! Pruning and full node arguments -use crate::args::error::ReceiptsLogError; +use crate::{args::error::ReceiptsLogError, primitives::EthereumHardfork}; use alloy_primitives::{Address, BlockNumber}; use clap::{builder::RangedU64ValueParser, Args}; +use reth_chainspec::EthereumHardforks; use reth_config::config::PruneConfig; use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE}; use std::collections::BTreeMap; @@ -86,11 +87,27 @@ pub struct PruningArgs { /// pruned. #[arg(long = "prune.storagehistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["storage_history_full", "storage_history_distance"])] pub storage_history_before: Option<BlockNumber>, + + // Bodies + /// Prune bodies before the merge block. + #[arg(long = "prune.bodies.pre-merge", value_name = "BLOCKS", conflicts_with_all = &["bodies_distance", "bodies_before"])] + pub bodies_pre_merge: bool, + /// Prune bodies before the `head-N` block number. In other words, keep last N + 1 + /// blocks. + #[arg(long = "prune.bodies.distance", value_name = "BLOCKS", conflicts_with_all = &["bodies_pre_merge", "bodies_before"])] + pub bodies_distance: Option<u64>, + /// Prune bodies before the specified block number. 
The specified block number is not + /// pruned. + #[arg(long = "prune.bodies.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["bodies_distance", "bodies_pre_merge"])] + pub bodies_before: Option<BlockNumber>, } impl PruningArgs { /// Returns pruning configuration. - pub fn prune_config(&self) -> Option<PruneConfig> { + pub fn prune_config<ChainSpec>(&self, chain_spec: &ChainSpec) -> Option<PruneConfig> + where + ChainSpec: EthereumHardforks, + { // Initialise with a default prune configuration. let mut config = PruneConfig::default(); @@ -104,6 +121,8 @@ impl PruningArgs { receipts: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + // TODO: set default to pre-merge block if available + bodies_history: None, receipts_log_filter: Default::default(), }, } @@ -125,6 +144,9 @@ impl PruningArgs { if let Some(mode) = self.account_history_prune_mode() { config.segments.account_history = Some(mode); } + if let Some(mode) = self.bodies_prune_mode(chain_spec) { + config.segments.bodies_history = Some(mode); + } if let Some(mode) = self.storage_history_prune_mode() { config.segments.storage_history = Some(mode); } @@ -140,6 +162,22 @@ impl PruningArgs { Some(config) } + fn bodies_prune_mode<ChainSpec>(&self, chain_spec: &ChainSpec) -> Option<PruneMode> + where + ChainSpec: EthereumHardforks, + { + if self.bodies_pre_merge { + chain_spec + .ethereum_fork_activation(EthereumHardfork::Paris) + .block_number() + .map(PruneMode::Before) + } else if let Some(distance) = self.bodies_distance { + Some(PruneMode::Distance(distance)) + } else { + self.bodies_before.map(PruneMode::Before) + } + } + const fn sender_recovery_prune_mode(&self) -> Option<PruneMode> { if self.sender_recovery_full { Some(PruneMode::Full) diff --git a/crates/node/core/src/lib.rs b/crates/node/core/src/lib.rs index aa4f72bd6a4..b999121c5e9 100644 --- a/crates/node/core/src/lib.rs +++ b/crates/node/core/src/lib.rs @@ -31,6 +31,6 @@ pub mod rpc { /// Re-exported from `reth_rpc::eth`. pub mod compat { - pub use reth_rpc_types_compat::*; + pub use reth_rpc_convert::*; } } diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index e94256556cf..b1998110a33 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -14,7 +14,7 @@ use alloy_primitives::{BlockNumber, B256}; use eyre::eyre; use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET}; use reth_config::config::PruneConfig; -use reth_ethereum_forks::Head; +use reth_ethereum_forks::{EthereumHardforks, Head}; use reth_network_p2p::headers::client::HeadersClient; use reth_primitives_traits::SealedHeader; use reth_stages_types::StageId; @@ -288,8 +288,11 @@ impl NodeConfig { } /// Returns pruning configuration. 
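The precedence that `bodies_prune_mode` above encodes is: `--prune.bodies.pre-merge` wins, then `--prune.bodies.distance`, then `--prune.bodies.before`. Restated as a free function for clarity (a sketch; `PruneMode` is reth's real enum, while `paris_block` stands in for the chain-spec activation lookup):

```rust
use reth_prune_types::PruneMode;

/// Sketch of the bodies-pruning flag precedence.
fn bodies_mode(
    pre_merge: bool,
    distance: Option<u64>,
    before: Option<u64>,
    paris_block: Option<u64>, // Paris activation block from the chain spec
) -> Option<PruneMode> {
    if pre_merge {
        // Yields `None` when the chain has no Paris activation *block*
        // (e.g. chains that schedule forks by timestamp), so nothing is
        // pruned in that case.
        paris_block.map(PruneMode::Before)
    } else if let Some(d) = distance {
        Some(PruneMode::Distance(d))
    } else {
        before.map(PruneMode::Before)
    }
}
```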
- pub fn prune_config(&self) -> Option { - self.pruning.prune_config() + pub fn prune_config(&self) -> Option + where + ChainSpec: EthereumHardforks, + { + self.pruning.prune_config(&self.chain) } /// Returns the max block that the node should run to, looking it up from the network if diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 10173bafdda..bd583c45e42 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -259,6 +259,7 @@ impl NodeState { txs=block.body().transactions().len(), gas=%format_gas(block.gas_used()), gas_throughput=%format_gas_throughput(block.gas_used(), elapsed), + gas_limit=%format_gas(block.gas_limit()), full=%format!("{:.1}%", block.gas_used() as f64 * 100.0 / block.gas_limit() as f64), base_fee=%format!("{:.2}gwei", block.base_fee_per_gas().unwrap_or(0) as f64 / GWEI_TO_WEI as f64), blobs=block.blob_gas_used().unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB, diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index 13245a18b9b..5e90376a7e9 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -220,3 +220,6 @@ pub type PrimitivesTy = ::Primitives; /// Helper type for getting the `Primitives` associated type from a [`NodeTypes`]. pub type KeyHasherTy = <::StateCommitment as StateCommitment>::KeyHasher; + +/// Helper adapter type for accessing [`PayloadTypes::PayloadAttributes`] on [`NodeTypes`]. +pub type PayloadAttrTy = <::Payload as PayloadTypes>::PayloadAttributes; diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 061cede2632..ee5cd27a139 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -45,10 +45,15 @@ mod superchain; #[cfg(feature = "superchain-configs")] pub use superchain::*; +pub use base::BASE_MAINNET; +pub use base_sepolia::BASE_SEPOLIA; pub use dev::OP_DEV; pub use op::OP_MAINNET; pub use op_sepolia::OP_SEPOLIA; +/// Re-export for convenience +pub use reth_optimism_forks::*; + use alloc::{boxed::Box, vec, vec::Vec}; use alloy_chains::Chain; use alloy_consensus::{proofs::storage_root_unhashed, Header}; @@ -56,8 +61,6 @@ use alloy_eips::eip7840::BlobParams; use alloy_genesis::Genesis; use alloy_hardforks::Hardfork; use alloy_primitives::{B256, U256}; -pub use base::BASE_MAINNET; -pub use base_sepolia::BASE_SEPOLIA; use derive_more::{Constructor, Deref, From, Into}; use reth_chainspec::{ BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, DepositContract, @@ -66,7 +69,6 @@ use reth_chainspec::{ }; use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition}; use reth_network_peers::NodeRecord; -use reth_optimism_forks::{OpHardfork, OpHardforks, OP_MAINNET_HARDFORKS}; use reth_optimism_primitives::ADDRESS_L2_TO_L1_MESSAGE_PASSER; use reth_primitives_traits::{sync::LazyLock, SealedHeader}; @@ -737,9 +739,7 @@ mod tests { genesis.hash_slow(), b256!("0xf712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd") ); - let base_fee = genesis - .next_block_base_fee(BASE_MAINNET.base_fee_params_at_timestamp(genesis.timestamp)) - .unwrap(); + let base_fee = BASE_MAINNET.next_block_base_fee(genesis, genesis.timestamp).unwrap(); // assert_eq!(base_fee, 980000000); } @@ -751,9 +751,7 @@ mod tests { genesis.hash_slow(), b256!("0x0dcc9e089e30b90ddfc55be9a37dd15bc551aeee999d2e2b51414c54eaf934e4") ); - let base_fee = genesis - .next_block_base_fee(BASE_SEPOLIA.base_fee_params_at_timestamp(genesis.timestamp)) - .unwrap(); + let base_fee = 
BASE_SEPOLIA.next_block_base_fee(genesis, genesis.timestamp).unwrap(); // assert_eq!(base_fee, 980000000); } @@ -765,9 +763,7 @@ mod tests { genesis.hash_slow(), b256!("0x102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d") ); - let base_fee = genesis - .next_block_base_fee(OP_SEPOLIA.base_fee_params_at_timestamp(genesis.timestamp)) - .unwrap(); + let base_fee = OP_SEPOLIA.next_block_base_fee(genesis, genesis.timestamp).unwrap(); // assert_eq!(base_fee, 980000000); } diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs index b6c5b6b56c2..1c7af0d328c 100644 --- a/crates/optimism/cli/src/app.rs +++ b/crates/optimism/cli/src/app.rs @@ -6,7 +6,7 @@ use reth_cli_runner::CliRunner; use reth_node_metrics::recorder::install_prometheus_recorder; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OpBeaconConsensus; -use reth_optimism_node::{OpExecutorProvider, OpNetworkPrimitives, OpNode}; +use reth_optimism_node::{OpExecutorProvider, OpNode}; use reth_tracing::{FileWorkerGuard, Layers}; use std::fmt; use tracing::info; @@ -84,13 +84,11 @@ where Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::()), Commands::Stage(command) => runner.run_command_until_exit(|ctx| { - command.execute::(ctx, |spec| { + command.execute::(ctx, |spec| { (OpExecutorProvider::optimism(spec.clone()), OpBeaconConsensus::new(spec)) }) }), - Commands::P2P(command) => { - runner.run_until_ctrl_c(command.execute::()) - } + Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), Commands::Recover(command) => { runner.run_command_until_exit(|ctx| command.execute::(ctx)) diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 164af8ab923..3e4201dc73b 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -57,8 +57,10 @@ impl OpBeaconConsensus { } } -impl> - FullConsensus for OpBeaconConsensus +impl FullConsensus for OpBeaconConsensus +where + N: NodePrimitives, + ChainSpec: EthChainSpec
<Header = N::BlockHeader> + OpHardforks + Debug + Send + Sync, { fn validate_block_post_execution( &self, @@ -69,8 +71,10 @@ impl Consensus - for OpBeaconConsensus +impl<B, ChainSpec> Consensus<B> for OpBeaconConsensus<ChainSpec> +where + B: Block, + ChainSpec: EthChainSpec
<Header = B::Header> + OpHardforks + Debug + Send + Sync, { type Error = ConsensusError; @@ -128,8 +132,10 @@ impl Consensus } } -impl HeaderValidator - for OpBeaconConsensus +impl<H, ChainSpec> HeaderValidator<H> for OpBeaconConsensus<ChainSpec> +where + H: BlockHeader, + ChainSpec: EthChainSpec
<Header = H> + OpHardforks + Debug + Send + Sync, { fn validate_header(&self, header: &SealedHeader<H>) -> Result<(), ConsensusError> { let header = header.header(); diff --git a/crates/optimism/consensus/src/validation/mod.rs b/crates/optimism/consensus/src/validation/mod.rs index 1432d0ca37a..4977647d89c 100644 --- a/crates/optimism/consensus/src/validation/mod.rs +++ b/crates/optimism/consensus/src/validation/mod.rs @@ -189,9 +189,9 @@ pub fn decode_holocene_base_fee( /// Read from parent to determine the base fee for the next block /// /// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) -pub fn next_block_base_fee( - chain_spec: impl EthChainSpec + OpHardforks, - parent: impl BlockHeader, +pub fn next_block_base_fee<H: BlockHeader>( + chain_spec: impl EthChainSpec<Header = H>
+ OpHardforks, + parent: &H, timestamp: u64, ) -> Result { // If we are in the Holocene, we need to use the base fee params @@ -200,9 +200,7 @@ pub fn next_block_base_fee( if chain_spec.is_holocene_active_at_timestamp(parent.timestamp()) { Ok(decode_holocene_base_fee(chain_spec, parent, timestamp)?) } else { - Ok(parent - .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(timestamp)) - .unwrap_or_default()) + Ok(chain_spec.next_block_base_fee(parent, timestamp).unwrap_or_default()) } } @@ -255,9 +253,7 @@ mod tests { let base_fee = next_block_base_fee(&op_chain_spec, &parent, 0); assert_eq!( base_fee.unwrap(), - parent - .next_block_base_fee(op_chain_spec.base_fee_params_at_timestamp(0)) - .unwrap_or_default() + op_chain_spec.next_block_base_fee(&parent, 0).unwrap_or_default() ); } @@ -275,9 +271,7 @@ mod tests { let base_fee = next_block_base_fee(&op_chain_spec, &parent, 1800000005); assert_eq!( base_fee.unwrap(), - parent - .next_block_base_fee(op_chain_spec.base_fee_params_at_timestamp(0)) - .unwrap_or_default() + op_chain_spec.next_block_base_fee(&parent, 0).unwrap_or_default() ); } diff --git a/crates/optimism/evm/src/build.rs b/crates/optimism/evm/src/build.rs index 8b38db717b5..94d9822e78a 100644 --- a/crates/optimism/evm/src/build.rs +++ b/crates/optimism/evm/src/build.rs @@ -52,7 +52,7 @@ impl OpBlockAssembler { .. } = input; - let timestamp = evm_env.block_env.timestamp; + let timestamp = evm_env.block_env.timestamp.saturating_to(); let transactions_root = proofs::calculate_transaction_root(&transactions); let receipts_root = @@ -97,7 +97,7 @@ impl OpBlockAssembler { mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(), nonce: BEACON_NONCE.into(), base_fee_per_gas: Some(evm_env.block_env.basefee), - number: evm_env.block_env.number, + number: evm_env.block_env.number.saturating_to(), gas_limit: evm_env.block_env.gas_limit, difficulty: evm_env.block_env.difficulty, gas_used: *gas_used, diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 841c5e4603d..ff8a72dc82a 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -12,7 +12,7 @@ mod tests { use op_alloy_consensus::TxDeposit; use op_revm::constants::L1_BLOCK_CONTRACT; use reth_chainspec::MIN_TRANSACTION_GAS; - use reth_evm::{execute::Executor, ConfigureEvm}; + use reth_evm::execute::{BasicBlockExecutor, Executor}; use reth_optimism_chainspec::{OpChainSpec, OpChainSpecBuilder}; use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; use reth_primitives_traits::{Account, RecoveredBlock}; @@ -90,7 +90,7 @@ mod tests { .into(); let provider = evm_config(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = BasicBlockExecutor::new(provider, StateProviderDatabase::new(&db)); // make sure the L1 block contract state is preloaded. executor.with_state_mut(|state| { @@ -163,7 +163,7 @@ mod tests { .into(); let provider = evm_config(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = BasicBlockExecutor::new(provider, StateProviderDatabase::new(&db)); // make sure the L1 block contract state is preloaded. 
executor.with_state_mut(|state| { diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 523bd49de79..a3f4e2042af 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -101,7 +101,7 @@ impl OpEvmConfig impl ConfigureEvm for OpEvmConfig where - ChainSpec: EthChainSpec + OpHardforks, + ChainSpec: EthChainSpec
+ OpHardforks, N: NodePrimitives< Receipt = R::Receipt, SignedTx = R::Transaction, @@ -132,10 +132,15 @@ where let cfg_env = CfgEnv::new().with_chain_id(self.chain_spec().chain().id()).with_spec(spec); + let blob_excess_gas_and_price = spec + .into_eth_spec() + .is_enabled_in(SpecId::CANCUN) + .then_some(BlobExcessGasAndPrice { excess_blob_gas: 0, blob_gasprice: 0 }); + let block_env = BlockEnv { - number: header.number(), + number: U256::from(header.number()), beneficiary: header.beneficiary(), - timestamp: header.timestamp(), + timestamp: U256::from(header.timestamp()), difficulty: if spec.into_eth_spec() >= SpecId::MERGE { U256::ZERO } else { @@ -149,9 +154,7 @@ where gas_limit: header.gas_limit(), basefee: header.base_fee_per_gas().unwrap_or_default(), // EIP-4844 excess blob gas of this block, introduced in Cancun - blob_excess_gas_and_price: header.excess_blob_gas().map(|excess_blob_gas| { - BlobExcessGasAndPrice::new(excess_blob_gas, spec.into_eth_spec() >= SpecId::PRAGUE) - }), + blob_excess_gas_and_price, }; EvmEnv { cfg_env, block_env } @@ -171,17 +174,15 @@ where // if the parent block did not have excess blob gas (i.e. it was pre-cancun), but it is // cancun now, we need to set the excess blob gas to the default value(0) - let blob_excess_gas_and_price = parent - .maybe_next_block_excess_blob_gas( - self.chain_spec().blob_params_at_timestamp(attributes.timestamp), - ) - .or_else(|| (spec_id.into_eth_spec().is_enabled_in(SpecId::CANCUN)).then_some(0)) - .map(|gas| BlobExcessGasAndPrice::new(gas, false)); + let blob_excess_gas_and_price = spec_id + .into_eth_spec() + .is_enabled_in(SpecId::CANCUN) + .then_some(BlobExcessGasAndPrice { excess_blob_gas: 0, blob_gasprice: 0 }); let block_env = BlockEnv { - number: parent.number() + 1, + number: U256::from(parent.number() + 1), beneficiary: attributes.suggested_fee_recipient, - timestamp: attributes.timestamp, + timestamp: U256::from(attributes.timestamp), difficulty: U256::ZERO, prevrandao: Some(attributes.prev_randao), gas_limit: attributes.gas_limit, @@ -307,8 +308,12 @@ mod tests { let db = CacheDB::>::default(); // Create customs block and tx env - let block = - BlockEnv { basefee: 1000, gas_limit: 10_000_000, number: 42, ..Default::default() }; + let block = BlockEnv { + basefee: 1000, + gas_limit: 10_000_000, + number: U256::from(42), + ..Default::default() + }; let evm_env = EvmEnv { block_env: block, ..Default::default() }; @@ -368,8 +373,12 @@ mod tests { let db = CacheDB::>::default(); // Create custom block and tx environment - let block = - BlockEnv { basefee: 1000, gas_limit: 10_000_000, number: 42, ..Default::default() }; + let block = BlockEnv { + basefee: 1000, + gas_limit: 10_000_000, + number: U256::from(42), + ..Default::default() + }; let evm_env = EvmEnv { block_env: block, ..Default::default() }; let evm = evm_config.evm_with_env_and_inspector(db, evm_env.clone(), NoOpInspector {}); diff --git a/crates/optimism/node/src/args.rs b/crates/optimism/node/src/args.rs index 3276abf2e78..9e93f8e63f9 100644 --- a/crates/optimism/node/src/args.rs +++ b/crates/optimism/node/src/args.rs @@ -17,11 +17,6 @@ pub struct RollupArgs { #[arg(long = "rollup.disable-tx-pool-gossip")] pub disable_txpool_gossip: bool, - /// Enable walkback to genesis on startup. This is useful for re-validating the existing DB - /// prior to beginning normal syncing. 
- #[arg(long = "rollup.enable-genesis-walkback")] - pub enable_genesis_walkback: bool, - /// By default the pending block equals the latest block /// to save resources and not leak txs from the tx-pool, /// this flag enables computing of the pending block @@ -60,6 +55,14 @@ pub struct RollupArgs { #[arg(long = "rollup.sequencer-headers", requires = "sequencer")] pub sequencer_headers: Vec, + /// RPC endpoint for historical data. + #[arg( + long = "rollup.historicalrpc", + alias = "rollup.historical-rpc", + value_name = "HISTORICAL_HTTP_URL" + )] + pub historical_rpc: Option, + /// Minimum suggested priority fee (tip) in wei, default `1_000_000` #[arg(long, default_value_t = 1_000_000)] pub min_suggested_priority_fee: u64, @@ -70,13 +73,13 @@ impl Default for RollupArgs { Self { sequencer: None, disable_txpool_gossip: false, - enable_genesis_walkback: false, compute_pending_block: false, discovery_v4: false, enable_tx_conditional: false, supervisor_http: DEFAULT_SUPERVISOR_URL.to_string(), supervisor_safety_level: SafetyLevel::CrossUnsafe, sequencer_headers: Vec::new(), + historical_rpc: None, min_suggested_priority_fee: 1_000_000, } } @@ -101,15 +104,6 @@ mod tests { assert_eq!(args, default_args); } - #[test] - fn test_parse_optimism_walkback_args() { - let expected_args = RollupArgs { enable_genesis_walkback: true, ..Default::default() }; - let args = - CommandParser::::parse_from(["reth", "--rollup.enable-genesis-walkback"]) - .args; - assert_eq!(args, expected_args); - } - #[test] fn test_parse_optimism_compute_pending_block_args() { let expected_args = RollupArgs { compute_pending_block: true, ..Default::default() }; @@ -162,7 +156,6 @@ mod tests { let expected_args = RollupArgs { disable_txpool_gossip: true, compute_pending_block: true, - enable_genesis_walkback: true, enable_tx_conditional: true, sequencer: Some("http://host:port".into()), ..Default::default() @@ -171,7 +164,6 @@ mod tests { "reth", "--rollup.disable-tx-pool-gossip", "--rollup.compute-pending-block", - "--rollup.enable-genesis-walkback", "--rollup.enable-tx-conditional", "--rollup.sequencer-http", "http://host:port", diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index ac9cfe98d83..4ef8a706785 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -18,7 +18,6 @@ pub mod args; /// trait. 
pub mod engine; pub use engine::OpEngineTypes; -pub use reth_optimism_payload_builder::{OpPayloadPrimitives, OpPayloadTypes}; pub mod node; pub use node::*; @@ -36,7 +35,8 @@ pub use reth_optimism_txpool as txpool; pub mod utils; pub use reth_optimism_payload_builder::{ - OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilder, OpPayloadBuilderAttributes, + self as payload, config::OpDAConfig, OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilder, + OpPayloadBuilderAttributes, OpPayloadPrimitives, OpPayloadTypes, }; pub use reth_optimism_evm::*; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 4eb76160a3b..2d33f05f4ae 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -26,12 +26,12 @@ use reth_node_builder::{ }, node::{FullNodeTypes, NodeTypes}, rpc::{ - EngineApiBuilder, EngineValidatorAddOn, EngineValidatorBuilder, EthApiBuilder, - RethRpcAddOns, RethRpcServerHandles, RpcAddOns, RpcContext, RpcHandle, + EngineApiBuilder, EngineValidatorAddOn, EngineValidatorBuilder, EthApiBuilder, Identity, + RethRpcAddOns, RethRpcMiddleware, RethRpcServerHandles, RpcAddOns, RpcContext, RpcHandle, }, BuilderContext, DebugNode, Node, NodeAdapter, NodeComponentsBuilder, }; -use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_chainspec::{OpChainSpec, OpHardfork}; use reth_optimism_consensus::OpBeaconConsensus; use reth_optimism_evm::{OpEvmConfig, OpNextBlockEnvAttributes, OpRethReceiptBuilder}; use reth_optimism_forks::OpHardforks; @@ -43,6 +43,7 @@ use reth_optimism_payload_builder::{ use reth_optimism_primitives::{DepositReceipt, OpPrimitives}; use reth_optimism_rpc::{ eth::{ext::OpEthExtApi, OpEthApiBuilder}, + historical::{HistoricalRpc, HistoricalRpcClient}, miner::{MinerApiExtServer, OpMinerExtApi}, witness::{DebugExecutionWitnessApiServer, OpDebugWitnessApi}, OpEthApi, OpEthApiError, SequencerClient, @@ -81,6 +82,28 @@ impl OpNodeTypes for N where { } +/// Helper trait for Optimism node types with full configuration including storage and execution +/// data. +pub trait OpFullNodeTypes: + NodeTypes< + ChainSpec: OpHardforks, + Primitives: OpPayloadPrimitives, + Storage = OpStorage, + Payload: EngineTypes, +> +{ +} + +impl OpFullNodeTypes for N where + N: NodeTypes< + ChainSpec: OpHardforks, + Primitives: OpPayloadPrimitives, + Storage = OpStorage, + Payload: EngineTypes, + > +{ +} + /// Type configuration for a regular Optimism node. #[derive(Debug, Default, Clone)] #[non_exhaustive] @@ -180,14 +203,7 @@ impl OpNode { impl Node for OpNode where - N: FullNodeTypes< - Types: NodeTypes< - Payload = OpEngineTypes, - ChainSpec: OpHardforks + Hardforks, - Primitives = OpPrimitives, - Storage = OpStorage, - >, - >, + N: FullNodeTypes, { type ComponentsBuilder = ComponentsBuilder< N, @@ -216,6 +232,7 @@ where .with_da_config(self.da_config.clone()) .with_enable_tx_conditional(self.args.enable_tx_conditional) .with_min_suggested_priority_fee(self.args.min_suggested_priority_fee) + .with_historical_rpc(self.args.historical_rpc.clone()) .build() } } @@ -240,11 +257,15 @@ impl NodeTypes for OpNode { } /// Add-ons w.r.t. optimism. +/// +/// This type provides optimism-specific addons to the node and exposes the RPC server and engine +/// API. #[derive(Debug)] -pub struct OpAddOns, EV, EB> { +pub struct OpAddOns, EV, EB, RpcMiddleware = Identity> +{ /// Rpc add-ons responsible for launching the RPC servers and instantiating the RPC handlers /// and eth-api. 
- pub rpc_add_ons: RpcAddOns, + pub rpc_add_ons: RpcAddOns, /// Data availability configuration for the OP builder. pub da_config: OpDAConfig, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP @@ -252,6 +273,10 @@ pub struct OpAddOns, EV, EB> { pub sequencer_url: Option, /// Headers to use for the sequencer client requests. pub sequencer_headers: Vec, + /// RPC endpoint for historical data. + /// + /// This can be used to forward pre-bedrock rpc requests (op-mainnet). + pub historical_rpc: Option, /// Enable transaction conditionals. enable_tx_conditional: bool, min_suggested_priority_fee: u64, @@ -290,18 +315,22 @@ where } } -impl OpAddOns +impl OpAddOns where N: FullNodeComponents, EthB: EthApiBuilder, { /// Maps the [`reth_node_builder::rpc::EngineApiBuilder`] builder type. - pub fn with_engine_api(self, engine_api_builder: T) -> OpAddOns { + pub fn with_engine_api( + self, + engine_api_builder: T, + ) -> OpAddOns { let Self { rpc_add_ons, da_config, sequencer_url, sequencer_headers, + historical_rpc, enable_tx_conditional, min_suggested_priority_fee, } = self; @@ -311,12 +340,16 @@ where sequencer_url, sequencer_headers, enable_tx_conditional, + historical_rpc, min_suggested_priority_fee, } } /// Maps the [`EngineValidatorBuilder`] builder type. - pub fn with_engine_validator(self, engine_validator_builder: T) -> OpAddOns { + pub fn with_engine_validator( + self, + engine_validator_builder: T, + ) -> OpAddOns { let Self { rpc_add_ons, da_config, @@ -324,6 +357,7 @@ where sequencer_headers, enable_tx_conditional, min_suggested_priority_fee, + historical_rpc, } = self; OpAddOns { rpc_add_ons: rpc_add_ons.with_engine_validator(engine_validator_builder), @@ -332,6 +366,35 @@ where sequencer_headers, enable_tx_conditional, min_suggested_priority_fee, + historical_rpc, + } + } + + /// Sets the RPC middleware stack for processing RPC requests. + /// + /// This method configures a custom middleware stack that will be applied to all RPC requests + /// across HTTP, `WebSocket`, and IPC transports. The middleware is applied to the RPC service + /// layer, allowing you to intercept, modify, or enhance RPC request processing. + /// + /// See also [`RpcAddOns::with_rpc_middleware`]. + pub fn with_rpc_middleware(self, rpc_middleware: T) -> OpAddOns { + let Self { + rpc_add_ons, + da_config, + sequencer_url, + sequencer_headers, + enable_tx_conditional, + min_suggested_priority_fee, + historical_rpc, + } = self; + OpAddOns { + rpc_add_ons: rpc_add_ons.with_rpc_middleware(rpc_middleware), + da_config, + sequencer_url, + sequencer_headers, + enable_tx_conditional, + min_suggested_priority_fee, + historical_rpc, } } @@ -356,17 +419,14 @@ where } } -impl NodeAddOns for OpAddOns, EV, EB> +impl NodeAddOns + for OpAddOns, EV, EB, RpcMiddleware> where N: FullNodeComponents< - Types: NodeTypes< - ChainSpec: OpHardforks, - Primitives: OpPayloadPrimitives, - Storage = OpStorage, - Payload: EngineTypes, - >, + Types: OpFullNodeTypes, Evm: ConfigureEvm, >, + N::Types: NodeTypes, OpEthApiError: FromEvmError, ::Transaction: OpPooledTx, EvmFactoryFor: EvmFactory>, @@ -374,6 +434,7 @@ where NetworkT: op_alloy_network::Network + Unpin, EV: EngineValidatorBuilder, EB: EngineApiBuilder, + RpcMiddleware: RethRpcMiddleware, { type Handle = RpcHandle>; @@ -387,9 +448,32 @@ where sequencer_url, sequencer_headers, enable_tx_conditional, + historical_rpc, .. 
} = self; + let maybe_pre_bedrock_historical_rpc = historical_rpc + .and_then(|historical_rpc| { + ctx.node + .provider() + .chain_spec() + .op_fork_activation(OpHardfork::Bedrock) + .block_number() + .filter(|activation| *activation > 0) + .map(|bedrock_block| (historical_rpc, bedrock_block)) + }) + .map(|(historical_rpc, bedrock_block)| -> eyre::Result<_> { + info!(target: "reth::cli", %bedrock_block, ?historical_rpc, "Using historical RPC endpoint pre bedrock"); + let provider = ctx.node.provider().clone(); + let client = HistoricalRpcClient::new(&historical_rpc)?; + let layer = HistoricalRpc::new(provider, client, bedrock_block); + Ok(layer) + }) + .transpose()? + ; + + let rpc_add_ons = rpc_add_ons.option_layer_rpc_middleware(maybe_pre_bedrock_historical_rpc); + let builder = reth_optimism_payload_builder::OpPayloadBuilder::new( ctx.node.pool().clone(), ctx.node.provider().clone(), @@ -458,12 +542,7 @@ where impl RethRpcAddOns for OpAddOns, EV, EB> where N: FullNodeComponents< - Types: NodeTypes< - ChainSpec: OpHardforks, - Primitives = OpPrimitives, - Storage = OpStorage, - Payload: EngineTypes, - >, + Types: OpFullNodeTypes, Evm: ConfigureEvm, >, OpEthApiError: FromEvmError, @@ -483,13 +562,7 @@ where impl EngineValidatorAddOn for OpAddOns, EV, EB> where - N: FullNodeComponents< - Types: NodeTypes< - ChainSpec: OpHardforks, - Primitives = OpPrimitives, - Payload: EngineTypes, - >, - >, + N: FullNodeComponents, OpEthApiBuilder: EthApiBuilder, EV: EngineValidatorBuilder + Default, EB: EngineApiBuilder, @@ -510,6 +583,8 @@ pub struct OpAddOnsBuilder { sequencer_url: Option, /// Headers to use for the sequencer client requests. sequencer_headers: Vec, + /// RPC endpoint for historical data. + historical_rpc: Option, /// Data availability configuration for the OP builder. da_config: Option, /// Enable transaction conditionals. @@ -525,6 +600,7 @@ impl Default for OpAddOnsBuilder { Self { sequencer_url: None, sequencer_headers: Vec::new(), + historical_rpc: None, da_config: None, enable_tx_conditional: false, min_suggested_priority_fee: 1_000_000, @@ -563,6 +639,12 @@ impl OpAddOnsBuilder { self.min_suggested_priority_fee = min; self } + + /// Configures the endpoint for historical RPC forwarding. + pub fn with_historical_rpc(mut self, historical_rpc: Option) -> Self { + self.historical_rpc = historical_rpc; + self + } } impl OpAddOnsBuilder { @@ -580,6 +662,7 @@ impl OpAddOnsBuilder { da_config, enable_tx_conditional, min_suggested_priority_fee, + historical_rpc, .. } = self; @@ -591,10 +674,12 @@ impl OpAddOnsBuilder { .with_min_suggested_priority_fee(min_suggested_priority_fee), EV::default(), EB::default(), + Default::default(), ), da_config: da_config.unwrap_or_default(), sequencer_url, sequencer_headers, + historical_rpc, enable_tx_conditional, min_suggested_priority_fee, } @@ -718,6 +803,7 @@ where let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone()) .no_eip4844() .with_head_timestamp(ctx.head().timestamp) + .with_max_tx_input_bytes(ctx.config().txpool.max_tx_input_bytes) .kzg_settings(ctx.kzg_settings()?) 
.set_tx_fee_cap(ctx.config().rpc.rpc_tx_fee_cap) .with_additional_tasks( @@ -982,10 +1068,9 @@ where #[non_exhaustive] pub struct OpEngineValidatorBuilder; -impl EngineValidatorBuilder for OpEngineValidatorBuilder +impl EngineValidatorBuilder for OpEngineValidatorBuilder where - Types: NodeTypes, - Node: FullNodeComponents, + Node: FullNodeComponents, { type Validator = OpEngineValidator< Node::Provider, @@ -994,7 +1079,7 @@ where >; async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result { - Ok(OpEngineValidator::new::>( + Ok(OpEngineValidator::new::>( ctx.config.chain.clone(), ctx.node.provider().clone(), )) diff --git a/crates/optimism/payload/src/lib.rs b/crates/optimism/payload/src/lib.rs index 03545863e81..19ef8f3b218 100644 --- a/crates/optimism/payload/src/lib.rs +++ b/crates/optimism/payload/src/lib.rs @@ -16,7 +16,9 @@ pub use builder::OpPayloadBuilder; pub mod error; pub mod payload; use op_alloy_rpc_types_engine::OpExecutionData; -pub use payload::{OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes}; +pub use payload::{ + payload_id_optimism, OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes, +}; mod traits; use reth_optimism_primitives::OpPrimitives; use reth_payload_primitives::{BuiltPayload, PayloadTypes}; diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index f32f19ff6f9..0416cf68bab 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -327,7 +327,7 @@ where /// Generates the payload id for the configured payload from the [`OpPayloadAttributes`]. /// /// Returns an 8-byte identifier by hashing the payload components with sha256 hash. -pub(crate) fn payload_id_optimism( +pub fn payload_id_optimism( parent: &B256, attributes: &OpPayloadAttributes, payload_version: u8, diff --git a/crates/optimism/primitives/src/receipt.rs b/crates/optimism/primitives/src/receipt.rs index de9a777cb1d..e0ef6318081 100644 --- a/crates/optimism/primitives/src/receipt.rs +++ b/crates/optimism/primitives/src/receipt.rs @@ -377,8 +377,6 @@ impl InMemorySize for OpReceipt { } } -impl reth_primitives_traits::Receipt for OpReceipt {} - /// Trait for deposit receipt. pub trait DepositReceipt: reth_primitives_traits::Receipt { /// Converts a `Receipt` into a mutable Optimism deposit receipt. 
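The `payload/src/lib.rs` hunk above promotes `payload_id_optimism` to the public API, so external builders can derive the same deterministic payload id the node computes from a parent hash and a set of payload attributes. A hedged usage sketch; the `PayloadId` return type and the version byte are inferred from surrounding reth code, not shown in this hunk:

```rust
use alloy_primitives::B256;
use alloy_rpc_types_engine::PayloadId;
use op_alloy_rpc_types_engine::OpPayloadAttributes;
use reth_optimism_payload_builder::payload_id_optimism;

/// Derive the engine-API payload id for a build job. The version byte
/// should match the engine method that delivered the attributes
/// (e.g. 3 for `engine_forkchoiceUpdatedV3`; assumption, not shown here).
fn payload_id(parent: B256, attributes: &OpPayloadAttributes) -> PayloadId {
    payload_id_optimism(&parent, attributes, 3)
}
```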
@@ -602,17 +600,17 @@ pub(super) mod serde_bincode_compat { #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] struct Data { #[serde_as(as = "serde_bincode_compat::OpReceipt<'_>")] - reseipt: OpReceipt, + receipt: OpReceipt, } let mut bytes = [0u8; 1024]; rand::rng().fill(bytes.as_mut_slice()); let mut data = Data { - reseipt: OpReceipt::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(), + receipt: OpReceipt::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(), }; - let success = data.reseipt.as_receipt_mut().status.coerce_status(); + let success = data.receipt.as_receipt_mut().status.coerce_status(); // // ensure we don't have an invalid poststate variant - data.reseipt.as_receipt_mut().status = success.into(); + data.receipt.as_receipt_mut().status = success.into(); let encoded = bincode::serialize(&data).unwrap(); let decoded: Data = bincode::deserialize(&encoded).unwrap(); diff --git a/crates/optimism/primitives/src/transaction/mod.rs b/crates/optimism/primitives/src/transaction/mod.rs index d24acaa08b7..3284b67fcbf 100644 --- a/crates/optimism/primitives/src/transaction/mod.rs +++ b/crates/optimism/primitives/src/transaction/mod.rs @@ -6,34 +6,7 @@ mod tx_type; #[cfg(test)] mod signed; -pub use op_alloy_consensus::{OpTxType, OpTypedTransaction}; -use reth_primitives_traits::Extended; +pub use op_alloy_consensus::{OpTransaction, OpTxType, OpTypedTransaction}; /// Signed transaction. pub type OpTransactionSigned = op_alloy_consensus::OpTxEnvelope; - -/// A trait that represents an optimism transaction, mainly used to indicate whether or not the -/// transaction is a deposit transaction. -pub trait OpTransaction { - /// Whether or not the transaction is a dpeosit transaction. - fn is_deposit(&self) -> bool; -} - -impl OpTransaction for op_alloy_consensus::OpTxEnvelope { - fn is_deposit(&self) -> bool { - Self::is_deposit(self) - } -} - -impl OpTransaction for Extended -where - B: OpTransaction, - T: OpTransaction, -{ - fn is_deposit(&self) -> bool { - match self { - Self::BuiltIn(b) => b.is_deposit(), - Self::Other(t) => t.is_deposit(), - } - } -} diff --git a/crates/optimism/reth/Cargo.toml b/crates/optimism/reth/Cargo.toml index f00b52acbe9..150a50fc84d 100644 --- a/crates/optimism/reth/Cargo.toml +++ b/crates/optimism/reth/Cargo.toml @@ -19,6 +19,7 @@ reth-network-api = { workspace = true, optional = true } reth-eth-wire = { workspace = true, optional = true } reth-provider = { workspace = true, optional = true } reth-db = { workspace = true, optional = true, features = ["mdbx", "op"] } +reth-codecs = { workspace = true, optional = true } reth-storage-api = { workspace = true, optional = true } reth-node-api = { workspace = true, optional = true } reth-node-core = { workspace = true, optional = true } @@ -33,6 +34,7 @@ reth-rpc-builder = { workspace = true, optional = true } reth-exex = { workspace = true, optional = true } reth-transaction-pool = { workspace = true, optional = true } reth-trie = { workspace = true, optional = true } +reth-trie-db = { workspace = true, optional = true } reth-node-builder = { workspace = true, optional = true } reth-tasks = { workspace = true, optional = true } reth-cli-util = { workspace = true, optional = true } @@ -69,6 +71,7 @@ arbitrary = [ "reth-db?/arbitrary", "reth-transaction-pool?/arbitrary", "reth-eth-wire?/arbitrary", + "reth-codecs?/arbitrary", ] test-utils = [ @@ -84,6 +87,8 @@ test-utils = [ "reth-trie?/test-utils", "reth-transaction-pool?/test-utils", "reth-node-builder?/test-utils", + 
"reth-trie-db?/test-utils", + "reth-codecs?/test-utils", ] full = ["consensus", "evm", "node", "provider", "rpc", "trie", "pool", "network"] @@ -106,7 +111,7 @@ node = [ "dep:reth-optimism-node", "dep:reth-node-builder", "rpc", - "trie", + "trie-db", ] rpc = [ "tasks", @@ -119,7 +124,8 @@ rpc = [ tasks = ["dep:reth-tasks"] js-tracer = ["rpc", "reth-rpc/js-tracer"] network = ["dep:reth-network", "tasks", "dep:reth-network-api", "dep:reth-eth-wire"] -provider = ["storage-api", "tasks", "dep:reth-provider", "dep:reth-db"] +provider = ["storage-api", "tasks", "dep:reth-provider", "dep:reth-db", "dep:reth-codecs"] pool = ["dep:reth-transaction-pool"] storage-api = ["dep:reth-storage-api"] trie = ["dep:reth-trie"] +trie-db = ["trie", "dep:reth-trie-db"] diff --git a/crates/optimism/reth/src/lib.rs b/crates/optimism/reth/src/lib.rs index abafb72c66c..3028b07b237 100644 --- a/crates/optimism/reth/src/lib.rs +++ b/crates/optimism/reth/src/lib.rs @@ -48,6 +48,7 @@ pub mod consensus { } /// Re-exported from `reth_chainspec` +#[allow(ambiguous_glob_reexports)] pub mod chainspec { #[doc(inline)] pub use reth_chainspec::*; @@ -99,6 +100,10 @@ pub mod provider { pub use reth_db as db; } +/// Re-exported codec crate +#[cfg(feature = "provider")] +pub use reth_codecs as codec; + /// Re-exported reth storage api types #[cfg(feature = "storage-api")] pub mod storage { @@ -124,6 +129,10 @@ pub mod node { pub mod trie { #[doc(inline)] pub use reth_trie::*; + + #[cfg(feature = "trie-db")] + #[doc(inline)] + pub use reth_trie_db::*; } /// Re-exported rpc types diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 1187076f5d3..d31de8a0b43 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -17,7 +17,7 @@ reth-evm.workspace = true reth-primitives-traits.workspace = true reth-storage-api.workspace = true reth-chain-state.workspace = true -reth-rpc-eth-api.workspace = true +reth-rpc-eth-api = { workspace = true, features = ["op"] } reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } @@ -35,7 +35,7 @@ reth-optimism-evm.workspace = true reth-optimism-payload-builder.workspace = true reth-optimism-txpool.workspace = true # TODO remove node-builder import -reth-optimism-primitives = { workspace = true, features = ["reth-codec", "serde-bincode-compat"] } +reth-optimism-primitives = { workspace = true, features = ["reth-codec", "serde-bincode-compat", "serde"] } reth-optimism-forks.workspace = true # ethereum @@ -62,11 +62,13 @@ parking_lot.workspace = true tokio.workspace = true reqwest = { workspace = true, features = ["rustls-tls-native-roots"] } async-trait.workspace = true +tower.workspace = true # rpc jsonrpsee-core.workspace = true jsonrpsee-types.workspace = true jsonrpsee.workspace = true +serde_json.workspace = true # misc eyre.workspace = true diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index 134de276f92..f5445863497 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -7,7 +7,7 @@ use jsonrpsee_types::error::{INTERNAL_ERROR_CODE, INVALID_PARAMS_CODE}; use op_revm::{OpHaltReason, OpTransactionError}; use reth_evm::execute::ProviderError; use reth_optimism_evm::OpBlockExecutionError; -use reth_rpc_eth_api::{AsEthApiError, TransactionConversionError}; +use reth_rpc_eth_api::{AsEthApiError, EthTxEnvError, TransactionConversionError}; use reth_rpc_eth_types::{error::api::FromEvmHalt, EthApiError}; use 
reth_rpc_server_types::result::{internal_rpc_err, rpc_err}; use revm::context_interface::result::{EVMError, InvalidTransaction}; @@ -195,6 +195,12 @@ impl From for OpEthApiError { } } +impl From for OpEthApiError { + fn from(value: EthTxEnvError) -> Self { + Self::Eth(EthApiError::from(value)) + } +} + impl From for OpEthApiError { fn from(value: ProviderError) -> Self { Self::Eth(EthApiError::from(value)) diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 12f3c168d3f..34ce4081b2e 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -40,7 +40,17 @@ where let excess_blob_gas = block.excess_blob_gas(); let timestamp = block.timestamp(); - let mut l1_block_info = reth_optimism_evm::extract_l1_info(block.body())?; + let mut l1_block_info = match reth_optimism_evm::extract_l1_info(block.body()) { + Ok(l1_block_info) => l1_block_info, + Err(err) => { + // If it is the genesis block (i.e block number is 0), there is no L1 info, so + // we return an empty l1_block_info. + if block_number == 0 { + return Ok(Some(vec![])); + } + return Err(err.into()); + } + }; return block .body() diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index 87e31ace9be..d886b201bdf 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,18 +1,15 @@ use super::OpNodeCore; use crate::{OpEthApi, OpEthApiError}; -use alloy_consensus::transaction::Either; -use alloy_primitives::{Bytes, TxKind, U256}; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::TransactionRequest; use op_revm::OpTransaction; -use reth_evm::{execute::BlockExecutorFactory, ConfigureEvm, EvmEnv, EvmFactory, SpecFor}; +use reth_evm::{execute::BlockExecutorFactory, ConfigureEvm, EvmFactory, TxEnvFor}; use reth_node_api::NodePrimitives; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall, LoadBlock, LoadState, SpawnBlocking}, - FromEthApiError, FromEvmError, FullEthApiTypes, IntoEthApiError, + FromEvmError, FullEthApiTypes, RpcConvert, RpcTypes, }; -use reth_rpc_eth_types::{revm_utils::CallFees, EthApiError, RpcInvalidTransactionError}; -use reth_storage_api::{ProviderHeader, ProviderTx}; -use revm::{context::TxEnv, context_interface::Block, Database}; +use reth_storage_api::{errors::ProviderError, ProviderHeader, ProviderTx}; +use revm::context::TxEnv; impl EthCall for OpEthApi where @@ -41,7 +38,11 @@ where EvmFactory: EvmFactory>, >, >, - Error: FromEvmError, + RpcConvert: RpcConvert, Network = Self::NetworkTypes>, + NetworkTypes: RpcTypes>, + Error: FromEvmError + + From<::Error> + + From, > + SpawnBlocking, Self::Error: From, N: OpNodeCore, @@ -55,97 +56,4 @@ where fn max_simulate_blocks(&self) -> u64 { self.inner.eth_api.max_simulate_blocks() } - - fn create_txn_env( - &self, - evm_env: &EvmEnv>, - request: TransactionRequest, - mut db: impl Database>, - ) -> Result, Self::Error> { - // Ensure that if versioned hashes are set, they're not empty - if request.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) { - return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into_eth_err()) - } - - let tx_type = request.minimal_tx_type() as u8; - - let TransactionRequest { - from, - to, - gas_price, - max_fee_per_gas, - max_priority_fee_per_gas, - gas, - value, - input, - nonce, - access_list, - chain_id, - blob_versioned_hashes, - max_fee_per_blob_gas, - authorization_list, - transaction_type: _, - sidecar: _, 
- } = request; - - let CallFees { max_priority_fee_per_gas, gas_price, max_fee_per_blob_gas } = - CallFees::ensure_fees( - gas_price.map(U256::from), - max_fee_per_gas.map(U256::from), - max_priority_fee_per_gas.map(U256::from), - U256::from(evm_env.block_env.basefee), - blob_versioned_hashes.as_deref(), - max_fee_per_blob_gas.map(U256::from), - evm_env.block_env.blob_gasprice().map(U256::from), - )?; - - let gas_limit = gas.unwrap_or( - // Use maximum allowed gas limit. The reason for this - // is that both Erigon and Geth use pre-configured gas cap even if - // it's possible to derive the gas limit from the block: - // - evm_env.block_env.gas_limit, - ); - - let chain_id = chain_id.unwrap_or(evm_env.cfg_env.chain_id); - - let caller = from.unwrap_or_default(); - - let nonce = if let Some(nonce) = nonce { - nonce - } else { - db.basic(caller).map_err(Into::into)?.map(|acc| acc.nonce).unwrap_or_default() - }; - - let base = TxEnv { - tx_type, - gas_limit, - nonce, - caller, - gas_price: gas_price.saturating_to(), - gas_priority_fee: max_priority_fee_per_gas.map(|v| v.saturating_to()), - kind: to.unwrap_or(TxKind::Create), - value: value.unwrap_or_default(), - data: input - .try_into_unique_input() - .map_err(Self::Error::from_eth_err)? - .unwrap_or_default(), - chain_id: Some(chain_id), - access_list: access_list.unwrap_or_default(), - // EIP-4844 fields - blob_hashes: blob_versioned_hashes.unwrap_or_default(), - max_fee_per_blob_gas: max_fee_per_blob_gas - .map(|v| v.saturating_to()) - .unwrap_or_default(), - // EIP-7702 fields - authorization_list: authorization_list - .unwrap_or_default() - .into_iter() - .map(Either::Left) - .collect(), - }; - - Ok(OpTransaction { base, enveloped_tx: Some(Bytes::new()), deposit: Default::default() }) - } } diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index e9d2efe04f1..29384e3aa0b 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -65,9 +65,8 @@ impl OpNodeCore for T where T: RpcNodeCore {} pub struct OpEthApi { /// Gateway to node's core components. inner: Arc>, - /// Marker for the network types. - _nt: PhantomData, - tx_resp_builder: RpcConverter>, + /// Converter for RPC types. 
+ tx_resp_builder: RpcConverter>, } impl OpEthApi { @@ -81,7 +80,6 @@ impl OpEthApi { Arc::new(OpEthApiInner { eth_api, sequencer_client, min_suggested_priority_fee }); Self { inner: inner.clone(), - _nt: PhantomData, tx_resp_builder: RpcConverter::with_mapper(OpTxInfoMapper::new(inner)), } } @@ -114,14 +112,14 @@ where Self: Send + Sync + fmt::Debug, N: OpNodeCore, NetworkT: op_alloy_network::Network + Clone + fmt::Debug, + ::Evm: fmt::Debug, ::Primitives: fmt::Debug, { type Error = OpEthApiError; type NetworkTypes = NetworkT; - type TransactionCompat = - RpcConverter>; + type RpcConvert = RpcConverter>; - fn tx_resp_builder(&self) -> &Self::TransactionCompat { + fn tx_resp_builder(&self) -> &Self::RpcConvert { &self.tx_resp_builder } } @@ -203,6 +201,7 @@ where Self: Send + Sync + Clone + 'static, N: OpNodeCore, NetworkT: op_alloy_network::Network, + ::Evm: fmt::Debug, ::Primitives: fmt::Debug, { #[inline] @@ -236,14 +235,13 @@ where } #[inline] - fn fee_history_cache(&self) -> &FeeHistoryCache { + fn fee_history_cache(&self) -> &FeeHistoryCache> { self.inner.eth_api.fee_history_cache() } async fn suggested_priority_fee(&self) -> Result { - let base_tip = self.inner.eth_api.gas_oracle().suggest_tip_cap().await?; let min_tip = U256::from(self.inner.min_suggested_priority_fee); - Ok(base_tip.max(min_tip)) + self.inner.eth_api.gas_oracle().op_suggest_tip_cap(min_tip).await.map_err(Into::into) } } @@ -254,6 +252,7 @@ where Pool: TransactionPool, >, NetworkT: op_alloy_network::Network, + ::Evm: fmt::Debug, ::Primitives: fmt::Debug, { } @@ -271,7 +270,11 @@ where impl EthFees for OpEthApi where - Self: LoadFee, + Self: LoadFee< + Provider: ChainSpecProvider< + ChainSpec: EthChainSpec
>, + >, + >, N: OpNodeCore, { } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 684207fde8a..de011aa2797 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -9,12 +9,11 @@ use reth_evm::ConfigureEvm; use reth_node_api::NodePrimitives; use reth_optimism_evm::OpNextBlockEnvAttributes; use reth_optimism_forks::OpHardforks; -use reth_optimism_primitives::{OpBlock, OpReceipt, OpTransactionSigned}; use reth_primitives_traits::{RecoveredBlock, SealedHeader}; use reth_rpc_eth_api::{ helpers::{LoadPendingBlock, SpawnBlocking}, types::RpcTypes, - EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore, + EthApiTypes, FromEthApiError, FromEvmError, RpcConvert, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_storage_api::{ @@ -31,19 +30,16 @@ where Header = alloy_rpc_types_eth::Header>, >, Error: FromEvmError, + RpcConvert: RpcConvert, >, N: RpcNodeCore< - Provider: BlockReaderIdExt< - Transaction = OpTransactionSigned, - Block = OpBlock, - Receipt = OpReceipt, - Header = alloy_consensus::Header, - > + ChainSpecProvider + Provider: BlockReaderIdExt + + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool>>, Evm: ConfigureEvm< Primitives = ::Primitives, - NextBlockEnvCtx = OpNextBlockEnvAttributes, + NextBlockEnvCtx: From, >, Primitives: NodePrimitives< BlockHeader = ProviderHeader, @@ -72,8 +68,9 @@ where prev_randao: B256::random(), gas_limit: parent.gas_limit(), parent_beacon_block_root: parent.parent_beacon_block_root(), - extra_data: parent.extra_data.clone(), - }) + extra_data: parent.extra_data().clone(), + } + .into()) } /// Returns the locally built pending block diff --git a/crates/optimism/rpc/src/historical.rs b/crates/optimism/rpc/src/historical.rs index ac9320d4fff..0f8824882b3 100644 --- a/crates/optimism/rpc/src/historical.rs +++ b/crates/optimism/rpc/src/historical.rs @@ -1,10 +1,18 @@ //! Client support for optimism historical RPC requests. use crate::sequencer::Error; +use alloy_eips::BlockId; use alloy_json_rpc::{RpcRecv, RpcSend}; +use alloy_primitives::BlockNumber; use alloy_rpc_client::RpcClient; -use std::sync::Arc; -use tracing::warn; +use jsonrpsee_core::{ + middleware::{Batch, Notification, RpcServiceT}, + server::MethodResponse, +}; +use jsonrpsee_types::{Params, Request}; +use reth_storage_api::BlockReaderIdExt; +use std::{future::Future, sync::Arc}; +use tracing::{debug, warn}; /// A client that can be used to forward RPC requests for historical data to an endpoint. /// @@ -17,7 +25,7 @@ pub struct HistoricalRpcClient { impl HistoricalRpcClient { /// Constructs a new historical RPC client with the given endpoint URL. - pub async fn new(endpoint: &str) -> Result { + pub fn new(endpoint: &str) -> Result { let client = RpcClient::new_http( endpoint.parse::().map_err(|err| Error::InvalidUrl(err.to_string()))?, ); @@ -66,3 +74,204 @@ struct HistoricalRpcClientInner { historical_endpoint: String, client: RpcClient, } + +/// A layer that provides historical RPC forwarding functionality for a given service. +#[derive(Debug, Clone)] +pub struct HistoricalRpc
<P>
{ + inner: Arc<HistoricalRpcInner<P>>, +} + +impl<P> HistoricalRpc<P>
{ + /// Constructs a new historical RPC layer with the given provider, client and bedrock block + /// number. + pub fn new(provider: P, client: HistoricalRpcClient, bedrock_block: BlockNumber) -> Self { + let inner = Arc::new(HistoricalRpcInner { provider, client, bedrock_block }); + + Self { inner } + } +} + +impl<P, S> tower::Layer<S> for HistoricalRpc<P>
{ + type Service = HistoricalRpcService; + + fn layer(&self, inner: S) -> Self::Service { + HistoricalRpcService::new(inner, self.inner.clone()) + } +} + +/// A service that intercepts RPC calls and forwards pre-bedrock historical requests +/// to a dedicated endpoint. +/// +/// This checks if the request is for a pre-bedrock block and forwards it via the configured +/// historical RPC client. +#[derive(Debug, Clone)] +pub struct HistoricalRpcService { + /// The inner service that handles regular RPC requests + inner: S, + /// The context required to forward historical requests. + historical: Arc>, +} + +impl HistoricalRpcService { + /// Constructs a new historical RPC service with the given inner service, historical client, + /// provider, and bedrock block number. + const fn new(inner: S, historical: Arc>) -> Self { + Self { inner, historical } + } +} + +impl RpcServiceT for HistoricalRpcService +where + S: RpcServiceT + Send + Sync + Clone + 'static, + + P: BlockReaderIdExt + Send + Sync + Clone + 'static, +{ + type MethodResponse = S::MethodResponse; + type NotificationResponse = S::NotificationResponse; + type BatchResponse = S::BatchResponse; + + fn call<'a>(&self, req: Request<'a>) -> impl Future + Send + 'a { + let inner_service = self.inner.clone(); + let historical = self.historical.clone(); + + Box::pin(async move { + let maybe_block_id = match req.method_name() { + "eth_getBlockByNumber" | "eth_getBlockByHash" => { + parse_block_id_from_params(&req.params(), 0) + } + "eth_getBalance" | + "eth_getCode" | + "eth_getTransactionCount" | + "eth_call" | + "eth_estimateGas" | + "eth_createAccessList" => parse_block_id_from_params(&req.params(), 1), + "eth_getStorageAt" | "eth_getProof" => parse_block_id_from_params(&req.params(), 2), + _ => None, + }; + + // if we've extracted a block ID, check if it's pre-Bedrock + if let Some(block_id) = maybe_block_id { + let is_pre_bedrock = if let Ok(Some(num)) = + historical.provider.block_number_for_id(block_id) + { + num < historical.bedrock_block + } else { + // If we can't convert the hash to a number, assume it's post-Bedrock + debug!(target: "rpc::historical", ?block_id, "hash unknown; not forwarding"); + false + }; + + // if the block is pre-Bedrock, forward the request to the historical client + if is_pre_bedrock { + debug!(target: "rpc::historical", method = %req.method_name(), ?block_id, params=?req.params(), "forwarding pre-Bedrock request"); + + let params = req.params(); + let params = params.as_str().unwrap_or("[]"); + if let Ok(params) = serde_json::from_str::(params) { + if let Ok(raw) = historical + .client + .request::<_, serde_json::Value>(req.method_name(), params) + .await + { + let payload = + jsonrpsee_types::ResponsePayload::success(raw.to_string()).into(); + return MethodResponse::response(req.id, payload, usize::MAX); + } + } + } + } + + // handle the request with the inner service + inner_service.call(req).await + }) + } + + fn batch<'a>(&self, req: Batch<'a>) -> impl Future + Send + 'a { + self.inner.batch(req) + } + + fn notification<'a>( + &self, + n: Notification<'a>, + ) -> impl Future + Send + 'a { + self.inner.notification(n) + } +} + +#[derive(Debug)] +struct HistoricalRpcInner
<P>
{ + /// Provider used to determine if a block is pre-bedrock + provider: P, + /// Client used to forward historical requests + client: HistoricalRpcClient, + /// Bedrock transition block number + bedrock_block: BlockNumber, +} + +/// Parses a `BlockId` from the given parameters at the specified position. +fn parse_block_id_from_params(params: &Params<'_>, position: usize) -> Option { + let values: Vec = params.parse().ok()?; + let val = values.into_iter().nth(position)?; + serde_json::from_value::(val).ok() +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_eips::{BlockId, BlockNumberOrTag}; + use jsonrpsee::types::Params; + use jsonrpsee_core::middleware::layer::Either; + use reth_node_builder::rpc::RethRpcMiddleware; + use reth_storage_api::noop::NoopProvider; + use tower::layer::util::Identity; + + #[test] + fn check_historical_rpc() { + fn assert_historical_rpc() {} + assert_historical_rpc::>(); + assert_historical_rpc::, Identity>>(); + } + + /// Tests that various valid id types can be parsed from the first parameter. + #[test] + fn parses_block_id_from_first_param() { + // Test with a block number + let params_num = Params::new(Some(r#"["0x64"]"#)); // 100 + assert_eq!( + parse_block_id_from_params(¶ms_num, 0).unwrap(), + BlockId::Number(BlockNumberOrTag::Number(100)) + ); + + // Test with the "earliest" tag + let params_tag = Params::new(Some(r#"["earliest"]"#)); + assert_eq!( + parse_block_id_from_params(¶ms_tag, 0).unwrap(), + BlockId::Number(BlockNumberOrTag::Earliest) + ); + } + + /// Tests that the function correctly parses from a position other than 0. + #[test] + fn parses_block_id_from_second_param() { + let params = + Params::new(Some(r#"["0x0000000000000000000000000000000000000000", "latest"]"#)); + let result = parse_block_id_from_params(¶ms, 1).unwrap(); + assert_eq!(result, BlockId::Number(BlockNumberOrTag::Latest)); + } + + /// Tests that the function returns nothing if the parameter is missing or empty. + #[test] + fn defaults_to_latest_when_param_is_missing() { + let params = Params::new(Some(r#"["0x0000000000000000000000000000000000000000"]"#)); + let result = parse_block_id_from_params(¶ms, 1); + assert!(result.is_none()); + } + + /// Tests that the function doesn't parse anything if the parameter is not a valid block id. + #[test] + fn returns_error_for_invalid_input() { + let params = Params::new(Some(r#"[true]"#)); + let result = parse_block_id_from_params(¶ms, 0); + assert!(result.is_none()); + } +} diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs index 6c782bb086e..e5e142f815d 100644 --- a/crates/optimism/rpc/src/lib.rs +++ b/crates/optimism/rpc/src/lib.rs @@ -20,5 +20,5 @@ pub mod witness; pub use engine::OpEngineApiClient; pub use engine::{OpEngineApi, OpEngineApiServer, OP_ENGINE_CAPABILITIES}; pub use error::{OpEthApiError, OpInvalidTransactionError, SequencerClientError}; -pub use eth::{OpEthApi, OpReceiptBuilder}; +pub use eth::{OpEthApi, OpEthApiBuilder, OpReceiptBuilder}; pub use sequencer::SequencerClient; diff --git a/crates/optimism/txpool/src/supervisor/errors.rs b/crates/optimism/txpool/src/supervisor/errors.rs index bae6fe48d03..9993a5ca5d1 100644 --- a/crates/optimism/txpool/src/supervisor/errors.rs +++ b/crates/optimism/txpool/src/supervisor/errors.rs @@ -1,6 +1,6 @@ use alloy_json_rpc::RpcError; use core::error; -use op_alloy_rpc_types::InvalidInboxEntry; +use op_alloy_rpc_types::SuperchainDAError; /// Failures occurring during validation of inbox entries. 
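The `HistoricalRpc` layer and `HistoricalRpcService` above follow the classic service-interception shape: the wrapper answers requests it recognizes (anything resolving to a pre-Bedrock block) by forwarding them to a dedicated endpoint, and hands everything else to the wrapped service. A minimal, dependency-free sketch of that shape, using a synchronous toy `Service` trait and invented names (`HistoricalForwarder`, `LocalNode`, the endpoint and cutoff values) standing in for the real jsonrpsee/tower types:

```rust
use std::sync::Arc;

/// Toy request type standing in for a JSON-RPC request with a block id.
#[derive(Clone, Copy)]
struct Request {
    block_number: u64,
}

trait Service {
    fn call(&self, req: Request) -> String;
}

/// Inner service: the local node, which can serve post-cutoff data.
struct LocalNode;

impl Service for LocalNode {
    fn call(&self, req: Request) -> String {
        format!("local answer for block {}", req.block_number)
    }
}

/// Shared context, mirroring the role of `HistoricalRpcInner`: the cutoff
/// plus whatever is needed to reach the historical endpoint.
struct Shared {
    bedrock_block: u64,
    historical_endpoint: String,
}

/// Wrapper service: intercepts pre-cutoff requests, delegates the rest.
struct HistoricalForwarder<S> {
    inner: S,
    shared: Arc<Shared>,
}

impl<S: Service> Service for HistoricalForwarder<S> {
    fn call(&self, req: Request) -> String {
        if req.block_number < self.shared.bedrock_block {
            // The real service performs an RPC client call here.
            format!("forwarded to {}", self.shared.historical_endpoint)
        } else {
            self.inner.call(req)
        }
    }
}

fn main() {
    let shared = Arc::new(Shared {
        bedrock_block: 1_000, // illustrative cutoff, not the real one
        historical_endpoint: "http://historical:8545".into(),
    });
    let svc = HistoricalForwarder { inner: LocalNode, shared };
    println!("{}", svc.call(Request { block_number: 5 })); // forwarded
    println!("{}", svc.call(Request { block_number: 2_000 })); // local
}
```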
#[derive(thiserror::Error, Debug)] @@ -11,7 +11,7 @@ pub enum InteropTxValidatorError { /// Message does not satisfy validation requirements #[error(transparent)] - InvalidEntry(#[from] InvalidInboxEntry), + InvalidEntry(#[from] SuperchainDAError), /// Catch-all variant. #[error("supervisor server error: {0}")] @@ -36,10 +36,10 @@ impl InteropTxValidatorError { { // Try to extract error details from the RPC error if let Some(error_payload) = err.as_error_resp() { - let code = error_payload.code; + let code = error_payload.code as i32; - // Try to convert the error code to an InvalidInboxEntry variant - if let Ok(invalid_entry) = InvalidInboxEntry::try_from(code) { + // Try to convert the error code to an SuperchainDAError variant + if let Ok(invalid_entry) = SuperchainDAError::try_from(code) { return Self::InvalidEntry(invalid_entry); } } diff --git a/crates/optimism/txpool/src/supervisor/metrics.rs b/crates/optimism/txpool/src/supervisor/metrics.rs index 0c66d0039ac..cbe08e7a442 100644 --- a/crates/optimism/txpool/src/supervisor/metrics.rs +++ b/crates/optimism/txpool/src/supervisor/metrics.rs @@ -1,7 +1,7 @@ //! Optimism supervisor and sequencer metrics use crate::supervisor::InteropTxValidatorError; -use op_alloy_rpc_types::InvalidInboxEntry; +use op_alloy_rpc_types::SuperchainDAError; use reth_metrics::{ metrics::{Counter, Histogram}, Metrics, @@ -50,22 +50,22 @@ impl SupervisorMetrics { pub fn increment_metrics_for_error(&self, error: &InteropTxValidatorError) { if let InteropTxValidatorError::InvalidEntry(inner) = error { match inner { - InvalidInboxEntry::SkippedData => self.skipped_data_count.increment(1), - InvalidInboxEntry::UnknownChain => self.unknown_chain_count.increment(1), - InvalidInboxEntry::ConflictingData => self.conflicting_data_count.increment(1), - InvalidInboxEntry::IneffectiveData => self.ineffective_data_count.increment(1), - InvalidInboxEntry::OutOfOrder => self.out_of_order_count.increment(1), - InvalidInboxEntry::AwaitingReplacement => { + SuperchainDAError::SkippedData => self.skipped_data_count.increment(1), + SuperchainDAError::UnknownChain => self.unknown_chain_count.increment(1), + SuperchainDAError::ConflictingData => self.conflicting_data_count.increment(1), + SuperchainDAError::IneffectiveData => self.ineffective_data_count.increment(1), + SuperchainDAError::OutOfOrder => self.out_of_order_count.increment(1), + SuperchainDAError::AwaitingReplacement => { self.awaiting_replacement_count.increment(1) } - InvalidInboxEntry::OutOfScope => self.out_of_scope_count.increment(1), - InvalidInboxEntry::NoParentForFirstBlock => { + SuperchainDAError::OutOfScope => self.out_of_scope_count.increment(1), + SuperchainDAError::NoParentForFirstBlock => { self.no_parent_for_first_block_count.increment(1) } - InvalidInboxEntry::FutureData => self.future_data_count.increment(1), - InvalidInboxEntry::MissedData => self.missed_data_count.increment(1), - InvalidInboxEntry::DataCorruption => self.data_corruption_count.increment(1), - InvalidInboxEntry::UninitializedChainDatabase => {} + SuperchainDAError::FutureData => self.future_data_count.increment(1), + SuperchainDAError::MissedData => self.missed_data_count.increment(1), + SuperchainDAError::DataCorruption => self.data_corruption_count.increment(1), + SuperchainDAError::UninitializedChainDatabase => {} } } } diff --git a/crates/optimism/txpool/src/validator.rs b/crates/optimism/txpool/src/validator.rs index 6f739553906..6c986e9498f 100644 --- a/crates/optimism/txpool/src/validator.rs +++ 
b/crates/optimism/txpool/src/validator.rs @@ -8,7 +8,7 @@ use reth_optimism_forks::OpHardforks; use reth_primitives_traits::{ transaction::error::InvalidTransactionError, Block, BlockBody, GotExpected, SealedBlock, }; -use reth_storage_api::{BlockReaderIdExt, StateProvider, StateProviderFactory}; +use reth_storage_api::{AccountInfoReader, BlockReaderIdExt, StateProviderFactory}; use reth_transaction_pool::{ error::InvalidPoolTransactionError, EthPoolTransaction, EthTransactionValidator, TransactionOrigin, TransactionValidationOutcome, TransactionValidator, @@ -181,7 +181,7 @@ where &self, origin: TransactionOrigin, transaction: Tx, - state: &mut Option>, + state: &mut Option>, ) -> TransactionValidationOutcome { if transaction.is_eip4844() { return TransactionValidationOutcome::Invalid( @@ -215,37 +215,6 @@ where self.apply_op_checks(outcome) } - /// Validates all given transactions. - /// - /// Returns all outcomes for the given transactions in the same order. - /// - /// See also [`Self::validate_one`] - pub async fn validate_all( - &self, - transactions: Vec<(TransactionOrigin, Tx)>, - ) -> Vec> { - futures_util::future::join_all( - transactions.into_iter().map(|(origin, tx)| self.validate_one(origin, tx)), - ) - .await - } - - /// Validates all given transactions with the specified origin parameter. - /// - /// Returns all outcomes for the given transactions in the same order. - /// - /// See also [`Self::validate_one`] - pub async fn validate_all_with_origin( - &self, - origin: TransactionOrigin, - transactions: impl IntoIterator + Send, - ) -> Vec> { - futures_util::future::join_all( - transactions.into_iter().map(|tx| self.validate_one(origin, tx)), - ) - .await - } - /// Performs the necessary opstack specific checks based on top of the regular eth outcome. 
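The deleted `validate_all`/`validate_all_with_origin` helpers are folded directly into the `TransactionValidator` trait methods below, keeping the same `futures_util::future::join_all` pattern: spawn one validation future per transaction and collect outcomes in input order. A standalone sketch of that pattern (the `Outcome` type and the gas-limit rule are invented for illustration; it uses the `futures` crate for `join_all` and a blocking executor):

```rust
use futures::{executor::block_on, future::join_all};

#[derive(Debug)]
enum Outcome {
    Valid,
    Invalid(&'static str),
}

/// Stand-in for per-transaction validation; async because the real validator
/// may need to read account state.
async fn validate_one(gas_limit: u64) -> Outcome {
    if gas_limit <= 30_000_000 {
        Outcome::Valid
    } else {
        Outcome::Invalid("exceeds block gas limit")
    }
}

/// Validates all inputs concurrently; `join_all` preserves input order, which
/// is the ordering contract the removed helpers documented.
async fn validate_all(gas_limits: Vec<u64>) -> Vec<Outcome> {
    join_all(gas_limits.into_iter().map(validate_one)).await
}

fn main() {
    let outcomes = block_on(validate_all(vec![21_000, 50_000_000]));
    println!("{outcomes:?}"); // [Valid, Invalid("exceeds block gas limit")]
}
```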
fn apply_op_checks( &self, @@ -341,7 +310,10 @@ where &self, transactions: Vec<(TransactionOrigin, Self::Transaction)>, ) -> Vec> { - self.validate_all(transactions).await + futures_util::future::join_all( + transactions.into_iter().map(|(origin, tx)| self.validate_one(origin, tx)), + ) + .await } async fn validate_transactions_with_origin( @@ -349,7 +321,10 @@ where origin: TransactionOrigin, transactions: impl IntoIterator + Send, ) -> Vec> { - self.validate_all_with_origin(origin, transactions).await + futures_util::future::join_all( + transactions.into_iter().map(|tx| self.validate_one(origin, tx)), + ) + .await } fn on_new_head_block(&self, new_tip_block: &SealedBlock) diff --git a/crates/payload/builder/src/noop.rs b/crates/payload/builder/src/noop.rs index cbf21f1cebf..6047bffa8b1 100644 --- a/crates/payload/builder/src/noop.rs +++ b/crates/payload/builder/src/noop.rs @@ -57,3 +57,10 @@ where } } } + +impl Default for NoopPayloadBuilderService { + fn default() -> Self { + let (service, _) = Self::new(); + service + } +} diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 928b7747218..3c4daf25557 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -337,7 +337,7 @@ where .map(|(j, _)| j.payload_attributes()); if attributes.is_none() { - trace!(%id, "no matching payload job found to get attributes for"); + trace!(target: "payload_builder", %id, "no matching payload job found to get attributes for"); } attributes @@ -374,10 +374,10 @@ where match job.poll_unpin(cx) { Poll::Ready(Ok(_)) => { this.metrics.set_active_jobs(this.payload_jobs.len()); - trace!(%id, "payload job finished"); + trace!(target: "payload_builder", %id, "payload job finished"); } Poll::Ready(Err(err)) => { - warn!(%err, ?id, "Payload builder job failed; resolving payload"); + warn!(target: "payload_builder",%err, ?id, "Payload builder job failed; resolving payload"); this.metrics.inc_failed_jobs(); this.metrics.set_active_jobs(this.payload_jobs.len()); } @@ -399,13 +399,13 @@ where let mut res = Ok(id); if this.contains_payload(id) { - debug!(%id, parent = %attr.parent(), "Payload job already in progress, ignoring."); + debug!(target: "payload_builder",%id, parent = %attr.parent(), "Payload job already in progress, ignoring."); } else { // no job for this payload yet, create one let parent = attr.parent(); match this.generator.new_payload_job(attr.clone()) { Ok(job) => { - info!(%id, %parent, "New payload job created"); + info!(target: "payload_builder", %id, %parent, "New payload job created"); this.metrics.inc_initiated_jobs(); new_job = true; this.payload_jobs.push((job, id)); @@ -413,7 +413,7 @@ where } Err(err) => { this.metrics.inc_failed_jobs(); - warn!(%err, %id, "Failed to create payload builder job"); + warn!(target: "payload_builder", %err, %id, "Failed to create payload builder job"); res = Err(err); } } diff --git a/crates/payload/primitives/src/error.rs b/crates/payload/primitives/src/error.rs index 9717182ba6f..4de4b4ccabe 100644 --- a/crates/payload/primitives/src/error.rs +++ b/crates/payload/primitives/src/error.rs @@ -1,4 +1,4 @@ -//! Error types emitted by types or implementations of this crate. +//! Error types for payload operations. 
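The `target: "payload_builder"` annotations added to the logging macros above are what make per-subsystem filtering possible; without an explicit target, events default to the emitting module path. A small sketch of the effect, assuming the common `tracing` + `tracing-subscriber` pair (the subscriber setup here is illustrative, not reth's actual logging configuration):

```rust
use tracing::{trace, warn, Level};

fn main() {
    // With explicit targets, an operator can enable just one subsystem,
    // e.g. `RUST_LOG=payload_builder=trace`, instead of global trace logs.
    tracing_subscriber::fmt().with_max_level(Level::TRACE).init();

    let id = 42u64;
    trace!(target: "payload_builder", %id, "payload job finished");
    warn!(target: "payload_builder", %id, "Failed to create payload builder job");
}
```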
use alloc::{boxed::Box, string::ToString}; use alloy_primitives::B256; diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index d2cac69065f..fb78cae16c7 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -1,4 +1,6 @@ -//! This crate defines abstractions to create and update payloads (blocks) +//! Abstractions for working with execution payloads. +//! +//! This crate provides types and traits for execution and building payloads. #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", @@ -22,8 +24,6 @@ pub use error::{ PayloadBuilderError, VersionSpecificValidationError, }; -/// Contains traits to abstract over payload attributes types and default implementations of the -/// [`PayloadAttributes`] trait for ethereum mainnet and optimism types. mod traits; pub use traits::{ BuiltPayload, PayloadAttributes, PayloadAttributesBuilder, PayloadBuilderAttributes, @@ -32,22 +32,32 @@ pub use traits::{ mod payload; pub use payload::{ExecutionPayload, PayloadOrAttributes}; -/// The types that are used by the engine API. +/// Core trait that defines the associated types for working with execution payloads. pub trait PayloadTypes: Send + Sync + Unpin + core::fmt::Debug + Clone + 'static { - /// The execution payload type provided as input + /// The format for execution payload data that can be processed and validated. + /// + /// This type represents the canonical format for block data that includes + /// all necessary information for execution and validation. type ExecutionData: ExecutionPayload; - /// The built payload type. + /// The type representing a successfully built payload/block. type BuiltPayload: BuiltPayload + Clone + Unpin; - /// The RPC payload attributes type the CL node emits via the engine API. + /// Attributes that specify how a payload should be constructed. + /// + /// These attributes typically come from external sources (e.g., consensus layer over RPC such + /// as the Engine API) and contain parameters like timestamp, fee recipient, and randomness. type PayloadAttributes: PayloadAttributes + Unpin; - /// The payload attributes type that contains information about a running payload job. + /// Extended attributes used internally during payload building. + /// + /// This type augments the basic payload attributes with additional information + /// needed during the building process, such as unique identifiers and parent + /// block references. type PayloadBuilderAttributes: PayloadBuilderAttributes + Clone + Unpin; - /// Converts a block into an execution payload. + /// Converts a sealed block into the execution payload format. fn block_to_payload( block: SealedBlock< <::Primitives as NodePrimitives>::Block, diff --git a/crates/payload/primitives/src/payload.rs b/crates/payload/primitives/src/payload.rs index e21aabed75e..9648a5675c0 100644 --- a/crates/payload/primitives/src/payload.rs +++ b/crates/payload/primitives/src/payload.rs @@ -1,3 +1,5 @@ +//! Types and traits for execution payload data structures. + use crate::{MessageValidationKind, PayloadAttributes}; use alloc::vec::Vec; use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; @@ -6,29 +8,37 @@ use alloy_rpc_types_engine::ExecutionData; use core::fmt::Debug; use serde::{de::DeserializeOwned, Serialize}; -/// An execution payload. +/// Represents the core data structure of an execution payload. 
+/// +/// Contains all necessary information to execute and validate a block, including +/// headers, transactions, and consensus fields. Provides a unified interface +/// regardless of protocol version. pub trait ExecutionPayload: Serialize + DeserializeOwned + Debug + Clone + Send + Sync + 'static { - /// Returns the parent hash of the block. + /// Returns the hash of this block's parent. fn parent_hash(&self) -> B256; - /// Returns the hash of the block. + /// Returns this block's hash. fn block_hash(&self) -> B256; - /// Returns the number of the block. + /// Returns this block's number (height). fn block_number(&self) -> u64; - /// Returns the withdrawals for the payload, if it exists. + /// Returns the withdrawals included in this payload. + /// + /// Returns `None` for pre-Shanghai blocks. fn withdrawals(&self) -> Option<&Vec>; - /// Return the parent beacon block root for the payload, if it exists. + /// Returns the beacon block root associated with this payload. + /// + /// Returns `None` for pre-merge payloads. fn parent_beacon_block_root(&self) -> Option; - /// Returns the timestamp to be used in the payload. + /// Returns this block's timestamp (seconds since Unix epoch). fn timestamp(&self) -> u64; - /// Gas used by the payload + /// Returns the total gas consumed by all transactions in this block. fn gas_used(&self) -> u64; } @@ -62,25 +72,25 @@ impl ExecutionPayload for ExecutionData { } } -/// Either a type that implements the [`ExecutionPayload`] or a type that implements the -/// [`PayloadAttributes`] trait. +/// A unified type for handling both execution payloads and payload attributes. /// -/// This is a helper type to unify pre-validation of version specific fields of the engine API. +/// Enables generic validation and processing logic for both complete payloads +/// and payload attributes, useful for version-specific validation. #[derive(Debug)] pub enum PayloadOrAttributes<'a, Payload, Attributes> { - /// An [`ExecutionPayload`] + /// A complete execution payload containing block data ExecutionPayload(&'a Payload), - /// A payload attributes type. + /// Attributes specifying how to build a new payload PayloadAttributes(&'a Attributes), } impl<'a, Payload, Attributes> PayloadOrAttributes<'a, Payload, Attributes> { - /// Construct a [`PayloadOrAttributes::ExecutionPayload`] variant + /// Creates a `PayloadOrAttributes` from an execution payload reference pub const fn from_execution_payload(payload: &'a Payload) -> Self { Self::ExecutionPayload(payload) } - /// Construct a [`PayloadOrAttributes::PayloadAttributes`] variant + /// Creates a `PayloadOrAttributes` from a payload attributes reference pub const fn from_attributes(attributes: &'a Attributes) -> Self { Self::PayloadAttributes(attributes) } @@ -91,7 +101,7 @@ where Payload: ExecutionPayload, Attributes: PayloadAttributes, { - /// Return the withdrawals for the payload or attributes. + /// Returns withdrawals from either the payload or attributes. pub fn withdrawals(&self) -> Option<&Vec> { match self { Self::ExecutionPayload(payload) => payload.withdrawals(), @@ -99,7 +109,7 @@ where } } - /// Return the timestamp for the payload or attributes. + /// Returns the timestamp from either the payload or attributes. pub fn timestamp(&self) -> u64 { match self { Self::ExecutionPayload(payload) => payload.timestamp(), @@ -107,7 +117,7 @@ where } } - /// Return the parent beacon block root for the payload or attributes. + /// Returns the parent beacon block root from either the payload or attributes. 
pub fn parent_beacon_block_root(&self) -> Option { match self { Self::ExecutionPayload(payload) => payload.parent_beacon_block_root(), @@ -115,7 +125,7 @@ where } } - /// Return a [`MessageValidationKind`] for the payload or attributes. + /// Determines the validation context based on the contained type. pub const fn message_validation_kind(&self) -> MessageValidationKind { match self { Self::ExecutionPayload { .. } => MessageValidationKind::Payload, @@ -165,19 +175,15 @@ impl ExecutionPayload for op_alloy_rpc_types_engine::OpExecutionData { } } -/// Special implementation for Ethereum types that provides additional helper methods +/// Extended functionality for Ethereum execution payloads impl PayloadOrAttributes<'_, ExecutionData, Attributes> where Attributes: PayloadAttributes, { - /// Return the execution requests from the payload, if available. - /// - /// This will return `Some(requests)` only if: - /// - The payload is an `ExecutionData` (not `PayloadAttributes`) - /// - The payload has Prague payload fields - /// - The Prague fields contain requests (not a hash) + /// Extracts execution layer requests from the payload. /// - /// Returns `None` in all other cases. + /// Returns `Some(requests)` if this is an execution payload with request data, + /// `None` otherwise. pub fn execution_requests(&self) -> Option<&Requests> { if let Self::ExecutionPayload(payload) = self { payload.sidecar.requests() diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 6a1a69a05ac..9d712acc827 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -1,3 +1,5 @@ +//! Core traits for working with execution payloads. + use alloc::vec::Vec; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, @@ -9,42 +11,49 @@ use core::fmt; use reth_chain_state::ExecutedBlockWithTrieUpdates; use reth_primitives_traits::{NodePrimitives, SealedBlock}; -/// Represents a built payload type that contains a built `SealedBlock` and can be converted into -/// engine API execution payloads. +/// Represents a successfully built execution payload (block). +/// +/// Provides access to the underlying block data, execution results, and associated metadata +/// for payloads ready for execution or propagation. #[auto_impl::auto_impl(&, Arc)] pub trait BuiltPayload: Send + Sync + fmt::Debug { /// The node's primitive types type Primitives: NodePrimitives; - /// Returns the built block (sealed) + /// Returns the built block in its sealed (hash-verified) form. fn block(&self) -> &SealedBlock<::Block>; - /// Returns the fees collected for the built block + /// Returns the total fees collected from all transactions in this block. fn fees(&self) -> U256; - /// Returns the entire execution data for the built block, if available. + /// Returns the complete execution result including state updates. + /// + /// Returns `None` if execution data is not available or not tracked. fn executed_block(&self) -> Option> { None } - /// Returns the EIP-7685 requests for the payload if any. + /// Returns the EIP-7685 execution layer requests included in this block. + /// + /// These are requests generated by the execution layer that need to be + /// processed by the consensus layer (e.g., validator deposits, withdrawals). fn requests(&self) -> Option; } -/// This can be implemented by types that describe a currently running payload job. +/// Attributes used to guide the construction of a new execution payload. 
/// -/// This is used as a conversion type, transforming a payload attributes type that the engine API -/// receives, into a type that the payload builder can use. +/// Extends basic payload attributes with additional context needed during the +/// building process, tracking in-progress payload jobs and their parameters. pub trait PayloadBuilderAttributes: Send + Sync + fmt::Debug { - /// The payload attributes that can be used to construct this type. Used as the argument in - /// [`PayloadBuilderAttributes::try_new`]. + /// The external payload attributes format this type can be constructed from. type RpcPayloadAttributes; /// The error type used in [`PayloadBuilderAttributes::try_new`]. type Error: core::error::Error; - /// Creates a new payload builder for the given parent block and the attributes. + /// Constructs new builder attributes from external payload attributes. /// - /// Derives the unique [`PayloadId`] for the given parent, attributes and version. + /// Validates attributes and generates a unique [`PayloadId`] based on the + /// parent block, attributes, and version. fn try_new( parent: B256, rpc_payload_attributes: Self::RpcPayloadAttributes, @@ -53,42 +62,48 @@ pub trait PayloadBuilderAttributes: Send + Sync + fmt::Debug { where Self: Sized; - /// Returns the [`PayloadId`] for the running payload job. + /// Returns the unique identifier for this payload build job. fn payload_id(&self) -> PayloadId; - /// Returns the parent block hash for the running payload job. + /// Returns the hash of the parent block this payload builds on. fn parent(&self) -> B256; - /// Returns the timestamp for the running payload job. + /// Returns the timestamp to be used in the payload's header. fn timestamp(&self) -> u64; - /// Returns the parent beacon block root for the running payload job, if it exists. + /// Returns the beacon chain block root from the parent block. + /// + /// Returns `None` for pre-merge blocks or non-beacon contexts. fn parent_beacon_block_root(&self) -> Option; - /// Returns the suggested fee recipient for the running payload job. + /// Returns the address that should receive transaction fees. fn suggested_fee_recipient(&self) -> Address; - /// Returns the prevrandao field for the running payload job. + /// Returns the randomness value for this block. fn prev_randao(&self) -> B256; - /// Returns the withdrawals for the running payload job. + /// Returns the list of withdrawals to be processed in this block. fn withdrawals(&self) -> &Withdrawals; } -/// The execution payload attribute type the CL node emits via the engine API. -/// This trait should be implemented by types that could be used to spawn a payload job. +/// Basic attributes required to initiate payload construction. /// -/// This type is emitted as part of the forkchoiceUpdated call +/// Defines minimal parameters needed to build a new execution payload. +/// Implementations must be serializable for transmission. pub trait PayloadAttributes: serde::de::DeserializeOwned + serde::Serialize + fmt::Debug + Clone + Send + Sync + 'static { - /// Returns the timestamp to be used in the payload job. + /// Returns the timestamp for the new payload. fn timestamp(&self) -> u64; - /// Returns the withdrawals for the given payload attributes. + /// Returns the withdrawals to be included in the payload. + /// + /// `Some` for post-Shanghai blocks, `None` for earlier blocks. fn withdrawals(&self) -> Option<&Vec>; - /// Return the parent beacon block root for the payload attributes. + /// Returns the parent beacon block root. 
+ /// + /// `Some` for post-merge blocks, `None` for pre-merge blocks. fn parent_beacon_block_root(&self) -> Option; } @@ -136,8 +151,11 @@ impl PayloadAttributes for scroll_alloy_rpc_types_engine::ScrollPayloadAttribute } } -/// A builder that can return the current payload attribute. +/// Factory trait for creating payload attributes. +/// +/// Enables different strategies for generating payload attributes based on +/// contextual information. Useful for testing and specialized building. pub trait PayloadAttributesBuilder: Send + Sync + 'static { - /// Return a new payload attribute from the builder. + /// Constructs new payload attributes for the given timestamp. fn build(&self, timestamp: u64) -> Attributes; } diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index e03c9c04827..ed4115b43df 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -55,6 +55,7 @@ arbitrary = { workspace = true, features = ["derive"], optional = true } proptest = { workspace = true, optional = true } proptest-arbitrary-interop = { workspace = true, optional = true } rayon = { workspace = true, optional = true } +alloy-rpc-types-eth = { workspace = true, optional = true } [dev-dependencies] reth-codecs.workspace = true @@ -98,6 +99,7 @@ std = [ "reth-chainspec/std", "revm-bytecode/std", "revm-state/std", + "alloy-rpc-types-eth?/std", "scroll-alloy-consensus?/std", ] secp256k1 = ["alloy-consensus/secp256k1"] @@ -121,6 +123,7 @@ arbitrary = [ "op-alloy-consensus?/arbitrary", "alloy-trie/arbitrary", "reth-chainspec/arbitrary", + "alloy-rpc-types-eth?/arbitrary", "scroll-alloy-consensus?/arbitrary", ] serde-bincode-compat = [ @@ -148,6 +151,7 @@ serde = [ "revm-bytecode/serde", "revm-state/serde", "rand_08/serde", + "alloy-rpc-types-eth?/serde", "scroll-alloy-consensus?/serde", ] reth-codec = [ @@ -164,3 +168,4 @@ scroll-alloy-traits = ["scroll-alloy-consensus"] rayon = [ "dep:rayon", ] +rpc-compat = ["alloy-rpc-types-eth"] diff --git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index b3eb0f80e30..34a533fc4a4 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -18,9 +18,6 @@ pub mod compact_ids { /// Identifier for [`LegacyAnalyzed`](revm_bytecode::Bytecode::LegacyAnalyzed). pub const LEGACY_ANALYZED_BYTECODE_ID: u8 = 2; - /// Identifier for [`Eof`](revm_bytecode::Bytecode::Eof). - pub const EOF_BYTECODE_ID: u8 = 3; - /// Identifier for [`Eip7702`](revm_bytecode::Bytecode::Eip7702). 
pub const EIP7702_BYTECODE_ID: u8 = 4; } @@ -125,11 +122,10 @@ impl reth_codecs::Compact for Bytecode { where B: bytes::BufMut + AsMut<[u8]>, { - use compact_ids::{EIP7702_BYTECODE_ID, EOF_BYTECODE_ID, LEGACY_ANALYZED_BYTECODE_ID}; + use compact_ids::{EIP7702_BYTECODE_ID, LEGACY_ANALYZED_BYTECODE_ID}; let bytecode = match &self.0 { RevmBytecode::LegacyAnalyzed(analyzed) => analyzed.bytecode(), - RevmBytecode::Eof(eof) => eof.raw(), RevmBytecode::Eip7702(eip7702) => eip7702.raw(), }; buf.put_u32(bytecode.len() as u32); @@ -143,10 +139,6 @@ impl reth_codecs::Compact for Bytecode { buf.put_slice(map); 1 + 8 + map.len() } - RevmBytecode::Eof(_) => { - buf.put_u8(EOF_BYTECODE_ID); - 1 - } RevmBytecode::Eip7702(_) => { buf.put_u8(EIP7702_BYTECODE_ID); 1 @@ -192,8 +184,8 @@ impl reth_codecs::Compact for Bytecode { revm_bytecode::JumpTable::from_slice(buf, jump_table_len), )) } - EOF_BYTECODE_ID | EIP7702_BYTECODE_ID => { - // EOF and EIP-7702 bytecode objects will be decoded from the raw bytecode + EIP7702_BYTECODE_ID => { + // EIP-7702 bytecode objects will be decoded from the raw bytecode Self(RevmBytecode::new_raw(bytes)) } _ => unreachable!("Junk data in database: unknown Bytecode variant"), @@ -292,6 +284,7 @@ mod tests { } #[test] + #[ignore] fn test_bytecode() { let mut buf = vec![]; let bytecode = Bytecode::new_raw(Bytes::default()); diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index f3ac7f2bc7c..35ecb171440 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -1,4 +1,26 @@ //! Block abstraction. +//! +//! This module provides the core block types and transformations: +//! +//! ```rust +//! # use reth_primitives_traits::{Block, SealedBlock, RecoveredBlock}; +//! # fn example(block: B) -> Result<(), Box> +//! # where B::Body: reth_primitives_traits::BlockBody { +//! // Basic block flow +//! let block: B = block; +//! +//! // Seal (compute hash) +//! let sealed: SealedBlock = block.seal(); +//! +//! // Recover senders +//! let recovered: RecoveredBlock = sealed.try_recover()?; +//! +//! // Access components +//! let senders = recovered.senders(); +//! let hash = recovered.hash(); +//! # Ok(()) +//! # } +//! ``` pub(crate) mod sealed; pub use sealed::SealedBlock; @@ -47,7 +69,7 @@ pub type BlockTx = <::Body as BlockBody>::Transaction; /// /// This type defines the structure of a block in the blockchain. /// A [`Block`] is composed of a header and a body. -/// It is expected that a block can always be completely reconstructed from its header and body. +/// It is expected that a block can always be completely reconstructed from its header and body pub trait Block: Send + Sync diff --git a/crates/primitives-traits/src/block/recovered.rs b/crates/primitives-traits/src/block/recovered.rs index d2cc011d1b5..7da2bcf3733 100644 --- a/crates/primitives-traits/src/block/recovered.rs +++ b/crates/primitives-traits/src/block/recovered.rs @@ -13,7 +13,22 @@ use derive_more::Deref; /// A block with senders recovered from the block's transactions. /// -/// This type is a [`SealedBlock`] with a list of senders that match the transactions in the block. +/// This type represents a [`SealedBlock`] where all transaction senders have been +/// recovered and verified. Recovery is an expensive operation that extracts the +/// sender address from each transaction's signature. 
+/// +/// # Construction +/// +/// - [`RecoveredBlock::new`] / [`RecoveredBlock::new_unhashed`] - Create with pre-recovered senders +/// (unchecked) +/// - [`RecoveredBlock::try_new`] / [`RecoveredBlock::try_new_unhashed`] - Create with validation +/// - [`RecoveredBlock::try_recover`] - Recover from a block +/// - [`RecoveredBlock::try_recover_sealed`] - Recover from a sealed block +/// +/// # Performance +/// +/// Sender recovery is computationally expensive. Cache recovered blocks when possible +/// to avoid repeated recovery operations. /// /// ## Sealing /// @@ -456,6 +471,44 @@ impl From> for Sealed { } } +/// Converts a block with recovered transactions into a [`RecoveredBlock`]. +/// +/// This implementation takes an `alloy_consensus::Block` where transactions are of type +/// `Recovered` (transactions with their recovered senders) and converts it into a +/// [`RecoveredBlock`] which stores transactions and senders separately for efficiency. +impl From, H>> + for RecoveredBlock> +where + T: SignedTransaction, + H: crate::block::header::BlockHeader, +{ + fn from(block: alloy_consensus::Block, H>) -> Self { + let header = block.header; + + // Split the recovered transactions into transactions and senders + let (transactions, senders): (Vec, Vec
<Address>
) = block + .body + .transactions + .into_iter() + .map(|recovered| { + let (tx, sender) = recovered.into_parts(); + (tx, sender) + }) + .unzip(); + + // Reconstruct the block with regular transactions + let body = alloy_consensus::BlockBody { + transactions, + ommers: block.body.ommers, + withdrawals: block.body.withdrawals, + }; + + let block = alloy_consensus::Block::new(header, body); + + Self::new_unhashed(block, senders) + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a, B> arbitrary::Arbitrary<'a> for RecoveredBlock where @@ -533,6 +586,196 @@ impl RecoveredBlock { } } +#[cfg(feature = "rpc-compat")] +mod rpc_compat { + use super::{ + Block as BlockTrait, BlockBody as BlockBodyTrait, RecoveredBlock, SignedTransaction, + }; + use crate::block::error::BlockRecoveryError; + use alloc::vec::Vec; + use alloy_consensus::{ + transaction::Recovered, Block as CBlock, BlockBody, BlockHeader, Sealable, + }; + use alloy_primitives::U256; + use alloy_rpc_types_eth::{ + Block, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, + }; + + impl RecoveredBlock + where + B: BlockTrait, + { + /// Converts the block into an RPC [`Block`] with the given [`BlockTransactionsKind`]. + /// + /// The `tx_resp_builder` closure transforms each transaction into the desired response + /// type. + pub fn into_rpc_block( + self, + kind: BlockTransactionsKind, + tx_resp_builder: F, + ) -> Result>, E> + where + F: Fn( + Recovered<<::Body as BlockBodyTrait>::Transaction>, + TransactionInfo, + ) -> Result, + { + match kind { + BlockTransactionsKind::Hashes => Ok(self.into_rpc_block_with_tx_hashes()), + BlockTransactionsKind::Full => self.into_rpc_block_full(tx_resp_builder), + } + } + + /// Converts the block to an RPC [`Block`] without consuming self. + /// + /// For transaction hashes, only necessary parts are cloned for efficiency. + /// For full transactions, the entire block is cloned. + /// + /// The `tx_resp_builder` closure transforms each transaction into the desired response + /// type. + pub fn clone_into_rpc_block( + &self, + kind: BlockTransactionsKind, + tx_resp_builder: F, + ) -> Result>, E> + where + F: Fn( + Recovered<<::Body as BlockBodyTrait>::Transaction>, + TransactionInfo, + ) -> Result, + { + match kind { + BlockTransactionsKind::Hashes => Ok(self.to_rpc_block_with_tx_hashes()), + BlockTransactionsKind::Full => self.clone().into_rpc_block_full(tx_resp_builder), + } + } + + /// Creates an RPC [`Block`] with transaction hashes from a reference. + /// + /// Returns [`BlockTransactions::Hashes`] containing only transaction hashes. + /// Efficiently clones only necessary parts, not the entire block. + pub fn to_rpc_block_with_tx_hashes(&self) -> Block> { + let transactions = self.body().transaction_hashes_iter().copied().collect(); + let rlp_length = self.rlp_length(); + let header = self.clone_sealed_header(); + let withdrawals = self.body().withdrawals().cloned(); + + let transactions = BlockTransactions::Hashes(transactions); + let uncles = + self.body().ommers().unwrap_or(&[]).iter().map(|h| h.hash_slow()).collect(); + let header = Header::from_consensus(header.into(), None, Some(U256::from(rlp_length))); + + Block { header, uncles, transactions, withdrawals } + } + + /// Converts the block into an RPC [`Block`] with transaction hashes. + /// + /// Consumes self and returns [`BlockTransactions::Hashes`] containing only transaction + /// hashes. 
+ pub fn into_rpc_block_with_tx_hashes(self) -> Block> { + let transactions = self.body().transaction_hashes_iter().copied().collect(); + let rlp_length = self.rlp_length(); + let (header, body) = self.into_sealed_block().split_sealed_header_body(); + let BlockBody { ommers, withdrawals, .. } = body.into_ethereum_body(); + + let transactions = BlockTransactions::Hashes(transactions); + let uncles = ommers.into_iter().map(|h| h.hash_slow()).collect(); + let header = Header::from_consensus(header.into(), None, Some(U256::from(rlp_length))); + + Block { header, uncles, transactions, withdrawals } + } + + /// Converts the block into an RPC [`Block`] with full transaction objects. + /// + /// Returns [`BlockTransactions::Full`] with complete transaction data. + /// The `tx_resp_builder` closure transforms each transaction with its metadata. + pub fn into_rpc_block_full( + self, + tx_resp_builder: F, + ) -> Result>, E> + where + F: Fn( + Recovered<<::Body as BlockBodyTrait>::Transaction>, + TransactionInfo, + ) -> Result, + { + let block_number = self.header().number(); + let base_fee = self.header().base_fee_per_gas(); + let block_length = self.rlp_length(); + let block_hash = Some(self.hash()); + + let (block, senders) = self.split_sealed(); + let (header, body) = block.split_sealed_header_body(); + let BlockBody { transactions, ommers, withdrawals } = body.into_ethereum_body(); + + let transactions = transactions + .into_iter() + .zip(senders) + .enumerate() + .map(|(idx, (tx, sender))| { + let tx_info = TransactionInfo { + hash: Some(*tx.tx_hash()), + block_hash, + block_number: Some(block_number), + base_fee, + index: Some(idx as u64), + }; + + tx_resp_builder(Recovered::new_unchecked(tx, sender), tx_info) + }) + .collect::, E>>()?; + + let transactions = BlockTransactions::Full(transactions); + let uncles = ommers.into_iter().map(|h| h.hash_slow()).collect(); + let header = + Header::from_consensus(header.into(), None, Some(U256::from(block_length))); + + let block = Block { header, uncles, transactions, withdrawals }; + + Ok(block) + } + } + + impl RecoveredBlock> + where + T: SignedTransaction, + { + /// Creates a `RecoveredBlock` from an RPC block. + /// + /// Converts the RPC block to consensus format and recovers transaction senders. + /// Works with any transaction type `U` that can be converted to `T`. + /// + /// # Examples + /// ```ignore + /// let rpc_block: alloy_rpc_types_eth::Block = get_rpc_block(); + /// let recovered = RecoveredBlock::from_rpc_block(rpc_block)?; + /// ``` + pub fn from_rpc_block( + block: alloy_rpc_types_eth::Block, + ) -> Result>> + where + T: From, + { + // Convert to consensus block and then convert transactions + let consensus_block = block.into_consensus().convert_transactions(); + + // Try to recover the block + consensus_block.try_into_recovered() + } + } + + impl TryFrom> for RecoveredBlock> + where + T: SignedTransaction + From, + { + type Error = BlockRecoveryError>; + + fn try_from(block: alloy_rpc_types_eth::Block) -> Result { + Self::from_rpc_block(block) + } + } +} + /// Bincode-compatible [`RecoveredBlock`] serde implementation. 
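The `From` impl above relies on one small trick: `unzip` turns a list of `(transaction, sender)` pairs into the parallel `transactions`/`senders` vectors that `RecoveredBlock` stores. A toy version of that split (this `Recovered` stand-in is illustrative, not the alloy type):

```rust
/// Illustrative stand-in for `Recovered<T>`: a transaction plus the sender
/// address recovered from its signature.
struct Recovered<T> {
    tx: T,
    sender: [u8; 20],
}

impl<T> Recovered<T> {
    fn into_parts(self) -> (T, [u8; 20]) {
        (self.tx, self.sender)
    }
}

fn main() {
    let recovered = vec![
        Recovered { tx: "tx-0", sender: [0x01; 20] },
        Recovered { tx: "tx-1", sender: [0x02; 20] },
    ];

    // Unzip interleaved (tx, sender) pairs into parallel vectors so senders
    // can be stored and queried separately from the transactions.
    let (transactions, senders): (Vec<_>, Vec<_>) =
        recovered.into_iter().map(Recovered::into_parts).unzip();

    assert_eq!(transactions, ["tx-0", "tx-1"]);
    assert_eq!(senders[0], [0x01; 20]);
}
```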
#[cfg(feature = "serde-bincode-compat")] pub(super) mod serde_bincode_compat { @@ -632,3 +875,48 @@ pub(super) mod serde_bincode_compat { } } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::{Header, TxLegacy}; + use alloy_primitives::{bytes, Signature, TxKind}; + + #[test] + fn test_from_block_with_recovered_transactions() { + let tx = TxLegacy { + chain_id: Some(1), + nonce: 0, + gas_price: 21_000_000_000, + gas_limit: 21_000, + to: TxKind::Call(Address::ZERO), + value: U256::ZERO, + input: bytes!(), + }; + + let signature = Signature::new(U256::from(1), U256::from(2), false); + let sender = Address::from([0x01; 20]); + + let signed_tx = alloy_consensus::TxEnvelope::Legacy( + alloy_consensus::Signed::new_unchecked(tx, signature, B256::ZERO), + ); + + let recovered_tx = Recovered::new_unchecked(signed_tx, sender); + + let header = Header::default(); + let body = alloy_consensus::BlockBody { + transactions: vec![recovered_tx], + ommers: vec![], + withdrawals: None, + }; + let block_with_recovered = alloy_consensus::Block::new(header, body); + + let recovered_block: RecoveredBlock< + alloy_consensus::Block, + > = block_with_recovered.into(); + + assert_eq!(recovered_block.senders().len(), 1); + assert_eq!(recovered_block.senders()[0], sender); + assert_eq!(recovered_block.body().transactions().count(), 1); + } +} diff --git a/crates/primitives-traits/src/extended.rs b/crates/primitives-traits/src/extended.rs index 94c35d0190b..e235f47033e 100644 --- a/crates/primitives-traits/src/extended.rs +++ b/crates/primitives-traits/src/extended.rs @@ -274,8 +274,28 @@ impl From> for Extended OpTransaction for Extended + where + B: OpTransaction, + T: OpTransaction, + { + fn is_deposit(&self) -> bool { + match self { + Self::BuiltIn(b) => b.is_deposit(), + Self::Other(t) => t.is_deposit(), + } + } + + fn as_deposit(&self) -> Option<&Sealed> { + match self { + Self::BuiltIn(b) => b.as_deposit(), + Self::Other(t) => t.as_deposit(), + } + } + } impl TryFrom> for Extended { type Error = >::Error; @@ -338,7 +358,7 @@ mod serde_bincode_compat { #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Debug)] - pub enum ExtendedTxEnvelopeRepr<'a, B: SerdeBincodeCompat, T: SerdeBincodeCompat> { + pub enum ExtendedRepr<'a, B: SerdeBincodeCompat, T: SerdeBincodeCompat> { BuiltIn(B::BincodeRepr<'a>), Other(T::BincodeRepr<'a>), } @@ -348,19 +368,19 @@ mod serde_bincode_compat { B: SerdeBincodeCompat + core::fmt::Debug, T: SerdeBincodeCompat + core::fmt::Debug, { - type BincodeRepr<'a> = ExtendedTxEnvelopeRepr<'a, B, T>; + type BincodeRepr<'a> = ExtendedRepr<'a, B, T>; fn as_repr(&self) -> Self::BincodeRepr<'_> { match self { - Self::BuiltIn(tx) => ExtendedTxEnvelopeRepr::BuiltIn(tx.as_repr()), - Self::Other(tx) => ExtendedTxEnvelopeRepr::Other(tx.as_repr()), + Self::BuiltIn(tx) => ExtendedRepr::BuiltIn(tx.as_repr()), + Self::Other(tx) => ExtendedRepr::Other(tx.as_repr()), } } fn from_repr(repr: Self::BincodeRepr<'_>) -> Self { match repr { - ExtendedTxEnvelopeRepr::BuiltIn(tx_repr) => Self::BuiltIn(B::from_repr(tx_repr)), - ExtendedTxEnvelopeRepr::Other(tx_repr) => Self::Other(T::from_repr(tx_repr)), + ExtendedRepr::BuiltIn(tx_repr) => Self::BuiltIn(B::from_repr(tx_repr)), + ExtendedRepr::Other(tx_repr) => Self::Other(T::from_repr(tx_repr)), } } } diff --git a/crates/primitives-traits/src/header/error.rs b/crates/primitives-traits/src/header/error.rs deleted file mode 100644 index 3905d831053..00000000000 --- a/crates/primitives-traits/src/header/error.rs +++ 
/dev/null @@ -1,8 +0,0 @@ -/// Errors that can occur during header sanity checks. -#[derive(Debug, PartialEq, Eq)] -pub enum HeaderError { - /// Represents an error when the block difficulty is too large. - LargeDifficulty, - /// Represents an error when the block extra data is too large. - LargeExtraData, -} diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs index 7f3a5ab0660..198b9cb3c8f 100644 --- a/crates/primitives-traits/src/header/mod.rs +++ b/crates/primitives-traits/src/header/mod.rs @@ -1,9 +1,6 @@ mod sealed; pub use sealed::{Header, SealedHeader, SealedHeaderFor}; -mod error; -pub use error::HeaderError; - #[cfg(any(test, feature = "test-utils", feature = "arbitrary"))] pub mod test_utils; diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index b84a7fa622f..bcf69813f97 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -239,6 +239,34 @@ impl SealedHeader { } } +#[cfg(feature = "rpc-compat")] +mod rpc_compat { + use super::*; + + impl SealedHeader { + /// Converts this header into `alloy_rpc_types_eth::Header`. + /// + /// Note: This does not set the total difficulty or size of the block. + pub fn into_rpc_header(self) -> alloy_rpc_types_eth::Header + where + H: Sealable, + { + alloy_rpc_types_eth::Header::from_sealed(self.into()) + } + + /// Converts an `alloy_rpc_types_eth::Header` into a `SealedHeader`. + pub fn from_rpc_header(header: alloy_rpc_types_eth::Header) -> Self { + Self::new(header.inner, header.hash) + } + } + + impl From> for SealedHeader { + fn from(value: alloy_rpc_types_eth::Header) -> Self { + Self::from_rpc_header(value) + } + } +} + /// Bincode-compatible [`SealedHeader`] serde implementation. #[cfg(feature = "serde-bincode-compat")] pub(super) mod serde_bincode_compat { diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index c2d563a16a5..60d265d2be6 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -1,10 +1,18 @@ //! Commonly used types and traits in Reth. //! -//! This crate contains various primitive traits used across reth's components. -//! It provides the [`Block`] trait which is used to represent a block and all its components. -//! A [`Block`] is composed of a [`Header`] and a [`BlockBody`]. In ethereum (and optimism), a block -//! body consists of a list of transactions, a list of uncle headers, and a list of withdrawals. For -//! optimism, uncle headers and withdrawals are always empty lists. +//! ## Overview +//! +//! This crate defines various traits and types that form the foundation of the reth stack. +//! The top-level trait is [`Block`] which represents a block in the blockchain. A [`Block`] is +//! composed of a [`Header`] and a [`BlockBody`]. A [`BlockBody`] contains the transactions in the +//! block and additional data that is part of the block. In ethereum, this includes uncle headers +//! and withdrawals. For optimism, uncle headers and withdrawals are always empty lists. +//! +//! The most common types you'll use are: +//! - [`Block`] - A basic block with header and body +//! - [`SealedBlock`] - A block with its hash cached +//! - [`SealedHeader`] - A header with its hash cached +//! - [`RecoveredBlock`] - A sealed block with sender addresses recovered //! //! ## Feature Flags //! @@ -13,20 +21,14 @@ //! types. //! 
- `reth-codec`: Enables db codec support for reth types including zstd compression for certain //! types. +//! - `rpc-compat`: Adds RPC compatibility functions for the types in this crate, e.g. rpc type +//! conversions. //! - `serde`: Adds serde support for all types. //! - `secp256k1`: Adds secp256k1 support for transaction signing/recovery. (By default the no-std //! friendly `k256` is used) //! - `rayon`: Uses `rayon` for parallel transaction sender recovery in [`BlockBody`] by default. //! - `serde-bincode-compat` provides helpers for dealing with the `bincode` crate. //! -//! ## Overview -//! -//! This crate defines various traits and types that form the foundation of the reth stack. -//! The top-level trait is [`Block`] which represents a block in the blockchain. A [`Block`] is -//! composed of a [`Header`] and a [`BlockBody`]. A [`BlockBody`] contains the transactions in the -//! block any additional data that is part of the block. A [`Header`] contains the metadata of the -//! block. -//! //! ### Sealing (Hashing) //! //! The block hash is derived from the [`Header`] and is used to uniquely identify the block. This @@ -53,14 +55,55 @@ //! mainnet. Newer transactions must always be recovered with the regular `recover` functions, see //! also [`recover_signer`](crypto::secp256k1::recover_signer). //! +//! ## Error Handling +//! +//! Most operations that can fail return `Result` types: +//! - [`RecoveryError`](transaction::signed::RecoveryError) - Transaction signature recovery failed +//! - [`BlockRecoveryError`](block::error::BlockRecoveryError) - Block-level recovery failed +//! - [`GotExpected`] / [`GotExpectedBoxed`] - Generic error for mismatched values +//! +//! Recovery errors typically indicate invalid signatures or corrupted data. The block recovery +//! error preserves the original block for further inspection. +//! +//! ### Example +//! +//! ```rust +//! # use reth_primitives_traits::{SealedBlock, RecoveredBlock}; +//! # use reth_primitives_traits::block::error::BlockRecoveryError; +//! # fn example(sealed_block: SealedBlock) -> Result<(), BlockRecoveryError>> +//! # where B::Body: reth_primitives_traits::BlockBody { +//! // Attempt to recover senders from a sealed block +//! match sealed_block.try_recover() { +//! Ok(recovered) => { +//! // Successfully recovered all senders +//! println!("Recovered {} senders", recovered.senders().len()); +//! Ok(()) +//! } +//! Err(err) => { +//! // Recovery failed - the block is returned in the error +//! println!("Failed to recover senders for block"); +//! // You can still access the original block +//! let block = err.into_inner(); +//! let hash = block.hash(); +//! Err(BlockRecoveryError::new(block)) +//! } +//! } +//! # } +//! ``` +//! +//! ## Performance Considerations +//! +//! - **Hashing**: Block hashing is expensive. Use [`SealedBlock`] to cache hashes. +//! - **Recovery**: Sender recovery is CPU-intensive. Use [`RecoveredBlock`] to cache results. +//! - **Parallel Recovery**: Enable the `rayon` feature for parallel transaction recovery. +//! //! ## Bincode serde compatibility //! //! The [bincode-crate](https://github.com/bincode-org/bincode) is often used by additional tools when sending data over the network. //! `bincode` crate doesn't work well with optionally serializable serde fields, but some of the consensus types require optional serialization for RPC compatibility. Read more: //! -//! As a workaround this crate introduces the -//! 
[`SerdeBincodeCompat`](serde_bincode_compat::SerdeBincodeCompat) trait used to a bincode -//! compatible serde representation. +//! As a workaround this crate introduces the `SerdeBincodeCompat` trait (available with the +//! `serde-bincode-compat` feature) used to provide a bincode compatible serde representation. #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", @@ -126,7 +169,7 @@ mod extended; pub use extended::Extended; /// Common header types pub mod header; -pub use header::{Header, HeaderError, SealedHeader, SealedHeaderFor}; +pub use header::{Header, SealedHeader, SealedHeaderFor}; /// Bincode-compatible serde implementations for common abstracted types in Reth. /// diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 59181a412cd..42f7c74b1d3 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -5,6 +5,10 @@ use crate::{ use core::fmt; /// Configures all the primitive types of the node. +/// +/// This trait defines the core types used throughout the node for representing +/// blockchain data. It serves as the foundation for type consistency across +/// different node implementations. pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static { @@ -15,6 +19,9 @@ pub trait NodePrimitives: /// Block body primitive. type BlockBody: FullBlockBody; /// Signed version of the transaction type. + /// + /// This represents the transaction as it exists in the blockchain - the consensus + /// format that includes the signature and can be included in a block. type SignedTx: FullSignedTx; /// A receipt. type Receipt: Receipt; diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index 3e2e64ad923..9be419987f0 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -14,7 +14,6 @@ pub trait FullReceipt: Receipt + MaybeCompact {} impl FullReceipt for T where T: Receipt + MaybeCompact {} /// Abstraction of a receipt. -#[auto_impl::auto_impl(&, Arc)] pub trait Receipt: Send + Sync @@ -34,6 +33,26 @@ pub trait Receipt: { } +// Blanket implementation for any type that satisfies all the supertrait bounds +impl Receipt for T where + T: Send + + Sync + + Unpin + + Clone + + fmt::Debug + + TxReceipt + + RlpEncodableReceipt + + RlpDecodableReceipt + + Encodable + + Decodable + + Eip2718EncodableReceipt + + Typed2718 + + MaybeSerde + + InMemorySize + + MaybeSerdeBincodeCompat +{ +} + /// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). pub fn gas_spent_by_transactions(receipts: I) -> Vec<(u64, u64)> where diff --git a/crates/primitives-traits/src/serde_bincode_compat.rs b/crates/primitives-traits/src/serde_bincode_compat.rs index d08a05ecdfd..fcfb7f681e7 100644 --- a/crates/primitives-traits/src/serde_bincode_compat.rs +++ b/crates/primitives-traits/src/serde_bincode_compat.rs @@ -1,3 +1,38 @@ +//! Bincode compatibility support for reth primitive types. +//! +//! This module provides traits and implementations to work around bincode's limitations +//! with optional serde fields. The bincode crate requires all fields to be present during +//! serialization, which conflicts with types that have `#[serde(skip_serializing_if)]` +//! attributes for RPC compatibility. +//! +//! # Overview +//! +//! The main trait is `SerdeBincodeCompat`, which provides a conversion mechanism between +//! 
types and their bincode-compatible representations. There are two main ways to implement
+//! this trait:
+//!
+//! 1. **Using RLP encoding** - Implement `RlpBincode` for types that already support RLP
+//! 2. **Custom implementation** - Define a custom representation type
+//!
+//! # Examples
+//!
+//! ## Using with `serde_with`
+//!
+//! ```rust
+//! # use reth_primitives_traits::serde_bincode_compat::{self, SerdeBincodeCompat};
+//! # use serde::{Deserialize, Serialize};
+//! # use serde_with::serde_as;
+//! # use alloy_consensus::Header;
+//! #[serde_as]
+//! #[derive(Serialize, Deserialize)]
+//! struct MyStruct {
+//!     #[serde_as(as = "serde_bincode_compat::BincodeReprFor<'_, Header>")]
+//!     data: Header,
+//! }
+//! ```
+
+use alloc::vec::Vec;
+use alloy_primitives::Bytes;
use core::fmt::Debug;
use serde::{de::DeserializeOwned, Serialize};
@@ -9,8 +44,26 @@
pub use block_bincode::{Block, BlockBody};

/// Trait for types that can be serialized and deserialized using bincode.
///
+/// This trait provides a workaround for bincode's incompatibility with optional
+/// serde fields. It ensures all fields are serialized, making the type bincode-compatible.
+///
+/// # Implementation
+///
+/// The easiest way to implement this trait is using [`RlpBincode`] for RLP-encodable types:
+///
+/// ```rust
+/// # use reth_primitives_traits::serde_bincode_compat::RlpBincode;
+/// # use alloy_rlp::{RlpEncodable, RlpDecodable};
+/// # #[derive(RlpEncodable, RlpDecodable)]
+/// # struct MyType;
+/// impl RlpBincode for MyType {}
+/// // SerdeBincodeCompat is automatically implemented
+/// ```
+///
+/// For custom implementations, see the examples in the `block` module.
+///
/// The recommended way to add bincode compatible serialization is via the
-/// [`serde_with`] crate and the `serde_as` macro that. See for reference [`header`].
+/// [`serde_with`] crate and the `serde_as` macro. See for reference [`header`].
pub trait SerdeBincodeCompat: Sized + 'static {
    /// Serde representation of the type for bincode serialization.
    ///
@@ -37,8 +90,58 @@ impl SerdeBincodeCompat for alloy_consensus::Header {
}

/// Type alias for the [`SerdeBincodeCompat::BincodeRepr`] associated type.
+///
+/// This provides a convenient way to refer to the bincode representation type
+/// without having to write out the full associated type projection.
+///
+/// # Example
+///
+/// ```rust
+/// # use reth_primitives_traits::serde_bincode_compat::{SerdeBincodeCompat, BincodeReprFor};
+/// fn serialize_to_bincode<T: SerdeBincodeCompat>(value: &T) -> BincodeReprFor<'_, T> {
+///     value.as_repr()
+/// }
+/// ```
pub type BincodeReprFor<'a, T> = <T as SerdeBincodeCompat>::BincodeRepr<'a>;

+/// A helper trait for using RLP encoding to provide bincode-compatible serialization.
+///
+/// By implementing this trait, [`SerdeBincodeCompat`] is automatically implemented for the
+/// type, and RLP encoding is used for serialization and deserialization for bincode
+/// compatibility.
+///
+/// # Example
+///
+/// ```rust
+/// # use reth_primitives_traits::serde_bincode_compat::RlpBincode;
+/// # use alloy_rlp::{RlpEncodable, RlpDecodable};
+/// #[derive(RlpEncodable, RlpDecodable)]
+/// struct MyCustomType {
+///     value: u64,
+///     data: Vec<u8>,
+/// }
+///
+/// // Simply implement the marker trait
+/// impl RlpBincode for MyCustomType {}
+///
+/// // Now MyCustomType can be used with bincode through RLP encoding
+/// ```
+pub trait RlpBincode: alloy_rlp::Encodable + alloy_rlp::Decodable {}
+
+impl<T: RlpBincode> SerdeBincodeCompat for T {
+    type BincodeRepr<'a> = Bytes;
+
+    fn as_repr(&self) -> Self::BincodeRepr<'_> {
+        let mut buf = Vec::new();
+        self.encode(&mut buf);
+        buf.into()
+    }
+
+    fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
+        Self::decode(&mut repr.as_ref()).expect("Failed to decode bincode rlp representation")
+    }
+}
+
mod block_bincode {
    use crate::serde_bincode_compat::SerdeBincodeCompat;
    use alloc::{borrow::Cow, vec::Vec};
diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs
index 5137c756445..f11c3346aec 100644
--- a/crates/primitives-traits/src/transaction/mod.rs
+++ b/crates/primitives-traits/src/transaction/mod.rs
@@ -1,4 +1,15 @@
//! Transaction abstraction
+//!
+//! This module provides traits for working with blockchain transactions:
+//! - [`Transaction`] - Basic transaction interface
+//! - [`signed::SignedTransaction`] - Transaction with signature and recovery methods
+//! - [`FullTransaction`] - Transaction with database encoding support
+//!
+//! # Transaction Recovery
+//!
+//! Transaction senders are not stored directly but recovered from signatures.
+//! Use `recover_signer` for post-EIP-2 transactions or `recover_signer_unchecked`
+//! for historical transactions.

pub mod execute;
pub mod signature;
diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs
index 1bbca03d74c..56ce917a33b 100644
--- a/crates/primitives-traits/src/transaction/signed.rs
+++ b/crates/primitives-traits/src/transaction/signed.rs
@@ -21,6 +21,15 @@ pub trait FullSignedTx: SignedTransaction + MaybeCompact + MaybeSerdeBincodeComp
impl<T> FullSignedTx for T where T: SignedTransaction + MaybeCompact + MaybeSerdeBincodeCompat {}

/// A signed transaction.
+///
+/// # Recovery Methods
+///
+/// This trait provides two types of recovery methods:
+/// - Standard methods (e.g., `try_recover`) - enforce the EIP-2 low-s signature requirement
+/// - Unchecked methods (e.g., `try_recover_unchecked`) - skip EIP-2 validation for pre-EIP-2
+///   transactions
+///
+/// Use unchecked methods only when dealing with historical pre-EIP-2 transactions.
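To make the checked/unchecked distinction concrete, here is a minimal sketch (not part of the diff itself) using the two recovery methods named in the docs above; `is_pre_eip2` is an illustrative flag supplied by the caller:

```rust
use alloy_primitives::Address;
use reth_primitives_traits::SignedTransaction;

// Recover the sender, relaxing the EIP-2 low-s check only for historical
// (pre-EIP-2) transactions; `is_pre_eip2` is a hypothetical flag.
fn sender_of<T: SignedTransaction>(tx: &T, is_pre_eip2: bool) -> Option<Address> {
    if is_pre_eip2 {
        tx.recover_signer_unchecked().ok()
    } else {
        tx.recover_signer().ok()
    }
}
```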
#[auto_impl::auto_impl(&, Arc)]
pub trait SignedTransaction:
    Send
diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs
index 2aa550807d7..47ae3683434 100644
--- a/crates/primitives/src/lib.rs
+++ b/crates/primitives/src/lib.rs
@@ -31,8 +31,8 @@ pub use block::{BlockWithSenders, SealedBlockFor, SealedBlockWithSenders};
pub use receipt::{gas_spent_by_transactions, Receipt};
pub use reth_primitives_traits::{
    logs_bloom, Account, BlockTy, BodyTy, Bytecode, GotExpected, GotExpectedBoxed, Header,
-    HeaderError, HeaderTy, Log, LogData, NodePrimitives, ReceiptTy, RecoveredBlock, SealedHeader,
-    StorageEntry, TxTy,
+    HeaderTy, Log, LogData, NodePrimitives, ReceiptTy, RecoveredBlock, SealedHeader, StorageEntry,
+    TxTy,
};
pub use static_file::StaticFileSegment;
diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs
index c99defe0841..52e6ee75442 100644
--- a/crates/prune/prune/src/segments/set.rs
+++ b/crates/prune/prune/src/segments/set.rs
@@ -64,6 +64,7 @@ where
            receipts,
            account_history,
            storage_history,
+            bodies_history: _,
            receipts_log_filter,
        } = prune_modes;
diff --git a/crates/prune/prune/src/segments/static_file/transactions.rs b/crates/prune/prune/src/segments/static_file/transactions.rs
index 7005ae15e7d..409e7f9b3d3 100644
--- a/crates/prune/prune/src/segments/static_file/transactions.rs
+++ b/crates/prune/prune/src/segments/static_file/transactions.rs
@@ -15,6 +15,7 @@ use reth_prune_types::{
use reth_static_file_types::StaticFileSegment;
use tracing::trace;

+/// The type responsible for pruning transactions in the database and history expiry.
#[derive(Debug)]
pub struct Transactions<N> {
    static_file_provider: StaticFileProvider<N>,
diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs
index ef8ef882b8c..c1d268a0fb7 100644
--- a/crates/prune/types/src/lib.rs
+++ b/crates/prune/types/src/lib.rs
@@ -30,7 +30,7 @@ pub use pruner::{
    SegmentOutputCheckpoint,
};
pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError};
-pub use target::{PruneModes, MINIMUM_PRUNING_DISTANCE};
+pub use target::{PruneModes, UnwindTargetPrunedError, MINIMUM_PRUNING_DISTANCE};

/// Configuration for pruning receipts not associated with logs emitted by the specified contracts.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
diff --git a/crates/prune/types/src/target.rs b/crates/prune/types/src/target.rs
index 9edeb71ec97..d91faea0a11 100644
--- a/crates/prune/types/src/target.rs
+++ b/crates/prune/types/src/target.rs
@@ -1,3 +1,7 @@
+use alloy_primitives::BlockNumber;
+use derive_more::Display;
+use thiserror::Error;
+
use crate::{PruneMode, ReceiptsLogPruneConfig};

/// Minimum distance from the tip necessary for the node to work correctly:
@@ -7,6 +11,31 @@ use crate::{PruneMode, ReceiptsLogPruneConfig};
/// unwind is required.
pub const MINIMUM_PRUNING_DISTANCE: u64 = 32 * 2 + 10_000;

+/// Error returned when an unwind target lies beyond the configured history limit.
+#[derive(Debug, Error, PartialEq, Eq, Clone)]
+pub enum UnwindTargetPrunedError {
+    /// The target block is beyond the history limit
+    #[error("Cannot unwind to block {target_block} as it is beyond the {history_type} limit. Latest block: {latest_block}, History limit: {limit}")]
+    TargetBeyondHistoryLimit {
+        /// The latest block number
+        latest_block: BlockNumber,
+        /// The target block number
+        target_block: BlockNumber,
+        /// The type of history that is beyond the limit
+        history_type: HistoryType,
+        /// The limit of the history
+        limit: u64,
+    },
+}
+
+/// Type of history that can be pruned.
+#[derive(Debug, Display, Clone, PartialEq, Eq)]
+pub enum HistoryType {
+    /// Account history
+    AccountHistory,
+    /// Storage history
+    StorageHistory,
+}
+
/// Pruning configuration for every segment of the data that can be pruned.
#[derive(Debug, Clone, Default, Eq, PartialEq)]
#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
@@ -46,6 +75,15 @@ pub struct PruneModes {
        )
    )]
    pub storage_history: Option<PruneMode>,
+    /// Bodies history pruning configuration.
+    #[cfg_attr(
+        any(test, feature = "serde"),
+        serde(
+            skip_serializing_if = "Option::is_none",
+            deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<MINIMUM_PRUNING_DISTANCE, _>"
+        )
+    )]
+    pub bodies_history: Option<PruneMode>,
    /// Receipts pruning configuration by retaining only those receipts that contain logs emitted
    /// by the specified addresses, discarding others. This setting is overridden by `receipts`.
    ///
@@ -68,6 +106,7 @@ impl PruneModes {
            receipts: Some(PruneMode::Full),
            account_history: Some(PruneMode::Full),
            storage_history: Some(PruneMode::Full),
+            bodies_history: Some(PruneMode::Full),
            receipts_log_filter: Default::default(),
        }
    }
@@ -81,6 +120,35 @@ impl PruneModes {
    pub fn is_empty(&self) -> bool {
        self == &Self::none()
    }
+
+    /// Ensures the unwind target block is within the configured history limits, returning an
+    /// [`UnwindTargetPrunedError`] if the target lies beyond a distance-pruned segment.
+    pub fn ensure_unwind_target_unpruned(
+        &self,
+        latest_block: u64,
+        target_block: u64,
+    ) -> Result<(), UnwindTargetPrunedError> {
+        let distance = latest_block.saturating_sub(target_block);
+        [
+            (self.account_history, HistoryType::AccountHistory),
+            (self.storage_history, HistoryType::StorageHistory),
+        ]
+        .iter()
+        .find_map(|(prune_mode, history_type)| {
+            if let Some(PruneMode::Distance(limit)) = prune_mode {
+                (distance > *limit).then_some(Err(
+                    UnwindTargetPrunedError::TargetBeyondHistoryLimit {
+                        latest_block,
+                        target_block,
+                        history_type: history_type.clone(),
+                        limit: *limit,
+                    },
+                ))
+            } else {
+                None
+            }
+        })
+        .unwrap_or(Ok(()))
+    }
}

/// Deserializes [`Option`] and validates that the value is not less than the const
diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs
index fafe990c3b1..50415815759 100644
--- a/crates/revm/src/database.rs
+++ b/crates/revm/src/database.rs
@@ -2,7 +2,7 @@ use crate::primitives::alloy_primitives::{BlockNumber, StorageKey, StorageValue}
use alloy_primitives::{Address, B256, U256};
use core::ops::{Deref, DerefMut};
use reth_primitives_traits::Account;
-use reth_storage_api::{AccountReader, BlockHashReader, StateProvider};
+use reth_storage_api::{AccountReader, BlockHashReader, BytecodeReader, StateProvider};
use reth_storage_errors::provider::{ProviderError, ProviderResult};
use revm::{bytecode::Bytecode, state::AccountInfo, Database, DatabaseRef};

@@ -47,7 +47,7 @@ impl<T: StateProvider> EvmStateProvider for T {
        &self,
        code_hash: &B256,
    ) -> ProviderResult<Option<Bytecode>> {
-        <T as StateProvider>::bytecode_by_hash(self, code_hash)
+        <T as BytecodeReader>::bytecode_by_hash(self, code_hash)
    }

    fn storage(
diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs
index d32f7a9e7a7..e0d40070878 100644
--- a/crates/revm/src/test_utils.rs
+++ b/crates/revm/src/test_utils.rs
@@ -4,8 +4,8 @@ use alloy_primitives::{
};
use reth_primitives_traits::{Account, Bytecode};
use reth_storage_api::{
-    AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider,
-    StateRootProvider, StorageRootProvider,
+    AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider,
+    StateProvider, StateRootProvider, StorageRootProvider,
};
use reth_storage_errors::provider::ProviderResult;
use reth_trie::{
@@ -158,7 +158,9 @@ impl StateProvider for StateProviderTest {
    ) -> ProviderResult<Option<StorageValue>> {
        Ok(self.accounts.get(&account).and_then(|(storage, _)| storage.get(&storage_key).copied()))
    }
+}

+impl BytecodeReader for StateProviderTest {
    fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult<Option<Bytecode>> {
        Ok(self.contracts.get(code_hash).cloned())
    }
diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs
index 818761f4475..e9e00a7f6c0 100644
--- a/crates/rpc/ipc/src/server/mod.rs
+++ b/crates/rpc/ipc/src/server/mod.rs
@@ -431,7 +431,7 @@ struct ProcessConnection<'a, HttpMiddleware, RpcMiddleware> {

/// Spawns the IPC connection onto a new task
#[instrument(name = "connection", skip_all, fields(conn_id = %params.conn_id), level = "INFO")]
-fn process_connection<'b, RpcMiddleware, HttpMiddleware>(
+fn process_connection<RpcMiddleware, HttpMiddleware>(
    params: ProcessConnection<'_, HttpMiddleware, RpcMiddleware>,
) where
    RpcMiddleware: Layer<RpcService> + Clone + Send + 'static,
diff --git a/crates/rpc/rpc-api/src/mev.rs b/crates/rpc/rpc-api/src/mev.rs
index 4980b5cc671..76de76a079b 100644
--- a/crates/rpc/rpc-api/src/mev.rs
+++ b/crates/rpc/rpc-api/src/mev.rs
@@ -1,5 +1,5 @@
use alloy_rpc_types_mev::{
-    SendBundleRequest, SendBundleResponse, SimBundleOverrides, SimBundleResponse,
+    EthBundleHash, SendBundleRequest, SimBundleOverrides, SimBundleResponse,
};
use jsonrpsee::proc_macros::rpc;

@@ -27,7 +27,7 @@ pub trait MevFullApi {
    async fn send_bundle(
        &self,
        request: SendBundleRequest,
-    ) -> jsonrpsee::core::RpcResult<SendBundleResponse>;
+    ) -> jsonrpsee::core::RpcResult<EthBundleHash>;

    /// Similar to `mev_sendBundle` but instead of submitting a bundle to the relay, it returns
    /// a simulation result. Only fully matched bundles can be simulated.
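With this change, `mev_sendBundle` resolves to just the bundle hash. A minimal client-side sketch, assuming the jsonrpsee-generated `MevFullApiClient` trait is in scope (available when the crate's `client` feature is enabled):

```rust
use alloy_rpc_types_mev::{EthBundleHash, SendBundleRequest};

// Submit a bundle and keep only its hash; the full SendBundleResponse is gone.
async fn submit_bundle<C: MevFullApiClient>(
    client: &C,
    bundle: SendBundleRequest,
) -> jsonrpsee::core::RpcResult<EthBundleHash> {
    client.send_bundle(bundle).await
}
```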
diff --git a/crates/rpc/rpc-api/src/reth.rs b/crates/rpc/rpc-api/src/reth.rs index cc72705fa54..de0402624a9 100644 --- a/crates/rpc/rpc-api/src/reth.rs +++ b/crates/rpc/rpc-api/src/reth.rs @@ -1,9 +1,11 @@ use alloy_eips::BlockId; use alloy_primitives::{Address, U256}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_chain_state::CanonStateNotification; use std::collections::HashMap; +// Required for the subscription attribute below +use reth_chain_state as _; + /// Reth API namespace for reth-specific methods #[cfg_attr(not(feature = "client"), rpc(server, namespace = "reth"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "reth"))] @@ -19,7 +21,7 @@ pub trait RethApi { #[subscription( name = "subscribeChainNotifications", unsubscribe = "unsubscribeChainNotifications", - item = CanonStateNotification + item = reth_chain_state::CanonStateNotification )] async fn reth_subscribe_chain_notifications(&self) -> jsonrpsee::core::SubscriptionResult; } diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 92ef9cfbc12..281b32ef568 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -65,7 +65,7 @@ reth-rpc-api = { workspace = true, features = ["client"] } reth-rpc-engine-api.workspace = true reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } -reth-rpc-types-compat.workspace = true +reth-rpc-convert.workspace = true reth-engine-primitives.workspace = true reth-node-ethereum.workspace = true diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index d0623ea4a94..4dcce346c0d 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -26,11 +26,8 @@ use error::{ConflictingModules, RpcError, ServerKind}; use http::{header::AUTHORIZATION, HeaderMap}; use jsonrpsee::{ core::RegisterMethodError, - server::{ - middleware::rpc::{RpcService, RpcServiceBuilder, RpcServiceT}, - AlreadyStoppedError, IdProvider, ServerHandle, - }, - MethodResponse, Methods, RpcModule, + server::{middleware::rpc::RpcServiceBuilder, AlreadyStoppedError, IdProvider, ServerHandle}, + Methods, RpcModule, }; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_consensus::{ConsensusError, FullConsensus}; @@ -45,6 +42,7 @@ use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ helpers::{Call, EthApiSpec, EthTransactions, LoadPendingBlock, TraceExt}, EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction, + RpcTxReq, }; use reth_rpc_eth_types::{EthConfig, EthSubscriptionIdProvider}; use reth_rpc_layer::{AuthLayer, Claims, CompressionLayer, JwtAuthValidator, JwtSecret}; @@ -53,16 +51,14 @@ use reth_storage_api::{ StateProviderFactory, }; use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner, TokioTaskExecutor}; -use reth_transaction_pool::{noop::NoopTransactionPool, PoolTransaction, TransactionPool}; +use reth_transaction_pool::{noop::NoopTransactionPool, TransactionPool}; use serde::{Deserialize, Serialize}; use std::{ collections::HashMap, fmt::Debug, net::{Ipv4Addr, SocketAddr, SocketAddrV4}, - sync::Arc, time::{Duration, SystemTime, UNIX_EPOCH}, }; -use tower::Layer; use tower_http::cors::CorsLayer; pub use cors::CorsDomainError; @@ -82,6 +78,9 @@ pub mod auth; /// RPC server utilities. pub mod config; +/// Utils for installing Rpc middleware +pub mod middleware; + /// Cors utilities. 
mod cors; @@ -94,6 +93,7 @@ pub use eth::EthHandlers; // Rpc server metrics mod metrics; +use crate::middleware::RethRpcMiddleware; pub use metrics::{MeteredRequestFuture, RpcRequestMetricsService}; use reth_chain_state::CanonStateSubscriptions; use reth_rpc::eth::sim_bundle::EthSimBundle; @@ -101,40 +101,6 @@ use reth_rpc::eth::sim_bundle::EthSimBundle; // Rpc rate limiter pub mod rate_limiter; -/// Convenience function for starting a server in one step. -#[expect(clippy::too_many_arguments)] -pub async fn launch( - provider: Provider, - pool: Pool, - network: Network, - module_config: impl Into, - server_config: impl Into, - executor: Box, - evm_config: EvmConfig, - eth: EthApi, - consensus: Arc>, -) -> Result -where - N: NodePrimitives, - Provider: FullRpcProvider - + CanonStateSubscriptions - + AccountReader - + ChangeSetReader, - Pool: TransactionPool + 'static, - Network: NetworkInfo + Peers + Clone + 'static, - EvmConfig: ConfigureEvm + 'static, - EthApi: FullEthApiServer, -{ - let module_config = module_config.into(); - server_config - .into() - .start( - &RpcModuleBuilder::new(provider, pool, network, executor, evm_config, consensus) - .build(module_config, eth), - ) - .await -} - /// A builder type to configure the RPC module: See [`RpcModule`] /// /// This is the main entrypoint and the easiest way to configure an RPC server. @@ -160,9 +126,6 @@ pub struct RpcModuleBuilder { impl RpcModuleBuilder -where - N: NodePrimitives, - EvmConfig: Clone, { /// Create a new instance of the builder pub const fn new( @@ -180,12 +143,7 @@ where pub fn with_provider
<P>
( self, provider: P, - ) -> RpcModuleBuilder - where - P: BlockReader - + StateProviderFactory - + 'static, - { + ) -> RpcModuleBuilder { let Self { pool, network, executor, evm_config, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives } } @@ -194,10 +152,7 @@ where pub fn with_pool
<P>
( self, pool: P, - ) -> RpcModuleBuilder - where - P: TransactionPool> + 'static, - { + ) -> RpcModuleBuilder { let Self { provider, network, executor, evm_config, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives } } @@ -226,10 +181,7 @@ where pub fn with_network( self, network: Net, - ) -> RpcModuleBuilder - where - Net: NetworkInfo + Peers + 'static, - { + ) -> RpcModuleBuilder { let Self { provider, pool, executor, evm_config, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives } } @@ -281,11 +233,7 @@ where pub fn with_evm_config( self, evm_config: E, - ) -> RpcModuleBuilder - where - EvmConfig: 'static, - E: ConfigureEvm + Clone, - { + ) -> RpcModuleBuilder { let Self { provider, pool, executor, network, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives } } @@ -322,6 +270,7 @@ where /// See also [`EthApiBuilder`]. pub fn bootstrap_eth_api(&self) -> EthApi where + N: NodePrimitives, Provider: BlockReaderIdExt + StateProviderFactory + CanonStateSubscriptions @@ -698,6 +647,7 @@ where + CanonStateSubscriptions, Network: NetworkInfo + Peers + Clone + 'static, EthApi: EthApiServer< + RpcTxReq, RpcTransaction, RpcBlock, RpcReceipt, @@ -1076,7 +1026,7 @@ pub struct RpcServerConfig { /// JWT secret for authentication jwt_secret: Option, /// Configurable RPC middleware - rpc_middleware: RpcServiceBuilder, + rpc_middleware: RpcMiddleware, } // === impl RpcServerConfig === @@ -1095,7 +1045,7 @@ impl Default for RpcServerConfig { ipc_server_config: None, ipc_endpoint: None, jwt_secret: None, - rpc_middleware: RpcServiceBuilder::new(), + rpc_middleware: Default::default(), } } } @@ -1147,7 +1097,7 @@ impl RpcServerConfig { impl RpcServerConfig { /// Configure rpc middleware - pub fn set_rpc_middleware(self, rpc_middleware: RpcServiceBuilder) -> RpcServerConfig { + pub fn set_rpc_middleware(self, rpc_middleware: T) -> RpcServerConfig { RpcServerConfig { http_server_config: self.http_server_config, http_cors_domains: self.http_cors_domains, @@ -1289,16 +1239,7 @@ impl RpcServerConfig { /// Returns the [`RpcServerHandle`] with the handle to the started servers. 
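Since `set_rpc_middleware` now accepts the tower `Layer` itself rather than a pre-built `RpcServiceBuilder`, and the metrics layer is composed internally at start, configuration shrinks to a one-liner. A minimal sketch, assuming the no-op `tower` `Identity` layer and that `RpcServerConfig` is generic over its middleware as in this diff:

```rust
use reth_rpc_builder::RpcServerConfig;
use tower::layer::util::Identity;

// Pass a bare tower layer; the RpcServiceBuilder wrapping happens inside `start`.
fn http_config() -> RpcServerConfig<Identity> {
    RpcServerConfig::http(Default::default()).set_rpc_middleware(Identity::new())
}
```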
pub async fn start(self, modules: &TransportRpcModules) -> Result where - RpcMiddleware: Layer> + Clone + Send + 'static, - for<'a> >>::Service: - Send - + Sync - + 'static - + RpcServiceT< - MethodResponse = MethodResponse, - BatchResponse = MethodResponse, - NotificationResponse = MethodResponse, - >, + RpcMiddleware: RethRpcMiddleware, { let mut http_handle = None; let mut ws_handle = None; @@ -1359,14 +1300,16 @@ impl RpcServerConfig { )), ) .set_rpc_middleware( - self.rpc_middleware.clone().layer( - modules - .http - .as_ref() - .or(modules.ws.as_ref()) - .map(RpcRequestMetrics::same_port) - .unwrap_or_default(), - ), + RpcServiceBuilder::default() + .layer( + modules + .http + .as_ref() + .or(modules.ws.as_ref()) + .map(RpcRequestMetrics::same_port) + .unwrap_or_default(), + ) + .layer(self.rpc_middleware.clone()), ) .set_config(config.build()) .build(http_socket_addr) @@ -1408,9 +1351,9 @@ impl RpcServerConfig { .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), ) .set_rpc_middleware( - self.rpc_middleware - .clone() - .layer(modules.ws.as_ref().map(RpcRequestMetrics::ws).unwrap_or_default()), + RpcServiceBuilder::default() + .layer(modules.ws.as_ref().map(RpcRequestMetrics::ws).unwrap_or_default()) + .layer(self.rpc_middleware.clone()), ) .build(ws_socket_addr) .await @@ -1434,9 +1377,11 @@ impl RpcServerConfig { .option_layer(Self::maybe_compression_layer(self.http_disable_compression)), ) .set_rpc_middleware( - self.rpc_middleware.clone().layer( - modules.http.as_ref().map(RpcRequestMetrics::http).unwrap_or_default(), - ), + RpcServiceBuilder::default() + .layer( + modules.http.as_ref().map(RpcRequestMetrics::http).unwrap_or_default(), + ) + .layer(self.rpc_middleware.clone()), ) .build(http_socket_addr) .await diff --git a/crates/rpc/rpc-builder/src/middleware.rs b/crates/rpc/rpc-builder/src/middleware.rs new file mode 100644 index 00000000000..c03f63501fc --- /dev/null +++ b/crates/rpc/rpc-builder/src/middleware.rs @@ -0,0 +1,37 @@ +use jsonrpsee::server::middleware::rpc::RpcService; +use tower::Layer; + +/// A Helper alias trait for the RPC middleware supported by the server. 
+pub trait RethRpcMiddleware: + Layer< + RpcService, + Service: jsonrpsee::server::middleware::rpc::RpcServiceT< + MethodResponse = jsonrpsee::MethodResponse, + BatchResponse = jsonrpsee::MethodResponse, + NotificationResponse = jsonrpsee::MethodResponse, + > + Send + + Sync + + Clone + + 'static, + > + Clone + + Send + + 'static +{ +} + +impl RethRpcMiddleware for T where + T: Layer< + RpcService, + Service: jsonrpsee::server::middleware::rpc::RpcServiceT< + MethodResponse = jsonrpsee::MethodResponse, + BatchResponse = jsonrpsee::MethodResponse, + NotificationResponse = jsonrpsee::MethodResponse, + > + Send + + Sync + + Clone + + 'static, + > + Clone + + Send + + 'static +{ +} diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index a32b208d939..d21d6f915a9 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -176,24 +176,38 @@ where .unwrap(); // Implemented - EthApiClient::::protocol_version(client).await.unwrap(); - EthApiClient::::chain_id(client).await.unwrap(); - EthApiClient::::accounts(client).await.unwrap(); - EthApiClient::::get_account( + EthApiClient::::protocol_version( client, - address, - block_number.into(), ) .await .unwrap(); - EthApiClient::::block_number(client).await.unwrap(); - EthApiClient::::get_code(client, address, None) + EthApiClient::::chain_id(client) + .await + .unwrap(); + EthApiClient::::accounts(client) .await .unwrap(); - EthApiClient::::send_raw_transaction(client, tx) + EthApiClient::::get_account( + client, + address, + block_number.into(), + ) + .await + .unwrap(); + EthApiClient::::block_number(client) .await .unwrap(); - EthApiClient::::fee_history( + EthApiClient::::get_code( + client, address, None, + ) + .await + .unwrap(); + EthApiClient::::send_raw_transaction( + client, tx, + ) + .await + .unwrap(); + EthApiClient::::fee_history( client, U64::from(0), block_number, @@ -201,13 +215,17 @@ where ) .await .unwrap(); - EthApiClient::::balance(client, address, None) - .await - .unwrap(); - EthApiClient::::transaction_count(client, address, None) - .await - .unwrap(); - EthApiClient::::storage_at( + EthApiClient::::balance( + client, address, None, + ) + .await + .unwrap(); + EthApiClient::::transaction_count( + client, address, None, + ) + .await + .unwrap(); + EthApiClient::::storage_at( client, address, U256::default().into(), @@ -215,72 +233,80 @@ where ) .await .unwrap(); - EthApiClient::::block_by_hash(client, hash, false) - .await - .unwrap(); - EthApiClient::::block_by_number( + EthApiClient::::block_by_hash( + client, hash, false, + ) + .await + .unwrap(); + EthApiClient::::block_by_number( client, block_number, false, ) .await .unwrap(); - EthApiClient::::block_transaction_count_by_number( + EthApiClient::::block_transaction_count_by_number( client, block_number, ) .await .unwrap(); - EthApiClient::::block_transaction_count_by_hash( + EthApiClient::::block_transaction_count_by_hash( client, hash, ) .await .unwrap(); - EthApiClient::::block_uncles_count_by_hash(client, hash) + EthApiClient::::block_uncles_count_by_hash(client, hash) .await .unwrap(); - EthApiClient::::block_uncles_count_by_number( + EthApiClient::::block_uncles_count_by_number( client, block_number, ) .await .unwrap(); - EthApiClient::::uncle_by_block_hash_and_index( + EthApiClient::::uncle_by_block_hash_and_index( client, hash, index, ) .await .unwrap(); - EthApiClient::::uncle_by_block_number_and_index( + EthApiClient::::uncle_by_block_number_and_index( client, block_number, 
index, ) .await .unwrap(); - EthApiClient::::sign(client, address, bytes.clone()) - .await - .unwrap_err(); - EthApiClient::::sign_typed_data( + EthApiClient::::sign( + client, + address, + bytes.clone(), + ) + .await + .unwrap_err(); + EthApiClient::::sign_typed_data( client, address, typed_data, ) .await .unwrap_err(); - EthApiClient::::transaction_by_hash(client, tx_hash) - .await - .unwrap(); - EthApiClient::::transaction_by_block_hash_and_index( + EthApiClient::::transaction_by_hash( + client, tx_hash, + ) + .await + .unwrap(); + EthApiClient::::transaction_by_block_hash_and_index( client, hash, index, ) .await .unwrap(); - EthApiClient::::transaction_by_block_number_and_index( + EthApiClient::::transaction_by_block_number_and_index( client, block_number, index, ) .await .unwrap(); - EthApiClient::::create_access_list( + EthApiClient::::create_access_list( client, call_request.clone(), Some(block_number.into()), @@ -288,7 +314,7 @@ where ) .await .unwrap_err(); - EthApiClient::::estimate_gas( + EthApiClient::::estimate_gas( client, call_request.clone(), Some(block_number.into()), @@ -296,7 +322,7 @@ where ) .await .unwrap_err(); - EthApiClient::::call( + EthApiClient::::call( client, call_request.clone(), Some(block_number.into()), @@ -305,47 +331,67 @@ where ) .await .unwrap_err(); - EthApiClient::::syncing(client).await.unwrap(); - EthApiClient::::send_transaction( + EthApiClient::::syncing(client) + .await + .unwrap(); + EthApiClient::::send_transaction( client, transaction_request.clone(), ) .await .unwrap_err(); - EthApiClient::::sign_transaction( + EthApiClient::::sign_transaction( client, transaction_request, ) .await .unwrap_err(); - EthApiClient::::hashrate(client).await.unwrap(); - EthApiClient::::submit_hashrate( + EthApiClient::::hashrate(client) + .await + .unwrap(); + EthApiClient::::submit_hashrate( client, U256::default(), B256::default(), ) .await .unwrap(); - EthApiClient::::gas_price(client).await.unwrap_err(); - EthApiClient::::max_priority_fee_per_gas(client) + EthApiClient::::gas_price(client) .await .unwrap_err(); - EthApiClient::::get_proof(client, address, vec![], None) + EthApiClient::::max_priority_fee_per_gas(client) .await - .unwrap(); + .unwrap_err(); + EthApiClient::::get_proof( + client, + address, + vec![], + None, + ) + .await + .unwrap(); // Unimplemented assert!(is_unimplemented( - EthApiClient::::author(client).await.err().unwrap() + EthApiClient::::author(client) + .await + .err() + .unwrap() )); assert!(is_unimplemented( - EthApiClient::::is_mining(client).await.err().unwrap() + EthApiClient::::is_mining(client) + .await + .err() + .unwrap() )); assert!(is_unimplemented( - EthApiClient::::get_work(client).await.err().unwrap() + EthApiClient::::get_work(client) + .await + .err() + .unwrap() )); assert!(is_unimplemented( - EthApiClient::::submit_work( + EthApiClient::::submit_work( client, B64::default(), B256::default(), diff --git a/crates/rpc/rpc-builder/tests/it/middleware.rs b/crates/rpc/rpc-builder/tests/it/middleware.rs index 5e89867c8c6..60541a57c39 100644 --- a/crates/rpc/rpc-builder/tests/it/middleware.rs +++ b/crates/rpc/rpc-builder/tests/it/middleware.rs @@ -1,8 +1,8 @@ use crate::utils::{test_address, test_rpc_builder}; -use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction}; +use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction, TransactionRequest}; use jsonrpsee::{ core::middleware::{Batch, Notification}, - server::middleware::rpc::{RpcServiceBuilder, RpcServiceT}, + server::middleware::rpc::RpcServiceT, 
types::Request, }; use reth_rpc_builder::{RpcServerConfig, TransportRpcModuleConfig}; @@ -79,13 +79,17 @@ async fn test_rpc_middleware() { let handle = RpcServerConfig::http(Default::default()) .with_http_address(test_address()) - .set_rpc_middleware(RpcServiceBuilder::new().layer(mylayer.clone())) + .set_rpc_middleware(mylayer.clone()) .start(&modules) .await .unwrap(); let client = handle.http_client().unwrap(); - EthApiClient::::protocol_version(&client).await.unwrap(); + EthApiClient::::protocol_version( + &client, + ) + .await + .unwrap(); let count = mylayer.count.load(Ordering::Relaxed); assert_eq!(count, 1); } diff --git a/crates/rpc/rpc-convert/Cargo.toml b/crates/rpc/rpc-convert/Cargo.toml new file mode 100644 index 00000000000..45721742310 --- /dev/null +++ b/crates/rpc/rpc-convert/Cargo.toml @@ -0,0 +1,69 @@ +[package] +name = "reth-rpc-convert" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Compatibility layer for reth-primitives and ethereum RPC types" + +[lints] +workspace = true + +[dependencies] +# reth +reth-primitives-traits.workspace = true +reth-storage-api = { workspace = true, optional = true } +reth-evm.workspace = true + +# ethereum +alloy-primitives.workspace = true +alloy-rpc-types-eth = { workspace = true, features = ["serde"] } +alloy-consensus.workspace = true +alloy-network.workspace = true +alloy-json-rpc.workspace = true + +# optimism +op-alloy-consensus = { workspace = true, optional = true } +op-alloy-rpc-types = { workspace = true, optional = true } +reth-optimism-primitives = { workspace = true, optional = true } +op-revm = { workspace = true, optional = true } + +# scroll +scroll-alloy-consensus = { workspace = true, optional = true } +scroll-alloy-evm = { workspace = true, optional = true } +scroll-alloy-rpc-types = { workspace = true, optional = true } +reth-scroll-primitives = { workspace = true, optional = true } +revm-scroll = { workspace = true, optional = true } + +# revm +revm-context.workspace = true + +# io +jsonrpsee-types.workspace = true + +# error +thiserror.workspace = true + +[features] +default = [] +op = [ + "dep:op-alloy-consensus", + "dep:op-alloy-rpc-types", + "dep:reth-optimism-primitives", + "dep:reth-storage-api", + "dep:op-revm", + "reth-evm/op", + "reth-primitives-traits/op", +] +scroll = [ + "dep:scroll-alloy-consensus", + "dep:scroll-alloy-evm", + "dep:scroll-alloy-rpc-types", + "dep:reth-scroll-primitives", + "dep:reth-storage-api", + "dep:revm-scroll", + "reth-evm/scroll-alloy-traits", + "reth-primitives-traits/scroll-alloy-traits", +] diff --git a/crates/rpc/rpc-convert/src/block.rs b/crates/rpc/rpc-convert/src/block.rs new file mode 100644 index 00000000000..144bcdcac97 --- /dev/null +++ b/crates/rpc/rpc-convert/src/block.rs @@ -0,0 +1,47 @@ +//! Conversion traits for block responses to primitive block types. + +use alloy_network::Network; +use std::convert::Infallible; + +/// Trait for converting network block responses to primitive block types. +pub trait TryFromBlockResponse { + /// The error type returned if the conversion fails. + type Error: core::error::Error + Send + Sync + Unpin; + + /// Converts a network block response to a primitive block type. + /// + /// # Returns + /// + /// Returns `Ok(Self)` on successful conversion, or `Err(Self::Error)` if the conversion fails. 
+ fn from_block_response(block_response: N::BlockResponse) -> Result + where + Self: Sized; +} + +impl TryFromBlockResponse for alloy_consensus::Block +where + N::BlockResponse: Into, +{ + type Error = Infallible; + + fn from_block_response(block_response: N::BlockResponse) -> Result { + Ok(block_response.into()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::{Block, TxEnvelope}; + use alloy_network::Ethereum; + use alloy_rpc_types_eth::BlockTransactions; + + #[test] + fn test_try_from_block_response() { + let rpc_block: alloy_rpc_types_eth::Block = + alloy_rpc_types_eth::Block::new(Default::default(), BlockTransactions::Full(vec![])); + let result = + as TryFromBlockResponse>::from_block_response(rpc_block); + assert!(result.is_ok()); + } +} diff --git a/crates/rpc/rpc-eth-types/src/revm_utils.rs b/crates/rpc/rpc-convert/src/fees.rs similarity index 50% rename from crates/rpc/rpc-eth-types/src/revm_utils.rs rename to crates/rpc/rpc-convert/src/fees.rs index 53a75ebbb07..46f8fc8c207 100644 --- a/crates/rpc/rpc-eth-types/src/revm_utils.rs +++ b/crates/rpc/rpc-convert/src/fees.rs @@ -1,57 +1,6 @@ -//! utilities for working with revm - -use alloy_primitives::{keccak256, Address, B256, U256}; -use alloy_rpc_types_eth::{ - state::{AccountOverride, StateOverride}, - BlockOverrides, -}; -use reth_evm::TransactionEnv; -use revm::{ - context::BlockEnv, - database::{CacheDB, State}, - state::{Account, AccountStatus, Bytecode, EvmStorageSlot}, - Database, DatabaseCommit, -}; -use std::{ - cmp::min, - collections::{BTreeMap, HashMap}, -}; - -use super::{EthApiError, EthResult, RpcInvalidTransactionError}; - -/// Calculates the caller gas allowance. -/// -/// `allowance = (account.balance - tx.value) / tx.gas_price` -/// -/// Returns an error if the caller has insufficient funds. -/// Caution: This assumes non-zero `env.gas_price`. Otherwise, zero allowance will be returned. -/// -/// Note: this takes the mut [Database] trait because the loaded sender can be reused for the -/// following operation like `eth_call`. -pub fn caller_gas_allowance(db: &mut DB, env: &impl TransactionEnv) -> EthResult -where - DB: Database, - EthApiError: From<::Error>, -{ - // Get the caller account. - let caller = db.basic(env.caller())?; - // Get the caller balance. - let balance = caller.map(|acc| acc.balance).unwrap_or_default(); - // Get transaction value. - let value = env.value(); - // Subtract transferred value from the caller balance. Return error if the caller has - // insufficient funds. - let balance = balance - .checked_sub(env.value()) - .ok_or_else(|| RpcInvalidTransactionError::InsufficientFunds { cost: value, balance })?; - - Ok(balance - // Calculate the amount of gas the caller can afford with the specified gas price. - .checked_div(U256::from(env.gas_price())) - // This will be 0 if gas price is 0. It is fine, because we check it before. - .unwrap_or_default() - .saturating_to()) -} +use alloy_primitives::{B256, U256}; +use std::cmp::min; +use thiserror::Error; /// Helper type for representing the fees of a `TransactionRequest` #[derive(Debug)] @@ -69,8 +18,6 @@ pub struct CallFees { pub max_fee_per_blob_gas: Option, } -// === impl CallFees === - impl CallFees { /// Ensures the fields of a `TransactionRequest` are not conflicting. /// @@ -90,6 +37,8 @@ impl CallFees { /// missing values, bypassing fee checks wrt. `baseFeePerGas`. 
/// /// This mirrors geth's behaviour when transaction requests are executed: + /// + /// [`BlockEnv`]: revm_context::BlockEnv pub fn ensure_fees( call_gas_price: Option, call_max_fee: Option, @@ -98,14 +47,14 @@ impl CallFees { blob_versioned_hashes: Option<&[B256]>, max_fee_per_blob_gas: Option, block_blob_fee: Option, - ) -> EthResult { + ) -> Result { /// Get the effective gas price of a transaction as specfified in EIP-1559 with relevant /// checks. fn get_effective_gas_price( max_fee_per_gas: Option, max_priority_fee_per_gas: Option, block_base_fee: U256, - ) -> EthResult { + ) -> Result { match max_fee_per_gas { Some(max_fee) => { let max_priority_fee_per_gas = max_priority_fee_per_gas.unwrap_or(U256::ZERO); @@ -115,25 +64,25 @@ impl CallFees { max_fee < block_base_fee { // `base_fee_per_gas` is greater than the `max_fee_per_gas` - return Err(RpcInvalidTransactionError::FeeCapTooLow.into()) + return Err(CallFeesError::FeeCapTooLow) } if max_fee < max_priority_fee_per_gas { return Err( // `max_priority_fee_per_gas` is greater than the `max_fee_per_gas` - RpcInvalidTransactionError::TipAboveFeeCap.into(), + CallFeesError::TipAboveFeeCap, ) } // ref Ok(min( max_fee, - block_base_fee.checked_add(max_priority_fee_per_gas).ok_or_else(|| { - EthApiError::from(RpcInvalidTransactionError::TipVeryHigh) - })?, + block_base_fee + .checked_add(max_priority_fee_per_gas) + .ok_or(CallFeesError::TipVeryHigh)?, )) } None => Ok(block_base_fee .checked_add(max_priority_fee_per_gas.unwrap_or(U256::ZERO)) - .ok_or(EthApiError::from(RpcInvalidTransactionError::TipVeryHigh))?), + .ok_or(CallFeesError::TipVeryHigh)?), } } @@ -176,7 +125,7 @@ impl CallFees { // Ensure blob_hashes are present if !has_blob_hashes { // Blob transaction but no blob hashes - return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into()) + return Err(CallFeesError::BlobTransactionMissingBlobHashes) } Ok(Self { @@ -187,169 +136,38 @@ impl CallFees { } _ => { // this fallback covers incompatible combinations of fields - Err(EthApiError::ConflictingFeeFieldsInRequest) + Err(CallFeesError::ConflictingFeeFieldsInRequest) } } } } -/// Helper trait implemented for databases that support overriding block hashes. -/// -/// Used for applying [`BlockOverrides::block_hash`] -pub trait OverrideBlockHashes { - /// Overrides the given block hashes. - fn override_block_hashes(&mut self, block_hashes: BTreeMap); -} - -impl OverrideBlockHashes for CacheDB { - fn override_block_hashes(&mut self, block_hashes: BTreeMap) { - self.cache - .block_hashes - .extend(block_hashes.into_iter().map(|(num, hash)| (U256::from(num), hash))) - } -} - -impl OverrideBlockHashes for State { - fn override_block_hashes(&mut self, block_hashes: BTreeMap) { - self.block_hashes.extend(block_hashes); - } -} - -/// Applies the given block overrides to the env and updates overridden block hashes in the db. 
-pub fn apply_block_overrides( - overrides: BlockOverrides, - db: &mut impl OverrideBlockHashes, - env: &mut BlockEnv, -) { - let BlockOverrides { - number, - difficulty, - time, - gas_limit, - coinbase, - random, - base_fee, - block_hash, - } = overrides; - - if let Some(block_hashes) = block_hash { - // override block hashes - db.override_block_hashes(block_hashes); - } - - if let Some(number) = number { - env.number = number.saturating_to(); - } - if let Some(difficulty) = difficulty { - env.difficulty = difficulty; - } - if let Some(time) = time { - env.timestamp = time; - } - if let Some(gas_limit) = gas_limit { - env.gas_limit = gas_limit; - } - if let Some(coinbase) = coinbase { - env.beneficiary = coinbase; - } - if let Some(random) = random { - env.prevrandao = Some(random); - } - if let Some(base_fee) = base_fee { - env.basefee = base_fee.saturating_to(); - } -} - -/// Applies the given state overrides (a set of [`AccountOverride`]) to the [`CacheDB`]. -pub fn apply_state_overrides(overrides: StateOverride, db: &mut DB) -> EthResult<()> -where - DB: Database + DatabaseCommit, - EthApiError: From, -{ - for (account, account_overrides) in overrides { - apply_account_override(account, account_overrides, db)?; - } - Ok(()) -} - -/// Applies a single [`AccountOverride`] to the [`CacheDB`]. -fn apply_account_override( - account: Address, - account_override: AccountOverride, - db: &mut DB, -) -> EthResult<()> -where - DB: Database + DatabaseCommit, - EthApiError: From, -{ - let mut info = db.basic(account)?.unwrap_or_default(); - - if let Some(nonce) = account_override.nonce { - info.nonce = nonce; - } - if let Some(code) = account_override.code { - // we need to set both the bytecode and the codehash - info.code_hash = keccak256(&code); - info.code = Some( - Bytecode::new_raw_checked(code) - .map_err(|err| EthApiError::InvalidBytecode(err.to_string()))?, - ); - } - if let Some(balance) = account_override.balance { - info.balance = balance; - } - - // Create a new account marked as touched - let mut acc = - revm::state::Account { info, status: AccountStatus::Touched, storage: HashMap::default() }; - - let storage_diff = match (account_override.state, account_override.state_diff) { - (Some(_), Some(_)) => return Err(EthApiError::BothStateAndStateDiffInOverride(account)), - (None, None) => None, - // If we need to override the entire state, we firstly mark account as destroyed to clear - // its storage, and then we mark it is "NewlyCreated" to make sure that old storage won't be - // used. - (Some(state), None) => { - // Destroy the account to ensure that its storage is cleared - db.commit(HashMap::from_iter([( - account, - Account { - status: AccountStatus::SelfDestructed | AccountStatus::Touched, - ..Default::default() - }, - )])); - // Mark the account as created to ensure that old storage is not read - acc.mark_created(); - Some(state) - } - (None, Some(state)) => Some(state), - }; - - if let Some(state) = storage_diff { - for (slot, value) in state { - acc.storage.insert( - slot.into(), - EvmStorageSlot { - // we use inverted value here to ensure that storage is treated as changed - original_value: (!value).into(), - present_value: value.into(), - is_cold: false, - }, - ); - } - } - - db.commit(HashMap::from_iter([(account, acc)])); - - Ok(()) +/// Error coming from decoding and validating transaction request fees. 
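For reference, a small sketch of how the relocated `CallFees::ensure_fees` validates an EIP-1559 request against a block base fee, using the parameter order shown above (values illustrative):

```rust
use alloy_primitives::U256;
use reth_rpc_convert::{CallFees, CallFeesError};

// Validate 1559-style fee fields against a 100 gwei base fee. A max fee below the
// base fee would instead yield `CallFeesError::FeeCapTooLow`.
fn validate_fees() -> Result<CallFees, CallFeesError> {
    CallFees::ensure_fees(
        None,                                 // gas_price (legacy): not set
        Some(U256::from(120_000_000_000u64)), // max_fee_per_gas
        Some(U256::from(1_000_000_000u64)),   // max_priority_fee_per_gas
        U256::from(100_000_000_000u64),       // block base fee
        None,                                 // blob versioned hashes
        None,                                 // max_fee_per_blob_gas
        None,                                 // block blob fee
    )
}
```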
+#[derive(Debug, Error)] +pub enum CallFeesError { + /// Thrown when a call or transaction request (`eth_call`, `eth_estimateGas`, + /// `eth_sendTransaction`) contains conflicting fields (legacy, EIP-1559) + #[error("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")] + ConflictingFeeFieldsInRequest, + /// Thrown post London if the transaction's fee is less than the base fee of the block + #[error("max fee per gas less than block base fee")] + FeeCapTooLow, + /// Thrown to ensure no one is able to specify a transaction with a tip higher than the total + /// fee cap. + #[error("max priority fee per gas higher than max fee per gas")] + TipAboveFeeCap, + /// A sanity error to avoid huge numbers specified in the tip field. + #[error("max priority fee per gas higher than 2^256-1")] + TipVeryHigh, + /// Blob transaction has no versioned hashes + #[error("blob transaction missing blob hashes")] + BlobTransactionMissingBlobHashes, } #[cfg(test)] mod tests { use super::*; use alloy_consensus::constants::GWEI_TO_WEI; - use alloy_primitives::{address, bytes}; - use reth_revm::db::EmptyDB; #[test] fn test_ensure_0_fallback() { @@ -460,38 +278,4 @@ mod tests { ); assert!(call_fees.is_err()); } - - #[test] - fn state_override_state() { - let code = bytes!( - "0x63d0e30db05f525f5f6004601c3473c02aaa39b223fe8d0a0e5c4f27ead9083c756cc25af15f5260205ff3" - ); - let to = address!("0x2260FAC5E5542a773Aa44fBCfeDf7C193bc2C599"); - - let mut db = State::builder().with_database(CacheDB::new(EmptyDB::new())).build(); - - let acc_override = AccountOverride::default().with_code(code.clone()); - apply_account_override(to, acc_override, &mut db).unwrap(); - - let account = db.basic(to).unwrap().unwrap(); - assert!(account.code.is_some()); - assert_eq!(account.code_hash, keccak256(&code)); - } - - #[test] - fn state_override_cache_db() { - let code = bytes!( - "0x63d0e30db05f525f5f6004601c3473c02aaa39b223fe8d0a0e5c4f27ead9083c756cc25af15f5260205ff3" - ); - let to = address!("0x2260FAC5E5542a773Aa44fBCfeDf7C193bc2C599"); - - let mut db = CacheDB::new(EmptyDB::new()); - - let acc_override = AccountOverride::default().with_code(code.clone()); - apply_account_override(to, acc_override, &mut db).unwrap(); - - let account = db.basic(to).unwrap().unwrap(); - assert!(account.code.is_some()); - assert_eq!(account.code_hash, keccak256(&code)); - } } diff --git a/crates/rpc/rpc-types-compat/src/lib.rs b/crates/rpc/rpc-convert/src/lib.rs similarity index 63% rename from crates/rpc/rpc-types-compat/src/lib.rs rename to crates/rpc/rpc-convert/src/lib.rs index 8b0e5a4d0eb..db1d7b86fc7 100644 --- a/crates/rpc/rpc-types-compat/src/lib.rs +++ b/crates/rpc/rpc-convert/src/lib.rs @@ -11,8 +11,20 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub mod block; +mod fees; +mod rpc; pub mod transaction; + +pub use block::TryFromBlockResponse; +pub use fees::{CallFees, CallFeesError}; +pub use rpc::*; pub use transaction::{ - try_into_op_tx_info, try_into_scroll_tx_info, IntoRpcTx, RpcConverter, TransactionCompat, - TransactionConversionError, TryIntoSimTx, TxInfoMapper, + EthTxEnvError, IntoRpcTx, RpcConvert, RpcConverter, TransactionConversionError, TryIntoSimTx, + TxInfoMapper, }; + +#[cfg(feature = "op")] +pub use transaction::op::*; + +#[cfg(feature = "scroll")] +pub use transaction::scroll::*; diff --git a/crates/rpc/rpc-convert/src/rpc.rs b/crates/rpc/rpc-convert/src/rpc.rs new file mode 100644 index 00000000000..7b5c457419c --- /dev/null +++ b/crates/rpc/rpc-convert/src/rpc.rs @@ -0,0 +1,32 @@ +use 
alloy_json_rpc::RpcObject;
+use alloy_network::{Network, ReceiptResponse, TransactionResponse};
+
+/// RPC types used by the `eth_` RPC API.
+///
+/// This is a subset of the [`Network`] trait with only RPC response types kept.
+pub trait RpcTypes {
+    /// Header response type.
+    type Header: RpcObject;
+    /// Receipt response type.
+    type Receipt: RpcObject + ReceiptResponse;
+    /// Transaction response type.
+    type TransactionResponse: RpcObject + TransactionResponse;
+    /// Transaction request type.
+    type TransactionRequest: RpcObject;
+}
+
+impl<T> RpcTypes for T
+where
+    T: Network,
+{
+    type Header = T::HeaderResponse;
+    type Receipt = T::ReceiptResponse;
+    type TransactionResponse = T::TransactionResponse;
+    type TransactionRequest = T::TransactionRequest;
+}
+
+/// Adapter for network specific transaction response.
+pub type RpcTransaction<T> = <T as RpcTypes>::TransactionResponse;
+
+/// Adapter for network specific transaction request.
+pub type RpcTxReq<T> = <T as RpcTypes>::TransactionRequest;
diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs
new file mode 100644
index 00000000000..68dc1a2974e
--- /dev/null
+++ b/crates/rpc/rpc-convert/src/transaction.rs
@@ -0,0 +1,576 @@
+//! Compatibility functions for rpc `Transaction` type.
+
+use crate::{
+    fees::{CallFees, CallFeesError},
+    RpcTransaction, RpcTxReq, RpcTypes,
+};
+use alloy_consensus::{error::ValueError, transaction::Recovered, EthereumTxEnvelope, TxEip4844};
+use alloy_primitives::{Address, TxKind, U256};
+use alloy_rpc_types_eth::{
+    request::{TransactionInputError, TransactionRequest},
+    Transaction, TransactionInfo,
+};
+use core::error;
+use reth_evm::{
+    revm::context_interface::{either::Either, Block},
+    ConfigureEvm, TxEnvFor,
+};
+use reth_primitives_traits::{NodePrimitives, TxTy};
+use revm_context::{BlockEnv, CfgEnv, TxEnv};
+use std::{convert::Infallible, error::Error, fmt::Debug, marker::PhantomData};
+use thiserror::Error;
+
+/// Responsible for the conversions from and into RPC requests and responses.
+///
+/// The JSON-RPC schema and the Node primitives are configurable using the [`RpcConvert::Network`]
+/// and [`RpcConvert::Primitives`] associated types respectively.
+///
+/// A generic implementation [`RpcConverter`] should be preferred over a manual implementation. As
+/// long as its trait bound requirements are met, the implementation is created automatically and
+/// can be used in RPC method handlers for all the conversions.
+pub trait RpcConvert: Send + Sync + Unpin + Clone + Debug {
+    /// Associated lower layer consensus types to convert from and into types of [`Self::Network`].
+    type Primitives: NodePrimitives;
+
+    /// Associated upper layer JSON-RPC API network requests and responses to convert from and into
+    /// types of [`Self::Primitives`].
+    type Network: RpcTypes + Send + Sync + Unpin + Clone + Debug;
+
+    /// A set of variables for executing a transaction.
+    type TxEnv;
+
+    /// An associated RPC conversion error.
+    type Error: error::Error + Into<jsonrpsee_types::ErrorObject<'static>>;
+
+    /// Wrapper for `fill()` with default `TransactionInfo`.
+    /// Creates a new rpc transaction result for a _pending_ signed transaction, setting block
+    /// environment related fields to `None`.
+    fn fill_pending(
+        &self,
+        tx: Recovered<TxTy<Self::Primitives>>,
+    ) -> Result<RpcTransaction<Self::Network>, Self::Error> {
+        self.fill(tx, TransactionInfo::default())
+    }
+
+    /// Create a new rpc transaction result for a mined transaction, using the given block hash,
+    /// number, and tx index fields to populate the corresponding fields in the rpc result.
+ /// + /// The block hash, number, and tx index fields should be from the original block where the + /// transaction was mined. + fn fill( + &self, + tx: Recovered>, + tx_inf: TransactionInfo, + ) -> Result, Self::Error>; + + /// Builds a fake transaction from a transaction request for inclusion into block built in + /// `eth_simulateV1`. + fn build_simulate_v1_transaction( + &self, + request: RpcTxReq, + ) -> Result, Self::Error>; + + /// Creates a transaction environment for execution based on `request` with corresponding + /// `cfg_env` and `block_env`. + fn tx_env( + &self, + request: RpcTxReq, + cfg_env: &CfgEnv, + block_env: &BlockEnv, + ) -> Result; +} + +/// Converts `self` into `T`. The opposite of [`FromConsensusTx`]. +/// +/// Should create an RPC transaction response object based on a consensus transaction, its signer +/// [`Address`] and an additional context [`IntoRpcTx::TxInfo`]. +/// +/// Avoid implementing [`IntoRpcTx`] and use [`FromConsensusTx`] instead. Implementing it +/// automatically provides an implementation of [`IntoRpcTx`] thanks to the blanket implementation +/// in this crate. +/// +/// Prefer using [`IntoRpcTx`] over [`FromConsensusTx`] when specifying trait bounds on a generic +/// function to ensure that types that only implement [`IntoRpcTx`] can be used as well. +pub trait IntoRpcTx { + /// An additional context, usually [`TransactionInfo`] in a wrapper that carries some + /// implementation specific extra information. + type TxInfo; + + /// Performs the conversion consuming `self` with `signer` and `tx_info`. See [`IntoRpcTx`] + /// for details. + fn into_rpc_tx(self, signer: Address, tx_info: Self::TxInfo) -> T; +} + +/// Converts `T` into `self`. It is reciprocal of [`IntoRpcTx`]. +/// +/// Should create an RPC transaction response object based on a consensus transaction, its signer +/// [`Address`] and an additional context [`FromConsensusTx::TxInfo`]. +/// +/// Prefer implementing [`FromConsensusTx`] over [`IntoRpcTx`] because it automatically provides an +/// implementation of [`IntoRpcTx`] thanks to the blanket implementation in this crate. +/// +/// Prefer using [`IntoRpcTx`] over using [`FromConsensusTx`] when specifying trait bounds on a +/// generic function. This way, types that directly implement [`IntoRpcTx`] can be used as arguments +/// as well. +pub trait FromConsensusTx { + /// An additional context, usually [`TransactionInfo`] in a wrapper that carries some + /// implementation specific extra information. + type TxInfo; + + /// Performs the conversion consuming `tx` with `signer` and `tx_info`. See [`FromConsensusTx`] + /// for details. + fn from_consensus_tx(tx: T, signer: Address, tx_info: Self::TxInfo) -> Self; +} + +impl> + FromConsensusTx for Transaction +{ + type TxInfo = TransactionInfo; + + fn from_consensus_tx(tx: TxIn, signer: Address, tx_info: Self::TxInfo) -> Self { + Self::from_transaction(Recovered::new_unchecked(tx.into(), signer), tx_info) + } +} + +impl IntoRpcTx for ConsensusTx +where + ConsensusTx: alloy_consensus::Transaction, + RpcTx: FromConsensusTx, +{ + type TxInfo = RpcTx::TxInfo; + + fn into_rpc_tx(self, signer: Address, tx_info: Self::TxInfo) -> RpcTx { + RpcTx::from_consensus_tx(self, signer, tx_info) + } +} + +/// Converts `self` into `T`. +/// +/// Should create a fake transaction for simulation using [`TransactionRequest`]. +pub trait TryIntoSimTx +where + Self: Sized, +{ + /// Performs the conversion. 
+ /// + /// Should return a signed typed transaction envelope for the [`eth_simulateV1`] endpoint with a + /// dummy signature or an error if [required fields] are missing. + /// + /// [`eth_simulateV1`]: + /// [required fields]: TransactionRequest::buildable_type + fn try_into_sim_tx(self) -> Result>; +} + +/// Adds extra context to [`TransactionInfo`]. +pub trait TxInfoMapper { + /// An associated output type that carries [`TransactionInfo`] with some extra context. + type Out; + /// An associated error that can occur during the mapping. + type Err; + + /// Performs the conversion. + fn try_map(&self, tx: T, tx_info: TransactionInfo) -> Result; +} + +impl TxInfoMapper<&T> for () { + type Out = TransactionInfo; + type Err = Infallible; + + fn try_map(&self, _tx: &T, tx_info: TransactionInfo) -> Result { + Ok(tx_info) + } +} + +impl TryIntoSimTx> for TransactionRequest { + fn try_into_sim_tx(self) -> Result, ValueError> { + Self::build_typed_simulate_transaction(self) + } +} + +/// Converts `self` into `T`. +/// +/// Should create an executable transaction environment using [`TransactionRequest`]. +pub trait TryIntoTxEnv { + /// An associated error that can occur during the conversion. + type Err; + + /// Performs the conversion. + fn try_into_tx_env( + self, + cfg_env: &CfgEnv, + block_env: &BlockEnv, + ) -> Result; +} + +/// An Ethereum specific transaction environment error than can occur during conversion from +/// [`TransactionRequest`]. +#[derive(Debug, Error)] +pub enum EthTxEnvError { + /// Error while decoding or validating transaction request fees. + #[error(transparent)] + CallFees(#[from] CallFeesError), + /// Both data and input fields are set and not equal. + #[error(transparent)] + Input(#[from] TransactionInputError), +} + +impl TryIntoTxEnv for TransactionRequest { + type Err = EthTxEnvError; + + fn try_into_tx_env( + self, + cfg_env: &CfgEnv, + block_env: &BlockEnv, + ) -> Result { + // Ensure that if versioned hashes are set, they're not empty + if self.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) { + return Err(CallFeesError::BlobTransactionMissingBlobHashes.into()) + } + + let tx_type = self.minimal_tx_type() as u8; + + let Self { + from, + to, + gas_price, + max_fee_per_gas, + max_priority_fee_per_gas, + gas, + value, + input, + nonce, + access_list, + chain_id, + blob_versioned_hashes, + max_fee_per_blob_gas, + authorization_list, + transaction_type: _, + sidecar: _, + } = self; + + let CallFees { max_priority_fee_per_gas, gas_price, max_fee_per_blob_gas } = + CallFees::ensure_fees( + gas_price.map(U256::from), + max_fee_per_gas.map(U256::from), + max_priority_fee_per_gas.map(U256::from), + U256::from(block_env.basefee), + blob_versioned_hashes.as_deref(), + max_fee_per_blob_gas.map(U256::from), + block_env.blob_gasprice().map(U256::from), + )?; + + let gas_limit = gas.unwrap_or( + // Use maximum allowed gas limit. 
+impl TryIntoTxEnv<TxEnv> for TransactionRequest {
+    type Err = EthTxEnvError;
+
+    fn try_into_tx_env(
+        self,
+        cfg_env: &CfgEnv,
+        block_env: &BlockEnv,
+    ) -> Result<TxEnv, Self::Err> {
+        // Ensure that if versioned hashes are set, they're not empty
+        if self.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) {
+            return Err(CallFeesError::BlobTransactionMissingBlobHashes.into())
+        }
+
+        let tx_type = self.minimal_tx_type() as u8;
+
+        let Self {
+            from,
+            to,
+            gas_price,
+            max_fee_per_gas,
+            max_priority_fee_per_gas,
+            gas,
+            value,
+            input,
+            nonce,
+            access_list,
+            chain_id,
+            blob_versioned_hashes,
+            max_fee_per_blob_gas,
+            authorization_list,
+            transaction_type: _,
+            sidecar: _,
+        } = self;
+
+        let CallFees { max_priority_fee_per_gas, gas_price, max_fee_per_blob_gas } =
+            CallFees::ensure_fees(
+                gas_price.map(U256::from),
+                max_fee_per_gas.map(U256::from),
+                max_priority_fee_per_gas.map(U256::from),
+                U256::from(block_env.basefee),
+                blob_versioned_hashes.as_deref(),
+                max_fee_per_blob_gas.map(U256::from),
+                block_env.blob_gasprice().map(U256::from),
+            )?;
+
+        let gas_limit = gas.unwrap_or(
+            // Use the maximum allowed gas limit. The reason for this is that both Erigon and
+            // Geth use a pre-configured gas cap even if it's possible to derive the gas limit
+            // from the block:
+            //
+            block_env.gas_limit,
+        );
+
+        let chain_id = chain_id.unwrap_or(cfg_env.chain_id);
+
+        let caller = from.unwrap_or_default();
+
+        let nonce = nonce.unwrap_or_default();
+
+        let env = TxEnv {
+            tx_type,
+            gas_limit,
+            nonce,
+            caller,
+            gas_price: gas_price.saturating_to(),
+            gas_priority_fee: max_priority_fee_per_gas.map(|v| v.saturating_to()),
+            kind: to.unwrap_or(TxKind::Create),
+            value: value.unwrap_or_default(),
+            data: input.try_into_unique_input().map_err(EthTxEnvError::from)?.unwrap_or_default(),
+            chain_id: Some(chain_id),
+            access_list: access_list.unwrap_or_default(),
+            // EIP-4844 fields
+            blob_hashes: blob_versioned_hashes.unwrap_or_default(),
+            max_fee_per_blob_gas: max_fee_per_blob_gas
+                .map(|v| v.saturating_to())
+                .unwrap_or_default(),
+            // EIP-7702 fields
+            authorization_list: authorization_list
+                .unwrap_or_default()
+                .into_iter()
+                .map(Either::Left)
+                .collect(),
+        };
+
+        Ok(env)
+    }
+}
+
+/// Conversion into transaction RPC response failed.
+#[derive(Debug, Clone, Error)]
+#[error("Failed to convert transaction into RPC response: {0}")]
+pub struct TransactionConversionError(String);
+
+/// Generic RPC response object converter for `Evm` and network `E`.
+///
+/// The main purpose of this struct is to provide an implementation of [`RpcConvert`] for generic
+/// associated types. This struct can then be used for conversions in RPC method handlers.
+///
+/// An [`RpcConvert`] implementation is generated if the following traits are implemented for the
+/// network and EVM associated primitives:
+/// * [`FromConsensusTx`]: from signed transaction into RPC response object.
+/// * [`TryIntoSimTx`]: from RPC transaction request into a simulated transaction.
+/// * [`TryIntoTxEnv`]: from RPC transaction request into an executable transaction.
+/// * [`TxInfoMapper`]: from [`TransactionInfo`] into [`FromConsensusTx::TxInfo`]. Should be
+///   implemented for a dedicated struct that is assigned to `Map`. If [`FromConsensusTx::TxInfo`]
+///   is [`TransactionInfo`], then `()` can be used as `Map`, which trivially passes over the
+///   input object.
+#[derive(Debug)]
+pub struct RpcConverter<E, Evm, Err, Map = ()> {
+    phantom: PhantomData<(E, Evm, Err)>,
+    mapper: Map,
+}
+
+impl<E, Evm, Err> RpcConverter<E, Evm, Err> {
+    /// Creates a new [`RpcConverter`] with the default mapper.
+    pub const fn new() -> Self {
+        Self::with_mapper(())
+    }
+}
+
+impl<E, Evm, Err, Map> RpcConverter<E, Evm, Err, Map> {
+    /// Creates a new [`RpcConverter`] with `mapper`.
+    pub const fn with_mapper(mapper: Map) -> Self {
+        Self { phantom: PhantomData, mapper }
+    }
+
+    /// Converts the generic types.
+    pub fn convert<E2, Evm2, Err2>(self) -> RpcConverter<E2, Evm2, Err2, Map> {
+        RpcConverter::with_mapper(self.mapper)
+    }
+
+    /// Swaps the inner `mapper`.
+    pub fn map<Map2>(self, mapper: Map2) -> RpcConverter<E, Evm, Err, Map2> {
+        RpcConverter::with_mapper(mapper)
+    }
+
+    /// Converts the generic types and swaps the inner `mapper`.
+    pub fn convert_map<E2, Evm2, Err2, Map2>(
+        self,
+        mapper: Map2,
+    ) -> RpcConverter<E2, Evm2, Err2, Map2> {
+        self.convert().map(mapper)
+    }
+}
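Editor's illustration (not part of the patch): driving the `TryIntoTxEnv` implementation above from a plain request. The `revm::context` import paths are an assumption matching the types used in this file:

    use alloy_primitives::{Address, TxKind, U256};
    use alloy_rpc_types_eth::TransactionRequest;
    use revm::context::{BlockEnv, CfgEnv, TxEnv};

    fn demo(cfg_env: &CfgEnv, block_env: &BlockEnv) -> Result<TxEnv, EthTxEnvError> {
        let request = TransactionRequest {
            to: Some(TxKind::Call(Address::ZERO)),
            value: Some(U256::from(1u64)),
            ..Default::default()
        };
        // Unset fields fall back as implemented above: the gas limit to the block's,
        // the chain id to `cfg_env.chain_id`, caller and nonce to zero.
        request.try_into_tx_env(cfg_env, block_env)
    }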
+impl<E, Evm, Err, Map: Clone> Clone for RpcConverter<E, Evm, Err, Map> {
+    fn clone(&self) -> Self {
+        Self::with_mapper(self.mapper.clone())
+    }
+}
+
+impl<E, Evm, Err> Default for RpcConverter<E, Evm, Err> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<N, E, Evm, Err, Map> RpcConvert for RpcConverter<E, Evm, Err, Map>
+where
+    N: NodePrimitives,
+    E: RpcTypes + Send + Sync + Unpin + Clone + Debug,
+    Evm: ConfigureEvm<Primitives = N>,
+    TxTy<N>: IntoRpcTx<RpcTransaction<E>> + Clone + Debug,
+    RpcTxReq<E>: TryIntoSimTx<TxTy<N>> + TryIntoTxEnv<TxEnvFor<Evm>>,
+    Err: From<TransactionConversionError>
+        + From<<RpcTxReq<E> as TryIntoTxEnv<TxEnvFor<Evm>>>::Err>
+        + for<'a> From<<Map as TxInfoMapper<&'a TxTy<N>>>::Err>
+        + Error
+        + Unpin
+        + Sync
+        + Send
+        + Into<jsonrpsee_types::ErrorObject<'static>>,
+    Map: for<'a> TxInfoMapper<
+            &'a TxTy<N>,
+            Out = <TxTy<N> as IntoRpcTx<RpcTransaction<E>>>::TxInfo,
+        > + Clone
+        + Debug
+        + Unpin
+        + Send
+        + Sync,
+{
+    type Primitives = N;
+    type Network = E;
+    type TxEnv = TxEnvFor<Evm>;
+    type Error = Err;
+
+    fn fill(
+        &self,
+        tx: Recovered<TxTy<N>>,
+        tx_info: TransactionInfo,
+    ) -> Result<RpcTransaction<E>, Self::Error> {
+        let (tx, signer) = tx.into_parts();
+        let tx_info = self.mapper.try_map(&tx, tx_info)?;
+
+        Ok(tx.into_rpc_tx(signer, tx_info))
+    }
+
+    fn build_simulate_v1_transaction(
+        &self,
+        request: RpcTxReq<E>,
+    ) -> Result<TxTy<N>, Self::Error> {
+        Ok(request.try_into_sim_tx().map_err(|e| TransactionConversionError(e.to_string()))?)
+    }
+
+    fn tx_env(
+        &self,
+        request: RpcTxReq<E>,
+        cfg_env: &CfgEnv,
+        block_env: &BlockEnv,
+    ) -> Result<Self::TxEnv, Self::Error> {
+        Ok(request.try_into_tx_env(cfg_env, block_env)?)
+    }
+}
+
+/// Scroll specific RPC transaction compatibility implementations.
+#[cfg(feature = "scroll")]
+pub mod scroll {
+    use super::*;
+    use alloy_consensus::SignableTransaction;
+    use alloy_primitives::{Address, Bytes, Signature};
+    use reth_primitives_traits::SignedTransaction;
+    use reth_scroll_primitives::ScrollReceipt;
+    use reth_storage_api::{errors::ProviderError, ReceiptProvider};
+    use revm_scroll::l1block::TX_L1_FEE_PRECISION_U256;
+    use scroll_alloy_consensus::{ScrollAdditionalInfo, ScrollTransactionInfo, ScrollTxEnvelope};
+    use scroll_alloy_rpc_types::ScrollTransactionRequest;
+
+    /// Creates [`ScrollTransactionInfo`] by adding [`ScrollAdditionalInfo`] to
+    /// [`TransactionInfo`] if `tx` is not an L1 message.
+    pub fn try_into_scroll_tx_info<T: ReceiptProvider<Receipt = ScrollReceipt>>(
+        provider: &T,
+        tx: &ScrollTxEnvelope,
+        tx_info: TransactionInfo,
+    ) -> Result<ScrollTransactionInfo, ProviderError> {
+        let additional_info = if tx.is_l1_message() {
+            None
+        } else {
+            provider
+                .receipt_by_hash(*tx.tx_hash())?
+                .map(|receipt| ScrollAdditionalInfo { l1_fee: receipt.l1_fee() })
+        }
+        .unwrap_or_default();
+
+        Ok(ScrollTransactionInfo::new(tx_info, additional_info))
+    }
+
+    impl FromConsensusTx<ScrollTxEnvelope> for scroll_alloy_rpc_types::Transaction {
+        type TxInfo = ScrollTransactionInfo;
+
+        fn from_consensus_tx(
+            tx: ScrollTxEnvelope,
+            signer: Address,
+            tx_info: Self::TxInfo,
+        ) -> Self {
+            Self::from_transaction(Recovered::new_unchecked(tx, signer), tx_info)
+        }
+    }
+
+    impl TryIntoSimTx<ScrollTxEnvelope> for ScrollTransactionRequest {
+        fn try_into_sim_tx(self) -> Result<ScrollTxEnvelope, ValueError<Self>> {
+            let tx = self
+                .build_typed_tx()
+                .map_err(|request| ValueError::new(request, "Required fields missing"))?;
+
+            // Create an empty signature for the transaction.
+ let signature = Signature::new(Default::default(), Default::default(), false); + + Ok(tx.into_signed(signature).into()) + } + } + + impl TryIntoTxEnv> + for ScrollTransactionRequest + { + type Err = EthTxEnvError; + + fn try_into_tx_env( + self, + cfg_env: &CfgEnv, + block_env: &BlockEnv, + ) -> Result, Self::Err> { + Ok(scroll_alloy_evm::ScrollTransactionIntoTxEnv::new( + self.as_ref().clone().try_into_tx_env(cfg_env, block_env)?, + Some(Bytes::new()), + Some(TX_L1_FEE_PRECISION_U256), + )) + } + } +} + +/// Optimism specific RPC transaction compatibility implementations. +#[cfg(feature = "op")] +pub mod op { + use super::*; + use alloy_consensus::SignableTransaction; + use alloy_primitives::{Address, Bytes, Signature}; + use op_alloy_consensus::{ + transaction::{OpDepositInfo, OpTransactionInfo}, + OpTxEnvelope, + }; + use op_alloy_rpc_types::OpTransactionRequest; + use op_revm::OpTransaction; + use reth_optimism_primitives::DepositReceipt; + use reth_storage_api::{errors::ProviderError, ReceiptProvider}; + + /// Creates [`OpTransactionInfo`] by adding [`OpDepositInfo`] to [`TransactionInfo`] if `tx` is + /// a deposit. + pub fn try_into_op_tx_info>( + provider: &T, + tx: &OpTxEnvelope, + tx_info: TransactionInfo, + ) -> Result { + let deposit_meta = if tx.is_deposit() { + provider.receipt_by_hash(tx.tx_hash())?.and_then(|receipt| { + receipt.as_deposit_receipt().map(|receipt| OpDepositInfo { + deposit_receipt_version: receipt.deposit_receipt_version, + deposit_nonce: receipt.deposit_nonce, + }) + }) + } else { + None + } + .unwrap_or_default(); + + Ok(OpTransactionInfo::new(tx_info, deposit_meta)) + } + + impl FromConsensusTx + for op_alloy_rpc_types::Transaction + { + type TxInfo = OpTransactionInfo; + + fn from_consensus_tx(tx: T, signer: Address, tx_info: Self::TxInfo) -> Self { + Self::from_transaction(Recovered::new_unchecked(tx, signer), tx_info) + } + } + + impl TryIntoSimTx for OpTransactionRequest { + fn try_into_sim_tx(self) -> Result> { + let tx = self + .build_typed_tx() + .map_err(|request| ValueError::new(request, "Required fields missing"))?; + + // Create an empty signature for the transaction. 
+ let signature = Signature::new(Default::default(), Default::default(), false); + + Ok(tx.into_signed(signature).into()) + } + } + + impl TryIntoTxEnv> for OpTransactionRequest { + type Err = EthTxEnvError; + + fn try_into_tx_env( + self, + cfg_env: &CfgEnv, + block_env: &BlockEnv, + ) -> Result, Self::Err> { + Ok(OpTransaction { + base: self.as_ref().clone().try_into_tx_env(cfg_env, block_env)?, + enveloped_tx: Some(Bytes::new()), + deposit: Default::default(), + }) + } + } +} diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 477fda2b1f5..81359969c76 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -2,7 +2,7 @@ use alloy_eips::eip4895::Withdrawals; use alloy_primitives::Bytes; -use alloy_rlp::{Decodable, Error as RlpError}; +use alloy_rlp::Decodable; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadSidecar, ExecutionPayloadV1, PayloadError, @@ -87,16 +87,6 @@ fn payload_validation_conversion() { Err(PayloadError::ExtraData(data)) if data == block_with_invalid_extra_data ); - // Zero base fee - let block_with_zero_base_fee = transform_block(block.clone(), |mut b| { - b.header.base_fee_per_gas = Some(0); - b - }); - assert_matches!( - block_with_zero_base_fee.try_into_block_with_sidecar::(&ExecutionPayloadSidecar::none()), - Err(PayloadError::BaseFee(val)) if val.is_zero() - ); - // Invalid encoded transactions let mut payload_with_invalid_txs = ExecutionPayloadV1::from_block_unchecked(block.hash(), &block.into_block()); @@ -105,5 +95,5 @@ fn payload_validation_conversion() { *tx = Bytes::new(); }); let payload_with_invalid_txs = payload_with_invalid_txs.try_into_block::(); - assert_matches!(payload_with_invalid_txs, Err(PayloadError::Decode(RlpError::InputTooShort))); + assert_matches!(payload_with_invalid_txs, Err(PayloadError::Decode(_))); } diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index bc431891c48..af8bcb90def 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -14,13 +14,14 @@ workspace = true [dependencies] # reth revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee"] } +reth-chain-state.workspace = true revm-inspectors.workspace = true -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["rpc-compat"] } reth-errors.workspace = true reth-evm.workspace = true reth-storage-api.workspace = true reth-revm.workspace = true -reth-rpc-types-compat.workspace = true +reth-rpc-convert.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true reth-chainspec.workspace = true @@ -32,6 +33,7 @@ reth-trie-common = { workspace = true, features = ["eip1186"] } reth-payload-builder.workspace = true # ethereum +alloy-evm = { workspace = true, features = ["overrides", "call-util"] } alloy-rlp.workspace = true alloy-serde.workspace = true alloy-eips.workspace = true @@ -61,3 +63,9 @@ tracing.workspace = true [features] js-tracer = ["revm-inspectors/js-tracer", "reth-rpc-eth-types/js-tracer"] client = ["jsonrpsee/client", "jsonrpsee/async-client"] +op = [ + "reth-evm/op", + "reth-primitives-traits/op", + "reth-rpc-convert/op", + "alloy-evm/op", +] diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 2a3e361729c..0f2b9eb3896 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs 
+++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -1,5 +1,9 @@ //! Implementation of the [`jsonrpsee`] generated [`EthApiServer`] trait. Handles RPC requests for //! the `eth_` namespace. +use crate::{ + helpers::{EthApiSpec, EthBlocks, EthCall, EthFees, EthState, EthTransactions, FullEthApi}, + RpcBlock, RpcHeader, RpcReceipt, RpcTransaction, +}; use alloy_dyn_abi::TypedData; use alloy_eips::{eip2930::AccessListResult, BlockId, BlockNumberOrTag}; use alloy_json_rpc::RpcObject; @@ -7,24 +11,20 @@ use alloy_primitives::{Address, Bytes, B256, B64, U256, U64}; use alloy_rpc_types_eth::{ simulate::{SimulatePayload, SimulatedBlock}, state::{EvmOverrides, StateOverride}, - transaction::TransactionRequest, BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Index, - StateContext, SyncStatus, Work, + StateContext, SyncStatus, TransactionRequest, Work, }; use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth_rpc_convert::RpcTxReq; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use tracing::trace; -use crate::{ - helpers::{EthApiSpec, EthBlocks, EthCall, EthFees, EthState, EthTransactions, FullEthApi}, - RpcBlock, RpcHeader, RpcReceipt, RpcTransaction, -}; - /// Helper trait, unifies functionality that must be supported to implement all RPC methods for /// server. pub trait FullEthApiServer: EthApiServer< + RpcTxReq, RpcTransaction, RpcBlock, RpcReceipt, @@ -36,6 +36,7 @@ pub trait FullEthApiServer: impl FullEthApiServer for T where T: EthApiServer< + RpcTxReq, RpcTransaction, RpcBlock, RpcReceipt, @@ -48,7 +49,7 @@ impl FullEthApiServer for T where /// Eth rpc interface: #[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))] -pub trait EthApi { +pub trait EthApi { /// Returns the protocol version encoded as a string. #[method(name = "protocolVersion")] async fn protocol_version(&self) -> RpcResult; @@ -338,6 +339,12 @@ pub trait EthApi { #[method(name = "sendRawTransaction")] async fn send_raw_transaction(&self, bytes: Bytes) -> RpcResult; + /// Sends a signed transaction and awaits the transaction receipt. + /// + /// This will return a timeout error if the transaction isn't included within some time period. + #[method(name = "sendRawTransactionSync")] + async fn send_raw_transaction_sync(&self, bytes: Bytes) -> RpcResult; + /// Returns an Ethereum specific signature with: sign(keccak256("\x19Ethereum Signed Message:\n" /// + len(message) + message))). #[method(name = "sign")] @@ -376,6 +383,7 @@ pub trait EthApi { #[async_trait::async_trait] impl EthApiServer< + RpcTxReq, RpcTransaction, RpcBlock, RpcReceipt, @@ -802,6 +810,12 @@ where Ok(EthTransactions::send_raw_transaction(self, tx).await?) } + /// Handler for: `eth_sendRawTransactionSync` + async fn send_raw_transaction_sync(&self, tx: Bytes) -> RpcResult> { + trace!(target: "rpc::eth", ?tx, "Serving eth_sendRawTransactionSync"); + Ok(EthTransactions::send_raw_transaction_sync(self, tx).await?) 
+ } + /// Handler for: `eth_sign` async fn sign(&self, address: Address, message: Bytes) -> RpcResult { trace!(target: "rpc::eth", ?address, ?message, "Serving eth_sign"); diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 24992560126..91a6739b8b3 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -13,7 +13,7 @@ use futures::Future; use reth_evm::ConfigureEvm; use reth_node_api::BlockBody; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock}; -use reth_rpc_types_compat::block::from_block; +use reth_rpc_convert::RpcConvert; use reth_storage_api::{BlockIdReader, BlockReader, ProviderHeader, ProviderReceipt, ProviderTx}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::sync::Arc; @@ -59,7 +59,9 @@ pub trait EthBlocks: LoadBlock { async move { let Some(block) = self.recovered_block(block_id).await? else { return Ok(None) }; - let block = from_block((*block).clone(), full.into(), self.tx_resp_builder())?; + let block = block.clone_into_rpc_block(full.into(), |tx, tx_info| { + self.tx_resp_builder().fill(tx, tx_info) + })?; Ok(Some(block)) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index dda235ffaf3..12d63243f1c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -7,6 +7,10 @@ use crate::{ }; use alloy_consensus::BlockHeader; use alloy_eips::eip2930::AccessListResult; +use alloy_evm::{ + call::caller_gas_allowance, + overrides::{apply_block_overrides, apply_state_overrides}, +}; use alloy_primitives::{Bytes, B256, U256}; use alloy_rpc_types_eth::{ simulate::{SimBlock, SimulatePayload, SimulatedBlock}, @@ -27,10 +31,10 @@ use reth_revm::{ db::{CacheDB, State}, DatabaseRef, }; +use reth_rpc_convert::{RpcConvert, RpcTypes}; use reth_rpc_eth_types::{ cache::db::{StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, error::{api::FromEvmHalt, ensure_success, FromEthApiError}, - revm_utils::{apply_block_overrides, apply_state_overrides, caller_gas_allowance}, simulate::{self, EthSimulateError}, EthApiError, RevertError, RpcInvalidTransactionError, StateCacheDb, }; @@ -131,7 +135,8 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA apply_block_overrides(block_overrides, &mut db, &mut evm_env.block_env); } if let Some(state_overrides) = state_overrides { - apply_state_overrides(state_overrides, &mut db)?; + apply_state_overrides(state_overrides, &mut db) + .map_err(Self::Error::from_eth_err)?; } let block_gas_limit = evm_env.block_env.gas_limit; @@ -189,7 +194,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let block = simulate::build_simulated_block( result.block, results, - return_full_transactions, + return_full_transactions.into(), this.tx_resp_builder(), )?; @@ -380,7 +385,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let mut db = CacheDB::new(StateProviderDatabase::new(state)); if let Some(state_overrides) = state_override { - apply_state_overrides(state_overrides, &mut db)?; + apply_state_overrides(state_overrides, &mut db).map_err(Self::Error::from_eth_err)?; } let mut tx_env = self.create_txn_env(&evm_env, request.clone(), &mut db)?; @@ -398,7 +403,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA evm_env.cfg_env.disable_eip3607 = true; if request.gas.is_none() && 
tx_env.gas_price() > 0 { - let cap = caller_gas_allowance(&mut db, &tx_env)?; + let cap = caller_gas_allowance(&mut db, &tx_env).map_err(Self::Error::from_eth_err)?; // no gas limit was provided in the request, so we need to cap the request's gas limit tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit)); } @@ -455,7 +460,13 @@ pub trait Call: SignedTx = ProviderTx, >, >, - Error: FromEvmError, + RpcConvert: RpcConvert< + TxEnv = TxEnvFor, + Network: RpcTypes>, + >, + Error: FromEvmError + + From<::Error> + + From, > + SpawnBlocking { /// Returns default gas limit to use for `eth_call` and tracing RPC methods. @@ -689,9 +700,20 @@ pub trait Call: fn create_txn_env( &self, evm_env: &EvmEnv>, - request: TransactionRequest, - db: impl Database>, - ) -> Result, Self::Error>; + mut request: TransactionRequest, + mut db: impl Database>, + ) -> Result, Self::Error> { + if request.nonce.is_none() { + request.nonce.replace( + db.basic(request.from.unwrap_or_default()) + .map_err(Into::into)? + .map(|acc| acc.nonce) + .unwrap_or_default(), + ); + } + + Ok(self.tx_resp_builder().tx_env(request.into(), &evm_env.cfg_env, &evm_env.block_env)?) + } /// Prepares the [`EvmEnv`] for execution of calls. /// @@ -744,7 +766,8 @@ pub trait Call: apply_block_overrides(*block_overrides, db, &mut evm_env.block_env); } if let Some(state_overrides) = overrides.state { - apply_state_overrides(state_overrides, db)?; + apply_state_overrides(state_overrides, db) + .map_err(EthApiError::from_state_overrides_err)?; } let request_gas = request.gas; @@ -755,7 +778,7 @@ pub trait Call: if tx_env.gas_price() > 0 { // If gas price is specified, cap transaction gas limit with caller allowance trace!(target: "rpc::eth::call", ?tx_env, "Applying gas limit cap with caller allowance"); - let cap = caller_gas_allowance(db, &tx_env)?; + let cap = caller_gas_allowance(db, &tx_env).map_err(EthApiError::from_call_err)?; // ensure we cap gas_limit to the block's tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit)); } diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index 297559fbabf..91af2c37e4c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -2,16 +2,16 @@ use super::{Call, LoadPendingBlock}; use crate::{AsEthApiError, FromEthApiError, IntoEthApiError}; +use alloy_evm::{call::caller_gas_allowance, overrides::apply_state_overrides}; use alloy_primitives::{TxKind, U256}; use alloy_rpc_types_eth::{state::StateOverride, transaction::TransactionRequest, BlockId}; use futures::Future; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_errors::ProviderError; -use reth_evm::{Database, EvmEnvFor, TransactionEnv, TxEnvFor}; +use reth_evm::{ConfigureEvm, Database, Evm, EvmEnvFor, EvmFor, TransactionEnv, TxEnvFor}; use reth_revm::{database::StateProviderDatabase, db::CacheDB}; use reth_rpc_eth_types::{ - error::api::FromEvmHalt, - revm_utils::{apply_state_overrides, caller_gas_allowance}, + error::{api::FromEvmHalt, FromEvmError}, EthApiError, RevertError, RpcInvalidTransactionError, }; use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO}; @@ -81,25 +81,12 @@ pub trait EstimateCall: Call { apply_state_overrides(state_override, &mut db).map_err(Self::Error::from_eth_err)?; } - // Optimize for simple transfer transactions, potentially reducing the gas estimate. 
+ // Check if this is a basic transfer (no input data to account with no code) + let mut is_basic_transfer = false; if tx_env.input().is_empty() { if let TxKind::Call(to) = tx_env.kind() { if let Ok(code) = db.db.account_code(&to) { - let no_code_callee = code.map(|code| code.is_empty()).unwrap_or(true); - if no_code_callee { - // If the tx is a simple transfer (call to an account with no code) we can - // shortcircuit. But simply returning - // `MIN_TRANSACTION_GAS` is dangerous because there might be additional - // field combos that bump the price up, so we try executing the function - // with the minimum gas limit to make sure. - let mut tx_env = tx_env.clone(); - tx_env.set_gas_limit(MIN_TRANSACTION_GAS); - if let Ok(res) = self.transact(&mut db, evm_env.clone(), tx_env) { - if res.result.is_success() { - return Ok(U256::from(MIN_TRANSACTION_GAS)) - } - } - } + is_basic_transfer = code.map(|code| code.is_empty()).unwrap_or(true); } } } @@ -116,10 +103,31 @@ pub trait EstimateCall: Call { // If the provided gas limit is less than computed cap, use that tx_env.set_gas_limit(tx_env.gas_limit().min(highest_gas_limit)); - trace!(target: "rpc::eth::estimate", ?evm_env, ?tx_env, "Starting gas estimation"); + // Create EVM instance once and reuse it throughout the entire estimation process + let mut evm = self.evm_config().evm_with_env(&mut db, evm_env); + + // For basic transfers, try using minimum gas before running full binary search + if is_basic_transfer { + // If the tx is a simple transfer (call to an account with no code) we can + // shortcircuit. But simply returning + // `MIN_TRANSACTION_GAS` is dangerous because there might be additional + // field combos that bump the price up, so we try executing the function + // with the minimum gas limit to make sure. + let mut min_tx_env = tx_env.clone(); + min_tx_env.set_gas_limit(MIN_TRANSACTION_GAS); + + // Reuse the same EVM instance + if let Ok(res) = evm.transact(min_tx_env).map_err(Self::Error::from_evm_err) { + if res.result.is_success() { + return Ok(U256::from(MIN_TRANSACTION_GAS)) + } + } + } + + trace!(target: "rpc::eth::estimate", ?tx_env, gas_limit = tx_env.gas_limit(), is_basic_transfer, "Starting gas estimation"); // Execute the transaction with the highest possible gas limit. - let mut res = match self.transact(&mut db, evm_env.clone(), tx_env.clone()) { + let mut res = match evm.transact(tx_env.clone()).map_err(Self::Error::from_evm_err) { // Handle the exceptional case where the transaction initialization uses too much // gas. 
If the gas price or gas limit was specified in the request, // retry the transaction with the block's gas limit to determine if @@ -128,7 +136,7 @@ pub trait EstimateCall: Call { if err.is_gas_too_high() && (tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) => { - return Err(self.map_out_of_gas_err(block_env_gas_limit, evm_env, tx_env, &mut db)) + return Self::map_out_of_gas_err(&mut evm, tx_env, block_env_gas_limit); } Err(err) if err.is_gas_too_low() => { // This failed because the configured gas cost of the tx was lower than what @@ -155,7 +163,7 @@ pub trait EstimateCall: Call { // if price or limit was included in the request then we can execute the request // again with the block's gas limit to check if revert is gas related or not return if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() { - Err(self.map_out_of_gas_err(block_env_gas_limit, evm_env, tx_env, &mut db)) + Self::map_out_of_gas_err(&mut evm, tx_env, block_env_gas_limit) } else { // the transaction did revert Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()) @@ -185,10 +193,13 @@ pub trait EstimateCall: Call { let optimistic_gas_limit = (gas_used + gas_refund + CALL_STIPEND_GAS) * 64 / 63; if optimistic_gas_limit < highest_gas_limit { // Set the transaction's gas limit to the calculated optimistic gas limit. - tx_env.set_gas_limit(optimistic_gas_limit); + let mut optimistic_tx_env = tx_env.clone(); + optimistic_tx_env.set_gas_limit(optimistic_gas_limit); + // Re-execute the transaction with the new gas limit and update the result and // environment. - res = self.transact(&mut db, evm_env.clone(), tx_env.clone())?; + res = evm.transact(optimistic_tx_env).map_err(Self::Error::from_evm_err)?; + // Update the gas used based on the new result. gas_used = res.result.gas_used(); // Update the gas limit estimates (highest and lowest) based on the execution result. @@ -206,7 +217,7 @@ pub trait EstimateCall: Call { ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64, ); - trace!(target: "rpc::eth::estimate", ?evm_env, ?tx_env, ?highest_gas_limit, ?lowest_gas_limit, ?mid_gas_limit, "Starting binary search for gas"); + trace!(target: "rpc::eth::estimate", ?highest_gas_limit, ?lowest_gas_limit, ?mid_gas_limit, "Starting binary search for gas"); // Binary search narrows the range to find the minimum gas limit needed for the transaction // to succeed. @@ -220,10 +231,11 @@ pub trait EstimateCall: Call { break }; - tx_env.set_gas_limit(mid_gas_limit); + let mut mid_tx_env = tx_env.clone(); + mid_tx_env.set_gas_limit(mid_gas_limit); // Execute transaction and handle potential gas errors, adjusting limits accordingly. 
- match self.transact(&mut db, evm_env.clone(), tx_env.clone()) { + match evm.transact(mid_tx_env).map_err(Self::Error::from_evm_err) { Err(err) if err.is_gas_too_high() => { // Decrease the highest gas limit if gas is too high highest_gas_limit = mid_gas_limit; @@ -278,34 +290,31 @@ pub trait EstimateCall: Call { /// or not #[inline] fn map_out_of_gas_err( - &self, - env_gas_limit: u64, - evm_env: EvmEnvFor, + evm: &mut EvmFor, mut tx_env: TxEnvFor, - db: &mut DB, - ) -> Self::Error + higher_gas_limit: u64, + ) -> Result where DB: Database, EthApiError: From, { let req_gas_limit = tx_env.gas_limit(); - tx_env.set_gas_limit(env_gas_limit); - let res = match self.transact(db, evm_env, tx_env) { - Ok(res) => res, - Err(err) => return err, - }; - match res.result { + tx_env.set_gas_limit(higher_gas_limit); + + let retry_res = evm.transact(tx_env).map_err(Self::Error::from_evm_err)?; + + match retry_res.result { ExecutionResult::Success { .. } => { - // transaction succeeded by manually increasing the gas limit to - // highest, which means the caller lacks funds to pay for the tx - RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into_eth_err() + // Transaction succeeded by manually increasing the gas limit, + // which means the caller lacks funds to pay for the tx + Err(RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into_eth_err()) } ExecutionResult::Revert { output, .. } => { // reverted again after bumping the limit - RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err() + Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()) } ExecutionResult::Halt { reason, .. } => { - Self::Error::from_evm_halt(reason, req_gas_limit) + Err(Self::Error::from_evm_halt(reason, req_gas_limit)) } } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index da354181aff..3e63c04f75f 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -13,12 +13,16 @@ use reth_rpc_eth_types::{ fee_history::calculate_reward_percentiles_for_block, EthApiError, FeeHistoryCache, FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, }; -use reth_storage_api::{BlockIdReader, BlockReaderIdExt, HeaderProvider}; +use reth_storage_api::{BlockIdReader, BlockReaderIdExt, HeaderProvider, ProviderHeader}; use tracing::debug; /// Fee related functions for the [`EthApiServer`](crate::EthApiServer) trait in the /// `eth_` namespace. -pub trait EthFees: LoadFee { +pub trait EthFees: + LoadFee< + Provider: ChainSpecProvider>>, +> +{ /// Returns a suggestion for a gas price for legacy transactions. 
/// /// See also: @@ -86,8 +90,6 @@ pub trait EthFees: LoadFee { if newest_block.is_pending() { // cap the target block since we don't have fee history for the pending block newest_block = BlockNumberOrTag::Latest; - // account for missing pending block - block_count = block_count.saturating_sub(1); } let end_block = self @@ -138,7 +140,8 @@ pub trait EthFees: LoadFee { } for entry in &fee_entries { - base_fee_per_gas.push(entry.base_fee_per_gas as u128); + base_fee_per_gas + .push(entry.header.base_fee_per_gas().unwrap_or_default() as u128); gas_used_ratio.push(entry.gas_used_ratio); base_fee_per_blob_gas.push(entry.base_fee_per_blob_gas.unwrap_or_default()); blob_gas_used_ratio.push(entry.blob_gas_used_ratio); @@ -155,8 +158,12 @@ pub trait EthFees: LoadFee { // Also need to include the `base_fee_per_gas` and `base_fee_per_blob_gas` for the // next block - base_fee_per_gas - .push(last_entry.next_block_base_fee(self.provider().chain_spec()) as u128); + base_fee_per_gas.push( + self.provider() + .chain_spec() + .next_block_base_fee(&last_entry.header, last_entry.header.timestamp()) + .unwrap_or_default() as u128, + ); base_fee_per_blob_gas.push(last_entry.next_block_blob_fee().unwrap_or_default()); } else { @@ -168,13 +175,12 @@ pub trait EthFees: LoadFee { return Err(EthApiError::InvalidBlockRange.into()) } - + let chain_spec = self.provider().chain_spec(); for header in &headers { base_fee_per_gas.push(header.base_fee_per_gas().unwrap_or_default() as u128); gas_used_ratio.push(header.gas_used() as f64 / header.gas_limit() as f64); - let blob_params = self.provider() - .chain_spec() + let blob_params = chain_spec .blob_params_at_timestamp(header.timestamp()) .unwrap_or_else(BlobParams::cancun); @@ -211,18 +217,16 @@ pub trait EthFees: LoadFee { // The unwrap is safe since we checked earlier that we got at least 1 header. let last_header = headers.last().expect("is present"); base_fee_per_gas.push( - last_header.next_block_base_fee( - self.provider() - .chain_spec() - .base_fee_params_at_timestamp(last_header.timestamp())).unwrap_or_default() as u128 + chain_spec + .next_block_base_fee(last_header.header(), last_header.timestamp()) + .unwrap_or_default() as u128, ); - // Same goes for the `base_fee_per_blob_gas`: // > "[..] includes the next block after the newest of the returned range, because this value can be derived from the newest block. base_fee_per_blob_gas.push( last_header .maybe_next_block_blob_fee( - self.provider().chain_spec().blob_params_at_timestamp(last_header.timestamp()) + chain_spec.blob_params_at_timestamp(last_header.timestamp()) ).unwrap_or_default() ); }; @@ -240,7 +244,11 @@ pub trait EthFees: LoadFee { /// Approximates reward at a given percentile for a specific block /// Based on the configured resolution - fn approximate_percentile(&self, entry: &FeeHistoryEntry, requested_percentile: f64) -> u128 { + fn approximate_percentile( + &self, + entry: &FeeHistoryEntry>, + requested_percentile: f64, + ) -> u128 { let resolution = self.fee_history_cache().resolution(); let rounded_percentile = (requested_percentile * resolution as f64).round() / resolution as f64; @@ -268,7 +276,7 @@ where /// Returns a handle for reading fee history data from memory. /// /// Data access in default (L1) trait method implementations. - fn fee_history_cache(&self) -> &FeeHistoryCache; + fn fee_history_cache(&self) -> &FeeHistoryCache>; /// Returns the gas price if it is set, otherwise fetches a suggested gas price for legacy /// transactions. 
diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 62202c5b664..272f3c18f1f 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -5,6 +5,7 @@ use super::SpawnBlocking; use crate::{types::RpcTypes, EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; use alloy_consensus::{BlockHeader, Transaction}; use alloy_eips::eip7840::BlobParams; +use alloy_primitives::U256; use alloy_rpc_types_eth::BlockNumberOrTag; use futures::Future; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; @@ -18,6 +19,7 @@ use reth_primitives_traits::{ transaction::error::InvalidTransactionError, Receipt, RecoveredBlock, SealedHeader, }; use reth_revm::{database::StateProviderDatabase, db::State}; +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_types::{EthApiError, PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; use reth_storage_api::{ BlockReader, BlockReaderIdExt, ProviderBlock, ProviderHeader, ProviderReceipt, ProviderTx, @@ -41,6 +43,7 @@ pub trait LoadPendingBlock: Header = alloy_rpc_types_eth::Header>, >, Error: FromEvmError, + RpcConvert: RpcConvert, > + RpcNodeCore< Provider: BlockReaderIdExt + ChainSpecProvider @@ -152,7 +155,7 @@ pub trait LoadPendingBlock: // check if the block is still good if let Some(pending_block) = lock.as_ref() { // this is guaranteed to be the `latest` header - if pending.evm_env.block_env.number == pending_block.block.number() && + if pending.evm_env.block_env.number == U256::from(pending_block.block.number()) && parent.hash() == pending_block.block.parent_hash() && now <= pending_block.expires_at { diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index e2f70602351..31085bdc08f 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -9,22 +9,18 @@ use futures::Future; use reth_chainspec::ChainSpecProvider; use reth_errors::ProviderError; use reth_evm::{ - system_calls::SystemCaller, ConfigureEvm, Database, Evm, EvmEnvFor, HaltReasonFor, - InspectorFor, TxEnvFor, + evm::EvmFactoryExt, system_calls::SystemCaller, tracing::TracingCtx, ConfigureEvm, Database, + Evm, EvmEnvFor, EvmFor, HaltReasonFor, InspectorFor, TxEnvFor, }; use reth_node_api::NodePrimitives; -use reth_primitives_traits::{BlockBody, RecoveredBlock, SignedTransaction}; +use reth_primitives_traits::{BlockBody, Recovered, RecoveredBlock, SignedTransaction}; use reth_revm::{database::StateProviderDatabase, db::CacheDB}; use reth_rpc_eth_types::{ cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, EthApiError, }; use reth_storage_api::{BlockReader, ProviderBlock, ProviderHeader, ProviderTx}; -use revm::{ - context_interface::result::{ExecutionResult, ResultAndState}, - state::EvmState, - DatabaseCommit, -}; +use revm::{context_interface::result::ResultAndState, DatabaseCommit}; use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use std::sync::Arc; @@ -242,10 +238,11 @@ pub trait Trace: Self: LoadBlock, F: Fn( TransactionInfo, - TracingInspector, - ExecutionResult>, - &EvmState, - &StateCacheDb<'_>, + TracingCtx< + '_, + Recovered<&ProviderTx>, + EvmFor, TracingInspector>, + >, ) -> Result + Send + 'static, @@ -282,15 +279,16 @@ pub trait Trace: Self: LoadBlock, F: Fn( TransactionInfo, - Insp, - ExecutionResult>, - &EvmState, - &StateCacheDb<'_>, + TracingCtx< + '_, + 
Recovered<&ProviderTx>, + EvmFor, Insp>, + >, ) -> Result + Send + 'static, Setup: FnMut() -> Insp + Send + 'static, - Insp: for<'a, 'b> InspectorFor>, + Insp: Clone + for<'a, 'b> InspectorFor>, R: Send + 'static, { async move { @@ -317,7 +315,7 @@ pub trait Trace: let state_at = block.parent_hash(); let block_hash = block.hash(); - let block_number = evm_env.block_env.number; + let block_number = evm_env.block_env.number.saturating_to(); let base_fee = evm_env.block_env.basefee; // now get the state @@ -334,43 +332,26 @@ pub trait Trace: // we need + 1 because the index is 0-based highest as usize + 1 }); - let mut results = Vec::with_capacity(max_transactions); - let mut transactions = block - .transactions_recovered() - .take(max_transactions) - .enumerate() - .map(|(idx, tx)| { + let mut idx = 0; + + let results = this + .evm_config() + .evm_factory() + .create_tracer(StateCacheDbRefMutWrapper(&mut db), evm_env, inspector_setup()) + .try_trace_many(block.transactions_recovered().take(max_transactions), |ctx| { let tx_info = TransactionInfo { - hash: Some(*tx.tx_hash()), - index: Some(idx as u64), + hash: Some(*ctx.tx.tx_hash()), + index: Some(idx), block_hash: Some(block_hash), block_number: Some(block_number), base_fee: Some(base_fee), }; - let tx_env = this.evm_config().tx_env(tx); - (tx_info, tx_env) - }) - .peekable(); + idx += 1; - while let Some((tx_info, tx)) = transactions.next() { - let mut inspector = inspector_setup(); - let (res, _) = this.inspect( - StateCacheDbRefMutWrapper(&mut db), - evm_env.clone(), - tx, - &mut inspector, - )?; - let ResultAndState { result, state } = res; - results.push(f(tx_info, inspector, result, &state, &db)?); - - // need to apply the state changes of this transaction before executing the - // next transaction, but only if there's a next transaction - if transactions.peek().is_some() { - // commit the state changes to the DB - db.commit(state) - } - } + f(tx_info, ctx) + }) + .collect::>()?; Ok(Some(results)) }) @@ -401,10 +382,11 @@ pub trait Trace: // state and db F: Fn( TransactionInfo, - TracingInspector, - ExecutionResult>, - &EvmState, - &StateCacheDb<'_>, + TracingCtx< + '_, + Recovered<&ProviderTx>, + EvmFor, TracingInspector>, + >, ) -> Result + Send + 'static, @@ -421,7 +403,7 @@ pub trait Trace: /// 2. configures the EVM evn /// 3. loops over all transactions and executes them /// 4. 
calls the callback with the transaction info, the execution result, the changed state - /// _after_ the transaction [`EvmState`] and the database that points to the state right + /// _after_ the transaction `EvmState` and the database that points to the state right /// _before_ the transaction, in other words the state the transaction was executed on: /// `changed_state = tx(cached_state)` /// @@ -440,15 +422,16 @@ pub trait Trace: // state and db F: Fn( TransactionInfo, - Insp, - ExecutionResult>, - &EvmState, - &StateCacheDb<'_>, + TracingCtx< + '_, + Recovered<&ProviderTx>, + EvmFor, Insp>, + >, ) -> Result + Send + 'static, Setup: FnMut() -> Insp + Send + 'static, - Insp: for<'a, 'b> InspectorFor>, + Insp: Clone + for<'a, 'b> InspectorFor>, R: Send + 'static, { self.trace_block_until_with_inspector(block_id, block, None, insp_setup, f) diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 4c7377a30fd..c0c759d400d 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -15,11 +15,15 @@ use alloy_eips::{eip2718::Encodable2718, BlockId}; use alloy_network::TransactionBuilder; use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types_eth::{transaction::TransactionRequest, BlockNumberOrTag, TransactionInfo}; -use futures::Future; +use futures::{Future, StreamExt}; +use reth_chain_state::CanonStateSubscriptions; use reth_node_api::BlockBody; use reth_primitives_traits::{RecoveredBlock, SignedTransaction}; -use reth_rpc_eth_types::{utils::binary_search, EthApiError, SignError, TransactionSource}; -use reth_rpc_types_compat::transaction::TransactionCompat; +use reth_rpc_convert::transaction::RpcConvert; +use reth_rpc_eth_types::{ + utils::binary_search, EthApiError, EthApiError::TransactionConfirmationTimeout, SignError, + TransactionSource, +}; use reth_storage_api::{ BlockNumReader, BlockReaderIdExt, ProviderBlock, ProviderReceipt, ProviderTx, ReceiptProvider, TransactionsProvider, @@ -64,6 +68,47 @@ pub trait EthTransactions: LoadTransaction { tx: Bytes, ) -> impl Future> + Send; + /// Decodes and recovers the transaction and submits it to the pool. + /// + /// And awaits the receipt. + fn send_raw_transaction_sync( + &self, + tx: Bytes, + ) -> impl Future, Self::Error>> + Send + where + Self: LoadReceipt + 'static, + { + let this = self.clone(); + async move { + let hash = EthTransactions::send_raw_transaction(&this, tx).await?; + let mut stream = this.provider().canonical_state_stream(); + const TIMEOUT_DURATION: tokio::time::Duration = tokio::time::Duration::from_secs(30); + tokio::time::timeout(TIMEOUT_DURATION, async { + while let Some(notification) = stream.next().await { + let chain = notification.committed(); + for block in chain.blocks_iter() { + if block.body().contains_transaction(&hash) { + if let Some(receipt) = this.transaction_receipt(hash).await? { + return Ok(receipt); + } + } + } + } + Err(Self::Error::from_eth_err(TransactionConfirmationTimeout { + hash, + duration: TIMEOUT_DURATION, + })) + }) + .await + .unwrap_or_else(|_elapsed| { + Err(Self::Error::from_eth_err(TransactionConfirmationTimeout { + hash, + duration: TIMEOUT_DURATION, + })) + }) + } + } + /// Returns the transaction by hash. /// /// Checks the pool and state. 
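Editor's illustration (not part of the patch): calling the new `eth_sendRawTransactionSync` endpoint added above from the client side. The jsonrpsee HTTP client, the placeholder URL, and `anyhow` are assumptions:

    use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params};

    async fn send_and_wait(raw_tx: alloy_primitives::Bytes) -> anyhow::Result<serde_json::Value> {
        let client = HttpClientBuilder::default().build("http://localhost:8545")?;
        // Resolves with the receipt once the transaction is seen in a committed block,
        // or errors out after the server-side ~30s confirmation timeout.
        let receipt = client.request("eth_sendRawTransactionSync", rpc_params![raw_tx]).await?;
        Ok(receipt)
    }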
@@ -444,7 +489,7 @@ pub trait EthTransactions: LoadTransaction { fn find_signer( &self, account: &Address, - ) -> Result> + 'static)>, Self::Error> { + ) -> Result> + 'static>, Self::Error> { self.signers() .read() .iter() diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs index 3916b5eb696..a44c7600b9d 100644 --- a/crates/rpc/rpc-eth-api/src/lib.rs +++ b/crates/rpc/rpc-eth-api/src/lib.rs @@ -27,13 +27,10 @@ pub use ext::L2EthApiExtServer; pub use filter::{EngineEthFilter, EthFilterApiServer, QueryLimits}; pub use node::{RpcNodeCore, RpcNodeCoreExt}; pub use pubsub::EthPubSubApiServer; +pub use reth_rpc_convert::*; pub use reth_rpc_eth_types::error::{ AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError, }; -pub use reth_rpc_types_compat::{ - try_into_op_tx_info, try_into_scroll_tx_info, IntoRpcTx, RpcConverter, TransactionCompat, - TransactionConversionError, TryIntoSimTx, TxInfoMapper, -}; pub use types::{EthApiTypes, FullEthApiTypes, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction}; #[cfg(feature = "client")] diff --git a/crates/rpc/rpc-eth-api/src/node.rs b/crates/rpc/rpc-eth-api/src/node.rs index 13dcf90c05d..44e0cc812a2 100644 --- a/crates/rpc/rpc-eth-api/src/node.rs +++ b/crates/rpc/rpc-eth-api/src/node.rs @@ -5,12 +5,15 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_rpc_eth_types::EthStateCache; use reth_storage_api::{BlockReader, ProviderBlock, ProviderReceipt}; -/// Helper trait to relax trait bounds on [`FullNodeComponents`]. +/// Helper trait that provides the same interface as [`FullNodeComponents`] but without requiring +/// implementation of trait bounds. /// -/// Helpful when defining types that would otherwise have a generic `N: FullNodeComponents`. Using -/// `N: RpcNodeCore` instead, allows access to all the associated types on [`FullNodeComponents`] -/// that are used in RPC, but with more flexibility since they have no trait bounds (asides auto -/// traits). +/// This trait is structurally equivalent to [`FullNodeComponents`], exposing the same associated +/// types and methods. However, it doesn't enforce the trait bounds required by +/// [`FullNodeComponents`]. This makes it useful for RPC types that need access to node components +/// where the full trait bounds of the components are not necessary. +/// +/// Every type that is a [`FullNodeComponents`] also implements this trait. pub trait RpcNodeCore: Clone + Send + Sync { /// Blockchain data primitives. type Primitives: Send + Sync + Clone + Unpin; diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index bdc8d615737..7bb91af8258 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -1,10 +1,9 @@ //! Trait for specifying `eth` network dependent API types. use crate::{AsEthApiError, FromEthApiError, RpcNodeCore}; -use alloy_json_rpc::RpcObject; -use alloy_network::{Network, ReceiptResponse, TransactionResponse}; -use alloy_rpc_types_eth::Block; -use reth_rpc_types_compat::TransactionCompat; +use alloy_rpc_types_eth::{Block, TransactionRequest}; +use reth_chain_state::CanonStateSubscriptions; +use reth_rpc_convert::RpcConvert; use reth_storage_api::{ProviderTx, ReceiptProvider, TransactionsProvider}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::{ @@ -12,32 +11,13 @@ use std::{ fmt::{self}, }; -/// RPC types used by the `eth_` RPC API. -/// -/// This is a subset of [`alloy_network::Network`] trait with only RPC response types kept. 
-pub trait RpcTypes { - /// Header response type. - type Header: RpcObject; - /// Receipt response type. - type Receipt: RpcObject + ReceiptResponse; - /// Transaction response type. - type Transaction: RpcObject + TransactionResponse; -} - -impl RpcTypes for T -where - T: Network, -{ - type Header = T::HeaderResponse; - type Receipt = T::ReceiptResponse; - type Transaction = T::TransactionResponse; -} +pub use reth_rpc_convert::{RpcTransaction, RpcTxReq, RpcTypes}; /// Network specific `eth` API types. /// /// This trait defines the network specific rpc types and helpers required for the `eth_` and -/// adjacent endpoints. `NetworkTypes` is [`Network`] as defined by the alloy crate, see also -/// [`alloy_network::Ethereum`]. +/// adjacent endpoints. `NetworkTypes` is [`alloy_network::Network`] as defined by the alloy crate, +/// see also [`alloy_network::Ethereum`]. /// /// This type is stateful so that it can provide additional context if necessary, e.g. populating /// receipts with additional data. @@ -52,15 +32,12 @@ pub trait EthApiTypes: Send + Sync + Clone { /// Blockchain primitive types, specific to network, e.g. block and transaction. type NetworkTypes: RpcTypes; /// Conversion methods for transaction RPC type. - type TransactionCompat: Send + Sync + Clone + fmt::Debug; + type RpcConvert: Send + Sync + Clone + fmt::Debug; /// Returns reference to transaction response builder. - fn tx_resp_builder(&self) -> &Self::TransactionCompat; + fn tx_resp_builder(&self) -> &Self::RpcConvert; } -/// Adapter for network specific transaction type. -pub type RpcTransaction = ::Transaction; - /// Adapter for network specific block type. pub type RpcBlock = Block, RpcHeader>; @@ -77,32 +54,34 @@ pub type RpcError = ::Error; pub trait FullEthApiTypes where Self: RpcNodeCore< - Provider: TransactionsProvider + ReceiptProvider, + Provider: TransactionsProvider + ReceiptProvider + CanonStateSubscriptions, Pool: TransactionPool< Transaction: PoolTransaction>, >, > + EthApiTypes< - TransactionCompat: TransactionCompat< + RpcConvert: RpcConvert< Primitives = ::Primitives, - Transaction = RpcTransaction, + Network = Self::NetworkTypes, Error = RpcError, >, + NetworkTypes: RpcTypes>, >, { } impl FullEthApiTypes for T where T: RpcNodeCore< - Provider: TransactionsProvider + ReceiptProvider, + Provider: TransactionsProvider + ReceiptProvider + CanonStateSubscriptions, Pool: TransactionPool< Transaction: PoolTransaction>, >, > + EthApiTypes< - TransactionCompat: TransactionCompat< + RpcConvert: RpcConvert< Primitives = ::Primitives, - Transaction = RpcTransaction, + Network = Self::NetworkTypes, Error = RpcError, >, + NetworkTypes: RpcTypes>, > { } diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index 20254eea731..4a2104d9146 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -19,17 +19,18 @@ reth-evm.workspace = true reth-execution-types.workspace = true reth-metrics.workspace = true reth-ethereum-primitives.workspace = true -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["rpc-compat"] } reth-storage-api.workspace = true reth-revm.workspace = true reth-rpc-server-types.workspace = true -reth-rpc-types-compat.workspace = true +reth-rpc-convert.workspace = true reth-tasks.workspace = true reth-transaction-pool.workspace = true reth-trie.workspace = true # ethereum alloy-eips.workspace = true +alloy-evm = { workspace = true, features = ["overrides", "call-util"] } 
alloy-primitives.workspace = true alloy-consensus.workspace = true alloy-sol-types.workspace = true diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 633a4482e74..7c1bedb8224 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -5,12 +5,13 @@ use alloy_primitives::{Address, B256, U256}; use reth_errors::ProviderResult; use reth_revm::{database::StateProviderDatabase, DatabaseRef}; -use reth_storage_api::{HashedPostStateProvider, StateProvider}; +use reth_storage_api::{BytecodeReader, HashedPostStateProvider, StateProvider}; use reth_trie::{HashedStorage, MultiProofTargets}; use revm::{ database::{BundleState, CacheDB}, + primitives::HashMap, state::{AccountInfo, Bytecode}, - Database, + Database, DatabaseCommit, }; /// Helper alias type for the state's [`CacheDB`] @@ -154,13 +155,6 @@ impl StateProvider for StateProviderTraitObjWrapper<'_> { self.0.storage(account, storage_key) } - fn bytecode_by_hash( - &self, - code_hash: &B256, - ) -> reth_errors::ProviderResult> { - self.0.bytecode_by_hash(code_hash) - } - fn account_code( &self, addr: &Address, @@ -177,6 +171,15 @@ impl StateProvider for StateProviderTraitObjWrapper<'_> { } } +impl BytecodeReader for StateProviderTraitObjWrapper<'_> { + fn bytecode_by_hash( + &self, + code_hash: &B256, + ) -> reth_errors::ProviderResult> { + self.0.bytecode_by_hash(code_hash) + } +} + /// Hack to get around 'higher-ranked lifetime error', see /// #[expect(missing_debug_implementations)] @@ -220,3 +223,9 @@ impl<'a> DatabaseRef for StateCacheDbRefMutWrapper<'a, '_> { self.0.block_hash_ref(number) } } + +impl DatabaseCommit for StateCacheDbRefMutWrapper<'_, '_> { + fn commit(&mut self, changes: HashMap) { + self.0.commit(changes) + } +} diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index eae015060a3..96adc4e67b2 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -3,17 +3,18 @@ pub mod api; use crate::error::api::FromEvmHalt; use alloy_eips::BlockId; -use alloy_primitives::{Address, Bytes, U256}; +use alloy_evm::{call::CallError, overrides::StateOverrideError}; +use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rpc_types_eth::{error::EthRpcErrorCode, request::TransactionInputError, BlockError}; use alloy_sol_types::{ContractError, RevertReason}; pub use api::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; use core::time::Duration; use reth_errors::{BlockExecutionError, RethError}; use reth_primitives_traits::transaction::{error::InvalidTransactionError, signed::RecoveryError}; +use reth_rpc_convert::{CallFeesError, EthTxEnvError, TransactionConversionError}; use reth_rpc_server_types::result::{ block_id_to_str, internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code, }; -use reth_rpc_types_compat::TransactionConversionError; use reth_transaction_pool::error::{ Eip4844PoolTransactionError, Eip7702PoolTransactionError, InvalidPoolTransactionError, PoolError, PoolErrorKind, PoolTransactionError, @@ -61,6 +62,14 @@ pub enum EthApiError { /// Header range not found for start block hash/number/tag to end block hash/number/tag #[error("header range not found, start block {0:?}, end block {1:?}")] HeaderRangeNotFound(BlockId, BlockId), + /// Thrown when historical data is not available because it has been pruned + /// + /// This error is intended for use as a standard response when historical data is + /// 
requested that has been pruned according to the node's data retention policy. + /// + /// See also + #[error("pruned history unavailable")] + PrunedHistoryUnavailable, /// Receipts not found for block hash/number/tag #[error("receipts not found")] ReceiptsNotFound(BlockId), @@ -147,6 +156,16 @@ pub enum EthApiError { /// Error thrown when tracing with a muxTracer fails #[error(transparent)] MuxTracerError(#[from] MuxError), + /// Error thrown when waiting for transaction confirmation times out + #[error( + "Transaction {hash} was added to the mempool but wasn't confirmed within {duration:?}." + )] + TransactionConfirmationTimeout { + /// Hash of the transaction that timed out + hash: B256, + /// Duration that was waited before timing out + duration: Duration, + }, /// Any other error #[error("{0}")] Other(Box), @@ -167,6 +186,22 @@ impl EthApiError { pub const fn is_gas_too_low(&self) -> bool { matches!(self, Self::InvalidTransaction(RpcInvalidTransactionError::GasTooLow)) } + + /// Converts the given [`StateOverrideError`] into a new [`EthApiError`] instance. + pub fn from_state_overrides_err(err: StateOverrideError) -> Self + where + E: Into, + { + err.into() + } + + /// Converts the given [`CallError`] into a new [`EthApiError`] instance. + pub fn from_call_err(err: CallError) -> Self + where + E: Into, + { + err.into() + } } impl From for jsonrpsee_types::error::ErrorObject<'static> { @@ -214,6 +249,9 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { block_id_to_str(end_id), ), ), + err @ EthApiError::TransactionConfirmationTimeout { .. } => { + rpc_error_with_code(EthRpcErrorCode::TransactionRejected.code(), err.to_string()) + } EthApiError::Unsupported(msg) => internal_rpc_err(msg), EthApiError::InternalJsTracerError(msg) => internal_rpc_err(msg), EthApiError::InvalidParams(msg) => invalid_params_rpc_err(msg), @@ -225,6 +263,7 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { internal_rpc_err(err.to_string()) } err @ EthApiError::TransactionInputError(_) => invalid_params_rpc_err(err.to_string()), + EthApiError::PrunedHistoryUnavailable => rpc_error_with_code(4444, error.to_string()), EthApiError::Other(err) => err.to_rpc_error(), EthApiError::MuxTracerError(msg) => internal_rpc_err(msg.to_string()), } @@ -237,6 +276,65 @@ impl From for EthApiError { } } +impl From> for EthApiError +where + E: Into, +{ + fn from(value: CallError) -> Self { + match value { + CallError::Database(err) => err.into(), + CallError::InsufficientFunds(insufficient_funds_error) => { + Self::InvalidTransaction(RpcInvalidTransactionError::InsufficientFunds { + cost: insufficient_funds_error.cost, + balance: insufficient_funds_error.balance, + }) + } + } + } +} + +impl From> for EthApiError +where + E: Into, +{ + fn from(value: StateOverrideError) -> Self { + match value { + StateOverrideError::InvalidBytecode(bytecode_decode_error) => { + Self::InvalidBytecode(bytecode_decode_error.to_string()) + } + StateOverrideError::BothStateAndStateDiff(address) => { + Self::BothStateAndStateDiffInOverride(address) + } + StateOverrideError::Database(err) => err.into(), + } + } +} + +impl From for EthApiError { + fn from(value: EthTxEnvError) -> Self { + match value { + EthTxEnvError::CallFees(CallFeesError::BlobTransactionMissingBlobHashes) => { + Self::InvalidTransaction( + RpcInvalidTransactionError::BlobTransactionMissingBlobHashes, + ) + } + EthTxEnvError::CallFees(CallFeesError::FeeCapTooLow) => { + Self::InvalidTransaction(RpcInvalidTransactionError::FeeCapTooLow) + } + 
EthTxEnvError::CallFees(CallFeesError::ConflictingFeeFieldsInRequest) => { + Self::ConflictingFeeFieldsInRequest + } + EthTxEnvError::CallFees(CallFeesError::TipAboveFeeCap) => { + Self::InvalidTransaction(RpcInvalidTransactionError::TipAboveFeeCap) + } + EthTxEnvError::CallFees(CallFeesError::TipVeryHigh) => { + Self::InvalidTransaction(RpcInvalidTransactionError::TipVeryHigh) + } + EthTxEnvError::Input(err) => Self::TransactionInputError(err), + } + } +} + #[cfg(feature = "js-tracer")] impl From for EthApiError { fn from(error: revm_inspectors::tracing::js::JsInspectorError) -> Self { @@ -466,9 +564,6 @@ pub enum RpcInvalidTransactionError { /// Blob transaction is a create transaction #[error("blob transaction is a create transaction")] BlobTransactionIsCreate, - /// EOF crate should have `to` address - #[error("EOF crate should have `to` address")] - EofCrateShouldHaveToAddress, /// EIP-7702 is not enabled. #[error("EIP-7702 authorization list not supported")] AuthorizationListNotSupported, @@ -494,7 +589,11 @@ impl RpcInvalidTransactionError { Self::InvalidChainId | Self::GasTooLow | Self::GasTooHigh | - Self::GasRequiredExceedsAllowance { .. } => EthRpcErrorCode::InvalidInput.code(), + Self::GasRequiredExceedsAllowance { .. } | + Self::NonceTooLow { .. } | + Self::NonceTooHigh { .. } | + Self::FeeCapTooLow | + Self::FeeCapVeryHigh => EthRpcErrorCode::InvalidInput.code(), Self::Revert(_) => EthRpcErrorCode::ExecutionError.code(), _ => EthRpcErrorCode::TransactionRejected.code(), } @@ -544,10 +643,13 @@ impl From for jsonrpsee_types::error::ErrorObject<'s impl From for RpcInvalidTransactionError { fn from(err: InvalidTransaction) -> Self { match err { - InvalidTransaction::InvalidChainId => Self::InvalidChainId, + InvalidTransaction::InvalidChainId | InvalidTransaction::MissingChainId => { + Self::InvalidChainId + } InvalidTransaction::PriorityFeeGreaterThanMaxFee => Self::TipAboveFeeCap, InvalidTransaction::GasPriceLessThanBasefee => Self::FeeCapTooLow, - InvalidTransaction::CallerGasLimitMoreThanBlock => { + InvalidTransaction::CallerGasLimitMoreThanBlock | + InvalidTransaction::TxGasLimitGreaterThanCap { .. } => { // tx.gas > block.gas_limit Self::GasTooHigh } @@ -581,7 +683,6 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::BlobVersionNotSupported => Self::BlobHashVersionMismatch, InvalidTransaction::TooManyBlobs { have, .. } => Self::TooManyBlobs { have }, InvalidTransaction::BlobCreateTransaction => Self::BlobTransactionIsCreate, - InvalidTransaction::EofCreateShouldHaveToAddress => Self::EofCrateShouldHaveToAddress, InvalidTransaction::AuthorizationListNotSupported => { Self::AuthorizationListNotSupported } @@ -748,7 +849,22 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { RpcPoolError::TxPoolOverflow => { rpc_error_with_code(EthRpcErrorCode::TransactionRejected.code(), error.to_string()) } - error => internal_rpc_err(error.to_string()), + RpcPoolError::AlreadyKnown | + RpcPoolError::InvalidSender | + RpcPoolError::Underpriced | + RpcPoolError::ReplaceUnderpriced | + RpcPoolError::ExceedsGasLimit | + RpcPoolError::ExceedsFeeCap { .. 
} | + RpcPoolError::NegativeValue | + RpcPoolError::OversizedData | + RpcPoolError::ExceedsMaxInitCodeSize | + RpcPoolError::PoolTransactionError(_) | + RpcPoolError::Eip4844(_) | + RpcPoolError::Eip7702(_) | + RpcPoolError::AddressAlreadyReserved => { + rpc_error_with_code(EthRpcErrorCode::InvalidInput.code(), error.to_string()) + } + RpcPoolError::Other(other) => internal_rpc_err(other.to_string()), } } } @@ -785,6 +901,9 @@ impl From<InvalidPoolTransactionError> for RpcPoolError { } InvalidPoolTransactionError::OversizedData(_, _) => Self::OversizedData, InvalidPoolTransactionError::Underpriced => Self::Underpriced, + InvalidPoolTransactionError::Eip2681 => { + Self::Invalid(RpcInvalidTransactionError::NonceMaxValue) + } InvalidPoolTransactionError::Other(err) => Self::PoolTransactionError(err), InvalidPoolTransactionError::Eip4844(err) => Self::Eip4844(err), InvalidPoolTransactionError::Eip7702(err) => Self::Eip7702(err), diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 616b53b86fb..7262c1c44ca 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -6,9 +6,8 @@ use std::{ sync::{atomic::Ordering::SeqCst, Arc}, }; -use alloy_consensus::{BlockHeader, Transaction, TxReceipt}; -use alloy_eips::{eip1559::calc_next_block_base_fee, eip7840::BlobParams}; -use alloy_primitives::B256; +use alloy_consensus::{BlockHeader, Header, Transaction, TxReceipt}; +use alloy_eips::eip7840::BlobParams; use alloy_rpc_types_eth::TxGasAndReward; use futures::{ future::{Fuse, FusedFuture}, @@ -29,11 +28,14 @@ use super::{EthApiError, EthStateCache}; /// /// Purpose for this is to provide cached data for `eth_feeHistory`. #[derive(Debug, Clone)] -pub struct FeeHistoryCache { - inner: Arc<FeeHistoryCacheInner>, +pub struct FeeHistoryCache<H = Header> { + inner: Arc<FeeHistoryCacheInner<H>>, } -impl FeeHistoryCache { +impl<H> FeeHistoryCache<H> +where + H: BlockHeader + Clone, +{ /// Creates new `FeeHistoryCache` instance, initialize it with the more recent data, set bounds pub fn new(config: FeeHistoryCacheConfig) -> Self { let inner = FeeHistoryCacheInner { @@ -74,7 +76,7 @@ impl FeeHistoryCache { /// Insert block data into the cache. async fn insert_blocks<'a, I, B, R, C>(&self, blocks: I, chain_spec: &C) where - B: Block + 'a, + B: Block<Header = H>

+ 'a, R: TxReceipt + 'a, I: IntoIterator<Item = (&'a SealedBlock<B>, &'a [R])>, C: EthChainSpec, @@ -84,14 +86,14 @@ impl FeeHistoryCache { let percentiles = self.predefined_percentiles(); // Insert all new blocks and calculate approximated rewards for (block, receipts) in blocks { - let mut fee_history_entry = FeeHistoryEntry::new( + let mut fee_history_entry = FeeHistoryEntry::<H>::new( block, chain_spec.blob_params_at_timestamp(block.header().timestamp()), ); fee_history_entry.rewards = calculate_reward_percentiles_for_block( &percentiles, - fee_history_entry.gas_used, - fee_history_entry.base_fee_per_gas, + fee_history_entry.header.gas_used(), + fee_history_entry.header.base_fee_per_gas().unwrap_or_default(), block.body().transactions(), receipts, ) @@ -133,7 +135,7 @@ impl FeeHistoryCache { self.inner.lower_bound.load(SeqCst) } - /// Collect fee history for given range. + /// Collect fee history for the given range (inclusive `start_block..=end_block`). /// /// This function retrieves fee history entries from the cache for the specified range. /// If the requested range (`start_block` to `end_block`) is within the cache bounds, @@ -143,7 +145,11 @@ impl FeeHistoryCache { &self, start_block: u64, end_block: u64, - ) -> Option<Vec<FeeHistoryEntry>> { + ) -> Option<Vec<FeeHistoryEntry<H>>> { + if end_block < start_block { + // invalid range, return None + return None + } let lower_bound = self.lower_bound(); let upper_bound = self.upper_bound(); if start_block >= lower_bound && end_block <= upper_bound { @@ -195,7 +201,7 @@ impl Default for FeeHistoryCacheConfig { /// Container type for shared state in [`FeeHistoryCache`] #[derive(Debug)] -struct FeeHistoryCacheInner { +struct FeeHistoryCacheInner<H> { /// Stores the lower bound of the cache lower_bound: AtomicU64, /// Stores the upper bound of the cache @@ -204,13 +210,13 @@ struct FeeHistoryCacheInner { /// and max number of blocks config: FeeHistoryCacheConfig, /// Stores the entries of the cache - entries: tokio::sync::RwLock<BTreeMap<u64, FeeHistoryEntry>>, + entries: tokio::sync::RwLock<BTreeMap<u64, FeeHistoryEntry<H>>>, } /// Awaits for new chain events and directly inserts them into the cache so they're available /// immediately before they need to be fetched from disk. pub async fn fee_history_cache_new_blocks_task( - fee_history_cache: FeeHistoryCache, + fee_history_cache: FeeHistoryCache<N::BlockHeader>, mut events: St, provider: Provider, cache: EthStateCache, ) @@ -219,6 +225,7 @@ Provider: BlockReaderIdExt + ChainSpecProvider + 'static, N: NodePrimitives, + N::BlockHeader: BlockHeader + Clone, { // We're listening for new blocks emitted when the node is in live sync. // If the node transitions to stage sync, we need to fetch the missing blocks @@ -333,9 +340,9 @@ where /// A cached entry for a block's fee history. #[derive(Debug, Clone)] -pub struct FeeHistoryEntry { - /// The base fee per gas for this block. - pub base_fee_per_gas: u64, +pub struct FeeHistoryEntry<H = Header> { + /// The full block header. + pub header: H, /// Gas used ratio this block. pub gas_used_ratio: f64, /// The base per blob gas for EIP-4844. @@ -346,35 +353,28 @@ pub struct FeeHistoryEntry { /// Calculated as the ratio of blob gas used and the available blob data gas per block. /// Will be zero if no blob gas was used or pre EIP-4844. pub blob_gas_used_ratio: f64, - /// The excess blob gas of the block. - pub excess_blob_gas: Option<u64>, - /// The total amount of blob gas consumed by the transactions within the block, - /// added in EIP-4844 - pub blob_gas_used: Option<u64>, - /// Gas used by this block. - pub gas_used: u64, - /// Gas limit by this block. 
- pub gas_limit: u64, - /// Hash of the block. - pub header_hash: B256, /// Approximated rewards for the configured percentiles. pub rewards: Vec, - /// The timestamp of the block. - pub timestamp: u64, /// Blob parameters for this block. pub blob_params: Option, } -impl FeeHistoryEntry { +impl FeeHistoryEntry +where + H: BlockHeader + Clone, +{ /// Creates a new entry from a sealed block. /// /// Note: This does not calculate the rewards for the block. - pub fn new(block: &SealedBlock, blob_params: Option) -> Self { + pub fn new(block: &SealedBlock, blob_params: Option) -> Self + where + B: Block
, + { + let header = block.header(); Self { - base_fee_per_gas: block.header().base_fee_per_gas().unwrap_or_default(), - gas_used_ratio: block.header().gas_used() as f64 / block.header().gas_limit() as f64, - base_fee_per_blob_gas: block - .header() + header: block.header().clone(), + gas_used_ratio: header.gas_used() as f64 / header.gas_limit() as f64, + base_fee_per_blob_gas: header .excess_blob_gas() .and_then(|excess_blob_gas| Some(blob_params?.calc_blob_fee(excess_blob_gas))), blob_gas_used_ratio: block.body().blob_gas_used() as f64 / @@ -383,27 +383,11 @@ impl FeeHistoryEntry { .map(|params| params.max_blob_gas_per_block()) .unwrap_or(alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK_DENCUN) as f64, - excess_blob_gas: block.header().excess_blob_gas(), - blob_gas_used: block.header().blob_gas_used(), - gas_used: block.header().gas_used(), - header_hash: block.hash(), - gas_limit: block.header().gas_limit(), rewards: Vec::new(), - timestamp: block.header().timestamp(), blob_params, } } - /// Returns the base fee for the next block according to the EIP-1559 spec. - pub fn next_block_base_fee(&self, chain_spec: impl EthChainSpec) -> u64 { - calc_next_block_base_fee( - self.gas_used, - self.gas_limit, - self.base_fee_per_gas, - chain_spec.base_fee_params_at_timestamp(self.timestamp), - ) - } - /// Returns the blob fee for the next block according to the EIP-4844 spec. /// /// Returns `None` if `excess_blob_gas` is None. @@ -418,8 +402,11 @@ impl FeeHistoryEntry { /// /// Returns a `None` if no excess blob gas is set, no EIP-4844 support pub fn next_block_excess_blob_gas(&self) -> Option { - self.excess_blob_gas.and_then(|excess_blob_gas| { - Some(self.blob_params?.next_block_excess_blob_gas(excess_blob_gas, self.blob_gas_used?)) + self.header.excess_blob_gas().and_then(|excess_blob_gas| { + Some( + self.blob_params? + .next_block_excess_blob_gas(excess_blob_gas, self.header.blob_gas_used()?), + ) }) } } diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 27b23b54e40..795363f3dfd 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -2,7 +2,7 @@ //! previous blocks. use super::{EthApiError, EthResult, EthStateCache, RpcInvalidTransactionError}; -use alloy_consensus::{constants::GWEI_TO_WEI, BlockHeader, Transaction}; +use alloy_consensus::{constants::GWEI_TO_WEI, BlockHeader, Transaction, TxReceipt}; use alloy_eips::BlockNumberOrTag; use alloy_primitives::{B256, U256}; use alloy_rpc_types_eth::BlockId; @@ -273,6 +273,78 @@ where Ok(Some((parent_hash, prices))) } + /// Suggests a max priority fee value using a simplified and more predictable algorithm + /// appropriate for chains like Optimism with a single known block builder. + /// + /// It returns either: + /// - The minimum suggested priority fee when blocks have capacity + /// - 10% above the median effective priority fee from the last block when at capacity + /// + /// A block is considered at capacity if its total gas used plus the maximum single transaction + /// gas would exceed the block's gas limit. + pub async fn op_suggest_tip_cap(&self, min_suggested_priority_fee: U256) -> EthResult { + let header = self + .provider + .sealed_header_by_number_or_tag(BlockNumberOrTag::Latest)? 
+ .ok_or(EthApiError::HeaderNotFound(BlockId::latest()))?; + + let mut inner = self.inner.lock().await; + + // if we have stored a last price, then we check whether or not it was for the same head + if inner.last_price.block_hash == header.hash() { + return Ok(inner.last_price.price); + } + + let mut suggestion = min_suggested_priority_fee; + + // find the maximum gas used by any of the transactions in the block to use as the + // capacity margin for the block, if no receipts are found return the + // suggested_min_priority_fee + let Some(max_tx_gas_used) = self + .cache + .get_receipts(header.hash()) + .await? + .ok_or(EthApiError::ReceiptsNotFound(BlockId::latest()))? + // get the gas used by each transaction in the block, by subtracting the + // cumulative gas used of the previous transaction from the cumulative gas used of the + // current transaction. This is because there is no gas_used() method on the Receipt + // trait. + .windows(2) + .map(|window| { + let prev = window[0].cumulative_gas_used(); + let curr = window[1].cumulative_gas_used(); + curr - prev + }) + .max() + else { + return Ok(suggestion); + }; + + // if the block is at capacity, the suggestion must be increased + if header.gas_used() + max_tx_gas_used > header.gas_limit() { + let Some(median_tip) = self.get_block_median_tip(header.hash()).await? else { + return Ok(suggestion); + }; + + let new_suggestion = median_tip + median_tip / U256::from(10); + + if new_suggestion > suggestion { + suggestion = new_suggestion; + } + } + + // constrain to the max price + if let Some(max_price) = self.oracle_config.max_price { + if suggestion > max_price { + suggestion = max_price; + } + } + + inner.last_price = GasPriceOracleResult { block_hash: header.hash(), price: suggestion }; + + Ok(suggestion) + } + /// Get the median tip value for the given block. This is useful for determining /// tips when a block is at capacity. /// diff --git a/crates/rpc/rpc-eth-types/src/lib.rs b/crates/rpc/rpc-eth-types/src/lib.rs index 8d92fda9c33..815160abf4e 100644 --- a/crates/rpc/rpc-eth-types/src/lib.rs +++ b/crates/rpc/rpc-eth-types/src/lib.rs @@ -17,7 +17,6 @@ pub mod id_provider; pub mod logs_utils; pub mod pending_block; pub mod receipt; -pub mod revm_utils; pub mod simulate; pub mod transaction; pub mod utils; diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 1b4ed709eff..988261b8179 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -22,8 +22,8 @@ use reth_evm::{ use reth_primitives_traits::{ block::BlockTx, BlockBody as _, NodePrimitives, Recovered, RecoveredBlock, SignedTransaction, }; +use reth_rpc_convert::{RpcConvert, RpcTransaction, RpcTypes}; use reth_rpc_server_types::result::rpc_err; -use reth_rpc_types_compat::{block::from_block, TransactionCompat}; use reth_storage_api::noop::NoopProvider; use revm::{ context_interface::result::ExecutionResult, @@ -77,7 +77,10 @@ pub fn execute_transactions( > where S: BlockBuilder>>>>, - T: TransactionCompat, + T: RpcConvert< + Primitives = S::Primitives, + Network: RpcTypes>, + >, { builder.apply_pre_execution_changes()?; @@ -121,7 +124,10 @@ pub fn resolve_transaction( ) -> Result, EthApiError> where DB::Error: Into, - T: TransactionCompat>, + T: RpcConvert< + Primitives: NodePrimitives, + Network: RpcTypes>, + >, { // If we're missing any fields we try to fill nonce, gas and // gas price. 
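A note on the receipt arithmetic in `op_suggest_tip_cap` above: receipts only carry a running `cumulative_gas_used` total, so per-transaction gas falls out as the difference between consecutive receipts. A minimal standalone sketch of that computation (the helper name and the plain `u64` slice are illustrative, not reth API; unlike the `windows(2)` form in the hunk, this variant also recovers the first transaction's gas, whose cumulative value is the gas itself):

```rust
/// Per-transaction gas from cumulative receipt totals (illustrative helper,
/// not part of this diff).
fn per_tx_gas_used(cumulative_gas_used: &[u64]) -> Vec<u64> {
    cumulative_gas_used
        .iter()
        .scan(0u64, |prev, &curr| {
            // each receipt stores a running total, so this transaction's gas
            // is the difference to the previous receipt's total
            let gas = curr.saturating_sub(*prev);
            *prev = curr;
            Some(gas)
        })
        .collect()
}

// per_tx_gas_used(&[50_000, 71_000, 180_000]) == [50_000, 21_000, 109_000]
```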
@@ -178,7 +184,7 @@ where } let tx = tx_resp_builder - .build_simulate_v1_transaction(tx) + .build_simulate_v1_transaction(tx.into()) .map_err(|e| EthApiError::other(e.into()))?; Ok(Recovered::new_unchecked(tx, from)) @@ -189,11 +195,11 @@ where pub fn build_simulated_block( block: RecoveredBlock, results: Vec>, - full_transactions: bool, + txs_kind: BlockTransactionsKind, tx_resp_builder: &T, -) -> Result>>, T::Error> +) -> Result, Header>>, T::Error> where - T: TransactionCompat< + T: RpcConvert< Primitives: NodePrimitives>, Error: FromEthApiError + FromEvmHalt, >, @@ -256,9 +262,6 @@ where calls.push(call); } - let txs_kind = - if full_transactions { BlockTransactionsKind::Full } else { BlockTransactionsKind::Hashes }; - - let block = from_block(block, txs_kind, tx_resp_builder)?; + let block = block.into_rpc_block(txs_kind, |tx, tx_info| tx_resp_builder.fill(tx, tx_info))?; Ok(SimulatedBlock { inner: block, calls }) } diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index de11acc8dc8..de3323d61e6 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -6,7 +6,7 @@ use alloy_primitives::B256; use alloy_rpc_types_eth::TransactionInfo; use reth_ethereum_primitives::TransactionSigned; use reth_primitives_traits::{NodePrimitives, Recovered, SignedTransaction}; -use reth_rpc_types_compat::TransactionCompat; +use reth_rpc_convert::{RpcConvert, RpcTransaction}; /// Represents from where a transaction was fetched. #[derive(Debug, Clone, Eq, PartialEq)] @@ -42,9 +42,9 @@ impl TransactionSource { pub fn into_transaction( self, resp_builder: &Builder, - ) -> Result + ) -> Result, Builder::Error> where - Builder: TransactionCompat>, + Builder: RpcConvert>, { match self { Self::Pool(tx) => resp_builder.fill_pending(tx), diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index 73e3c0471c5..85b1bc4208c 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -77,7 +77,9 @@ pub trait DebugApiExt { impl DebugApiExt for T where - T: EthApiClient + DebugApiClient + Sync, + T: EthApiClient + + DebugApiClient + + Sync, { type Provider = T; diff --git a/crates/rpc/rpc-testing-util/tests/it/trace.rs b/crates/rpc/rpc-testing-util/tests/it/trace.rs index c733e6bde67..301d65a820b 100644 --- a/crates/rpc/rpc-testing-util/tests/it/trace.rs +++ b/crates/rpc/rpc-testing-util/tests/it/trace.rs @@ -1,7 +1,7 @@ //! Integration tests for the trace API. 
use alloy_primitives::map::HashSet; -use alloy_rpc_types_eth::{Block, Header, Transaction}; +use alloy_rpc_types_eth::{Block, Header, Transaction, TransactionRequest}; use alloy_rpc_types_trace::{ filter::TraceFilter, parity::TraceType, tracerequest::TraceCallRequest, }; @@ -112,12 +112,17 @@ async fn debug_trace_block_entire_chain() { let url = url.unwrap(); let client = HttpClientBuilder::default().build(url).unwrap(); - let current_block: u64 = - >::block_number(&client) - .await - .unwrap() - .try_into() - .unwrap(); + let current_block: u64 = >::block_number(&client) + .await + .unwrap() + .try_into() + .unwrap(); let range = 0..=current_block; let mut stream = client.debug_trace_block_buffered_unordered(range, None, 20); let now = Instant::now(); @@ -141,12 +146,17 @@ async fn debug_trace_block_opcodes_entire_chain() { let url = url.unwrap(); let client = HttpClientBuilder::default().build(url).unwrap(); - let current_block: u64 = - >::block_number(&client) - .await - .unwrap() - .try_into() - .unwrap(); + let current_block: u64 = >::block_number(&client) + .await + .unwrap() + .try_into() + .unwrap(); let range = 0..=current_block; println!("Tracing blocks {range:?} for opcodes"); let mut stream = client.trace_block_opcode_gas_unordered(range, 2).enumerate(); diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml deleted file mode 100644 index 0fa228c7d04..00000000000 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ /dev/null @@ -1,40 +0,0 @@ -[package] -name = "reth-rpc-types-compat" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true -description = "Compatibility layer for reth-primitives and ethereum RPC types" - -[lints] -workspace = true - -[dependencies] -# reth -reth-primitives-traits.workspace = true -reth-storage-api = { workspace = true, features = ["serde", "serde-bincode-compat"] } - -# ethereum -alloy-primitives.workspace = true -alloy-rpc-types-eth = { workspace = true, default-features = false, features = ["serde"] } -alloy-consensus.workspace = true -alloy-network.workspace = true - -# scroll -reth-scroll-primitives = { workspace = true, features = ["serde", "serde-bincode-compat"] } -scroll-alloy-consensus.workspace = true -scroll-alloy-rpc-types.workspace = true - -# optimism -op-alloy-consensus.workspace = true -op-alloy-rpc-types.workspace = true -reth-optimism-primitives = { workspace = true, features = ["serde", "serde-bincode-compat"] } - -# io -serde.workspace = true -jsonrpsee-types.workspace = true - -# error -thiserror.workspace = true diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs deleted file mode 100644 index 92f90f3c150..00000000000 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ /dev/null @@ -1,102 +0,0 @@ -//! Compatibility functions for rpc `Block` type. 
- -use crate::transaction::TransactionCompat; -use alloy_consensus::{transaction::Recovered, BlockBody, BlockHeader, Sealable}; -use alloy_primitives::U256; -use alloy_rpc_types_eth::{ - Block, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, -}; -use reth_primitives_traits::{ - Block as BlockTrait, BlockBody as BlockBodyTrait, NodePrimitives, RecoveredBlock, - SignedTransaction, -}; - -/// Converts the given primitive block into a [`Block`] response with the given -/// [`BlockTransactionsKind`] -/// -/// If a `block_hash` is provided, then this is used, otherwise the block hash is computed. -#[expect(clippy::type_complexity)] -pub fn from_block( - block: RecoveredBlock, - kind: BlockTransactionsKind, - tx_resp_builder: &T, -) -> Result>, T::Error> -where - T: TransactionCompat, - B: BlockTrait::SignedTx>>, -{ - match kind { - BlockTransactionsKind::Hashes => Ok(from_block_with_tx_hashes::(block)), - BlockTransactionsKind::Full => from_block_full::(block, tx_resp_builder), - } -} - -/// Create a new [`Block`] response from a [`RecoveredBlock`], using the -/// total difficulty to populate its field in the rpc response. -/// -/// This will populate the `transactions` field with only the hashes of the transactions in the -/// block: [`BlockTransactions::Hashes`] -pub fn from_block_with_tx_hashes(block: RecoveredBlock) -> Block> -where - B: BlockTrait, -{ - let transactions = block.body().transaction_hashes_iter().copied().collect(); - let rlp_length = block.rlp_length(); - let (header, body) = block.into_sealed_block().split_sealed_header_body(); - let BlockBody { ommers, withdrawals, .. } = body.into_ethereum_body(); - - let transactions = BlockTransactions::Hashes(transactions); - let uncles = ommers.into_iter().map(|h| h.hash_slow()).collect(); - let header = Header::from_consensus(header.into(), None, Some(U256::from(rlp_length))); - - Block { header, uncles, transactions, withdrawals } -} - -/// Create a new [`Block`] response from a [`RecoveredBlock`], using the -/// total difficulty to populate its field in the rpc response. 
-/// -/// This will populate the `transactions` field with the _full_ -/// [`TransactionCompat::Transaction`] objects: [`BlockTransactions::Full`] -#[expect(clippy::type_complexity)] -pub fn from_block_full( - block: RecoveredBlock, - tx_resp_builder: &T, -) -> Result>, T::Error> -where - T: TransactionCompat, - B: BlockTrait::SignedTx>>, -{ - let block_number = block.header().number(); - let base_fee = block.header().base_fee_per_gas(); - let block_length = block.rlp_length(); - let block_hash = Some(block.hash()); - - let (block, senders) = block.split_sealed(); - let (header, body) = block.split_sealed_header_body(); - let BlockBody { transactions, ommers, withdrawals } = body.into_ethereum_body(); - - let transactions = transactions - .into_iter() - .zip(senders) - .enumerate() - .map(|(idx, (tx, sender))| { - let tx_info = TransactionInfo { - hash: Some(*tx.tx_hash()), - block_hash, - block_number: Some(block_number), - base_fee, - index: Some(idx as u64), - }; - - tx_resp_builder.fill(Recovered::new_unchecked(tx, sender), tx_info) - }) - .collect::, T::Error>>()?; - - let transactions = BlockTransactions::Full(transactions); - let uncles = ommers.into_iter().map(|h| h.hash_slow()).collect(); - let header = Header::from_consensus(header.into(), None, Some(U256::from(block_length))); - - let block = Block { header, uncles, transactions, withdrawals }; - - Ok(block) -} diff --git a/crates/rpc/rpc-types-compat/src/transaction.rs b/crates/rpc/rpc-types-compat/src/transaction.rs deleted file mode 100644 index 40bac0a985d..00000000000 --- a/crates/rpc/rpc-types-compat/src/transaction.rs +++ /dev/null @@ -1,318 +0,0 @@ -//! Compatibility functions for rpc `Transaction` type. - -use alloy_consensus::{ - error::ValueError, transaction::Recovered, EthereumTxEnvelope, SignableTransaction, TxEip4844, -}; -use alloy_network::Network; -use alloy_primitives::{Address, Signature}; -use alloy_rpc_types_eth::{request::TransactionRequest, Transaction, TransactionInfo}; -use core::error; -use op_alloy_consensus::{ - transaction::{OpDepositInfo, OpTransactionInfo}, - OpTxEnvelope, -}; -use op_alloy_rpc_types::OpTransactionRequest; -use reth_optimism_primitives::DepositReceipt; -use reth_primitives_traits::{NodePrimitives, SignedTransaction, TxTy}; -use reth_scroll_primitives::ScrollReceipt; -use reth_storage_api::{errors::ProviderError, ReceiptProvider}; -use scroll_alloy_consensus::{ScrollAdditionalInfo, ScrollTransactionInfo, ScrollTxEnvelope}; -use scroll_alloy_rpc_types::ScrollTransactionRequest; -use serde::{Deserialize, Serialize}; -use std::{convert::Infallible, error::Error, fmt::Debug, marker::PhantomData}; -use thiserror::Error; - -/// Builds RPC transaction w.r.t. network. -pub trait TransactionCompat: Send + Sync + Unpin + Clone + Debug { - /// The lower layer consensus types to convert from. - type Primitives: NodePrimitives; - - /// RPC transaction response type. - type Transaction: Serialize + for<'de> Deserialize<'de> + Send + Sync + Unpin + Clone + Debug; - - /// RPC transaction error type. - type Error: error::Error + Into>; - - /// Wrapper for `fill()` with default `TransactionInfo` - /// Create a new rpc transaction result for a _pending_ signed transaction, setting block - /// environment related fields to `None`. 
- fn fill_pending( - &self, - tx: Recovered>, - ) -> Result { - self.fill(tx, TransactionInfo::default()) - } - - /// Create a new rpc transaction result for a mined transaction, using the given block hash, - /// number, and tx index fields to populate the corresponding fields in the rpc result. - /// - /// The block hash, number, and tx index fields should be from the original block where the - /// transaction was mined. - fn fill( - &self, - tx: Recovered>, - tx_inf: TransactionInfo, - ) -> Result; - - /// Builds a fake transaction from a transaction request for inclusion into block built in - /// `eth_simulateV1`. - fn build_simulate_v1_transaction( - &self, - request: TransactionRequest, - ) -> Result, Self::Error>; -} - -/// Converts `self` into `T`. -/// -/// Should create an RPC transaction response object based on a consensus transaction, its signer -/// [`Address`] and an additional context. -pub trait IntoRpcTx { - /// An additional context, usually [`TransactionInfo`] in a wrapper that carries some - /// implementation specific extra information. - type TxInfo; - - /// Performs the conversion. - fn into_rpc_tx(self, signer: Address, tx_info: Self::TxInfo) -> T; -} - -/// Converts `self` into `T`. -/// -/// Should create a fake transaction for simulation using [`TransactionRequest`]. -pub trait TryIntoSimTx -where - Self: Sized, -{ - /// Performs the conversion. - /// - /// Should return a signed typed transaction envelope for the [`eth_simulateV1`] endpoint with a - /// dummy signature or an error if [required fields] are missing. - /// - /// [`eth_simulateV1`]: - /// [required fields]: TransactionRequest::buildable_type - fn try_into_sim_tx(self) -> Result>; -} - -impl IntoRpcTx for EthereumTxEnvelope { - type TxInfo = TransactionInfo; - - fn into_rpc_tx(self, signer: Address, tx_info: TransactionInfo) -> Transaction { - Transaction::from_transaction(self.with_signer(signer).convert(), tx_info) - } -} - -/// Adds extra context to [`TransactionInfo`]. -pub trait TxInfoMapper { - /// An associated output type that carries [`TransactionInfo`] with some extra context. - type Out; - /// An associated error that can occur during the mapping. - type Err; - - /// Performs the conversion. - fn try_map(&self, tx: T, tx_info: TransactionInfo) -> Result; -} - -impl TxInfoMapper<&T> for () { - type Out = TransactionInfo; - type Err = Infallible; - - fn try_map(&self, _tx: &T, tx_info: TransactionInfo) -> Result { - Ok(tx_info) - } -} - -/// Creates [`OpTransactionInfo`] by adding [`OpDepositInfo`] to [`TransactionInfo`] if `tx` is a -/// deposit. -pub fn try_into_op_tx_info>( - provider: &T, - tx: &OpTxEnvelope, - tx_info: TransactionInfo, -) -> Result { - let deposit_meta = if tx.is_deposit() { - provider.receipt_by_hash(tx.tx_hash())?.and_then(|receipt| { - receipt.as_deposit_receipt().map(|receipt| OpDepositInfo { - deposit_receipt_version: receipt.deposit_receipt_version, - deposit_nonce: receipt.deposit_nonce, - }) - }) - } else { - None - } - .unwrap_or_default(); - - Ok(OpTransactionInfo::new(tx_info, deposit_meta)) -} - -/// Creates [`ScrollTransactionInfo`] by adding [`ScrollAdditionalInfo`] to [`TransactionInfo`] if -/// `tx` is not a L1 message. -pub fn try_into_scroll_tx_info>( - provider: &T, - tx: &ScrollTxEnvelope, - tx_info: TransactionInfo, -) -> Result { - let additional_info = if tx.is_l1_message() { - None - } else { - provider - .receipt_by_hash(*tx.tx_hash())? 
- .map(|receipt| ScrollAdditionalInfo { l1_fee: receipt.l1_fee() }) - } - .unwrap_or_default(); - - Ok(ScrollTransactionInfo::new(tx_info, additional_info)) -} - -impl IntoRpcTx for OpTxEnvelope { - type TxInfo = OpTransactionInfo; - - fn into_rpc_tx( - self, - signer: Address, - tx_info: OpTransactionInfo, - ) -> op_alloy_rpc_types::Transaction { - op_alloy_rpc_types::Transaction::from_transaction(self.with_signer(signer), tx_info) - } -} - -impl IntoRpcTx for ScrollTxEnvelope { - type TxInfo = ScrollTransactionInfo; - - fn into_rpc_tx( - self, - signer: Address, - tx_info: Self::TxInfo, - ) -> scroll_alloy_rpc_types::Transaction { - scroll_alloy_rpc_types::Transaction::from_transaction(self.with_signer(signer), tx_info) - } -} - -impl TryIntoSimTx> for TransactionRequest { - fn try_into_sim_tx(self) -> Result, ValueError> { - Self::build_typed_simulate_transaction(self) - } -} - -impl TryIntoSimTx for TransactionRequest { - fn try_into_sim_tx(self) -> Result> { - let request: OpTransactionRequest = self.into(); - let tx = request.build_typed_tx().map_err(|request| { - ValueError::new(request.as_ref().clone(), "Required fields missing") - })?; - - // Create an empty signature for the transaction. - let signature = Signature::new(Default::default(), Default::default(), false); - - Ok(tx.into_signed(signature).into()) - } -} - -impl TryIntoSimTx for TransactionRequest { - fn try_into_sim_tx(self) -> Result> { - let request: ScrollTransactionRequest = self.into(); - let tx = request.build_typed_tx().map_err(|request| { - ValueError::new(request.as_ref().clone(), "Required fields missing") - })?; - - // Create an empty signature for the transaction. - let signature = Signature::new(Default::default(), Default::default(), false); - - Ok(tx.into_signed(signature).into()) - } -} - -/// Conversion into transaction RPC response failed. -#[derive(Debug, Clone, Error)] -#[error("Failed to convert transaction into RPC response: {0}")] -pub struct TransactionConversionError(String); - -/// Generic RPC response object converter for primitives `N` and network `E`. -#[derive(Debug)] -pub struct RpcConverter { - phantom: PhantomData<(N, E, Err)>, - mapper: Map, -} - -impl RpcConverter { - /// Creates a new [`RpcConverter`] with the default mapper. - pub const fn new() -> Self { - Self::with_mapper(()) - } -} - -impl RpcConverter { - /// Creates a new [`RpcConverter`] with `mapper`. - pub const fn with_mapper(mapper: Map) -> Self { - Self { phantom: PhantomData, mapper } - } - - /// Converts the generic types. - pub fn convert(self) -> RpcConverter { - RpcConverter::with_mapper(self.mapper) - } - - /// Swaps the inner `mapper`. - pub fn map(self, mapper: Map2) -> RpcConverter { - RpcConverter::with_mapper(mapper) - } - - /// Converts the generic types and swaps the inner `mapper`. 
- pub fn convert_map(self, mapper: Map2) -> RpcConverter { - self.convert().map(mapper) - } -} - -impl Clone for RpcConverter { - fn clone(&self) -> Self { - Self::with_mapper(self.mapper.clone()) - } -} - -impl Default for RpcConverter { - fn default() -> Self { - Self::new() - } -} - -impl TransactionCompat for RpcConverter -where - N: NodePrimitives, - E: Network + Unpin, - TxTy: IntoRpcTx<::TransactionResponse> + Clone + Debug, - TransactionRequest: TryIntoSimTx>, - Err: From - + for<'a> From<>>::Err> - + Error - + Unpin - + Sync - + Send - + Into>, - Map: for<'a> TxInfoMapper< - &'a TxTy, - Out = as IntoRpcTx<::TransactionResponse>>::TxInfo, - > + Clone - + Debug - + Unpin - + Send - + Sync, -{ - type Primitives = N; - type Transaction = ::TransactionResponse; - type Error = Err; - - fn fill( - &self, - tx: Recovered>, - tx_info: TransactionInfo, - ) -> Result { - let (tx, signer) = tx.into_parts(); - let tx_info = self.mapper.try_map(&tx, tx_info)?; - - Ok(tx.into_rpc_tx(signer, tx_info)) - } - - fn build_simulate_v1_transaction( - &self, - request: TransactionRequest, - ) -> Result, Self::Error> { - Ok(request.try_into_sim_tx().map_err(|e| TransactionConversionError(e.to_string()))?) - } -} diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index e289c60a459..2f41caa5480 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -29,10 +29,11 @@ reth-network-api.workspace = true reth-rpc-engine-api.workspace = true reth-revm = { workspace = true, features = ["witness"] } reth-tasks = { workspace = true, features = ["rayon"] } -reth-rpc-types-compat.workspace = true +reth-rpc-convert.workspace = true revm-inspectors.workspace = true reth-network-peers = { workspace = true, features = ["secp256k1"] } reth-evm.workspace = true +reth-evm-ethereum.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-network-types.workspace = true @@ -41,7 +42,7 @@ reth-node-api.workspace = true reth-trie-common.workspace = true # ethereum -alloy-evm.workspace = true +alloy-evm = { workspace = true, features = ["overrides"] } alloy-consensus.workspace = true alloy-signer.workspace = true alloy-signer-local.workspace = true diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 3f9518fd38c..1e2f107398e 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -1,7 +1,7 @@ use alloy_consensus::{transaction::SignerRecoverable, BlockHeader}; use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; use alloy_genesis::ChainConfig; -use alloy_primitives::{Address, Bytes, B256}; +use alloy_primitives::{uint, Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_eth::{ @@ -34,7 +34,7 @@ use reth_rpc_eth_types::{EthApiError, StateCacheDb}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_storage_api::{ BlockIdReader, BlockReaderIdExt, HeaderProvider, ProviderBlock, ReceiptProviderIdExt, - StateProofProvider, StateProvider, StateProviderFactory, StateRootProvider, TransactionVariant, + StateProofProvider, StateProviderFactory, StateRootProvider, TransactionVariant, }; use reth_tasks::pool::BlockingTaskGuard; use reth_trie_common::{updates::TrieUpdates, HashedPostState}; @@ -363,7 +363,7 @@ where let db = db.0; let tx_info = TransactionInfo { - block_number: Some(evm_env.block_env.number), + block_number: Some(evm_env.block_env.number.saturating_to()), base_fee: 
Some(evm_env.block_env.basefee), hash: None, block_hash: None, @@ -575,8 +575,8 @@ where results.push(trace); } // Increment block_env number and timestamp for the next bundle - evm_env.block_env.number += 1; - evm_env.block_env.timestamp += 12; + evm_env.block_env.number += uint!(1_U256); + evm_env.block_env.timestamp += uint!(12_U256); all_bundles.push(results); } @@ -728,7 +728,7 @@ where .map(|c| c.tx_index.map(|i| i as u64)) .unwrap_or_default(), block_hash: transaction_context.as_ref().map(|c| c.block_hash).unwrap_or_default(), - block_number: Some(evm_env.block_env.number), + block_number: Some(evm_env.block_env.number.saturating_to()), base_fee: Some(evm_env.block_env.basefee), }; diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index 824e5fb40d7..33ef2b3e5fe 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -7,6 +7,7 @@ use alloy_rpc_types_eth::{ use alloy_serde::JsonStorageKey; use jsonrpsee::core::RpcResult as Result; use reth_rpc_api::{EngineEthApiServer, EthApiServer}; +use reth_rpc_convert::RpcTxReq; /// Re-export for convenience pub use reth_rpc_engine_api::EngineApi; use reth_rpc_eth_api::{ @@ -40,6 +41,7 @@ impl EngineEthApiServer, RpcReceipt< for EngineEthApi where Eth: EthApiServer< + RpcTxReq, RpcTransaction, RpcBlock, RpcReceipt, diff --git a/crates/rpc/rpc/src/eth/builder.rs b/crates/rpc/rpc/src/eth/builder.rs index dbc7af09d0b..732ae1edf11 100644 --- a/crates/rpc/rpc/src/eth/builder.rs +++ b/crates/rpc/rpc/src/eth/builder.rs @@ -160,7 +160,11 @@ where + StateProviderFactory + ChainSpecProvider + CanonStateSubscriptions< - Primitives: NodePrimitives, + Primitives: NodePrimitives< + Block = Provider::Block, + Receipt = Provider::Receipt, + BlockHeader = Provider::Header, + >, > + Clone + Unpin + 'static, @@ -188,7 +192,7 @@ where let gas_oracle = gas_oracle.unwrap_or_else(|| { GasPriceOracle::new(provider.clone(), gas_oracle_config, eth_cache.clone()) }); - let fee_history_cache = FeeHistoryCache::new(fee_history_cache_config); + let fee_history_cache = FeeHistoryCache::::new(fee_history_cache_config); let new_canonical_blocks = provider.canonical_state_stream(); let fhc = fee_history_cache.clone(); let cache = eth_cache.clone(); @@ -232,7 +236,11 @@ where Provider: BlockReaderIdExt + StateProviderFactory + CanonStateSubscriptions< - Primitives: NodePrimitives, + Primitives: NodePrimitives< + Block = Provider::Block, + Receipt = Provider::Receipt, + BlockHeader = Provider::Header, + >, > + ChainSpecProvider + Clone + Unpin diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 1c67139582e..8a0683b7636 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -2,7 +2,7 @@ use alloy_consensus::{EnvKzgSettings, Transaction as _}; use alloy_eips::eip7840::BlobParams; -use alloy_primitives::{Keccak256, U256}; +use alloy_primitives::{uint, Keccak256, U256}; use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; @@ -94,9 +94,9 @@ where // need to adjust the timestamp for the next block if let Some(timestamp) = timestamp { - evm_env.block_env.timestamp = timestamp; + evm_env.block_env.timestamp = U256::from(timestamp); } else { - evm_env.block_env.timestamp += 12; + evm_env.block_env.timestamp += uint!(12_U256); } if let Some(difficulty) = difficulty { @@ -111,7 +111,7 @@ where .eth_api() .provider() .chain_spec() - 
.blob_params_at_timestamp(evm_env.block_env.timestamp) + .blob_params_at_timestamp(evm_env.block_env.timestamp.saturating_to()) .unwrap_or_else(BlobParams::cancun); if transactions.iter().filter_map(|tx| tx.blob_gas_used()).sum::() > blob_params.max_blob_gas_per_block() @@ -141,7 +141,7 @@ where let state_block_number = evm_env.block_env.number; // use the block number of the request - evm_env.block_env.number = block_number; + evm_env.block_env.number = U256::from(block_number); let eth_api = self.eth_api().clone(); @@ -254,7 +254,7 @@ where eth_sent_to_coinbase, gas_fees: total_gas_fees, results, - state_block_number, + state_block_number: state_block_number.to(), total_gas_used, }; diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index cf70176ebb5..f6cceee46e0 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -19,7 +19,8 @@ use reth_rpc_eth_types::{ EthApiError, EthStateCache, FeeHistoryCache, GasCap, GasPriceOracle, PendingBlock, }; use reth_storage_api::{ - BlockReader, BlockReaderIdExt, NodePrimitivesProvider, ProviderBlock, ProviderReceipt, + BlockReader, BlockReaderIdExt, NodePrimitivesProvider, ProviderBlock, ProviderHeader, + ProviderReceipt, }; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, @@ -127,7 +128,7 @@ where max_simulate_blocks: u64, eth_proof_window: u64, blocking_task_pool: BlockingTaskPool, - fee_history_cache: FeeHistoryCache, + fee_history_cache: FeeHistoryCache>, evm_config: EvmConfig, proof_permits: usize, ) -> Self { @@ -158,9 +159,9 @@ where { type Error = EthApiError; type NetworkTypes = Ethereum; - type TransactionCompat = EthRpcConverter; + type RpcConvert = EthRpcConverter; - fn tx_resp_builder(&self) -> &Self::TransactionCompat { + fn tx_resp_builder(&self) -> &Self::RpcConvert { &self.tx_resp_builder } } @@ -276,7 +277,7 @@ pub struct EthApiInner { /// A pool dedicated to CPU heavy blocking tasks. blocking_task_pool: BlockingTaskPool, /// Cache for block fees history - fee_history_cache: FeeHistoryCache, + fee_history_cache: FeeHistoryCache>, /// The type that defines how to configure the EVM evm_config: EvmConfig, @@ -303,7 +304,7 @@ where max_simulate_blocks: u64, eth_proof_window: u64, blocking_task_pool: BlockingTaskPool, - fee_history_cache: FeeHistoryCache, + fee_history_cache: FeeHistoryCache>, evm_config: EvmConfig, task_spawner: Box, proof_permits: usize, @@ -411,7 +412,7 @@ where /// Returns a handle to the fee history cache. 
#[inline] - pub const fn fee_history_cache(&self) -> &FeeHistoryCache { + pub const fn fee_history_cache(&self) -> &FeeHistoryCache> { &self.fee_history_cache } @@ -470,7 +471,7 @@ mod tests { use jsonrpsee_types::error::INVALID_PARAMS_CODE; use rand::Rng; use reth_chain_state::CanonStateSubscriptions; - use reth_chainspec::{BaseFeeParams, ChainSpec, ChainSpecProvider}; + use reth_chainspec::{ChainSpec, ChainSpecProvider, EthChainSpec}; use reth_ethereum_primitives::TransactionSigned; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; @@ -582,11 +583,11 @@ mod tests { // Add final base fee (for the next block outside of the request) let last_header = last_header.unwrap(); - base_fees_per_gas.push(BaseFeeParams::ethereum().next_block_base_fee( - last_header.gas_used, - last_header.gas_limit, - last_header.base_fee_per_gas.unwrap_or_default(), - ) as u128); + let spec = mock_provider.chain_spec(); + base_fees_per_gas.push( + spec.next_block_base_fee(&last_header, last_header.timestamp).unwrap_or_default() + as u128, + ); let eth_api = build_test_eth_api(mock_provider); @@ -596,7 +597,7 @@ mod tests { /// Invalid block range #[tokio::test] async fn test_fee_history_empty() { - let response = as EthApiServer<_, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( &build_test_eth_api(NoopProvider::default()), U64::from(1), BlockNumberOrTag::Latest, @@ -618,7 +619,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( ð_api, U64::from(newest_block + 1), newest_block.into(), @@ -641,7 +642,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( ð_api, U64::from(1), (newest_block + 1000).into(), @@ -664,7 +665,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( ð_api, U64::from(0), newest_block.into(), diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 1f67bee8958..d672fd10f6e 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -12,8 +12,8 @@ use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_errors::ProviderError; use reth_primitives_traits::NodePrimitives; use reth_rpc_eth_api::{ - EngineEthFilter, EthApiTypes, EthFilterApiServer, FullEthApiTypes, QueryLimits, RpcNodeCore, - RpcNodeCoreExt, RpcTransaction, TransactionCompat, + EngineEthFilter, EthApiTypes, EthFilterApiServer, FullEthApiTypes, QueryLimits, RpcConvert, + RpcNodeCore, RpcNodeCoreExt, RpcTransaction, }; use reth_rpc_eth_types::{ logs_utils::{self, append_matching_block_logs, ProviderOrBlock}, @@ -416,7 +416,9 @@ struct EthFilterInner { impl EthFilterInner where - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + 'static, { /// Access the underlying provider. 
fn provider(&self) -> &Eth::Provider { @@ -685,7 +687,7 @@ struct FullTransactionsReceiver { impl FullTransactionsReceiver where T: PoolTransaction + 'static, - TxCompat: TransactionCompat>, + TxCompat: RpcConvert>, { /// Creates a new `FullTransactionsReceiver` encapsulating the provided transaction stream. fn new(stream: NewSubpoolTransactionStream, tx_resp_builder: TxCompat) -> Self { @@ -693,7 +695,7 @@ where } /// Returns all new pending transactions received since the last poll. - async fn drain(&self) -> FilterChanges { + async fn drain(&self) -> FilterChanges> { let mut pending_txs = Vec::new(); let mut prepared_stream = self.txs_stream.lock().await; @@ -719,13 +721,13 @@ trait FullTransactionsFilter: fmt::Debug + Send + Sync + Unpin + 'static { } #[async_trait] -impl FullTransactionsFilter +impl FullTransactionsFilter> for FullTransactionsReceiver where T: PoolTransaction + 'static, - TxCompat: TransactionCompat> + 'static, + TxCompat: RpcConvert> + 'static, { - async fn drain(&self) -> FilterChanges { + async fn drain(&self) -> FilterChanges> { Self::drain(self).await } } diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index 0cb0b57a423..724b3a5c965 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -5,6 +5,7 @@ use alloy_rpc_types_eth::{BlockId, TransactionReceipt}; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::ConfigureEvm; use reth_primitives_traits::{BlockBody, NodePrimitives}; +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, types::RpcTypes, @@ -21,6 +22,7 @@ where Self: LoadBlock< Error = EthApiError, NetworkTypes: RpcTypes, + RpcConvert: RpcConvert, Provider: BlockReader< Transaction = reth_ethereum_primitives::TransactionSigned, Receipt = reth_ethereum_primitives::Receipt, diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index ab6adb53f39..1a41b8d5768 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -2,19 +2,18 @@ use crate::EthApi; use alloy_evm::block::BlockExecutorFactory; -use alloy_primitives::{TxKind, U256}; -use alloy_rpc_types::TransactionRequest; -use alloy_signer::Either; -use reth_evm::{ConfigureEvm, EvmEnv, EvmFactory, SpecFor}; +use alloy_rpc_types_eth::TransactionRequest; +use reth_errors::ProviderError; +use reth_evm::{ConfigureEvm, EvmFactory, TxEnvFor}; use reth_node_api::NodePrimitives; +use reth_rpc_convert::{RpcConvert, RpcTypes}; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, - FromEthApiError, FromEvmError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcNodeCoreExt, + FromEvmError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, }; -use reth_rpc_eth_types::{revm_utils::CallFees, EthApiError, RpcInvalidTransactionError}; use reth_storage_api::{BlockReader, ProviderHeader, ProviderTx}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use revm::{context::TxEnv, context_interface::Block, Database}; +use revm::context::TxEnv; impl EthCall for EthApi where @@ -43,7 +42,11 @@ where SignedTx = ProviderTx, >, >, - Error: FromEvmError, + RpcConvert: RpcConvert, Network = Self::NetworkTypes>, + NetworkTypes: RpcTypes>, + Error: FromEvmError + + From<::Error> + + From, > + SpawnBlocking, Provider: BlockReader, { @@ -56,99 +59,6 @@ where fn max_simulate_blocks(&self) -> u64 { 
self.inner.max_simulate_blocks() } - - fn create_txn_env( - &self, - evm_env: &EvmEnv>, - request: TransactionRequest, - mut db: impl Database>, - ) -> Result { - // Ensure that if versioned hashes are set, they're not empty - if request.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) { - return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into_eth_err()) - } - - let tx_type = request.minimal_tx_type() as u8; - - let TransactionRequest { - from, - to, - gas_price, - max_fee_per_gas, - max_priority_fee_per_gas, - gas, - value, - input, - nonce, - access_list, - chain_id, - blob_versioned_hashes, - max_fee_per_blob_gas, - authorization_list, - transaction_type: _, - sidecar: _, - } = request; - - let CallFees { max_priority_fee_per_gas, gas_price, max_fee_per_blob_gas } = - CallFees::ensure_fees( - gas_price.map(U256::from), - max_fee_per_gas.map(U256::from), - max_priority_fee_per_gas.map(U256::from), - U256::from(evm_env.block_env.basefee), - blob_versioned_hashes.as_deref(), - max_fee_per_blob_gas.map(U256::from), - evm_env.block_env.blob_gasprice().map(U256::from), - )?; - - let gas_limit = gas.unwrap_or( - // Use maximum allowed gas limit. The reason for this - // is that both Erigon and Geth use pre-configured gas cap even if - // it's possible to derive the gas limit from the block: - // - evm_env.block_env.gas_limit, - ); - - let chain_id = chain_id.unwrap_or(evm_env.cfg_env.chain_id); - - let caller = from.unwrap_or_default(); - - let nonce = if let Some(nonce) = nonce { - nonce - } else { - db.basic(caller).map_err(Into::into)?.map(|acc| acc.nonce).unwrap_or_default() - }; - - let env = TxEnv { - tx_type, - gas_limit, - nonce, - caller, - gas_price: gas_price.saturating_to(), - gas_priority_fee: max_priority_fee_per_gas.map(|v| v.saturating_to()), - kind: to.unwrap_or(TxKind::Create), - value: value.unwrap_or_default(), - data: input - .try_into_unique_input() - .map_err(Self::Error::from_eth_err)? - .unwrap_or_default(), - chain_id: Some(chain_id), - access_list: access_list.unwrap_or_default(), - // EIP-4844 fields - blob_hashes: blob_versioned_hashes.unwrap_or_default(), - max_fee_per_blob_gas: max_fee_per_blob_gas - .map(|v| v.saturating_to()) - .unwrap_or_default(), - // EIP-7702 fields - authorization_list: authorization_list - .unwrap_or_default() - .into_iter() - .map(Either::Left) - .collect(), - }; - - Ok(env) - } } impl EstimateCall for EthApi diff --git a/crates/rpc/rpc/src/eth/helpers/fees.rs b/crates/rpc/rpc/src/eth/helpers/fees.rs index 9ee8b9702be..87adb42b2b5 100644 --- a/crates/rpc/rpc/src/eth/helpers/fees.rs +++ b/crates/rpc/rpc/src/eth/helpers/fees.rs @@ -3,13 +3,17 @@ use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_rpc_eth_api::helpers::{EthFees, LoadBlock, LoadFee}; use reth_rpc_eth_types::{FeeHistoryCache, GasPriceOracle}; -use reth_storage_api::{BlockReader, BlockReaderIdExt, StateProviderFactory}; +use reth_storage_api::{BlockReader, BlockReaderIdExt, ProviderHeader, StateProviderFactory}; use crate::EthApi; impl EthFees for EthApi where - Self: LoadFee, + Self: LoadFee< + Provider: ChainSpecProvider< + ChainSpec: EthChainSpec
>, + >, + >, Provider: BlockReader, { } @@ -27,7 +31,7 @@ where } #[inline] - fn fee_history_cache(&self) -> &FeeHistoryCache { + fn fee_history_cache(&self) -> &FeeHistoryCache> { self.inner.fee_history_cache() } } diff --git a/crates/rpc/rpc/src/eth/helpers/mod.rs b/crates/rpc/rpc/src/eth/helpers/mod.rs index 03e0443a15b..15fcf612d9a 100644 --- a/crates/rpc/rpc/src/eth/helpers/mod.rs +++ b/crates/rpc/rpc/src/eth/helpers/mod.rs @@ -2,6 +2,7 @@ //! files. pub mod signer; +pub mod sync_listener; pub mod types; mod block; @@ -13,3 +14,5 @@ mod spec; mod state; mod trace; mod transaction; + +pub use sync_listener::SyncListener; diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index dac1ace7d82..dd65fd53ca9 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -1,10 +1,12 @@ //! Support for building a pending block with transactions from local view of mempool. +use crate::EthApi; use alloy_consensus::BlockHeader; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_evm::{ConfigureEvm, NextBlockEnvAttributes}; use reth_node_api::NodePrimitives; use reth_primitives_traits::SealedHeader; +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{LoadPendingBlock, SpawnBlocking}, types::RpcTypes, @@ -18,28 +20,25 @@ use reth_storage_api::{ use reth_transaction_pool::{PoolTransaction, TransactionPool}; use revm_primitives::B256; -use crate::EthApi; - impl LoadPendingBlock for EthApi where Self: SpawnBlocking< - NetworkTypes: RpcTypes
, + NetworkTypes: RpcTypes< + Header = alloy_rpc_types_eth::Header>, + >, Error: FromEvmError, + RpcConvert: RpcConvert, > + RpcNodeCore< - Provider: BlockReaderIdExt< - Transaction = reth_ethereum_primitives::TransactionSigned, - Block = reth_ethereum_primitives::Block, - Receipt = reth_ethereum_primitives::Receipt, - Header = alloy_consensus::Header, - > + ChainSpecProvider + Provider: BlockReaderIdExt + + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool< Transaction: PoolTransaction>, >, Evm: ConfigureEvm< Primitives = ::Primitives, - NextBlockEnvCtx = NextBlockEnvAttributes, + NextBlockEnvCtx: From, >, Primitives: NodePrimitives< BlockHeader = ProviderHeader, @@ -48,10 +47,7 @@ where Block = ProviderBlock, >, >, - Provider: BlockReader< - Block = reth_ethereum_primitives::Block, - Receipt = reth_ethereum_primitives::Receipt, - >, + Provider: BlockReader, { #[inline] fn pending_block( @@ -71,8 +67,9 @@ where suggested_fee_recipient: parent.beneficiary(), prev_randao: B256::random(), gas_limit: parent.gas_limit(), - parent_beacon_block_root: parent.parent_beacon_block_root(), - withdrawals: None, - }) + parent_beacon_block_root: parent.parent_beacon_block_root().map(|_| B256::ZERO), + withdrawals: parent.withdrawals_root().map(|_| Default::default()), + } + .into()) } } diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 19b857fa986..90c9e32c64d 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -36,6 +36,7 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_consensus::Header; use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT_30M; use alloy_primitives::{Address, StorageKey, StorageValue, U256}; use reth_evm_ethereum::EthEvmConfig; @@ -67,7 +68,7 @@ mod tests { DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_ETH_PROOF_WINDOW, BlockingTaskPool::build().expect("failed to build tracing pool"), - FeeHistoryCache::new(FeeHistoryCacheConfig::default()), + FeeHistoryCache::
<Header>::new(FeeHistoryCacheConfig::default()), evm_config, DEFAULT_PROOF_PERMITS, ) @@ -93,7 +94,7 @@ mod tests { DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_ETH_PROOF_WINDOW + 1, BlockingTaskPool::build().expect("failed to build tracing pool"), - FeeHistoryCache::new(FeeHistoryCacheConfig::default()), + FeeHistoryCache::<Header>
::new(FeeHistoryCacheConfig::default()), evm_config, DEFAULT_PROOF_PERMITS, ) diff --git a/crates/rpc/rpc/src/eth/helpers/sync_listener.rs b/crates/rpc/rpc/src/eth/helpers/sync_listener.rs new file mode 100644 index 00000000000..13c8de19b0d --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/sync_listener.rs @@ -0,0 +1,133 @@ +//! A utility Future to asynchronously wait until a node has finished syncing. + +use futures::Stream; +use pin_project::pin_project; +use reth_network_api::NetworkInfo; +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, +}; + +/// This future resolves once the node is no longer syncing: [`NetworkInfo::is_syncing`]. +#[must_use = "futures do nothing unless polled"] +#[pin_project] +#[derive(Debug)] +pub struct SyncListener { + #[pin] + tick: St, + network_info: N, +} + +impl SyncListener { + /// Create a new [`SyncListener`] using the given tick stream. + pub const fn new(network_info: N, tick: St) -> Self { + Self { tick, network_info } + } +} + +impl Future for SyncListener +where + N: NetworkInfo, + St: Stream + Unpin, +{ + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + + if !this.network_info.is_syncing() { + return Poll::Ready(()); + } + + loop { + let tick_event = ready!(this.tick.as_mut().poll_next(cx)); + + match tick_event { + Some(_) => { + if !this.network_info.is_syncing() { + return Poll::Ready(()); + } + } + None => return Poll::Ready(()), + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_rpc_types_admin::EthProtocolInfo; + use futures::stream; + use reth_network_api::{NetworkError, NetworkStatus}; + use std::{ + net::{IpAddr, SocketAddr}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + }; + + #[derive(Clone)] + struct TestNetwork { + syncing: Arc, + } + + impl NetworkInfo for TestNetwork { + fn local_addr(&self) -> SocketAddr { + (IpAddr::from([0, 0, 0, 0]), 0).into() + } + + async fn network_status(&self) -> Result { + #[allow(deprecated)] + Ok(NetworkStatus { + client_version: "test".to_string(), + protocol_version: 5, + eth_protocol_info: EthProtocolInfo { + network: 1, + difficulty: None, + genesis: Default::default(), + config: Default::default(), + head: Default::default(), + }, + }) + } + + fn chain_id(&self) -> u64 { + 1 + } + + fn is_syncing(&self) -> bool { + self.syncing.load(Ordering::SeqCst) + } + + fn is_initially_syncing(&self) -> bool { + self.is_syncing() + } + } + + #[tokio::test] + async fn completes_immediately_if_not_syncing() { + let network = TestNetwork { syncing: Arc::new(AtomicBool::new(false)) }; + let fut = SyncListener::new(network, stream::pending::<()>()); + fut.await; + } + + #[tokio::test] + async fn resolves_when_syncing_stops() { + use tokio::sync::mpsc::unbounded_channel; + use tokio_stream::wrappers::UnboundedReceiverStream; + + let syncing = Arc::new(AtomicBool::new(true)); + let network = TestNetwork { syncing: syncing.clone() }; + let (tx, rx) = unbounded_channel(); + let listener = SyncListener::new(network, UnboundedReceiverStream::new(rx)); + let handle = tokio::spawn(listener); + + syncing.store(false, Ordering::Relaxed); + let _ = tx.send(()); + + handle.await.unwrap(); + } +} diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 90b6e6c9283..2425c15fc0b 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -1,12 +1,12 @@ //! L1 `eth` API types. 
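The `SyncListener` future added above composes with any `Unpin` tick stream. A hypothetical caller that gates work on sync completion (the one-second interval and the `reth_rpc::eth::SyncListener` import path, based on the re-export in `eth/mod.rs` later in this diff, are assumptions):

```rust
use std::{future::Future, time::Duration};

use reth_network_api::NetworkInfo;
use reth_rpc::eth::SyncListener; // assumed path via the re-export below
use tokio_stream::wrappers::IntervalStream;

/// Runs `work` only once the node reports that it is no longer syncing.
async fn run_after_sync<N, F>(network: N, work: F)
where
    N: NetworkInfo,
    F: Future<Output = ()>,
{
    // poll the sync flag on every tick; the future resolves immediately if
    // the node is already synced
    let ticks = IntervalStream::new(tokio::time::interval(Duration::from_secs(1)));
    SyncListener::new(network, ticks).await;
    work.await;
}
```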
use alloy_network::Ethereum; -use reth_ethereum_primitives::EthPrimitives; +use reth_evm_ethereum::EthEvmConfig; +use reth_rpc_convert::RpcConverter; use reth_rpc_eth_types::EthApiError; -use reth_rpc_types_compat::RpcConverter; /// An [`RpcConverter`] with its generics set to Ethereum specific. -pub type EthRpcConverter = RpcConverter; +pub type EthRpcConverter = RpcConverter; //tests for simulate #[cfg(test)] diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index b4dca3b9f2b..af8619de867 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -15,6 +15,6 @@ pub use core::{EthApi, EthApiFor}; pub use filter::EthFilter; pub use pubsub::EthPubSub; -pub use helpers::signer::DevSigner; +pub use helpers::{signer::DevSigner, sync_listener::SyncListener}; pub use reth_rpc_eth_api::{EthApiServer, EthApiTypes, FullEthApiServer, RpcNodeCore}; diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index b91318d498b..1c7982f80fd 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -2,7 +2,7 @@ use std::sync::Arc; -use alloy_primitives::TxHash; +use alloy_primitives::{TxHash, U256}; use alloy_rpc_types_eth::{ pubsub::{Params, PubSubSyncStatus, SubscriptionKind, SyncStatusMetadata}, Filter, Header, Log, @@ -15,7 +15,7 @@ use reth_chain_state::CanonStateSubscriptions; use reth_network_api::NetworkInfo; use reth_primitives_traits::NodePrimitives; use reth_rpc_eth_api::{ - pubsub::EthPubSubApiServer, EthApiTypes, RpcNodeCore, RpcTransaction, TransactionCompat, + pubsub::EthPubSubApiServer, EthApiTypes, RpcConvert, RpcNodeCore, RpcTransaction, }; use reth_rpc_eth_types::logs_utils; use reth_rpc_server_types::result::{internal_rpc_err, invalid_params_rpc_err}; @@ -55,150 +55,181 @@ impl EthPubSub { } } -#[async_trait::async_trait] -impl EthPubSubApiServer> for EthPubSub +impl EthPubSub where Eth: RpcNodeCore< - Provider: BlockNumReader + CanonStateSubscriptions, + Provider: BlockNumReader + CanonStateSubscriptions, Pool: TransactionPool, Network: NetworkInfo, > + EthApiTypes< - TransactionCompat: TransactionCompat< + RpcConvert: RpcConvert< Primitives: NodePrimitives>, >, - > + 'static, + >, { - /// Handler for `eth_subscribe` - async fn subscribe( + /// Returns the current sync status for the `syncing` subscription + pub fn sync_status(&self, is_syncing: bool) -> PubSubSyncStatus { + self.inner.sync_status(is_syncing) + } + + /// Returns a stream that yields all transaction hashes emitted by the txpool. + pub fn pending_transaction_hashes_stream(&self) -> impl Stream { + self.inner.pending_transaction_hashes_stream() + } + + /// Returns a stream that yields all transactions emitted by the txpool. + pub fn full_pending_transaction_stream( &self, - pending: PendingSubscriptionSink, + ) -> impl Stream::Transaction>> { + self.inner.full_pending_transaction_stream() + } + + /// Returns a stream that yields all new RPC blocks. + pub fn new_headers_stream(&self) -> impl Stream> { + self.inner.new_headers_stream() + } + + /// Returns a stream that yields all logs that match the given filter. + pub fn log_stream(&self, filter: Filter) -> impl Stream { + self.inner.log_stream(filter) + } + + /// The actual handler for an accepted [`EthPubSub::subscribe`] call. 
+    pub async fn handle_accepted(
+        &self,
+        accepted_sink: SubscriptionSink,
         kind: SubscriptionKind,
         params: Option<Params>,
-    ) -> jsonrpsee::core::SubscriptionResult {
-        let sink = pending.accept().await?;
-        let pubsub = self.inner.clone();
-        self.inner.subscription_task_spawner.spawn(Box::pin(async move {
-            let _ = handle_accepted(pubsub, sink, kind, params).await;
-        }));
+    ) -> Result<(), ErrorObject<'static>> {
+        match kind {
+            SubscriptionKind::NewHeads => {
+                pipe_from_stream(accepted_sink, self.new_headers_stream()).await
+            }
+            SubscriptionKind::Logs => {
+                // if no params are provided, use default filter params
+                let filter = match params {
+                    Some(Params::Logs(filter)) => *filter,
+                    Some(Params::Bool(_)) => {
+                        return Err(invalid_params_rpc_err("Invalid params for logs"))
+                    }
+                    _ => Default::default(),
+                };
+                pipe_from_stream(accepted_sink, self.log_stream(filter)).await
+            }
+            SubscriptionKind::NewPendingTransactions => {
+                if let Some(params) = params {
+                    match params {
+                        Params::Bool(true) => {
+                            // full transaction objects requested
+                            let stream = self.full_pending_transaction_stream().filter_map(|tx| {
+                                let tx_value = match self
+                                    .inner
+                                    .eth_api
+                                    .tx_resp_builder()
+                                    .fill_pending(tx.transaction.to_consensus())
+                                {
+                                    Ok(tx) => Some(tx),
+                                    Err(err) => {
+                                        error!(target = "rpc",
+                                            %err,
+                                            "Failed to fill transaction with block context"
+                                        );
+                                        None
+                                    }
+                                };
+                                std::future::ready(tx_value)
+                            });
+                            return pipe_from_stream(accepted_sink, stream).await
+                        }
+                        Params::Bool(false) | Params::None => {
+                            // only hashes requested
+                        }
+                        Params::Logs(_) => {
+                            return Err(invalid_params_rpc_err(
+                                "Invalid params for newPendingTransactions",
+                            ))
+                        }
+                    }
+                }
+
+                pipe_from_stream(accepted_sink, self.pending_transaction_hashes_stream()).await
+            }
+            SubscriptionKind::Syncing => {
+                // get new block subscription
+                let mut canon_state = BroadcastStream::new(
+                    self.inner.eth_api.provider().subscribe_to_canonical_state(),
+                );
+                // get current sync status
+                let mut initial_sync_status = self.inner.eth_api.network().is_syncing();
+                let current_sub_res = self.sync_status(initial_sync_status);
+
+                // send the current status immediately
+                let msg = SubscriptionMessage::new(
+                    accepted_sink.method_name(),
+                    accepted_sink.subscription_id(),
+                    &current_sub_res,
+                )
+                .map_err(SubscriptionSerializeError::new)?;
+
+                if accepted_sink.send(msg).await.is_err() {
+                    return Ok(())
+                }
+
+                while canon_state.next().await.is_some() {
+                    let current_syncing = self.inner.eth_api.network().is_syncing();
+                    // Only send a new response if the sync status has changed
+                    if current_syncing != initial_sync_status {
+                        // Update the sync status on each new block
+                        initial_sync_status = current_syncing;
+
+                        // send a new message now that the status changed
+                        let sync_status = self.sync_status(current_syncing);
+                        let msg = SubscriptionMessage::new(
+                            accepted_sink.method_name(),
+                            accepted_sink.subscription_id(),
+                            &sync_status,
+                        )
+                        .map_err(SubscriptionSerializeError::new)?;
+
+                        if accepted_sink.send(msg).await.is_err() {
+                            break
+                        }
+                    }
+                }
+
+                Ok(())
+            }
+        }
     }
 }
 
-/// The actual handler for an accepted [`EthPubSub::subscribe`] call.
-async fn handle_accepted<Eth>(
-    pubsub: Arc<EthPubSubInner<Eth>>,
-    accepted_sink: SubscriptionSink,
-    kind: SubscriptionKind,
-    params: Option<Params>,
-) -> Result<(), ErrorObject<'static>>
+#[async_trait::async_trait]
+impl<Eth> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>> for EthPubSub<Eth>
 where
     Eth: RpcNodeCore<
             Provider: BlockNumReader + CanonStateSubscriptions,
             Pool: TransactionPool,
             Network: NetworkInfo,
         > + EthApiTypes<
-            TransactionCompat: TransactionCompat<
+            RpcConvert: RpcConvert<
                 Primitives: NodePrimitives>,
             >,
-        >,
+        > + 'static,
 {
-    match kind {
-        SubscriptionKind::NewHeads => {
-            pipe_from_stream(accepted_sink, pubsub.new_headers_stream()).await
-        }
-        SubscriptionKind::Logs => {
-            // if no params are provided, used default filter params
-            let filter = match params {
-                Some(Params::Logs(filter)) => *filter,
-                Some(Params::Bool(_)) => {
-                    return Err(invalid_params_rpc_err("Invalid params for logs"))
-                }
-                _ => Default::default(),
-            };
-            pipe_from_stream(accepted_sink, pubsub.log_stream(filter)).await
-        }
-        SubscriptionKind::NewPendingTransactions => {
-            if let Some(params) = params {
-                match params {
-                    Params::Bool(true) => {
-                        // full transaction objects requested
-                        let stream = pubsub.full_pending_transaction_stream().filter_map(|tx| {
-                            let tx_value = match pubsub
-                                .eth_api
-                                .tx_resp_builder()
-                                .fill_pending(tx.transaction.to_consensus())
-                            {
-                                Ok(tx) => Some(tx),
-                                Err(err) => {
-                                    error!(target = "rpc",
-                                        %err,
-                                        "Failed to fill transaction with block context"
-                                    );
-                                    None
-                                }
-                            };
-                            std::future::ready(tx_value)
-                        });
-                        return pipe_from_stream(accepted_sink, stream).await
-                    }
-                    Params::Bool(false) | Params::None => {
-                        // only hashes requested
-                    }
-                    Params::Logs(_) => {
-                        return Err(invalid_params_rpc_err(
-                            "Invalid params for newPendingTransactions",
-                        ))
-                    }
-                }
-            }
-
-            pipe_from_stream(accepted_sink, pubsub.pending_transaction_hashes_stream()).await
-        }
-        SubscriptionKind::Syncing => {
-            // get new block subscription
-            let mut canon_state =
-                BroadcastStream::new(pubsub.eth_api.provider().subscribe_to_canonical_state());
-            // get current sync status
-            let mut initial_sync_status = pubsub.eth_api.network().is_syncing();
-            let current_sub_res = pubsub.sync_status(initial_sync_status);
-
-            // send the current status immediately
-            let msg = SubscriptionMessage::new(
-                accepted_sink.method_name(),
-                accepted_sink.subscription_id(),
-                &current_sub_res,
-            )
-            .map_err(SubscriptionSerializeError::new)?;
-
-            if accepted_sink.send(msg).await.is_err() {
-                return Ok(())
-            }
-
-            while canon_state.next().await.is_some() {
-                let current_syncing = pubsub.eth_api.network().is_syncing();
-                // Only send a new response if the sync status has changed
-                if current_syncing != initial_sync_status {
-                    // Update the sync status on each new block
-                    initial_sync_status = current_syncing;
-
-                    // send a new message now that the status changed
-                    let sync_status = pubsub.sync_status(current_syncing);
-                    let msg = SubscriptionMessage::new(
-                        accepted_sink.method_name(),
-                        accepted_sink.subscription_id(),
-                        &sync_status,
-                    )
-                    .map_err(SubscriptionSerializeError::new)?;
-
-                    if accepted_sink.send(msg).await.is_err() {
-                        break
-                    }
-                }
-            }
+    /// Handler for `eth_subscribe`
+    async fn subscribe(
+        &self,
+        pending: PendingSubscriptionSink,
+        kind: SubscriptionKind,
+        params: Option<Params>,
+    ) -> jsonrpsee::core::SubscriptionResult {
+        let sink = pending.accept().await?;
+        let pubsub = self.clone();
+        self.inner.subscription_task_spawner.spawn(Box::pin(async move {
+            let _ = pubsub.handle_accepted(sink, kind, params).await;
+        }));
 
-            Ok(())
-        }
+        Ok(())
     }
 }
 
@@ -322,10 +353,18 @@
where /// Returns a stream that yields all new RPC blocks. fn new_headers_stream(&self) -> impl Stream> { self.eth_api.provider().canonical_state_stream().flat_map(|new_chain| { - let headers = new_chain.committed().headers().collect::>(); - futures::stream::iter( - headers.into_iter().map(|h| Header::from_consensus(h.into(), None, None)), - ) + let headers = new_chain + .committed() + .blocks_iter() + .map(|block| { + Header::from_consensus( + block.clone_sealed_header().into(), + None, + Some(U256::from(block.rlp_length())), + ) + }) + .collect::>(); + futures::stream::iter(headers) }) } diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index 9bd78ad851e..6221f6821c1 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -2,6 +2,7 @@ use alloy_consensus::BlockHeader; use alloy_eips::BlockNumberOrTag; +use alloy_evm::overrides::apply_block_overrides; use alloy_primitives::U256; use alloy_rpc_types_eth::BlockId; use alloy_rpc_types_mev::{ @@ -17,9 +18,7 @@ use reth_rpc_eth_api::{ helpers::{block::LoadBlock, Call, EthTransactions}, FromEthApiError, FromEvmError, }; -use reth_rpc_eth_types::{ - revm_utils::apply_block_overrides, utils::recover_raw_transaction, EthApiError, -}; +use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; use reth_storage_api::ProviderTx; use reth_tasks::pool::BlockingTaskGuard; use reth_transaction_pool::{PoolPooledTx, PoolTransaction, TransactionPool}; diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index bac57b63035..690fb33e871 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -49,7 +49,7 @@ mod web3; pub use admin::AdminApi; pub use debug::DebugApi; pub use engine::{EngineApi, EngineEthApi}; -pub use eth::{EthApi, EthApiBuilder, EthBundle, EthFilter, EthPubSub}; +pub use eth::{helpers::SyncListener, EthApi, EthApiBuilder, EthBundle, EthFilter, EthPubSub}; pub use miner::MinerApi; pub use net::NetApi; pub use otterscan::OtterscanApi; diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 502a71ec6d4..bafbf0730bd 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -13,6 +13,7 @@ use alloy_rpc_types_trace::{ use async_trait::async_trait; use jsonrpsee::{core::RpcResult, types::ErrorObjectOwned}; use reth_rpc_api::{EthApiServer, OtterscanServer}; +use reth_rpc_convert::RpcTxReq; use reth_rpc_eth_api::{ helpers::{EthTransactions, TraceExt}, FullEthApiTypes, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction, @@ -67,6 +68,7 @@ impl OtterscanServer, RpcHeader where Eth: EthApiServer< + RpcTxReq, RpcTransaction, RpcBlock, RpcReceipt, @@ -338,8 +340,11 @@ where num.into(), None, TracingInspectorConfig::default_parity(), - |tx_info, inspector, _, _, _| { - Ok(inspector.into_parity_builder().into_localized_transaction_traces(tx_info)) + |tx_info, ctx| { + Ok(ctx + .inspector + .into_parity_builder() + .into_localized_transaction_traces(tx_info)) }, ) .await diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 4e620ec1852..73d461bf222 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -403,9 +403,11 @@ where Some(block.clone()), None, TracingInspectorConfig::default_parity(), - move |tx_info, inspector, _, _, _| { - let mut traces = - inspector.into_parity_builder().into_localized_transaction_traces(tx_info); + move |tx_info, ctx| { + let mut traces = ctx + .inspector + .into_parity_builder() + 
.into_localized_transaction_traces(tx_info); traces.retain(|trace| matcher.matches(&trace.trace)); Ok(Some(traces)) }, @@ -470,9 +472,9 @@ where block_id, None, TracingInspectorConfig::default_parity(), - |tx_info, inspector, _, _, _| { + |tx_info, ctx| { let traces = - inspector.into_parity_builder().into_localized_transaction_traces(tx_info); + ctx.inspector.into_parity_builder().into_localized_transaction_traces(tx_info); Ok(traces) }, ); @@ -507,14 +509,16 @@ where block_id, None, TracingInspectorConfig::from_parity_config(&trace_types), - move |tx_info, inspector, res, state, db| { - let mut full_trace = - inspector.into_parity_builder().into_trace_results(&res, &trace_types); + move |tx_info, ctx| { + let mut full_trace = ctx + .inspector + .into_parity_builder() + .into_trace_results(&ctx.result, &trace_types); // If statediffs were requested, populate them with the account balance and // nonce from pre-state if let Some(ref mut state_diff) = full_trace.state_diff { - populate_state_diff(state_diff, db, state.iter()) + populate_state_diff(state_diff, &ctx.db, ctx.state.iter()) .map_err(Eth::Error::from_eth_err)?; } @@ -542,10 +546,10 @@ where block_id, None, OpcodeGasInspector::default, - move |tx_info, inspector, _res, _, _| { + move |tx_info, ctx| { let trace = TransactionOpcodeGas { transaction_hash: tx_info.hash.expect("tx hash is set"), - opcode_gas: inspector.opcode_gas_iter().collect(), + opcode_gas: ctx.inspector.opcode_gas_iter().collect(), }; Ok(trace) }, diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index 8c69aaf7e0b..e910e6a101e 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -10,7 +10,8 @@ use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_primitives_traits::NodePrimitives; use reth_rpc_api::TxPoolApiServer; -use reth_rpc_types_compat::TransactionCompat; +use reth_rpc_convert::{RpcConvert, RpcTypes}; +use reth_rpc_eth_api::RpcTransaction; use reth_transaction_pool::{ AllPoolTransactions, PoolConsensusTx, PoolTransaction, TransactionPool, }; @@ -36,18 +37,21 @@ impl TxPoolApi { impl TxPoolApi where Pool: TransactionPool> + 'static, - Eth: TransactionCompat>>, + Eth: RpcConvert>>, { - fn content(&self) -> Result, Eth::Error> { + fn content(&self) -> Result>, Eth::Error> { #[inline] fn insert( tx: &Tx, - content: &mut BTreeMap>, + content: &mut BTreeMap< + Address, + BTreeMap::TransactionResponse>, + >, resp_builder: &RpcTxB, ) -> Result<(), RpcTxB::Error> where Tx: PoolTransaction, - RpcTxB: TransactionCompat>, + RpcTxB: RpcConvert>, { content.entry(tx.sender()).or_default().insert( tx.nonce().to_string(), @@ -72,10 +76,10 @@ where } #[async_trait] -impl TxPoolApiServer for TxPoolApi +impl TxPoolApiServer> for TxPoolApi where Pool: TransactionPool> + 'static, - Eth: TransactionCompat>> + 'static, + Eth: RpcConvert>> + 'static, { /// Returns the number of transactions currently pending for inclusion in the next block(s), as /// well as the ones that are being scheduled for future execution only. 
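
As context for the signatures above: `content()` groups pool transactions by sender and then by stringified nonce, mirroring geth's `txpool` namespace. A sketch of that nesting with the generics spelled out; the `TxpoolContent` alias and `total_txs` helper are illustrative only and not part of this change.

use alloy_primitives::Address;
use std::collections::BTreeMap;

/// The `txpool_content` nesting: sender -> (nonce rendered as a string -> RPC transaction).
type TxpoolContent<T> = BTreeMap<Address, BTreeMap<String, T>>;

/// Counts every transaction in a `txpool_content`-style response.
fn total_txs<T>(content: &TxpoolContent<T>) -> usize {
    content.values().map(|by_nonce| by_nonce.len()).sum()
}
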
@@ -129,7 +133,7 @@ where async fn txpool_content_from( &self, from: Address, - ) -> RpcResult> { + ) -> RpcResult>> { trace!(target: "rpc::eth", ?from, "Serving txpool_contentFrom"); Ok(self.content().map_err(Into::into)?.remove_from(&from)) } @@ -139,7 +143,7 @@ where /// /// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_content) for more details /// Handler for `txpool_content` - async fn txpool_content(&self) -> RpcResult> { + async fn txpool_content(&self) -> RpcResult>> { trace!(target: "rpc::eth", "Serving txpool_content"); Ok(self.content().map_err(Into::into)?) } diff --git a/crates/scroll/alloy/evm/Cargo.toml b/crates/scroll/alloy/evm/Cargo.toml index 22b2ebd3a06..a62d0dbaaf3 100644 --- a/crates/scroll/alloy/evm/Cargo.toml +++ b/crates/scroll/alloy/evm/Cargo.toml @@ -42,6 +42,7 @@ reth-scroll-chainspec.workspace = true reth-scroll-evm.workspace = true [features] +default = ["std"] std = [ "alloy-evm/std", "alloy-primitives/std", diff --git a/crates/scroll/alloy/evm/src/block/curie.rs b/crates/scroll/alloy/evm/src/block/curie.rs index 61d873bac53..7e2e853ad42 100644 --- a/crates/scroll/alloy/evm/src/block/curie.rs +++ b/crates/scroll/alloy/evm/src/block/curie.rs @@ -102,7 +102,7 @@ pub(super) fn apply_curie_hard_fork(state: &mut State) -> Resu #[cfg(test)] mod tests { - use super::*; + use super::{super::assert_bytecode_eq, *}; use revm::{ database::{ states::{bundle_state::BundleRetention, plain_account::PlainStorage, StorageSlot}, @@ -155,8 +155,24 @@ mod tests { let expected_oracle_info = AccountInfo { code_hash, code: Some(bytecode.clone()), ..Default::default() }; - assert_eq!(oracle.original_info.unwrap(), oracle_pre_fork); - assert_eq!(oracle.info.unwrap(), expected_oracle_info); + // TODO: revert back to performing equality check on `AccountInfo` once we bump revm > v78 + let oracle_original_info = oracle.original_info.unwrap(); + assert_bytecode_eq( + oracle_original_info.code.as_ref().unwrap(), + oracle_pre_fork.code.as_ref().unwrap(), + ); + assert_eq!(oracle_original_info.balance, oracle_pre_fork.balance); + assert_eq!(oracle_original_info.nonce, oracle_pre_fork.nonce); + assert_eq!(oracle_original_info.code_hash, oracle_pre_fork.code_hash); + + let oracle_post_info = oracle.info.unwrap(); + assert_bytecode_eq( + oracle_post_info.code.as_ref().unwrap(), + expected_oracle_info.code.as_ref().unwrap(), + ); + assert_eq!(oracle_post_info.balance, expected_oracle_info.balance); + assert_eq!(oracle_post_info.nonce, expected_oracle_info.nonce); + assert_eq!(oracle_post_info.code_hash, expected_oracle_info.code_hash); // check oracle storage changeset let mut storage = oracle.storage.into_iter().collect::>(); @@ -172,7 +188,7 @@ mod tests { } // check deployed contract - assert_eq!(bundle.contracts.get(&code_hash).unwrap().clone(), bytecode); + assert_bytecode_eq(bundle.contracts.get(&code_hash).unwrap(), &bytecode); Ok(()) } diff --git a/crates/scroll/alloy/evm/src/block/feynman.rs b/crates/scroll/alloy/evm/src/block/feynman.rs index 4f4467507bb..e815f87cc5f 100644 --- a/crates/scroll/alloy/evm/src/block/feynman.rs +++ b/crates/scroll/alloy/evm/src/block/feynman.rs @@ -90,7 +90,7 @@ pub(super) fn apply_feynman_hard_fork( #[cfg(test)] mod tests { - use super::*; + use super::{super::assert_bytecode_eq, *}; use revm::{ database::{ states::{bundle_state::BundleRetention, plain_account::PlainStorage, StorageSlot}, @@ -158,8 +158,24 @@ mod tests { let expected_oracle_info = AccountInfo { code_hash, code: Some(bytecode.clone()), ..Default::default() }; - 
assert_eq!(oracle.original_info.unwrap(), oracle_pre_fork); - assert_eq!(oracle.info.unwrap(), expected_oracle_info); + // TODO: revert back to performing equality check on `AccountInfo` once we bump revm > v78 + let oracle_original_info = oracle.original_info.unwrap(); + assert_bytecode_eq( + oracle_original_info.code.as_ref().unwrap(), + oracle_pre_fork.code.as_ref().unwrap(), + ); + assert_eq!(oracle_original_info.balance, oracle_pre_fork.balance); + assert_eq!(oracle_original_info.nonce, oracle_pre_fork.nonce); + assert_eq!(oracle_original_info.code_hash, oracle_pre_fork.code_hash); + + let oracle_post_info = oracle.info.unwrap(); + assert_bytecode_eq( + oracle_post_info.code.as_ref().unwrap(), + expected_oracle_info.code.as_ref().unwrap(), + ); + assert_eq!(oracle_post_info.balance, expected_oracle_info.balance); + assert_eq!(oracle_post_info.nonce, expected_oracle_info.nonce); + assert_eq!(oracle_post_info.code_hash, expected_oracle_info.code_hash); // check oracle storage changeset let mut storage = oracle.storage.into_iter().collect::>(); @@ -175,7 +191,7 @@ mod tests { } // check deployed contract - assert_eq!(bundle.contracts.get(&code_hash).unwrap().clone(), bytecode); + assert_bytecode_eq(bundle.contracts.get(&code_hash).unwrap(), &bytecode); Ok(()) } diff --git a/crates/scroll/alloy/evm/src/block/mod.rs b/crates/scroll/alloy/evm/src/block/mod.rs index aa7448fc992..054b5a5b96d 100644 --- a/crates/scroll/alloy/evm/src/block/mod.rs +++ b/crates/scroll/alloy/evm/src/block/mod.rs @@ -150,7 +150,7 @@ where fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> { // set state clear flag if the block is after the Spurious Dragon hardfork. let state_clear_flag = - self.spec.is_spurious_dragon_active_at_block(self.evm.block().number); + self.spec.is_spurious_dragon_active_at_block(self.evm.block().number.to()); self.evm.db_mut().set_state_clear_flag(state_clear_flag); // load the l1 gas oracle contract in cache. @@ -164,7 +164,7 @@ where if self .spec .scroll_fork_activation(ScrollHardfork::Curie) - .transitions_at_block(self.evm.block().number) + .transitions_at_block(self.evm.block().number.to()) { if let Err(err) = apply_curie_hard_fork(self.evm.db_mut()) { return Err(BlockExecutionError::msg(format!( @@ -177,7 +177,7 @@ where if self .spec .scroll_fork_activation(ScrollHardfork::Feynman) - .active_at_timestamp(self.evm.block().timestamp) + .active_at_timestamp(self.evm.block().timestamp.to()) { if let Err(err) = apply_feynman_hard_fork(self.evm.db_mut()) { return Err(BlockExecutionError::msg(format!( @@ -214,14 +214,14 @@ where let block = self.evm.block(); // verify the transaction type is accepted by the current fork. 
-    if tx.tx().is_eip2930() && !chain_spec.is_curie_active_at_block(block.number) {
+    if tx.tx().is_eip2930() && !chain_spec.is_curie_active_at_block(block.number.to()) {
         return Err(BlockValidationError::InvalidTx {
             hash,
             error: Box::new(InvalidTransaction::Eip2930NotSupported),
         }
         .into())
     }
-    if tx.tx().is_eip1559() && !chain_spec.is_curie_active_at_block(block.number) {
+    if tx.tx().is_eip1559() && !chain_spec.is_curie_active_at_block(block.number.to()) {
         return Err(BlockValidationError::InvalidTx {
             hash,
             error: Box::new(InvalidTransaction::Eip1559NotSupported),
@@ -235,7 +235,9 @@ where
         }
         .into())
     }
-    if tx.tx().is_eip7702() && !chain_spec.is_euclid_v2_active_at_timestamp(block.timestamp) {
+    if tx.tx().is_eip7702() &&
+        !chain_spec.is_euclid_v2_active_at_timestamp(block.timestamp.to())
+    {
         return Err(BlockValidationError::InvalidTx {
             hash,
             error: Box::new(InvalidTransaction::Eip7702NotSupported),
@@ -401,3 +403,15 @@ where
         ScrollBlockExecutor::new(evm, ctx, &self.spec, &self.receipt_builder)
     }
 }
+
+// TODO: remove this when we bump revm > v78
+/// A helper function that asserts that two bytecode instances are equal.
+#[cfg(test)]
+fn assert_bytecode_eq(expected: &revm::bytecode::Bytecode, actual: &revm::bytecode::Bytecode) {
+    assert_eq!(expected.legacy_jump_table().unwrap().len, actual.legacy_jump_table().unwrap().len);
+    assert_eq!(
+        expected.legacy_jump_table().unwrap().table,
+        actual.legacy_jump_table().unwrap().table
+    );
+    assert_eq!(expected.bytecode(), actual.bytecode());
+}
diff --git a/crates/scroll/alloy/evm/src/lib.rs b/crates/scroll/alloy/evm/src/lib.rs
index 9aab63fc724..a3a0ff6fbfd 100644
--- a/crates/scroll/alloy/evm/src/lib.rs
+++ b/crates/scroll/alloy/evm/src/lib.rs
@@ -128,8 +128,7 @@ where
         tx: Self::Tx,
     ) -> Result, Self::Error> {
         if self.inspect {
-            self.inner.set_tx(tx.into());
-            self.inner.inspect_replay()
+            self.inner.inspect_tx(tx.into())
         } else {
             self.inner.transact(tx.into())
         }
diff --git a/crates/scroll/alloy/evm/src/system_caller.rs b/crates/scroll/alloy/evm/src/system_caller.rs
index 9da990d4c4d..83cd0033743 100644
--- a/crates/scroll/alloy/evm/src/system_caller.rs
+++ b/crates/scroll/alloy/evm/src/system_caller.rs
@@ -62,13 +62,13 @@ fn transact_blockhashes_contract_call(
     evm: &mut impl Evm,
 ) -> Result>, BlockExecutionError> {
     // if Feynman is not active at timestamp then no system transaction occurs.
-    if !spec.is_feynman_active_at_timestamp(evm.block().timestamp) {
+    if !spec.is_feynman_active_at_timestamp(evm.block().timestamp.to()) {
         return Ok(None);
     }
 
     // if the block number is zero (genesis block) then no system transaction may occur as per
     // EIP-2935
-    if evm.block().number == 0 {
+    if evm.block().number.to::<u64>() == 0u64 {
         return Ok(None);
     }
 
@@ -101,7 +101,6 @@ mod tests {
     use reth_scroll_evm::ScrollEvmConfig;
     use revm::{
         bytecode::Bytecode,
-        context::ContextTr,
         database::{EmptyDBTyped, State},
         state::AccountInfo,
         Database,
@@ -151,7 +150,7 @@ mod tests {
         system_caller.apply_blockhashes_contract_call(block.parent_hash, &mut evm).unwrap();
 
         // assert the storage slot remains unchanged.
-        let parent_hash = evm.db().storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap();
+        let parent_hash = evm.db_mut().storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap();
         assert_eq!(parent_hash, U256::ZERO);
     }
 
@@ -197,7 +196,7 @@ mod tests {
         system_caller.apply_blockhashes_contract_call(block.parent_hash, &mut evm).unwrap();
 
         // assert the hash is written to storage.
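
A note on the recurring `.to()` calls in these Scroll hunks: with the revm bump in this PR, `BlockEnv::number` and `BlockEnv::timestamp` are `U256` rather than `u64`, so hardfork checks now convert at the call site. A minimal sketch of both directions, assuming revm's `BlockEnv` as used above (helper names are invented; `to::<u64>()` panics if the value does not fit, while `saturating_to` clamps instead):

use alloy_primitives::U256;
use revm::context::BlockEnv;

/// What calls like `is_curie_active_at_block(block.number.to())` resolve to.
fn block_number_u64(env: &BlockEnv) -> u64 {
    env.number.to::<u64>()
}

/// The inverse direction, as used when building a `BlockEnv` from a header.
fn block_number_u256(header_number: u64) -> U256 {
    U256::from(header_number)
}
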
- let parent_hash = evm.db().storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap(); + let parent_hash = evm.db_mut().storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap(); assert_eq!(Into::::into(parent_hash), block.parent_hash); } } diff --git a/crates/scroll/alloy/network/src/lib.rs b/crates/scroll/alloy/network/src/lib.rs index 102dd4247d1..3bfe222c55f 100644 --- a/crates/scroll/alloy/network/src/lib.rs +++ b/crates/scroll/alloy/network/src/lib.rs @@ -58,6 +58,10 @@ impl TransactionBuilder for ScrollTransactionRequest { self.as_mut().set_nonce(nonce); } + fn take_nonce(&mut self) -> Option { + self.as_mut().nonce.take() + } + fn input(&self) -> Option<&Bytes> { self.as_ref().input() } diff --git a/crates/scroll/cli/Cargo.toml b/crates/scroll/cli/Cargo.toml index b5c79e24940..88e102d194e 100644 --- a/crates/scroll/cli/Cargo.toml +++ b/crates/scroll/cli/Cargo.toml @@ -26,8 +26,7 @@ reth-tracing.workspace = true # scroll reth-scroll-chainspec.workspace = true reth-scroll-evm.workspace = true -reth-scroll-node.workspace = true -reth-scroll-primitives.workspace = true +reth-scroll-primitives = { workspace = true, features = ["reth-codec"] } scroll-alloy-consensus = { workspace = true, optional = true } # misc diff --git a/crates/scroll/cli/src/lib.rs b/crates/scroll/cli/src/lib.rs index 03c748dd6e2..4313f44bcaf 100644 --- a/crates/scroll/cli/src/lib.rs +++ b/crates/scroll/cli/src/lib.rs @@ -22,7 +22,6 @@ use reth_node_core::{ use reth_node_metrics::recorder::install_prometheus_recorder; use reth_scroll_chainspec::ScrollChainSpec; use reth_scroll_evm::ScrollExecutorProvider; -use reth_scroll_node::ScrollNetworkPrimitives; use reth_scroll_primitives::ScrollPrimitives; use reth_tracing::FileWorkerGuard; use std::{ffi::OsString, fmt, future::Future, sync::Arc}; @@ -128,16 +127,14 @@ where runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::Import(command) => { - runner.run_blocking_until_ctrl_c(command.execute::(components)) + runner.run_blocking_until_ctrl_c(command.execute::(components)) } Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::()), - Commands::Stage(command) => runner.run_command_until_exit(|ctx| { - command.execute::(ctx, components) - }), - Commands::P2P(command) => { - runner.run_until_ctrl_c(command.execute::()) + Commands::Stage(command) => { + runner.run_command_until_exit(|ctx| command.execute::(ctx, components)) } + Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), Commands::Recover(command) => { runner.run_command_until_exit(|ctx| command.execute::(ctx)) diff --git a/crates/scroll/evm/Cargo.toml b/crates/scroll/evm/Cargo.toml index e7f5d9c840b..450681ed57c 100644 --- a/crates/scroll/evm/Cargo.toml +++ b/crates/scroll/evm/Cargo.toml @@ -50,7 +50,6 @@ eyre.workspace = true alloy-primitives = { workspace = true, features = ["getrandom"] } [features] -default = ["std"] std = [ "scroll-alloy-consensus/std", "scroll-alloy-evm/std", diff --git a/crates/scroll/evm/src/build.rs b/crates/scroll/evm/src/build.rs index 58c8e31d00b..2645dad697b 100644 --- a/crates/scroll/evm/src/build.rs +++ b/crates/scroll/evm/src/build.rs @@ -68,14 +68,14 @@ where receipts_root, withdrawals_root: None, logs_bloom, - timestamp, + timestamp: timestamp.to(), mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(), nonce: BEACON_NONCE.into(), base_fee_per_gas: self .chain_spec - 
.is_curie_active_at_block(evm_env.block_env.number) + .is_curie_active_at_block(evm_env.block_env.number.to()) .then_some(evm_env.block_env.basefee), - number: evm_env.block_env.number, + number: evm_env.block_env.number.to(), gas_limit: evm_env.block_env.gas_limit, difficulty: evm_env.block_env.difficulty, gas_used: *gas_used, diff --git a/crates/scroll/evm/src/config.rs b/crates/scroll/evm/src/config.rs index 2b550582bac..e9baba86dff 100644 --- a/crates/scroll/evm/src/config.rs +++ b/crates/scroll/evm/src/config.rs @@ -68,9 +68,9 @@ where }; let block_env = BlockEnv { - number: header.number(), + number: U256::from(header.number()), beneficiary: coinbase, - timestamp: header.timestamp(), + timestamp: U256::from(header.timestamp()), difficulty: header.difficulty(), prevrandao: header.mix_hash(), gas_limit: header.gas_limit(), @@ -106,9 +106,9 @@ where }; let block_env = BlockEnv { - number: parent.number() + 1, + number: U256::from(parent.number() + 1), beneficiary: coinbase, - timestamp: attributes.timestamp, + timestamp: U256::from(attributes.timestamp), difficulty: U256::ONE, prevrandao: Some(B256::ZERO), gas_limit: attributes.gas_limit, @@ -238,9 +238,9 @@ mod tests { // verify block env correctly updated let expected = BlockEnv { - number: header.number, + number: U256::from(header.number), beneficiary: config.chain_spec().config.fee_vault_address.unwrap(), - timestamp: header.timestamp, + timestamp: U256::from(header.timestamp), prevrandao: Some(header.mix_hash), difficulty: U256::ZERO, basefee: header.base_fee_per_gas.unwrap_or_default(), @@ -286,9 +286,9 @@ mod tests { // verify block env let expected = BlockEnv { - number: header.number + 1, + number: U256::from(header.number + 1), beneficiary: config.chain_spec().config.fee_vault_address.unwrap(), - timestamp: attributes.timestamp, + timestamp: U256::from(attributes.timestamp), prevrandao: Some(B256::ZERO), difficulty: U256::ONE, basefee: 155157341, diff --git a/crates/scroll/evm/src/execute.rs b/crates/scroll/evm/src/execute.rs index 939e4b1c892..8648e3d8453 100644 --- a/crates/scroll/evm/src/execute.rs +++ b/crates/scroll/evm/src/execute.rs @@ -404,8 +404,21 @@ mod tests { // assert oracle contract contains updated bytecode let oracle = bundle.state.get(&L1_GAS_PRICE_ORACLE_ADDRESS).unwrap().clone(); + let oracle_bytecode = oracle.info.unwrap().code.unwrap(); let bytecode = Bytecode::new_raw(CURIE_L1_GAS_PRICE_ORACLE_BYTECODE); - assert_eq!(oracle.info.unwrap().code.unwrap(), bytecode); + + // TODO: update when we bump to revm > v78 + // Note: Eq operator fails due to the presence of `table_ptr` in the `JumpTable` struct + // therefore we do a manual comparison. 
+ assert_eq!( + bytecode.legacy_jump_table().unwrap().len, + oracle_bytecode.legacy_jump_table().unwrap().len + ); + assert_eq!( + bytecode.legacy_jump_table().unwrap().table, + oracle_bytecode.legacy_jump_table().unwrap().table + ); + assert_eq!(bytecode.bytecode(), oracle_bytecode.bytecode()); // check oracle contract contains storage changeset let mut storage = oracle.storage.into_iter().collect::>(); diff --git a/crates/scroll/node/src/addons.rs b/crates/scroll/node/src/addons.rs index 880ee6a50ce..7e4a75b3396 100644 --- a/crates/scroll/node/src/addons.rs +++ b/crates/scroll/node/src/addons.rs @@ -134,6 +134,7 @@ impl ScrollAddOnsBuilder { ScrollEthApi::::builder(), Default::default(), Default::default(), + Default::default(), ), } } diff --git a/crates/scroll/openvm-compat/Cargo.lock b/crates/scroll/openvm-compat/Cargo.lock index 64cee04aeec..c5267e11ad0 100644 --- a/crates/scroll/openvm-compat/Cargo.lock +++ b/crates/scroll/openvm-compat/Cargo.lock @@ -22,9 +22,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19a9cc9d81ace3da457883b0bdf76776e55f1b84219a9e9d55c27ad308548d3f" +checksum = "5674914c2cfdb866c21cb0c09d82374ee39a1395cf512e7515f4c014083b3fff" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -35,9 +35,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.12" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bcb57295c4b632b6b3941a089ee82d00ff31ff9eb3eac801bf605ffddc81041" +checksum = "74a694d8be621ee12b45ae23e7f18393b9a1e04f1ba47a0136767cb8c955f7f8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -60,9 +60,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.12" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab669be40024565acb719daf1b2a050e6dc065fc0bec6050d97a81cdb860bd7" +checksum = "1647d47f59288584cc3b40eff3e7dde6af8c88a2fca8fe02c22de7b9ab218ffa" dependencies = [ "alloy-consensus", "alloy-eips", @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.12" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f853de9ca1819f54de80de5d03bfc1bb7c9fafcf092b480a654447141bc354d" +checksum = "715ae25d525c567481ba2fc97000415624836d516958b9c3f189f1e267d1d90a" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -132,9 +132,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.10.0" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "394b09cf3a32773eedf11828987f9c72dfa74545040be0422e3f5f09a2a3fab9" +checksum = "ff5aae4c6dc600734b206b175f3200085ee82dcdaa388760358830a984ca9869" dependencies = [ "alloy-consensus", "alloy-eips", @@ -149,22 +149,23 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.12" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8500bcc1037901953771c25cb77e0d4ec0bffd938d93a04715390230d21a612d" +checksum = "696a83af273bfc512e02693bd4b5056c8c57898328bd0ce594013fb864de4dcf" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-serde", "alloy-trie", "serde", + "serde_with", ] [[package]] name = "alloy-hardforks" -version = "0.2.7" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"977d2492ce210e34baf7b36afaacea272c96fbe6774c47e23f97d14033c0e94f" +checksum = "819a3620fe125e0fff365363315ee5e24c23169173b19747dfd6deba33db8990" dependencies = [ "alloy-chains", "alloy-eip2124", @@ -175,9 +176,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.12" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eef189583f4c53d231dd1297b28a675ff842b551fb34715f562868a1937431a" +checksum = "35648c318b4649d2d141d1ed4f6e32c69f4959bdc2f6e44d53c0a333ed615a37" dependencies = [ "alloy-consensus", "alloy-eips", @@ -199,7 +200,7 @@ dependencies = [ "derive_more", "foldhash", "hashbrown 0.15.4", - "indexmap 2.9.0", + "indexmap 2.10.0", "itoa", "k256", "keccak-asm", @@ -237,9 +238,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.12" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5b09d86d0c015cb8400c5d1d0483425670bef4fc1260336aea9ef6d4b9540c" +checksum = "3ed717902ec7e7e5b737cf416f29c21f43a4e86db90ff6fddde199f4ed6ea1ac" dependencies = [ "alloy-consensus", "alloy-eips", @@ -251,9 +252,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.12" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1826285e4ffc2372a8c061d5cc145858e67a0be3309b768c5b77ddb6b9e6cbc7" +checksum = "c8300d59b0126876a1914102c588f9a4792eb4c754d483a954dc29904ddf79d6" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -271,9 +272,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.12" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "906ce0190afeded19cb2e963cb8507c975a7862216b9e74f39bf91ddee6ae74b" +checksum = "8070bc2af2d48969e3aa709ea3ebf1f8316176b91c2132efe33d113f74383a9e" dependencies = [ "alloy-primitives", "serde", @@ -303,7 +304,7 @@ dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.9.0", + "indexmap 2.10.0", "proc-macro-error2", "proc-macro2", "quote", @@ -340,9 +341,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "983d99aa81f586cef9dae38443245e585840fcf0fc58b09aee0b1f27aed1d500" +checksum = "bada1fc392a33665de0dc50d401a3701b62583c655e3522a323490a5da016962" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -356,9 +357,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.12" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b75ef8609ea2b31c799b0a56c724dca4c73105c5ccc205d9dfeb1d038df6a1da" +checksum = "472e12600c46b766110edd8382b4804d70188870f064531ee8fd61a35ed18686" dependencies = [ "alloy-primitives", "darling", @@ -820,9 +821,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.27" +version = "1.2.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d487aa071b5f64da6f19a3e848e3578944b726ee5a4854b82172f02aa876bfdc" +checksum = "5c1599538de2394445747c8cf7935946e3cc27e9625f889d979bfb2aaf569362" dependencies = [ "jobserver", "libc", @@ -1584,9 +1585,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" 
dependencies = [ "equivalent", "hashbrown 0.15.4", @@ -1866,13 +1867,14 @@ dependencies = [ [[package]] name = "nybbles" -version = "0.3.4" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +checksum = "675b3a54e5b12af997abc8b6638b0aee51a28caedab70d4967e0d5db3a3f1d06" dependencies = [ "alloy-rlp", - "const-hex", + "cfg-if", "proptest", + "ruint", "serde", "smallvec", ] @@ -1889,9 +1891,9 @@ dependencies = [ [[package]] name = "op-alloy-consensus" -version = "0.17.2" +version = "0.18.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2423a125ef2daa0d15dacc361805a0b6f76d6acfc6e24a1ff6473582087fe75" +checksum = "a8719d9b783b29cfa1cf8d591b894805786b9ab4940adc700a57fd0d5b721cf5" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2340,7 +2342,7 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reth-chainspec" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -2359,7 +2361,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2376,7 +2378,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.4.8" +version = "1.5.0" dependencies = [ "convert_case", "proc-macro2", @@ -2386,7 +2388,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -2395,7 +2397,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -2406,7 +2408,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2420,7 +2422,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2441,7 +2443,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2458,7 +2460,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-evm", "alloy-primitives", @@ -2470,7 +2472,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2485,7 +2487,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -2496,7 +2498,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "once_cell", @@ -2508,13 +2510,14 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-genesis", "alloy-primitives", "alloy-rlp", + "alloy-rpc-types-eth", "alloy-trie", "auto_impl", "bytes", @@ -2534,7 +2537,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "derive_more", @@ -2543,7 +2546,7 @@ dependencies = [ [[package]] name = "reth-scroll-chainspec" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ 
-2566,7 +2569,7 @@ dependencies = [ [[package]] name = "reth-scroll-evm" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2593,7 +2596,7 @@ dependencies = [ [[package]] name = "reth-scroll-forks" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -2605,7 +2608,7 @@ dependencies = [ [[package]] name = "reth-scroll-primitives" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2621,7 +2624,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "reth-trie-common", @@ -2629,7 +2632,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "derive_more", @@ -2639,7 +2642,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2660,7 +2663,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -2675,7 +2678,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2696,7 +2699,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -2711,7 +2714,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -2726,15 +2729,15 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.4.8" +version = "1.5.0" dependencies = [ "zstd", ] [[package]] name = "revm" -version = "24.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "26.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "revm-bytecode", "revm-context", @@ -2751,8 +2754,8 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "4.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "5.0.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "bitvec", "once_cell", @@ -2763,8 +2766,8 @@ dependencies = [ [[package]] name = "revm-context" -version = "5.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "7.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "cfg-if", "derive-where", @@ -2777,8 +2780,8 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "5.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "7.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -2791,8 +2794,8 @@ dependencies = [ [[package]] name = "revm-database" -version = "4.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" 
+version = "6.0.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "revm-bytecode", "revm-database-interface", @@ -2802,8 +2805,8 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "4.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "6.0.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "auto_impl", "revm-primitives", @@ -2812,10 +2815,11 @@ dependencies = [ [[package]] name = "revm-handler" -version = "5.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "7.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "auto_impl", + "derive-where", "revm-bytecode", "revm-context", "revm-context-interface", @@ -2828,10 +2832,11 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "5.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "7.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "auto_impl", + "either", "revm-context", "revm-database-interface", "revm-handler", @@ -2842,8 +2847,8 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "20.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "22.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -2852,8 +2857,8 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "21.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "23.0.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -2872,8 +2877,8 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "19.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "20.0.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "alloy-primitives", "num_enum", @@ -2883,7 +2888,7 @@ dependencies = [ [[package]] name = "revm-scroll" version = "0.1.0" -source = "git+https://github.com/scroll-tech/scroll-revm#0195a04190cef78901ed67c7bc3048114034f366" +source = "git+https://github.com/scroll-tech/scroll-revm?branch=feat%2Fv78#c0609bc9e8cb23aba8f560a82e040a49726cf760" dependencies = [ "auto_impl", "enumn", @@ -2895,8 +2900,8 @@ dependencies = [ [[package]] name = "revm-state" -version = "4.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v74#774616019e9562b12cbe1c3f1cdd110793f8084c" +version = "6.0.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" dependencies = [ "bitflags", "revm-bytecode", @@ -3045,9 +3050,21 @@ dependencies = [ "serde_json", ] +[[package]] +name = "schemars" +version = "1.0.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + [[package]] name = "scroll-alloy-consensus" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3062,7 +3079,7 @@ dependencies = [ [[package]] name = "scroll-alloy-evm" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3077,7 +3094,7 @@ dependencies = [ [[package]] name = "scroll-alloy-hardforks" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-hardforks", "auto_impl", @@ -3085,7 +3102,7 @@ dependencies = [ [[package]] name = "scroll-alloy-rpc-types" -version = "1.4.8" +version = "1.5.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3193,16 +3210,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf65a400f8f66fb7b0552869ad70157166676db75ed8181f8104ea91cf9d0b42" +checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" dependencies = [ "base64", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.9.0", - "schemars", + "indexmap 2.10.0", + "schemars 0.9.0", + "schemars 1.0.4", "serde", "serde_derive", "serde_json", @@ -3212,9 +3230,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81679d9ed988d5e9a5e6531dc3f2c28efbd639cbd1dfb628df08edea6004da77" +checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" dependencies = [ "darling", "proc-macro2", @@ -3518,7 +3536,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.10.0", "toml_datetime", "winnow", ] diff --git a/crates/scroll/openvm-compat/Cargo.toml b/crates/scroll/openvm-compat/Cargo.toml index 067990ba5fb..5f136c1fca4 100644 --- a/crates/scroll/openvm-compat/Cargo.toml +++ b/crates/scroll/openvm-compat/Cargo.toml @@ -28,4 +28,4 @@ scroll-alloy-consensus = { path = "../alloy/consensus", default-features = false scroll-alloy-rpc-types = { path = "../alloy/rpc-types", default-features = false } [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" } +revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" } diff --git a/crates/scroll/primitives/Cargo.toml b/crates/scroll/primitives/Cargo.toml index d9ee3c03a12..be21143bb6a 100644 --- a/crates/scroll/primitives/Cargo.toml +++ b/crates/scroll/primitives/Cargo.toml @@ -60,7 +60,6 @@ std = [ reth-codec = [ "dep:reth-codecs", "std", - "dep:arbitrary", "reth-primitives-traits/reth-codec", "scroll-alloy-consensus/reth-codec", "dep:bytes", diff --git a/crates/scroll/primitives/src/receipt.rs b/crates/scroll/primitives/src/receipt.rs index 405e33eba23..6e214b31089 100644 --- a/crates/scroll/primitives/src/receipt.rs +++ b/crates/scroll/primitives/src/receipt.rs @@ -353,8 +353,6 @@ impl InMemorySize for ScrollReceipt { } } -impl reth_primitives_traits::Receipt for ScrollReceipt {} - #[cfg(feature = "serde-bincode-compat")] impl reth_primitives_traits::serde_bincode_compat::SerdeBincodeCompat for ScrollReceipt { type BincodeRepr<'a> = Self; diff --git 
a/crates/scroll/rpc/Cargo.toml b/crates/scroll/rpc/Cargo.toml index f1fbbc9289c..3abfd428dc3 100644 --- a/crates/scroll/rpc/Cargo.toml +++ b/crates/scroll/rpc/Cargo.toml @@ -22,6 +22,7 @@ reth-rpc-eth-types.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true reth-rpc.workspace = true +reth-rpc-convert = { workspace = true, features = ["scroll"] } reth-node-api.workspace = true reth-node-builder.workspace = true reth-network-api.workspace = true diff --git a/crates/scroll/rpc/src/error.rs b/crates/scroll/rpc/src/error.rs index 63570531669..db577f7b7b8 100644 --- a/crates/scroll/rpc/src/error.rs +++ b/crates/scroll/rpc/src/error.rs @@ -2,6 +2,7 @@ use alloy_rpc_types_eth::BlockError; use reth_evm::execute::ProviderError; +use reth_rpc_convert::transaction::EthTxEnvError; use reth_rpc_eth_api::{AsEthApiError, TransactionConversionError}; use reth_rpc_eth_types::{error::api::FromEvmHalt, EthApiError}; use revm::context::result::{EVMError, HaltReason}; @@ -30,6 +31,12 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { } } +impl From for ScrollEthApiError { + fn from(value: EthTxEnvError) -> Self { + Self::Eth(EthApiError::from(value)) + } +} + impl From for ScrollEthApiError { fn from(error: BlockError) -> Self { Self::Eth(error.into()) diff --git a/crates/scroll/rpc/src/eth/call.rs b/crates/scroll/rpc/src/eth/call.rs index 2edd64d7e0a..2181ed0f01f 100644 --- a/crates/scroll/rpc/src/eth/call.rs +++ b/crates/scroll/rpc/src/eth/call.rs @@ -1,24 +1,17 @@ use super::ScrollNodeCore; use crate::{ScrollEthApi, ScrollEthApiError}; -use alloy_consensus::transaction::Either; -use alloy_primitives::{TxKind, U256}; use alloy_rpc_types_eth::transaction::TransactionRequest; -use reth_evm::{block::BlockExecutorFactory, ConfigureEvm, EvmEnv, EvmFactory, SpecFor}; +use reth_evm::{block::BlockExecutorFactory, ConfigureEvm, EvmFactory, TxEnvFor}; use reth_primitives_traits::NodePrimitives; -use reth_provider::{ProviderHeader, ProviderTx}; +use reth_provider::{errors::ProviderError, ProviderHeader, ProviderTx}; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall, LoadBlock, LoadState, SpawnBlocking}, - FromEthApiError, FullEthApiTypes, IntoEthApiError, + FullEthApiTypes, RpcConvert, RpcTypes, }; -use reth_rpc_eth_types::{ - error::FromEvmError, revm_utils::CallFees, EthApiError, RpcInvalidTransactionError, -}; -use revm::{ - context::{Block, TxEnv}, - Database, -}; -use scroll_alloy_evm::{ScrollTransactionIntoTxEnv, TX_L1_FEE_PRECISION_U256}; +use reth_rpc_eth_types::error::FromEvmError; +use revm::context::TxEnv; +use scroll_alloy_evm::ScrollTransactionIntoTxEnv; impl EthCall for ScrollEthApi where @@ -47,7 +40,11 @@ where EvmFactory: EvmFactory>, >, >, - Error: FromEvmError, + RpcConvert: RpcConvert, Network = Self::NetworkTypes>, + NetworkTypes: RpcTypes>, + Error: FromEvmError + + From<::Error> + + From, > + SpawnBlocking, Self::Error: From, N: ScrollNodeCore, @@ -61,101 +58,4 @@ where fn max_simulate_blocks(&self) -> u64 { self.inner.eth_api.max_simulate_blocks() } - - fn create_txn_env( - &self, - evm_env: &EvmEnv>, - request: TransactionRequest, - mut db: impl Database>, - ) -> Result, Self::Error> { - // Ensure that if versioned hashes are set, they're not empty - if request.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) { - return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into_eth_err()) - } - - let tx_type = request.preferred_type() as u8; - - let TransactionRequest 
{ - from, - to, - gas_price, - max_fee_per_gas, - max_priority_fee_per_gas, - gas, - value, - input, - nonce, - access_list, - chain_id, - blob_versioned_hashes, - max_fee_per_blob_gas, - authorization_list, - transaction_type: _, - sidecar: _, - } = request; - - let CallFees { max_priority_fee_per_gas, gas_price, max_fee_per_blob_gas } = - CallFees::ensure_fees( - gas_price.map(U256::from), - max_fee_per_gas.map(U256::from), - max_priority_fee_per_gas.map(U256::from), - U256::from(evm_env.block_env.basefee), - blob_versioned_hashes.as_deref(), - max_fee_per_blob_gas.map(U256::from), - evm_env.block_env.blob_gasprice().map(U256::from), - )?; - - let gas_limit = gas.unwrap_or( - // Use maximum allowed gas limit. The reason for this - // is that both Erigon and Geth use pre-configured gas cap even if - // it's possible to derive the gas limit from the block: - // - evm_env.block_env.gas_limit, - ); - - let chain_id = chain_id.unwrap_or(evm_env.cfg_env.chain_id); - - let caller = from.unwrap_or_default(); - - let nonce = if let Some(nonce) = nonce { - nonce - } else { - db.basic(caller).map_err(Into::into)?.map(|acc| acc.nonce).unwrap_or_default() - }; - - let base = TxEnv { - tx_type, - gas_limit, - nonce, - caller, - gas_price: gas_price.saturating_to(), - gas_priority_fee: max_priority_fee_per_gas.map(|v| v.saturating_to()), - kind: to.unwrap_or(TxKind::Create), - value: value.unwrap_or_default(), - data: input - .try_into_unique_input() - .map_err(Self::Error::from_eth_err)? - .unwrap_or_default(), - chain_id: Some(chain_id), - access_list: access_list.unwrap_or_default(), - // EIP-4844 fields - blob_hashes: blob_versioned_hashes.unwrap_or_default(), - max_fee_per_blob_gas: max_fee_per_blob_gas - .map(|v| v.saturating_to()) - .unwrap_or_default(), - // EIP-7702 fields - authorization_list: authorization_list - .unwrap_or_default() - .into_iter() - .map(Either::Left) - .collect(), - }; - - Ok(ScrollTransactionIntoTxEnv::new( - base, - Some(Default::default()), - Some(TX_L1_FEE_PRECISION_U256), - )) - } } diff --git a/crates/scroll/rpc/src/eth/mod.rs b/crates/scroll/rpc/src/eth/mod.rs index c89c1509fbb..1eae675040c 100644 --- a/crates/scroll/rpc/src/eth/mod.rs +++ b/crates/scroll/rpc/src/eth/mod.rs @@ -68,8 +68,7 @@ pub struct ScrollEthApi { inner: Arc>, /// Marker for the network types. _nt: PhantomData, - tx_resp_builder: - RpcConverter>, + tx_resp_builder: RpcConverter>, } impl ScrollEthApi { @@ -110,14 +109,14 @@ where Self: Send + Sync + fmt::Debug, N: ScrollNodeCore, NetworkT: Network + Clone + fmt::Debug, + ::Evm: fmt::Debug, ::Primitives: fmt::Debug, { type Error = ScrollEthApiError; type NetworkTypes = Scroll; - type TransactionCompat = - RpcConverter>; + type RpcConvert = RpcConverter>; - fn tx_resp_builder(&self) -> &Self::TransactionCompat { + fn tx_resp_builder(&self) -> &Self::RpcConvert { &self.tx_resp_builder } } @@ -199,6 +198,7 @@ where Self: Send + Sync + Clone + 'static, N: ScrollNodeCore, NetworkT: Network, + ::Evm: fmt::Debug, ::Primitives: fmt::Debug, { #[inline] @@ -232,7 +232,7 @@ where } #[inline] - fn fee_history_cache(&self) -> &FeeHistoryCache { + fn fee_history_cache(&self) -> &FeeHistoryCache> { self.inner.eth_api.fee_history_cache() } } @@ -244,6 +244,7 @@ where Pool: TransactionPool, >, NetworkT: Network, + ::Evm: fmt::Debug, ::Primitives: fmt::Debug, { } @@ -261,7 +262,11 @@ where impl EthFees for ScrollEthApi where - Self: LoadFee, + Self: LoadFee< + Provider: ChainSpecProvider< + ChainSpec: EthChainSpec
>, + >, + >, N: ScrollNodeCore, { } diff --git a/crates/scroll/rpc/src/eth/pending_block.rs b/crates/scroll/rpc/src/eth/pending_block.rs index 9bdb6b9b7d4..7b0323288ea 100644 --- a/crates/scroll/rpc/src/eth/pending_block.rs +++ b/crates/scroll/rpc/src/eth/pending_block.rs @@ -13,7 +13,7 @@ use reth_provider::{ use reth_rpc_eth_api::{ helpers::{LoadPendingBlock, SpawnBlocking}, types::RpcTypes, - EthApiTypes, RpcNodeCore, + EthApiTypes, RpcConvert, RpcNodeCore, }; use reth_rpc_eth_types::{error::FromEvmError, PendingBlock}; use reth_scroll_evm::ScrollNextBlockEnvAttributes; @@ -29,6 +29,7 @@ where Header = alloy_rpc_types_eth::Header>, >, Error: FromEvmError, + RpcConvert: RpcConvert, >, N: RpcNodeCore< Provider: BlockReaderIdExt< diff --git a/crates/scroll/rpc/src/eth/transaction.rs b/crates/scroll/rpc/src/eth/transaction.rs index d044b1c7d33..3eaac929e22 100644 --- a/crates/scroll/rpc/src/eth/transaction.rs +++ b/crates/scroll/rpc/src/eth/transaction.rs @@ -11,10 +11,10 @@ use reth_node_api::FullNodeComponents; use reth_provider::{ BlockReader, BlockReaderIdExt, ProviderTx, ReceiptProvider, TransactionsProvider, }; +use reth_rpc_convert::try_into_scroll_tx_info; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - try_into_scroll_tx_info, FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, - TxInfoMapper, + FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, TxInfoMapper, }; use reth_rpc_eth_types::utils::recover_raw_transaction; use reth_scroll_primitives::ScrollReceipt; diff --git a/crates/stages/api/src/error.rs b/crates/stages/api/src/error.rs index 92b1d974542..b4bbf390e22 100644 --- a/crates/stages/api/src/error.rs +++ b/crates/stages/api/src/error.rs @@ -4,7 +4,7 @@ use reth_consensus::ConsensusError; use reth_errors::{BlockExecutionError, DatabaseError, RethError}; use reth_network_p2p::error::DownloadError; use reth_provider::ProviderError; -use reth_prune::{PruneSegment, PruneSegmentError, PrunerError}; +use reth_prune::{PruneSegment, PruneSegmentError, PrunerError, UnwindTargetPrunedError}; use reth_static_file_types::StaticFileSegment; use thiserror::Error; use tokio::sync::broadcast::error::SendError; @@ -163,4 +163,7 @@ pub enum PipelineError { /// The pipeline encountered an unwind when `fail_on_unwind` was set to `true`. #[error("unexpected unwind")] UnexpectedUnwind, + /// Unwind target pruned error. 
+ #[error(transparent)] + UnwindTargetPruned(#[from] UnwindTargetPrunedError), } diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index a064dd471be..b8d41e9e552 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -7,7 +7,7 @@ pub use event::*; use futures_util::Future; use reth_primitives_traits::constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH; use reth_provider::{ - providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, + providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, BlockNumReader, ChainStateBlockReader, ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory, StageCheckpointReader, StageCheckpointWriter, }; @@ -294,6 +294,15 @@ impl Pipeline { to: BlockNumber, bad_block: Option, ) -> Result<(), PipelineError> { + // Add validation before starting unwind + let provider = self.provider_factory.provider()?; + let latest_block = provider.last_block_number()?; + + // Get the actual pruning configuration + let prune_modes = provider.prune_modes_ref(); + + prune_modes.ensure_unwind_target_unpruned(latest_block, to)?; + // Unwind stages in reverse order of execution let unwind_pipeline = self.stages.iter_mut().rev(); diff --git a/crates/stages/stages/benches/criterion.rs b/crates/stages/stages/benches/criterion.rs index c3905df2cc0..d755f2fdd04 100644 --- a/crates/stages/stages/benches/criterion.rs +++ b/crates/stages/stages/benches/criterion.rs @@ -114,7 +114,10 @@ fn merkle(c: &mut Criterion, runtime: &Runtime) { let db = setup::txs_testdata(DEFAULT_NUM_BLOCKS); - let stage = MerkleStage::::Both { clean_threshold: u64::MAX }; + let stage = MerkleStage::::Both { + rebuild_threshold: u64::MAX, + incremental_threshold: u64::MAX, + }; measure_stage( runtime, &mut group, @@ -125,7 +128,8 @@ fn merkle(c: &mut Criterion, runtime: &Runtime) { "Merkle-incremental".to_string(), ); - let stage = MerkleStage::::Both { clean_threshold: 0 }; + let stage = + MerkleStage::::Both { rebuild_threshold: 0, incremental_threshold: 0 }; measure_stage( runtime, &mut group, diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index 51b99f626ad..776b00ef420 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -418,7 +418,8 @@ where self.stages_config.etl.clone(), )) .add_stage(MerkleStage::new_execution( - self.stages_config.merkle.clean_threshold, + self.stages_config.merkle.rebuild_threshold, + self.stages_config.merkle.incremental_threshold, self.consensus, )) } diff --git a/crates/stages/stages/src/stages/era.rs b/crates/stages/stages/src/stages/era.rs index ea0ca7a5cd0..38b7f0c0db7 100644 --- a/crates/stages/stages/src/stages/era.rs +++ b/crates/stages/stages/src/stages/era.rs @@ -13,10 +13,7 @@ use reth_provider::{ BlockReader, BlockWriter, DBProvider, HeaderProvider, StageCheckpointWriter, StaticFileProviderFactory, StaticFileWriter, }; -use reth_stages_api::{ - CheckpointBlockRange, EntitiesCheckpoint, ExecInput, ExecOutput, HeadersCheckpoint, Stage, - StageError, UnwindInput, UnwindOutput, -}; +use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::ProviderError; use std::{ @@ -67,7 +64,7 @@ where let client = EraClient::new(Client::new(), url, folder); Self::convert(EraStream::new( - client.start_from(input.next_block()), + client, EraStreamConfig::default().start_from(input.next_block()), )) } 
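The unwind guard added to `Pipeline::unwind` above delegates the actual policy to `PruneModes::ensure_unwind_target_unpruned`. A minimal sketch of the kind of check this implies, where `PruneModes`, its per-segment map, and the error shape are simplified, hypothetical stand-ins rather than reth's actual definitions:

```rust
// Hypothetical sketch of an unwind-target check like the one the pipeline
// now performs; the types and names below are illustrative stand-ins.
use std::collections::HashMap;

#[derive(Debug)]
struct UnwindTargetPrunedError {
    segment: &'static str,
    target: u64,
    earliest_available: u64,
}

/// Stand-in for a prune configuration: per segment, the lowest block that
/// still has its data on disk.
struct PruneModes {
    earliest_available: HashMap<&'static str, u64>,
}

impl PruneModes {
    /// Error out if unwinding to `to` would walk into pruned history.
    fn ensure_unwind_target_unpruned(
        &self,
        _latest_block: u64,
        to: u64,
    ) -> Result<(), UnwindTargetPrunedError> {
        for (&segment, &earliest_available) in &self.earliest_available {
            if to < earliest_available {
                return Err(UnwindTargetPrunedError { segment, target: to, earliest_available });
            }
        }
        Ok(())
    }
}

fn main() {
    let modes = PruneModes {
        earliest_available: HashMap::from([("account_history", 1_000_000)]),
    };
    // Unwinding below the pruned horizon is rejected; above it is allowed.
    assert!(modes.ensure_unwind_target_unpruned(2_000_000, 500_000).is_err());
    assert!(modes.ensure_unwind_target_unpruned(2_000_000, 1_500_000).is_ok());
}
```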
@@ -205,22 +202,12 @@ where self.hash_collector.clear(); } - provider.save_stage_checkpoint( - StageId::Headers, - StageCheckpoint::new(height).with_headers_stage_checkpoint(HeadersCheckpoint { - block_range: CheckpointBlockRange { - from: input.checkpoint().block_number, - to: height, - }, - progress: EntitiesCheckpoint { processed: height, total: input.target() }, - }), - )?; - provider.save_stage_checkpoint( - StageId::Bodies, - StageCheckpoint::new(height).with_entities_stage_checkpoint(EntitiesCheckpoint { - processed: height, - total: input.target(), - }), + era::save_stage_checkpoints( + &provider, + input.checkpoint().block_number, + height, + height, + input.target(), )?; height diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 6833eddc1f5..e5592cd8dec 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -1,4 +1,4 @@ -use crate::stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD; +use crate::stages::MERKLE_STAGE_DEFAULT_INCREMENTAL_THRESHOLD; use alloy_consensus::{BlockHeader, Header}; use alloy_primitives::BlockNumber; use num_traits::Zero; @@ -71,7 +71,6 @@ where evm_config: E, /// The consensus instance for validating blocks. consensus: Arc>, - /// The consensu /// The commit thresholds of the execution stage. thresholds: ExecutionStageThresholds, /// The highest threshold (in number of blocks) for switching between incremental @@ -119,7 +118,7 @@ where /// Create an execution stage with the provided executor. /// - /// The commit threshold will be set to [`MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD`]. + /// The commit threshold will be set to [`MERKLE_STAGE_DEFAULT_INCREMENTAL_THRESHOLD`]. pub fn new_with_executor( evm_config: E, consensus: Arc>, ) -> Self { Self::new( evm_config, consensus, ExecutionStageThresholds::default(), - MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, + MERKLE_STAGE_DEFAULT_INCREMENTAL_THRESHOLD, ExExManagerHandle::empty(), ) } @@ -656,7 +655,7 @@ fn calculate_gas_used_from_headers( #[cfg(test)] mod tests { use super::*; - use crate::test_utils::TestStageDB; + use crate::{stages::MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD, test_utils::TestStageDB}; use alloy_primitives::{address, hex_literal::hex, keccak256, Address, B256, U256}; use alloy_rlp::Decodable; use assert_matches::assert_matches; @@ -693,7 +692,7 @@ mod tests { max_cumulative_gas: None, max_duration: None, }, - MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, + MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD, ExExManagerHandle::empty(), ) } diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index ce310917630..7d5dd69d2bd 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -43,7 +43,13 @@ Once you have this information, please submit a github issue at https://github.c /// The default threshold (in number of blocks) for switching from incremental trie building /// of changes to whole rebuild. -pub const MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD: u64 = 5_000; +pub const MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD: u64 = 100_000; + +/// The default threshold (in number of blocks) to run the stage in incremental mode. In +/// incremental mode, the stage computes the state root for a large range of blocks in +/// batches of this many blocks at a time, repeating until it reaches the desired block +/// number.
+pub const MERKLE_STAGE_DEFAULT_INCREMENTAL_THRESHOLD: u64 = 7_000; /// The merkle hashing stage uses input from /// [`AccountHashingStage`][crate::stages::AccountHashingStage] and @@ -73,9 +79,15 @@ where { /// The execution portion of the merkle stage. Execution { + // TODO: make struct for holding incremental settings, for code reuse between `Execution` + // variant and `Both` /// The threshold (in number of blocks) for switching from incremental trie building /// of changes to whole rebuild. - clean_threshold: u64, + rebuild_threshold: u64, + /// The threshold (in number of blocks) to run the stage in incremental mode. In + /// incremental mode, the state root is computed in batches of this many blocks at a + /// time, repeating until the desired block number is reached. + incremental_threshold: u64, /// Consensus. consensus: Arc>, }, @@ -89,7 +101,11 @@ where Both { /// The threshold (in number of blocks) for switching from incremental trie building /// of changes to whole rebuild. - clean_threshold: u64, + rebuild_threshold: u64, + /// The threshold (in number of blocks) to run the stage in incremental mode. In + /// incremental mode, the state root is computed in batches of this many blocks at a + /// time, repeating until the desired block number is reached. + incremental_threshold: u64, }, } @@ -101,15 +117,20 @@ where pub const fn default_execution_with_consensus( consensus: Arc>, ) -> Self { - Self::Execution { clean_threshold: MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, consensus } + Self::Execution { + rebuild_threshold: MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD, + incremental_threshold: MERKLE_STAGE_DEFAULT_INCREMENTAL_THRESHOLD, + consensus, + } } /// Create new instance of [`MerkleStage::Execution`]. pub const fn new_execution( - clean_threshold: u64, + rebuild_threshold: u64, + incremental_threshold: u64, consensus: Arc>, ) -> Self { - Self::Execution { clean_threshold, consensus } + Self::Execution { rebuild_threshold, incremental_threshold, consensus } } /// Create new instance of [`MerkleStage::Unwind`]. @@ -185,14 +206,18 @@ where /// Execute the stage. fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { let (threshold, incremental_threshold) = match self { Self::Unwind { .. } => { info!(target: "sync::stages::merkle::unwind", "Stage is always skipped"); return Ok(ExecOutput::done(StageCheckpoint::new(input.target()))); } - Self::Execution { clean_threshold, .. } => *clean_threshold, + Self::Execution { rebuild_threshold, incremental_threshold, ..
} => { + (*rebuild_threshold, *incremental_threshold) + } #[cfg(any(test, feature = "test-utils"))] - Self::Both { clean_threshold } => *clean_threshold, + Self::Both { rebuild_threshold, incremental_threshold } => { + (*rebuild_threshold, *incremental_threshold) + } }; let range = input.next_block_range(); @@ -282,15 +307,33 @@ where } } } else { - debug!(target: "sync::stages::merkle::exec", current = ?current_block_number, target = ?to_block, "Updating trie"); - let (root, updates) = - StateRoot::incremental_root_with_updates(provider.tx_ref(), range) + debug!(target: "sync::stages::merkle::exec", current = ?current_block_number, target = ?to_block, "Updating trie in chunks"); + let mut final_root = None; + for start_block in range.step_by(incremental_threshold as usize) { + let chunk_to = std::cmp::min(start_block + incremental_threshold, to_block); + let chunk_range = start_block..=chunk_to; + debug!( + target: "sync::stages::merkle::exec", + current = ?current_block_number, + target = ?to_block, + incremental_threshold, + chunk_range = ?chunk_range, + "Processing chunk" + ); + let (root, updates) = + StateRoot::incremental_root_with_updates(provider.tx_ref(), chunk_range) .map_err(|e| { error!(target: "sync::stages::merkle", %e, ?current_block_number, ?to_block, "Incremental state root failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); StageError::Fatal(Box::new(e)) })?; + provider.write_trie_updates(&updates)?; + final_root = Some(root); + } - provider.write_trie_updates(&updates)?; + // if we had no final root, we must have not looped above, which should not be possible + let final_root = final_root.ok_or(StageError::Fatal( + "Incremental merkle hashing did not produce a final root".into(), + ))?; let total_hashed_entries = (provider.count_entries::()? + provider.count_entries::()?) 
@@ -303,8 +346,8 @@ where processed: total_hashed_entries, total: total_hashed_entries, }; - - (root, entities_checkpoint) + // Save the checkpoint + (final_root, entities_checkpoint) }; // Reset the checkpoint @@ -510,14 +553,79 @@ mod tests { assert!(runner.validate_execution(input, result.ok()).is_ok(), "execution validation"); } + #[tokio::test] + async fn execute_chunked_merkle() { + let (previous_stage, stage_progress) = (200, 100); + let clean_threshold = 100; + let incremental_threshold = 10; + + // Set up the runner + let mut runner = + MerkleTestRunner { db: TestStageDB::default(), clean_threshold, incremental_threshold }; + + let input = ExecInput { + target: Some(previous_stage), + checkpoint: Some(StageCheckpoint::new(stage_progress)), + }; + + runner.seed_execution(input).expect("failed to seed execution"); + let rx = runner.execute(input); + + // Assert the successful result + let result = rx.await.unwrap(); + assert_matches!( + result, + Ok(ExecOutput { + checkpoint: StageCheckpoint { + block_number, + stage_checkpoint: Some(StageUnitCheckpoint::Entities(EntitiesCheckpoint { + processed, + total + })) + }, + done: true + }) if block_number == previous_stage && processed == total && + total == ( + runner.db.table::().unwrap().len() + + runner.db.table::().unwrap().len() + ) as u64 + ); + + // Validate the stage execution + let provider = runner.db.factory.provider().unwrap(); + let header = provider.header_by_number(previous_stage).unwrap().unwrap(); + let expected_root = header.state_root; + + let actual_root = runner + .db + .query(|tx| { + Ok(StateRoot::incremental_root_with_updates( + tx, + stage_progress + 1..=previous_stage, + )) + }) + .unwrap(); + + assert_eq!( + actual_root.unwrap().0, + expected_root, + "State root mismatch after chunked processing" + ); + } + struct MerkleTestRunner { db: TestStageDB, clean_threshold: u64, + incremental_threshold: u64, } impl Default for MerkleTestRunner { fn default() -> Self { - Self { db: TestStageDB::default(), clean_threshold: 10000 } + Self { + db: TestStageDB::default(), + clean_threshold: 10000, + incremental_threshold: 10000, + } } } @@ -529,7 +637,10 @@ mod tests { } fn stage(&self) -> Self::S { - Self::S::Both { clean_threshold: self.clean_threshold } + Self::S::Both { + rebuild_threshold: self.clean_threshold, + incremental_threshold: self.incremental_threshold, + } } } diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index e1b952db79f..726609b2350 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -169,7 +169,7 @@ mod tests { max_cumulative_gas: None, max_duration: None, }, - MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, + MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD, ExExManagerHandle::empty(), ); diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index e55682a9c0e..e6bdb92cf20 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -599,7 +599,7 @@ mod tests { /// /// 1. If there are any entries in the [`tables::TransactionSenders`] table above a given /// block number. - /// 2. If the is no requested block entry in the bodies table, but + /// 2. If there is no requested block entry in the bodies table, but /// [`tables::TransactionSenders`] is not empty. 
fn ensure_no_senders_by_block(&self, block: BlockNumber) -> Result<(), TestRunnerError> { let body_result = self diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index 71a790ccb14..2010e5e3555 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -460,7 +460,7 @@ mod tests { /// /// 1. If there are any entries in the [`tables::TransactionHashNumbers`] table above a /// given block number. - /// 2. If the is no requested block entry in the bodies table, but + /// 2. If there is no requested block entry in the bodies table, but /// [`tables::TransactionHashNumbers`] is not empty. fn ensure_no_hash_by_block(&self, number: BlockNumber) -> Result<(), TestRunnerError> { let body_result = self diff --git a/crates/stateless/src/lib.rs b/crates/stateless/src/lib.rs index 254a2433ef1..35289f3cf51 100644 --- a/crates/stateless/src/lib.rs +++ b/crates/stateless/src/lib.rs @@ -37,6 +37,12 @@ extern crate alloc; /// Sparse trie implementation for stateless validation pub mod trie; + +#[doc(inline)] +pub use trie::StatelessTrie; +#[doc(inline)] +pub use validation::stateless_validation_with_trie; + /// Implementation of stateless validation pub mod validation; pub(crate) mod witness_db; diff --git a/crates/stateless/src/trie.rs b/crates/stateless/src/trie.rs index b6c57a4c72a..5a35e52a7f3 100644 --- a/crates/stateless/src/trie.rs +++ b/crates/stateless/src/trie.rs @@ -13,13 +13,42 @@ use reth_trie_sparse::{ SparseTrie, }; -/// `StatelessTrie` structure for usage during stateless validation +/// Trait for stateless trie implementations that can be used for stateless validation. +pub trait StatelessTrie: core::fmt::Debug { + /// Initialize the stateless trie using the `ExecutionWitness` + fn new( + witness: &ExecutionWitness, + pre_state_root: B256, + ) -> Result<(Self, B256Map), StatelessValidationError> + where + Self: Sized; + + /// Returns the `TrieAccount` that corresponds to the `Address` + /// + /// This method will error if the `ExecutionWitness` is not able to guarantee + /// that the account is missing from the Trie _and_ the witness was complete. + fn account(&self, address: Address) -> Result, ProviderError>; + + /// Returns the storage slot value that corresponds to the given (address, slot) tuple. + /// + /// This method will error if the `ExecutionWitness` is not able to guarantee + /// that the storage was missing from the Trie _and_ the witness was complete. + fn storage(&self, address: Address, slot: U256) -> Result; + + /// Computes the new state root from the `HashedPostState`. 
+ fn calculate_state_root( + &mut self, + state: HashedPostState, + ) -> Result; +} + +/// `StatelessSparseTrie` structure for usage during stateless validation #[derive(Debug)] -pub struct StatelessTrie { +pub struct StatelessSparseTrie { inner: SparseStateTrie, } -impl StatelessTrie { +impl StatelessSparseTrie { /// Initialize the stateless trie using the `ExecutionWitness` /// /// Note: Currently this method does not check that the `ExecutionWitness` @@ -99,6 +128,30 @@ impl StatelessTrie { } } +impl StatelessTrie for StatelessSparseTrie { + fn new( + witness: &ExecutionWitness, + pre_state_root: B256, + ) -> Result<(Self, B256Map), StatelessValidationError> { + Self::new(witness, pre_state_root) + } + + fn account(&self, address: Address) -> Result, ProviderError> { + self.account(address) + } + + fn storage(&self, address: Address, slot: U256) -> Result { + self.storage(address, slot) + } + + fn calculate_state_root( + &mut self, + state: HashedPostState, + ) -> Result { + self.calculate_state_root(state) + } +} + /// Verifies execution witness [`ExecutionWitness`] against an expected pre-state root. /// /// This function takes the RLP-encoded values provided in [`ExecutionWitness`] diff --git a/crates/stateless/src/validation.rs b/crates/stateless/src/validation.rs index de1af4cfce4..a2a93f38e26 100644 --- a/crates/stateless/src/validation.rs +++ b/crates/stateless/src/validation.rs @@ -1,4 +1,8 @@ -use crate::{trie::StatelessTrie, witness_db::WitnessDatabase, ExecutionWitness}; +use crate::{ + trie::{StatelessSparseTrie, StatelessTrie}, + witness_db::WitnessDatabase, + ExecutionWitness, +}; use alloc::{ boxed::Box, collections::BTreeMap, @@ -131,7 +135,32 @@ pub fn stateless_validation( evm_config: E, ) -> Result where - ChainSpec: Send + Sync + EthChainSpec + EthereumHardforks + Debug, + ChainSpec: Send + Sync + EthChainSpec
+ EthereumHardforks + Debug, + E: ConfigureEvm + Clone + 'static, +{ + stateless_validation_with_trie::( + current_block, + witness, + chain_spec, + evm_config, + ) +} + +/// Performs stateless validation of a block using a custom `StatelessTrie` implementation. +/// +/// This is a generic version of `stateless_validation` that allows users to provide their own +/// implementation of the `StatelessTrie` for custom trie backends or optimizations. +/// +/// See `stateless_validation` for detailed documentation of the validation process. +pub fn stateless_validation_with_trie( + current_block: Block, + witness: ExecutionWitness, + chain_spec: Arc, + evm_config: E, +) -> Result +where + T: StatelessTrie, + ChainSpec: Send + Sync + EthChainSpec
+ EthereumHardforks + Debug, E: ConfigureEvm + Clone + 'static, { let current_block = current_block @@ -169,7 +198,7 @@ where }; // First verify that the pre-state reads are correct - let (mut trie, bytecode) = StatelessTrie::new(&witness, pre_state_root)?; + let (mut trie, bytecode) = T::new(&witness, pre_state_root)?; // Create an in-memory database that will use the reads to validate the block let db = WitnessDatabase::new(&trie, bytecode, ancestor_hashes); @@ -222,7 +251,7 @@ fn validate_block_consensus( block: &RecoveredBlock, ) -> Result<(), StatelessValidationError> where - ChainSpec: Send + Sync + EthChainSpec + EthereumHardforks + Debug, + ChainSpec: Send + Sync + EthChainSpec
+ EthereumHardforks + Debug, { let consensus = EthBeaconConsensus::new(chain_spec); diff --git a/crates/stateless/src/witness_db.rs b/crates/stateless/src/witness_db.rs index 35585948b46..4a99c286ad3 100644 --- a/crates/stateless/src/witness_db.rs +++ b/crates/stateless/src/witness_db.rs @@ -11,13 +11,16 @@ use reth_revm::{bytecode::Bytecode, state::AccountInfo, Database}; /// /// This struct implements the [`reth_revm::Database`] trait, allowing the EVM to execute /// transactions using: -/// - Account and storage slot data provided by a [`StatelessTrie`]. +/// - Account and storage slot data provided by a [`StatelessTrie`] implementation. /// - Bytecode and ancestor block hashes provided by in-memory maps. /// /// This is designed for stateless execution scenarios where direct access to a full node's /// database is not available or desired. #[derive(Debug)] -pub(crate) struct WitnessDatabase<'a> { +pub(crate) struct WitnessDatabase<'a, T> +where + T: StatelessTrie, +{ /// Map of block numbers to block hashes. /// This is used to service the `BLOCKHASH` opcode. // TODO: use Vec instead -- ancestors should be contiguous @@ -32,10 +35,13 @@ pub(crate) struct WitnessDatabase<'a> { /// TODO: Ideally we do not have this trie and instead a simple map. /// TODO: Then as a corollary we can avoid unnecessary hashing in `Database::storage` /// TODO: and `Database::basic` without needing to cache the hashed Addresses and Keys - trie: &'a StatelessTrie, + trie: &'a T, } -impl<'a> WitnessDatabase<'a> { +impl<'a, T> WitnessDatabase<'a, T> +where + T: StatelessTrie, +{ /// Creates a new [`WitnessDatabase`] instance. /// /// # Assumptions @@ -50,7 +56,7 @@ impl<'a> WitnessDatabase<'a> { /// contiguous chain of blocks. The caller is responsible for verifying the contiguity and /// the block limit. pub(crate) const fn new( - trie: &'a StatelessTrie, + trie: &'a T, bytecode: B256Map, ancestor_hashes: BTreeMap, ) -> Self { @@ -58,25 +64,26 @@ impl<'a> WitnessDatabase<'a> { } } -impl Database for WitnessDatabase<'_> { +impl Database for WitnessDatabase<'_, T> +where + T: StatelessTrie, +{ /// The database error type. type Error = ProviderError; /// Get basic account information by hashing the address and looking up the account RLP - /// in the underlying [`StatelessTrie`]. + /// in the underlying [`StatelessTrie`] implementation. /// /// Returns `Ok(None)` if the account is not found in the trie. fn basic(&mut self, address: Address) -> Result, Self::Error> { - if let Some(account) = self.trie.account(address)? { - return Ok(Some(AccountInfo { + self.trie.account(address).map(|opt| { + opt.map(|account| AccountInfo { balance: account.balance, nonce: account.nonce, code_hash: account.code_hash, code: None, - })); - }; - - Ok(None) + }) + }) } /// Get storage value of an account at a specific slot. @@ -90,11 +97,9 @@ impl Database for WitnessDatabase<'_> { /// /// Returns an error if the bytecode for the given hash is not found in the map. fn code_by_hash(&mut self, code_hash: B256) -> Result { - let bytecode = self.bytecode.get(&code_hash).ok_or_else(|| { + self.bytecode.get(&code_hash).cloned().ok_or_else(|| { ProviderError::TrieWitnessError(format!("bytecode for {code_hash} not found")) - })?; - - Ok(bytecode.clone()) + }) } /// Get block hash by block number from the provided ancestor hashes map. 
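The `StatelessTrie` trait extraction above follows a common generic-dispatch pattern: the validation entry point becomes generic over the trie backend, and the original function survives as a thin wrapper that pins the default sparse-trie implementation. A condensed sketch of that pattern, with every type reduced to a hypothetical stand-in rather than reth's actual signatures:

```rust
// Sketch of the dispatch pattern behind `stateless_validation_with_trie`;
// all types here are simplified stand-ins for illustration.
type Root = [u8; 32];

trait StatelessTrie: Sized {
    fn new(pre_state_root: Root) -> Self;
    fn calculate_state_root(&mut self) -> Root;
}

#[derive(Debug)]
struct StatelessSparseTrie {
    root: Root,
}

impl StatelessTrie for StatelessSparseTrie {
    fn new(pre_state_root: Root) -> Self {
        Self { root: pre_state_root }
    }

    fn calculate_state_root(&mut self) -> Root {
        // A real implementation would apply the post-state here.
        self.root
    }
}

/// Generic entry point: callers may bring a custom trie backend.
fn stateless_validation_with_trie<T: StatelessTrie>(pre_state_root: Root) -> Root {
    let mut trie = T::new(pre_state_root);
    trie.calculate_state_root()
}

/// Convenience wrapper mirroring the non-generic `stateless_validation`.
fn stateless_validation(pre_state_root: Root) -> Root {
    stateless_validation_with_trie::<StatelessSparseTrie>(pre_state_root)
}

fn main() {
    assert_eq!(stateless_validation([0u8; 32]), [0u8; 32]);
}
```

This keeps `WitnessDatabase` and the consensus checks unchanged while letting zkVM or otherwise specialized trie backends slot in behind the same bound.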
diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 70c7324c29c..7edf2987ee4 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -128,7 +128,7 @@ impl Encode for StoredNibbles { fn encode(self) -> Self::Encoded { // NOTE: This used to be `to_compact`, but all it does is append the bytes to the buffer, // so we can just use the implementation of `Into<Vec<u8>>` to reuse the buffer. - self.0.into() + self.0.to_vec() } } diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index f07837ab347..d536e69a270 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -1250,6 +1250,34 @@ mod tests { } } + #[test] + fn db_walk_dup_with_not_existing_key() { + let env = create_test_db(DatabaseEnvKind::RW); + let key = Address::from_str("0xa2c122be93b0074270ebee7f6b7292c7deb45047") .expect(ERROR_ETH_ADDRESS); + + // PUT (0,0) + let value00 = StorageEntry::default(); + env.update(|tx| tx.put::<PlainStorageState>(key, value00).expect(ERROR_PUT)).unwrap(); + + // PUT (2,2) + let value22 = StorageEntry { key: B256::with_last_byte(2), value: U256::from(2) }; + env.update(|tx| tx.put::<PlainStorageState>(key, value22).expect(ERROR_PUT)).unwrap(); + + // PUT (1,1) + let value11 = StorageEntry { key: B256::with_last_byte(1), value: U256::from(1) }; + env.update(|tx| tx.put::<PlainStorageState>(key, value11).expect(ERROR_PUT)).unwrap(); + + // Walking with a non-existing key should immediately return None + { + let tx = env.tx().expect(ERROR_INIT_TX); + let mut cursor = tx.cursor_dup_read::<PlainStorageState>().unwrap(); + let not_existing_key = Address::ZERO; + let mut walker = cursor.walk_dup(Some(not_existing_key), None).unwrap(); + assert_eq!(walker.next(), None); + } + } + #[test] fn db_iterate_over_all_dup_values() { let env = create_test_db(DatabaseEnvKind::RW); diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 902208f1c73..9be857796f1 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -262,7 +262,7 @@ unsafe impl Sync for EnvironmentInner {} /// Determines how data is mapped into memory /// -/// It only takes affect when the environment is opened. +/// It only takes effect when the environment is opened. #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum EnvironmentKind { /// Open the environment in default mode, without WRITEMAP.
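The new `db_walk_dup_with_not_existing_key` test pins down a subtle cursor contract: a dup walk started at an absent key must be immediately exhausted rather than spilling into neighbouring keys. A rough model of that contract, using a plain `BTreeMap` as a stand-in for a dup-sorted MDBX table (the real cursor API differs; this only illustrates the expected semantics):

```rust
// Model of dup-walk semantics with a (key, subkey) -> value map standing in
// for an MDBX dup-sorted table.
use std::collections::BTreeMap;

fn walk_dup<'a>(
    table: &'a BTreeMap<(u64, u64), u64>,
    key: u64,
) -> impl Iterator<Item = (&'a (u64, u64), &'a u64)> {
    // Restrict the walk to entries whose primary key matches exactly.
    table.range((key, u64::MIN)..=(key, u64::MAX))
}

fn main() {
    let mut table = BTreeMap::new();
    table.insert((7, 0), 0);
    table.insert((7, 1), 1);
    table.insert((7, 2), 2);

    // Existing key: yields the three dup entries.
    assert_eq!(walk_dup(&table, 7).count(), 3);
    // Missing key: immediately exhausted, like `walker.next() == None`.
    assert_eq!(walk_dup(&table, 0).count(), 0);
}
```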
diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index bc7ef4587c8..daa272fdd1f 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -240,6 +240,7 @@ impl NippyJar { [self.data_path().into(), self.index_path(), self.offsets_path(), self.config_path()] { if path.exists() { + debug!(target: "nippy-jar", ?path, "Removing file."); reth_fs_util::remove_file(path)?; } } diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 88cb18ac445..5bc5e707153 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -261,6 +261,10 @@ impl BlockNumReader for BlockchainProvider { self.database.last_block_number() } + fn earliest_block_number(&self) -> ProviderResult { + self.database.earliest_block_number() + } + fn block_number(&self, hash: B256) -> ProviderResult> { self.consistent_provider()?.block_number(hash) } @@ -526,20 +530,12 @@ impl StateProviderFactory for BlockchainProvider { let hash = provider .block_hash(block_number)? .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; - self.history_by_block_hash(hash) + provider.into_state_provider_at_block_hash(hash) } fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult { trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); - - self.consistent_provider()?.get_in_memory_or_storage_by_block( - block_hash.into(), - |_| self.database.history_by_block_hash(block_hash), - |block_state| { - let state_provider = self.block_state_provider(block_state)?; - Ok(Box::new(state_provider)) - }, - ) + self.consistent_provider()?.into_state_provider_at_block_hash(block_hash) } fn state_by_block_hash(&self, hash: BlockHash) -> ProviderResult { @@ -599,7 +595,9 @@ impl StateProviderFactory for BlockchainProvider { let hash = self.safe_block_hash()?.ok_or(ProviderError::SafeBlockNotFound)?; self.state_by_block_hash(hash) } - BlockNumberOrTag::Earliest => self.history_by_block_number(0), + BlockNumberOrTag::Earliest => { + self.history_by_block_number(self.earliest_block_number()?) + } BlockNumberOrTag::Pending => self.pending(), BlockNumberOrTag::Number(num) => { let hash = self diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index b4fcfa6c7ff..3922e286c29 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -27,7 +27,7 @@ use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ BlockBodyIndicesProvider, DatabaseProviderFactory, NodePrimitivesProvider, StateProvider, - StorageChangeSetReader, + StorageChangeSetReader, TryIntoHistoricalStateProvider, }; use reth_storage_errors::provider::ProviderResult; use revm_database::states::PlainStorageRevert; @@ -591,6 +591,28 @@ impl ConsistentProvider { } fetch_from_db(&self.storage_provider) } + + /// Consumes the provider and returns a state provider for the specific block hash. + pub(crate) fn into_state_provider_at_block_hash( + self, + block_hash: BlockHash, + ) -> ProviderResult> { + let Self { storage_provider, head_block, .. } = self; + let into_history_at_block_hash = |block_hash| -> ProviderResult> { + let block_number = storage_provider + .block_number(block_hash)? 
+ .ok_or(ProviderError::BlockHashNotFound(block_hash))?; + storage_provider.try_into_history_at_block(block_number) + }; + if let Some(Some(block_state)) = + head_block.as_ref().map(|b| b.block_on_chain(block_hash.into())) + { + let anchor_hash = block_state.anchor().hash; + let latest_historical = into_history_at_block_hash(anchor_hash)?; + return Ok(Box::new(block_state.state_provider(latest_historical))); + } + into_history_at_block_hash(block_hash) + } } impl ConsistentProvider { @@ -1250,7 +1272,7 @@ impl BlockReaderIdExt for ConsistentProvider { BlockNumberOrTag::Safe => { self.canonical_in_memory_state.get_safe_header().map(|h| h.unseal()) } - BlockNumberOrTag::Earliest => self.header_by_number(0)?, + BlockNumberOrTag::Earliest => self.header_by_number(self.earliest_block_number()?)?, BlockNumberOrTag::Pending => self.canonical_in_memory_state.pending_header(), BlockNumberOrTag::Number(num) => self.header_by_number(num)?, @@ -1270,7 +1292,7 @@ impl BlockReaderIdExt for ConsistentProvider { } BlockNumberOrTag::Safe => Ok(self.canonical_in_memory_state.get_safe_header()), BlockNumberOrTag::Earliest => self - .header_by_number(0)? + .header_by_number(self.earliest_block_number()?)? .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal_slow(h)))), BlockNumberOrTag::Pending => Ok(self.canonical_in_memory_state.pending_sealed_header()), BlockNumberOrTag::Number(num) => self diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 1801c148ecb..6fdff7bfa88 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -345,6 +345,12 @@ impl BlockNumReader for ProviderFactory { self.provider()?.last_block_number() } + fn earliest_block_number(&self) -> ProviderResult { + // the earliest history height tracks the lowest block number that has __not__ been expired, + // in other words, the first/earliest available block. + Ok(self.static_file_provider.earliest_history_height()) + } + fn block_number(&self, hash: B256) -> ProviderResult> { self.provider()?.block_number(hash) } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 0d61db4f27c..8178a8c3133 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -434,18 +434,20 @@ impl< } } -/// For a given key, unwind all history shards that are below the given block number. +/// For a given key, unwind all history shards that contain block numbers at or above the given +/// block number. /// /// S - Sharded key subtype. /// T - Table to walk over. /// C - Cursor implementation. /// /// This function walks the entries from the given start key and deletes all shards that belong to -/// the key and are below the given block number. +/// the key and contain block numbers at or above the given block number. Shards entirely below +/// the block number are preserved. /// -/// The boundary shard (the shard is split by the block number) is removed from the database. Any -/// indices that are above the block number are filtered out. The boundary shard is returned for -/// reinsertion (if it's not empty). +/// The boundary shard (the shard that spans across the block number) is removed from the database. +/// Any indices that are below the block number are extracted and returned for reinsertion.
+/// The boundary shard is returned for reinsertion (if it's not empty). fn unwind_history_shards( cursor: &mut C, start_key: T::Key, @@ -457,27 +459,41 @@ where T::Key: AsRef>, C: DbCursorRO + DbCursorRW, { + // Start from the given key and iterate through shards let mut item = cursor.seek_exact(start_key)?; while let Some((sharded_key, list)) = item { // If the shard does not belong to the key, break. if !shard_belongs_to_key(&sharded_key) { break } + + // Always delete the current shard from the database first + // We'll decide later what (if anything) to reinsert cursor.delete_current()?; - // Check the first item. - // If it is greater or eq to the block number, delete it. + // Get the first (lowest) block number in this shard + // All block numbers in a shard are sorted in ascending order let first = list.iter().next().expect("List can't be empty"); + + // Case 1: Entire shard is at or above the unwinding point + // Keep it deleted (don't return anything for reinsertion) if first >= block_number { item = cursor.prev()?; continue - } else if block_number <= sharded_key.as_ref().highest_block_number { - // Filter out all elements greater than block number. + } + // Case 2: This is a boundary shard (spans across the unwinding point) + // The shard contains some blocks below and some at/above the unwinding point + else if block_number <= sharded_key.as_ref().highest_block_number { + // Return only the block numbers that are below the unwinding point + // These will be reinserted to preserve the historical data return Ok(list.iter().take_while(|i| *i < block_number).collect::>()) } + // Case 3: Entire shard is below the unwinding point + // Return all block numbers for reinsertion (preserve entire shard) return Ok(list.iter().collect::>()) } + // No shards found or all processed Ok(Vec::new()) } @@ -2299,7 +2315,7 @@ impl TrieWriter for DatabaseProvider let tx = self.tx_ref(); let mut account_trie_cursor = tx.cursor_write::()?; for (key, updated_node) in account_updates { - let nibbles = StoredNibbles(key.clone()); + let nibbles = StoredNibbles(*key); match updated_node { Some(node) => { if !nibbles.0.is_empty() { diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index b39b5e20a68..e815f98740c 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -14,7 +14,8 @@ use reth_db_api::{ }; use reth_primitives_traits::{Account, Bytecode}; use reth_storage_api::{ - BlockNumReader, DBProvider, StateCommitmentProvider, StateProofProvider, StorageRootProvider, + BlockNumReader, BytecodeReader, DBProvider, StateCommitmentProvider, StateProofProvider, + StorageRootProvider, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ @@ -433,7 +434,11 @@ impl BytecodeReader + for HistoricalStateProviderRef<'_, Provider> +{ /// Get account code by its hash fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult> { self.tx().get_by_encoded_key::(code_hash).map_err(Into::into) diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 8443e6b4c58..334e0109dcc 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -6,7 +6,7 @@ use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B2 use reth_db_api::{cursor::DbDupCursorRO, tables, transaction::DbTx}; use 
reth_primitives_traits::{Account, Bytecode}; use reth_storage_api::{ - DBProvider, StateCommitmentProvider, StateProofProvider, StorageRootProvider, + BytecodeReader, DBProvider, StateCommitmentProvider, StateProofProvider, StorageRootProvider, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{ @@ -177,7 +177,11 @@ impl StateProv } Ok(None) } +} +impl BytecodeReader + for LatestStateProviderRef<'_, Provider> +{ /// Get account code by its hash fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult> { self.tx().get_by_encoded_key::(code_hash).map_err(Into::into) diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index 36216755ec8..74bb371819f 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -39,6 +39,8 @@ macro_rules! delegate_provider_impls { } StateProvider $(where [$($generics)*])? { fn storage(&self, account: alloy_primitives::Address, storage_key: alloy_primitives::StorageKey) -> reth_storage_errors::provider::ProviderResult>; + } + BytecodeReader $(where [$($generics)*])? { fn bytecode_by_hash(&self, code_hash: &alloy_primitives::B256) -> reth_storage_errors::provider::ProviderResult>; } StateRootProvider $(where [$($generics)*])? { diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index a3ab41325f8..dee449c82d9 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -50,9 +50,9 @@ use std::{ marker::PhantomData, ops::{Deref, Range, RangeBounds, RangeInclusive}, path::{Path, PathBuf}, - sync::{mpsc, Arc}, + sync::{atomic::AtomicU64, mpsc, Arc}, }; -use tracing::{info, trace, warn}; +use tracing::{debug, info, trace, warn}; /// Alias type for a map that can be queried for block ranges from a transaction /// segment respectively. It uses `TxNumber` to represent the transaction end of a static file @@ -229,6 +229,27 @@ pub struct StaticFileProviderInner { /// Maintains a map which allows for concurrent access to different `NippyJars`, over different /// segments and ranges. map: DashMap<(BlockNumber, StaticFileSegment), LoadedJar>, + /// Min static file range for each segment. + /// This index is initialized on launch to keep track of the lowest, non-expired static file + /// per segment. + /// + /// This tracks the lowest static file per segment together with the block range in that + /// file. E.g. if static files are batched in 500K-block intervals, then the lowest static + /// file is [0..499K], i.e. the block range with start = 0 and end = 499K. + /// This index is mainly used for history expiry, which targets transactions, e.g. pre-merge + /// history expiry would lead to removing all static files below the merge height. + static_files_min_block: RwLock>, + /// An additional index that tracks the expired height: the highest block number that has + /// been expired (is missing). The first non-expired block is + /// `expired_history_height + 1`. + /// + /// This effectively tracks the transaction range that has been expired via + /// [`StaticFileProvider::delete_transactions_below`] and mirrors + /// `static_files_min_block[transactions] - blocks_per_file`. + /// + /// This additional tracker exists for more efficient lookups because the node must be aware of + /// the expired height.
+ earliest_history_height: AtomicU64, /// Max static file block for each segment static_files_max_block: RwLock>, /// Available static file block ranges on disk indexed by max transactions. @@ -261,6 +282,8 @@ impl StaticFileProviderInner { let provider = Self { map: Default::default(), writers: Default::default(), + static_files_min_block: Default::default(), + earliest_history_height: Default::default(), static_files_max_block: Default::default(), static_files_tx_index: Default::default(), path: path.as_ref().to_path_buf(), @@ -422,26 +445,71 @@ impl StaticFileProvider { self.map.remove(&(fixed_block_range_end, segment)); } + /// This handles history expiry by deleting all transaction static files below the given block. + /// + /// For example, if `block` is 1M and there are 500K blocks per file, this will delete all + /// individual files below 1M, i.e. 0-499K and 500K-999K. + /// + /// This will not delete the file that contains the block itself, because files can only be + /// removed entirely. + pub fn delete_transactions_below(&self, block: BlockNumber) -> ProviderResult<()> { + // Nothing to delete if block is 0. + if block == 0 { + return Ok(()) + } + + loop { + let Some(block_height) = + self.get_lowest_static_file_block(StaticFileSegment::Transactions) + else { + return Ok(()) + }; + + if block_height >= block { + return Ok(()) + } + + debug!( target: "provider::static_file", ?block_height, "Deleting transaction static file below block" ); + + // Now wipe the static file; this takes care of updating the index and + // advancing the lowest tracked block height for the transactions segment. + self.delete_jar(StaticFileSegment::Transactions, block_height) + .inspect_err(|err| { + warn!(target: "provider::static_file", %block_height, ?err, "Failed to delete transaction static file below block") + })?; + } + } + /// Given a segment and block, it deletes the jar and all files from the respective block range. /// /// CAUTION: destructive. Deletes files on disk. + /// + /// This will re-initialize the index after deletion, so all files are tracked. pub fn delete_jar(&self, segment: StaticFileSegment, block: BlockNumber) -> ProviderResult<()> { let fixed_block_range = self.find_fixed_range(block); let key = (fixed_block_range.end(), segment); let jar = if let Some((_, jar)) = self.map.remove(&key) { jar.jar } else { - NippyJar::::load(&self.path.join(segment.filename(&fixed_block_range))) .map_err(ProviderError::other)? + let file = self.path.join(segment.filename(&fixed_block_range)); + debug!( target: "provider::static_file", ?file, ?fixed_block_range, ?block, "Loading static file jar for deletion" ); + NippyJar::::load(&file).map_err(ProviderError::other)? }; jar.delete().map_err(ProviderError::other)?; - let mut segment_max_block = None; - if fixed_block_range.start() > 0 { - segment_max_block = Some(fixed_block_range.start() - 1) - }; - self.update_index(segment, segment_max_block)?; + self.initialize_index()?; Ok(()) } @@ -597,16 +665,21 @@ impl StaticFileProvider { /// Initializes the inner transaction and block index pub fn initialize_index(&self) -> ProviderResult<()> { + let mut min_block = self.static_files_min_block.write(); let mut max_block = self.static_files_max_block.write(); let mut tx_index = self.static_files_tx_index.write(); + min_block.clear(); max_block.clear(); tx_index.clear(); for (segment, ranges) in iter_static_files(&self.path).map_err(ProviderError::other)?
{ - // Update last block for each segment - if let Some((block_range, _)) = ranges.last() { - max_block.insert(segment, block_range.end()); + // Update first and last block for each segment + if let Some((first_block_range, _)) = ranges.first() { + min_block.insert(segment, *first_block_range); + } + if let Some((last_block_range, _)) = ranges.last() { + max_block.insert(segment, last_block_range.end()); } // Update tx -> block_range index @@ -629,6 +702,13 @@ impl StaticFileProvider { // If this is a re-initialization, we need to clear this as well self.map.clear(); + // initialize the expired history height to the lowest static file block + if let Some(lowest_range) = min_block.get(&StaticFileSegment::Transactions) { + // the earliest height is the lowest available block number + self.earliest_history_height + .store(lowest_range.start(), std::sync::atomic::Ordering::Relaxed); + } + Ok(()) } @@ -938,7 +1018,36 @@ impl StaticFileProvider { Ok(None) } - /// Gets the highest static file block if it exists for a static file segment. + /// Returns the earliest block number that has not been expired and is still available. + /// + /// This means that the highest expired block (or expired block height) is + /// `earliest_history_height.saturating_sub(1)`. + /// + /// Returns `0` if no history has been expired. + pub fn earliest_history_height(&self) -> BlockNumber { + self.earliest_history_height.load(std::sync::atomic::Ordering::Relaxed) + } + + /// Gets the lowest transaction static file block if it exists. + /// + /// For example, if the lowest transaction static file contains blocks 0-499, this will + /// return 499. + /// + /// If there is nothing on disk for the given segment, this will return [`None`]. + pub fn get_lowest_transaction_static_file_block(&self) -> Option { + self.get_lowest_static_file_block(StaticFileSegment::Transactions) + } + + /// Gets the lowest static file's block height if it exists for a static file segment. + /// + /// For example, if the lowest static file contains blocks 0-499, this will return 499. + /// + /// If there is nothing on disk for the given segment, this will return [`None`]. + pub fn get_lowest_static_file_block(&self, segment: StaticFileSegment) -> Option { + self.static_files_min_block.read().get(&segment).map(|range| range.end()) + } + + /// Gets the highest static file's block height if it exists for a static file segment. /// /// If there is nothing on disk for the given segment, this will return [`None`].
pub fn get_highest_static_file_block(&self, segment: StaticFileSegment) -> Option { diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 6480e4d9253..2d0cfb665df 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -29,9 +29,9 @@ use reth_primitives_traits::{ use reth_prune_types::PruneModes; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ - BlockBodyIndicesProvider, DBProvider, DatabaseProviderFactory, HashedPostStateProvider, - NodePrimitivesProvider, StageCheckpointReader, StateCommitmentProvider, StateProofProvider, - StorageRootProvider, + BlockBodyIndicesProvider, BytecodeReader, DBProvider, DatabaseProviderFactory, + HashedPostStateProvider, NodePrimitivesProvider, StageCheckpointReader, + StateCommitmentProvider, StateProofProvider, StorageRootProvider, }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ @@ -876,7 +876,13 @@ where let lock = self.accounts.lock(); Ok(lock.get(&account).and_then(|account| account.storage.get(&storage_key)).copied()) } +} +impl BytecodeReader for MockEthProvider +where + T: NodePrimitives, + ChainSpec: Send + Sync, +{ fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult> { let lock = self.accounts.lock(); Ok(lock.values().find_map(|account| { @@ -917,7 +923,9 @@ impl StatePr self.history_by_block_hash(hash) } - BlockNumberOrTag::Earliest => self.history_by_block_number(0), + BlockNumberOrTag::Earliest => { + self.history_by_block_number(self.earliest_block_number()?) + } BlockNumberOrTag::Pending => self.pending(), BlockNumberOrTag::Number(num) => self.history_by_block_number(num), } diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 9494c865297..cbdb773c203 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -336,6 +336,7 @@ mod tests { info: account_a.clone(), status: AccountStatus::Touched | AccountStatus::Created, storage: HashMap::default(), + transaction_id: 0, }, )])); @@ -346,6 +347,7 @@ mod tests { info: account_b_changed.clone(), status: AccountStatus::Touched, storage: HashMap::default(), + transaction_id: 0, }, )])); @@ -404,6 +406,7 @@ mod tests { status: AccountStatus::Touched | AccountStatus::SelfDestructed, info: account_b_changed, storage: HashMap::default(), + transaction_id: 0, }, )])); @@ -478,6 +481,7 @@ mod tests { EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, ), ]), + transaction_id: 0, }, ), ( @@ -494,6 +498,7 @@ mod tests { ..Default::default() }, )]), + transaction_id: 0, }, ), ])); @@ -595,6 +600,7 @@ mod tests { status: AccountStatus::Touched | AccountStatus::SelfDestructed, info: RevmAccountInfo::default(), storage: HashMap::default(), + transaction_id: 0, }, )])); @@ -661,6 +667,7 @@ mod tests { EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, ), ]), + transaction_id: 0, }, )])); init_state.merge_transitions(BundleRetention::Reverts); @@ -693,6 +700,7 @@ mod tests { ..Default::default() }, )]), + transaction_id: 0, }, )])); state.merge_transitions(BundleRetention::Reverts); @@ -704,6 +712,7 @@ mod tests { status: AccountStatus::Touched | AccountStatus::SelfDestructed, info: account_info.clone(), storage: HashMap::default(), + transaction_id: 0, }, )])); state.merge_transitions(BundleRetention::Reverts); @@ -715,6 +724,7 @@ mod tests { status: 
AccountStatus::Touched | AccountStatus::Created, info: account_info.clone(), storage: HashMap::default(), + transaction_id: 0, }, )])); state.merge_transitions(BundleRetention::Reverts); @@ -742,6 +752,7 @@ mod tests { EvmStorageSlot { present_value: U256::from(6), ..Default::default() }, ), ]), + transaction_id: 0, }, )])); state.merge_transitions(BundleRetention::Reverts); @@ -753,6 +764,7 @@ mod tests { status: AccountStatus::Touched | AccountStatus::SelfDestructed, info: account_info.clone(), storage: HashMap::default(), + transaction_id: 0, }, )])); state.merge_transitions(BundleRetention::Reverts); @@ -764,6 +776,7 @@ mod tests { status: AccountStatus::Touched | AccountStatus::Created, info: account_info.clone(), storage: HashMap::default(), + transaction_id: 0, }, )])); state.commit(HashMap::from_iter([( @@ -776,6 +789,7 @@ mod tests { U256::ZERO, EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, )]), + transaction_id: 0, }, )])); state.commit(HashMap::from_iter([( @@ -784,6 +798,7 @@ mod tests { status: AccountStatus::Touched | AccountStatus::SelfDestructed, info: account_info.clone(), storage: HashMap::default(), + transaction_id: 0, }, )])); state.commit(HashMap::from_iter([( @@ -792,6 +807,7 @@ mod tests { status: AccountStatus::Touched | AccountStatus::Created, info: account_info.clone(), storage: HashMap::default(), + transaction_id: 0, }, )])); state.merge_transitions(BundleRetention::Reverts); @@ -807,8 +823,10 @@ mod tests { U256::ZERO, EvmStorageSlot { present_value: U256::from(9), ..Default::default() }, )]), + transaction_id: 0, }, )])); + state.merge_transitions(BundleRetention::Reverts); let bundle = state.take_bundle(); @@ -975,6 +993,7 @@ mod tests { EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, ), ]), + transaction_id: 0, }, )])); init_state.merge_transitions(BundleRetention::Reverts); @@ -998,6 +1017,7 @@ mod tests { status: AccountStatus::Touched | AccountStatus::SelfDestructed, info: account1.clone(), storage: HashMap::default(), + transaction_id: 0, }, )])); @@ -1007,6 +1027,7 @@ mod tests { status: AccountStatus::Touched | AccountStatus::Created, info: account1.clone(), storage: HashMap::default(), + transaction_id: 0, }, )])); @@ -1020,6 +1041,7 @@ mod tests { U256::from(1), EvmStorageSlot { present_value: U256::from(5), ..Default::default() }, )]), + transaction_id: 0, }, )])); @@ -1146,6 +1168,7 @@ mod tests { status: AccountStatus::Touched | AccountStatus::SelfDestructed, info: RevmAccountInfo::default(), storage: HashMap::default(), + transaction_id: 0, }, )])); state.merge_transitions(BundleRetention::PlainState); @@ -1172,8 +1195,13 @@ mod tests { info: account2.0.into(), storage: HashMap::from_iter([( slot2, - EvmStorageSlot::new_changed(account2_slot2_old_value, account2_slot2_new_value), + EvmStorageSlot::new_changed( + account2_slot2_old_value, + account2_slot2_new_value, + 0, + ), )]), + transaction_id: 0, }, )])); state.merge_transitions(BundleRetention::PlainState); @@ -1191,6 +1219,7 @@ mod tests { status: AccountStatus::Touched, info: account3.0.into(), storage: HashMap::default(), + transaction_id: 0, }, )])); state.merge_transitions(BundleRetention::PlainState); @@ -1208,6 +1237,7 @@ mod tests { status: AccountStatus::Touched, info: account4.0.into(), storage: HashMap::default(), + transaction_id: 0, }, )])); state.merge_transitions(BundleRetention::PlainState); @@ -1223,6 +1253,7 @@ mod tests { status: AccountStatus::Touched | AccountStatus::Created, info: account1_new.into(), storage: 
HashMap::default(), + transaction_id: 0, }, )])); state.merge_transitions(BundleRetention::PlainState); @@ -1240,8 +1271,9 @@ mod tests { info: account1_new.into(), storage: HashMap::from_iter([( slot20, - EvmStorageSlot::new_changed(U256::ZERO, account1_slot20_value), + EvmStorageSlot::new_changed(U256::ZERO, account1_slot20_value, 0), )]), + transaction_id: 0, }, )])); state.merge_transitions(BundleRetention::PlainState); diff --git a/crates/storage/storage-api/src/block_id.rs b/crates/storage/storage-api/src/block_id.rs index 9fe35a5a00b..e00ad950e2d 100644 --- a/crates/storage/storage-api/src/block_id.rs +++ b/crates/storage/storage-api/src/block_id.rs @@ -19,6 +19,11 @@ pub trait BlockNumReader: BlockHashReader + Send + Sync { /// Returns the last block number associated with the last canonical header in the database. fn last_block_number(&self) -> ProviderResult; + /// Returns earliest block number to keep track of the expired block range. + fn earliest_block_number(&self) -> ProviderResult { + Ok(0) + } + /// Gets the `BlockNumber` for the given hash. Returns `None` if no block with this hash exists. fn block_number(&self, hash: B256) -> ProviderResult>; @@ -56,7 +61,7 @@ pub trait BlockIdReader: BlockNumReader + Send + Sync { fn convert_block_number(&self, num: BlockNumberOrTag) -> ProviderResult> { let num = match num { BlockNumberOrTag::Latest => self.best_block_number()?, - BlockNumberOrTag::Earliest => 0, + BlockNumberOrTag::Earliest => self.earliest_block_number()?, BlockNumberOrTag::Pending => { return self .pending_block_num_hash() @@ -84,7 +89,7 @@ pub trait BlockIdReader: BlockNumReader + Send + Sync { .map(|res_opt| res_opt.map(|num_hash| num_hash.hash)), BlockNumberOrTag::Finalized => self.finalized_block_hash(), BlockNumberOrTag::Safe => self.safe_block_hash(), - BlockNumberOrTag::Earliest => self.block_hash(0), + BlockNumberOrTag::Earliest => self.block_hash(self.earliest_block_number()?), BlockNumberOrTag::Number(num) => self.block_hash(num), }, } diff --git a/crates/storage/storage-api/src/legacy.rs b/crates/storage/storage-api/src/legacy.rs deleted file mode 100644 index bb6a21e4e15..00000000000 --- a/crates/storage/storage-api/src/legacy.rs +++ /dev/null @@ -1,84 +0,0 @@ -//! Traits used by the legacy execution engine. -//! -//! This module is scheduled for removal in the future. - -use alloc::boxed::Box; -use alloy_eips::BlockNumHash; -use alloy_primitives::{BlockHash, BlockNumber}; -use auto_impl::auto_impl; -use reth_execution_types::ExecutionOutcome; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; - -/// Blockchain trait provider that gives access to the blockchain state that is not yet committed -/// (pending). -pub trait BlockchainTreePendingStateProvider: Send + Sync { - /// Returns a state provider that includes all state changes of the given (pending) block hash. - /// - /// In other words, the state provider will return the state after all transactions of the given - /// hash have been executed. - fn pending_state_provider( - &self, - block_hash: BlockHash, - ) -> ProviderResult> { - self.find_pending_state_provider(block_hash) - .ok_or(ProviderError::StateForHashNotFound(block_hash)) - } - - /// Returns state provider if a matching block exists. - fn find_pending_state_provider( - &self, - block_hash: BlockHash, - ) -> Option>; -} - -/// Provides data required for post-block execution. 
-/// -/// This trait offers methods to access essential post-execution data, including the state changes -/// in accounts and storage, as well as block hashes for both the pending and canonical chains. -/// -/// The trait includes: -/// * [`ExecutionOutcome`] - Captures all account and storage changes in the pending chain. -/// * Block hashes - Provides access to the block hashes of both the pending chain and canonical -/// blocks. -#[auto_impl(&, Box)] -pub trait ExecutionDataProvider: Send + Sync { - /// Return the execution outcome. - fn execution_outcome(&self) -> &ExecutionOutcome; - /// Return block hash by block number of pending or canonical chain. - fn block_hash(&self, block_number: BlockNumber) -> Option; -} - -impl ExecutionDataProvider for ExecutionOutcome { - fn execution_outcome(&self) -> &ExecutionOutcome { - self - } - - /// Always returns [None] because we don't have any information about the block header. - fn block_hash(&self, _block_number: BlockNumber) -> Option { - None - } -} - -/// Fork data needed for execution on it. -/// -/// It contains a canonical fork, the block on what pending chain was forked from. -#[auto_impl(&, Box)] -pub trait BlockExecutionForkProvider { - /// Return canonical fork, the block on what post state was forked from. - /// - /// Needed to create state provider. - fn canonical_fork(&self) -> BlockNumHash; -} - -/// Provides comprehensive post-execution state data required for further execution. -/// -/// This trait is used to create a state provider over the pending state and is a combination of -/// [`ExecutionDataProvider`] and [`BlockExecutionForkProvider`]. -/// -/// The pending state includes: -/// * `ExecutionOutcome`: Contains all changes to accounts and storage within the pending chain. -/// * Block hashes: Represents hashes of both the pending chain and canonical blocks. -/// * Canonical fork: Denotes the block from which the pending chain forked. 
-pub trait FullExecutionDataProvider: ExecutionDataProvider + BlockExecutionForkProvider {} - -impl FullExecutionDataProvider for T where T: ExecutionDataProvider + BlockExecutionForkProvider {} diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index a82f6092494..7b326c6c82e 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -79,9 +79,6 @@ mod stats; #[cfg(feature = "db-api")] pub use stats::*; -mod legacy; -pub use legacy::*; - mod primitives; pub use primitives::*; diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index eca0beb0f7b..2afa4b616f5 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -2,10 +2,10 @@ use crate::{ AccountReader, BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, - BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, HashedPostStateProvider, - HeaderProvider, NodePrimitivesProvider, PruneCheckpointReader, ReceiptProvider, - ReceiptProviderIdExt, StageCheckpointReader, StateProofProvider, StateProvider, - StateProviderBox, StateProviderFactory, StateRootProvider, StorageRootProvider, + BlockReader, BlockReaderIdExt, BlockSource, BytecodeReader, ChangeSetReader, + HashedPostStateProvider, HeaderProvider, NodePrimitivesProvider, PruneCheckpointReader, + ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProofProvider, + StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, StorageRootProvider, TransactionVariant, TransactionsProvider, }; use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec}; @@ -455,7 +455,9 @@ impl StateProvider for NoopProvider { ) -> ProviderResult> { Ok(None) } +} +impl BytecodeReader for NoopProvider { fn bytecode_by_hash(&self, _code_hash: &B256) -> ProviderResult> { Ok(None) } @@ -486,7 +488,9 @@ impl StateProviderFactory for NoopP self.history_by_block_hash(hash) } - BlockNumberOrTag::Earliest => self.history_by_block_number(0), + BlockNumberOrTag::Earliest => { + self.history_by_block_number(self.earliest_block_number()?) + } BlockNumberOrTag::Pending => self.pending(), BlockNumberOrTag::Number(num) => self.history_by_block_number(num), } diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index 301aebaa78a..5581248d3eb 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -34,6 +34,7 @@ pub type StateProviderBox = Box; pub trait StateProvider: BlockHashReader + AccountReader + + BytecodeReader + StateRootProvider + StorageRootProvider + StateProofProvider @@ -48,9 +49,6 @@ pub trait StateProvider: storage_key: StorageKey, ) -> ProviderResult>; - /// Get account code by its hash - fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult>; - /// Get account code by its address. /// /// Returns `None` if the account doesn't exist or account is not a contract @@ -94,6 +92,10 @@ pub trait StateProvider: } } +/// Minimal requirements to read a full account, for example, to validate its new transactions +pub trait AccountInfoReader: AccountReader + BytecodeReader {} +impl AccountInfoReader for T {} + /// Trait implemented for database providers that can provide the [`reth_trie_db::StateCommitment`] /// type. 
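The hunks above pull `bytecode_by_hash` out of `StateProvider` into a standalone `BytecodeReader` trait, with `AccountInfoReader` as a blanket combination of the two minimal read capabilities. A self-contained sketch of the pattern, using simplified stand-in types rather than the reth definitions:

```rust
// Stand-in for alloy's 32-byte hash type.
type B256 = [u8; 32];

trait AccountReader {
    // Simplified: returns only the nonce; reth returns a full `Account`.
    fn basic_account(&self, address: &B256) -> Option<u64>;
}

trait BytecodeReader {
    // Look up contract code by its hash.
    fn bytecode_by_hash(&self, code_hash: &B256) -> Option<Vec<u8>>;
}

// Minimal requirements to read a full account, blanket-implemented for
// anything that can read both accounts and bytecode.
trait AccountInfoReader: AccountReader + BytecodeReader {}
impl<T: AccountReader + BytecodeReader> AccountInfoReader for T {}

// Consumers such as transaction validation can now take the narrow bound
// instead of requiring a full state provider.
fn sender_exists<P: AccountInfoReader>(state: &P, sender: &B256) -> bool {
    state.basic_account(sender).is_some()
}

fn main() {}
```

The narrower bound is what lets `validate_one_against_state` further below accept any `AccountInfoReader` instead of a boxed `StateProvider`.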
#[cfg(feature = "db-api")] @@ -110,6 +112,13 @@ pub trait HashedPostStateProvider: Send + Sync { fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState; } +/// Trait for reading bytecode associated with a given code hash. +#[auto_impl(&, Arc, Box)] +pub trait BytecodeReader: Send + Sync { + /// Get account code by its hash + fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult>; +} + /// Trait implemented for database providers that can be converted into a historical state provider. pub trait TryIntoHistoricalStateProvider { /// Returns a historical [`StateProvider`] indexed by the given historic block number. diff --git a/crates/storage/storage-api/src/transactions.rs b/crates/storage/storage-api/src/transactions.rs index 96e6a1997e7..732d0437592 100644 --- a/crates/storage/storage-api/src/transactions.rs +++ b/crates/storage/storage-api/src/transactions.rs @@ -9,7 +9,7 @@ use reth_storage_errors::provider::{ProviderError, ProviderResult}; /// Enum to control transaction hash inclusion. /// -/// This serves as a hint to the provider to include or omit exclude hashes because hashes are +/// This serves as a hint to the provider to include or omit hashes because hashes are /// stored separately and are not always needed. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)] pub enum TransactionVariant { diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index e723dc0dc79..686c9456d39 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -247,6 +247,10 @@ pub enum InvalidPoolTransactionError { /// Balance of account. balance: U256, }, + /// EIP-2681 error thrown if the nonce is higher or equal than `U64::max` + /// `` + #[error("nonce exceeds u64 limit")] + Eip2681, /// EIP-4844 related errors #[error(transparent)] Eip4844(#[from] Eip4844PoolTransactionError), @@ -326,6 +330,7 @@ impl InvalidPoolTransactionError { Self::IntrinsicGasTooLow => true, Self::Overdraft { .. } => false, Self::Other(err) => err.is_bad_transaction(), + Self::Eip2681 => true, Self::Eip4844(eip4844_err) => { match eip4844_err { Eip4844PoolTransactionError::MissingEip4844BlobSidecar => { diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 1a72585fc80..3d776510553 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -12,6 +12,127 @@ //! - monitoring memory footprint and enforce pool size limits //! - storing blob data for transactions in a separate blobstore on insertion //! +//! ## Transaction Flow: From Network/RPC to Pool +//! +//! Transactions enter the pool through two main paths: +//! +//! ### 1. Network Path (P2P) +//! +//! ```text +//! Network Peer +//! ↓ +//! Transactions or NewPooledTransactionHashes message +//! ↓ +//! TransactionsManager (crates/net/network/src/transactions/mod.rs) +//! │ +//! ├─→ For Transactions message: +//! │ ├─→ Validates message format +//! │ ├─→ Checks if transaction already known +//! │ ├─→ Marks peer as having seen the transaction +//! │ └─→ Queues for import +//! │ +//! └─→ For NewPooledTransactionHashes message: +//! ├─→ Filters out already known transactions +//! ├─→ Queues unknown hashes for fetching +//! ├─→ Sends GetPooledTransactions request +//! ├─→ Receives PooledTransactions response +//! └─→ Queues fetched transactions for import +//! ↓ +//! pool.add_external_transactions() [Origin: External] +//! ↓ +//! Transaction Validation & Pool Addition +//! ``` +//! +//! ### 2. 
RPC Path (Local submission) +//! +//! ```text +//! eth_sendRawTransaction RPC call +//! ├─→ Decodes raw bytes +//! └─→ Recovers sender +//! ↓ +//! pool.add_transaction() [Origin: Local] +//! ↓ +//! Transaction Validation & Pool Addition +//! ``` +//! +//! ### Transaction Origins +//! +//! - **Local**: Transactions submitted via RPC (trusted, may have different fee requirements) +//! - **External**: Transactions from network peers (untrusted, subject to stricter validation) +//! - **Private**: Local transactions that should not be propagated to the network +//! +//! ## Validation Process +//! +//! ### Stateless Checks +//! +//! Ethereum transactions undergo several stateless checks: +//! +//! - **Transaction Type**: Fork-dependent support (Legacy always, EIP-2930/1559/4844/7702 need +//! activation) +//! - **Size**: Input data ≤ 128KB (default) +//! - **Gas**: Limit ≤ block gas limit +//! - **Fees**: Priority fee ≤ max fee; local tx fee cap; external minimum priority fee +//! - **Chain ID**: Must match current chain +//! - **Intrinsic Gas**: Sufficient for data and access lists +//! - **Blobs** (EIP-4844): Valid count, KZG proofs +//! +//! ### Stateful Checks +//! +//! 1. **Sender**: No bytecode (unless EIP-7702 delegated in Prague) +//! 2. **Nonce**: ≥ account nonce +//! 3. **Balance**: Covers value + (`gas_limit` × `max_fee_per_gas`) +//! +//! ### Common Errors +//! +//! - [`NonceNotConsistent`](reth_primitives_traits::transaction::error::InvalidTransactionError::NonceNotConsistent): Nonce too low +//! - [`InsufficientFunds`](reth_primitives_traits::transaction::error::InvalidTransactionError::InsufficientFunds): Insufficient balance +//! - [`ExceedsGasLimit`](crate::error::InvalidPoolTransactionError::ExceedsGasLimit): Gas limit too +//! high +//! - [`SignerAccountHasBytecode`](reth_primitives_traits::transaction::error::InvalidTransactionError::SignerAccountHasBytecode): EOA has code +//! - [`Underpriced`](crate::error::InvalidPoolTransactionError::Underpriced): Fee too low +//! - [`ReplacementUnderpriced`](crate::error::PoolErrorKind::ReplacementUnderpriced): Replacement +//! transaction fee too low +//! - Blob errors: +//! - [`MissingEip4844BlobSidecar`](crate::error::Eip4844PoolTransactionError::MissingEip4844BlobSidecar): Missing sidecar +//! - [`InvalidEip4844Blob`](crate::error::Eip4844PoolTransactionError::InvalidEip4844Blob): +//! Invalid blob proofs +//! - [`NoEip4844Blobs`](crate::error::Eip4844PoolTransactionError::NoEip4844Blobs): EIP-4844 +//! transaction without blobs +//! - [`TooManyEip4844Blobs`](crate::error::Eip4844PoolTransactionError::TooManyEip4844Blobs): Too +//! many blobs +//! +//! ## Subpool Design +//! +//! The pool maintains four distinct subpools, each serving a specific purpose +//! +//! ### Subpools +//! +//! 1. **Pending**: Ready for inclusion (no gaps, sufficient balance/fees) +//! 2. **Queued**: Future transactions (nonce gaps or insufficient balance) +//! 3. **`BaseFee`**: Valid but below current base fee +//! 4. **Blob**: EIP-4844 transactions not pending due to insufficient base fee or blob fee +//! +//! ### State Transitions +//! +//! Transactions move between subpools based on state changes: +//! +//! ```text +//! Queued ─────────→ BaseFee/Blob ────────→ Pending +//! ↑ ↑ │ +//! │ │ │ +//! └────────────────────┴─────────────────────┘ +//! (demotions due to state changes) +//! ``` +//! +//! **Promotions**: Nonce gaps filled, balance/fee improvements +//! **Demotions**: Nonce gaps created, balance/fee degradation +//! +//! ## Pool Maintenance +//! +//! 
1. **Block Updates**: Removes mined txs, updates accounts/fees, triggers movements +//! 2. **Size Enforcement**: Discards worst transactions when limits exceeded +//! 3. **Propagation**: External (always), Local (configurable), Private (never) +//! //! ## Assumptions //! //! ### Transaction type @@ -41,11 +162,7 @@ //! //! ### State Changes //! -//! Once a new block is mined, the pool needs to be updated with a changeset in order to: -//! -//! - remove mined transactions -//! - update using account changes: balance changes -//! - base fee updates +//! New blocks trigger pool updates via changesets (see Pool Maintenance). //! //! ## Implementation details //! @@ -118,9 +235,10 @@ //! use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool}; //! use reth_transaction_pool::blobstore::InMemoryBlobStore; //! use reth_transaction_pool::maintain::{maintain_transaction_pool_future}; +//! use alloy_consensus::Header; //! //! async fn t(client: C, stream: St) -//! where C: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider + Clone + 'static, +//! where C: StateProviderFactory + BlockReaderIdExt
+ ChainSpecProvider + Clone + 'static, //! St: Stream + Send + Unpin + 'static, //! { //! let blob_store = InMemoryBlobStore::default(); diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 1c6a4a52a89..b076255f1f7 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -100,7 +100,11 @@ pub fn maintain_transaction_pool_future( ) -> BoxFuture<'static, ()> where N: NodePrimitives, - Client: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider + Clone + 'static, + Client: StateProviderFactory + + BlockReaderIdExt
+ + ChainSpecProvider> + + Clone + + 'static, P: TransactionPoolExt> + 'static, St: Stream> + Send + Unpin + 'static, Tasks: TaskSpawner + 'static, @@ -122,7 +126,11 @@ pub async fn maintain_transaction_pool( config: MaintainPoolConfig, ) where N: NodePrimitives, - Client: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider + Clone + 'static, + Client: StateProviderFactory + + BlockReaderIdExt
+ + ChainSpecProvider> + + Clone + + 'static, P: TransactionPoolExt> + 'static, St: Stream> + Send + Unpin + 'static, Tasks: TaskSpawner + 'static, @@ -137,8 +145,8 @@ pub async fn maintain_transaction_pool( block_gas_limit: latest.gas_limit(), last_seen_block_hash: latest.hash(), last_seen_block_number: latest.number(), - pending_basefee: latest - .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(latest.timestamp())) + pending_basefee: chain_spec + .next_block_base_fee(latest.header(), latest.timestamp()) .unwrap_or_default(), pending_blob_fee: latest .maybe_next_block_blob_fee(chain_spec.blob_params_at_timestamp(latest.timestamp())), @@ -317,11 +325,8 @@ pub async fn maintain_transaction_pool( let chain_spec = client.chain_spec(); // fees for the next block: `new_tip+1` - let pending_block_base_fee = new_tip - .header() - .next_block_base_fee( - chain_spec.base_fee_params_at_timestamp(new_tip.timestamp()), - ) + let pending_block_base_fee = chain_spec + .next_block_base_fee(new_tip.header(), new_tip.timestamp()) .unwrap_or_default(); let pending_block_blob_fee = new_tip.header().maybe_next_block_blob_fee( chain_spec.blob_params_at_timestamp(new_tip.timestamp()), @@ -423,9 +428,8 @@ pub async fn maintain_transaction_pool( let chain_spec = client.chain_spec(); // fees for the next block: `tip+1` - let pending_block_base_fee = tip - .header() - .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(tip.timestamp())) + let pending_block_base_fee = chain_spec + .next_block_base_fee(tip.header(), tip.timestamp()) .unwrap_or_default(); let pending_block_blob_fee = tip.header().maybe_next_block_blob_fee( chain_spec.blob_params_at_timestamp(tip.timestamp()), diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 20b4b076e97..bf96431f78a 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -902,7 +902,7 @@ where .collect() } - /// Returns all pending transactions filted by [`TransactionOrigin`] + /// Returns all pending transactions filtered by [`TransactionOrigin`] pub fn get_pending_transactions_by_origin( &self, origin: TransactionOrigin, diff --git a/crates/transaction-pool/src/pool/state.rs b/crates/transaction-pool/src/pool/state.rs index d65fc05b03f..e04b463343e 100644 --- a/crates/transaction-pool/src/pool/state.rs +++ b/crates/transaction-pool/src/pool/state.rs @@ -14,7 +14,7 @@ bitflags::bitflags! { pub(crate) struct TxState: u8 { /// Set to `1` if all ancestor transactions are pending. const NO_PARKED_ANCESTORS = 0b10000000; - /// Set to `1` of the transaction is either the next transaction of the sender (on chain nonce == tx.nonce) or all prior transactions are also present in the pool. + /// Set to `1` if the transaction is either the next transaction of the sender (on chain nonce == tx.nonce) or all prior transactions are also present in the pool. const NO_NONCE_GAPS = 0b01000000; /// Bit derived from the sender's balance. /// diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 9612ad5ee9d..9ddde67ba59 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -653,7 +653,7 @@ impl MockTransaction { matches!(self, Self::Eip2930 { .. }) } - /// Checks if the transaction is of the EIP-2930 type. + /// Checks if the transaction is of the EIP-7702 type. pub const fn is_eip7702(&self) -> bool { matches!(self, Self::Eip7702 { .. 
}) } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 0e010845def..e9f58c27a32 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1,3 +1,55 @@ +//! Transaction Pool Traits and Types +//! +//! This module defines the core abstractions for transaction pool implementations, +//! handling the complexity of different transaction representations across the +//! network, mempool, and the chain itself. +//! +//! ## Key Concepts +//! +//! ### Transaction Representations +//! +//! Transactions exist in different formats throughout their lifecycle: +//! +//! 1. **Consensus Format** ([`PoolTransaction::Consensus`]) +//! - The canonical format stored in blocks +//! - Minimal size for efficient storage +//! - Example: EIP-4844 transactions store only blob hashes: ([`TransactionSigned::Eip4844`]) +//! +//! 2. **Pooled Format** ([`PoolTransaction::Pooled`]) +//! - Extended format for network propagation +//! - Includes additional validation data +//! - Example: EIP-4844 transactions include full blob sidecars: ([`PooledTransactionVariant`]) +//! +//! ### Type Relationships +//! +//! ```text +//! NodePrimitives::SignedTx ←── NetworkPrimitives::BroadcastedTransaction +//! │ │ +//! │ (consensus format) │ (announced to peers) +//! │ │ +//! └──────────┐ ┌────────────────┘ +//! ▼ ▼ +//! PoolTransaction::Consensus +//! │ ▲ +//! │ │ from pooled (always succeeds) +//! │ │ +//! ▼ │ try_from consensus (may fail) +//! PoolTransaction::Pooled ←──→ NetworkPrimitives::PooledTransaction +//! (sent on request) +//! ``` +//! +//! ### Special Cases +//! +//! #### EIP-4844 Blob Transactions +//! - Consensus format: Only blob hashes (32 bytes each) +//! - Pooled format: Full blobs + commitments + proofs (large data per blob) +//! - Network behavior: Not broadcast automatically, only sent on explicit request +//! +//! #### Optimism Deposit Transactions +//! - Only exist in consensus format +//! - Never enter the mempool (system transactions) +//! - Conversion from consensus to pooled always fails + use crate::{ blobstore::BlobStoreError, error::{InvalidPoolTransactionError, PoolResult}, @@ -932,19 +984,62 @@ impl BestTransactionsAttributes { } } -/// Trait for transaction types used inside the pool. +/// Trait for transaction types stored in the transaction pool. +/// +/// This trait represents the actual transaction object stored in the mempool, which includes not +/// only the transaction data itself but also additional metadata needed for efficient pool +/// operations. Implementations typically cache values that are frequently accessed during +/// transaction ordering, validation, and eviction. +/// +/// ## Key Responsibilities +/// +/// 1. **Metadata Caching**: Store computed values like address, cost and encoded size +/// 2. **Representation Conversion**: Handle conversions between consensus and pooled +/// representations +/// 3. **Validation Support**: Provide methods for pool-specific validation rules +/// +/// ## Cached Metadata +/// +/// Implementations should cache frequently accessed values to avoid recomputation: +/// - **Address**: Recovered sender address of the transaction +/// - **Cost**: Max amount spendable (gas × price + value + blob costs) +/// - **Size**: RLP encoded length for mempool size limits +/// +/// See [`EthPooledTransaction`] for a reference implementation. +/// +/// ## Transaction Representations +/// +/// This trait abstracts over the different representations a transaction can have: +/// +/// 1. 
**Consensus representation** (`Consensus` associated type): The canonical form included in +/// blocks +/// - Compact representation without networking metadata +/// - For EIP-4844: includes only blob hashes, not the actual blobs +/// - Used for block execution and state transitions +/// +/// 2. **Pooled representation** (`Pooled` associated type): The form used for network propagation +/// - May include additional data for validation +/// - For EIP-4844: includes full blob sidecars (blobs, commitments, proofs) +/// - Used for mempool validation and p2p gossiping /// -/// This supports two transaction formats -/// - Consensus format: the form the transaction takes when it is included in a block. -/// - Pooled format: the form the transaction takes when it is gossiping around the network. +/// ## Why Two Representations? /// -/// This distinction is necessary for the EIP-4844 blob transactions, which require an additional -/// sidecar when they are gossiped around the network. It is expected that the `Consensus` format is -/// a subset of the `Pooled` format. +/// This distinction is necessary because: /// -/// The assumption is that fallible conversion from `Consensus` to `Pooled` will encapsulate -/// handling of all valid `Consensus` transactions that can't be pooled (e.g Deposit transactions or -/// blob-less EIP-4844 transactions). +/// - **EIP-4844 blob transactions**: Require large blob sidecars for validation that would bloat +/// blocks if included. Only blob hashes are stored on-chain. +/// +/// - **Network efficiency**: Blob transactions are not broadcast to all peers automatically but +/// must be explicitly requested to reduce bandwidth usage. +/// +/// - **Special transactions**: Some transactions (like OP deposit transactions) exist only in +/// consensus format and are never in the mempool. +/// +/// ## Conversion Rules +/// +/// - `Consensus` → `Pooled`: May fail for transactions that cannot be pooled (e.g., OP deposit +/// transactions, blob transactions without sidecars) +/// - `Pooled` → `Consensus`: Always succeeds (pooled is a superset) pub trait PoolTransaction: alloy_consensus::Transaction + InMemorySize + Debug + Send + Sync + Clone { @@ -959,8 +1054,13 @@ pub trait PoolTransaction: /// Define a method to convert from the `Consensus` type to `Self` /// - /// Note: this _must_ fail on any transactions that cannot be pooled (e.g OP Deposit - /// transactions). + /// This conversion may fail for transactions that are valid for inclusion in blocks + /// but cannot exist in the transaction pool. Examples include: + /// + /// - **OP Deposit transactions**: These are special system transactions that are directly + /// included in blocks by the sequencer/validator and never enter the mempool + /// - **Blob transactions without sidecars**: After being included in a block, the sidecar data + /// is pruned, making the consensus transaction unpoolable fn try_from_consensus( tx: Recovered, ) -> Result { @@ -1079,8 +1179,14 @@ pub trait EthPoolTransaction: PoolTransaction { /// The default [`PoolTransaction`] for the [Pool](crate::Pool) for Ethereum. /// -/// This type is essentially a wrapper around [`Recovered`] with additional -/// fields derived from the transaction that are frequently used by the pools for ordering. 
+/// This type wraps a consensus transaction with additional cached data that's +/// frequently accessed by the pool for transaction ordering and validation: +/// +/// - `cost`: Pre-calculated max cost (gas * price + value + blob costs) +/// - `encoded_length`: Cached RLP encoding length for size limits +/// - `blob_sidecar`: Blob data state (None/Missing/Present) +/// +/// This avoids recalculating these values repeatedly during pool operations. #[derive(Debug, Clone, PartialEq, Eq)] pub struct EthPooledTransaction { /// `EcRecovered` transaction, the consensus format. @@ -1330,16 +1436,31 @@ impl EthPoolTransaction for EthPooledTransaction { } /// Represents the blob sidecar of the [`EthPooledTransaction`]. +/// +/// EIP-4844 blob transactions require additional data (blobs, commitments, proofs) +/// for validation that is not included in the consensus format. This enum tracks +/// the sidecar state throughout the transaction's lifecycle in the pool. #[derive(Debug, Clone, PartialEq, Eq)] pub enum EthBlobTransactionSidecar { /// This transaction does not have a blob sidecar + /// (applies to all non-EIP-4844 transaction types) None, - /// This transaction has a blob sidecar (EIP-4844) but it is missing + /// This transaction has a blob sidecar (EIP-4844) but it is missing. /// - /// It was either extracted after being inserted into the pool or re-injected after reorg - /// without the blob sidecar + /// This can happen when: + /// - The sidecar was extracted after the transaction was added to the pool + /// - The transaction was re-injected after a reorg without its sidecar + /// - The transaction was recovered from the consensus format (e.g., from a block) Missing, - /// The eip-4844 transaction was pulled from the network and still has its blob sidecar + /// The EIP-4844 transaction was received from the network with its complete sidecar. + /// + /// This sidecar contains: + /// - The actual blob data (large data per blob) + /// - KZG commitments for each blob + /// - KZG proofs for validation + /// + /// The sidecar is required for validating the transaction but is not included + /// in blocks (only the blob hashes are included in the consensus format). Present(BlobTransactionSidecarVariant), } diff --git a/crates/transaction-pool/src/validate/constants.rs b/crates/transaction-pool/src/validate/constants.rs index 9607937c67a..d4fca5a2aeb 100644 --- a/crates/transaction-pool/src/validate/constants.rs +++ b/crates/transaction-pool/src/validate/constants.rs @@ -15,4 +15,4 @@ pub const DEFAULT_MAX_TX_INPUT_BYTES: usize = 4 * TX_SLOT_BYTE_SIZE; // 128KB pub const MAX_CODE_BYTE_SIZE: usize = revm_primitives::eip170::MAX_CODE_SIZE; /// Maximum initcode to permit in a creation transaction and create instructions. 
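The conversion rules documented in the rewritten `PoolTransaction` docs above can be modeled directly: pooled to consensus always succeeds, while consensus to pooled is fallible. A toy sketch with illustrative enums, not the reth types:

```rust
// Two representations of the same transaction, mirroring the
// `Consensus`/`Pooled` associated types described above.
enum Consensus {
    Legacy { payload: Vec<u8> },
    // On-chain form keeps only the 32-byte blob hashes.
    Blob { blob_hashes: Vec<[u8; 32]> },
}

enum Pooled {
    Legacy { payload: Vec<u8> },
    // Mempool form carries the full sidecar for validation.
    Blob { blob_hashes: Vec<[u8; 32]>, sidecar: Vec<u8> },
}

impl From<Pooled> for Consensus {
    fn from(tx: Pooled) -> Self {
        match tx {
            Pooled::Legacy { payload } => Self::Legacy { payload },
            // Dropping the sidecar is always possible.
            Pooled::Blob { blob_hashes, .. } => Self::Blob { blob_hashes },
        }
    }
}

impl TryFrom<Consensus> for Pooled {
    type Error = &'static str;

    fn try_from(tx: Consensus) -> Result<Self, Self::Error> {
        match tx {
            Consensus::Legacy { payload } => Ok(Self::Legacy { payload }),
            // The sidecar is pruned once the tx is in a block, so this fails.
            Consensus::Blob { .. } => Err("blob sidecar unavailable"),
        }
    }
}

fn main() {
    let pooled = Pooled::Blob { blob_hashes: vec![[0; 32]], sidecar: vec![1, 2, 3] };
    let consensus = Consensus::from(pooled);
    // Round-tripping back to the pooled form fails without the sidecar.
    assert!(Pooled::try_from(consensus).is_err());
}
```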
-pub const MAX_INIT_CODE_BYTE_SIZE: usize = revm_primitives::MAX_INITCODE_SIZE; +pub const MAX_INIT_CODE_BYTE_SIZE: usize = revm_primitives::eip3860::MAX_INITCODE_SIZE; diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 1fb628d57c2..90a61b86ec5 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -28,7 +28,7 @@ use reth_primitives_traits::{ constants::MAX_TX_GAS_LIMIT_OSAKA, transaction::error::InvalidTransactionError, Block, GotExpected, SealedBlock, }; -use reth_storage_api::{StateProvider, StateProviderFactory}; +use reth_storage_api::{AccountInfoReader, StateProviderFactory}; use reth_tasks::TaskSpawner; use std::{ marker::PhantomData, @@ -62,6 +62,57 @@ impl EthTransactionValidator { pub fn client(&self) -> &Client { &self.inner.client } + + /// Returns the tracks activated forks relevant for transaction validation + pub fn fork_tracker(&self) -> &ForkTracker { + &self.inner.fork_tracker + } + + /// Returns if there are EIP-2718 type transactions + pub fn eip2718(&self) -> bool { + self.inner.eip2718 + } + + /// Returns if there are EIP-1559 type transactions + pub fn eip1559(&self) -> bool { + self.inner.eip1559 + } + + /// Returns if there are EIP-4844 blob transactions + pub fn eip4844(&self) -> bool { + self.inner.eip4844 + } + + /// Returns if there are EIP-7702 type transactions + pub fn eip7702(&self) -> bool { + self.inner.eip7702 + } + + /// Returns the current tx fee cap limit in wei locally submitted into the pool + pub fn tx_fee_cap(&self) -> &Option { + &self.inner.tx_fee_cap + } + + /// Returns the minimum priority fee to enforce for acceptance into the pool + pub fn minimum_priority_fee(&self) -> &Option { + &self.inner.minimum_priority_fee + } + + /// Returns the setup and parameters needed for validating KZG proofs. + pub fn kzg_settings(&self) -> &EnvKzgSettings { + &self.inner.kzg_settings + } + + /// Returns the config to handle [`TransactionOrigin::Local`](TransactionOrigin) transactions.. + pub fn local_transactions_config(&self) -> &LocalTransactionConfig { + &self.inner.local_transactions_config + } + + /// Returns the maximum size in bytes a single transaction can have in order to be accepted into + /// the pool. + pub fn max_tx_input_bytes(&self) -> usize { + self.inner.max_tx_input_bytes + } } impl EthTransactionValidator @@ -69,6 +120,11 @@ where Client: ChainSpecProvider + StateProviderFactory, Tx: EthPoolTransaction, { + /// Returns the current max gas limit + pub fn block_gas_limit(&self) -> u64 { + self.inner.max_gas_limit() + } + /// Validates a single transaction. /// /// See also [`TransactionValidator::validate_transaction`] @@ -77,7 +133,7 @@ where origin: TransactionOrigin, transaction: Tx, ) -> TransactionValidationOutcome { - self.inner.validate_one(origin, transaction) + self.inner.validate_one_with_provider(origin, transaction, &mut None) } /// Validates a single transaction with the provided state provider. @@ -90,35 +146,10 @@ where &self, origin: TransactionOrigin, transaction: Tx, - state: &mut Option>, + state: &mut Option>, ) -> TransactionValidationOutcome { self.inner.validate_one_with_provider(origin, transaction, state) } - - /// Validates all given transactions. - /// - /// Returns all outcomes for the given transactions in the same order. 
- /// - /// See also [`Self::validate_one`] - pub fn validate_all( - &self, - transactions: Vec<(TransactionOrigin, Tx)>, - ) -> Vec> { - self.inner.validate_batch(transactions) - } - - /// Validates all given transactions with origin. - /// - /// Returns all outcomes for the given transactions in the same order. - /// - /// See also [`Self::validate_one`] - pub fn validate_all_with_origin( - &self, - origin: TransactionOrigin, - transactions: impl IntoIterator + Send, - ) -> Vec> { - self.inner.validate_batch_with_origin(origin, transactions) - } } impl TransactionValidator for EthTransactionValidator @@ -140,7 +171,7 @@ where &self, transactions: Vec<(TransactionOrigin, Self::Transaction)>, ) -> Vec> { - self.validate_all(transactions) + self.inner.validate_batch(transactions) } async fn validate_transactions_with_origin( @@ -148,7 +179,7 @@ where origin: TransactionOrigin, transactions: impl IntoIterator + Send, ) -> Vec> { - self.validate_all_with_origin(origin, transactions) + self.inner.validate_batch_with_origin(origin, transactions) } fn on_new_head_block(&self, new_tip_block: &SealedBlock) @@ -234,7 +265,7 @@ where &self, origin: TransactionOrigin, transaction: Tx, - maybe_state: &mut Option>, + maybe_state: &mut Option>, ) -> TransactionValidationOutcome { match self.validate_one_no_state(origin, transaction) { Ok(transaction) => { @@ -243,7 +274,7 @@ where if maybe_state.is_none() { match self.client.latest() { Ok(new_state) => { - *maybe_state = Some(new_state); + *maybe_state = Some(Box::new(new_state)); } Err(err) => { return TransactionValidationOutcome::Error( @@ -320,6 +351,15 @@ where } }; + // Reject transactions with a nonce equal to U64::max according to EIP-2681 + let tx_nonce = transaction.nonce(); + if tx_nonce == u64::MAX { + return Err(TransactionValidationOutcome::Invalid( + transaction, + InvalidPoolTransactionError::Eip2681, + )) + } + // Reject transactions over defined size to prevent DOS attacks let tx_input_len = transaction.input().len(); if tx_input_len > self.max_tx_input_bytes { @@ -483,7 +523,7 @@ where state: P, ) -> TransactionValidationOutcome where - P: StateProvider, + P: AccountInfoReader, { // Use provider to get account info let account = match state.basic_account(transaction.sender_ref()) { @@ -635,16 +675,6 @@ where } } - /// Validates a single transaction. - fn validate_one( - &self, - origin: TransactionOrigin, - transaction: Tx, - ) -> TransactionValidationOutcome { - let mut provider = None; - self.validate_one_with_provider(origin, transaction, &mut provider) - } - /// Validates all given transactions. 
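The EIP-2681 guard added above reduces to a single comparison: account nonces are capped at 2^64 - 1, so a transaction whose nonce is already `u64::MAX` could never be executed and is rejected before any state lookup. A minimal sketch with a stand-in error type, not the reth validator:

```rust
#[derive(Debug, PartialEq)]
enum Rejection {
    // Mirrors `InvalidPoolTransactionError::Eip2681`.
    Eip2681,
}

// Reject transactions whose nonce already sits at the EIP-2681 cap.
fn check_nonce_eip2681(tx_nonce: u64) -> Result<(), Rejection> {
    if tx_nonce == u64::MAX {
        return Err(Rejection::Eip2681);
    }
    Ok(())
}

fn main() {
    assert_eq!(check_nonce_eip2681(u64::MAX), Err(Rejection::Eip2681));
    assert!(check_nonce_eip2681(0).is_ok());
}
```

Because a sender's nonce can never decrease, the error is classified as a bad transaction (`Self::Eip2681 => true` in `is_bad_transaction`), so the peer that sent it can be penalized.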
fn validate_batch( &self, @@ -693,7 +723,7 @@ where { self.fork_tracker .max_blob_count - .store(blob_params.max_blob_count, std::sync::atomic::Ordering::Relaxed); + .store(blob_params.max_blobs_per_tx, std::sync::atomic::Ordering::Relaxed); } self.block_gas_limit.store(new_tip_block.gas_limit(), std::sync::atomic::Ordering::Relaxed); @@ -748,12 +778,13 @@ pub struct EthTransactionValidatorBuilder { impl EthTransactionValidatorBuilder { /// Creates a new builder for the given client /// - /// By default this assumes the network is on the `Cancun` hardfork and the following + /// By default this assumes the network is on the `Prague` hardfork and the following /// transactions are allowed: /// - Legacy /// - EIP-2718 /// - EIP-1559 /// - EIP-4844 + /// - EIP-7702 pub fn new(client: Client) -> Self { Self { block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M.into(), @@ -783,7 +814,7 @@ impl EthTransactionValidatorBuilder { osaka: false, // max blob count is prague by default - max_blob_count: BlobParams::prague().max_blob_count, + max_blob_count: BlobParams::prague().max_blobs_per_tx, } } @@ -907,7 +938,7 @@ impl EthTransactionValidatorBuilder { .chain_spec() .blob_params_at_timestamp(timestamp) .unwrap_or_else(BlobParams::cancun) - .max_blob_count; + .max_blobs_per_tx; self } @@ -957,11 +988,10 @@ impl EthTransactionValidatorBuilder { .. } = self; - // TODO: use osaka max blob count once is released let max_blob_count = if prague { - BlobParams::prague().max_blob_count + BlobParams::prague().max_blobs_per_tx } else { - BlobParams::cancun().max_blob_count + BlobParams::cancun().max_blobs_per_tx }; let fork_tracker = ForkTracker { @@ -972,6 +1002,9 @@ impl EthTransactionValidatorBuilder { max_blob_count: AtomicU64::new(max_blob_count), }; + // Ensure the kzg setup is loaded right away. + let _kzg_settings = kzg_settings.get(); + let inner = EthTransactionValidatorInner { client, eip2718, @@ -1047,7 +1080,7 @@ pub struct ForkTracker { pub prague: AtomicBool, /// Tracks if osaka is activated at the block's timestamp. pub osaka: AtomicBool, - /// Tracks max blob count at the block's timestamp. + /// Tracks max blob count per transaction at the block's timestamp. pub max_blob_count: AtomicU64, } @@ -1072,7 +1105,7 @@ impl ForkTracker { self.osaka.load(std::sync::atomic::Ordering::Relaxed) } - /// Returns the max blob count. + /// Returns the max allowed blob count per transaction. 
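`ForkTracker` publishes fork-dependent limits to concurrently running validation tasks through atomics; after this change the stored value is the per-transaction blob limit (`max_blobs_per_tx`) rather than a per-block count. A condensed sketch of the publish/read pattern, assuming relaxed ordering is sufficient for an independent counter:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// Shared fork state: written on new head blocks, read during validation.
struct ForkTracker {
    max_blob_count: AtomicU64,
}

impl ForkTracker {
    // Called from `on_new_head_block` with the active `max_blobs_per_tx`.
    fn update(&self, max_blobs_per_tx: u64) {
        self.max_blob_count.store(max_blobs_per_tx, Ordering::Relaxed);
    }

    // Called by validation tasks; a plain counter with no cross-variable
    // invariants, so relaxed ordering suffices.
    fn max_blob_count(&self) -> u64 {
        self.max_blob_count.load(Ordering::Relaxed)
    }
}

fn main() {
    let tracker = ForkTracker { max_blob_count: AtomicU64::new(6) };
    tracker.update(9); // e.g. a fork raising the per-transaction limit
    assert_eq!(tracker.max_blob_count(), 9);
}
```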
pub fn max_blob_count(&self) -> u64 { self.max_blob_count.load(std::sync::atomic::Ordering::Relaxed) } diff --git a/crates/trie/common/benches/prefix_set.rs b/crates/trie/common/benches/prefix_set.rs index 5883b2d17dd..1448e41502e 100644 --- a/crates/trie/common/benches/prefix_set.rs +++ b/crates/trie/common/benches/prefix_set.rs @@ -97,7 +97,7 @@ fn prefix_set_bench( let setup = || { let mut prefix_set = T::default(); for key in &preload { - prefix_set.insert(key.clone()); + prefix_set.insert(*key); } (prefix_set.freeze(), input.clone(), expected.clone()) }; @@ -131,7 +131,7 @@ fn generate_test_data(size: usize) -> (Vec, Vec, Vec) { let expected = input .iter() - .map(|prefix| preload.iter().any(|key| key.has_prefix(prefix))) + .map(|prefix| preload.iter().any(|key| key.starts_with(prefix))) .collect::>(); (preload, input, expected) } @@ -162,7 +162,7 @@ mod implementations { impl PrefixSetAbstraction for BTreeAnyPrefixSet { fn contains(&mut self, key: Nibbles) -> bool { - self.keys.iter().any(|k| k.has_prefix(&key)) + self.keys.iter().any(|k| k.starts_with(&key)) } } @@ -193,7 +193,7 @@ mod implementations { None => (Bound::Unbounded, Bound::Unbounded), }; for key in self.keys.range::(range) { - if key.has_prefix(&prefix) { + if key.starts_with(&prefix) { self.last_checked = Some(prefix); return true } @@ -237,7 +237,7 @@ mod implementations { match self.keys.binary_search(&prefix) { Ok(_) => true, Err(idx) => match self.keys.get(idx) { - Some(key) => key.has_prefix(&prefix), + Some(key) => key.starts_with(&prefix), None => false, // prefix > last key }, } @@ -271,14 +271,12 @@ mod implementations { self.sorted = true; } - let prefix = prefix; - while self.index > 0 && self.keys[self.index] > prefix { self.index -= 1; } for (idx, key) in self.keys[self.index..].iter().enumerate() { - if key.has_prefix(&prefix) { + if key.starts_with(&prefix) { self.index += idx; return true } @@ -329,7 +327,7 @@ mod implementations { Err(idx) => match self.keys.get(idx) { Some(key) => { self.last_found_idx = idx; - key.has_prefix(&prefix) + key.starts_with(&prefix) } None => false, // prefix > last key }, diff --git a/crates/trie/common/src/hash_builder/state.rs b/crates/trie/common/src/hash_builder/state.rs index 76abbd42ac6..0df582f8f5c 100644 --- a/crates/trie/common/src/hash_builder/state.rs +++ b/crates/trie/common/src/hash_builder/state.rs @@ -51,7 +51,7 @@ impl From for HashBuilder { impl From for HashBuilderState { fn from(state: HashBuilder) -> Self { Self { - key: state.key.into(), + key: state.key.to_vec(), stack: state.stack, value: state.value, groups: state.state_masks, diff --git a/crates/trie/common/src/nibbles.rs b/crates/trie/common/src/nibbles.rs index a7db55b854b..537aa07118c 100644 --- a/crates/trie/common/src/nibbles.rs +++ b/crates/trie/common/src/nibbles.rs @@ -22,34 +22,13 @@ impl From> for StoredNibbles { } } -impl PartialEq<[u8]> for StoredNibbles { - #[inline] - fn eq(&self, other: &[u8]) -> bool { - self.0.as_slice() == other - } -} - -impl PartialOrd<[u8]> for StoredNibbles { - #[inline] - fn partial_cmp(&self, other: &[u8]) -> Option { - self.0.as_slice().partial_cmp(other) - } -} - -impl core::borrow::Borrow<[u8]> for StoredNibbles { - #[inline] - fn borrow(&self) -> &[u8] { - self.0.as_slice() - } -} - #[cfg(any(test, feature = "reth-codec"))] impl reth_codecs::Compact for StoredNibbles { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, { - buf.put_slice(self.0.as_slice()); + buf.put_slice(&self.0.to_vec()); self.0.len() } @@ -98,7 +77,7 @@ 
impl reth_codecs::Compact for StoredNibblesSubKey { assert!(self.0.len() <= 64); // right-pad with zeros - buf.put_slice(&self.0[..]); + buf.put_slice(&self.0.to_vec()); static ZERO: &[u8; 64] = &[0; 64]; buf.put_slice(&ZERO[self.0.len()..]); @@ -120,79 +99,58 @@ mod tests { #[test] fn test_stored_nibbles_from_nibbles() { - let nibbles = Nibbles::from_nibbles_unchecked(vec![0x12, 0x34, 0x56]); - let stored = StoredNibbles::from(nibbles.clone()); + let nibbles = Nibbles::from_nibbles_unchecked(vec![0x02, 0x04, 0x06]); + let stored = StoredNibbles::from(nibbles); assert_eq!(stored.0, nibbles); } #[test] fn test_stored_nibbles_from_vec() { - let bytes = vec![0x12, 0x34, 0x56]; - let stored = StoredNibbles::from(bytes.clone()); - assert_eq!(stored.0.as_slice(), bytes.as_slice()); - } - - #[test] - fn test_stored_nibbles_equality() { - let bytes = vec![0x12, 0x34]; + let bytes = vec![0x02, 0x04, 0x06]; let stored = StoredNibbles::from(bytes.clone()); - assert_eq!(stored, *bytes.as_slice()); - } - - #[test] - fn test_stored_nibbles_partial_cmp() { - let stored = StoredNibbles::from(vec![0x12, 0x34]); - let other = vec![0x12, 0x35]; - assert!(stored < *other.as_slice()); + assert_eq!(stored.0.to_vec(), bytes); } #[test] fn test_stored_nibbles_to_compact() { - let stored = StoredNibbles::from(vec![0x12, 0x34]); + let stored = StoredNibbles::from(vec![0x02, 0x04]); let mut buf = BytesMut::with_capacity(10); let len = stored.to_compact(&mut buf); assert_eq!(len, 2); - assert_eq!(buf, &vec![0x12, 0x34][..]); + assert_eq!(buf, &vec![0x02, 0x04][..]); } #[test] fn test_stored_nibbles_from_compact() { - let buf = vec![0x12, 0x34, 0x56]; + let buf = vec![0x02, 0x04, 0x06]; let (stored, remaining) = StoredNibbles::from_compact(&buf, 2); - assert_eq!(stored.0.as_slice(), &[0x12, 0x34]); - assert_eq!(remaining, &[0x56]); - } - - #[test] - fn test_stored_nibbles_subkey_from_nibbles() { - let nibbles = Nibbles::from_nibbles_unchecked(vec![0x12, 0x34]); - let subkey = StoredNibblesSubKey::from(nibbles.clone()); - assert_eq!(subkey.0, nibbles); + assert_eq!(stored.0.to_vec(), vec![0x02, 0x04]); + assert_eq!(remaining, &[0x06]); } #[test] fn test_stored_nibbles_subkey_to_compact() { - let subkey = StoredNibblesSubKey::from(vec![0x12, 0x34]); + let subkey = StoredNibblesSubKey::from(vec![0x02, 0x04]); let mut buf = BytesMut::with_capacity(65); let len = subkey.to_compact(&mut buf); assert_eq!(len, 65); - assert_eq!(buf[..2], [0x12, 0x34]); + assert_eq!(buf[..2], [0x02, 0x04]); assert_eq!(buf[64], 2); // Length byte } #[test] fn test_stored_nibbles_subkey_from_compact() { - let mut buf = vec![0x12, 0x34]; + let mut buf = vec![0x02, 0x04]; buf.resize(65, 0); buf[64] = 2; let (subkey, remaining) = StoredNibblesSubKey::from_compact(&buf, 65); - assert_eq!(subkey.0.as_slice(), &[0x12, 0x34]); + assert_eq!(subkey.0.to_vec(), vec![0x02, 0x04]); assert_eq!(remaining, &[] as &[u8]); } #[test] fn test_serialization_stored_nibbles() { - let stored = StoredNibbles::from(vec![0x12, 0x34]); + let stored = StoredNibbles::from(vec![0x02, 0x04]); let serialized = serde_json::to_string(&stored).unwrap(); let deserialized: StoredNibbles = serde_json::from_str(&serialized).unwrap(); assert_eq!(stored, deserialized); @@ -200,7 +158,7 @@ mod tests { #[test] fn test_serialization_stored_nibbles_subkey() { - let subkey = StoredNibblesSubKey::from(vec![0x12, 0x34]); + let subkey = StoredNibblesSubKey::from(vec![0x02, 0x04]); let serialized = serde_json::to_string(&subkey).unwrap(); let deserialized: StoredNibblesSubKey = 
serde_json::from_str(&serialized).unwrap(); assert_eq!(subkey, deserialized); diff --git a/crates/trie/common/src/prefix_set.rs b/crates/trie/common/src/prefix_set.rs index 844f3de1b62..e1f4150dd25 100644 --- a/crates/trie/common/src/prefix_set.rs +++ b/crates/trie/common/src/prefix_set.rs @@ -83,8 +83,8 @@ pub struct TriePrefixSets { /// prefix_set_mut.insert(Nibbles::from_nibbles_unchecked(&[0xa, 0xb])); /// prefix_set_mut.insert(Nibbles::from_nibbles_unchecked(&[0xa, 0xb, 0xc])); /// let mut prefix_set = prefix_set_mut.freeze(); -/// assert!(prefix_set.contains(&[0xa, 0xb])); -/// assert!(prefix_set.contains(&[0xa, 0xb, 0xc])); +/// assert!(prefix_set.contains(&Nibbles::from_nibbles_unchecked([0xa, 0xb]))); +/// assert!(prefix_set.contains(&Nibbles::from_nibbles_unchecked([0xa, 0xb, 0xc]))); /// ``` #[derive(PartialEq, Eq, Clone, Default, Debug)] pub struct PrefixSetMut { @@ -193,7 +193,7 @@ impl PrefixSet { /// incremental state root calculation performance /// ([see PR #2417](https://github.com/paradigmxyz/reth/pull/2417)). #[inline] - pub fn contains(&mut self, prefix: &[u8]) -> bool { + pub fn contains(&mut self, prefix: &Nibbles) -> bool { if self.all { return true } @@ -203,7 +203,7 @@ impl PrefixSet { } for (idx, key) in self.keys[self.index..].iter().enumerate() { - if key.has_prefix(prefix) { + if key.starts_with(prefix) { self.index += idx; return true } @@ -222,6 +222,11 @@ impl PrefixSet { self.keys.iter() } + /// Returns true if every entry should be considered changed. + pub const fn all(&self) -> bool { + self.all + } + /// Returns the number of elements in the set. pub fn len(&self) -> usize { self.keys.len() @@ -254,9 +259,9 @@ mod tests { prefix_set_mut.insert(Nibbles::from_nibbles([1, 2, 3])); // Duplicate let mut prefix_set = prefix_set_mut.freeze(); - assert!(prefix_set.contains(&[1, 2])); - assert!(prefix_set.contains(&[4, 5])); - assert!(!prefix_set.contains(&[7, 8])); + assert!(prefix_set.contains(&Nibbles::from_nibbles_unchecked([1, 2]))); + assert!(prefix_set.contains(&Nibbles::from_nibbles_unchecked([4, 5]))); + assert!(!prefix_set.contains(&Nibbles::from_nibbles_unchecked([7, 8]))); assert_eq!(prefix_set.len(), 3); // Length should be 3 (excluding duplicate) } @@ -272,9 +277,9 @@ mod tests { assert_eq!(prefix_set_mut.keys.capacity(), 4); // Capacity should be 4 (including duplicate) let mut prefix_set = prefix_set_mut.freeze(); - assert!(prefix_set.contains(&[1, 2])); - assert!(prefix_set.contains(&[4, 5])); - assert!(!prefix_set.contains(&[7, 8])); + assert!(prefix_set.contains(&Nibbles::from_nibbles_unchecked([1, 2]))); + assert!(prefix_set.contains(&Nibbles::from_nibbles_unchecked([4, 5]))); + assert!(!prefix_set.contains(&Nibbles::from_nibbles_unchecked([7, 8]))); assert_eq!(prefix_set.keys.len(), 3); // Length should be 3 (excluding duplicate) assert_eq!(prefix_set.keys.capacity(), 3); // Capacity should be 3 after shrinking } @@ -292,9 +297,9 @@ mod tests { assert_eq!(prefix_set_mut.keys.capacity(), 101); // Capacity should be 101 (including duplicate) let mut prefix_set = prefix_set_mut.freeze(); - assert!(prefix_set.contains(&[1, 2])); - assert!(prefix_set.contains(&[4, 5])); - assert!(!prefix_set.contains(&[7, 8])); + assert!(prefix_set.contains(&Nibbles::from_nibbles_unchecked([1, 2]))); + assert!(prefix_set.contains(&Nibbles::from_nibbles_unchecked([4, 5]))); + assert!(!prefix_set.contains(&Nibbles::from_nibbles_unchecked([7, 8]))); assert_eq!(prefix_set.keys.len(), 3); // Length should be 3 (excluding duplicate) 
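With the typed API above, `contains` takes `&Nibbles` and relies on two invariants: the frozen keys are sorted, and lookups arrive in roughly increasing order, so the internal cursor only needs local adjustment. A self-contained sketch of that scan, with plain byte slices standing in for `Nibbles`:

```rust
// Frozen prefix set: sorted, deduplicated keys plus a forward-moving cursor.
struct PrefixSet {
    keys: Vec<Vec<u8>>,
    index: usize,
}

impl PrefixSet {
    fn contains(&mut self, prefix: &[u8]) -> bool {
        // Back up if the cursor overshot a smaller query.
        while self.index > 0 && self.keys[self.index].as_slice() > prefix {
            self.index -= 1;
        }
        // Scan forward; a hit is any stored key that starts with the query.
        for (idx, key) in self.keys[self.index..].iter().enumerate() {
            if key.starts_with(prefix) {
                self.index += idx;
                return true;
            }
        }
        false
    }
}

fn main() {
    let mut set = PrefixSet { keys: vec![vec![1, 2, 3], vec![4, 5]], index: 0 };
    assert!(set.contains(&[1, 2]));
    assert!(set.contains(&[4, 5]));
    assert!(!set.contains(&[7, 8]));
}
```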
assert_eq!(prefix_set.keys.capacity(), 3); // Capacity should be 3 after shrinking } diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index 96209382d3c..5c3b55b0920 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -669,7 +669,6 @@ impl AccountProof { } /// Verify the storage proofs and account proof against the provided state root. - #[expect(clippy::result_large_err)] pub fn verify(&self, root: B256) -> Result<(), ProofVerificationError> { // Verify storage proofs. for storage_proof in &self.storage_proofs { @@ -763,11 +762,10 @@ impl StorageProof { } /// Verify the proof against the provided storage root. - #[expect(clippy::result_large_err)] pub fn verify(&self, root: B256) -> Result<(), ProofVerificationError> { let expected = if self.value.is_zero() { None } else { Some(encode_fixed_size(&self.value).to_vec()) }; - verify_proof(root, self.nibbles.clone(), expected, &self.proof) + verify_proof(root, self.nibbles, expected, &self.proof) } } diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index e3146f50f90..dd82f4e192c 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -1,8 +1,11 @@ use crate::{BranchNodeCompact, HashBuilder, Nibbles}; -use alloc::vec::Vec; +use alloc::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + vec::Vec, +}; use alloy_primitives::{ map::{B256Map, B256Set, HashMap, HashSet}, - B256, + FixedBytes, B256, }; /// The aggregation of trie updates. @@ -58,9 +61,9 @@ impl TrieUpdates { pub fn extend_ref(&mut self, other: &Self) { self.extend_common(other); self.account_nodes.extend(exclude_empty_from_pair( - other.account_nodes.iter().map(|(k, v)| (k.clone(), v.clone())), + other.account_nodes.iter().map(|(k, v)| (*k, v.clone())), )); - self.removed_nodes.extend(exclude_empty(other.removed_nodes.iter().cloned())); + self.removed_nodes.extend(exclude_empty(other.removed_nodes.iter().copied())); for (hashed_address, storage_trie) in &other.storage_tries { self.storage_tries.entry(*hashed_address).or_default().extend_ref(storage_trie); } @@ -114,6 +117,22 @@ impl TrieUpdates { .collect(); TrieUpdatesSorted { removed_nodes: self.removed_nodes, account_nodes, storage_tries } } + + /// Converts trie updates into [`TrieUpdatesSortedRef`]. + pub fn into_sorted_ref<'a>(&'a self) -> TrieUpdatesSortedRef<'a> { + let mut account_nodes = self.account_nodes.iter().collect::>(); + account_nodes.sort_unstable_by(|a, b| a.0.cmp(b.0)); + + TrieUpdatesSortedRef { + removed_nodes: self.removed_nodes.iter().collect::>(), + account_nodes, + storage_tries: self + .storage_tries + .iter() + .map(|m| (*m.0, m.1.into_sorted_ref().clone())) + .collect(), + } + } } /// Trie updates for storage trie of a single account. @@ -191,9 +210,9 @@ impl StorageTrieUpdates { pub fn extend_ref(&mut self, other: &Self) { self.extend_common(other); self.storage_nodes.extend(exclude_empty_from_pair( - other.storage_nodes.iter().map(|(k, v)| (k.clone(), v.clone())), + other.storage_nodes.iter().map(|(k, v)| (*k, v.clone())), )); - self.removed_nodes.extend(exclude_empty(other.removed_nodes.iter().cloned())); + self.removed_nodes.extend(exclude_empty(other.removed_nodes.iter().copied())); } fn extend_common(&mut self, other: &Self) { @@ -225,6 +244,15 @@ impl StorageTrieUpdates { storage_nodes, } } + + /// Convert storage trie updates into [`StorageTrieUpdatesSortedRef`]. 
+ pub fn into_sorted_ref(&self) -> StorageTrieUpdatesSortedRef<'_> { + StorageTrieUpdatesSortedRef { + is_deleted: self.is_deleted, + removed_nodes: self.removed_nodes.iter().collect::>(), + storage_nodes: self.storage_nodes.iter().collect::>(), + } + } } /// Serializes and deserializes any [`HashSet`] that includes [`Nibbles`] elements, by using the @@ -350,8 +378,21 @@ mod serde_nibbles_map { } } +/// Sorted trie updates reference used for serializing trie to file. +#[derive(PartialEq, Eq, Clone, Default, Debug)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize))] +pub struct TrieUpdatesSortedRef<'a> { + /// Sorted collection of updated state nodes with corresponding paths. + pub account_nodes: Vec<(&'a Nibbles, &'a BranchNodeCompact)>, + /// The set of removed state node keys. + pub removed_nodes: BTreeSet<&'a Nibbles>, + /// Storage tries stored by hashed address of the account the trie belongs to. + pub storage_tries: BTreeMap, StorageTrieUpdatesSortedRef<'a>>, +} + /// Sorted trie updates used for lookups and insertions. #[derive(PartialEq, Eq, Clone, Default, Debug)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] pub struct TrieUpdatesSorted { /// Sorted collection of updated state nodes with corresponding paths. pub account_nodes: Vec<(Nibbles, BranchNodeCompact)>, @@ -379,8 +420,21 @@ impl TrieUpdatesSorted { } } +/// Sorted storage trie updates reference used for serializing to file. +#[derive(PartialEq, Eq, Clone, Default, Debug)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize))] +pub struct StorageTrieUpdatesSortedRef<'a> { + /// Flag indicating whether the trie has been deleted/wiped. + pub is_deleted: bool, + /// Sorted collection of updated storage nodes with corresponding paths. + pub storage_nodes: BTreeMap<&'a Nibbles, &'a BranchNodeCompact>, + /// The set of removed storage node keys. + pub removed_nodes: BTreeSet<&'a Nibbles>, +} + /// Sorted trie updates used for lookups and insertions. #[derive(PartialEq, Eq, Clone, Default, Debug)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] pub struct StorageTrieUpdatesSorted { /// Flag indicating whether the trie has been deleted/wiped. 
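The new `TrieUpdatesSortedRef` and `StorageTrieUpdatesSortedRef` types borrow from the unsorted maps instead of cloning them, giving a deterministic, cheap-to-build view for serialization. The core of the pattern, with simplified key/value types:

```rust
use std::collections::HashMap;

// Build a sorted view of references into an unsorted map so the entries can
// be serialized in a deterministic order without cloning the values.
fn sorted_ref(nodes: &HashMap<Vec<u8>, String>) -> Vec<(&Vec<u8>, &String)> {
    let mut entries: Vec<_> = nodes.iter().collect();
    entries.sort_unstable_by(|a, b| a.0.cmp(b.0));
    entries
}

fn main() {
    let mut nodes = HashMap::new();
    nodes.insert(vec![0x0b], "branch-b".to_string());
    nodes.insert(vec![0x03], "branch-3".to_string());

    let sorted = sorted_ref(&nodes);
    // Deterministic order regardless of hash-map iteration order.
    assert_eq!(sorted[0].0, &vec![0x03]);
}
```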
pub is_deleted: bool, @@ -581,13 +635,15 @@ pub mod serde_bincode_compat { let decoded: Data = bincode::deserialize(&encoded).unwrap(); assert_eq!(decoded, data); - data.trie_updates.removed_nodes.insert(Nibbles::from_vec(vec![0x0b, 0x0e, 0x0e, 0x0f])); + data.trie_updates + .removed_nodes + .insert(Nibbles::from_nibbles_unchecked([0x0b, 0x0e, 0x0e, 0x0f])); let encoded = bincode::serialize(&data).unwrap(); let decoded: Data = bincode::deserialize(&encoded).unwrap(); assert_eq!(decoded, data); data.trie_updates.account_nodes.insert( - Nibbles::from_vec(vec![0x0d, 0x0e, 0x0a, 0x0d]), + Nibbles::from_nibbles_unchecked([0x0d, 0x0e, 0x0a, 0x0d]), BranchNodeCompact::default(), ); let encoded = bincode::serialize(&data).unwrap(); @@ -614,13 +670,15 @@ pub mod serde_bincode_compat { let decoded: Data = bincode::deserialize(&encoded).unwrap(); assert_eq!(decoded, data); - data.trie_updates.removed_nodes.insert(Nibbles::from_vec(vec![0x0b, 0x0e, 0x0e, 0x0f])); + data.trie_updates + .removed_nodes + .insert(Nibbles::from_nibbles_unchecked([0x0b, 0x0e, 0x0e, 0x0f])); let encoded = bincode::serialize(&data).unwrap(); let decoded: Data = bincode::deserialize(&encoded).unwrap(); assert_eq!(decoded, data); data.trie_updates.storage_nodes.insert( - Nibbles::from_vec(vec![0x0d, 0x0e, 0x0a, 0x0d]), + Nibbles::from_nibbles_unchecked([0x0d, 0x0e, 0x0a, 0x0d]), BranchNodeCompact::default(), ); let encoded = bincode::serialize(&data).unwrap(); @@ -641,14 +699,17 @@ mod tests { let updates_deserialized: TrieUpdates = serde_json::from_str(&updates_serialized).unwrap(); assert_eq!(updates_deserialized, default_updates); - default_updates.removed_nodes.insert(Nibbles::from_vec(vec![0x0b, 0x0e, 0x0e, 0x0f])); + default_updates + .removed_nodes + .insert(Nibbles::from_nibbles_unchecked([0x0b, 0x0e, 0x0e, 0x0f])); let updates_serialized = serde_json::to_string(&default_updates).unwrap(); let updates_deserialized: TrieUpdates = serde_json::from_str(&updates_serialized).unwrap(); assert_eq!(updates_deserialized, default_updates); - default_updates - .account_nodes - .insert(Nibbles::from_vec(vec![0x0d, 0x0e, 0x0a, 0x0d]), BranchNodeCompact::default()); + default_updates.account_nodes.insert( + Nibbles::from_nibbles_unchecked([0x0d, 0x0e, 0x0a, 0x0d]), + BranchNodeCompact::default(), + ); let updates_serialized = serde_json::to_string(&default_updates).unwrap(); let updates_deserialized: TrieUpdates = serde_json::from_str(&updates_serialized).unwrap(); assert_eq!(updates_deserialized, default_updates); @@ -667,15 +728,18 @@ mod tests { serde_json::from_str(&updates_serialized).unwrap(); assert_eq!(updates_deserialized, default_updates); - default_updates.removed_nodes.insert(Nibbles::from_vec(vec![0x0b, 0x0e, 0x0e, 0x0f])); + default_updates + .removed_nodes + .insert(Nibbles::from_nibbles_unchecked([0x0b, 0x0e, 0x0e, 0x0f])); let updates_serialized = serde_json::to_string(&default_updates).unwrap(); let updates_deserialized: StorageTrieUpdates = serde_json::from_str(&updates_serialized).unwrap(); assert_eq!(updates_deserialized, default_updates); - default_updates - .storage_nodes - .insert(Nibbles::from_vec(vec![0x0d, 0x0e, 0x0a, 0x0d]), BranchNodeCompact::default()); + default_updates.storage_nodes.insert( + Nibbles::from_nibbles_unchecked([0x0d, 0x0e, 0x0a, 0x0d]), + BranchNodeCompact::default(), + ); let updates_serialized = serde_json::to_string(&default_updates).unwrap(); let updates_deserialized: StorageTrieUpdates = serde_json::from_str(&updates_serialized).unwrap(); diff --git 
a/crates/trie/db/src/trie_cursor.rs b/crates/trie/db/src/trie_cursor.rs index ad6b8eac171..d4cfa22f309 100644 --- a/crates/trie/db/src/trie_cursor.rs +++ b/crates/trie/db/src/trie_cursor.rs @@ -140,7 +140,7 @@ where let mut num_entries = 0; for (nibbles, maybe_updated) in storage_updates.into_iter().filter(|(n, _)| !n.is_empty()) { num_entries += 1; - let nibbles = StoredNibblesSubKey(nibbles.clone()); + let nibbles = StoredNibblesSubKey(*nibbles); // Delete the old entry if it exists. if self .cursor @@ -175,7 +175,7 @@ where ) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> { Ok(self .cursor - .seek_by_key_subkey(self.hashed_address, StoredNibblesSubKey(key.clone()))? + .seek_by_key_subkey(self.hashed_address, StoredNibblesSubKey(key))? .filter(|e| e.nibbles == StoredNibblesSubKey(key)) .map(|value| (value.nibbles.0, value.node))) } diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs index 232d36e66e6..4b56911b518 100644 --- a/crates/trie/db/tests/trie.rs +++ b/crates/trie/db/tests/trie.rs @@ -431,7 +431,7 @@ fn account_and_storage_trie() { assert_eq!(account_updates.len(), 2); let (nibbles1a, node1a) = account_updates.first().unwrap(); - assert_eq!(nibbles1a[..], [0xB]); + assert_eq!(nibbles1a.to_vec(), vec![0xB]); assert_eq!(node1a.state_mask, TrieMask::new(0b1011)); assert_eq!(node1a.tree_mask, TrieMask::new(0b0001)); assert_eq!(node1a.hash_mask, TrieMask::new(0b1001)); @@ -439,7 +439,7 @@ fn account_and_storage_trie() { assert_eq!(node1a.hashes.len(), 2); let (nibbles2a, node2a) = account_updates.last().unwrap(); - assert_eq!(nibbles2a[..], [0xB, 0x0]); + assert_eq!(nibbles2a.to_vec(), vec![0xB, 0x0]); assert_eq!(node2a.state_mask, TrieMask::new(0b10001)); assert_eq!(node2a.tree_mask, TrieMask::new(0b00000)); assert_eq!(node2a.hash_mask, TrieMask::new(0b10000)); @@ -474,7 +474,7 @@ fn account_and_storage_trie() { assert_eq!(account_updates.len(), 2); let (nibbles1b, node1b) = account_updates.first().unwrap(); - assert_eq!(nibbles1b[..], [0xB]); + assert_eq!(nibbles1b.to_vec(), vec![0xB]); assert_eq!(node1b.state_mask, TrieMask::new(0b1011)); assert_eq!(node1b.tree_mask, TrieMask::new(0b0001)); assert_eq!(node1b.hash_mask, TrieMask::new(0b1011)); @@ -484,7 +484,7 @@ fn account_and_storage_trie() { assert_eq!(node1a.hashes[1], node1b.hashes[2]); let (nibbles2b, node2b) = account_updates.last().unwrap(); - assert_eq!(nibbles2b[..], [0xB, 0x0]); + assert_eq!(nibbles2b.to_vec(), vec![0xB, 0x0]); assert_eq!(node2a, node2b); tx.commit().unwrap(); @@ -525,7 +525,7 @@ fn account_and_storage_trie() { assert_eq!(trie_updates.account_nodes_ref().len(), 1); let (nibbles1c, node1c) = trie_updates.account_nodes_ref().iter().next().unwrap(); - assert_eq!(nibbles1c[..], [0xB]); + assert_eq!(nibbles1c.to_vec(), vec![0xB]); assert_eq!(node1c.state_mask, TrieMask::new(0b1011)); assert_eq!(node1c.tree_mask, TrieMask::new(0b0000)); @@ -583,7 +583,7 @@ fn account_and_storage_trie() { assert_eq!(trie_updates.account_nodes_ref().len(), 1); let (nibbles1d, node1d) = trie_updates.account_nodes_ref().iter().next().unwrap(); - assert_eq!(nibbles1d[..], [0xB]); + assert_eq!(nibbles1d.to_vec(), vec![0xB]); assert_eq!(node1d.state_mask, TrieMask::new(0b1011)); assert_eq!(node1d.tree_mask, TrieMask::new(0b0000)); @@ -742,11 +742,11 @@ fn extension_node_trie( fn assert_trie_updates(account_updates: &HashMap<Nibbles, BranchNodeCompact>) { assert_eq!(account_updates.len(), 2); - let node = account_updates.get(&[0x3][..]).unwrap(); + let node = account_updates.get(&Nibbles::from_nibbles_unchecked([0x3])).unwrap(); let expected =
BranchNodeCompact::new(0b0011, 0b0001, 0b0000, vec![], None); assert_eq!(node, &expected); - let node = account_updates.get(&[0x3, 0x0, 0xA, 0xF][..]).unwrap(); + let node = account_updates.get(&Nibbles::from_nibbles_unchecked([0x3, 0x0, 0xA, 0xF])).unwrap(); assert_eq!(node.state_mask, TrieMask::new(0b101100000)); assert_eq!(node.tree_mask, TrieMask::new(0b000000000)); assert_eq!(node.hash_mask, TrieMask::new(0b001000000)); diff --git a/crates/trie/db/tests/walker.rs b/crates/trie/db/tests/walker.rs index 5fd7538cd47..22316cd5ad4 100644 --- a/crates/trie/db/tests/walker.rs +++ b/crates/trie/db/tests/walker.rs @@ -66,7 +66,7 @@ where // We're traversing the path in lexicographical order. for expected in expected { walker.advance().unwrap(); - let got = walker.key().cloned(); + let got = walker.key().copied(); assert_eq!(got.unwrap(), Nibbles::from_nibbles_unchecked(expected.clone())); } @@ -115,10 +115,10 @@ fn cursor_rootnode_with_changesets() { // No changes let mut cursor = TrieWalker::state_trie(&mut trie, Default::default()); - assert_eq!(cursor.key().cloned(), Some(Nibbles::new())); // root + assert_eq!(cursor.key().copied(), Some(Nibbles::new())); // root assert!(cursor.can_skip_current_node); // due to root_hash cursor.advance().unwrap(); // skips to the end of trie - assert_eq!(cursor.key().cloned(), None); + assert_eq!(cursor.key().copied(), None); // We insert something that's not part of the existing trie/prefix. let mut changed = PrefixSetMut::default(); @@ -126,16 +126,16 @@ fn cursor_rootnode_with_changesets() { let mut cursor = TrieWalker::state_trie(&mut trie, changed.freeze()); // Root node - assert_eq!(cursor.key().cloned(), Some(Nibbles::new())); + assert_eq!(cursor.key().copied(), Some(Nibbles::new())); // Should not be able to skip state due to the changed values assert!(!cursor.can_skip_current_node); cursor.advance().unwrap(); - assert_eq!(cursor.key().cloned(), Some(Nibbles::from_nibbles([0x2]))); + assert_eq!(cursor.key().copied(), Some(Nibbles::from_nibbles([0x2]))); cursor.advance().unwrap(); - assert_eq!(cursor.key().cloned(), Some(Nibbles::from_nibbles([0x2, 0x1]))); + assert_eq!(cursor.key().copied(), Some(Nibbles::from_nibbles([0x2, 0x1]))); cursor.advance().unwrap(); - assert_eq!(cursor.key().cloned(), Some(Nibbles::from_nibbles([0x4]))); + assert_eq!(cursor.key().copied(), Some(Nibbles::from_nibbles([0x4]))); cursor.advance().unwrap(); - assert_eq!(cursor.key().cloned(), None); // the end of trie + assert_eq!(cursor.key().copied(), None); // the end of trie } diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index f090ec98c86..940a51a924e 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -25,10 +25,10 @@ use reth_trie::{ trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, updates::TrieUpdatesSorted, walker::TrieWalker, - DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostStateSorted, MultiProof, - MultiProofTargets, Nibbles, StorageMultiProof, TRIE_ACCOUNT_RLP_MAX_SIZE, + DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostStateSorted, + MultiProofTargets, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, }; -use reth_trie_common::proof::ProofRetainer; +use reth_trie_common::proof::{DecodedProofNodes, ProofRetainer}; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use std::sync::{mpsc::Receiver, Arc}; use tracing::debug; @@ -97,7 +97,7 @@ where hashed_address: B256, prefix_set: PrefixSet, target_slots: B256Set, - ) -> Receiver<Result<StorageMultiProof, ParallelStateRootError>> {
+ ) -> Receiver<Result<DecodedStorageMultiProof, ParallelStateRootError>> { let input = StorageProofInput::new( hashed_address, prefix_set, @@ -116,7 +116,7 @@ where self, hashed_address: B256, target_slots: B256Set, - ) -> Result<StorageMultiProof, ParallelStateRootError> { + ) -> Result<DecodedStorageMultiProof, ParallelStateRootError> { let total_targets = target_slots.len(); let prefix_set = PrefixSetMut::from(target_slots.iter().map(Nibbles::unpack)); let prefix_set = prefix_set.freeze(); @@ -152,19 +152,14 @@ where hashed_address: B256, target_slots: B256Set, ) -> Result<DecodedStorageMultiProof, ParallelStateRootError> { - let proof = self.storage_proof(hashed_address, target_slots)?; - - // Now decode the nodes of the proof - let proof = proof.try_into()?; - - Ok(proof) + self.storage_proof(hashed_address, target_slots) } /// Generate a state multiproof according to specified targets. - pub fn multiproof( + pub fn decoded_multiproof( self, targets: MultiProofTargets, - ) -> Result<MultiProof, ParallelStateRootError> { + ) -> Result<DecodedMultiProof, ParallelStateRootError> { let mut tracker = ParallelTrieTracker::default(); // Extend prefix sets with targets @@ -199,7 +194,7 @@ where // stores the receiver for the storage proof outcome for the hashed addresses // this way we can lazily await the outcome when we iterate over the map - let mut storage_proofs = + let mut storage_proof_receivers = B256Map::with_capacity_and_hasher(storage_root_targets.len(), Default::default()); for (hashed_address, prefix_set) in @@ -210,7 +205,7 @@ where // store the receiver for that result with the hashed address so we can await this in // place when we iterate over the trie - storage_proofs.insert(hashed_address, receiver); + storage_proof_receivers.insert(hashed_address, receiver); } let provider_ro = self.view.provider_ro()?; @@ -238,8 +233,8 @@ where // Initialize all storage multiproofs as empty. // Storage multiproofs for non empty tries will be overwritten if necessary. - let mut storages: B256Map<_> = - targets.keys().map(|key| (*key, StorageMultiProof::empty())).collect(); + let mut collected_decoded_storages: B256Map<DecodedStorageMultiProof> = + targets.keys().map(|key| (*key, DecodedStorageMultiProof::empty())).collect(); let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut account_node_iter = TrieNodeIter::state_trie( walker, @@ -253,11 +248,13 @@ where hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } TrieElement::Leaf(hashed_address, account) => { - let storage_multiproof = match storage_proofs.remove(&hashed_address) { - Some(rx) => rx.recv().map_err(|_| { + let decoded_storage_multiproof = match storage_proof_receivers + .remove(&hashed_address) + { + Some(rx) => rx.recv().map_err(|e| { ParallelStateRootError::StorageRoot(StorageRootError::Database( DatabaseError::Other(format!( - "channel closed for {hashed_address}" + "channel closed for {hashed_address}: {e}" )), )) })??, @@ -265,7 +262,8 @@ where // be a possibility of re-adding a non-modified leaf to the hash builder. None => { tracker.inc_missed_leaves(); - StorageProof::new_hashed( + + let raw_fallback_proof = StorageProof::new_hashed( trie_cursor_factory.clone(), hashed_cursor_factory.clone(), hashed_address, @@ -278,20 +276,23 @@ where ParallelStateRootError::StorageRoot(StorageRootError::Database( DatabaseError::Other(e.to_string()), )) - })? + })?; + + raw_fallback_proof.try_into()? } }; // Encode account account_rlp.clear(); - let account = account.into_trie_account(storage_multiproof.root); + let account = account.into_trie_account(decoded_storage_multiproof.root); account.encode(&mut account_rlp as &mut dyn BufMut); hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); // We might be adding leaves that are not necessarily our proof targets.
if targets.contains_key(&hashed_address) { - storages.insert(hashed_address, storage_multiproof); + collected_decoded_storages + .insert(hashed_address, decoded_storage_multiproof); } } } @@ -302,14 +303,13 @@ where #[cfg(feature = "metrics")] self.metrics.record(stats); - let account_subtree = hash_builder.take_proof_nodes(); + let account_subtree_raw_nodes = hash_builder.take_proof_nodes(); + let decoded_account_subtree = DecodedProofNodes::try_from(account_subtree_raw_nodes)?; + let (branch_node_hash_masks, branch_node_tree_masks) = if self.collect_branch_node_masks { let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); ( - updated_branch_nodes - .iter() - .map(|(path, node)| (path.clone(), node.hash_mask)) - .collect(), + updated_branch_nodes.iter().map(|(path, node)| (*path, node.hash_mask)).collect(), updated_branch_nodes .into_iter() .map(|(path, node)| (path, node.tree_mask)) @@ -327,25 +327,15 @@ where leaves_added = stats.leaves_added(), missed_leaves = stats.missed_leaves(), precomputed_storage_roots = stats.precomputed_storage_roots(), - "Calculated proof" + "Calculated decoded proof" ); - Ok(MultiProof { account_subtree, branch_node_hash_masks, branch_node_tree_masks, storages }) - } - - /// Returns a [`DecodedMultiProof`] for the given proof. - /// - /// Uses `multiproof` first to get the proof, and then decodes the nodes of the multiproof. - pub fn decoded_multiproof( - self, - targets: MultiProofTargets, - ) -> Result<DecodedMultiProof, ParallelStateRootError> { - let multiproof = self.multiproof(targets)?; - - // Now decode the nodes of the multiproof - let multiproof = multiproof.try_into()?; - - Ok(multiproof) + Ok(DecodedMultiProof { + account_subtree: decoded_account_subtree, + branch_node_hash_masks, + branch_node_tree_masks, + storages: collected_decoded_storages, + }) } } @@ -446,26 +436,31 @@ mod tests { Default::default(), proof_task_handle.clone(), ) - .multiproof(targets.clone()) + .decoded_multiproof(targets.clone()) .unwrap(); - let sequential_result = - Proof::new(trie_cursor_factory, hashed_cursor_factory).multiproof(targets).unwrap(); + let sequential_result_raw = Proof::new(trie_cursor_factory, hashed_cursor_factory) + .multiproof(targets.clone()) + .unwrap(); // targets might be consumed by parallel_result + let sequential_result_decoded: DecodedMultiProof = sequential_result_raw + .try_into() + .expect("Failed to decode sequential_result for test comparison"); // to help narrow down what is wrong - first compare account subtries - assert_eq!(parallel_result.account_subtree, sequential_result.account_subtree); + assert_eq!(parallel_result.account_subtree, sequential_result_decoded.account_subtree); // then compare length of all storage subtries - assert_eq!(parallel_result.storages.len(), sequential_result.storages.len()); + assert_eq!(parallel_result.storages.len(), sequential_result_decoded.storages.len()); // then compare each storage subtrie for (hashed_address, storage_proof) in &parallel_result.storages { - let sequential_storage_proof = sequential_result.storages.get(hashed_address).unwrap(); + let sequential_storage_proof = + sequential_result_decoded.storages.get(hashed_address).unwrap(); assert_eq!(storage_proof, sequential_storage_proof); } // then compare the entire thing for any mask differences - assert_eq!(parallel_result, sequential_result); + assert_eq!(parallel_result, sequential_result_decoded); // drop the handle to terminate the task and then block on the proof task handle to make // sure it does not return any errors diff --git 
a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 516d92c4daa..4dc78106963 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -22,7 +22,7 @@ use reth_trie::{ proof::{ProofBlindedProviderFactory, StorageProof}, trie_cursor::InMemoryTrieCursorFactory, updates::TrieUpdatesSorted, - HashedPostStateSorted, Nibbles, StorageMultiProof, + DecodedStorageMultiProof, HashedPostStateSorted, Nibbles, }; use reth_trie_common::prefix_set::{PrefixSet, PrefixSetMut}; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; @@ -39,7 +39,7 @@ use std::{ ... }; use tokio::runtime::Handle; use tracing::debug; -type StorageProofResult = Result<StorageMultiProof, ParallelStateRootError>; +type StorageProofResult = Result<DecodedStorageMultiProof, ParallelStateRootError>; type BlindedNodeResult = Result<Option<RevealedNode>, SparseTrieError>; /// A task that manages sending multiproof requests to a number of tasks that have longer-running @@ -244,16 +244,25 @@ where let target_slots_len = input.target_slots.len(); let proof_start = Instant::now(); - let result = StorageProof::new_hashed( + let raw_proof_result = StorageProof::new_hashed( trie_cursor_factory, hashed_cursor_factory, input.hashed_address, ) - .with_prefix_set_mut(PrefixSetMut::from(input.prefix_set.iter().cloned())) + .with_prefix_set_mut(PrefixSetMut::from(input.prefix_set.iter().copied())) .with_branch_node_masks(input.with_branch_node_masks) .storage_multiproof(input.target_slots) .map_err(|e| ParallelStateRootError::Other(e.to_string())); + let decoded_result = raw_proof_result.and_then(|raw_proof| { + raw_proof.try_into().map_err(|e: alloy_rlp::Error| { + ParallelStateRootError::Other(format!( + "Failed to decode storage proof for {}: {}", + input.hashed_address, e + )) + }) + }); + debug!( target: "trie::proof_task", hashed_address=?input.hashed_address, ... ); // send the result back - if let Err(error) = result_sender.send(result) { + if let Err(error) = result_sender.send(decoded_result) { debug!( target: "trie::proof_task", hashed_address = ?input.hashed_address, ?error, task_time = ?proof_start.elapsed(), - "Failed to send proof result" + "Storage proof receiver is dropped, discarding the result" ); } @@ -525,12 +534,12 @@ impl BlindedProvider for ProofTaskBlindedNodeProvider { match self { Self::AccountNode { sender } => { let _ = sender.send(ProofTaskMessage::QueueTask( - ProofTaskKind::BlindedAccountNode(path.clone(), tx), + ProofTaskKind::BlindedAccountNode(*path, tx), )); } Self::StorageNode { sender, account } => { let _ = sender.send(ProofTaskMessage::QueueTask( - ProofTaskKind::BlindedStorageNode(*account, path.clone(), tx), + ProofTaskKind::BlindedStorageNode(*account, *path, tx), )); } } diff --git a/crates/trie/sparse-parallel/Cargo.toml b/crates/trie/sparse-parallel/Cargo.toml new file mode 100644 index 00000000000..21764ff429f --- /dev/null +++ b/crates/trie/sparse-parallel/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "reth-trie-sparse-parallel" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Parallel Sparse MPT implementation" + +[lints] +workspace = true + +[dependencies] +# reth +reth-execution-errors.workspace = true +reth-trie-common.workspace = true +reth-trie-sparse.workspace = true +tracing = { workspace = true, features = ["attributes"] } +alloy-trie.workspace = true + +# alloy +alloy-primitives.workspace = true +alloy-rlp.workspace = true + +# misc +smallvec.workspace = true
+ +[dev-dependencies] +# reth +reth-primitives-traits.workspace = true +reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } +reth-trie.workspace = true +reth-trie-sparse = { workspace = true, features = ["test-utils"] } + +arbitrary.workspace = true +assert_matches.workspace = true +itertools.workspace = true +proptest-arbitrary-interop.workspace = true +proptest.workspace = true +rand.workspace = true +rand_08.workspace = true diff --git a/crates/trie/sparse-parallel/src/lib.rs b/crates/trie/sparse-parallel/src/lib.rs new file mode 100644 index 00000000000..6a8a7048930 --- /dev/null +++ b/crates/trie/sparse-parallel/src/lib.rs @@ -0,0 +1,6 @@ +//! The implementation of parallel sparse MPT. + +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +mod trie; +pub use trie::*; diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs new file mode 100644 index 00000000000..b2d8d147f8c --- /dev/null +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -0,0 +1,2838 @@ +use alloy_primitives::{ + map::{Entry, HashMap}, + B256, +}; +use alloy_rlp::Decodable; +use alloy_trie::{BranchNodeCompact, TrieMask, EMPTY_ROOT_HASH}; +use reth_execution_errors::{SparseTrieErrorKind, SparseTrieResult}; +use reth_trie_common::{ + prefix_set::{PrefixSet, PrefixSetMut}, + BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, RlpNode, TrieNode, CHILD_INDEX_RANGE, +}; +use reth_trie_sparse::{ + blinded::{BlindedProvider, RevealedNode}, + RlpNodeStackItem, SparseNode, SparseNodeType, SparseTrieUpdates, TrieMasks, +}; +use smallvec::SmallVec; +use std::sync::mpsc; +use tracing::{instrument, trace}; + +/// The maximum length of a path, in nibbles, which belongs to the upper subtrie of a +/// [`ParallelSparseTrie`]. All longer paths belong to a lower subtrie. +pub const UPPER_TRIE_MAX_DEPTH: usize = 2; + +/// Number of lower subtries which are managed by the [`ParallelSparseTrie`]. +pub const NUM_LOWER_SUBTRIES: usize = 16usize.pow(UPPER_TRIE_MAX_DEPTH as u32); + +/// A revealed sparse trie with subtries that can be updated in parallel. +/// +/// ## Invariants +/// +/// - Each leaf entry in the `lower_subtries` and `upper_subtrie` collections must have a +/// corresponding entry in the `values` collection. If the root node is a leaf, it must also have +/// an entry in `values`. +/// - All keys in the `values` collection are full leaf paths. +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct ParallelSparseTrie { + /// This contains the trie nodes for the upper part of the trie. + upper_subtrie: Box<SparseSubtrie>, + /// An array containing the subtries at the second level of the trie. + lower_subtries: [Option<Box<SparseSubtrie>>; NUM_LOWER_SUBTRIES], + /// Set of prefixes (key paths) that have been marked as updated. + /// This is used to track which parts of the trie need to be recalculated. + prefix_set: PrefixSetMut, + /// Optional tracking of trie updates for later use. + updates: Option<SparseTrieUpdates>, +} + +impl Default for ParallelSparseTrie { + fn default() -> Self { + Self { + upper_subtrie: Box::default(), + lower_subtries: [const { None }; NUM_LOWER_SUBTRIES], + prefix_set: PrefixSetMut::default(), + updates: None, + } + } +} + +impl ParallelSparseTrie { + /// Returns a mutable reference to the lower `SparseSubtrie` for the given path, or None if the + /// path belongs to the upper trie. + /// + /// This method will create a new lower subtrie if one doesn't exist for the given path.
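+ /// + /// # Example + /// + /// A sketch (not a doctest from this crate; it assumes the subtrie index is derived from the + /// first [`UPPER_TRIE_MAX_DEPTH`] nibbles of the path, matching `NUM_LOWER_SUBTRIES` = 16^2): + /// + /// ```ignore + /// // A three-nibble path is deeper than the upper trie, so it resolves to the lower + /// // subtrie rooted at its first two nibbles, [0x5, 0xA]. + /// let path = Nibbles::from_nibbles_unchecked([0x5, 0xA, 0x3]); + /// assert!(trie.lower_subtrie_for_path(&path).is_some()); + /// ```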
+ fn lower_subtrie_for_path(&mut self, path: &Nibbles) -> Option<&mut Box<SparseSubtrie>> { + match SparseSubtrieType::from_path(path) { + SparseSubtrieType::Upper => None, + SparseSubtrieType::Lower(idx) => { + if self.lower_subtries[idx].is_none() { + let upper_path = path.slice(..UPPER_TRIE_MAX_DEPTH); + self.lower_subtries[idx] = Some(Box::new(SparseSubtrie::new(upper_path))); + } + + self.lower_subtries[idx].as_mut() + } + } + } + + /// Returns a mutable reference to either the lower or upper `SparseSubtrie` for the given path, + /// depending on the path's length. + /// + /// This method will create a new lower subtrie if one doesn't exist for the given path. + fn subtrie_for_path(&mut self, path: &Nibbles) -> &mut Box<SparseSubtrie> { + match SparseSubtrieType::from_path(path) { + SparseSubtrieType::Upper => &mut self.upper_subtrie, + SparseSubtrieType::Lower(idx) => { + if self.lower_subtries[idx].is_none() { + let upper_path = path.slice(..UPPER_TRIE_MAX_DEPTH); + self.lower_subtries[idx] = Some(Box::new(SparseSubtrie::new(upper_path))); + } + + self.lower_subtries[idx].as_mut().unwrap() + } + } + } + + /// Creates a new revealed sparse trie from the given root node. + /// + /// # Returns + /// + /// A [`ParallelSparseTrie`] if successful, or an error if revealing fails. + pub fn from_root( + root_node: TrieNode, + masks: TrieMasks, + retain_updates: bool, + ) -> SparseTrieResult<Self> { + let mut trie = Self::default().with_updates(retain_updates); + trie.reveal_node(Nibbles::default(), root_node, masks)?; + Ok(trie) + } + + /// Reveals a trie node if it has not been revealed before. + /// + /// This internal function decodes a trie node and inserts it into the nodes map. + /// It handles different node types (leaf, extension, branch) by appropriately + /// adding them to the trie structure and recursively revealing their children. + /// + /// # Returns + /// + /// `Ok(())` if successful, or an error if the node was not revealed. + pub fn reveal_node( + &mut self, + path: Nibbles, + node: TrieNode, + masks: TrieMasks, + ) -> SparseTrieResult<()> { + if let Some(subtrie) = self.lower_subtrie_for_path(&path) { + return subtrie.reveal_node(path, &node, masks); + } + + // If there is no subtrie for the path it means the path is UPPER_TRIE_MAX_DEPTH or less + // nibbles, and so belongs to the upper trie. + self.upper_subtrie.reveal_node(path, &node, masks)?; + + // The previous `upper_subtrie.reveal_node` call will not have revealed any child nodes via + // reveal_node_or_hash if the child node would be found on a lower subtrie. We handle that + // here by manually checking the specific cases where this could happen, and calling + // reveal_node_or_hash for each. + match node { + TrieNode::Branch(branch) => { + // If a branch is at the cutoff level of the trie then it will be in the upper trie, + // but all of its children will be in a lower trie. Check if a child node would be + // in the lower subtrie, and reveal accordingly.
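+ // For example, with `UPPER_TRIE_MAX_DEPTH` = 2, a branch at path [0x5] lives in the + // upper trie, while a child revealed at [0x5, 0xA] becomes the root of the lower + // subtrie for that two-nibble prefix.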
+ if !SparseSubtrieType::path_len_is_upper(path.len() + 1) { + let mut stack_ptr = branch.as_ref().first_child_index(); + for idx in CHILD_INDEX_RANGE { + if branch.state_mask.is_bit_set(idx) { + let mut child_path = path; + child_path.push_unchecked(idx); + self.lower_subtrie_for_path(&child_path) + .expect("child_path must have a lower subtrie") + .reveal_node_or_hash(child_path, &branch.stack[stack_ptr])?; + stack_ptr += 1; + } + } + } + } + TrieNode::Extension(ext) => { + let mut child_path = path; + child_path.extend(&ext.key); + if let Some(subtrie) = self.lower_subtrie_for_path(&child_path) { + subtrie.reveal_node_or_hash(child_path, &ext.child)?; + } + } + TrieNode::EmptyRoot | TrieNode::Leaf(_) => (), + } + + Ok(()) + } + + /// Updates or inserts a leaf node at the specified key path with the provided RLP-encoded + /// value. + /// + /// This method updates the internal prefix set and, if the leaf did not previously exist, + /// adjusts the trie structure by inserting new leaf nodes, splitting branch nodes, or + /// collapsing extension nodes as needed. + /// + /// # Returns + /// + /// Returns `Ok(())` if the update is successful. + /// + /// Note: If an update requires revealing a blinded node, an error is returned if the blinded + /// provider returns an error. + pub fn update_leaf( + &mut self, + key_path: Nibbles, + value: Vec<u8>, + masks: TrieMasks, + provider: impl BlindedProvider, + ) -> SparseTrieResult<()> { + let _key_path = key_path; + let _value = value; + let _masks = masks; + let _provider = provider; + todo!() + } + + /// Returns the next node in the traversal path from the given path towards the leaf for the + /// given full leaf path, or an error if any node along the traversal path is not revealed. + /// + /// ## Panics + /// + /// If `from_path` is not a prefix of `leaf_full_path`. + fn find_next_to_leaf( + from_path: &Nibbles, + from_node: &SparseNode, + leaf_full_path: &Nibbles, + ) -> SparseTrieResult<FindNextToLeafOutcome> { + debug_assert!(leaf_full_path.len() >= from_path.len()); + debug_assert!(leaf_full_path.starts_with(from_path)); + + match from_node { + SparseNode::Empty => Err(SparseTrieErrorKind::Blind.into()), + SparseNode::Hash(hash) => { + Err(SparseTrieErrorKind::BlindedNode { path: *from_path, hash: *hash }.into()) + } + SparseNode::Leaf { key, .. } => { + let mut found_full_path = *from_path; + found_full_path.extend(key); + + if &found_full_path == leaf_full_path { + return Ok(FindNextToLeafOutcome::Found) + } + Ok(FindNextToLeafOutcome::NotFound) + } + SparseNode::Extension { key, .. } => { + if leaf_full_path.len() == from_path.len() { + return Ok(FindNextToLeafOutcome::NotFound) + } + + let mut child_path = *from_path; + child_path.extend(key); + + if !leaf_full_path.starts_with(&child_path) { + return Ok(FindNextToLeafOutcome::NotFound) + } + Ok(FindNextToLeafOutcome::ContinueFrom(child_path)) + } + SparseNode::Branch { state_mask, .. } => { + if leaf_full_path.len() == from_path.len() { + return Ok(FindNextToLeafOutcome::NotFound) + } + + let nibble = leaf_full_path.get_unchecked(from_path.len()); + if !state_mask.is_bit_set(nibble) { + return Ok(FindNextToLeafOutcome::NotFound) + } + + let mut child_path = *from_path; + child_path.push_unchecked(nibble); + + Ok(FindNextToLeafOutcome::ContinueFrom(child_path)) + } + } + } + + /// Called when a child node has collapsed into its parent as part of `remove_leaf`.
If the + /// new parent node is a leaf, then the previous child also was, and if the previous child was + /// on a lower subtrie while the parent is on an upper then the leaf value needs to be moved to + /// the upper. + fn move_value_on_leaf_removal( + &mut self, + parent_path: &Nibbles, + new_parent_node: &SparseNode, + prev_child_path: &Nibbles, + ) { + // If the parent path isn't in the upper then it doesn't matter what the new node is, + // there's no situation where a leaf value needs to be moved. + if SparseSubtrieType::from_path(parent_path).lower_index().is_some() { + return; + } + + if let SparseNode::Leaf { key, .. } = new_parent_node { + let Some(prev_child_subtrie) = self.lower_subtrie_for_path(prev_child_path) else { + return; + }; + + let mut leaf_full_path = *parent_path; + leaf_full_path.extend(key); + + let val = prev_child_subtrie.inner.values.remove(&leaf_full_path).expect("ParallelSparseTrie is in an inconsistent state, expected value on subtrie which wasn't found"); + self.upper_subtrie.inner.values.insert(leaf_full_path, val); + } + } + + /// Given the path to a parent branch node and a child node which is the sole remaining child on + /// that branch after removing a leaf, returns a node to replace the parent branch node and a + /// boolean indicating if the child should be deleted. + /// + /// ## Panics + /// + /// - If either parent or child node is not already revealed. + /// - If parent's path is not a prefix of the child's path. + fn branch_changes_on_leaf_removal( + parent_path: &Nibbles, + remaining_child_path: &Nibbles, + remaining_child_node: &SparseNode, + ) -> (SparseNode, bool) { + debug_assert!(remaining_child_path.len() > parent_path.len()); + debug_assert!(remaining_child_path.starts_with(parent_path)); + + let remaining_child_nibble = remaining_child_path.get_unchecked(parent_path.len()); + + // We swap the branch node out for either an extension or a leaf, depending on + // what its remaining child is. + match remaining_child_node { + SparseNode::Empty | SparseNode::Hash(_) => { + panic!("remaining child must have been revealed already") + } + // If the only child is a leaf node, we downgrade the branch node into a + // leaf node, prepending the nibble to the key, and delete the old + // child. + SparseNode::Leaf { key, .. } => { + let mut new_key = Nibbles::from_nibbles_unchecked([remaining_child_nibble]); + new_key.extend(key); + (SparseNode::new_leaf(new_key), true) + } + // If the only child node is an extension node, we downgrade the branch + // node into an even longer extension node, prepending the nibble to the + // key, and delete the old child. + SparseNode::Extension { key, .. } => { + let mut new_key = Nibbles::from_nibbles_unchecked([remaining_child_nibble]); + new_key.extend(key); + (SparseNode::new_ext(new_key), true) + } + // If the only child is a branch node, we downgrade the current branch + // node into a one-nibble extension node. + SparseNode::Branch { .. } => ( + SparseNode::new_ext(Nibbles::from_nibbles_unchecked([remaining_child_nibble])), + false, + ), + } + } + + /// Given the path to a parent extension and its key, and a child node (not necessarily on this + /// subtrie), returns an optional replacement parent node. If a replacement is returned then the + /// child node should be deleted. + /// + /// ## Panics + /// + /// - If either parent or child node is not already revealed. + /// - If parent's path is not a prefix of the child's path.
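+ /// + /// # Example + /// + /// A sketch of the collapse case (paths and keys are illustrative; `parent_path` and + /// `child_path` stand in for real trie positions): + /// + /// ```ignore + /// // An extension with key [0xA] whose child has collapsed into a leaf with key [0xB] + /// // merges into a single leaf with key [0xA, 0xB]; the caller deletes the old child. + /// let child = SparseNode::new_leaf(Nibbles::from_nibbles_unchecked([0xB])); + /// let merged = ParallelSparseTrie::extension_changes_on_leaf_removal( + /// &parent_path, + /// &Nibbles::from_nibbles_unchecked([0xA]), + /// &child_path, + /// &child, + /// ); + /// assert!(matches!(merged, Some(SparseNode::Leaf { .. }))); + /// ```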
+ fn extension_changes_on_leaf_removal( + parent_path: &Nibbles, + parent_key: &Nibbles, + child_path: &Nibbles, + child: &SparseNode, + ) -> Option<SparseNode> { + debug_assert!(child_path.len() > parent_path.len()); + debug_assert!(child_path.starts_with(parent_path)); + + // If the parent node is an extension node, we need to look at its child to see + // if we need to merge it. + match child { + SparseNode::Empty | SparseNode::Hash(_) => { + panic!("child must be revealed") + } + // For a leaf node, we collapse the extension node into a leaf node, + // extending the key. While it's impossible to encounter an extension node + // followed by a leaf node in a complete trie, it's possible here because we + // could have downgraded the extension node's child into a leaf node from a + // branch in a previous call to `branch_changes_on_leaf_removal`. + SparseNode::Leaf { key, .. } => { + let mut new_key = *parent_key; + new_key.extend(key); + Some(SparseNode::new_leaf(new_key)) + } + // Similar to the leaf node, for an extension node, we collapse them into one + // extension node, extending the key. + SparseNode::Extension { key, .. } => { + let mut new_key = *parent_key; + new_key.extend(key); + Some(SparseNode::new_ext(new_key)) + } + // For a branch node, we just leave the extension node as-is. + SparseNode::Branch { .. } => None, + } + } + + /// Removes a leaf node from the trie at the specified full path of a value (that is, the leaf's + /// path + its key). + /// + /// This function removes the leaf value from the internal values map and then traverses + /// the trie to remove or adjust intermediate nodes, merging or collapsing them as necessary. + /// + /// # Returns + /// + /// Returns `Ok(())` if the leaf is successfully removed or was not present in the trie, + /// otherwise returns an error if a blinded node prevents removal. + pub fn remove_leaf( + &mut self, + leaf_full_path: &Nibbles, + provider: impl BlindedProvider, + ) -> SparseTrieResult<()> { + // When removing a leaf node it's possibly necessary to modify its parent node, and possibly + // the parent's parent node. It is not ever necessary to descend further than that; once an + // extension node is hit it must terminate in a branch or the root, which won't need further + // updates. So the situation with maximum updates is: + // + // - Leaf + // - Branch with 2 children, one being this leaf + // - Extension + // + // ...which will result in just a leaf or extension, depending on what the branch's other + // child is. + // + // Therefore, first traverse the trie in order to find the leaf node and at most its parent + // and grandparent. + + let leaf_path; + let leaf_subtrie; + + let mut branch_parent_path: Option<Nibbles> = None; + let mut branch_parent_node: Option<SparseNode> = None; + + let mut ext_grandparent_path: Option<Nibbles> = None; + let mut ext_grandparent_node: Option<SparseNode> = None; + + let mut curr_path = Nibbles::new(); // start traversal from root + let mut curr_subtrie = self.upper_subtrie.as_mut(); + let mut curr_subtrie_is_upper = true; + + loop { + let curr_node = curr_subtrie.nodes.get_mut(&curr_path).unwrap(); + + match Self::find_next_to_leaf(&curr_path, curr_node, leaf_full_path)?
{ + FindNextToLeafOutcome::NotFound => return Ok(()), // leaf isn't in the trie + FindNextToLeafOutcome::Found => { + // this node is the target leaf + leaf_path = curr_path; + leaf_subtrie = curr_subtrie; + break; + } + FindNextToLeafOutcome::ContinueFrom(next_path) => { + // Any branches/extensions along the path to the leaf will have their `hash` + // field unset, as it will no longer be valid once the leaf is removed. + match curr_node { + SparseNode::Branch { hash, .. } => { + *hash = None; + + // If there is already an extension leading into a branch, then that + // extension is no longer relevant. + match (&branch_parent_path, &ext_grandparent_path) { + (Some(branch), Some(ext)) if branch.len() > ext.len() => { + ext_grandparent_path = None; + ext_grandparent_node = None; + } + _ => (), + }; + branch_parent_path = Some(curr_path); + branch_parent_node = Some(curr_node.clone()); + } + SparseNode::Extension { hash, .. } => { + *hash = None; + + // We can assume a new branch node will be found after the extension, so + // there's no need to modify branch_parent_path/node even if it's + // already set. + ext_grandparent_path = Some(curr_path); + ext_grandparent_node = Some(curr_node.clone()); + } + SparseNode::Empty | SparseNode::Hash(_) | SparseNode::Leaf { .. } => { + unreachable!("find_next_to_leaf errors on non-revealed nodes, and returns Found or NotFound on a Leaf") + } + } + + curr_path = next_path; + + // If we were previously looking at the upper trie, and the new path is in the + // lower trie, we need to pull out a ref to the lower trie. + if curr_subtrie_is_upper { + if let SparseSubtrieType::Lower(idx) = + SparseSubtrieType::from_path(&curr_path) + { + curr_subtrie = self.lower_subtries[idx].as_mut().unwrap(); + curr_subtrie_is_upper = false; + } + } + } + }; + } + + // We've traversed to the leaf and collected its ancestors as necessary. Remove the leaf + // from its SparseSubtrie. + self.prefix_set.insert(*leaf_full_path); + leaf_subtrie.inner.values.remove(leaf_full_path); + leaf_subtrie.nodes.remove(&leaf_path); + + // If the leaf was at the root, replace its node with the empty value. We can stop execution + // here; all remaining logic is related to the ancestors of the leaf. + if leaf_path.is_empty() { + self.upper_subtrie.nodes.insert(leaf_path, SparseNode::Empty); + return Ok(()) + } + + // If there is a parent branch node (very likely, unless the leaf is at the root) execute + // any required changes for that node, relative to the removed leaf. + if let (Some(branch_path), Some(SparseNode::Branch { mut state_mask, .. })) = + (&branch_parent_path, &branch_parent_node) + { + let child_nibble = leaf_path.get_unchecked(branch_path.len()); + state_mask.unset_bit(child_nibble); + + let new_branch_node = if state_mask.count_bits() == 1 { + // If only one child is left set in the branch node, we need to collapse it. Get + // full path of the only child node left. + let remaining_child_path = { + let mut p = *branch_path; + p.push_unchecked( + state_mask.first_set_bit_index().expect("state mask is not empty"), + ); + p + }; + + trace!( + target: "trie::parallel_sparse", + ?leaf_path, + ?branch_path, + ?remaining_child_path, + "Branch node has only one child", + ); + + let remaining_child_subtrie = self.subtrie_for_path(&remaining_child_path); + + // If the remaining child node is not yet revealed then we have to reveal it here, + // otherwise it's not possible to know how to collapse the branch.
+ let remaining_child_node = + match remaining_child_subtrie.nodes.get(&remaining_child_path).unwrap() { + SparseNode::Hash(_) => { + trace!( + target: "trie::parallel_sparse", + ?remaining_child_path, + "Retrieving remaining blinded branch child", + ); + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + provider.blinded_node(&remaining_child_path)? + { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!( + target: "trie::parallel_sparse", + ?remaining_child_path, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing remaining blinded branch child" + ); + remaining_child_subtrie.reveal_node( + remaining_child_path, + &decoded, + TrieMasks { hash_mask, tree_mask }, + )?; + remaining_child_subtrie.nodes.get(&remaining_child_path).unwrap() + } else { + return Err(SparseTrieErrorKind::NodeNotFoundInProvider { + path: remaining_child_path, + } + .into()) + } + } + node => node, + }; + + let (new_branch_node, remove_child) = Self::branch_changes_on_leaf_removal( + branch_path, + &remaining_child_path, + remaining_child_node, + ); + + if remove_child { + remaining_child_subtrie.nodes.remove(&remaining_child_path); + self.move_value_on_leaf_removal( + branch_path, + &new_branch_node, + &remaining_child_path, + ); + } + + if let Some(updates) = self.updates.as_mut() { + updates.updated_nodes.remove(branch_path); + updates.removed_nodes.insert(*branch_path); + } + + new_branch_node + } else { + // If more than one child is left set in the branch, we just re-insert it with the + // updated state_mask. + SparseNode::new_branch(state_mask) + }; + + let branch_subtrie = self.subtrie_for_path(branch_path); + branch_subtrie.nodes.insert(*branch_path, new_branch_node.clone()); + branch_parent_node = Some(new_branch_node); + }; + + // If there is a grandparent extension node then there will necessarily be a parent branch + // node. Execute any required changes for the extension node, relative to the (possibly now + // replaced with a leaf or extension) branch node. + if let (Some(ext_path), Some(SparseNode::Extension { key: shortkey, .. })) = + (ext_grandparent_path, &ext_grandparent_node) + { + let ext_subtrie = self.subtrie_for_path(&ext_path); + let branch_path = branch_parent_path.as_ref().unwrap(); + + if let Some(new_ext_node) = Self::extension_changes_on_leaf_removal( + &ext_path, + shortkey, + branch_path, + branch_parent_node.as_ref().unwrap(), + ) { + ext_subtrie.nodes.insert(ext_path, new_ext_node.clone()); + self.subtrie_for_path(branch_path).nodes.remove(branch_path); + self.move_value_on_leaf_removal(&ext_path, &new_ext_node, branch_path); + } + } + + Ok(()) + } + + /// Recalculates and updates the RLP hashes of nodes at or below level + /// [`UPPER_TRIE_MAX_DEPTH`] of the trie. + /// + /// The root node is considered to be at level 0. This method is useful for optimizing + /// hash recalculations after localized changes to the trie structure. + /// + /// This function first identifies all nodes that have changed (based on the prefix set) at or + /// below level [`UPPER_TRIE_MAX_DEPTH`] of the trie, then recalculates their RLP + /// representation.
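+ /// + /// Callers normally reach this through [`Self::root`], which hashes the lower subtries + /// before the upper subtrie so that every lower subtrie root has a cached hash. A sketch + /// of that ordering: + /// + /// ```ignore + /// self.update_lower_subtrie_hashes(); + /// let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); + /// let root_rlp = self.update_upper_subtrie_hashes(&mut prefix_set); + /// ```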
+ pub fn update_lower_subtrie_hashes(&mut self) { + trace!(target: "trie::parallel_sparse", "Updating subtrie hashes"); + + // Take changed subtries according to the prefix set + let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); + let (subtries, unchanged_prefix_set) = self.take_changed_lower_subtries(&mut prefix_set); + + // Update the prefix set with the keys that didn't have matching subtries + self.prefix_set = unchanged_prefix_set; + + // Update subtrie hashes in parallel + // TODO: call `update_hashes` on each subtrie in parallel + let (tx, rx) = mpsc::channel(); + for ChangedSubtrie { index, mut subtrie, mut prefix_set } in subtries { + subtrie.update_hashes(&mut prefix_set); + tx.send((index, subtrie)).unwrap(); + } + drop(tx); + + // Return updated subtries back to the trie + for (index, subtrie) in rx { + self.lower_subtries[index] = Some(subtrie); + } + } + + /// Updates hashes for the upper subtrie, using nodes from both upper and lower subtries. + #[instrument(level = "trace", target = "engine::tree", skip_all, ret)] + fn update_upper_subtrie_hashes(&mut self, prefix_set: &mut PrefixSet) -> RlpNode { + trace!(target: "trie::parallel_sparse", "Updating upper subtrie hashes"); + + debug_assert!(self.upper_subtrie.inner.buffers.path_stack.is_empty()); + self.upper_subtrie.inner.buffers.path_stack.push(RlpNodePathStackItem { + path: Nibbles::default(), // Start from root + is_in_prefix_set: None, + }); + + while let Some(stack_item) = self.upper_subtrie.inner.buffers.path_stack.pop() { + let path = stack_item.path; + let node = if path.len() < UPPER_TRIE_MAX_DEPTH { + self.upper_subtrie.nodes.get_mut(&path).expect("upper subtrie node must exist") + } else { + let index = path_subtrie_index_unchecked(&path); + let node = self.lower_subtries[index] + .as_mut() + .expect("lower subtrie must exist") + .nodes + .get_mut(&path) + .expect("lower subtrie node must exist"); + // Lower subtrie root node hashes must be computed before updating upper subtrie + // hashes + debug_assert!(node.hash().is_some()); + node + }; + + // Calculate the RLP node for the current node using upper subtrie + self.upper_subtrie.inner.rlp_node(prefix_set, stack_item, node); + } + + debug_assert_eq!(self.upper_subtrie.inner.buffers.rlp_node_stack.len(), 1); + self.upper_subtrie.inner.buffers.rlp_node_stack.pop().unwrap().rlp_node + } + + /// Calculates and returns the root hash of the trie. + /// + /// Before computing the hash, this function processes any remaining (dirty) nodes by + /// updating their RLP encodings. The root hash is either: + /// 1. The cached hash (if no dirty nodes were found) + /// 2. The keccak256 hash of the root node's RLP representation + pub fn root(&mut self) -> B256 { + trace!(target: "trie::parallel_sparse", "Calculating trie root hash"); + + // Update all lower subtrie hashes + self.update_lower_subtrie_hashes(); + + // Update hashes for the upper subtrie using our specialized function + // that can access both upper and lower subtrie nodes + let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); + let root_rlp = self.update_upper_subtrie_hashes(&mut prefix_set); + + // Return the root hash + root_rlp.as_hash().unwrap_or(EMPTY_ROOT_HASH) + } + + /// Configures the trie to retain information about updates. + /// + /// If `retain_updates` is true, the trie will record branch node updates and deletions. + /// This information can then be used to efficiently update an external database. 
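+ /// + /// ```ignore + /// // Sketch: retain updates while mutating, then drain them for persistence. + /// let mut trie = ParallelSparseTrie::default().with_updates(true); + /// // ... reveal nodes, update or remove leaves ... + /// let updates = trie.take_updates(); + /// ```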
+ pub fn with_updates(mut self, retain_updates: bool) -> Self { + self.updates = retain_updates.then_some(SparseTrieUpdates::default()); + self + } + + /// Consumes and returns the currently accumulated trie updates. + /// + /// This is useful when you want to apply the updates to an external database, + /// and then start tracking a new set of updates. + pub fn take_updates(&mut self) -> SparseTrieUpdates { + core::iter::once(&mut self.upper_subtrie) + .chain(self.lower_subtries.iter_mut().flatten()) + .fold(SparseTrieUpdates::default(), |mut acc, subtrie| { + acc.extend(subtrie.take_updates()); + acc + }) + } + + /// Returns: + /// 1. List of lower [subtries](SparseSubtrie) that have changed according to the provided + /// [prefix set](PrefixSet). See documentation of [`ChangedSubtrie`] for more details. + /// 2. Prefix set of keys that do not belong to any lower subtrie. + /// + /// This method helps optimize hash recalculations by identifying which specific + /// lower subtries need to be updated. Each lower subtrie can then be updated in parallel. + /// + /// IMPORTANT: The method removes the subtries from `lower_subtries`, and the caller is + /// responsible for returning them back into the array. + fn take_changed_lower_subtries( + &mut self, + prefix_set: &mut PrefixSet, + ) -> (Vec<ChangedSubtrie>, PrefixSetMut) { + // Clone the prefix set to iterate over its keys. Cloning is cheap, it's just an Arc. + let prefix_set_clone = prefix_set.clone(); + let mut prefix_set_iter = prefix_set_clone.into_iter().copied().peekable(); + let mut changed_subtries = Vec::new(); + let mut unchanged_prefix_set = PrefixSetMut::default(); + + for (index, subtrie) in self.lower_subtries.iter_mut().enumerate() { + if let Some(subtrie) = subtrie.take_if(|subtrie| prefix_set.contains(&subtrie.path)) { + let prefix_set = if prefix_set.all() { + unchanged_prefix_set = PrefixSetMut::all(); + PrefixSetMut::all() + } else { + // Take those keys from the original prefix set that start with the subtrie path + // + // Subtries are stored in the order of their paths, so we can use the same + // prefix set iterator. + let mut new_prefix_set = Vec::new(); + while let Some(key) = prefix_set_iter.peek() { + if key.starts_with(&subtrie.path) { + // If the key starts with the subtrie path, add it to the new prefix set + new_prefix_set.push(prefix_set_iter.next().unwrap()); + } else if new_prefix_set.is_empty() && key < &subtrie.path { + // If we didn't yet have any keys that belong to this subtrie, and the + // current key is still less than the subtrie path, add it to the + // unchanged prefix set + unchanged_prefix_set.insert(prefix_set_iter.next().unwrap()); + } else { + // If we're past the subtrie path, we're done with this subtrie. Do not + // advance the iterator, the next key will be processed either by the + // next subtrie or inserted into the unchanged prefix set. + break + } + } + PrefixSetMut::from(new_prefix_set) + } + .freeze(); + + changed_subtries.push(ChangedSubtrie { index, subtrie, prefix_set }); + } + } + + // Extend the unchanged prefix set with the remaining keys that are not part of any subtries + unchanged_prefix_set.extend_keys(prefix_set_iter); + + (changed_subtries, unchanged_prefix_set) + } +} + +/// This is a subtrie of the [`ParallelSparseTrie`] that contains a map from path to sparse trie +/// nodes. +#[derive(Clone, PartialEq, Eq, Debug, Default)] +pub struct SparseSubtrie { + /// The root path of this subtrie.
+ /// + /// This is the _full_ path to this subtrie, meaning it includes the first + /// [`UPPER_TRIE_MAX_DEPTH`] nibbles that we also use for indexing subtries in the + /// [`ParallelSparseTrie`]. + /// + /// There should be a node for this path in `nodes` map. + path: Nibbles, + /// The map from paths to sparse trie nodes within this subtrie. + nodes: HashMap<Nibbles, SparseNode>, + /// Subset of fields for mutable access while `nodes` field is also being mutably borrowed. + inner: SparseSubtrieInner, +} + +/// Returned by the `find_next_to_leaf` method to indicate either that the leaf has been found, +/// traversal should be continued from the given path, or the leaf is not in the trie. +enum FindNextToLeafOutcome { + /// `Found` indicates that the leaf was found at the given path. + Found, + /// `ContinueFrom` indicates that traversal should continue from the given path. + ContinueFrom(Nibbles), + /// `NotFound` indicates that there is no way to traverse to the leaf, as it is not in the + /// trie. + NotFound, +} + +impl SparseSubtrie { + fn new(path: Nibbles) -> Self { + Self { path, ..Default::default() } + } + + /// Configures the subtrie to retain information about updates. + /// + /// If `retain_updates` is true, the trie will record branch node updates and deletions. + /// This information can then be used to efficiently update an external database. + pub fn with_updates(mut self, retain_updates: bool) -> Self { + self.inner.updates = retain_updates.then_some(SparseTrieUpdates::default()); + self + } + + /// Returns true if the current path and its child are both found on the same level. + fn is_child_same_level(current_path: &Nibbles, child_path: &Nibbles) -> bool { + let current_level = core::mem::discriminant(&SparseSubtrieType::from_path(current_path)); + let child_level = core::mem::discriminant(&SparseSubtrieType::from_path(child_path)); + current_level == child_level + } + + /// Internal implementation of the method of the same name on `ParallelSparseTrie`. + fn reveal_node( + &mut self, + path: Nibbles, + node: &TrieNode, + masks: TrieMasks, + ) -> SparseTrieResult<()> { + debug_assert!(path.starts_with(&self.path)); + + // If the node is already revealed and it's not a hash node, do nothing. + if self.nodes.get(&path).is_some_and(|node| !node.is_hash()) { + return Ok(()) + } + + if let Some(tree_mask) = masks.tree_mask { + self.inner.branch_node_tree_masks.insert(path, tree_mask); + } + if let Some(hash_mask) = masks.hash_mask { + self.inner.branch_node_hash_masks.insert(path, hash_mask); + } + + match node { + TrieNode::EmptyRoot => { + // For an empty root, ensure that we are at the root path, and at the upper subtrie. + debug_assert!(path.is_empty()); + debug_assert!(self.path.is_empty()); + self.nodes.insert(path, SparseNode::Empty); + } + TrieNode::Branch(branch) => { + // For a branch node, iterate over all potential children + let mut stack_ptr = branch.as_ref().first_child_index(); + for idx in CHILD_INDEX_RANGE { + if branch.state_mask.is_bit_set(idx) { + let mut child_path = path; + child_path.push_unchecked(idx); + if Self::is_child_same_level(&path, &child_path) { + // Reveal each child node or hash that it has, but only if the child is + // on the same level as the parent. + self.reveal_node_or_hash(child_path, &branch.stack[stack_ptr])?; + } + stack_ptr += 1; + } + } + // Update the branch node entry in the nodes map, handling cases where a blinded + // node is now replaced with a revealed node.
+ match self.nodes.entry(path) { + Entry::Occupied(mut entry) => match entry.get() { + // Replace a hash node with a fully revealed branch node. + SparseNode::Hash(hash) => { + entry.insert(SparseNode::Branch { + state_mask: branch.state_mask, + // Memoize the hash of a previously blinded node in a new branch + // node. + hash: Some(*hash), + store_in_db_trie: Some( + masks.hash_mask.is_some_and(|mask| !mask.is_empty()) || + masks.tree_mask.is_some_and(|mask| !mask.is_empty()), + ), + }); + } + // Branch node already exists, or an extension node was placed where a + // branch node was before. + SparseNode::Branch { .. } | SparseNode::Extension { .. } => {} + // All other node types can't be handled. + node @ (SparseNode::Empty | SparseNode::Leaf { .. }) => { + return Err(SparseTrieErrorKind::Reveal { + path: *entry.key(), + node: Box::new(node.clone()), + } + .into()) + } + }, + Entry::Vacant(entry) => { + entry.insert(SparseNode::new_branch(branch.state_mask)); + } + } + } + TrieNode::Extension(ext) => match self.nodes.entry(path) { + Entry::Occupied(mut entry) => match entry.get() { + // Replace a hash node with a revealed extension node. + SparseNode::Hash(hash) => { + let mut child_path = *entry.key(); + child_path.extend(&ext.key); + entry.insert(SparseNode::Extension { + key: ext.key, + // Memoize the hash of a previously blinded node in a new extension + // node. + hash: Some(*hash), + store_in_db_trie: None, + }); + if Self::is_child_same_level(&path, &child_path) { + self.reveal_node_or_hash(child_path, &ext.child)?; + } + } + // Extension node already exists, or an extension node was placed where a branch + // node was before. + SparseNode::Extension { .. } | SparseNode::Branch { .. } => {} + // All other node types can't be handled. + node @ (SparseNode::Empty | SparseNode::Leaf { .. }) => { + return Err(SparseTrieErrorKind::Reveal { + path: *entry.key(), + node: Box::new(node.clone()), + } + .into()) + } + }, + Entry::Vacant(entry) => { + let mut child_path = *entry.key(); + child_path.extend(&ext.key); + entry.insert(SparseNode::new_ext(ext.key)); + if Self::is_child_same_level(&path, &child_path) { + self.reveal_node_or_hash(child_path, &ext.child)?; + } + } + }, + TrieNode::Leaf(leaf) => match self.nodes.entry(path) { + Entry::Occupied(mut entry) => match entry.get() { + // Replace a hash node with a revealed leaf node and store leaf node value. + SparseNode::Hash(hash) => { + let mut full = *entry.key(); + full.extend(&leaf.key); + self.inner.values.insert(full, leaf.value.clone()); + entry.insert(SparseNode::Leaf { + key: leaf.key, + // Memoize the hash of a previously blinded node in a new leaf + // node. + hash: Some(*hash), + }); + } + // Leaf node already exists. + SparseNode::Leaf { .. } => {} + // All other node types can't be handled. + node @ (SparseNode::Empty | + SparseNode::Extension { .. } | + SparseNode::Branch { .. }) => { + return Err(SparseTrieErrorKind::Reveal { + path: *entry.key(), + node: Box::new(node.clone()), + } + .into()) + } + }, + Entry::Vacant(entry) => { + let mut full = *entry.key(); + full.extend(&leaf.key); + entry.insert(SparseNode::new_leaf(leaf.key)); + self.inner.values.insert(full, leaf.value.clone()); + } + }, + } + + Ok(()) + } + + /// Reveals either a node or its hash placeholder based on the provided child data. + /// + /// When traversing the trie, we often encounter references to child nodes that + /// are either directly embedded or represented by their hash. This method + /// handles both cases: + /// + /// 1. 
If the child data represents a hash (32+1=33 bytes), store it as a hash node + /// 2. Otherwise, decode the data as a [`TrieNode`] and recursively reveal it using + /// `reveal_node` + /// + /// # Returns + /// + /// Returns `Ok(())` if successful, or an error if the node cannot be revealed. + /// + /// # Error Handling + /// + /// Will error if there's a conflict between a new hash node and an existing one + /// at the same path. + fn reveal_node_or_hash(&mut self, path: Nibbles, child: &[u8]) -> SparseTrieResult<()> { + if child.len() == B256::len_bytes() + 1 { + let hash = B256::from_slice(&child[1..]); + match self.nodes.entry(path) { + Entry::Occupied(entry) => match entry.get() { + // Hash node with a different hash can't be handled. + SparseNode::Hash(previous_hash) if previous_hash != &hash => { + return Err(SparseTrieErrorKind::Reveal { + path: *entry.key(), + node: Box::new(SparseNode::Hash(hash)), + } + .into()) + } + _ => {} + }, + Entry::Vacant(entry) => { + entry.insert(SparseNode::Hash(hash)); + } + } + return Ok(()) + } + + self.reveal_node(path, &TrieNode::decode(&mut &child[..])?, TrieMasks::none()) + } + + /// Recalculates and updates the RLP hashes for the changed nodes in this subtrie. + /// + /// The function starts from the subtrie root, traverses down to leaves, and then calculates + /// the hashes from leaves back up to the root. It uses a stack from [`SparseSubtrieBuffers`] to + /// track the traversal and accumulate RLP encodings. + /// + /// # Parameters + /// + /// - `prefix_set`: The set of trie paths whose nodes have changed. + /// + /// # Returns + /// + /// The RLP node of the updated subtrie root. + /// + /// # Panics + /// + /// If the node at the root path does not exist. + #[instrument(level = "trace", target = "engine::tree", skip_all, fields(root = ?self.path), ret)] + pub fn update_hashes(&mut self, prefix_set: &mut PrefixSet) -> RlpNode { + trace!(target: "trie::parallel_sparse", "Updating subtrie hashes"); + + debug_assert!(prefix_set.iter().all(|path| path.starts_with(&self.path))); + + debug_assert!(self.inner.buffers.path_stack.is_empty()); + self.inner + .buffers + .path_stack + .push(RlpNodePathStackItem { path: self.path, is_in_prefix_set: None }); + + while let Some(stack_item) = self.inner.buffers.path_stack.pop() { + let path = stack_item.path; + let node = self + .nodes + .get_mut(&path) + .unwrap_or_else(|| panic!("node at path {path:?} does not exist")); + + self.inner.rlp_node(prefix_set, stack_item, node); + } + + debug_assert_eq!(self.inner.buffers.rlp_node_stack.len(), 1); + self.inner.buffers.rlp_node_stack.pop().unwrap().rlp_node + } + + /// Consumes and returns the currently accumulated trie updates. + /// + /// This is useful when you want to apply the updates to an external database, + /// and then start tracking a new set of updates. + fn take_updates(&mut self) -> SparseTrieUpdates { + self.inner.updates.take().unwrap_or_default() + } +} + +/// Helper type for [`SparseSubtrie`] to mutably access only a subset of fields from the original +/// struct. +#[derive(Clone, PartialEq, Eq, Debug, Default)] +struct SparseSubtrieInner { + /// When a bit is set, the corresponding child subtree is stored in the database. + branch_node_tree_masks: HashMap<Nibbles, TrieMask>, + /// When a bit is set, the corresponding child is stored as a hash in the database. + branch_node_hash_masks: HashMap<Nibbles, TrieMask>, + /// Map from leaf key paths to their values.
+    /// All values are stored here instead of directly in leaf nodes.
+    values: HashMap<Nibbles, Vec<u8>>,
+    /// Optional tracking of trie updates for later use.
+    updates: Option<SparseTrieUpdates>,
+    /// Reusable buffers for [`SparseSubtrie::update_hashes`].
+    buffers: SparseSubtrieBuffers,
+}
+
+impl SparseSubtrieInner {
+    /// Computes the RLP encoding and its hash for a single trie node ([`SparseNode`]).
+    ///
+    /// # Deferred Processing
+    ///
+    /// When an extension or a branch node depends on child nodes that haven't been computed yet,
+    /// the function pushes the current node back onto the path stack along with its children,
+    /// then returns early. This allows the iterative algorithm to process children first before
+    /// retrying the parent.
+    ///
+    /// # Parameters
+    ///
+    /// - `prefix_set`: Set of prefixes (key paths) that have been marked as updated
+    /// - `stack_item`: The stack item to process
+    /// - `node`: The sparse node to process (will be mutated to update its hash)
+    ///
+    /// # Side Effects
+    ///
+    /// - Updates the node's hash field after computing the RLP
+    /// - Pushes nodes to [`SparseSubtrieBuffers::path_stack`] to manage traversal
+    /// - Updates the [`SparseTrieUpdates`] accumulator when tracking changes, if it is [`Some`]
+    /// - May push items onto the path stack for deferred processing
+    ///
+    /// # Exit condition
+    ///
+    /// Once all nodes have been processed and all RLPs and hashes calculated, pushes the root node
+    /// onto the [`SparseSubtrieBuffers::rlp_node_stack`] and exits.
+    fn rlp_node(
+        &mut self,
+        prefix_set: &mut PrefixSet,
+        mut stack_item: RlpNodePathStackItem,
+        node: &mut SparseNode,
+    ) {
+        let path = stack_item.path;
+        trace!(
+            target: "trie::parallel_sparse",
+            ?path,
+            ?node,
+            "Calculating node RLP"
+        );
+
+        // Check if the path is in the prefix set.
+        // First, check the cached value. If it's `None`, then check the prefix set, and update
+        // the cached value.
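+        // Illustrative note, not part of the original change: deferred nodes are
+        // re-pushed further down with `is_in_prefix_set` already filled in, e.g.
+        //
+        //   RlpNodePathStackItem { path, is_in_prefix_set: Some(prefix_set_contains(&path)) }
+        //
+        // so when such an item is visited again, the closure below answers from the
+        // cached flag instead of querying `prefix_set` a second time.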
+        let mut prefix_set_contains = |path: &Nibbles| {
+            *stack_item.is_in_prefix_set.get_or_insert_with(|| prefix_set.contains(path))
+        };
+
+        let (rlp_node, node_type) = match node {
+            SparseNode::Empty => (RlpNode::word_rlp(&EMPTY_ROOT_HASH), SparseNodeType::Empty),
+            SparseNode::Hash(hash) => {
+                // Return the pre-computed hash of a blinded node immediately
+                (RlpNode::word_rlp(hash), SparseNodeType::Hash)
+            }
+            SparseNode::Leaf { key, hash } => {
+                let mut path = path;
+                path.extend(key);
+                if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) {
+                    // If the node hash is already computed, and the node path is not in
+                    // the prefix set, return the pre-computed hash
+                    (RlpNode::word_rlp(&hash), SparseNodeType::Leaf)
+                } else {
+                    // Encode the leaf node and update its hash
+                    let value = self.values.get(&path).unwrap();
+                    self.buffers.rlp_buf.clear();
+                    let rlp_node = LeafNodeRef { key, value }.rlp(&mut self.buffers.rlp_buf);
+                    *hash = rlp_node.as_hash();
+                    (rlp_node, SparseNodeType::Leaf)
+                }
+            }
+            SparseNode::Extension { key, hash, store_in_db_trie } => {
+                let mut child_path = path;
+                child_path.extend(key);
+                if let Some((hash, store_in_db_trie)) =
+                    hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path))
+                {
+                    // If the node hash is already computed, and the node path is not in
+                    // the prefix set, return the pre-computed hash
+                    (
+                        RlpNode::word_rlp(&hash),
+                        SparseNodeType::Extension { store_in_db_trie: Some(store_in_db_trie) },
+                    )
+                } else if self.buffers.rlp_node_stack.last().is_some_and(|e| e.path == child_path) {
+                    // The top of the stack has the child node; we can encode the extension
+                    // node and update its hash
+                    let RlpNodeStackItem { path: _, rlp_node: child, node_type: child_node_type } =
+                        self.buffers.rlp_node_stack.pop().unwrap();
+                    self.buffers.rlp_buf.clear();
+                    let rlp_node =
+                        ExtensionNodeRef::new(key, &child).rlp(&mut self.buffers.rlp_buf);
+                    *hash = rlp_node.as_hash();
+
+                    let store_in_db_trie_value = child_node_type.store_in_db_trie();
+
+                    trace!(
+                        target: "trie::parallel_sparse",
+                        ?path,
+                        ?child_path,
+                        ?child_node_type,
+                        "Extension node"
+                    );
+
+                    *store_in_db_trie = store_in_db_trie_value;
+
+                    (
+                        rlp_node,
+                        SparseNodeType::Extension {
+                            // Inherit the `store_in_db_trie` flag from the child node, which is
+                            // always the branch node
+                            store_in_db_trie: store_in_db_trie_value,
+                        },
+                    )
+                } else {
+                    // Defer processing until the child is computed; the node's hash will be
+                    // updated on the next invocation.
+                    self.buffers.path_stack.extend([
+                        RlpNodePathStackItem {
+                            path,
+                            is_in_prefix_set: Some(prefix_set_contains(&path)),
+                        },
+                        RlpNodePathStackItem { path: child_path, is_in_prefix_set: None },
+                    ]);
+                    return
+                }
+            }
+            SparseNode::Branch { state_mask, hash, store_in_db_trie } => {
+                if let Some((hash, store_in_db_trie)) =
+                    hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path))
+                {
+                    // If the node hash is already computed, and the node path is not in
+                    // the prefix set, return the pre-computed hash
+                    self.buffers.rlp_node_stack.push(RlpNodeStackItem {
+                        path,
+                        rlp_node: RlpNode::word_rlp(&hash),
+                        node_type: SparseNodeType::Branch {
+                            store_in_db_trie: Some(store_in_db_trie),
+                        },
+                    });
+                    return
+                }
+
+                let retain_updates = self.updates.is_some() && prefix_set_contains(&path);
+
+                self.buffers.branch_child_buf.clear();
+                // Walk children in reverse order from `f` to `0`, so we pop `0` first
+                // from the stack and keep walking in sorted order.
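+                // Worked example, not part of the original change: for a branch with
+                // children at nibbles 0x1 and 0x5, the loop below fills
+                // `branch_child_buf` with `[path + 0x5, path + 0x1]`. If the children
+                // are not hashed yet, they are pushed onto the path stack in that
+                // order, so `path + 0x1` is popped and hashed first and its RLP lands
+                // deeper in `rlp_node_stack`, which matches the reverse order in which
+                // the consuming loop later pops the results.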
+                for bit in CHILD_INDEX_RANGE.rev() {
+                    if state_mask.is_bit_set(bit) {
+                        let mut child = path;
+                        child.push_unchecked(bit);
+                        self.buffers.branch_child_buf.push(child);
+                    }
+                }
+
+                self.buffers
+                    .branch_value_stack_buf
+                    .resize(self.buffers.branch_child_buf.len(), Default::default());
+                let mut added_children = false;
+
+                let mut tree_mask = TrieMask::default();
+                let mut hash_mask = TrieMask::default();
+                let mut hashes = Vec::new();
+                for (i, child_path) in self.buffers.branch_child_buf.iter().enumerate() {
+                    if self.buffers.rlp_node_stack.last().is_some_and(|e| &e.path == child_path) {
+                        let RlpNodeStackItem {
+                            path: _,
+                            rlp_node: child,
+                            node_type: child_node_type,
+                        } = self.buffers.rlp_node_stack.pop().unwrap();
+
+                        // Update the masks only if we need to retain trie updates
+                        if retain_updates {
+                            // SAFETY: it's a child, so it's never empty
+                            let last_child_nibble = child_path.last().unwrap();
+
+                            // Determine whether we need to set the tree mask bit.
+                            let should_set_tree_mask_bit = if let Some(store_in_db_trie) =
+                                child_node_type.store_in_db_trie()
+                            {
+                                // A branch or an extension node explicitly sets the
+                                // `store_in_db_trie` flag
+                                store_in_db_trie
+                            } else {
+                                // A blinded node has the tree mask bit set
+                                child_node_type.is_hash() &&
+                                    self.branch_node_tree_masks
+                                        .get(&path)
+                                        .is_some_and(|mask| mask.is_bit_set(last_child_nibble))
+                            };
+                            if should_set_tree_mask_bit {
+                                tree_mask.set_bit(last_child_nibble);
+                            }
+
+                            // Set the hash mask. If a child node is a revealed branch node OR
+                            // is a blinded node that has its hash mask bit set according to the
+                            // database, set the hash mask bit and save the hash.
+                            let hash = child.as_hash().filter(|_| {
+                                child_node_type.is_branch() ||
+                                    (child_node_type.is_hash() &&
+                                        self.branch_node_hash_masks.get(&path).is_some_and(
+                                            |mask| mask.is_bit_set(last_child_nibble),
+                                        ))
+                            });
+                            if let Some(hash) = hash {
+                                hash_mask.set_bit(last_child_nibble);
+                                hashes.push(hash);
+                            }
+                        }
+
+                        // Insert children into the result buffer in normal order,
+                        // because we initially iterated in reverse.
+                        // SAFETY: i < len and len is never 0
+                        let original_idx = self.buffers.branch_child_buf.len() - i - 1;
+                        self.buffers.branch_value_stack_buf[original_idx] = child;
+                        added_children = true;
+                    } else {
+                        // Defer processing until the children are computed; the node's hash
+                        // will be updated on the next invocation.
+                        debug_assert!(!added_children);
+                        self.buffers.path_stack.push(RlpNodePathStackItem {
+                            path,
+                            is_in_prefix_set: Some(prefix_set_contains(&path)),
+                        });
+                        self.buffers.path_stack.extend(
+                            self.buffers
+                                .branch_child_buf
+                                .drain(..)
+                                .map(|path| RlpNodePathStackItem { path, is_in_prefix_set: None }),
+                        );
+                        return
+                    }
+                }
+
+                trace!(
+                    target: "trie::parallel_sparse",
+                    ?path,
+                    ?tree_mask,
+                    ?hash_mask,
+                    "Branch node masks"
+                );
+
+                // The top of the stack has all child nodes; we can encode the branch node and
+                // update its hash
+                self.buffers.rlp_buf.clear();
+                let branch_node_ref =
+                    BranchNodeRef::new(&self.buffers.branch_value_stack_buf, *state_mask);
+                let rlp_node = branch_node_ref.rlp(&mut self.buffers.rlp_buf);
+                *hash = rlp_node.as_hash();
+
+                // Save a branch node update only if it's not a root node, and we need to
+                // persist updates.
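+                // Hypothetical example, not part of the original change: with
+                // tree_mask = 0b0100 (the subtree under child 0x2 lives in the
+                // database) and hash_mask = 0b0001 (child 0x0's hash is stored),
+                // `store_in_db_trie` below is true and a `BranchNodeCompact` carrying
+                // both masks plus the collected child hashes is recorded in
+                // `updated_nodes`; with both masks empty, the node is instead dropped
+                // from (or marked removed in) the update set.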
+                let store_in_db_trie_value = if let Some(updates) =
+                    self.updates.as_mut().filter(|_| retain_updates && !path.is_empty())
+                {
+                    let store_in_db_trie = !tree_mask.is_empty() || !hash_mask.is_empty();
+                    if store_in_db_trie {
+                        // Store in DB trie if there are either any children that are stored in
+                        // the DB trie, or any children represent hashed values
+                        hashes.reverse();
+                        let branch_node = BranchNodeCompact::new(
+                            *state_mask,
+                            tree_mask,
+                            hash_mask,
+                            hashes,
+                            hash.filter(|_| path.is_empty()),
+                        );
+                        updates.updated_nodes.insert(path, branch_node);
+                    } else if self
+                        .branch_node_tree_masks
+                        .get(&path)
+                        .is_some_and(|mask| !mask.is_empty()) ||
+                        self.branch_node_hash_masks
+                            .get(&path)
+                            .is_some_and(|mask| !mask.is_empty())
+                    {
+                        // If new tree and hash masks are empty, but previously they weren't, we
+                        // need to remove the node update and add the node itself to the list of
+                        // removed nodes.
+                        updates.updated_nodes.remove(&path);
+                        updates.removed_nodes.insert(path);
+                    } else if self
+                        .branch_node_tree_masks
+                        .get(&path)
+                        .is_none_or(|mask| mask.is_empty()) &&
+                        self.branch_node_hash_masks.get(&path).is_none_or(|mask| mask.is_empty())
+                    {
+                        // If new tree and hash masks are empty, and they were previously empty
+                        // as well, we need to remove the node update.
+                        updates.updated_nodes.remove(&path);
+                    }
+
+                    store_in_db_trie
+                } else {
+                    false
+                };
+                *store_in_db_trie = Some(store_in_db_trie_value);
+
+                (
+                    rlp_node,
+                    SparseNodeType::Branch { store_in_db_trie: Some(store_in_db_trie_value) },
+                )
+            }
+        };
+
+        self.buffers.rlp_node_stack.push(RlpNodeStackItem { path, rlp_node, node_type });
+        trace!(
+            target: "trie::parallel_sparse",
+            ?path,
+            ?node_type,
+            "Added node to RLP node stack"
+        );
+    }
+}
+
+/// Sparse Subtrie Type.
+///
+/// Used to determine the type of subtrie a certain path belongs to:
+/// - Paths in the range `0x..=0xf` belong to the upper subtrie.
+/// - Paths in the range `0x00..` belong to one of the lower subtries. The index of the lower
+///   subtrie is determined by the first [`UPPER_TRIE_MAX_DEPTH`] nibbles of the path.
+///
+/// There can be at most [`NUM_LOWER_SUBTRIES`] lower subtries.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum SparseSubtrieType {
+    /// Upper subtrie with paths in the range `0x..=0xf`
+    Upper,
+    /// Lower subtrie with paths in the range `0x00..`. Includes the index of the subtrie,
+    /// according to the path prefix.
+    Lower(usize),
+}
+
+impl SparseSubtrieType {
+    /// Returns true if a node at a path of the given length would be placed in the upper subtrie.
+    ///
+    /// Nodes with paths shorter than [`UPPER_TRIE_MAX_DEPTH`] nibbles belong to the upper subtrie,
+    /// while longer paths belong to the lower subtries.
+    pub const fn path_len_is_upper(len: usize) -> bool {
+        len < UPPER_TRIE_MAX_DEPTH
+    }
+
+    /// Returns the type of subtrie based on the given path.
+    pub fn from_path(path: &Nibbles) -> Self {
+        if Self::path_len_is_upper(path.len()) {
+            Self::Upper
+        } else {
+            Self::Lower(path_subtrie_index_unchecked(path))
+        }
+    }
+
+    /// Returns the index of the lower subtrie, if it exists.
+    pub const fn lower_index(&self) -> Option<usize> {
+        match self {
+            Self::Upper => None,
+            Self::Lower(index) => Some(*index),
+        }
+    }
+}
+
+/// Collection of reusable buffers for calculating subtrie hashes.
+///
+/// These buffers reduce allocations when computing RLP representations during trie updates.
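+///
+/// A rough reuse sketch (illustrative, not part of the original change):
+///
+/// ```ignore
+/// // `update_hashes` drains `path_stack` and leaves exactly one entry on
+/// // `rlp_node_stack`, which it pops, so a second call on the same subtrie
+/// // reuses the buffers' existing allocations instead of reallocating.
+/// let root_a = subtrie.update_hashes(&mut prefix_set_a);
+/// let root_b = subtrie.update_hashes(&mut prefix_set_b);
+/// ```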
+#[derive(Clone, PartialEq, Eq, Debug, Default)]
+pub struct SparseSubtrieBuffers {
+    /// Stack of RLP node paths
+    path_stack: Vec<RlpNodePathStackItem>,
+    /// Stack of RLP nodes
+    rlp_node_stack: Vec<RlpNodeStackItem>,
+    /// Reusable branch child path
+    branch_child_buf: SmallVec<[Nibbles; 16]>,
+    /// Reusable branch value stack
+    branch_value_stack_buf: SmallVec<[RlpNode; 16]>,
+    /// Reusable RLP buffer
+    rlp_buf: Vec<u8>,
+}
+
+/// RLP node path stack item.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct RlpNodePathStackItem {
+    /// Path to the node.
+    pub path: Nibbles,
+    /// Whether the path is in the prefix set. If [`None`], then unknown yet.
+    pub is_in_prefix_set: Option<bool>,
+}
+
+/// Changed subtrie.
+#[derive(Debug)]
+struct ChangedSubtrie {
+    /// Lower subtrie index in the range [0, [`NUM_LOWER_SUBTRIES`]).
+    index: usize,
+    /// Changed subtrie
+    subtrie: Box<SparseSubtrie>,
+    /// Prefix set of keys that belong to the subtrie.
+    #[allow(unused)]
+    prefix_set: PrefixSet,
+}
+
+/// Convert first [`UPPER_TRIE_MAX_DEPTH`] nibbles of the path into a lower subtrie index in the
+/// range [0, [`NUM_LOWER_SUBTRIES`]).
+///
+/// # Panics
+///
+/// If the path is shorter than [`UPPER_TRIE_MAX_DEPTH`] nibbles.
+fn path_subtrie_index_unchecked(path: &Nibbles) -> usize {
+    debug_assert_eq!(UPPER_TRIE_MAX_DEPTH, 2);
+    path.get_byte_unchecked(0) as usize
+}
+
+#[cfg(test)]
+mod tests {
+    use super::{
+        path_subtrie_index_unchecked, ParallelSparseTrie, SparseSubtrie, SparseSubtrieType,
+    };
+    use crate::trie::ChangedSubtrie;
+    use alloy_primitives::{
+        map::{foldhash::fast::RandomState, B256Set, DefaultHashBuilder, HashMap},
+        B256,
+    };
+    use alloy_rlp::{Decodable, Encodable};
+    use alloy_trie::{BranchNodeCompact, Nibbles};
+    use assert_matches::assert_matches;
+    use itertools::Itertools;
+    use reth_execution_errors::SparseTrieError;
+    use reth_primitives_traits::Account;
+    use reth_trie::{
+        hashed_cursor::{noop::NoopHashedAccountCursor, HashedPostStateAccountCursor},
+        node_iter::{TrieElement, TrieNodeIter},
+        trie_cursor::{noop::NoopAccountTrieCursor, TrieCursor},
+        walker::TrieWalker,
+    };
+    use reth_trie_common::{
+        prefix_set::PrefixSetMut,
+        proof::{ProofNodes, ProofRetainer},
+        updates::TrieUpdates,
+        BranchNode, ExtensionNode, HashBuilder, HashedPostState, LeafNode, RlpNode, TrieMask,
+        TrieNode, EMPTY_ROOT_HASH,
+    };
+    use reth_trie_sparse::{
+        blinded::{BlindedProvider, RevealedNode},
+        SparseNode, TrieMasks,
+    };
+
+    /// Mock blinded provider for testing that allows pre-setting nodes at specific paths.
+    ///
+    /// This provider can be used in tests to simulate blinded nodes that need to be revealed
+    /// during trie operations, particularly when collapsing branch nodes during leaf removal.
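+    ///
+    /// A minimal usage sketch (illustrative only; the path and node bytes are made up):
+    ///
+    /// ```ignore
+    /// let mut provider = MockBlindedProvider::new();
+    /// let mut encoded = Vec::new();
+    /// create_leaf_node([0x3, 0x4], 42).encode(&mut encoded);
+    /// provider.add_revealed_node(
+    ///     Nibbles::from_nibbles([0x1]),
+    ///     RevealedNode { node: encoded.into(), tree_mask: None, hash_mask: None },
+    /// );
+    /// assert!(provider.blinded_node(&Nibbles::from_nibbles([0x1])).unwrap().is_some());
+    /// ```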
+    #[derive(Debug, Clone)]
+    struct MockBlindedProvider {
+        /// Mapping from path to revealed node data
+        nodes: HashMap<Nibbles, RevealedNode>,
+    }
+
+    impl MockBlindedProvider {
+        /// Creates a new empty mock provider
+        fn new() -> Self {
+            Self { nodes: HashMap::with_hasher(RandomState::default()) }
+        }
+
+        /// Adds a revealed node at the specified path
+        fn add_revealed_node(&mut self, path: Nibbles, node: RevealedNode) {
+            self.nodes.insert(path, node);
+        }
+    }
+
+    impl BlindedProvider for MockBlindedProvider {
+        fn blinded_node(&self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> {
+            Ok(self.nodes.get(path).cloned())
+        }
+    }
+
+    fn create_account(nonce: u64) -> Account {
+        Account { nonce, ..Default::default() }
+    }
+
+    fn encode_account_value(nonce: u64) -> Vec<u8> {
+        let account = Account { nonce, ..Default::default() };
+        let trie_account = account.into_trie_account(EMPTY_ROOT_HASH);
+        let mut buf = Vec::new();
+        trie_account.encode(&mut buf);
+        buf
+    }
+
+    fn create_leaf_node(key: impl AsRef<[u8]>, value_nonce: u64) -> TrieNode {
+        TrieNode::Leaf(LeafNode::new(Nibbles::from_nibbles(key), encode_account_value(value_nonce)))
+    }
+
+    fn create_extension_node(key: impl AsRef<[u8]>, child_hash: B256) -> TrieNode {
+        TrieNode::Extension(ExtensionNode::new(
+            Nibbles::from_nibbles(key),
+            RlpNode::word_rlp(&child_hash),
+        ))
+    }
+
+    fn create_branch_node_with_children(
+        children_indices: &[u8],
+        child_hashes: impl IntoIterator<Item = RlpNode>,
+    ) -> TrieNode {
+        let mut stack = Vec::new();
+        let mut state_mask = TrieMask::default();
+
+        for (&idx, hash) in children_indices.iter().zip(child_hashes.into_iter()) {
+            state_mask.set_bit(idx);
+            stack.push(hash);
+        }
+
+        TrieNode::Branch(BranchNode::new(stack, state_mask))
+    }
+
+    /// Calculate the state root by feeding the provided state to the hash builder and retaining the
+    /// proofs for the provided targets.
+    ///
+    /// Returns the state root and the retained proof nodes.
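+    ///
+    /// Call sketch (illustrative; mirrors how the tests below use it):
+    ///
+    /// ```ignore
+    /// let (root, trie_updates, proof_nodes, hash_masks, tree_masks) = run_hash_builder(
+    ///     [(leaf_full_path, account)],
+    ///     NoopAccountTrieCursor::default(),
+    ///     Default::default(),
+    ///     [leaf_full_path],
+    /// );
+    /// ```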
+    fn run_hash_builder(
+        state: impl IntoIterator<Item = (Nibbles, Account)> + Clone,
+        trie_cursor: impl TrieCursor,
+        destroyed_accounts: B256Set,
+        proof_targets: impl IntoIterator<Item = Nibbles>,
+    ) -> (B256, TrieUpdates, ProofNodes, HashMap<Nibbles, TrieMask>, HashMap<Nibbles, TrieMask>)
+    {
+        let mut account_rlp = Vec::new();
+
+        let mut hash_builder = HashBuilder::default()
+            .with_updates(true)
+            .with_proof_retainer(ProofRetainer::from_iter(proof_targets));
+
+        let mut prefix_set = PrefixSetMut::default();
+        prefix_set.extend_keys(state.clone().into_iter().map(|(nibbles, _)| nibbles));
+        prefix_set.extend_keys(destroyed_accounts.iter().map(Nibbles::unpack));
+        let walker =
+            TrieWalker::state_trie(trie_cursor, prefix_set.freeze()).with_deletions_retained(true);
+        let hashed_post_state = HashedPostState::default()
+            .with_accounts(state.into_iter().map(|(nibbles, account)| {
+                (nibbles.pack().into_inner().unwrap().into(), Some(account))
+            }))
+            .into_sorted();
+        let mut node_iter = TrieNodeIter::state_trie(
+            walker,
+            HashedPostStateAccountCursor::new(
+                NoopHashedAccountCursor::default(),
+                hashed_post_state.accounts(),
+            ),
+        );
+
+        while let Some(node) = node_iter.try_next().unwrap() {
+            match node {
+                TrieElement::Branch(branch) => {
+                    hash_builder.add_branch(branch.key, branch.value, branch.children_are_in_trie);
+                }
+                TrieElement::Leaf(key, account) => {
+                    let account = account.into_trie_account(EMPTY_ROOT_HASH);
+                    account.encode(&mut account_rlp);
+
+                    hash_builder.add_leaf(Nibbles::unpack(key), &account_rlp);
+                    account_rlp.clear();
+                }
+            }
+        }
+        let root = hash_builder.root();
+        let proof_nodes = hash_builder.take_proof_nodes();
+        let branch_node_hash_masks = hash_builder
+            .updated_branch_nodes
+            .clone()
+            .unwrap_or_default()
+            .iter()
+            .map(|(path, node)| (*path, node.hash_mask))
+            .collect();
+        let branch_node_tree_masks = hash_builder
+            .updated_branch_nodes
+            .clone()
+            .unwrap_or_default()
+            .iter()
+            .map(|(path, node)| (*path, node.tree_mask))
+            .collect();
+
+        let mut trie_updates = TrieUpdates::default();
+        let removed_keys = node_iter.walker.take_removed_keys();
+        trie_updates.finalize(hash_builder, removed_keys, destroyed_accounts);
+
+        (root, trie_updates, proof_nodes, branch_node_hash_masks, branch_node_tree_masks)
+    }
+
+    /// Returns a `ParallelSparseTrie` pre-loaded with the given nodes, as well as leaf values
+    /// inferred from any provided leaf nodes.
+    fn new_test_trie<Nodes>(nodes: Nodes) -> ParallelSparseTrie
+    where
+        Nodes: Iterator<Item = (Nibbles, SparseNode)>,
+    {
+        let mut trie = ParallelSparseTrie::default().with_updates(true);
+
+        for (path, node) in nodes {
+            let subtrie = trie.subtrie_for_path(&path);
+            if let SparseNode::Leaf { key, .. } = &node {
+                let mut full_key = path;
+                full_key.extend(key);
+                subtrie.inner.values.insert(full_key, "LEAF VALUE".into());
+            }
+            subtrie.nodes.insert(path, node);
+        }
+        trie
+    }
+
+    /// Assert that the parallel sparse trie nodes and the proof nodes from the hash builder are
+    /// equal.
+    #[allow(unused)]
+    fn assert_eq_parallel_sparse_trie_proof_nodes(
+        sparse_trie: &ParallelSparseTrie,
+        proof_nodes: ProofNodes,
+    ) {
+        let proof_nodes = proof_nodes
+            .into_nodes_sorted()
+            .into_iter()
+            .map(|(path, node)| (path, TrieNode::decode(&mut node.as_ref()).unwrap()));
+
+        let lower_sparse_nodes = sparse_trie
+            .lower_subtries
+            .iter()
+            .filter_map(Option::as_ref)
+            .flat_map(|subtrie| subtrie.nodes.iter());
+
+        let upper_sparse_nodes = sparse_trie.upper_subtrie.nodes.iter();
+
+        let all_sparse_nodes =
+            lower_sparse_nodes.chain(upper_sparse_nodes).sorted_by_key(|(path, _)| *path);
+
+        for ((proof_node_path, proof_node), (sparse_node_path, sparse_node)) in
+            proof_nodes.zip(all_sparse_nodes)
+        {
+            assert_eq!(&proof_node_path, sparse_node_path);
+
+            let equals = match (&proof_node, &sparse_node) {
+                // Both nodes are empty
+                (TrieNode::EmptyRoot, SparseNode::Empty) => true,
+                // Both nodes are branches and have the same state mask
+                (
+                    TrieNode::Branch(BranchNode { state_mask: proof_state_mask, .. }),
+                    SparseNode::Branch { state_mask: sparse_state_mask, .. },
+                ) => proof_state_mask == sparse_state_mask,
+                // Both nodes are extensions and have the same key
+                (
+                    TrieNode::Extension(ExtensionNode { key: proof_key, .. }),
+                    SparseNode::Extension { key: sparse_key, .. },
+                ) |
+                // Both nodes are leaves and have the same key
+                (
+                    TrieNode::Leaf(LeafNode { key: proof_key, .. }),
+                    SparseNode::Leaf { key: sparse_key, .. },
+                ) => proof_key == sparse_key,
+                // Empty and hash nodes are specific to the sparse trie, skip them
+                (_, SparseNode::Empty | SparseNode::Hash(_)) => continue,
+                _ => false,
+            };
+            assert!(
+                equals,
+                "path: {proof_node_path:?}\nproof node: {proof_node:?}\nsparse node: {sparse_node:?}"
+            );
+        }
+    }
+
+    #[test]
+    fn test_get_changed_subtries_empty() {
+        let mut trie = ParallelSparseTrie::default();
+        let mut prefix_set = PrefixSetMut::from([Nibbles::default()]).freeze();
+
+        let (subtries, unchanged_prefix_set) = trie.take_changed_lower_subtries(&mut prefix_set);
+        assert!(subtries.is_empty());
+        assert_eq!(unchanged_prefix_set, PrefixSetMut::from(prefix_set.iter().copied()));
+    }
+
+    #[test]
+    fn test_get_changed_subtries() {
+        // Create a trie with three subtries
+        let mut trie = ParallelSparseTrie::default();
+        let subtrie_1 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x0, 0x0])));
+        let subtrie_1_index = path_subtrie_index_unchecked(&subtrie_1.path);
+        let subtrie_2 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x1, 0x0])));
+        let subtrie_2_index = path_subtrie_index_unchecked(&subtrie_2.path);
+        let subtrie_3 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x3, 0x0])));
+        let subtrie_3_index = path_subtrie_index_unchecked(&subtrie_3.path);
+
+        // Add subtries at specific positions
+        trie.lower_subtries[subtrie_1_index] = Some(subtrie_1.clone());
+        trie.lower_subtries[subtrie_2_index] = Some(subtrie_2.clone());
+        trie.lower_subtries[subtrie_3_index] = Some(subtrie_3);
+
+        let expected_unchanged_prefix_set = PrefixSetMut::from([
+            Nibbles::from_nibbles([0x0]),
+            Nibbles::from_nibbles([0x2, 0x0, 0x0]),
+        ]);
+        // Create a prefix set with the keys that match only the second subtrie
+        let mut prefix_set = PrefixSetMut::from([
+            // Match second subtrie
+            Nibbles::from_nibbles([0x1, 0x0, 0x0]),
+            Nibbles::from_nibbles([0x1, 0x0, 0x1, 0x0]),
+        ]);
+        prefix_set.extend(expected_unchanged_prefix_set.clone());
+        let mut prefix_set = prefix_set.freeze();
+
+        // Second subtrie should be removed and returned
+        let (subtries, unchanged_prefix_set) =
trie.take_changed_lower_subtries(&mut prefix_set);
+        assert_eq!(
+            subtries
+                .into_iter()
+                .map(|ChangedSubtrie { index, subtrie, prefix_set }| {
+                    (index, subtrie, prefix_set.iter().copied().collect::<Vec<_>>())
+                })
+                .collect::<Vec<_>>(),
+            vec![(
+                subtrie_2_index,
+                subtrie_2,
+                vec![
+                    Nibbles::from_nibbles([0x1, 0x0, 0x0]),
+                    Nibbles::from_nibbles([0x1, 0x0, 0x1, 0x0])
+                ]
+            )]
+        );
+        assert_eq!(unchanged_prefix_set, expected_unchanged_prefix_set);
+        assert!(trie.lower_subtries[subtrie_2_index].is_none());
+
+        // First subtrie should remain unchanged
+        assert_eq!(trie.lower_subtries[subtrie_1_index], Some(subtrie_1));
+    }
+
+    #[test]
+    fn test_get_changed_subtries_all() {
+        // Create a trie with three subtries
+        let mut trie = ParallelSparseTrie::default();
+        let subtrie_1 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x0, 0x0])));
+        let subtrie_1_index = path_subtrie_index_unchecked(&subtrie_1.path);
+        let subtrie_2 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x1, 0x0])));
+        let subtrie_2_index = path_subtrie_index_unchecked(&subtrie_2.path);
+        let subtrie_3 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x3, 0x0])));
+        let subtrie_3_index = path_subtrie_index_unchecked(&subtrie_3.path);
+
+        // Add subtries at specific positions
+        trie.lower_subtries[subtrie_1_index] = Some(subtrie_1.clone());
+        trie.lower_subtries[subtrie_2_index] = Some(subtrie_2.clone());
+        trie.lower_subtries[subtrie_3_index] = Some(subtrie_3.clone());
+
+        // Create a prefix set that matches any key
+        let mut prefix_set = PrefixSetMut::all().freeze();
+
+        // All subtries should be removed and returned
+        let (subtries, unchanged_prefix_set) = trie.take_changed_lower_subtries(&mut prefix_set);
+        assert_eq!(
+            subtries
+                .into_iter()
+                .map(|ChangedSubtrie { index, subtrie, prefix_set }| {
+                    (index, subtrie, prefix_set.all())
+                })
+                .collect::<Vec<_>>(),
+            vec![
+                (subtrie_1_index, subtrie_1, true),
+                (subtrie_2_index, subtrie_2, true),
+                (subtrie_3_index, subtrie_3, true)
+            ]
+        );
+        assert_eq!(unchanged_prefix_set, PrefixSetMut::all());
+
+        assert!(trie.lower_subtries.iter().all(Option::is_none));
+    }
+
+    #[test]
+    fn test_sparse_subtrie_type() {
+        assert_eq!(SparseSubtrieType::from_path(&Nibbles::new()), SparseSubtrieType::Upper);
+        assert_eq!(
+            SparseSubtrieType::from_path(&Nibbles::from_nibbles([0])),
+            SparseSubtrieType::Upper
+        );
+        assert_eq!(
+            SparseSubtrieType::from_path(&Nibbles::from_nibbles([15])),
+            SparseSubtrieType::Upper
+        );
+        assert_eq!(
+            SparseSubtrieType::from_path(&Nibbles::from_nibbles([0, 0])),
+            SparseSubtrieType::Lower(0)
+        );
+        assert_eq!(
+            SparseSubtrieType::from_path(&Nibbles::from_nibbles([0, 0, 0])),
+            SparseSubtrieType::Lower(0)
+        );
+        assert_eq!(
+            SparseSubtrieType::from_path(&Nibbles::from_nibbles([0, 1])),
+            SparseSubtrieType::Lower(1)
+        );
+        assert_eq!(
+            SparseSubtrieType::from_path(&Nibbles::from_nibbles([0, 1, 0])),
+            SparseSubtrieType::Lower(1)
+        );
+        assert_eq!(
+            SparseSubtrieType::from_path(&Nibbles::from_nibbles([0, 15])),
+            SparseSubtrieType::Lower(15)
+        );
+        assert_eq!(
+            SparseSubtrieType::from_path(&Nibbles::from_nibbles([15, 0])),
+            SparseSubtrieType::Lower(240)
+        );
+        assert_eq!(
+            SparseSubtrieType::from_path(&Nibbles::from_nibbles([15, 1])),
+            SparseSubtrieType::Lower(241)
+        );
+        assert_eq!(
+            SparseSubtrieType::from_path(&Nibbles::from_nibbles([15, 15])),
+            SparseSubtrieType::Lower(255)
+        );
+        assert_eq!(
+            SparseSubtrieType::from_path(&Nibbles::from_nibbles([15, 15, 15])),
+            SparseSubtrieType::Lower(255)
+        );
+    }
+
+    #[test]
+    fn test_reveal_node_leaves() {
+        let
mut trie = ParallelSparseTrie::default(); + + // Reveal leaf in the upper trie + { + let path = Nibbles::from_nibbles([0x1]); + let node = create_leaf_node([0x2, 0x3], 42); + let masks = TrieMasks::none(); + + trie.reveal_node(path, node, masks).unwrap(); + + assert_matches!( + trie.upper_subtrie.nodes.get(&path), + Some(SparseNode::Leaf { key, hash: None }) + if key == &Nibbles::from_nibbles([0x2, 0x3]) + ); + + let full_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); + assert_eq!( + trie.upper_subtrie.inner.values.get(&full_path), + Some(&encode_account_value(42)) + ); + } + + // Reveal leaf in a lower trie + { + let path = Nibbles::from_nibbles([0x1, 0x2]); + let node = create_leaf_node([0x3, 0x4], 42); + let masks = TrieMasks::none(); + + trie.reveal_node(path, node, masks).unwrap(); + + // Check that the lower subtrie was created + let idx = path_subtrie_index_unchecked(&path); + assert!(trie.lower_subtries[idx].is_some()); + + let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); + assert_matches!( + lower_subtrie.nodes.get(&path), + Some(SparseNode::Leaf { key, hash: None }) + if key == &Nibbles::from_nibbles([0x3, 0x4]) + ); + } + } + + #[test] + fn test_reveal_node_extension_all_upper() { + let mut trie = ParallelSparseTrie::default(); + let path = Nibbles::new(); + let child_hash = B256::repeat_byte(0xab); + let node = create_extension_node([0x1], child_hash); + let masks = TrieMasks::none(); + + trie.reveal_node(path, node, masks).unwrap(); + + assert_matches!( + trie.upper_subtrie.nodes.get(&path), + Some(SparseNode::Extension { key, hash: None, .. }) + if key == &Nibbles::from_nibbles([0x1]) + ); + + // Child path should be in upper trie + let child_path = Nibbles::from_nibbles([0x1]); + assert_eq!(trie.upper_subtrie.nodes.get(&child_path), Some(&SparseNode::Hash(child_hash))); + } + + #[test] + fn test_reveal_node_extension_cross_level() { + let mut trie = ParallelSparseTrie::default(); + let path = Nibbles::new(); + let child_hash = B256::repeat_byte(0xcd); + let node = create_extension_node([0x1, 0x2, 0x3], child_hash); + let masks = TrieMasks::none(); + + trie.reveal_node(path, node, masks).unwrap(); + + // Extension node should be in upper trie + assert_matches!( + trie.upper_subtrie.nodes.get(&path), + Some(SparseNode::Extension { key, hash: None, .. }) + if key == &Nibbles::from_nibbles([0x1, 0x2, 0x3]) + ); + + // Child path (0x1, 0x2, 0x3) should be in lower trie + let child_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); + let idx = path_subtrie_index_unchecked(&child_path); + assert!(trie.lower_subtries[idx].is_some()); + + let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); + assert_eq!(lower_subtrie.nodes.get(&child_path), Some(&SparseNode::Hash(child_hash))); + } + + #[test] + fn test_reveal_node_extension_cross_level_boundary() { + let mut trie = ParallelSparseTrie::default(); + let path = Nibbles::from_nibbles([0x1]); + let child_hash = B256::repeat_byte(0xcd); + let node = create_extension_node([0x2], child_hash); + let masks = TrieMasks::none(); + + trie.reveal_node(path, node, masks).unwrap(); + + // Extension node should be in upper trie + assert_matches!( + trie.upper_subtrie.nodes.get(&path), + Some(SparseNode::Extension { key, hash: None, .. 
})
+            if key == &Nibbles::from_nibbles([0x2])
+        );
+
+        // Child path (0x1, 0x2) should be in lower trie
+        let child_path = Nibbles::from_nibbles([0x1, 0x2]);
+        let idx = path_subtrie_index_unchecked(&child_path);
+        assert!(trie.lower_subtries[idx].is_some());
+
+        let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap();
+        assert_eq!(lower_subtrie.nodes.get(&child_path), Some(&SparseNode::Hash(child_hash)));
+    }
+
+    #[test]
+    fn test_reveal_node_branch_all_upper() {
+        let mut trie = ParallelSparseTrie::default();
+        let path = Nibbles::new();
+        let child_hashes = [
+            RlpNode::word_rlp(&B256::repeat_byte(0x11)),
+            RlpNode::word_rlp(&B256::repeat_byte(0x22)),
+        ];
+        let node = create_branch_node_with_children(&[0x0, 0x5], child_hashes.clone());
+        let masks = TrieMasks::none();
+
+        trie.reveal_node(path, node, masks).unwrap();
+
+        // Branch node should be in upper trie
+        assert_matches!(
+            trie.upper_subtrie.nodes.get(&path),
+            Some(SparseNode::Branch { state_mask, hash: None, .. })
+            if *state_mask == 0b0000000000100001.into()
+        );
+
+        // Children should be in upper trie (paths of length 1)
+        let child_path_0 = Nibbles::from_nibbles([0x0]);
+        let child_path_5 = Nibbles::from_nibbles([0x5]);
+        assert_eq!(
+            trie.upper_subtrie.nodes.get(&child_path_0),
+            Some(&SparseNode::Hash(child_hashes[0].as_hash().unwrap()))
+        );
+        assert_eq!(
+            trie.upper_subtrie.nodes.get(&child_path_5),
+            Some(&SparseNode::Hash(child_hashes[1].as_hash().unwrap()))
+        );
+    }
+
+    #[test]
+    fn test_reveal_node_branch_cross_level() {
+        let mut trie = ParallelSparseTrie::default();
+        let path = Nibbles::from_nibbles([0x1]); // Exactly 1 nibble - boundary case
+        let child_hashes = [
+            RlpNode::word_rlp(&B256::repeat_byte(0x33)),
+            RlpNode::word_rlp(&B256::repeat_byte(0x44)),
+            RlpNode::word_rlp(&B256::repeat_byte(0x55)),
+        ];
+        let node = create_branch_node_with_children(&[0x0, 0x7, 0xf], child_hashes.clone());
+        let masks = TrieMasks::none();
+
+        trie.reveal_node(path, node, masks).unwrap();
+
+        // Branch node should be in upper trie
+        assert_matches!(
+            trie.upper_subtrie.nodes.get(&path),
+            Some(SparseNode::Branch { state_mask, hash: None, ..
})
+            if *state_mask == 0b1000000010000001.into()
+        );
+
+        // All children should be in lower tries since they have paths of length 2
+        let child_paths = [
+            Nibbles::from_nibbles([0x1, 0x0]),
+            Nibbles::from_nibbles([0x1, 0x7]),
+            Nibbles::from_nibbles([0x1, 0xf]),
+        ];
+
+        for (i, child_path) in child_paths.iter().enumerate() {
+            let idx = path_subtrie_index_unchecked(child_path);
+            let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap();
+            assert_eq!(
+                lower_subtrie.nodes.get(child_path),
+                Some(&SparseNode::Hash(child_hashes[i].as_hash().unwrap())),
+            );
+        }
+    }
+
+    #[test]
+    fn test_update_subtrie_hashes() {
+        // Create a trie with three subtries
+        let mut trie = ParallelSparseTrie::default();
+        let mut subtrie_1 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x0, 0x0])));
+        let subtrie_1_index = path_subtrie_index_unchecked(&subtrie_1.path);
+        let mut subtrie_2 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x1, 0x0])));
+        let subtrie_2_index = path_subtrie_index_unchecked(&subtrie_2.path);
+        let mut subtrie_3 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x3, 0x0])));
+        let subtrie_3_index = path_subtrie_index_unchecked(&subtrie_3.path);
+
+        // Reveal dummy leaf nodes that form an incorrect trie structure, but enough to test the
+        // method
+        let leaf_1_full_path = Nibbles::from_nibbles([0; 64]);
+        let leaf_1_path = leaf_1_full_path.slice(..2);
+        let leaf_1_key = leaf_1_full_path.slice(2..);
+        let leaf_2_full_path = Nibbles::from_nibbles([vec![1, 0], vec![0; 62]].concat());
+        let leaf_2_path = leaf_2_full_path.slice(..2);
+        let leaf_2_key = leaf_2_full_path.slice(2..);
+        let leaf_3_full_path = Nibbles::from_nibbles([vec![3, 0], vec![0; 62]].concat());
+        let leaf_3_path = leaf_3_full_path.slice(..2);
+        let leaf_3_key = leaf_3_full_path.slice(2..);
+        let leaf_1 = create_leaf_node(leaf_1_key.to_vec(), 1);
+        let leaf_2 = create_leaf_node(leaf_2_key.to_vec(), 2);
+        let leaf_3 = create_leaf_node(leaf_3_key.to_vec(), 3);
+        subtrie_1.reveal_node(leaf_1_path, &leaf_1, TrieMasks::none()).unwrap();
+        subtrie_2.reveal_node(leaf_2_path, &leaf_2, TrieMasks::none()).unwrap();
+        subtrie_3.reveal_node(leaf_3_path, &leaf_3, TrieMasks::none()).unwrap();
+
+        // Add subtries at specific positions
+        trie.lower_subtries[subtrie_1_index] = Some(subtrie_1);
+        trie.lower_subtries[subtrie_2_index] = Some(subtrie_2);
+        trie.lower_subtries[subtrie_3_index] = Some(subtrie_3);
+
+        let unchanged_prefix_set = PrefixSetMut::from([
+            Nibbles::from_nibbles([0x0]),
+            Nibbles::from_nibbles([0x2, 0x0, 0x0]),
+        ]);
+        // Create a prefix set with the keys that match only the second subtrie
+        let mut prefix_set = PrefixSetMut::from([
+            // Match second subtrie
+            Nibbles::from_nibbles([0x1, 0x0, 0x0]),
+            Nibbles::from_nibbles([0x1, 0x0, 0x1, 0x0]),
+        ]);
+        prefix_set.extend(unchanged_prefix_set.clone());
+        trie.prefix_set = prefix_set;
+
+        // Update subtrie hashes
+        trie.update_lower_subtrie_hashes();
+
+        // Check that the prefix set was updated
+        assert_eq!(trie.prefix_set, unchanged_prefix_set);
+        // Check that the subtries were returned back to the array
+        assert!(trie.lower_subtries[subtrie_1_index].is_some());
+        assert!(trie.lower_subtries[subtrie_2_index].is_some());
+        assert!(trie.lower_subtries[subtrie_3_index].is_some());
+    }
+
+    #[test]
+    fn test_subtrie_update_hashes() {
+        let mut subtrie =
+            Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x0, 0x0])).with_updates(true));
+
+        // Create leaf nodes with paths 0x0...0, 0x00001...0, 0x0010...0
+        let leaf_1_full_path = Nibbles::from_nibbles([0;
64]);
+        let leaf_1_path = leaf_1_full_path.slice(..5);
+        let leaf_1_key = leaf_1_full_path.slice(5..);
+        let leaf_2_full_path = Nibbles::from_nibbles([vec![0, 0, 0, 0, 1], vec![0; 59]].concat());
+        let leaf_2_path = leaf_2_full_path.slice(..5);
+        let leaf_2_key = leaf_2_full_path.slice(5..);
+        let leaf_3_full_path = Nibbles::from_nibbles([vec![0, 0, 1], vec![0; 61]].concat());
+        let leaf_3_path = leaf_3_full_path.slice(..3);
+        let leaf_3_key = leaf_3_full_path.slice(3..);
+
+        let account_1 = create_account(1);
+        let account_2 = create_account(2);
+        let account_3 = create_account(3);
+        let leaf_1 = create_leaf_node(leaf_1_key.to_vec(), account_1.nonce);
+        let leaf_2 = create_leaf_node(leaf_2_key.to_vec(), account_2.nonce);
+        let leaf_3 = create_leaf_node(leaf_3_key.to_vec(), account_3.nonce);
+
+        // Create bottom branch node
+        let branch_1_path = Nibbles::from_nibbles([0, 0, 0, 0]);
+        let branch_1 = create_branch_node_with_children(
+            &[0, 1],
+            vec![
+                RlpNode::from_rlp(&alloy_rlp::encode(&leaf_1)),
+                RlpNode::from_rlp(&alloy_rlp::encode(&leaf_2)),
+            ],
+        );
+
+        // Create an extension node
+        let extension_path = Nibbles::from_nibbles([0, 0, 0]);
+        let extension_key = Nibbles::from_nibbles([0]);
+        let extension = create_extension_node(
+            extension_key.to_vec(),
+            RlpNode::from_rlp(&alloy_rlp::encode(&branch_1)).as_hash().unwrap(),
+        );
+
+        // Create top branch node
+        let branch_2_path = Nibbles::from_nibbles([0, 0]);
+        let branch_2 = create_branch_node_with_children(
+            &[0, 1],
+            vec![
+                RlpNode::from_rlp(&alloy_rlp::encode(&extension)),
+                RlpNode::from_rlp(&alloy_rlp::encode(&leaf_3)),
+            ],
+        );
+
+        // Reveal nodes
+        subtrie.reveal_node(branch_2_path, &branch_2, TrieMasks::none()).unwrap();
+        subtrie.reveal_node(leaf_1_path, &leaf_1, TrieMasks::none()).unwrap();
+        subtrie.reveal_node(extension_path, &extension, TrieMasks::none()).unwrap();
+        subtrie.reveal_node(branch_1_path, &branch_1, TrieMasks::none()).unwrap();
+        subtrie.reveal_node(leaf_2_path, &leaf_2, TrieMasks::none()).unwrap();
+        subtrie.reveal_node(leaf_3_path, &leaf_3, TrieMasks::none()).unwrap();
+
+        // Run the hash builder for the three leaf nodes
+        let (_, _, proof_nodes, _, _) = run_hash_builder(
+            [
+                (leaf_1_full_path, account_1),
+                (leaf_2_full_path, account_2),
+                (leaf_3_full_path, account_3),
+            ],
+            NoopAccountTrieCursor::default(),
+            Default::default(),
+            [
+                branch_1_path,
+                extension_path,
+                branch_2_path,
+                leaf_1_full_path,
+                leaf_2_full_path,
+                leaf_3_full_path,
+            ],
+        );
+
+        // Update hashes for the subtrie
+        subtrie.update_hashes(
+            &mut PrefixSetMut::from([leaf_1_full_path, leaf_2_full_path, leaf_3_full_path])
+                .freeze(),
+        );
+
+        // Compare hashes between hash builder and subtrie
+
+        let hash_builder_branch_1_hash =
+            RlpNode::from_rlp(proof_nodes.get(&branch_1_path).unwrap().as_ref()).as_hash().unwrap();
+        let subtrie_branch_1_hash = subtrie.nodes.get(&branch_1_path).unwrap().hash().unwrap();
+        assert_eq!(hash_builder_branch_1_hash, subtrie_branch_1_hash);
+
+        let hash_builder_extension_hash =
+            RlpNode::from_rlp(proof_nodes.get(&extension_path).unwrap().as_ref())
+                .as_hash()
+                .unwrap();
+        let subtrie_extension_hash = subtrie.nodes.get(&extension_path).unwrap().hash().unwrap();
+        assert_eq!(hash_builder_extension_hash, subtrie_extension_hash);
+
+        let hash_builder_branch_2_hash =
+            RlpNode::from_rlp(proof_nodes.get(&branch_2_path).unwrap().as_ref()).as_hash().unwrap();
+        let subtrie_branch_2_hash = subtrie.nodes.get(&branch_2_path).unwrap().hash().unwrap();
+        assert_eq!(hash_builder_branch_2_hash,
subtrie_branch_2_hash);
+
+        let subtrie_leaf_1_hash = subtrie.nodes.get(&leaf_1_path).unwrap().hash().unwrap();
+        let hash_builder_leaf_1_hash =
+            RlpNode::from_rlp(proof_nodes.get(&leaf_1_path).unwrap().as_ref()).as_hash().unwrap();
+        assert_eq!(hash_builder_leaf_1_hash, subtrie_leaf_1_hash);
+
+        let hash_builder_leaf_2_hash =
+            RlpNode::from_rlp(proof_nodes.get(&leaf_2_path).unwrap().as_ref()).as_hash().unwrap();
+        let subtrie_leaf_2_hash = subtrie.nodes.get(&leaf_2_path).unwrap().hash().unwrap();
+        assert_eq!(hash_builder_leaf_2_hash, subtrie_leaf_2_hash);
+
+        let hash_builder_leaf_3_hash =
+            RlpNode::from_rlp(proof_nodes.get(&leaf_3_path).unwrap().as_ref()).as_hash().unwrap();
+        let subtrie_leaf_3_hash = subtrie.nodes.get(&leaf_3_path).unwrap().hash().unwrap();
+        assert_eq!(hash_builder_leaf_3_hash, subtrie_leaf_3_hash);
+    }
+
+    #[test]
+    fn test_remove_leaf_branch_becomes_extension() {
+        //
+        // 0x:      Extension (Key = 5)
+        // 0x5:     └── Branch (Mask = 1010)
+        // 0x50:        ├── 0 -> Extension (Key = 23)
+        // 0x5023:      │       └── Branch (Mask = 1010)
+        // 0x50231:     │           ├── 1 -> Leaf
+        // 0x50233:     │           └── 3 -> Leaf
+        // 0x53:        └── 3 -> Leaf (Key = 7)
+        //
+        // After removing 0x53, extension+branch+extension become a single extension
+        //
+        let mut trie = new_test_trie(
+            [
+                (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))),
+                (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(TrieMask::new(0b1001))),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0]),
+                    SparseNode::new_ext(Nibbles::from_nibbles([0x2, 0x3])),
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3]),
+                    SparseNode::new_branch(TrieMask::new(0b1010)),
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]),
+                    SparseNode::new_leaf(Nibbles::new()),
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]),
+                    SparseNode::new_leaf(Nibbles::new()),
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x3]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x7])),
+                ),
+            ]
+            .into_iter(),
+        );
+
+        let provider = MockBlindedProvider::new();
+
+        // Remove the leaf with a full path of 0x537
+        let leaf_full_path = Nibbles::from_nibbles([0x5, 0x3, 0x7]);
+        trie.remove_leaf(&leaf_full_path, provider).unwrap();
+
+        let upper_subtrie = &trie.upper_subtrie;
+        let lower_subtrie_50 = trie.lower_subtries[0x50].as_ref().unwrap();
+        let lower_subtrie_53 = trie.lower_subtries[0x53].as_ref().unwrap();
+
+        // Check that the leaf value was removed from the appropriate `SparseSubtrie`.
+        assert_matches!(lower_subtrie_53.inner.values.get(&leaf_full_path), None);
+
+        // Check that the leaf node was removed, and that its parent/grandparent were modified
+        // appropriately.
+        assert_matches!(
+            upper_subtrie.nodes.get(&Nibbles::from_nibbles([])),
+            Some(SparseNode::Extension{ key, ..})
+            if key == &Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3])
+        );
+        assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x5])), None);
+        assert_matches!(lower_subtrie_50.nodes.get(&Nibbles::from_nibbles([0x5, 0x0])), None);
+        assert_matches!(
+            lower_subtrie_50.nodes.get(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3])),
+            Some(SparseNode::Branch{ state_mask, ..
})
+            if *state_mask == 0b1010.into()
+        );
+        assert_matches!(lower_subtrie_53.nodes.get(&Nibbles::from_nibbles([0x5, 0x3])), None);
+    }
+
+    #[test]
+    fn test_remove_leaf_branch_becomes_leaf() {
+        //
+        // 0x:   Branch (Mask = 0011)
+        // 0x0:  ├── 0 -> Leaf (Key = 12)
+        // 0x1:  └── 1 -> Leaf (Key = 34)
+        //
+        // After removing 0x012, the branch becomes a leaf
+        //
+        let mut trie = new_test_trie(
+            [
+                (Nibbles::default(), SparseNode::new_branch(TrieMask::new(0b0011))),
+                (
+                    Nibbles::from_nibbles([0x0]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2])),
+                ),
+                (
+                    Nibbles::from_nibbles([0x1]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x3, 0x4])),
+                ),
+            ]
+            .into_iter(),
+        );
+
+        // Add the branch node to updated_nodes to simulate it being modified earlier
+        if let Some(updates) = trie.updates.as_mut() {
+            updates
+                .updated_nodes
+                .insert(Nibbles::default(), BranchNodeCompact::new(0b11, 0, 0, vec![], None));
+        }
+
+        let provider = MockBlindedProvider::new();
+
+        // Remove the leaf with a full path of 0x012
+        let leaf_full_path = Nibbles::from_nibbles([0x0, 0x1, 0x2]);
+        trie.remove_leaf(&leaf_full_path, provider).unwrap();
+
+        let upper_subtrie = &trie.upper_subtrie;
+
+        // Check that the leaf's value was removed
+        assert_matches!(upper_subtrie.inner.values.get(&leaf_full_path), None);
+
+        // Check that the branch node collapsed into a leaf node with the remaining child's key
+        assert_matches!(
+            upper_subtrie.nodes.get(&Nibbles::default()),
+            Some(SparseNode::Leaf{ key, ..})
+            if key == &Nibbles::from_nibbles([0x1, 0x3, 0x4])
+        );
+
+        // Check that the remaining child node was removed (it was merged into the new leaf)
+        assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x1])), None);
+        // Check that the removed child node was also removed
+        assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x0])), None);
+
+        // Check that updates were tracked correctly when the branch collapsed
+        let updates = trie.updates.as_ref().unwrap();
+
+        // The branch at the root should be marked as removed since it collapsed
+        assert!(updates.removed_nodes.contains(&Nibbles::default()));
+
+        // The branch should no longer be in updated_nodes
+        assert!(!updates.updated_nodes.contains_key(&Nibbles::default()));
+    }
+
+    #[test]
+    fn test_remove_leaf_extension_becomes_leaf() {
+        //
+        // 0x:   Extension (Key = 5)
+        // 0x5:  └── Branch (Mask = 0011)
+        // 0x50:     ├── 0 -> Leaf (Key = 12)
+        // 0x51:     └── 1 -> Leaf (Key = 34)
+        //
+        // After removing 0x5012, extension+branch becomes a leaf
+        //
+        let mut trie = new_test_trie(
+            [
+                (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))),
+                (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(TrieMask::new(0b0011))),
+                (
+                    Nibbles::from_nibbles([0x5, 0x0]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2])),
+                ),
+                (
+                    Nibbles::from_nibbles([0x5, 0x1]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x3, 0x4])),
+                ),
+            ]
+            .into_iter(),
+        );
+
+        let provider = MockBlindedProvider::new();
+
+        // Remove the leaf with a full path of 0x5012
+        let leaf_full_path = Nibbles::from_nibbles([0x5, 0x0, 0x1, 0x2]);
+        trie.remove_leaf(&leaf_full_path, provider).unwrap();
+
+        let upper_subtrie = &trie.upper_subtrie;
+        let lower_subtrie_50 = trie.lower_subtries[0x50].as_ref().unwrap();
+        let lower_subtrie_51 = trie.lower_subtries[0x51].as_ref().unwrap();
+
+        // Check that the removed leaf's value is gone
+        assert_matches!(lower_subtrie_50.inner.values.get(&leaf_full_path), None);
+
+        // Check that the other leaf's value was moved to the upper trie
+        let other_leaf_full_value =
Nibbles::from_nibbles([0x5, 0x1, 0x3, 0x4]); + assert_matches!(lower_subtrie_51.inner.values.get(&other_leaf_full_value), None); + assert_matches!(upper_subtrie.inner.values.get(&other_leaf_full_value), Some(_)); + + // Check that the extension node collapsed into a leaf node + assert_matches!( + upper_subtrie.nodes.get(&Nibbles::default()), + Some(SparseNode::Leaf{ key, ..}) + if key == &Nibbles::from_nibbles([0x5, 0x1, 0x3, 0x4]) + ); + + // Check that intermediate nodes were removed + assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x5])), None); + assert_matches!(lower_subtrie_50.nodes.get(&Nibbles::from_nibbles([0x5, 0x0])), None); + assert_matches!(lower_subtrie_51.nodes.get(&Nibbles::from_nibbles([0x5, 0x1])), None); + } + + #[test] + fn test_remove_leaf_branch_on_branch() { + // + // 0x: Branch (Mask = 0101) + // 0x0: ├── 0 -> Leaf (Key = 12) + // 0x2: └── 2 -> Branch (Mask = 0011) + // 0x20: ├── 0 -> Leaf (Key = 34) + // 0x21: └── 1 -> Leaf (Key = 56) + // + // After removing 0x2034, the inner branch becomes a leaf + // + let mut trie = new_test_trie( + [ + (Nibbles::default(), SparseNode::new_branch(TrieMask::new(0b0101))), + ( + Nibbles::from_nibbles([0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2])), + ), + (Nibbles::from_nibbles([0x2]), SparseNode::new_branch(TrieMask::new(0b0011))), + ( + Nibbles::from_nibbles([0x2, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x3, 0x4])), + ), + ( + Nibbles::from_nibbles([0x2, 0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x5, 0x6])), + ), + ] + .into_iter(), + ); + + let provider = MockBlindedProvider::new(); + + // Remove the leaf with a full path of 0x2034 + let leaf_full_path = Nibbles::from_nibbles([0x2, 0x0, 0x3, 0x4]); + trie.remove_leaf(&leaf_full_path, provider).unwrap(); + + let upper_subtrie = &trie.upper_subtrie; + let lower_subtrie_20 = trie.lower_subtries[0x20].as_ref().unwrap(); + let lower_subtrie_21 = trie.lower_subtries[0x21].as_ref().unwrap(); + + // Check that the leaf's value was removed + assert_matches!(lower_subtrie_20.inner.values.get(&leaf_full_path), None); + + // Check that the other leaf's value was moved to the upper trie + let other_leaf_full_value = Nibbles::from_nibbles([0x2, 0x1, 0x5, 0x6]); + assert_matches!(lower_subtrie_21.inner.values.get(&other_leaf_full_value), None); + assert_matches!(upper_subtrie.inner.values.get(&other_leaf_full_value), Some(_)); + + // Check that the root branch still exists unchanged + assert_matches!( + upper_subtrie.nodes.get(&Nibbles::default()), + Some(SparseNode::Branch{ state_mask, .. 
})
+            if *state_mask == 0b0101.into()
+        );
+
+        // Check that the inner branch collapsed into a leaf
+        assert_matches!(
+            upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x2])),
+            Some(SparseNode::Leaf{ key, ..})
+            if key == &Nibbles::from_nibbles([0x1, 0x5, 0x6])
+        );
+
+        // Check that the branch's child nodes were removed
+        assert_matches!(lower_subtrie_20.nodes.get(&Nibbles::from_nibbles([0x2, 0x0])), None);
+        assert_matches!(lower_subtrie_21.nodes.get(&Nibbles::from_nibbles([0x2, 0x1])), None);
+    }
+
+    #[test]
+    fn test_remove_leaf_remaining_child_needs_reveal() {
+        //
+        // 0x:   Branch (Mask = 0011)
+        // 0x0:  ├── 0 -> Leaf (Key = 12)
+        // 0x1:  └── 1 -> Hash (blinded leaf)
+        //
+        // After removing 0x012, the hash node needs to be revealed to collapse the branch
+        //
+        let mut trie = new_test_trie(
+            [
+                (Nibbles::default(), SparseNode::new_branch(TrieMask::new(0b0011))),
+                (
+                    Nibbles::from_nibbles([0x0]),
+                    SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2])),
+                ),
+                (Nibbles::from_nibbles([0x1]), SparseNode::Hash(B256::repeat_byte(0xab))),
+            ]
+            .into_iter(),
+        );
+
+        // Create a mock provider that will reveal the blinded leaf
+        let mut provider = MockBlindedProvider::new();
+        let revealed_leaf = create_leaf_node([0x3, 0x4], 42);
+        let mut encoded = Vec::new();
+        revealed_leaf.encode(&mut encoded);
+        provider.add_revealed_node(
+            Nibbles::from_nibbles([0x1]),
+            RevealedNode { node: encoded.into(), tree_mask: None, hash_mask: None },
+        );
+
+        // Remove the leaf with a full path of 0x012
+        let leaf_full_path = Nibbles::from_nibbles([0x0, 0x1, 0x2]);
+        trie.remove_leaf(&leaf_full_path, provider).unwrap();
+
+        let upper_subtrie = &trie.upper_subtrie;
+
+        // Check that the leaf value was removed
+        assert_matches!(upper_subtrie.inner.values.get(&leaf_full_path), None);
+
+        // Check that the branch node collapsed into a leaf node with the revealed child's key
+        assert_matches!(
+            upper_subtrie.nodes.get(&Nibbles::default()),
+            Some(SparseNode::Leaf{ key, ..})
+            if key == &Nibbles::from_nibbles([0x1, 0x3, 0x4])
+        );
+
+        // Check that the remaining child node was removed (since it was merged)
+        assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x1])), None);
+    }
+
+    #[test]
+    fn test_remove_leaf_root() {
+        //
+        // 0x: Leaf (Key = 123)
+        //
+        // After removing 0x123, the trie becomes empty
+        //
+        let mut trie = new_test_trie(std::iter::once((
+            Nibbles::default(),
+            SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2, 0x3])),
+        )));
+
+        let provider = MockBlindedProvider::new();
+
+        // Remove the leaf with a full key of 0x123
+        let leaf_full_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]);
+        trie.remove_leaf(&leaf_full_path, provider).unwrap();
+
+        let upper_subtrie = &trie.upper_subtrie;
+
+        // Check that the leaf value was removed
+        assert_matches!(upper_subtrie.inner.values.get(&leaf_full_path), None);
+
+        // Check that the root node was changed to Empty
+        assert_matches!(upper_subtrie.nodes.get(&Nibbles::default()), Some(SparseNode::Empty));
+    }
+
+    #[test]
+    fn test_remove_leaf_unsets_hash_along_path() {
+        //
+        // Creates a trie structure:
+        // 0x:     Branch (with hash set)
+        // 0x0:    ├── Extension (with hash set)
+        // 0x01:   │   └── Branch (with hash set)
+        // 0x012:  │       ├── Leaf (Key = 34, with hash set)
+        // 0x013:  │       ├── Leaf (Key = 56, with hash set)
+        // 0x014:  │       └── Leaf (Key = 67, with hash set)
+        // 0x1:    └── Leaf (Key = 78, with hash set)
+        //
+        // When removing leaf at 0x01234, all nodes along the path (root branch,
+        // extension at 0x0, branch at 0x01) should have their hash
field unset
+        //
+
+        let mut trie = new_test_trie(
+            [
+                (
+                    Nibbles::default(),
+                    SparseNode::Branch {
+                        state_mask: TrieMask::new(0b0011),
+                        hash: Some(B256::repeat_byte(0x10)),
+                        store_in_db_trie: None,
+                    },
+                ),
+                (
+                    Nibbles::from_nibbles([0x0]),
+                    SparseNode::Extension {
+                        key: Nibbles::from_nibbles([0x1]),
+                        hash: Some(B256::repeat_byte(0x20)),
+                        store_in_db_trie: None,
+                    },
+                ),
+                (
+                    Nibbles::from_nibbles([0x0, 0x1]),
+                    SparseNode::Branch {
+                        state_mask: TrieMask::new(0b11100),
+                        hash: Some(B256::repeat_byte(0x30)),
+                        store_in_db_trie: None,
+                    },
+                ),
+                (
+                    Nibbles::from_nibbles([0x0, 0x1, 0x2]),
+                    SparseNode::Leaf {
+                        key: Nibbles::from_nibbles([0x3, 0x4]),
+                        hash: Some(B256::repeat_byte(0x40)),
+                    },
+                ),
+                (
+                    Nibbles::from_nibbles([0x0, 0x1, 0x3]),
+                    SparseNode::Leaf {
+                        key: Nibbles::from_nibbles([0x5, 0x6]),
+                        hash: Some(B256::repeat_byte(0x50)),
+                    },
+                ),
+                (
+                    Nibbles::from_nibbles([0x0, 0x1, 0x4]),
+                    SparseNode::Leaf {
+                        key: Nibbles::from_nibbles([0x6, 0x7]),
+                        hash: Some(B256::repeat_byte(0x60)),
+                    },
+                ),
+                (
+                    Nibbles::from_nibbles([0x1]),
+                    SparseNode::Leaf {
+                        key: Nibbles::from_nibbles([0x7, 0x8]),
+                        hash: Some(B256::repeat_byte(0x70)),
+                    },
+                ),
+            ]
+            .into_iter(),
+        );
+
+        let provider = MockBlindedProvider::new();
+
+        // Remove the leaf at path 0x01234
+        let leaf_full_path = Nibbles::from_nibbles([0x0, 0x1, 0x2, 0x3, 0x4]);
+        trie.remove_leaf(&leaf_full_path, provider).unwrap();
+
+        let upper_subtrie = &trie.upper_subtrie;
+        let lower_subtrie_01 = trie.lower_subtries[0x01].as_ref().unwrap();
+
+        // Verify that hash fields are unset for all nodes along the path to the removed leaf
+        assert_matches!(
+            upper_subtrie.nodes.get(&Nibbles::default()),
+            Some(SparseNode::Branch { hash: None, .. })
+        );
+        assert_matches!(
+            upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x0])),
+            Some(SparseNode::Extension { hash: None, .. })
+        );
+        assert_matches!(
+            lower_subtrie_01.nodes.get(&Nibbles::from_nibbles([0x0, 0x1])),
+            Some(SparseNode::Branch { hash: None, .. })
+        );
+
+        // Verify that nodes not on the path still have their hashes
+        assert_matches!(
+            upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x1])),
+            Some(SparseNode::Leaf { hash: Some(_), .. })
+        );
+        assert_matches!(
+            lower_subtrie_01.nodes.get(&Nibbles::from_nibbles([0x0, 0x1, 0x3])),
+            Some(SparseNode::Leaf { hash: Some(_), .. })
+        );
+        assert_matches!(
+            lower_subtrie_01.nodes.get(&Nibbles::from_nibbles([0x0, 0x1, 0x4])),
+            Some(SparseNode::Leaf { hash: Some(_), ..
}) + ); + } + + #[test] + fn test_parallel_sparse_trie_root() { + let mut trie = ParallelSparseTrie::default().with_updates(true); + + // Step 1: Create the trie structure + // Extension node at 0x with key 0x2 (goes to upper subtrie) + let extension_path = Nibbles::new(); + let extension_key = Nibbles::from_nibbles([0x2]); + + // Branch node at 0x2 with children 0 and 1 (goes to upper subtrie) + let branch_path = Nibbles::from_nibbles([0x2]); + + // Leaf nodes at 0x20 and 0x21 (go to lower subtries) + let leaf_1_path = Nibbles::from_nibbles([0x2, 0x0]); + let leaf_1_key = Nibbles::from_nibbles(vec![0; 62]); // Remaining key + let leaf_1_full_path = Nibbles::from_nibbles([vec![0x2, 0x0], vec![0; 62]].concat()); + + let leaf_2_path = Nibbles::from_nibbles([0x2, 0x1]); + let leaf_2_key = Nibbles::from_nibbles(vec![0; 62]); // Remaining key + let leaf_2_full_path = Nibbles::from_nibbles([vec![0x2, 0x1], vec![0; 62]].concat()); + + // Create accounts + let account_1 = create_account(1); + let account_2 = create_account(2); + + // Create leaf nodes + let leaf_1 = create_leaf_node(leaf_1_key.to_vec(), account_1.nonce); + let leaf_2 = create_leaf_node(leaf_2_key.to_vec(), account_2.nonce); + + // Create branch node with children at indices 0 and 1 + let branch = create_branch_node_with_children( + &[0, 1], + vec![ + RlpNode::from_rlp(&alloy_rlp::encode(&leaf_1)), + RlpNode::from_rlp(&alloy_rlp::encode(&leaf_2)), + ], + ); + + // Create extension node pointing to branch + let extension = create_extension_node( + extension_key.to_vec(), + RlpNode::from_rlp(&alloy_rlp::encode(&branch)).as_hash().unwrap(), + ); + + // Step 2: Reveal nodes in the trie + trie.reveal_node(extension_path, extension, TrieMasks::none()).unwrap(); + trie.reveal_node(branch_path, branch, TrieMasks::none()).unwrap(); + trie.reveal_node(leaf_1_path, leaf_1, TrieMasks::none()).unwrap(); + trie.reveal_node(leaf_2_path, leaf_2, TrieMasks::none()).unwrap(); + + // Step 3: Reset hashes for all revealed nodes to test actual hash calculation + // Reset upper subtrie node hashes + trie.upper_subtrie.nodes.get_mut(&extension_path).unwrap().set_hash(None); + trie.upper_subtrie.nodes.get_mut(&branch_path).unwrap().set_hash(None); + + // Reset lower subtrie node hashes + let leaf_1_subtrie_idx = path_subtrie_index_unchecked(&leaf_1_path); + let leaf_2_subtrie_idx = path_subtrie_index_unchecked(&leaf_2_path); + + trie.lower_subtries[leaf_1_subtrie_idx] + .as_mut() + .unwrap() + .nodes + .get_mut(&leaf_1_path) + .unwrap() + .set_hash(None); + trie.lower_subtries[leaf_2_subtrie_idx] + .as_mut() + .unwrap() + .nodes + .get_mut(&leaf_2_path) + .unwrap() + .set_hash(None); + + // Step 4: Add changed leaf node paths to prefix set + trie.prefix_set.insert(leaf_1_full_path); + trie.prefix_set.insert(leaf_2_full_path); + + // Step 5: Calculate root using our implementation + let root = trie.root(); + + // Step 6: Calculate root using HashBuilder for comparison + let (hash_builder_root, _, _proof_nodes, _, _) = run_hash_builder( + [(leaf_1_full_path, account_1), (leaf_2_full_path, account_2)], + NoopAccountTrieCursor::default(), + Default::default(), + [extension_path, branch_path, leaf_1_full_path, leaf_2_full_path], + ); + + // Step 7: Verify the roots match + assert_eq!(root, hash_builder_root); + + // Verify hashes were computed + let leaf_1_subtrie = trie.lower_subtries[leaf_1_subtrie_idx].as_ref().unwrap(); + let leaf_2_subtrie = trie.lower_subtries[leaf_2_subtrie_idx].as_ref().unwrap(); + 
assert!(trie.upper_subtrie.nodes.get(&extension_path).unwrap().hash().is_some());
+        assert!(trie.upper_subtrie.nodes.get(&branch_path).unwrap().hash().is_some());
+        assert!(leaf_1_subtrie.nodes.get(&leaf_1_path).unwrap().hash().is_some());
+        assert!(leaf_2_subtrie.nodes.get(&leaf_2_path).unwrap().hash().is_some());
+    }
+}
diff --git a/crates/trie/sparse/benches/update.rs b/crates/trie/sparse/benches/update.rs
new file mode 100644
index 00000000000..4b2971c1e05
--- /dev/null
+++ b/crates/trie/sparse/benches/update.rs
@@ -0,0 +1,100 @@
+#![allow(missing_docs)]
+
+use alloy_primitives::{B256, U256};
+use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion};
+use proptest::{prelude::*, strategy::ValueTree};
+use rand::seq::IteratorRandom;
+use reth_trie_common::Nibbles;
+use reth_trie_sparse::SparseTrie;
+
+const LEAF_COUNTS: [usize; 2] = [1_000, 5_000];
+
+fn update_leaf(c: &mut Criterion) {
+    let mut group = c.benchmark_group("update_leaf");
+
+    for leaf_count in LEAF_COUNTS {
+        group.bench_function(BenchmarkId::from_parameter(leaf_count), |b| {
+            let leaves = generate_leaves(leaf_count);
+            // Start with an empty trie
+            let mut trie = SparseTrie::revealed_empty();
+            // Pre-populate with data
+            for (path, value) in leaves.iter().cloned() {
+                trie.update_leaf(path, value).unwrap();
+            }
+
+            b.iter_batched(
+                || {
+                    let new_leaves = leaves
+                        .iter()
+                        // Update 10% of existing leaves with new values
+                        .choose_multiple(&mut rand::rng(), leaf_count / 10)
+                        .into_iter()
+                        .map(|(path, _)| {
+                            (
+                                path,
+                                alloy_rlp::encode_fixed_size(&U256::from(path.len() * 2)).to_vec(),
+                            )
+                        })
+                        .collect::<Vec<_>>();
+
+                    (trie.clone(), new_leaves)
+                },
+                |(mut trie, new_leaves)| {
+                    for (path, new_value) in new_leaves {
+                        trie.update_leaf(*path, new_value).unwrap();
+                    }
+                    trie
+                },
+                BatchSize::LargeInput,
+            );
+        });
+    }
+}
+
+fn remove_leaf(c: &mut Criterion) {
+    let mut group = c.benchmark_group("remove_leaf");
+
+    for leaf_count in LEAF_COUNTS {
+        group.bench_function(BenchmarkId::from_parameter(leaf_count), |b| {
+            let leaves = generate_leaves(leaf_count);
+            // Start with an empty trie
+            let mut trie = SparseTrie::revealed_empty();
+            // Pre-populate with data
+            for (path, value) in leaves.iter().cloned() {
+                trie.update_leaf(path, value).unwrap();
+            }
+
+            b.iter_batched(
+                || {
+                    let delete_leaves = leaves
+                        .iter()
+                        .map(|(path, _)| path)
+                        // Remove 10% of the leaves
+                        .choose_multiple(&mut rand::rng(), leaf_count / 10);
+
+                    (trie.clone(), delete_leaves)
+                },
+                |(mut trie, delete_leaves)| {
+                    for path in delete_leaves {
+                        trie.remove_leaf(path).unwrap();
+                    }
+                    trie
+                },
+                BatchSize::LargeInput,
+            );
+        });
+    }
+}
+
+fn generate_leaves(size: usize) -> Vec<(Nibbles, Vec<u8>)> {
+    proptest::collection::hash_map(any::<B256>(), any::<U256>(), size)
+        .new_tree(&mut Default::default())
+        .unwrap()
+        .current()
+        .iter()
+        .map(|(key, value)| (Nibbles::unpack(key), alloy_rlp::encode_fixed_size(value).to_vec()))
+        .collect()
+}
+
+criterion_group!(benches, update_leaf, remove_leaf);
+criterion_main!(benches);
diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs
index 39e305f4981..66c3596363c 100644
--- a/crates/trie/sparse/src/state.rs
+++ b/crates/trie/sparse/src/state.rs
@@ -1,6 +1,6 @@
 use crate::{
     blinded::{BlindedProvider, BlindedProviderFactory, DefaultBlindedProviderFactory},
-    LeafLookup, RevealedSparseTrie, SparseTrie, TrieMasks,
+    LeafLookup, RevealedSparseTrie, SparseTrie, SparseTrieState, TrieMasks,
 };
 use alloc::{collections::VecDeque, vec::Vec};
 use alloy_primitives::{
@@
-107,6 +107,16 @@ impl SparseStateTrie { self.revealed_account_paths.contains(&Nibbles::unpack(account)) } + /// Uses the input `SparseTrieState` to populate the backing data structures in the `state` + /// trie. + pub fn populate_from(&mut self, trie: SparseTrieState) { + if let Some(new_trie) = self.state.as_revealed_mut() { + new_trie.use_allocated_state(trie); + } else { + self.state = SparseTrie::AllocatedEmpty { allocated: trie }; + } + } + /// Was the account witness for `address` complete? pub fn check_valid_account_witness(&self, address: B256) -> bool { let path = Nibbles::unpack(address); @@ -224,7 +234,7 @@ impl SparseStateTrie { continue } let node = TrieNode::decode(&mut &bytes[..])?; - trie.reveal_node(path.clone(), node, TrieMasks::none())?; + trie.reveal_node(path, node, TrieMasks::none())?; // Track the revealed path. self.revealed_account_paths.insert(path); @@ -271,7 +281,7 @@ impl SparseStateTrie { continue } let node = TrieNode::decode(&mut &bytes[..])?; - trie.reveal_node(path.clone(), node, TrieMasks::none())?; + trie.reveal_node(path, node, TrieMasks::none())?; // Track the revealed path. revealed_nodes.insert(path); @@ -343,7 +353,7 @@ impl SparseStateTrie { ) -> SparseStateTrieResult<()> { let FilteredProofNodes { nodes, - new_nodes, + new_nodes: _, total_nodes: _total_nodes, skipped_nodes: _skipped_nodes, } = filter_revealed_nodes(account_subtree, &self.revealed_account_paths)?; @@ -366,9 +376,6 @@ impl SparseStateTrie { self.retain_updates, )?; - // Reserve the capacity for new nodes ahead of time. - trie.reserve_nodes(new_nodes); - // Reveal the remaining proof nodes. for (path, node) in account_nodes { let (hash_mask, tree_mask) = if let TrieNode::Branch(_) = node { @@ -381,7 +388,7 @@ impl SparseStateTrie { }; trace!(target: "trie::sparse", ?path, ?node, ?hash_mask, ?tree_mask, "Revealing account node"); - trie.reveal_node(path.clone(), node, TrieMasks { hash_mask, tree_mask })?; + trie.reveal_node(path, node, TrieMasks { hash_mask, tree_mask })?; // Track the revealed path. self.revealed_account_paths.insert(path); @@ -456,7 +463,7 @@ impl SparseStateTrie { }; trace!(target: "trie::sparse", ?account, ?path, ?node, ?hash_mask, ?tree_mask, "Revealing storage node"); - trie.reveal_node(path.clone(), node, TrieMasks { hash_mask, tree_mask })?; + trie.reveal_node(path, node, TrieMasks { hash_mask, tree_mask })?; // Track the revealed path. 
revealed_nodes.insert(path);
@@ -488,7 +495,7 @@ impl SparseStateTrie {
             TrieNode::Branch(branch) => {
                 for (idx, maybe_child) in branch.as_ref().children() {
                     if let Some(child_hash) = maybe_child.and_then(RlpNode::as_hash) {
-                        let mut child_path = path.clone();
+                        let mut child_path = path;
                         child_path.push_unchecked(idx);
                         queue.push_back((child_hash, child_path, maybe_account));
                     }
@@ -496,14 +503,14 @@ impl SparseStateTrie {
             }
             TrieNode::Extension(ext) => {
                 if let Some(child_hash) = ext.child.as_hash() {
-                    let mut child_path = path.clone();
-                    child_path.extend_from_slice_unchecked(&ext.key);
+                    let mut child_path = path;
+                    child_path.extend(&ext.key);
                     queue.push_back((child_hash, child_path, maybe_account));
                 }
             }
             TrieNode::Leaf(leaf) => {
-                let mut full_path = path.clone();
-                full_path.extend_from_slice_unchecked(&leaf.key);
+                let mut full_path = path;
+                full_path.extend(&leaf.key);
                 if maybe_account.is_none() {
                     let hashed_address = B256::from_slice(&full_path.pack());
                     let account = TrieAccount::decode(&mut &leaf.value[..])?;
@@ -541,7 +548,7 @@ impl SparseStateTrie {
                 storage_trie_entry
                     .as_revealed_mut()
                     .ok_or(SparseTrieErrorKind::Blind)?
-                    .reveal_node(path.clone(), trie_node, TrieMasks::none())?;
+                    .reveal_node(path, trie_node, TrieMasks::none())?;
             }

             // Track the revealed path.
@@ -561,7 +568,7 @@ impl SparseStateTrie {
         } else {
             // Reveal non-root state trie node.
             self.state.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?.reveal_node(
-                path.clone(),
+                path,
                 trie_node,
                 TrieMasks::none(),
             )?;
@@ -650,7 +657,7 @@ impl SparseStateTrie {
         &mut self,
     ) -> SparseStateTrieResult<&mut RevealedSparseTrie> {
         match self.state {
-            SparseTrie::Blind => {
+            SparseTrie::Blind | SparseTrie::AllocatedEmpty { .. } => {
                 let (root_node, hash_mask, tree_mask) = self
                     .provider_factory
                     .account_node_provider()
@@ -745,7 +752,7 @@ impl SparseStateTrie {
         value: Vec<u8>,
     ) -> SparseStateTrieResult<()> {
         if !self.revealed_account_paths.contains(&path) {
-            self.revealed_account_paths.insert(path.clone());
+            self.revealed_account_paths.insert(path);
         }
         self.state.update_leaf(path, value)?;
@@ -760,7 +767,7 @@ impl SparseStateTrie {
         value: Vec<u8>,
     ) -> SparseStateTrieResult<()> {
         if !self.revealed_storage_paths.get(&address).is_some_and(|slots| slots.contains(&slot)) {
-            self.revealed_storage_paths.entry(address).or_default().insert(slot.clone());
+            self.revealed_storage_paths.entry(address).or_default().insert(slot);
         }

         let storage_trie = self.storages.get_mut(&address).ok_or(SparseTrieErrorKind::Blind)?;
@@ -868,6 +875,12 @@ impl SparseStateTrie {
         storage_trie.remove_leaf(slot)?;
         Ok(())
     }
+
+    /// Clears the account trie and takes its backing `SparseTrieState` for reuse.
+    pub fn take_cleared_account_trie_state(&mut self) -> SparseTrieState {
+        let trie = core::mem::take(&mut self.state);
+        trie.cleared()
+    }
 }

 /// Result of [`filter_revealed_nodes`].
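Taken together, `take_cleared_account_trie_state` and `populate_from` let a caller recycle the account trie's allocations across runs. A rough usage sketch, assuming a `Default` constructor for `SparseStateTrie` and an already-used `trie`; the driver loop itself is not part of this diff:

    // After computing a root, take back the cleared-but-still-allocated maps.
    let saved: SparseTrieState = trie.take_cleared_account_trie_state();

    // Seed the next run with them; the state trie stays blind
    // (`AllocatedEmpty`) until its root node is revealed.
    let mut next = SparseStateTrie::default(); // assumed constructor
    next.populate_from(saved);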
@@ -1169,11 +1182,8 @@ mod tests {
         let slot_path_3 = Nibbles::unpack(slot_3);
         let value_3 = U256::from(rng.random::<u64>());

-        let mut storage_hash_builder =
-            HashBuilder::default().with_proof_retainer(ProofRetainer::from_iter([
-                slot_path_1.clone(),
-                slot_path_2.clone(),
-            ]));
+        let mut storage_hash_builder = HashBuilder::default()
+            .with_proof_retainer(ProofRetainer::from_iter([slot_path_1, slot_path_2]));

         storage_hash_builder.add_leaf(slot_path_1, &alloy_rlp::encode_fixed_size(&value_1));
         storage_hash_builder.add_leaf(slot_path_2, &alloy_rlp::encode_fixed_size(&value_2));
@@ -1193,13 +1203,10 @@ mod tests {
         let account_2 = Account::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap();
         let mut trie_account_2 = account_2.into_trie_account(EMPTY_ROOT_HASH);

-        let mut hash_builder =
-            HashBuilder::default().with_proof_retainer(ProofRetainer::from_iter([
-                address_path_1.clone(),
-                address_path_2.clone(),
-            ]));
-        hash_builder.add_leaf(address_path_1.clone(), &alloy_rlp::encode(trie_account_1));
-        hash_builder.add_leaf(address_path_2.clone(), &alloy_rlp::encode(trie_account_2));
+        let mut hash_builder = HashBuilder::default()
+            .with_proof_retainer(ProofRetainer::from_iter([address_path_1, address_path_2]));
+        hash_builder.add_leaf(address_path_1, &alloy_rlp::encode(trie_account_1));
+        hash_builder.add_leaf(address_path_2, &alloy_rlp::encode(trie_account_2));

         let root = hash_builder.root();
         let proof_nodes = hash_builder.take_proof_nodes();
diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs
index e8f8c9f87a7..e2f28c2417f 100644
--- a/crates/trie/sparse/src/trie.rs
+++ b/crates/trie/sparse/src/trie.rs
@@ -52,6 +52,19 @@ impl TrieMasks {
     }
 }

+/// A struct for keeping the hashmaps from `RevealedSparseTrie`.
+#[derive(Debug, Clone, PartialEq, Eq, Default)]
+pub struct SparseTrieState {
+    /// Map from a path (nibbles) to its corresponding sparse trie node.
+    nodes: HashMap<Nibbles, SparseNode>,
+    /// When a bit is set, the corresponding child subtree is stored in the database.
+    branch_node_tree_masks: HashMap<Nibbles, TrieMask>,
+    /// When a bit is set, the corresponding child is stored as a hash in the database.
+    branch_node_hash_masks: HashMap<Nibbles, TrieMask>,
+    /// Map from leaf key paths to their values.
+    values: HashMap<Nibbles, Vec<u8>>,
+}
+
 /// A sparse trie that is either in a "blind" state (no nodes are revealed, root node hash is
 /// unknown) or in a "revealed" state (root node has been revealed and the trie can be updated).
 ///
@@ -64,8 +77,15 @@ impl TrieMasks {
 /// 2. Update tracking - changes to the trie structure can be tracked and selectively persisted
 /// 3. Incremental operations - nodes can be revealed as needed without loading the entire trie.
 ///    This is what gives rise to the notion of a "sparse" trie.
-#[derive(PartialEq, Eq, Default)]
+#[derive(PartialEq, Eq, Default, Clone)]
 pub enum SparseTrie
<P = DefaultBlindedProvider>
{ + /// This is a variant that can be used to store a previously allocated trie. In these cases, + /// the trie will still be treated as blind, but the allocated trie will be reused if the trie + /// becomes revealed. + AllocatedEmpty { + /// This is the state of the allocated trie. + allocated: SparseTrieState, + }, /// The trie is blind -- no nodes have been revealed /// /// This is the default state. In this state, @@ -83,6 +103,7 @@ pub enum SparseTrie
<P = DefaultBlindedProvider> {
 impl<P> fmt::Debug for SparseTrie<P>
{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { + Self::AllocatedEmpty { .. } => write!(f, "AllocatedEmpty"), Self::Blind => write!(f, "Blind"), Self::Revealed(revealed) => write!(f, "Revealed({revealed:?})"), } @@ -184,17 +205,39 @@ impl
<P: BlindedProvider> SparseTrie<P> {
         masks: TrieMasks,
         retain_updates: bool,
     ) -> SparseTrieResult<&mut RevealedSparseTrie<P>
> {
+        // We take the allocated state here, which ensures we are either `Blind` or
+        // `Revealed`, giving us the allocated state if we were `AllocatedEmpty`.
+        let allocated = self.take_allocated_state();
+
+        // If `Blind`, we initialize the revealed trie.
         if self.is_blind() {
-            *self = Self::Revealed(Box::new(RevealedSparseTrie::from_provider_and_root(
-                provider,
-                root,
-                masks,
-                retain_updates,
-            )?))
+            let mut revealed =
+                RevealedSparseTrie::from_provider_and_root(provider, root, masks, retain_updates)?;
+
+            // If we had an allocated state, we use its maps internally. `use_allocated_state`
+            // copies over any information we had from revealing.
+            if let Some(allocated) = allocated {
+                revealed.use_allocated_state(allocated);
+            }
+
+            *self = Self::Revealed(Box::new(revealed));
         }
         Ok(self.as_revealed_mut().unwrap())
     }

+    /// Takes the allocated state if this is `AllocatedEmpty`, otherwise returns `None`.
+    ///
+    /// Converts this `SparseTrie` into `Blind` if it was `AllocatedEmpty`.
+    pub fn take_allocated_state(&mut self) -> Option<SparseTrieState> {
+        if let Self::AllocatedEmpty { allocated } = self {
+            let state = core::mem::take(allocated);
+            *self = Self::Blind;
+            Some(state)
+        } else {
+            None
+        }
+    }
+
     /// Wipes the trie by removing all nodes and values,
     /// and resetting the trie to only contain an empty root node.
     ///
@@ -205,6 +248,16 @@ impl
<P: BlindedProvider> SparseTrie<P>
{ Ok(()) } + /// Returns a `SparseTrieState` obtained by clearing the sparse trie state and reusing the + /// allocated state if it was `AllocatedEmpty` or `Revealed`. + pub fn cleared(self) -> SparseTrieState { + match self { + Self::Revealed(revealed) => revealed.cleared_state(), + Self::AllocatedEmpty { allocated } => allocated, + Self::Blind => Default::default(), + } + } + /// Calculates the root hash of the trie. /// /// This will update any remaining dirty nodes before computing the root hash. @@ -331,7 +384,7 @@ impl fmt::Display for RevealedSparseTrie
<P>
{ stack.push((Nibbles::default(), self.nodes_ref().get(&Nibbles::default()).unwrap(), 0)); while let Some((path, node, depth)) = stack.pop() { - if !visited.insert(path.clone()) { + if !visited.insert(path) { continue; } @@ -348,8 +401,8 @@ impl fmt::Display for RevealedSparseTrie
<P>
{ } SparseNode::Leaf { key, .. } => { // we want to append the key to the path - let mut full_path = path.clone(); - full_path.extend_from_slice_unchecked(key); + let mut full_path = path; + full_path.extend(key); let packed_path = encode_nibbles(&full_path); writeln!(f, "{packed_path} -> {node:?}")?; @@ -358,8 +411,8 @@ impl fmt::Display for RevealedSparseTrie
<P>
{ writeln!(f, "{packed_path} -> {node:?}")?; // push the child node onto the stack with increased depth - let mut child_path = path.clone(); - child_path.extend_from_slice_unchecked(key); + let mut child_path = path; + child_path.extend(key); if let Some(child_node) = self.nodes_ref().get(&child_path) { stack.push((child_path, child_node, depth + 1)); } @@ -369,7 +422,7 @@ impl fmt::Display for RevealedSparseTrie
<P>
{ for i in CHILD_INDEX_RANGE.rev() { if state_mask.is_bit_set(i) { - let mut child_path = path.clone(); + let mut child_path = path; child_path.push_unchecked(i); if let Some(child_node) = self.nodes_ref().get(&child_path) { stack.push((child_path, child_node, depth + 1)); @@ -481,6 +534,37 @@ impl
<P> RevealedSparseTrie<P>
{ } } + /// Sets the fields of this `RevealedSparseTrie` to the fields of the input + /// `SparseTrieState`. + /// + /// This is meant for reusing the allocated maps contained in the `SparseTrieState`. + /// + /// Copies over any existing nodes, branch masks, and values. + pub fn use_allocated_state(&mut self, mut other: SparseTrieState) { + for (path, node) in self.nodes.drain() { + other.nodes.insert(path, node); + } + for (path, mask) in self.branch_node_tree_masks.drain() { + other.branch_node_tree_masks.insert(path, mask); + } + for (path, mask) in self.branch_node_hash_masks.drain() { + other.branch_node_hash_masks.insert(path, mask); + } + for (path, value) in self.values.drain() { + other.values.insert(path, value); + } + + self.nodes = other.nodes; + self.branch_node_tree_masks = other.branch_node_tree_masks; + self.branch_node_hash_masks = other.branch_node_hash_masks; + self.values = other.values; + } + + /// Set the provider for the trie. + pub fn set_provider(&mut self, provider: P) { + self.provider = provider; + } + /// Configures the trie to retain information about updates. /// /// If `retain_updates` is true, the trie will record branch node updates and deletions. @@ -554,10 +638,10 @@ impl
<P> RevealedSparseTrie<P>
{ } if let Some(tree_mask) = masks.tree_mask { - self.branch_node_tree_masks.insert(path.clone(), tree_mask); + self.branch_node_tree_masks.insert(path, tree_mask); } if let Some(hash_mask) = masks.hash_mask { - self.branch_node_hash_masks.insert(path.clone(), hash_mask); + self.branch_node_hash_masks.insert(path, hash_mask); } match node { @@ -571,7 +655,7 @@ impl
<P> RevealedSparseTrie<P>
{ let mut stack_ptr = branch.as_ref().first_child_index(); for idx in CHILD_INDEX_RANGE { if branch.state_mask.is_bit_set(idx) { - let mut child_path = path.clone(); + let mut child_path = path; child_path.push_unchecked(idx); // Reveal each child node or hash it has self.reveal_node_or_hash(child_path, &branch.stack[stack_ptr])?; @@ -601,7 +685,7 @@ impl
<P> RevealedSparseTrie<P>
{ // All other node types can't be handled. node @ (SparseNode::Empty | SparseNode::Leaf { .. }) => { return Err(SparseTrieErrorKind::Reveal { - path: entry.key().clone(), + path: *entry.key(), node: Box::new(node.clone()), } .into()) @@ -616,8 +700,8 @@ impl
<P> RevealedSparseTrie<P>
{ Entry::Occupied(mut entry) => match entry.get() { // Replace a hash node with a revealed extension node. SparseNode::Hash(hash) => { - let mut child_path = entry.key().clone(); - child_path.extend_from_slice_unchecked(&ext.key); + let mut child_path = *entry.key(); + child_path.extend(&ext.key); entry.insert(SparseNode::Extension { key: ext.key, // Memoize the hash of a previously blinded node in a new extension @@ -633,15 +717,15 @@ impl
<P> RevealedSparseTrie<P>
{ // All other node types can't be handled. node @ (SparseNode::Empty | SparseNode::Leaf { .. }) => { return Err(SparseTrieErrorKind::Reveal { - path: entry.key().clone(), + path: *entry.key(), node: Box::new(node.clone()), } .into()) } }, Entry::Vacant(entry) => { - let mut child_path = entry.key().clone(); - child_path.extend_from_slice_unchecked(&ext.key); + let mut child_path = *entry.key(); + child_path.extend(&ext.key); entry.insert(SparseNode::new_ext(ext.key)); self.reveal_node_or_hash(child_path, &ext.child)?; } @@ -650,8 +734,8 @@ impl
<P> RevealedSparseTrie<P>
{ Entry::Occupied(mut entry) => match entry.get() { // Replace a hash node with a revealed leaf node and store leaf node value. SparseNode::Hash(hash) => { - let mut full = entry.key().clone(); - full.extend_from_slice_unchecked(&leaf.key); + let mut full = *entry.key(); + full.extend(&leaf.key); self.values.insert(full, leaf.value); entry.insert(SparseNode::Leaf { key: leaf.key, @@ -667,15 +751,15 @@ impl
<P> RevealedSparseTrie<P>
{ SparseNode::Extension { .. } | SparseNode::Branch { .. }) => { return Err(SparseTrieErrorKind::Reveal { - path: entry.key().clone(), + path: *entry.key(), node: Box::new(node.clone()), } .into()) } }, Entry::Vacant(entry) => { - let mut full = entry.key().clone(); - full.extend_from_slice_unchecked(&leaf.key); + let mut full = *entry.key(); + full.extend(&leaf.key); entry.insert(SparseNode::new_leaf(leaf.key)); self.values.insert(full, leaf.value); } @@ -711,7 +795,7 @@ impl
<P> RevealedSparseTrie<P>
{ // Hash node with a different hash can't be handled. SparseNode::Hash(previous_hash) if previous_hash != &hash => { return Err(SparseTrieErrorKind::Reveal { - path: entry.key().clone(), + path: *entry.key(), node: Box::new(SparseNode::Hash(hash)), } .into()) @@ -760,13 +844,13 @@ impl
<P> RevealedSparseTrie<P>
{
             #[cfg(debug_assertions)]
             {
-                let mut current = current.clone();
-                current.extend_from_slice_unchecked(_key);
+                let mut current = current;
+                current.extend(_key);
                 assert_eq!(&current, path);
             }

             nodes.push(RemovedSparseNode {
-                path: current.clone(),
+                path: current,
                 node,
                 unset_branch_nibble: None,
             });
@@ -775,20 +859,20 @@ impl
<P> RevealedSparseTrie<P>
{
             SparseNode::Extension { key, .. } => {
                 #[cfg(debug_assertions)]
                 {
-                    let mut current = current.clone();
-                    current.extend_from_slice_unchecked(key);
+                    let mut current = current;
+                    current.extend(key);
                     assert!(
                         path.starts_with(&current),
                         "path: {path:?}, current: {current:?}, key: {key:?}",
                     );
                 }

-                let path = current.clone();
-                current.extend_from_slice_unchecked(key);
+                let path = current;
+                current.extend(key);
                 nodes.push(RemovedSparseNode { path, node, unset_branch_nibble: None });
             }
             SparseNode::Branch { state_mask, .. } => {
-                let nibble = path[current.len()];
+                let nibble = path.get_unchecked(current.len());
                 debug_assert!(
                     state_mask.is_bit_set(nibble),
                     "current: {current:?}, path: {path:?}, nibble: {nibble:?}, state_mask: {state_mask:?}",
@@ -799,26 +883,22 @@ impl
<P> RevealedSparseTrie<P>
{ // Any other branch nodes will not require unsetting the nibble, because // deleting one leaf node can not remove the whole path // where the branch node is located. - let mut child_path = - Nibbles::from_nibbles([current.as_slice(), &[nibble]].concat()); + let mut child_path = current; + child_path.push_unchecked(nibble); let unset_branch_nibble = self .nodes .get(&child_path) .is_some_and(move |node| match node { SparseNode::Leaf { key, .. } => { // Get full path of the leaf node - child_path.extend_from_slice_unchecked(key); + child_path.extend(key); &child_path == path } _ => false, }) .then_some(nibble); - nodes.push(RemovedSparseNode { - path: current.clone(), - node, - unset_branch_nibble, - }); + nodes.push(RemovedSparseNode { path: current, node, unset_branch_nibble }); current.push_unchecked(nibble); } @@ -839,6 +919,33 @@ impl
<P> RevealedSparseTrie<P>
{ self.updates = self.updates.is_some().then(SparseTrieUpdates::wiped); } + /// This clears all data structures in the sparse trie, keeping the backing data structures + /// allocated. + /// + /// This is useful for reusing the trie without needing to reallocate memory. + pub fn clear(&mut self) { + self.nodes.clear(); + self.branch_node_tree_masks.clear(); + self.branch_node_hash_masks.clear(); + self.values.clear(); + self.prefix_set.clear(); + if let Some(updates) = self.updates.as_mut() { + updates.clear() + } + self.rlp_buf.clear(); + } + + /// Returns the cleared `SparseTrieState` for this `RevealedSparseTrie`. + pub fn cleared_state(mut self) -> SparseTrieState { + self.clear(); + SparseTrieState { + nodes: self.nodes, + branch_node_tree_masks: self.branch_node_tree_masks, + branch_node_hash_masks: self.branch_node_hash_masks, + values: self.values, + } + } + /// Calculates and returns the root hash of the trie. /// /// Before computing the hash, this function processes any remaining (dirty) nodes by @@ -936,9 +1043,9 @@ impl
<P> RevealedSparseTrie<P>
{ if level >= depth { targets.push((level, path)); } else { - unchanged_prefix_set.insert(path.clone()); + unchanged_prefix_set.insert(path); - path.extend_from_slice_unchecked(key); + path.extend(key); paths.push((path, level + 1)); } } @@ -950,11 +1057,11 @@ impl
<P> RevealedSparseTrie<P>
{ if level >= depth { targets.push((level, path)); } else { - unchanged_prefix_set.insert(path.clone()); + unchanged_prefix_set.insert(path); for bit in CHILD_INDEX_RANGE.rev() { if state_mask.is_bit_set(bit) { - let mut child_path = path.clone(); + let mut child_path = path; child_path.push_unchecked(bit); paths.push((child_path, level + 1)); } @@ -1001,7 +1108,7 @@ impl
<P> RevealedSparseTrie<P>
{
         buffers: &mut RlpNodeBuffers,
         rlp_buf: &mut Vec<u8>,
     ) -> RlpNode {
-        let _starting_path = buffers.path_stack.last().map(|item| item.path.clone());
+        let _starting_path = buffers.path_stack.last().map(|item| item.path);

         'main: while let Some(RlpNodePathStackItem { level, path, mut is_in_prefix_set }) =
             buffers.path_stack.pop()
@@ -1027,8 +1134,8 @@ impl
<P> RevealedSparseTrie<P>
{ SparseNode::Empty => (RlpNode::word_rlp(&EMPTY_ROOT_HASH), SparseNodeType::Empty), SparseNode::Hash(hash) => (RlpNode::word_rlp(hash), SparseNodeType::Hash), SparseNode::Leaf { key, hash } => { - let mut path = path.clone(); - path.extend_from_slice_unchecked(key); + let mut path = path; + path.extend(key); if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { (RlpNode::word_rlp(&hash), SparseNodeType::Leaf) } else { @@ -1040,8 +1147,8 @@ impl
<P> RevealedSparseTrie<P>
{ } } SparseNode::Extension { key, hash, store_in_db_trie } => { - let mut child_path = path.clone(); - child_path.extend_from_slice_unchecked(key); + let mut child_path = path; + child_path.extend(key); if let Some((hash, store_in_db_trie)) = hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) { @@ -1112,7 +1219,7 @@ impl
<P> RevealedSparseTrie<P>
{ // from the stack and keep walking in the sorted order. for bit in CHILD_INDEX_RANGE.rev() { if state_mask.is_bit_set(bit) { - let mut child = path.clone(); + let mut child = path; child.push_unchecked(bit); buffers.branch_child_buf.push(child); } @@ -1230,7 +1337,7 @@ impl
<P> RevealedSparseTrie<P>
{ hashes, hash.filter(|_| path.is_empty()), ); - updates.updated_nodes.insert(path.clone(), branch_node); + updates.updated_nodes.insert(path, branch_node); } else if self .branch_node_tree_masks .get(&path) @@ -1243,7 +1350,7 @@ impl
<P> RevealedSparseTrie<P>
{ // need to remove the node update and add the node itself to the list of // removed nodes. updates.updated_nodes.remove(&path); - updates.removed_nodes.insert(path.clone()); + updates.removed_nodes.insert(path); } else if self .branch_node_hash_masks .get(&path) @@ -1325,22 +1432,6 @@ pub enum LeafLookup { } impl RevealedSparseTrie
<P>
{ - /// This clears all data structures in the sparse trie, keeping the backing data structures - /// allocated. - /// - /// This is useful for reusing the trie without needing to reallocate memory. - pub fn clear(&mut self) { - self.nodes.clear(); - self.branch_node_tree_masks.clear(); - self.branch_node_hash_masks.clear(); - self.values.clear(); - self.prefix_set.clear(); - if let Some(updates) = self.updates.as_mut() { - updates.clear() - } - self.rlp_buf.clear(); - } - /// Attempts to find a leaf node at the specified path. /// /// This method traverses the trie from the root down to the given path, checking @@ -1373,7 +1464,7 @@ impl RevealedSparseTrie
<P>
{ if let Some(expected) = expected_value { if actual_value != expected { return Err(LeafLookupError::ValueMismatch { - path: path.clone(), + path: *path, expected: Some(expected.clone()), actual: actual_value.clone(), }); @@ -1410,14 +1501,14 @@ impl RevealedSparseTrie
<P>
{
             }
             Some(&SparseNode::Hash(hash)) => {
                 // We hit a blinded node - cannot determine if leaf exists
-                return Err(LeafLookupError::BlindedNode { path: current.clone(), hash });
+                return Err(LeafLookupError::BlindedNode { path: current, hash });
             }
             Some(SparseNode::Leaf { key, .. }) => {
                 // We found a leaf node before reaching our target depth

                 // Temporarily append the leaf key to `current`
                 let saved_len = current.len();
-                current.extend_from_slice_unchecked(key);
+                current.extend(key);

                 if &current == path {
                     // This should have been handled by our initial values map check
@@ -1436,7 +1527,7 @@ impl RevealedSparseTrie
<P>
{
             Some(SparseNode::Extension { key, .. }) => {
                 // Temporarily append the extension key to `current`
                 let saved_len = current.len();
-                current.extend_from_slice_unchecked(key);
+                current.extend(key);

                 if path.len() < current.len() || !path.starts_with(&current) {
                     let diverged_at = current.slice(..saved_len);
@@ -1447,7 +1538,7 @@ impl RevealedSparseTrie
<P>
{ } Some(SparseNode::Branch { state_mask, .. }) => { // Check if branch has a child at the next nibble in our path - let nibble = path[current.len()]; + let nibble = path.get_unchecked(current.len()); if !state_mask.is_bit_set(nibble) { // No child at this nibble - exclusion proof return Ok(LeafLookup::NonExistent { diverged_at: current }); @@ -1471,7 +1562,7 @@ impl RevealedSparseTrie
<P>
{ } } Some(&SparseNode::Hash(hash)) => { - return Err(LeafLookupError::BlindedNode { path: path.clone(), hash }); + return Err(LeafLookupError::BlindedNode { path: *path, hash }); } _ => { // No leaf at exactly the target path @@ -1502,8 +1593,8 @@ impl RevealedSparseTrie
<P>
{
     /// Note: If an update requires revealing a blinded node, an error is returned if the blinded
     /// provider returns an error.
     pub fn update_leaf(&mut self, path: Nibbles, value: Vec<u8>) -> SparseTrieResult<()> {
-        self.prefix_set.insert(path.clone());
-        let existing = self.values.insert(path.clone(), value);
+        self.prefix_set.insert(path);
+        let existing = self.values.insert(path, value);
         if existing.is_some() {
             // trie structure unchanged, return immediately
             return Ok(())
@@ -1520,7 +1611,7 @@ impl RevealedSparseTrie
<P>
{ return Err(SparseTrieErrorKind::BlindedNode { path: current, hash }.into()) } SparseNode::Leaf { key: current_key, .. } => { - current.extend_from_slice_unchecked(current_key); + current.extend(current_key); // this leaf is being updated if current == path { @@ -1538,7 +1629,10 @@ impl RevealedSparseTrie
<P>
{ self.nodes.reserve(3); self.nodes.insert( current.slice(..common), - SparseNode::new_split_branch(current[common], path[common]), + SparseNode::new_split_branch( + current.get_unchecked(common), + path.get_unchecked(common), + ), ); self.nodes.insert( path.slice(..=common), @@ -1552,7 +1646,7 @@ impl RevealedSparseTrie
<P>
{
                     break;
                 }
                 SparseNode::Extension { key, .. } => {
-                    current.extend_from_slice(key);
+                    current.extend(key);

                     if !path.starts_with(&current) {
                         // find the common prefix
@@ -1578,7 +1672,7 @@ impl RevealedSparseTrie
<P>
{ "Revealing extension node child", ); self.reveal_node( - current.clone(), + current, decoded, TrieMasks { hash_mask, tree_mask }, )?; @@ -1589,7 +1683,10 @@ impl RevealedSparseTrie
<P>
{ // create state mask for new branch node // NOTE: this might overwrite the current extension node self.nodes.reserve(3); - let branch = SparseNode::new_split_branch(current[common], path[common]); + let branch = SparseNode::new_split_branch( + current.get_unchecked(common), + path.get_unchecked(common), + ); self.nodes.insert(current.slice(..common), branch); // create new leaf @@ -1606,7 +1703,7 @@ impl RevealedSparseTrie
<P>
{ } } SparseNode::Branch { state_mask, .. } => { - let nibble = path[current.len()]; + let nibble = path.get_unchecked(current.len()); current.push_unchecked(nibble); if !state_mask.is_bit_set(nibble) { state_mask.set_bit(nibble); @@ -1634,28 +1731,27 @@ impl RevealedSparseTrie
<P>
{ if self.values.remove(path).is_none() { if let Some(&SparseNode::Hash(hash)) = self.nodes.get(path) { // Leaf is present in the trie, but it's blinded. - return Err(SparseTrieErrorKind::BlindedNode { path: path.clone(), hash }.into()) + return Err(SparseTrieErrorKind::BlindedNode { path: *path, hash }.into()) } trace!(target: "trie::sparse", ?path, "Leaf node is not present in the trie"); // Leaf is not present in the trie. return Ok(()) } - self.prefix_set.insert(path.clone()); + self.prefix_set.insert(*path); // If the path wasn't present in `values`, we still need to walk the trie and ensure that // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry // in `nodes`, but not in the `values`. let mut removed_nodes = self.take_nodes_for_path(path)?; - trace!(target: "trie::sparse", ?path, ?removed_nodes, "Removed nodes for path"); // Pop the first node from the stack which is the leaf node we want to remove. let mut child = removed_nodes.pop().expect("leaf exists"); #[cfg(debug_assertions)] { - let mut child_path = child.path.clone(); + let mut child_path = child.path; let SparseNode::Leaf { key, .. } = &child.node else { panic!("expected leaf node") }; - child_path.extend_from_slice_unchecked(key); + child_path.extend(key); assert_eq!(&child_path, path); } @@ -1698,8 +1794,8 @@ impl RevealedSparseTrie
<P>
{ SparseNode::Leaf { key: leaf_key, .. } => { self.nodes.remove(&child.path); - let mut new_key = key.clone(); - new_key.extend_from_slice_unchecked(leaf_key); + let mut new_key = *key; + new_key.extend(leaf_key); SparseNode::new_leaf(new_key) } // For an extension node, we collapse them into one extension node, @@ -1707,8 +1803,8 @@ impl RevealedSparseTrie
<P>
{ SparseNode::Extension { key: extension_key, .. } => { self.nodes.remove(&child.path); - let mut new_key = key.clone(); - new_key.extend_from_slice_unchecked(extension_key); + let mut new_key = *key; + new_key.extend(extension_key); SparseNode::new_ext(new_key) } // For a branch node, we just leave the extension node as-is. @@ -1729,7 +1825,7 @@ impl RevealedSparseTrie
<P>
{ state_mask.first_set_bit_index().expect("state mask is not empty"); // Get full path of the only child node left. - let mut child_path = removed_path.clone(); + let mut child_path = removed_path; child_path.push_unchecked(child_nibble); trace!(target: "trie::sparse", ?removed_path, ?child_path, "Branch node has only one child"); @@ -1749,7 +1845,7 @@ impl RevealedSparseTrie
<P>
{ "Revealing remaining blinded branch child" ); self.reveal_node( - child_path.clone(), + child_path, decoded, TrieMasks { hash_mask, tree_mask }, )?; @@ -1776,7 +1872,7 @@ impl RevealedSparseTrie
<P>
{ delete_child = true; let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); - new_key.extend_from_slice_unchecked(key); + new_key.extend(key); SparseNode::new_leaf(new_key) } // If the only child node is an extension node, we downgrade the branch @@ -1786,7 +1882,7 @@ impl RevealedSparseTrie
<P>
{ delete_child = true; let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); - new_key.extend_from_slice_unchecked(key); + new_key.extend(key); SparseNode::new_ext(new_key) } // If the only child is a branch node, we downgrade the current branch @@ -1802,7 +1898,7 @@ impl RevealedSparseTrie
<P>
{ if let Some(updates) = self.updates.as_mut() { updates.updated_nodes.remove(&removed_path); - updates.removed_nodes.insert(removed_path.clone()); + updates.removed_nodes.insert(removed_path); } new_node @@ -1815,7 +1911,7 @@ impl RevealedSparseTrie
<P>
{ }; child = RemovedSparseNode { - path: removed_path.clone(), + path: removed_path, node: new_node.clone(), unset_branch_nibble: None, }; @@ -1829,7 +1925,7 @@ impl RevealedSparseTrie
<P>
{ /// Enum representing sparse trie node type. #[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum SparseNodeType { +pub enum SparseNodeType { /// Empty trie node. Empty, /// A placeholder that stores only the hash for a node that has not been fully revealed. @@ -1849,15 +1945,18 @@ enum SparseNodeType { } impl SparseNodeType { - const fn is_hash(&self) -> bool { + /// Returns true if the node is a hash node. + pub const fn is_hash(&self) -> bool { matches!(self, Self::Hash) } - const fn is_branch(&self) -> bool { + /// Returns true if the node is a branch node. + pub const fn is_branch(&self) -> bool { matches!(self, Self::Branch { .. }) } - const fn store_in_db_trie(&self) -> Option { + /// Returns true if the node should be stored in the database. + pub const fn store_in_db_trie(&self) -> Option { match *self { Self::Extension { store_in_db_trie } | Self::Branch { store_in_db_trie } => { store_in_db_trie @@ -1953,6 +2052,32 @@ impl SparseNode { pub const fn is_hash(&self) -> bool { matches!(self, Self::Hash(_)) } + + /// Returns the hash of the node if it exists. + pub const fn hash(&self) -> Option { + match self { + Self::Empty => None, + Self::Hash(hash) => Some(*hash), + Self::Leaf { hash, .. } | Self::Extension { hash, .. } | Self::Branch { hash, .. } => { + *hash + } + } + } + + /// Sets the hash of the node for testing purposes. + /// + /// For [`SparseNode::Empty`] and [`SparseNode::Hash`] nodes, this method does nothing. + #[cfg(any(test, feature = "test-utils"))] + pub const fn set_hash(&mut self, new_hash: Option) { + match self { + Self::Empty | Self::Hash(_) => { + // Cannot set hash for Empty or Hash nodes + } + Self::Leaf { hash, .. } | Self::Extension { hash, .. } | Self::Branch { hash, .. } => { + *hash = new_hash; + } + } + } } /// A helper struct used to store information about a node that has been removed @@ -2006,25 +2131,25 @@ impl RlpNodeBuffers { } /// RLP node path stack item. -#[derive(Debug)] -struct RlpNodePathStackItem { +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct RlpNodePathStackItem { /// Level at which the node is located. Higher numbers correspond to lower levels in the trie. - level: usize, + pub level: usize, /// Path to the node. - path: Nibbles, + pub path: Nibbles, /// Whether the path is in the prefix set. If [`None`], then unknown yet. - is_in_prefix_set: Option, + pub is_in_prefix_set: Option, } /// RLP node stack item. -#[derive(Debug)] -struct RlpNodeStackItem { +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct RlpNodeStackItem { /// Path to the node. - path: Nibbles, + pub path: Nibbles, /// RLP node. - rlp_node: RlpNode, + pub rlp_node: RlpNode, /// Type of the node. - node_type: SparseNodeType, + pub node_type: SparseNodeType, } /// Tracks modifications to the sparse trie structure. @@ -2033,9 +2158,12 @@ struct RlpNodeStackItem { /// one to make batch updates to a persistent database. #[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct SparseTrieUpdates { - pub(crate) updated_nodes: HashMap, - pub(crate) removed_nodes: HashSet, - pub(crate) wiped: bool, + /// Collection of updated intermediate nodes indexed by full path. + pub updated_nodes: HashMap, + /// Collection of removed intermediate nodes indexed by full path. + pub removed_nodes: HashSet, + /// Flag indicating whether the trie was wiped. + pub wiped: bool, } impl SparseTrieUpdates { @@ -2052,6 +2180,13 @@ impl SparseTrieUpdates { self.removed_nodes.clear(); self.wiped = false; } + + /// Extends the updates with another set of updates. 
+ pub fn extend(&mut self, other: Self) { + self.updated_nodes.extend(other.updated_nodes); + self.removed_nodes.extend(other.removed_nodes); + self.wiped |= other.wiped; + } } #[cfg(test)] @@ -2084,7 +2219,7 @@ mod find_leaf_tests { let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); let value = b"test_value".to_vec(); - sparse.update_leaf(path.clone(), value.clone()).unwrap(); + sparse.update_leaf(path, value.clone()).unwrap(); // Check that the leaf exists let result = sparse.find_leaf(&path, None); @@ -2103,7 +2238,7 @@ mod find_leaf_tests { let value = b"test_value".to_vec(); let wrong_value = b"wrong_value".to_vec(); - sparse.update_leaf(path.clone(), value).unwrap(); + sparse.update_leaf(path, value).unwrap(); // Check with wrong expected value let result = sparse.find_leaf(&path, Some(&wrong_value)); @@ -2142,7 +2277,7 @@ mod find_leaf_tests { fn find_leaf_exists_no_value_check() { let mut sparse = RevealedSparseTrie::::default(); let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); - sparse.update_leaf(path.clone(), VALUE_A()).unwrap(); + sparse.update_leaf(path, VALUE_A()).unwrap(); let result = sparse.find_leaf(&path, None); assert_matches!(result, Ok(LeafLookup::Exists)); @@ -2153,7 +2288,7 @@ mod find_leaf_tests { let mut sparse = RevealedSparseTrie::::default(); let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); let value = VALUE_A(); - sparse.update_leaf(path.clone(), value.clone()).unwrap(); + sparse.update_leaf(path, value.clone()).unwrap(); let result = sparse.find_leaf(&path, Some(&value)); assert_matches!(result, Ok(LeafLookup::Exists)); @@ -2248,7 +2383,7 @@ mod find_leaf_tests { Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3]), SparseNode::new_branch(TrieMask::new(0b10000)), ); // Branch at 0x123, child 4 - nodes.insert(leaf_path.clone(), SparseNode::Hash(blinded_hash)); // Blinded node at 0x1234 + nodes.insert(leaf_path, SparseNode::Hash(blinded_hash)); // Blinded node at 0x1234 let sparse = RevealedSparseTrie { provider: DefaultBlindedProvider, @@ -2283,7 +2418,7 @@ mod find_leaf_tests { let state_mask = TrieMask::new(0b100010); nodes.insert(Nibbles::default(), SparseNode::new_branch(state_mask)); - nodes.insert(path_to_blind.clone(), SparseNode::Hash(blinded_hash)); + nodes.insert(path_to_blind, SparseNode::Hash(blinded_hash)); let path_revealed = Nibbles::from_nibbles_unchecked([0x5]); let path_revealed_leaf = Nibbles::from_nibbles_unchecked([0x5, 0x6, 0x7, 0x8]); nodes.insert( @@ -2327,7 +2462,7 @@ mod find_leaf_tests { // 1. Construct the RLP representation of the children for the root branch let rlp_node_child1 = RlpNode::word_rlp(&blinded_hash); // Blinded node - let leaf_node_child5 = LeafNode::new(revealed_leaf_suffix.clone(), revealed_value.clone()); + let leaf_node_child5 = LeafNode::new(revealed_leaf_suffix, revealed_value.clone()); let leaf_node_child5_rlp_buf = alloy_rlp::encode(&leaf_node_child5); let hash_of_child5 = keccak256(&leaf_node_child5_rlp_buf); let rlp_node_child5 = RlpNode::word_rlp(&hash_of_child5); @@ -2353,11 +2488,7 @@ mod find_leaf_tests { // 4. 
Explicitly reveal the leaf node for child 5 sparse - .reveal_node( - revealed_leaf_prefix.clone(), - TrieNode::Leaf(leaf_node_child5), - TrieMasks::none(), - ) + .reveal_node(revealed_leaf_prefix, TrieNode::Leaf(leaf_node_child5), TrieMasks::none()) .expect("Failed to reveal leaf node"); // Assertions after we reveal child 5 @@ -2407,13 +2538,16 @@ mod tests { fn pad_nibbles_left(nibbles: Nibbles) -> Nibbles { let mut base = Nibbles::from_nibbles_unchecked(vec![0; B256::len_bytes() * 2 - nibbles.len()]); - base.extend_from_slice_unchecked(&nibbles); + base.extend(&nibbles); base } /// Pad nibbles to the length of a B256 hash with zeros on the right. fn pad_nibbles_right(mut nibbles: Nibbles) -> Nibbles { - nibbles.extend_from_slice_unchecked(&vec![0; B256::len_bytes() * 2 - nibbles.len()]); + nibbles.extend(&Nibbles::from_nibbles_unchecked(vec![ + 0; + B256::len_bytes() * 2 - nibbles.len() + ])); nibbles } @@ -2473,14 +2607,14 @@ mod tests { .clone() .unwrap_or_default() .iter() - .map(|(path, node)| (path.clone(), node.hash_mask)) + .map(|(path, node)| (*path, node.hash_mask)) .collect(); let branch_node_tree_masks = hash_builder .updated_branch_nodes .clone() .unwrap_or_default() .iter() - .map(|(path, node)| (path.clone(), node.tree_mask)) + .map(|(path, node)| (*path, node.tree_mask)) .collect(); let mut trie_updates = TrieUpdates::default(); @@ -2554,10 +2688,10 @@ mod tests { let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = run_hash_builder( - [(key.clone(), value())], + [(key, value())], NoopAccountTrieCursor::default(), Default::default(), - [key.clone()], + [key], ); let mut sparse = RevealedSparseTrie::default().with_updates(true); @@ -2584,7 +2718,7 @@ mod tests { let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = run_hash_builder( - paths.iter().cloned().zip(std::iter::repeat_with(value)), + paths.iter().copied().zip(std::iter::repeat_with(value)), NoopAccountTrieCursor::default(), Default::default(), paths.clone(), @@ -2592,7 +2726,7 @@ mod tests { let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(path.clone(), value_encoded()).unwrap(); + sparse.update_leaf(*path, value_encoded()).unwrap(); } let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -2614,7 +2748,7 @@ mod tests { let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = run_hash_builder( - paths.iter().cloned().zip(std::iter::repeat_with(value)), + paths.iter().copied().zip(std::iter::repeat_with(value)), NoopAccountTrieCursor::default(), Default::default(), paths.clone(), @@ -2622,7 +2756,7 @@ mod tests { let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(path.clone(), value_encoded()).unwrap(); + sparse.update_leaf(*path, value_encoded()).unwrap(); } let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -2652,7 +2786,7 @@ mod tests { let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = run_hash_builder( - paths.iter().sorted_unstable().cloned().zip(std::iter::repeat_with(value)), + paths.iter().sorted_unstable().copied().zip(std::iter::repeat_with(value)), NoopAccountTrieCursor::default(), Default::default(), paths.clone(), @@ -2660,7 +2794,7 @@ mod tests { let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(path.clone(), value_encoded()).unwrap(); + sparse.update_leaf(*path, 
value_encoded()).unwrap(); } let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -2691,7 +2825,7 @@ mod tests { let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = run_hash_builder( - paths.iter().cloned().zip(std::iter::repeat_with(|| old_value)), + paths.iter().copied().zip(std::iter::repeat_with(|| old_value)), NoopAccountTrieCursor::default(), Default::default(), paths.clone(), @@ -2699,7 +2833,7 @@ mod tests { let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(path.clone(), old_value_encoded.clone()).unwrap(); + sparse.update_leaf(*path, old_value_encoded.clone()).unwrap(); } let sparse_root = sparse.root(); let sparse_updates = sparse.updates_ref(); @@ -2710,14 +2844,14 @@ mod tests { let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = run_hash_builder( - paths.iter().cloned().zip(std::iter::repeat_with(|| new_value)), + paths.iter().copied().zip(std::iter::repeat_with(|| new_value)), NoopAccountTrieCursor::default(), Default::default(), paths.clone(), ); for path in &paths { - sparse.update_leaf(path.clone(), new_value_encoded.clone()).unwrap(); + sparse.update_leaf(*path, new_value_encoded.clone()).unwrap(); } let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -3092,7 +3226,7 @@ mod tests { state.clone(), trie_cursor.account_trie_cursor().unwrap(), Default::default(), - state.keys().cloned().collect::>(), + state.keys().copied().collect::>(), ); // Write trie updates to the database @@ -3134,7 +3268,7 @@ mod tests { .iter() .map(|nibbles| B256::from_slice(&nibbles.pack())) .collect(), - state.keys().cloned().collect::>(), + state.keys().copied().collect::>(), ); // Write trie updates to the database @@ -3157,20 +3291,19 @@ mod tests { fn transform_updates( updates: Vec>, - mut rng: impl rand_08::Rng, + mut rng: impl rand::Rng, ) -> Vec<(BTreeMap, BTreeSet)> { let mut keys = BTreeSet::new(); updates .into_iter() .map(|update| { - keys.extend(update.keys().cloned()); + keys.extend(update.keys().copied()); let keys_to_delete_len = update.len() / 2; let keys_to_delete = (0..keys_to_delete_len) .map(|_| { - let key = rand_08::seq::IteratorRandom::choose(keys.iter(), &mut rng) - .unwrap() - .clone(); + let key = + *rand::seq::IteratorRandom::choose(keys.iter(), &mut rng).unwrap(); keys.take(&key).unwrap() }) .collect(); @@ -3715,35 +3848,35 @@ mod tests { let normal_printed = format!("{sparse}"); let expected = "\ -Root -> Extension { key: Nibbles(0x05), hash: None, store_in_db_trie: None } +Root -> Extension { key: Nibbles(0x5), hash: None, store_in_db_trie: None } 5 -> Branch { state_mask: TrieMask(0000000000001101), hash: None, store_in_db_trie: None } -50 -> Extension { key: Nibbles(0x0203), hash: None, store_in_db_trie: None } +50 -> Extension { key: Nibbles(0x23), hash: None, store_in_db_trie: None } 5023 -> Branch { state_mask: TrieMask(0000000000001010), hash: None, store_in_db_trie: None } 50231 -> Leaf { key: Nibbles(0x), hash: None } 50233 -> Leaf { key: Nibbles(0x), hash: None } -52013 -> Leaf { key: Nibbles(0x000103), hash: None } +52013 -> Leaf { key: Nibbles(0x013), hash: None } 53 -> Branch { state_mask: TrieMask(0000000000001010), hash: None, store_in_db_trie: None } -53102 -> Leaf { key: Nibbles(0x0002), hash: None } +53102 -> Leaf { key: Nibbles(0x02), hash: None } 533 -> Branch { state_mask: TrieMask(0000000000000101), hash: None, store_in_db_trie: None } -53302 -> Leaf { key: Nibbles(0x02), hash: 
None } -53320 -> Leaf { key: Nibbles(0x00), hash: None } +53302 -> Leaf { key: Nibbles(0x2), hash: None } +53320 -> Leaf { key: Nibbles(0x0), hash: None } "; assert_eq!(normal_printed, expected); let alternate_printed = format!("{sparse:#}"); let expected = "\ -Root -> Extension { key: Nibbles(0x05), hash: None, store_in_db_trie: None } +Root -> Extension { key: Nibbles(0x5), hash: None, store_in_db_trie: None } 5 -> Branch { state_mask: TrieMask(0000000000001101), hash: None, store_in_db_trie: None } - 50 -> Extension { key: Nibbles(0x0203), hash: None, store_in_db_trie: None } + 50 -> Extension { key: Nibbles(0x23), hash: None, store_in_db_trie: None } 5023 -> Branch { state_mask: TrieMask(0000000000001010), hash: None, store_in_db_trie: None } 50231 -> Leaf { key: Nibbles(0x), hash: None } 50233 -> Leaf { key: Nibbles(0x), hash: None } - 52013 -> Leaf { key: Nibbles(0x000103), hash: None } + 52013 -> Leaf { key: Nibbles(0x013), hash: None } 53 -> Branch { state_mask: TrieMask(0000000000001010), hash: None, store_in_db_trie: None } - 53102 -> Leaf { key: Nibbles(0x0002), hash: None } + 53102 -> Leaf { key: Nibbles(0x02), hash: None } 533 -> Branch { state_mask: TrieMask(0000000000000101), hash: None, store_in_db_trie: None } - 53302 -> Leaf { key: Nibbles(0x02), hash: None } - 53320 -> Leaf { key: Nibbles(0x00), hash: None } + 53302 -> Leaf { key: Nibbles(0x2), hash: None } + 53320 -> Leaf { key: Nibbles(0x0), hash: None } "; assert_eq!(alternate_printed, expected); diff --git a/crates/trie/trie/src/node_iter.rs b/crates/trie/trie/src/node_iter.rs index 17895d1d38e..dfb140fdf98 100644 --- a/crates/trie/trie/src/node_iter.rs +++ b/crates/trie/trie/src/node_iter.rs @@ -208,7 +208,7 @@ where #[cfg(feature = "metrics")] self.metrics.inc_branch_nodes_returned(); return Ok(Some(TrieElement::Branch(TrieBranchNode::new( - key.clone(), + *key, self.walker.hash().unwrap(), self.walker.children_are_in_trie(), )))) @@ -275,7 +275,7 @@ where // of this, we need to check that the current walker key has a prefix of the key // that we seeked to. 
if can_skip_node && - self.walker.key().is_some_and(|key| key.has_prefix(&seek_prefix)) && + self.walker.key().is_some_and(|key| key.starts_with(&seek_prefix)) && self.walker.children_are_in_trie() { trace!( @@ -500,7 +500,7 @@ mod tests { visited_key: Some(branch_node_0.0) }, KeyVisit { - visit_type: KeyVisitType::SeekNonExact(branch_node_2.0.clone()), + visit_type: KeyVisitType::SeekNonExact(branch_node_2.0), visited_key: Some(branch_node_2.0) }, KeyVisit { diff --git a/crates/trie/trie/src/proof/mod.rs b/crates/trie/trie/src/proof/mod.rs index 64a8f4d3b93..266aac19a39 100644 --- a/crates/trie/trie/src/proof/mod.rs +++ b/crates/trie/trie/src/proof/mod.rs @@ -167,10 +167,7 @@ where let (branch_node_hash_masks, branch_node_tree_masks) = if self.collect_branch_node_masks { let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); ( - updated_branch_nodes - .iter() - .map(|(path, node)| (path.clone(), node.hash_mask)) - .collect(), + updated_branch_nodes.iter().map(|(path, node)| (*path, node.hash_mask)).collect(), updated_branch_nodes .into_iter() .map(|(path, node)| (path, node.tree_mask)) @@ -308,10 +305,7 @@ where let (branch_node_hash_masks, branch_node_tree_masks) = if self.collect_branch_node_masks { let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); ( - updated_branch_nodes - .iter() - .map(|(path, node)| (path.clone(), node.hash_mask)) - .collect(), + updated_branch_nodes.iter().map(|(path, node)| (*path, node.hash_mask)).collect(), updated_branch_nodes .into_iter() .map(|(path, node)| (path, node.tree_mask)) diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index 40f4447daa6..4925dc8a666 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -83,7 +83,7 @@ impl<'a, C: TrieCursor> InMemoryAccountTrieCursor<'a, C> { } // Reposition the cursor to the first greater or equal node that wasn't removed. - let mut db_entry = self.cursor.seek(key.clone())?; + let mut db_entry = self.cursor.seek(key)?; while db_entry.as_ref().is_some_and(|entry| self.removed_nodes.contains(&entry.0)) { db_entry = self.cursor.next()?; } @@ -101,7 +101,7 @@ impl<'a, C: TrieCursor> InMemoryAccountTrieCursor<'a, C> { let in_memory = self.in_memory_cursor.first_after(&last); // Reposition the cursor to the first greater or equal node that wasn't removed. 
- let mut db_entry = self.cursor.seek(last.clone())?; + let mut db_entry = self.cursor.seek(last)?; while db_entry .as_ref() .is_some_and(|entry| entry.0 < last || self.removed_nodes.contains(&entry.0)) @@ -120,7 +120,7 @@ impl TrieCursor for InMemoryAccountTrieCursor<'_, C> { key: Nibbles, ) -> Result, DatabaseError> { let entry = self.seek_inner(key, true)?; - self.last_key = entry.as_ref().map(|(nibbles, _)| nibbles.clone()); + self.last_key = entry.as_ref().map(|(nibbles, _)| *nibbles); Ok(entry) } @@ -129,15 +129,15 @@ impl TrieCursor for InMemoryAccountTrieCursor<'_, C> { key: Nibbles, ) -> Result, DatabaseError> { let entry = self.seek_inner(key, false)?; - self.last_key = entry.as_ref().map(|(nibbles, _)| nibbles.clone()); + self.last_key = entry.as_ref().map(|(nibbles, _)| *nibbles); Ok(entry) } fn next(&mut self) -> Result, DatabaseError> { let next = match &self.last_key { Some(last) => { - let entry = self.next_inner(last.clone())?; - self.last_key = entry.as_ref().map(|entry| entry.0.clone()); + let entry = self.next_inner(*last)?; + self.last_key = entry.as_ref().map(|entry| entry.0); entry } // no previous entry was found @@ -148,7 +148,7 @@ impl TrieCursor for InMemoryAccountTrieCursor<'_, C> { fn current(&mut self) -> Result, DatabaseError> { match &self.last_key { - Some(key) => Ok(Some(key.clone())), + Some(key) => Ok(Some(*key)), None => self.cursor.current(), } } @@ -207,7 +207,7 @@ impl InMemoryStorageTrieCursor<'_, C> { } // Reposition the cursor to the first greater or equal node that wasn't removed. - let mut db_entry = self.cursor.seek(key.clone())?; + let mut db_entry = self.cursor.seek(key)?; while db_entry .as_ref() .is_some_and(|entry| self.removed_nodes.as_ref().is_some_and(|r| r.contains(&entry.0))) @@ -231,7 +231,7 @@ impl InMemoryStorageTrieCursor<'_, C> { } // Reposition the cursor to the first greater or equal node that wasn't removed. 
- let mut db_entry = self.cursor.seek(last.clone())?; + let mut db_entry = self.cursor.seek(last)?; while db_entry.as_ref().is_some_and(|entry| { entry.0 < last || self.removed_nodes.as_ref().is_some_and(|r| r.contains(&entry.0)) }) { @@ -249,7 +249,7 @@ impl TrieCursor for InMemoryStorageTrieCursor<'_, C> { key: Nibbles, ) -> Result, DatabaseError> { let entry = self.seek_inner(key, true)?; - self.last_key = entry.as_ref().map(|(nibbles, _)| nibbles.clone()); + self.last_key = entry.as_ref().map(|(nibbles, _)| *nibbles); Ok(entry) } @@ -258,15 +258,15 @@ impl TrieCursor for InMemoryStorageTrieCursor<'_, C> { key: Nibbles, ) -> Result, DatabaseError> { let entry = self.seek_inner(key, false)?; - self.last_key = entry.as_ref().map(|(nibbles, _)| nibbles.clone()); + self.last_key = entry.as_ref().map(|(nibbles, _)| *nibbles); Ok(entry) } fn next(&mut self) -> Result, DatabaseError> { let next = match &self.last_key { Some(last) => { - let entry = self.next_inner(last.clone())?; - self.last_key = entry.as_ref().map(|entry| entry.0.clone()); + let entry = self.next_inner(*last)?; + self.last_key = entry.as_ref().map(|entry| entry.0); entry } // no previous entry was found @@ -277,7 +277,7 @@ impl TrieCursor for InMemoryStorageTrieCursor<'_, C> { fn current(&mut self) -> Result, DatabaseError> { match &self.last_key { - Some(key) => Ok(Some(key.clone())), + Some(key) => Ok(Some(*key)), None => self.cursor.current(), } } diff --git a/crates/trie/trie/src/trie_cursor/mock.rs b/crates/trie/trie/src/trie_cursor/mock.rs index 4c7d20defb0..feda1c72a85 100644 --- a/crates/trie/trie/src/trie_cursor/mock.rs +++ b/crates/trie/trie/src/trie_cursor/mock.rs @@ -107,13 +107,13 @@ impl TrieCursor for MockTrieCursor { &mut self, key: Nibbles, ) -> Result, DatabaseError> { - let entry = self.trie_nodes.get(&key).cloned().map(|value| (key.clone(), value)); + let entry = self.trie_nodes.get(&key).cloned().map(|value| (key, value)); if let Some((key, _)) = &entry { - self.current_key = Some(key.clone()); + self.current_key = Some(*key); } self.visited_keys.lock().push(KeyVisit { visit_type: KeyVisitType::SeekExact(key), - visited_key: entry.as_ref().map(|(k, _)| k.clone()), + visited_key: entry.as_ref().map(|(k, _)| *k), }); Ok(entry) } @@ -124,14 +124,13 @@ impl TrieCursor for MockTrieCursor { key: Nibbles, ) -> Result, DatabaseError> { // Find the first key that is greater than or equal to the given key. - let entry = - self.trie_nodes.iter().find_map(|(k, v)| (k >= &key).then(|| (k.clone(), v.clone()))); + let entry = self.trie_nodes.iter().find_map(|(k, v)| (k >= &key).then(|| (*k, v.clone()))); if let Some((key, _)) = &entry { - self.current_key = Some(key.clone()); + self.current_key = Some(*key); } self.visited_keys.lock().push(KeyVisit { visit_type: KeyVisitType::SeekNonExact(key), - visited_key: entry.as_ref().map(|(k, _)| k.clone()), + visited_key: entry.as_ref().map(|(k, _)| *k), }); Ok(entry) } @@ -144,19 +143,19 @@ impl TrieCursor for MockTrieCursor { iter.find(|(k, _)| self.current_key.as_ref().is_none_or(|current| k.starts_with(current))) .expect("current key should exist in trie nodes"); // Get the next key-value pair. 
- let entry = iter.next().map(|(k, v)| (k.clone(), v.clone())); + let entry = iter.next().map(|(k, v)| (*k, v.clone())); if let Some((key, _)) = &entry { - self.current_key = Some(key.clone()); + self.current_key = Some(*key); } self.visited_keys.lock().push(KeyVisit { visit_type: KeyVisitType::Next, - visited_key: entry.as_ref().map(|(k, _)| k.clone()), + visited_key: entry.as_ref().map(|(k, _)| *k), }); Ok(entry) } #[instrument(level = "trace", skip(self), ret)] fn current(&mut self) -> Result, DatabaseError> { - Ok(self.current_key.clone()) + Ok(self.current_key) } } diff --git a/crates/trie/trie/src/trie_cursor/subnode.rs b/crates/trie/trie/src/trie_cursor/subnode.rs index 8443934ee6f..82a5d5e670a 100644 --- a/crates/trie/trie/src/trie_cursor/subnode.rs +++ b/crates/trie/trie/src/trie_cursor/subnode.rs @@ -74,7 +74,7 @@ impl CursorSubNode { node: Option, position: SubNodePosition, ) -> Self { - let mut full_key = key.clone(); + let mut full_key = key; if let Some(nibble) = position.as_child() { full_key.push(nibble); } diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index b3c30a81ef2..5bbedb23535 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -139,9 +139,7 @@ impl TrieWalker { #[instrument(level = "trace", skip(self), ret)] pub fn next_unprocessed_key(&self) -> Option<(B256, Nibbles)> { self.key() - .and_then( - |key| if self.can_skip_current_node { key.increment() } else { Some(key.clone()) }, - ) + .and_then(|key| if self.can_skip_current_node { key.increment() } else { Some(*key) }) .map(|key| { let mut packed = key.pack(); packed.resize(32, 0); @@ -249,8 +247,8 @@ impl TrieWalker { /// Retrieves the current root node from the DB, seeking either the exact node or the next one. fn node(&mut self, exact: bool) -> Result, DatabaseError> { - let key = self.key().expect("key must exist").clone(); - let entry = if exact { self.cursor.seek_exact(key)? } else { self.cursor.seek(key)? }; + let key = self.key().expect("key must exist"); + let entry = if exact { self.cursor.seek_exact(*key)? } else { self.cursor.seek(*key)? }; #[cfg(feature = "metrics")] self.metrics.inc_branch_nodes_seeked(); @@ -274,7 +272,7 @@ impl TrieWalker { // We need to sync the stack with the trie structure when consuming a new node. This is // necessary for proper traversal and accurately representing the trie in the stack. if !key.is_empty() && !self.stack.is_empty() { - self.stack[0].set_nibble(key[0]); + self.stack[0].set_nibble(key.get_unchecked(0)); } // The current tree mask might have been set incorrectly. 
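Taken together, the trie-cursor hunks above are a mechanical migration enabled by the `Nibbles` key type becoming `Copy`: every `key.clone()` turns into a plain copy (`*key`). A minimal, self-contained sketch of the caching-cursor pattern being touched, using a stand-in `Key` type rather than reth's actual `TrieCursor` API:

```rust
// Sketch of the pattern in the hunks above: a cursor that caches the last key
// it handed out. With a `Copy` key, updating the cache is a copy, not a clone.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Key(u64); // stand-in for reth's `Nibbles`

struct CachingCursor {
    /// Sorted (key, value) entries, standing in for the DB cursor.
    entries: Vec<(Key, u64)>,
    /// The last key handed out, used to resume iteration in `next`.
    last_key: Option<Key>,
}

impl CachingCursor {
    /// Return the first entry with key >= `key` and remember where we stopped.
    fn seek(&mut self, key: Key) -> Option<(Key, u64)> {
        let entry = self.entries.iter().copied().find(|(k, _)| *k >= key);
        self.last_key = entry.map(|(k, _)| k); // copy, not `.clone()`
        entry
    }

    /// Return the first entry strictly after the cached key.
    fn next(&mut self) -> Option<(Key, u64)> {
        let last = self.last_key?;
        let entry = self.entries.iter().copied().find(|(k, _)| *k > last);
        self.last_key = entry.map(|(k, _)| k);
        entry
    }
}

fn main() {
    let mut cursor =
        CachingCursor { entries: vec![(Key(1), 10), (Key(3), 30), (Key(7), 70)], last_key: None };
    assert_eq!(cursor.seek(Key(2)), Some((Key(3), 30)));
    assert_eq!(cursor.next(), Some((Key(7), 70)));
    assert_eq!(cursor.next(), None);
}
```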
diff --git a/book/cli/help.rs b/docs/cli/help.rs similarity index 81% rename from book/cli/help.rs rename to docs/cli/help.rs index 963f53deb0a..e97d0bbfc46 100755 --- a/book/cli/help.rs +++ b/docs/cli/help.rs @@ -10,25 +10,28 @@ regex = "1" --- use clap::Parser; use regex::Regex; -use std::borrow::Cow; -use std::fs::{self, File}; -use std::io::{self, Write}; -use std::iter::once; -use std::path::{Path, PathBuf}; -use std::process::{Command, Stdio}; -use std::str; -use std::sync::LazyLock; -use std::{fmt, process}; - -const SECTION_START: &str = "<!-- CLI_REFERENCE START -->"; -const SECTION_END: &str = "<!-- CLI_REFERENCE END -->"; -const README: &str = r#"# CLI Reference - - +use std::{ + borrow::Cow, + fmt, + fs::{self, File}, + io::{self, Write}, + iter::once, + path::{Path, PathBuf}, + process, + process::{Command, Stdio}, + str, + sync::LazyLock, +}; + +const SECTION_START: &str = "{/* CLI_REFERENCE START */}"; +const SECTION_END: &str = "{/* CLI_REFERENCE END */}"; +const README: &str = r#"import Summary from './SUMMARY.mdx'; + +# CLI Reference Automatically-generated CLI reference from `--help` output. -{{#include ./SUMMARY.md}} +<Summary />
"#; const TRIM_LINE_END_MARKDOWN: bool = true; @@ -49,7 +52,7 @@ struct Args { #[arg(long, default_value_t = String::from("."))] root_dir: String, - /// Indentation for the root SUMMARY.md file + /// Indentation for the root SUMMARY.mdx file #[arg(long, default_value_t = 2)] root_indentation: usize, @@ -61,7 +64,7 @@ struct Args { #[arg(long)] readme: bool, - /// Whether to update the root SUMMARY.md file + /// Whether to update the root SUMMARY.mdx file #[arg(long)] root_summary: bool, @@ -76,11 +79,7 @@ struct Args { fn write_file(file_path: &Path, content: &str) -> io::Result<()> { let content = if TRIM_LINE_END_MARKDOWN { - content - .lines() - .map(|line| line.trim_end()) - .collect::>() - .join("\n") + content.lines().map(|line| line.trim_end()).collect::>().join("\n") } else { content.to_string() }; @@ -106,25 +105,13 @@ fn main() -> io::Result<()> { while let Some(cmd) = todo_iter.pop() { let (new_subcmds, stdout) = get_entry(&cmd)?; if args.verbose && !new_subcmds.is_empty() { - println!( - "Found subcommands for \"{}\": {:?}", - cmd.command_name(), - new_subcmds - ); + println!("Found subcommands for \"{}\": {:?}", cmd.command_name(), new_subcmds); } // Add new subcommands to todo_iter (so that they are processed in the correct order). for subcmd in new_subcmds.into_iter().rev() { - let new_subcmds: Vec<_> = cmd - .subcommands - .iter() - .cloned() - .chain(once(subcmd)) - .collect(); - - todo_iter.push(Cmd { - cmd: cmd.cmd, - subcommands: new_subcmds, - }); + let new_subcmds: Vec<_> = cmd.subcommands.iter().cloned().chain(once(subcmd)).collect(); + + todo_iter.push(Cmd { cmd: cmd.cmd, subcommands: new_subcmds }); } output.push((cmd, stdout)); } @@ -134,25 +121,25 @@ fn main() -> io::Result<()> { cmd_markdown(&out_dir, cmd, stdout)?; } - // Generate SUMMARY.md. + // Generate SUMMARY.mdx. let summary: String = output .iter() .map(|(cmd, _)| cmd_summary(None, cmd, 0)) .chain(once("\n".to_string())) .collect(); - write_file(&out_dir.clone().join("SUMMARY.md"), &summary)?; + write_file(&out_dir.clone().join("SUMMARY.mdx"), &summary)?; // Generate README.md. if args.readme { - let path = &out_dir.join("README.md"); + let path = &out_dir.join("README.mdx"); if args.verbose { - println!("Writing README.md to \"{}\"", path.to_string_lossy()); + println!("Writing README.mdx to \"{}\"", path.to_string_lossy()); } write_file(path, README)?; } - // Generate root SUMMARY.md. + // Generate root SUMMARY.mdx. 
if args.root_summary { let root_summary: String = output .iter() @@ -166,7 +153,8 @@ fn main() -> io::Result<()> { if args.verbose { println!("Updating root summary in \"{}\"", path.to_string_lossy()); } - update_root_summary(path, &root_summary)?; + // TODO: This is where we update the cli reference sidebar.ts + // update_root_summary(path, &root_summary)?; } Ok(()) @@ -213,8 +201,7 @@ fn parse_sub_commands(s: &str) -> Vec { .lines() .take_while(|line| !line.starts_with("Options:") && !line.starts_with("Arguments:")) .filter_map(|line| { - re.captures(line) - .and_then(|cap| cap.get(1).map(|m| m.as_str().to_string())) + re.captures(line).and_then(|cap| cap.get(1).map(|m| m.as_str().to_string())) }) .filter(|cmd| cmd != "help") .map(String::from) @@ -229,7 +216,7 @@ fn cmd_markdown(out_dir: &Path, cmd: &Cmd, stdout: &str) -> io::Result<()> { let out_path = out_dir.join(cmd.to_string().replace(" ", "/")); fs::create_dir_all(out_path.parent().unwrap())?; - write_file(&out_path.with_extension("md"), &out)?; + write_file(&out_path.with_extension("mdx"), &out)?; Ok(()) } @@ -265,12 +252,12 @@ fn cmd_summary(md_root: Option, cmd: &Cmd, indent: usize) -> String { Some(md_root) => format!("{}/{}", md_root.to_string_lossy(), cmd_path), }; let indent_string = " ".repeat(indent + (cmd.subcommands.len() * 2)); - format!("{}- [`{}`](./{}.md)\n", indent_string, cmd_s, full_cmd_path) + format!("{}- [`{}`](/cli/{})\n", indent_string, cmd_s, full_cmd_path) } -/// Replaces the CLI_REFERENCE section in the root SUMMARY.md file. +/// Replaces the CLI_REFERENCE section in the root SUMMARY.mdx file. fn update_root_summary(root_dir: &Path, root_summary: &str) -> io::Result<()> { - let summary_file = root_dir.join("SUMMARY.md"); + let summary_file = root_dir.join("SUMMARY.mdx"); let original_summary_content = fs::read_to_string(&summary_file)?; let section_re = regex!(&format!(r"(?s)\s*{SECTION_START}.*?{SECTION_END}")); @@ -293,9 +280,8 @@ fn update_root_summary(root_dir: &Path, root_summary: &str) -> io::Result<()> { let root_summary_s = root_summary.trim_end().replace("\n\n", "\n"); let replace_with = format!(" {}\n{}\n{}", SECTION_START, root_summary_s, last_line); - let new_root_summary = section_re - .replace(&original_summary_content, replace_with.as_str()) - .to_string(); + let new_root_summary = + section_re.replace(&original_summary_content, replace_with.as_str()).to_string(); let mut root_summary_file = File::create(&summary_file)?; root_summary_file.write_all(new_root_summary.as_bytes()) @@ -349,17 +335,11 @@ struct Cmd<'a> { impl<'a> Cmd<'a> { fn command_name(&self) -> &str { - self.cmd - .file_name() - .and_then(|os_str| os_str.to_str()) - .expect("Expect valid command") + self.cmd.file_name().and_then(|os_str| os_str.to_str()).expect("Expect valid command") } fn new(cmd: &'a PathBuf) -> Self { - Self { - cmd, - subcommands: Vec::new(), - } + Self { cmd, subcommands: Vec::new() } } } diff --git a/docs/cli/update.sh b/docs/cli/update.sh new file mode 100755 index 00000000000..b75dbd789af --- /dev/null +++ b/docs/cli/update.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -eo pipefail + +DOCS_ROOT="$(dirname "$(dirname "$0")")" +RETH=${1:-"$(dirname "$DOCS_ROOT")/target/debug/reth"} +VOCS_PAGES_ROOT="$DOCS_ROOT/vocs/docs/pages" +echo "Generating CLI documentation for reth at $RETH" + +echo "Using docs root: $DOCS_ROOT" +echo "Using vocs pages root: $VOCS_PAGES_ROOT" +cmd=( + "$(dirname "$0")/help.rs" + --root-dir "$DOCS_ROOT/" + --root-indentation 2 + --root-summary + --verbose + --out-dir 
"$VOCS_PAGES_ROOT/cli/" + "$RETH" +) +echo "Running: $" "${cmd[*]}" +"${cmd[@]}" diff --git a/docs/crates/db.md b/docs/crates/db.md index 9ebcf10d67a..4790d8daf4e 100644 --- a/docs/crates/db.md +++ b/docs/crates/db.md @@ -67,7 +67,7 @@ There are many tables within the node, all used to store different types of data ## Database -Reth's database design revolves around it's main [Database trait](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/database.rs#L8-L52), which implements the database's functionality across many types. Let's take a quick look at the `Database` trait and how it works. +Reth's database design revolves around its main [Database trait](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/database.rs#L8-L52), which implements the database's functionality across many types. Let's take a quick look at the `Database` trait and how it works. [File: crates/storage/db-api/src/database.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/database.rs#L8-L52) diff --git a/docs/crates/eth-wire.md b/docs/crates/eth-wire.md index 7ab87e914b2..1b4ba2d80e3 100644 --- a/docs/crates/eth-wire.md +++ b/docs/crates/eth-wire.md @@ -1,6 +1,6 @@ # eth-wire -The `eth-wire` crate provides abstractions over the [``RLPx``](https://github.com/ethereum/devp2p/blob/master/rlpx.md) and +The `eth-wire` crate provides abstractions over the [`RLPx`](https://github.com/ethereum/devp2p/blob/master/rlpx.md) and [Eth wire](https://github.com/ethereum/devp2p/blob/master/caps/eth.md) protocols. This crate can be thought of as having 2 components: @@ -334,7 +334,7 @@ impl Sink for EthStream { } ``` ## Unauthed streams -For a session to be established, peers in the Ethereum network must first exchange a `Hello` message in the ``RLPx`` layer and then a +For a session to be established, peers in the Ethereum network must first exchange a `Hello` message in the `RLPx` layer and then a `Status` message in the eth-wire layer. To perform these, reth has special `Unauthed` versions of streams described above. diff --git a/docs/crates/network.md b/docs/crates/network.md index a35b0c9de90..15c9c2494f5 100644 --- a/docs/crates/network.md +++ b/docs/crates/network.md @@ -494,6 +494,7 @@ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { } IncomingEthRequest::GetNodeData { .. } => {} IncomingEthRequest::GetReceipts { .. } => {} + IncomingEthRequest::GetReceipts69 { .. } => {} }, } } diff --git a/docs/design/database.md b/docs/design/database.md index b45c783bc5f..d81aced6f0c 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -23,7 +23,7 @@ ### Table layout -Historical state changes are indexed by `BlockNumber`. This means that `reth` stores the state for every account after every block that touched it, and it provides indexes for accessing that data quickly. While this may make the database size bigger (needs benchmark once `reth` is closer to prod). +Historical state changes are indexed by `BlockNumber`. This means that `reth` stores the state for every account after every block that touched it, and it provides indexes for accessing that data quickly. While this may make the database size bigger (needs benchmark once `reth` is closer to prod), it provides fast access to historical state. 
Below, you can see the table design that implements this scheme: diff --git a/docs/design/goals.md b/docs/design/goals.md index 819d6ca6fa9..a29b3a824c4 100644 --- a/docs/design/goals.md +++ b/docs/design/goals.md @@ -34,7 +34,7 @@ Why? This is a win for everyone. RPC providers meet more impressive SLAs, MEV se The biggest bottleneck in this pipeline is not the execution of the EVM interpreter itself, but rather in accessing state and managing I/O. As such, we think the largest optimizations to be made are closest to the DB layer. -Ideally, we can achieve such fast runtime operation that we can avoid storing certain things (e.g.?) on the disk, and are able to generate them on the fly, instead - minimizing disk footprint. +Ideally, we can achieve such fast runtime operation that we can avoid storing certain things (e.g., transaction receipts) on the disk, and are able to generate them on the fly, instead - minimizing disk footprint. --- diff --git a/docs/design/review.md b/docs/design/review.md index 2a3c5c20867..702ab7722f8 100644 --- a/docs/design/review.md +++ b/docs/design/review.md @@ -24,9 +24,9 @@ This document contains some of our research in how other codebases designed vari ## Header Downloaders * Erigon Header Downloader: - * A header downloader algo was introduced in [`erigon#1016`](https://github.com/ledgerwatch/erigon/pull/1016) and finished in [`erigon#1145`](https://github.com/ledgerwatch/erigon/pull/1145). At a high level, the downloader concurrently requested headers by hash, then sorted, validated and fused the responses into chain segments. Smaller segments were fused into larger as the gaps between them were filled. The downloader is also used to maintain hardcoded hashes (later renamed to preverified) to bootstrap the sync. + * A header downloader algorithm was introduced in [`erigon#1016`](https://github.com/ledgerwatch/erigon/pull/1016) and finished in [`erigon#1145`](https://github.com/ledgerwatch/erigon/pull/1145). At a high level, the downloader concurrently requested headers by hash, then sorted, validated and fused the responses into chain segments. Smaller segments were fused into larger as the gaps between them were filled. The downloader is also used to maintain hardcoded hashes (later renamed to preverified) to bootstrap the sync. * The downloader was refactored multiple times: [`erigon#1471`](https://github.com/ledgerwatch/erigon/pull/1471), [`erigon#1559`](https://github.com/ledgerwatch/erigon/pull/1559) and [`erigon#2035`](https://github.com/ledgerwatch/erigon/pull/2035). - * With PoS transition in [`erigon#3075`](https://github.com/ledgerwatch/erigon/pull/3075) terminal td was introduced to the algo to stop forward syncing. For the downward sync (post merge), the download was now delegated to [`EthBackendServer`](https://github.com/ledgerwatch/erigon/blob/3c95db00788dc740849c2207d886fe4db5a8c473/ethdb/privateapi/ethbackend.go#L245) + * With PoS transition in [`erigon#3075`](https://github.com/ledgerwatch/erigon/pull/3075) terminal td was introduced to the algorithm to stop forward syncing. For the downward sync (post merge), the downloader was now delegated to [`EthBackendServer`](https://github.com/ledgerwatch/erigon/blob/3c95db00788dc740849c2207d886fe4db5a8c473/ethdb/privateapi/ethbackend.go#L245) * Proper reverse PoS downloader was introduced in [`erigon#3092`](https://github.com/ledgerwatch/erigon/pull/3092) which downloads the header batches from tip until local head is reached. 
Refactored later in [`erigon#3340`](https://github.com/ledgerwatch/erigon/pull/3340) and [`erigon#3717`](https://github.com/ledgerwatch/erigon/pull/3717). * Akula Headers & Stage Downloader: diff --git a/docs/repo/layout.md b/docs/repo/layout.md index 525405216e1..8626d264432 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -2,7 +2,7 @@ This repository contains several Rust crates that implement the different building blocks of an Ethereum node. The high-level structure of the repository is as follows: -Generally reth is composed of a few components, with supporting crates. The main components can be defined as: +Generally, reth is composed of a few components, with supporting crates. The main components can be defined as: - [Project Layout](#project-layout) - [Documentation](#documentation) @@ -135,7 +135,7 @@ The IPC transport lives in [`rpc/ipc`](../../crates/rpc/ipc). #### Utilities Crates -- [`rpc/rpc-types-compat`](../../crates/rpc/rpc-types-compat): This crate various helper functions to convert between reth primitive types and rpc types. +- [`rpc/rpc-convert`](../../crates/rpc/rpc-convert): This crate provides various helper functions to convert between reth primitive types and rpc types. - [`rpc/layer`](../../crates/rpc/rpc-layer/): Some RPC middleware layers (e.g. `AuthValidator`, `JwtAuthValidator`) - [`rpc/rpc-testing-util`](../../crates/rpc/rpc-testing-util/): Reth RPC testing helpers diff --git a/docs/vocs/.claude/settings.local.json b/docs/vocs/.claude/settings.local.json new file mode 100644 index 00000000000..c2dc67502f5 --- /dev/null +++ b/docs/vocs/.claude/settings.local.json @@ -0,0 +1,8 @@ +{ + "permissions": { + "allow": [ + "Bash(git checkout:*)" + ], + "deny": [] + } +} \ No newline at end of file diff --git a/docs/vocs/CLAUDE.md b/docs/vocs/CLAUDE.md new file mode 100644 index 00000000000..98b57a5791f --- /dev/null +++ b/docs/vocs/CLAUDE.md @@ -0,0 +1,103 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Project Overview + +This is the **Reth documentation website** built with [Vocs](https://vocs.dev), a modern documentation framework. The site contains comprehensive documentation for Reth, the Ethereum execution client, including installation guides, CLI references, SDK documentation, and tutorials. + +## Repository Structure + +- **`docs/pages/`**: All documentation content in MDX format + - `cli/`: Command-line interface documentation and references + - `exex/`: Execution Extensions (ExEx) guides and examples + - `installation/`: Installation and setup guides + - `introduction/`: Introduction, benchmarks, and why-reth content + - `jsonrpc/`: JSON-RPC API documentation + - `run/`: Node running guides and configuration + - `sdk/`: SDK documentation and examples +- **`docs/snippets/`**: Code examples and snippets used in documentation +- **`sidebar.ts`**: Navigation configuration +- **`vocs.config.ts`**: Vocs configuration file + +## Essential Commands + +```bash +# Install dependencies +bun install + +# Start development server +bun run dev + +# Build for production +bun run build + +# Preview production build +bun run preview +``` + +## Development Workflow + +### Content Organization + +1. **MDX Files**: All content is written in MDX (Markdown + React components) +2. **Navigation**: Update `sidebar.ts` when adding new pages +3. **Code Examples**: Place reusable code snippets in `docs/snippets/` +4. 
**Assets**: Place images and static assets in `docs/public/` + +### Adding New Documentation + +1. Create new `.mdx` files in appropriate subdirectories under `docs/pages/` +2. Update `sidebar.ts` to include new pages in navigation +3. Use consistent heading structure and markdown formatting +4. Reference code examples from `docs/snippets/` when possible + +### Code Examples and Snippets + +- **Live Examples**: Use the snippets system to include actual runnable code +- **Rust Code**: Include cargo project examples in `docs/snippets/sources/` +- **CLI Examples**: Show actual command usage with expected outputs + +### Configuration + +- **Base Path**: Site deploys to `/reth` path (configured in `vocs.config.ts`) +- **Theme**: Custom accent colors for light/dark themes +- **Vite**: Uses Vite as the underlying build tool + +### Content Guidelines + +1. **Be Practical**: Focus on actionable guides and real-world examples +2. **Code First**: Show working code examples before explaining concepts +3. **Consistent Structure**: Follow existing page structures for consistency +4. **Cross-References**: Link between related pages and sections +5. **Keep Current**: Ensure documentation matches latest Reth features + +### File Naming Conventions + +- Use kebab-case for file and directory names +- Match URL structure to file structure +- Use descriptive names that reflect content purpose + +### Common Tasks + +**Adding a new CLI command documentation:** +1. Create `.mdx` file in `docs/pages/cli/reth/` +2. Add to sidebar navigation +3. Include usage examples and parameter descriptions + +**Adding a new guide:** +1. Create `.mdx` file in appropriate category +2. Update sidebar with new entry +3. Include practical examples and next steps + +**Updating code examples:** +1. Modify files in `docs/snippets/sources/` +2. Ensure examples compile and run correctly +3. Test that documentation references work properly + +## Development Notes + +- This is a TypeScript/React project using Vocs framework +- Content is primarily MDX with some TypeScript configuration +- Focus on clear, practical documentation that helps users succeed with Reth +- Maintain consistency with existing documentation style and structure \ No newline at end of file diff --git a/docs/vocs/README.md b/docs/vocs/README.md new file mode 100644 index 00000000000..3bb11a44a0a --- /dev/null +++ b/docs/vocs/README.md @@ -0,0 +1 @@ +This is a [Vocs](https://vocs.dev) project bootstrapped with the Vocs CLI. 
diff --git a/docs/vocs/bun.lockb b/docs/vocs/bun.lockb
new file mode 100755
index 00000000000..a975dd0d492
Binary files /dev/null and b/docs/vocs/bun.lockb differ
diff --git a/docs/vocs/docs/components/SdkShowcase.tsx b/docs/vocs/docs/components/SdkShowcase.tsx
new file mode 100644
index 00000000000..5f878206a84
--- /dev/null
+++ b/docs/vocs/docs/components/SdkShowcase.tsx
@@ -0,0 +1,88 @@
+import React from 'react'
+
+interface SdkProject {
+  name: string
+  description: string
+  loc: string
+  githubUrl: string
+  logoUrl?: string
+  company: string
+}
+
+const projects: SdkProject[] = [
+  {
+    name: 'Base Node',
+    description: "Coinbase's L2 scaling solution node implementation",
+    loc: '~3K',
+    githubUrl: 'https://github.com/base/node-reth',
+    company: 'Coinbase'
+  },
+  {
+    name: 'Bera Reth',
+    description: "Berachain's high-performance EVM node with custom features",
+    loc: '~1K',
+    githubUrl: 'https://github.com/berachain/bera-reth',
+    company: 'Berachain'
+  },
+  {
+    name: 'Reth Gnosis',
+    description: "Gnosis Chain's xDai-compatible execution client",
+    loc: '~5K',
+    githubUrl: 'https://github.com/gnosischain/reth_gnosis',
+    company: 'Gnosis'
+  },
+  {
+    name: 'Reth BSC',
+    description: "BNB Smart Chain execution client implementation",
+    loc: '~6K',
+    githubUrl: 'https://github.com/loocapro/reth-bsc',
+    company: 'Binance Smart Chain'
+  }
+]
+
+export function SdkShowcase() {
+  // Markup reconstructed from the surviving text; original styling attributes omitted.
+  return (
+    <div>
+      {projects.map((project, index) => (
+        <div key={index}>
+          {/* LoC Badge */}
+          <div>{project.loc} LoC</div>
+
+          {/* Content */}
+          <div>
+            <div>{project.name}</div>
+            <div>{project.company}</div>
+            <div>{project.description}</div>
+          </div>
+
+          {/* GitHub Link */}
+          <a href={project.githubUrl}>View on GitHub</a>
+        </div>
+      ))}
+    </div>
+  )
+}
\ No newline at end of file
diff --git a/docs/vocs/docs/components/TrustedBy.tsx b/docs/vocs/docs/components/TrustedBy.tsx
new file mode 100644
index 00000000000..ef50527f8ea
--- /dev/null
+++ b/docs/vocs/docs/components/TrustedBy.tsx
@@ -0,0 +1,49 @@
+import React from 'react'
+
+interface TrustedCompany {
+  name: string
+  logoUrl: string
+}
+
+const companies: TrustedCompany[] = [
+  {
+    name: 'Flashbots',
+    logoUrl: '/flashbots.png'
+  },
+  {
+    name: 'Coinbase',
+    logoUrl: '/coinbase.png'
+  },
+  {
+    name: 'Alchemy',
+    logoUrl: '/alchemy.png'
+  },
+  {
+    name: 'Succinct Labs',
+    logoUrl: '/succinct.png'
+  }
+]
+
+export function TrustedBy() {
+  // Markup reconstructed from the surviving text; original styling attributes omitted.
+  return (
+    <div>
+      {companies.map((company, index) => (
+        <div key={index}>
+          {/* Company Logo */}
+          <img src={company.logoUrl} alt={`${company.name} logo`} />
+        </div>
+      ))}
+    </div>
+  )
+}
\ No newline at end of file
diff --git a/docs/vocs/docs/pages/cli/SUMMARY.mdx b/docs/vocs/docs/pages/cli/SUMMARY.mdx
new file mode 100644
index 00000000000..330f32b3fd2
--- /dev/null
+++ b/docs/vocs/docs/pages/cli/SUMMARY.mdx
@@ -0,0 +1,47 @@
+- [`reth`](/cli/reth)
+  - [`reth node`](/cli/reth/node)
+  - [`reth init`](/cli/reth/init)
+  - [`reth init-state`](/cli/reth/init-state)
+  - [`reth import`](/cli/reth/import)
+  - [`reth import-era`](/cli/reth/import-era)
+  - [`reth dump-genesis`](/cli/reth/dump-genesis)
+  - [`reth db`](/cli/reth/db)
+    - [`reth db stats`](/cli/reth/db/stats)
+    - [`reth db list`](/cli/reth/db/list)
+    - [`reth db checksum`](/cli/reth/db/checksum)
+    - [`reth db diff`](/cli/reth/db/diff)
+    - [`reth db get`](/cli/reth/db/get)
+      - [`reth db get mdbx`](/cli/reth/db/get/mdbx)
+      - [`reth db get static-file`](/cli/reth/db/get/static-file)
+    - [`reth db drop`](/cli/reth/db/drop)
+    - [`reth db clear`](/cli/reth/db/clear)
+      - [`reth db clear mdbx`](/cli/reth/db/clear/mdbx)
+      - [`reth db clear static-file`](/cli/reth/db/clear/static-file)
+    - [`reth db version`](/cli/reth/db/version)
+    - [`reth db path`](/cli/reth/db/path)
+  - [`reth download`](/cli/reth/download)
+  - [`reth stage`](/cli/reth/stage)
+    - [`reth stage run`](/cli/reth/stage/run)
+    - [`reth stage drop`](/cli/reth/stage/drop)
+    - [`reth stage dump`](/cli/reth/stage/dump)
+      - [`reth stage dump execution`](/cli/reth/stage/dump/execution)
+      - [`reth stage dump storage-hashing`](/cli/reth/stage/dump/storage-hashing)
+      - [`reth stage dump account-hashing`](/cli/reth/stage/dump/account-hashing)
+      - [`reth stage dump merkle`](/cli/reth/stage/dump/merkle)
+    - [`reth stage unwind`](/cli/reth/stage/unwind)
+      - [`reth stage unwind to-block`](/cli/reth/stage/unwind/to-block)
+      - [`reth stage unwind num-blocks`](/cli/reth/stage/unwind/num-blocks)
+  - [`reth p2p`](/cli/reth/p2p)
+    - [`reth p2p header`](/cli/reth/p2p/header)
+    - [`reth p2p body`](/cli/reth/p2p/body)
+    - [`reth p2p rlpx`](/cli/reth/p2p/rlpx)
+      - [`reth p2p rlpx ping`](/cli/reth/p2p/rlpx/ping)
+  - [`reth config`](/cli/reth/config)
+  - [`reth debug`](/cli/reth/debug)
+    - [`reth debug execution`](/cli/reth/debug/execution)
+    - [`reth debug merkle`](/cli/reth/debug/merkle)
+    - [`reth debug in-memory-merkle`](/cli/reth/debug/in-memory-merkle)
+    - [`reth debug build-block`](/cli/reth/debug/build-block)
+  - [`reth recover`](/cli/reth/recover)
+    - [`reth recover storage-tries`](/cli/reth/recover/storage-tries)
+  - [`reth prune`](/cli/reth/prune)
diff --git a/book/cli/cli.md b/docs/vocs/docs/pages/cli/cli.mdx
similarity index 83%
rename from book/cli/cli.md
rename to docs/vocs/docs/pages/cli/cli.mdx
index ef1a98af525..20046ce9e77 100644
--- a/book/cli/cli.md
+++ b/docs/vocs/docs/pages/cli/cli.mdx
@@ -1,7 +1,9 @@
+import Summary from './SUMMARY.mdx';
+
 # CLI Reference

 The Reth node is operated via the CLI by running the `reth node` command. To stop it, press `ctrl-c`. You may need to wait a bit as Reth tears down existing p2p connections or other cleanup tasks.
 However, Reth has more commands:

-{{#include ./SUMMARY.md}}
+<Summary />
diff --git a/book/cli/op-reth.md b/docs/vocs/docs/pages/cli/op-reth.md
similarity index 100%
rename from book/cli/op-reth.md
rename to docs/vocs/docs/pages/cli/op-reth.md
diff --git a/book/cli/reth.md b/docs/vocs/docs/pages/cli/reth.mdx
similarity index 100%
rename from book/cli/reth.md
rename to docs/vocs/docs/pages/cli/reth.mdx
diff --git a/book/cli/reth/config.md b/docs/vocs/docs/pages/cli/reth/config.mdx
similarity index 100%
rename from book/cli/reth/config.md
rename to docs/vocs/docs/pages/cli/reth/config.mdx
diff --git a/book/cli/reth/db.md b/docs/vocs/docs/pages/cli/reth/db.mdx
similarity index 100%
rename from book/cli/reth/db.md
rename to docs/vocs/docs/pages/cli/reth/db.mdx
diff --git a/book/cli/reth/db/checksum.md b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx
similarity index 100%
rename from book/cli/reth/db/checksum.md
rename to docs/vocs/docs/pages/cli/reth/db/checksum.mdx
diff --git a/book/cli/reth/db/clear.md b/docs/vocs/docs/pages/cli/reth/db/clear.mdx
similarity index 100%
rename from book/cli/reth/db/clear.md
rename to docs/vocs/docs/pages/cli/reth/db/clear.mdx
diff --git a/book/cli/reth/db/clear/mdbx.md b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx
similarity index 100%
rename from book/cli/reth/db/clear/mdbx.md
rename to docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx
diff --git a/book/cli/reth/db/clear/static-file.md b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx
similarity index 100%
rename from book/cli/reth/db/clear/static-file.md
rename to docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx
diff --git a/book/cli/reth/db/diff.md b/docs/vocs/docs/pages/cli/reth/db/diff.mdx
similarity index 100%
rename from book/cli/reth/db/diff.md
rename to docs/vocs/docs/pages/cli/reth/db/diff.mdx
diff --git a/book/cli/reth/db/drop.md b/docs/vocs/docs/pages/cli/reth/db/drop.mdx
similarity index 100%
rename from book/cli/reth/db/drop.md
rename to docs/vocs/docs/pages/cli/reth/db/drop.mdx
diff --git a/book/cli/reth/db/get.md b/docs/vocs/docs/pages/cli/reth/db/get.mdx
similarity index 100%
rename from book/cli/reth/db/get.md
rename to docs/vocs/docs/pages/cli/reth/db/get.mdx
diff --git a/book/cli/reth/db/get/mdbx.md b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx
similarity index 100%
rename from book/cli/reth/db/get/mdbx.md
rename to docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx
diff --git a/book/cli/reth/db/get/static-file.md b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx
similarity index 100%
rename from book/cli/reth/db/get/static-file.md
rename to docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx
diff --git a/book/cli/reth/db/list.md b/docs/vocs/docs/pages/cli/reth/db/list.mdx
similarity index 100%
rename from book/cli/reth/db/list.md
rename to docs/vocs/docs/pages/cli/reth/db/list.mdx
diff --git a/book/cli/reth/db/path.md b/docs/vocs/docs/pages/cli/reth/db/path.mdx
similarity index 100%
rename from book/cli/reth/db/path.md
rename to docs/vocs/docs/pages/cli/reth/db/path.mdx
diff --git a/book/cli/reth/db/stats.md b/docs/vocs/docs/pages/cli/reth/db/stats.mdx
similarity index 100%
rename from book/cli/reth/db/stats.md
rename to docs/vocs/docs/pages/cli/reth/db/stats.mdx
diff --git a/book/cli/reth/db/version.md b/docs/vocs/docs/pages/cli/reth/db/version.mdx
similarity index 100%
rename from book/cli/reth/db/version.md
rename to docs/vocs/docs/pages/cli/reth/db/version.mdx
diff --git a/book/cli/reth/debug.md b/docs/vocs/docs/pages/cli/reth/debug.mdx
similarity index 100%
rename from
book/cli/reth/debug.md rename to docs/vocs/docs/pages/cli/reth/debug.mdx diff --git a/book/cli/reth/debug/build-block.md b/docs/vocs/docs/pages/cli/reth/debug/build-block.mdx similarity index 100% rename from book/cli/reth/debug/build-block.md rename to docs/vocs/docs/pages/cli/reth/debug/build-block.mdx diff --git a/book/cli/reth/debug/execution.md b/docs/vocs/docs/pages/cli/reth/debug/execution.mdx similarity index 100% rename from book/cli/reth/debug/execution.md rename to docs/vocs/docs/pages/cli/reth/debug/execution.mdx diff --git a/book/cli/reth/debug/in-memory-merkle.md b/docs/vocs/docs/pages/cli/reth/debug/in-memory-merkle.mdx similarity index 100% rename from book/cli/reth/debug/in-memory-merkle.md rename to docs/vocs/docs/pages/cli/reth/debug/in-memory-merkle.mdx diff --git a/book/cli/reth/debug/merkle.md b/docs/vocs/docs/pages/cli/reth/debug/merkle.mdx similarity index 100% rename from book/cli/reth/debug/merkle.md rename to docs/vocs/docs/pages/cli/reth/debug/merkle.mdx diff --git a/book/cli/reth/download.md b/docs/vocs/docs/pages/cli/reth/download.mdx similarity index 100% rename from book/cli/reth/download.md rename to docs/vocs/docs/pages/cli/reth/download.mdx diff --git a/book/cli/reth/dump-genesis.md b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx similarity index 100% rename from book/cli/reth/dump-genesis.md rename to docs/vocs/docs/pages/cli/reth/dump-genesis.mdx diff --git a/book/cli/reth/import-era.md b/docs/vocs/docs/pages/cli/reth/import-era.mdx similarity index 100% rename from book/cli/reth/import-era.md rename to docs/vocs/docs/pages/cli/reth/import-era.mdx diff --git a/book/cli/reth/import.md b/docs/vocs/docs/pages/cli/reth/import.mdx similarity index 100% rename from book/cli/reth/import.md rename to docs/vocs/docs/pages/cli/reth/import.mdx diff --git a/book/cli/reth/init-state.md b/docs/vocs/docs/pages/cli/reth/init-state.mdx similarity index 100% rename from book/cli/reth/init-state.md rename to docs/vocs/docs/pages/cli/reth/init-state.mdx diff --git a/book/cli/reth/init.md b/docs/vocs/docs/pages/cli/reth/init.mdx similarity index 100% rename from book/cli/reth/init.md rename to docs/vocs/docs/pages/cli/reth/init.mdx diff --git a/book/cli/reth/node.md b/docs/vocs/docs/pages/cli/reth/node.mdx similarity index 97% rename from book/cli/reth/node.md rename to docs/vocs/docs/pages/cli/reth/node.mdx index f97980b34c6..c0679868ea3 100644 --- a/book/cli/reth/node.md +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -619,6 +619,11 @@ Debug: --debug.healthy-node-rpc-url The RPC URL of a healthy node to use for comparing invalid block hook results against. + Debug setting that enables execution witness comparison for troubleshooting bad blocks. + When enabled, the node will collect execution witnesses from the specified source and + compare them against local execution when a bad block is encountered, helping identify + discrepancies in state execution. + Database: --db.log-level Database logging level. Levels higher than "notice" require a debug build @@ -721,6 +726,15 @@ Pruning: --prune.storagehistory.before Prune storage history before the specified block number. The specified block number is not pruned + --prune.bodies.pre-merge + Prune bodies before the merge block + + --prune.bodies.distance + Prune bodies before the `head-N` block number. In other words, keep last N + 1 blocks + + --prune.bodies.before + Prune storage history before the specified block number. 
The specified block number is not pruned + Engine: --engine.persistence-threshold Configure persistence threshold for engine experimental diff --git a/book/cli/reth/p2p.md b/docs/vocs/docs/pages/cli/reth/p2p.mdx similarity index 100% rename from book/cli/reth/p2p.md rename to docs/vocs/docs/pages/cli/reth/p2p.mdx diff --git a/book/cli/reth/p2p/body.md b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx similarity index 100% rename from book/cli/reth/p2p/body.md rename to docs/vocs/docs/pages/cli/reth/p2p/body.mdx diff --git a/book/cli/reth/p2p/header.md b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx similarity index 100% rename from book/cli/reth/p2p/header.md rename to docs/vocs/docs/pages/cli/reth/p2p/header.mdx diff --git a/book/cli/reth/p2p/rlpx.md b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx similarity index 100% rename from book/cli/reth/p2p/rlpx.md rename to docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx diff --git a/book/cli/reth/p2p/rlpx/ping.md b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx similarity index 100% rename from book/cli/reth/p2p/rlpx/ping.md rename to docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx diff --git a/book/cli/reth/prune.md b/docs/vocs/docs/pages/cli/reth/prune.mdx similarity index 100% rename from book/cli/reth/prune.md rename to docs/vocs/docs/pages/cli/reth/prune.mdx diff --git a/book/cli/reth/recover.md b/docs/vocs/docs/pages/cli/reth/recover.mdx similarity index 100% rename from book/cli/reth/recover.md rename to docs/vocs/docs/pages/cli/reth/recover.mdx diff --git a/book/cli/reth/recover/storage-tries.md b/docs/vocs/docs/pages/cli/reth/recover/storage-tries.mdx similarity index 100% rename from book/cli/reth/recover/storage-tries.md rename to docs/vocs/docs/pages/cli/reth/recover/storage-tries.mdx diff --git a/book/cli/reth/stage.md b/docs/vocs/docs/pages/cli/reth/stage.mdx similarity index 100% rename from book/cli/reth/stage.md rename to docs/vocs/docs/pages/cli/reth/stage.mdx diff --git a/book/cli/reth/stage/drop.md b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx similarity index 100% rename from book/cli/reth/stage/drop.md rename to docs/vocs/docs/pages/cli/reth/stage/drop.mdx diff --git a/book/cli/reth/stage/dump.md b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx similarity index 100% rename from book/cli/reth/stage/dump.md rename to docs/vocs/docs/pages/cli/reth/stage/dump.mdx diff --git a/book/cli/reth/stage/dump/account-hashing.md b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx similarity index 100% rename from book/cli/reth/stage/dump/account-hashing.md rename to docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx diff --git a/book/cli/reth/stage/dump/execution.md b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx similarity index 100% rename from book/cli/reth/stage/dump/execution.md rename to docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx diff --git a/book/cli/reth/stage/dump/merkle.md b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx similarity index 100% rename from book/cli/reth/stage/dump/merkle.md rename to docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx diff --git a/book/cli/reth/stage/dump/storage-hashing.md b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx similarity index 100% rename from book/cli/reth/stage/dump/storage-hashing.md rename to docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx diff --git a/book/cli/reth/stage/run.md b/docs/vocs/docs/pages/cli/reth/stage/run.mdx similarity index 100% rename from book/cli/reth/stage/run.md rename to 
docs/vocs/docs/pages/cli/reth/stage/run.mdx diff --git a/book/cli/reth/stage/unwind.md b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx similarity index 100% rename from book/cli/reth/stage/unwind.md rename to docs/vocs/docs/pages/cli/reth/stage/unwind.mdx diff --git a/book/cli/reth/stage/unwind/num-blocks.md b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx similarity index 100% rename from book/cli/reth/stage/unwind/num-blocks.md rename to docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx diff --git a/book/cli/reth/stage/unwind/to-block.md b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx similarity index 100% rename from book/cli/reth/stage/unwind/to-block.md rename to docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx diff --git a/book/cli/reth/test-vectors/tables.md b/docs/vocs/docs/pages/cli/reth/test-vectors/tables.mdx similarity index 100% rename from book/cli/reth/test-vectors/tables.md rename to docs/vocs/docs/pages/cli/reth/test-vectors/tables.mdx diff --git a/book/developers/exex/hello-world.md b/docs/vocs/docs/pages/exex/hello-world.mdx similarity index 70% rename from book/developers/exex/hello-world.md rename to docs/vocs/docs/pages/exex/hello-world.mdx index c1f3e5af944..30eac91ee99 100644 --- a/book/developers/exex/hello-world.md +++ b/docs/vocs/docs/pages/exex/hello-world.mdx @@ -1,3 +1,7 @@ +--- +description: Example of a minimal Hello World ExEx in Reth. +--- + # Hello World Let's write a simple "Hello World" ExEx that emits a log every time a new chain of blocks is committed, reverted, or reorged. @@ -14,15 +18,15 @@ cd my-exex And add Reth as a dependency in `Cargo.toml` ```toml -{{#include ../../sources/exex/hello-world/Cargo.toml}} +// [!include ~/snippets/sources/exex/hello-world/Cargo.toml] ``` ### Default Reth node Now, let's jump to our `main.rs` and start by initializing and launching a default Reth node -```rust,norun,noplayground,ignore -{{#include ../../sources/exex/hello-world/src/bin/1.rs}} +```rust +// [!include ~/snippets/sources/exex/hello-world/src/bin/1.rs] ``` You can already test that it works by running the binary and initializing the Holesky node in a custom datadir @@ -42,8 +46,8 @@ $ cargo run -- init --chain holesky --datadir data The simplest ExEx is just an async function that never returns. We need to install it into our node -```rust,norun,noplayground,ignore -{{#include ../../sources/exex/hello-world/src/bin/2.rs}} +```rust +// [!include ~/snippets/sources/exex/hello-world/src/bin/2.rs] ``` See that unused `_ctx`? That's the context that we'll use to listen to new notifications coming from the main node, @@ -63,17 +67,17 @@ If you try running a node with an ExEx that exits, the node will exit as well. Now, let's extend our simplest ExEx and start actually listening to new notifications, log them, and send events back to the main node -```rust,norun,noplayground,ignore -{{#include ../../sources/exex/hello-world/src/bin/3.rs}} +```rust +// [!include ~/snippets/sources/exex/hello-world/src/bin/3.rs] ``` Woah, there's a lot of new stuff here! Let's go through it step by step: -- First, we've added a `while let Some(notification) = ctx.notifications.recv().await` loop that waits for new notifications to come in. - - The main node is responsible for sending notifications to the ExEx, so we're waiting for them to come in. -- Next, we've added a `match ¬ification { ... }` block that matches on the type of the notification. 
- - In each case, we're logging the notification and the corresponding block range, be it a chain commit, revert, or reorg. -- Finally, we're checking if the notification contains a committed chain, and if it does, we're sending a `ExExEvent::FinishedHeight` event back to the main node using the `ctx.events.send` method. +- First, we've added a `while let Some(notification) = ctx.notifications.recv().await` loop that waits for new notifications to come in. + - The main node is responsible for sending notifications to the ExEx, so we're waiting for them to come in. +- Next, we've added a `match ¬ification { ... }` block that matches on the type of the notification. + - In each case, we're logging the notification and the corresponding block range, be it a chain commit, revert, or reorg. +- Finally, we're checking if the notification contains a committed chain, and if it does, we're sending a `ExExEvent::FinishedHeight` event back to the main node using the `ctx.events.send` method.
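Condensed into code, the loop those bullets describe looks roughly like the following. This paraphrases the minimal ExEx the chapter builds; exact import paths and the payload of `ExExEvent::FinishedHeight` vary between reth versions, so treat it as a hedged sketch rather than the chapter's literal source:

```rust
use reth::api::FullNodeComponents;
use reth_exex::{ExExContext, ExExEvent, ExExNotification};
use reth_tracing::tracing::info;

async fn my_exex<Node: FullNodeComponents>(mut ctx: ExExContext<Node>) -> eyre::Result<()> {
    // Wait for notifications sent by the main node.
    while let Some(notification) = ctx.notifications.recv().await {
        // Log the affected block range for commits, reorgs, and reverts.
        match &notification {
            ExExNotification::ChainCommitted { new } => {
                info!(committed_chain = ?new.range(), "Received commit");
            }
            ExExNotification::ChainReorged { old, new } => {
                info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg");
            }
            ExExNotification::ChainReverted { old } => {
                info!(reverted_chain = ?old.range(), "Received revert");
            }
        };

        // Tell the node the highest block this ExEx has fully processed.
        if let Some(committed_chain) = notification.committed_chain() {
            ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?;
        }
    }
    Ok(())
}
```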
@@ -88,4 +92,4 @@ What we've arrived at is the [minimal ExEx example](https://github.com/paradigmx ## What's next? -Let's do something a bit more interesting, and see how you can [keep track of some state](./tracking-state.md) inside your ExEx. +Let's do something a bit more interesting, and see how you can [keep track of some state](/exex/tracking-state) inside your ExEx. diff --git a/book/developers/exex/how-it-works.md b/docs/vocs/docs/pages/exex/how-it-works.mdx similarity index 67% rename from book/developers/exex/how-it-works.md rename to docs/vocs/docs/pages/exex/how-it-works.mdx index 7f80d71cbff..21162a75620 100644 --- a/book/developers/exex/how-it-works.md +++ b/docs/vocs/docs/pages/exex/how-it-works.mdx @@ -1,3 +1,7 @@ +--- +description: How Execution Extensions (ExExes) work in Reth. +--- + # How do ExExes work? ExExes are just [Futures](https://doc.rust-lang.org/std/future/trait.Future.html) that run indefinitely alongside Reth @@ -7,12 +11,13 @@ An ExEx is usually driven by and acts on new notifications about chain commits, They are installed into the node by using the [node builder](https://reth.rs/docs/reth/builder/struct.NodeBuilder.html). Reth manages the lifecycle of all ExExes, including: -- Polling ExEx futures -- Sending [notifications](https://reth.rs/docs/reth_exex/enum.ExExNotification.html) about new chain, reverts, - and reorgs from historical and live sync -- Processing [events](https://reth.rs/docs/reth_exex/enum.ExExEvent.html) emitted by ExExes -- Pruning (in case of a full or pruned node) only the data that has been processed by all ExExes -- Shutting ExExes down when the node is shut down + +- Polling ExEx futures +- Sending [notifications](https://reth.rs/docs/reth_exex/enum.ExExNotification.html) about new chain, reverts, + and reorgs from historical and live sync +- Processing [events](https://reth.rs/docs/reth_exex/enum.ExExEvent.html) emitted by ExExes +- Pruning (in case of a full or pruned node) only the data that has been processed by all ExExes +- Shutting ExExes down when the node is shut down ## Pruning diff --git a/book/developers/exex/exex.md b/docs/vocs/docs/pages/exex/overview.mdx similarity index 62% rename from book/developers/exex/exex.md rename to docs/vocs/docs/pages/exex/overview.mdx index 25372a7c922..abfcc8f3b82 100644 --- a/book/developers/exex/exex.md +++ b/docs/vocs/docs/pages/exex/overview.mdx @@ -1,9 +1,13 @@ +--- +description: Introduction to Execution Extensions (ExEx) in Reth. +--- + # Execution Extensions (ExEx) ## What are Execution Extensions? Execution Extensions (or ExExes, for short) allow developers to build their own infrastructure that relies on Reth -as a base for driving the chain (be it [Ethereum](../../run/mainnet.md) or [OP Stack](../../run/optimism.md)) forward. +as a base for driving the chain (be it [Ethereum](/run/ethereum) or [OP Stack](/run/opstack)) forward. An Execution Extension is a task that derives its state from changes in Reth's state. Some examples of such state derivations are rollups, bridges, and indexers. @@ -18,14 +22,18 @@ Read more about things you can build with Execution Extensions in the [Paradigm Execution Extensions are not separate processes that connect to the main Reth node process. Instead, ExExes are compiled into the same binary as Reth, and run alongside it, using shared memory for communication. -If you want to build an Execution Extension that sends data into a separate process, check out the [Remote](./remote.md) chapter. 
+If you want to build an Execution Extension that sends data into a separate process, check out the [Remote](/exex/remote) chapter. ## How do I build an Execution Extension? Let's dive into how to build our own ExEx from scratch, add tests for it, and run it on the Holesky testnet. -1. [How do ExExes work?](./how-it-works.md) -1. [Hello World](./hello-world.md) -1. [Tracking State](./tracking-state.md) -1. [Remote](./remote.md) +1. [How do ExExes work?](/exex/how-it-works) +1. [Hello World](/exex/hello-world) +1. [Tracking State](/exex/tracking-state) +1. [Remote](/exex/remote) + +:::tip +For more practical examples and ready-to-use ExEx implementations, check out the [reth-exex-examples](https://github.com/paradigmxyz/reth-exex-examples) repository which contains various ExEx examples including indexers, bridges, and other state derivation patterns. +::: diff --git a/book/developers/exex/remote.md b/docs/vocs/docs/pages/exex/remote.mdx similarity index 76% rename from book/developers/exex/remote.md rename to docs/vocs/docs/pages/exex/remote.mdx index 0ec704308ff..772b56d7fd7 100644 --- a/book/developers/exex/remote.md +++ b/docs/vocs/docs/pages/exex/remote.mdx @@ -1,10 +1,15 @@ +--- +description: Building a remote ExEx that communicates via gRPC. +--- + # Remote Execution Extensions In this chapter, we will learn how to create an ExEx that emits all notifications to an external process. We will use [Tonic](https://github.com/hyperium/tonic) to create a gRPC server and a client. -- The server binary will have the Reth client, our ExEx and the gRPC server. -- The client binary will have the gRPC client that connects to the server. + +- The server binary will have the Reth client, our ExEx and the gRPC server. +- The client binary will have the gRPC client that connects to the server. ## Prerequisites @@ -21,11 +26,11 @@ $ cargo new --lib exex-remote $ cd exex-remote ``` -We will also need a bunch of dependencies. Some of them you know from the [Hello World](./hello-world.md) chapter, +We will also need a bunch of dependencies. Some of them you know from the [Hello World](/exex/hello-world) chapter, but some of specific to what we need now. ```toml -{{#include ../../sources/exex/remote/Cargo.toml}} +// [!include ~/snippets/sources/exex/remote/Cargo.toml] ``` We also added a build dependency for Tonic. We will use it to generate the Rust code for our @@ -33,8 +38,9 @@ Protobuf definitions at compile time. Read more about using Tonic in the [introductory tutorial](https://github.com/hyperium/tonic/blob/6a213e9485965db0628591e30577ed81cdaeaf2b/examples/helloworld-tutorial.md). Also, we now have two separate binaries: -- `exex` is the server binary that will run the ExEx and the gRPC server. -- `consumer` is the client binary that will connect to the server and receive notifications. + +- `exex` is the server binary that will run the ExEx and the gRPC server. +- `consumer` is the client binary that will connect to the server and receive notifications. ### Create the Protobuf definitions @@ -53,12 +59,13 @@ For an example of a full schema, see the [Remote ExEx](https://github.com/paradi
```protobuf -{{#include ../../sources/exex/remote/proto/exex.proto}} +// [!include ~/snippets/sources/exex/remote/proto/exex.proto] ``` To instruct Tonic to generate the Rust code using this `.proto`, add the following lines to your `lib.rs` file: -```rust,norun,noplayground,ignore -{{#include ../../sources/exex/remote/src/lib.rs}} + +```rust +// [!include ~/snippets/sources/exex/remote/src/lib.rs] ``` ## ExEx and gRPC server @@ -70,8 +77,8 @@ We will now create the ExEx and the gRPC server in our `src/exex.rs` file. Let's create a minimal gRPC server that listens on the port `:10000`, and spawn it using the [NodeBuilder](https://reth.rs/docs/reth/builder/struct.NodeBuilder.html)'s [task executor](https://reth.rs/docs/reth/tasks/struct.TaskExecutor.html). -```rust,norun,noplayground,ignore -{{#include ../../sources/exex/remote/src/exex_1.rs}} +```rust +// [!include ~/snippets/sources/exex/remote/src/exex_1.rs] ``` Currently, it does not send anything on the stream. @@ -81,8 +88,8 @@ to send new `ExExNotification` on it. Let's create this channel in the `main` function where we will have both gRPC server and ExEx initiated, and save the sender part (that way we will be able to create new receivers) of this channel in our gRPC server. -```rust,norun,noplayground,ignore -{{#include ../../sources/exex/remote/src/exex_2.rs}} +```rust +// [!include ~/snippets/sources/exex/remote/src/exex_2.rs] ``` And with that, we're ready to handle incoming notifications, serialize them with [bincode](https://docs.rs/bincode/) @@ -91,8 +98,8 @@ and send back to the client. For each incoming request, we spawn a separate tokio task that will run in the background, and then return the stream receiver to the client. -```rust,norun,noplayground,ignore -{{#rustdoc_include ../../sources/exex/remote/src/exex_3.rs:snippet}} +```rust +// [!include ~/snippets/sources/exex/remote/src/exex_3.rs] ``` That's it for the gRPC server part! It doesn't receive anything on the `notifications` channel yet, @@ -110,25 +117,24 @@ Don't forget to emit `ExExEvent::FinishedHeight` -```rust,norun,noplayground,ignore -{{#rustdoc_include ../../sources/exex/remote/src/exex_4.rs:snippet}} +```rust +// [!include ~/snippets/sources/exex/remote/src/exex_4.rs] ``` All that's left is to connect all pieces together: install our ExEx in the node and pass the sender part of communication channel to it. -```rust,norun,noplayground,ignore -{{#rustdoc_include ../../sources/exex/remote/src/exex.rs:snippet}} +```rust +// [!include ~/snippets/sources/exex/remote/src/exex.rs] ``` ### Full `exex.rs` code
-Click to expand - -```rust,norun,noplayground,ignore -{{#include ../../sources/exex/remote/src/exex.rs}} -``` + Click to expand + ```rust + // [!include ~/snippets/sources/exex/remote/src/exex.rs] + ```
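One detail worth pulling out of the prose above: the gRPC service stores only the sender half of the channel and mints a fresh receiver per subscriber, which is exactly the semantics of a tokio broadcast channel. A reduced sketch of that wiring, with a stand-in payload type instead of a bincode-serialized `ExExNotification` and no gRPC machinery:

```rust
use tokio::sync::broadcast;

/// Stand-in for a serialized `ExExNotification`.
#[derive(Clone, Debug, PartialEq)]
struct Notification(u64);

struct ExExService {
    /// Keeping the sender lets us create any number of receivers on demand.
    notifications: broadcast::Sender<Notification>,
}

impl ExExService {
    /// Each incoming `Subscribe` request gets an independent receiver.
    fn subscribe(&self) -> broadcast::Receiver<Notification> {
        self.notifications.subscribe()
    }
}

#[tokio::main]
async fn main() {
    let (tx, _rx) = broadcast::channel(16);
    let service = ExExService { notifications: tx.clone() };

    // A "client" subscribes; the "ExEx" publishes; the client observes it.
    let mut client = service.subscribe();
    tx.send(Notification(1)).unwrap();
    assert_eq!(client.recv().await.unwrap(), Notification(1));
}
```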
## Consumer @@ -143,8 +149,8 @@ because notifications can get very heavy -```rust,norun,noplayground,ignore -{{#include ../../sources/exex/remote/src/consumer.rs}} +```rust +// [!include ~/snippets/sources/exex/remote/src/consumer.rs] ``` ## Running @@ -162,4 +168,4 @@ And in the other, we will run our consumer: cargo run --bin consumer --release ``` - +![remote_exex](/remote_exex.png) diff --git a/book/developers/exex/tracking-state.md b/docs/vocs/docs/pages/exex/tracking-state.mdx similarity index 61% rename from book/developers/exex/tracking-state.md rename to docs/vocs/docs/pages/exex/tracking-state.mdx index d2a9fe6ca3e..fb3486e7fab 100644 --- a/book/developers/exex/tracking-state.md +++ b/docs/vocs/docs/pages/exex/tracking-state.mdx @@ -1,8 +1,12 @@ +--- +description: How to track state in a custom ExEx. +--- + # Tracking State In this chapter, we'll learn how to keep track of some state inside our ExEx. -Let's continue with our Hello World example from the [previous chapter](./hello-world.md). +Let's continue with our Hello World example from the [previous chapter](/exex/hello-world). ### Turning ExEx into a struct @@ -18,8 +22,8 @@ because you can't access variables inside the function to assert the state of yo -```rust,norun,noplayground,ignore -{{#include ../../sources/exex/tracking-state/src/bin/1.rs}} +```rust +// [!include ~/snippets/sources/exex/tracking-state/src/bin/1.rs] ``` For those who are not familiar with how async Rust works on a lower level, that may seem scary, @@ -27,7 +31,7 @@ but let's unpack what's going on here: 1. Our ExEx is now a `struct` that contains the context and implements the `Future` trait. It's now pollable (hence `await`-able). 1. We can't use `self` directly inside our `poll` method, and instead need to acquire a mutable reference to the data inside of the `Pin`. - Read more about pinning in [the book](https://rust-lang.github.io/async-book/04_pinning/01_chapter.html). + Read more about pinning in [the book](https://rust-lang.github.io/async-book/part-reference/pinning.html). 1. We also can't use `await` directly inside `poll`, and instead need to poll futures manually. We wrap the call to `poll_recv(cx)` into a [`ready!`](https://doc.rust-lang.org/std/task/macro.ready.html) macro, so that if the channel of notifications has no value ready, we will instantly return `Poll::Pending` from our Future. @@ -39,23 +43,25 @@ With all that done, we're now free to add more fields to our `MyExEx` struct, an Our ExEx will count the number of transactions in each block and log it to the console. -```rust,norun,noplayground,ignore -{{#include ../../sources/exex/tracking-state/src/bin/2.rs}} +```rust +// [!include ~/snippets/sources/exex/tracking-state/src/bin/2.rs] ``` As you can see, we added two fields to our ExEx struct: -- `first_block` to keep track of the first block that was committed since the start of the ExEx. -- `transactions` to keep track of the total number of transactions committed, accounting for reorgs and reverts. + +- `first_block` to keep track of the first block that was committed since the start of the ExEx. +- `transactions` to keep track of the total number of transactions committed, accounting for reorgs and reverts. We also changed our `match` block to two `if` clauses: -- First one checks if there's a reverted chain using `notification.reverted_chain()`. If there is: - - We subtract the number of transactions in the reverted chain from the total number of transactions. 
- - It's important to do the `saturating_sub` here, because if we just started our node and - instantly received a reorg, our `transactions` field will still be zero. -- Second one checks if there's a committed chain using `notification.committed_chain()`. If there is: - - We update the `first_block` field to the first block of the committed chain. - - We add the number of transactions in the committed chain to the total number of transactions. - - We send a `FinishedHeight` event back to the main node. + +- First one checks if there's a reverted chain using `notification.reverted_chain()`. If there is: + - We subtract the number of transactions in the reverted chain from the total number of transactions. + - It's important to do the `saturating_sub` here, because if we just started our node and + instantly received a reorg, our `transactions` field will still be zero. +- Second one checks if there's a committed chain using `notification.committed_chain()`. If there is: + - We update the `first_block` field to the first block of the committed chain. + - We add the number of transactions in the committed chain to the total number of transactions. + - We send a `FinishedHeight` event back to the main node. Finally, on every notification, we log the total number of transactions and the first block that was committed since the start of the ExEx. diff --git a/docs/vocs/docs/pages/index.mdx b/docs/vocs/docs/pages/index.mdx new file mode 100644 index 00000000000..5e65d0695ce --- /dev/null +++ b/docs/vocs/docs/pages/index.mdx @@ -0,0 +1,162 @@ +--- +content: + width: 100% +layout: landing +showLogo: false +title: Reth +description: Secure, performant and modular node implementation that supports both Ethereum and OP-Stack chains. +--- + +import { HomePage, Sponsors } from "vocs/components"; +import { SdkShowcase } from "../components/SdkShowcase"; +import { TrustedBy } from "../components/TrustedBy"; + +
+{/* Landing hero: "Reth" logo and tagline "Secure, performant, and modular blockchain SDK and node." with buttons: Run a Node, Build a Node, Why Reth? */}
+:::code-group
+
+```bash [Run a Node]
+# Install the binary
+brew install paradigmxyz/brew/reth
+
+# Run the node with JSON-RPC enabled
+reth node --http --http.api eth,trace
+```
+
+```rust [Build a Node]
+// .. snip ..
+let handle = node_builder
+    .with_types::<EthereumNode>()
+    .with_components(EthereumNode::components())
+    .with_add_ons(EthereumAddOns::default())
+    .launch()
+    .await?;
+```
+
+:::
+
+{/* GitHub stats: stars 4.7K, contributors 580+ */}
+{/* Feature cards: "Institutional Security": Run reliable staking nodes trusted by Coinbase Staking. "Performant": Sync faster with optimal transaction processing. */}
+ +## Trusted by the Best + +Leading infra companies use Reth for MEV applications, staking, RPC services and generating zero-knowledge proofs. + +
+<TrustedBy />
+
+## Built with Reth SDK
+
+Production chains and networks powered by Reth's modular architecture. These nodes are built using existing components without forking, saving significant engineering time while improving maintainability.
+
+<SdkShowcase />
+
+## Supporters
+
+<Sponsors />
+
diff --git a/book/installation/binaries.md b/docs/vocs/docs/pages/installation/binaries.mdx similarity index 90% rename from book/installation/binaries.md rename to docs/vocs/docs/pages/installation/binaries.mdx index fc741805cd9..56c5cf2bacc 100644 --- a/book/installation/binaries.md +++ b/docs/vocs/docs/pages/installation/binaries.mdx @@ -1,3 +1,7 @@ +--- +description: Instructions for installing Reth using pre-built binaries for Windows, macOS, and Linux, including Homebrew and Arch Linux AUR options. Explains how to verify binary signatures and provides details about the release signing key. +--- + # Binaries [**Archives of precompiled binaries of reth are available for Windows, macOS and Linux.**](https://github.com/paradigmxyz/reth/releases) They are static executables. Users of platforms not explicitly listed below should download one of these archives. @@ -41,7 +45,7 @@ Replace the filenames by those corresponding to the downloaded Reth release. Releases are signed using the key with ID [`50FB7CC55B2E8AFA59FE03B7AA5ED56A7FBF253E`](https://keyserver.ubuntu.com/pks/lookup?search=50FB7CC55B2E8AFA59FE03B7AA5ED56A7FBF253E&fingerprint=on&op=index). -```none +```text -----BEGIN PGP PUBLIC KEY BLOCK----- mDMEZl4GjhYJKwYBBAHaRw8BAQdAU5gnINBAfIgF9S9GzZ1zHDwZtv/WcJRIQI+h diff --git a/book/installation/build-for-arm-devices.md b/docs/vocs/docs/pages/installation/build-for-arm-devices.mdx similarity index 81% rename from book/installation/build-for-arm-devices.md rename to docs/vocs/docs/pages/installation/build-for-arm-devices.mdx index 21d32c9e8bd..23b91e08770 100644 --- a/book/installation/build-for-arm-devices.md +++ b/docs/vocs/docs/pages/installation/build-for-arm-devices.mdx @@ -1,3 +1,7 @@ +--- +description: Building and troubleshooting Reth on ARM devices. +--- + # Building for ARM devices Reth can be built for and run on ARM devices, but there are a few things to take into consideration before. @@ -37,12 +41,12 @@ Some newer versions of ARM architecture offer support for Large Virtual Address ### Additional Resources -- [ARM developer documentation](https://developer.arm.com/documentation/ddi0406/cb/Appendixes/ARMv4-and-ARMv5-Differences/System-level-memory-model/Virtual-memory-support) -- [ARM Community Forums](https://community.arm.com) +- [ARM developer documentation](https://developer.arm.com/documentation/ddi0406/cb/Appendixes/ARMv4-and-ARMv5-Differences/System-level-memory-model/Virtual-memory-support) +- [ARM Community Forums](https://community.arm.com) ## Build Reth -If both your CPU architecture and the memory layout are valid, the instructions for building Reth will not differ from [the standard process](https://paradigmxyz.github.io/reth/installation/source.html). +If both your CPU architecture and the memory layout are valid, the instructions for building Reth will not differ from [the standard process](https://reth.rs/installation/source/). ## Troubleshooting @@ -57,16 +61,21 @@ This error is raised whenever MDBX can not open a database due to the limitation You will need to recompile the Linux Kernel to fix the issue. 
A simple and safe approach to achieve this is to use the Armbian build framework to create a new image of the OS that will be flashed to a storage device of your choice - an SD card for example - with the following kernel feature values: -- **Page Size**: 64 KB -- **Virtual Address Space Size**: 48 Bits + +- **Page Size**: 64 KB +- **Virtual Address Space Size**: 48 Bits To be able to build an Armbian image and set those values, you will need to: -- Clone the Armbian build framework repository + +- Clone the Armbian build framework repository + ```bash git clone https://github.com/armbian/build cd build ``` -- Run the compile script with the following parameters: + +- Run the compile script with the following parameters: + ```bash ./compile.sh \ BUILD_MINIMAL=yes \ @@ -74,5 +83,6 @@ BUILD_DESKTOP=no \ KERNEL_CONFIGURE=yes \ CARD_DEVICE="/dev/sdX" # Replace sdX with your own storage device ``` -- From there, you will be able to select the target board, the OS release and branch. Then, once you get in the **Kernel Configuration** screen, select the **Kernel Features options** and set the previous values accordingly. -- Wait for the process to finish, plug your storage device into your board and start it. You can now download or install Reth and it should work properly. + +- From there, you will be able to select the target board, the OS release and branch. Then, once you get in the **Kernel Configuration** screen, select the **Kernel Features options** and set the previous values accordingly. +- Wait for the process to finish, plug your storage device into your board and start it. You can now download or install Reth and it should work properly. diff --git a/book/installation/docker.md b/docs/vocs/docs/pages/installation/docker.mdx similarity index 80% rename from book/installation/docker.md rename to docs/vocs/docs/pages/installation/docker.mdx index 6ce2ae50a5b..ecf55f6b3da 100644 --- a/book/installation/docker.md +++ b/docs/vocs/docs/pages/installation/docker.mdx @@ -1,3 +1,7 @@ +--- +description: Guide to running Reth using Docker, including obtaining images from GitHub or building locally, using Docker Compose. +--- + # Docker There are two ways to obtain a Reth Docker image: @@ -8,9 +12,10 @@ There are two ways to obtain a Reth Docker image: Once you have obtained the Docker image, proceed to [Using the Docker image](#using-the-docker-image). -> **Note** -> -> Reth requires Docker Engine version 20.10.10 or higher due to [missing support](https://docs.docker.com/engine/release-notes/20.10/#201010) for the `clone3` syscall in previous versions. +:::note +Reth requires Docker Engine version 20.10.10 or higher due to [missing support](https://docs.docker.com/engine/release-notes/20.10/#201010) for the `clone3` syscall in previous versions. +::: + ## GitHub Reth docker images for both x86_64 and ARM64 machines are published with every release of reth on GitHub Container Registry. @@ -52,6 +57,7 @@ docker run reth:local --version ## Using the Docker image There are two ways to use the Docker image: + 1. [Using Docker](#using-plain-docker) 2. 
[Using Docker Compose](#using-docker-compose) @@ -86,12 +92,12 @@ To run Reth with Docker Compose, run the following command from a shell inside t docker compose -f etc/docker-compose.yml -f etc/lighthouse.yml up -d ``` -> **Note** -> -> If you want to run Reth with a CL that is not Lighthouse: -> -> - The JWT for the consensus client can be found at `etc/jwttoken/jwt.hex` in this repository, after the `etc/generate-jwt.sh` script is run -> - The Reth Engine API is accessible on `localhost:8551` +:::note +If you want to run Reth with a CL that is not Lighthouse: + +- The JWT for the consensus client can be found at `etc/jwttoken/jwt.hex` in this repository, after the `etc/generate-jwt.sh` script is run +- The Reth Engine API is accessible on `localhost:8551` + ::: To check if Reth is running correctly, run: @@ -101,18 +107,19 @@ docker compose -f etc/docker-compose.yml -f etc/lighthouse.yml logs -f reth The default `docker-compose.yml` file will create three containers: -- Reth -- Prometheus -- Grafana +- Reth +- Prometheus +- Grafana The optional `lighthouse.yml` file will create two containers: -- Lighthouse -- [`ethereum-metrics-exporter`](https://github.com/ethpandaops/ethereum-metrics-exporter) +- Lighthouse +- [`ethereum-metrics-exporter`](https://github.com/ethpandaops/ethereum-metrics-exporter) Grafana will be exposed on `localhost:3000` and accessible via default credentials (username and password is `admin`), with two available dashboards: -- reth -- Ethereum Metrics Exporter (works only if Lighthouse is also running) + +- reth +- Ethereum Metrics Exporter (works only if Lighthouse is also running) ## Interacting with Reth inside Docker @@ -124,7 +131,7 @@ docker exec -it reth bash **If Reth is running with Docker Compose, replace `reth` with `reth-reth-1` in the above command** -Refer to the [CLI docs](../cli/cli.md) to interact with Reth once inside the Reth container. +Refer to the [CLI docs](/cli/reth) to interact with Reth once inside the Reth container. ## Run only Grafana in Docker @@ -134,4 +141,4 @@ This allows importing existing Grafana dashboards, without running Reth in Docke docker compose -f etc/docker-compose.yml up -d --no-deps grafana ``` -After login with `admin:admin` credentials, Prometheus should be listed under [`Grafana datasources`](http://localhost:3000/connections/datasources). Replace its `Prometheus server URL` so it points to locally running one. On Mac or Windows, use `http://host.docker.internal:9090`. On Linux, try `http://172.17.0.1:9090`. \ No newline at end of file +After login with `admin:admin` credentials, Prometheus should be listed under [`Grafana datasources`](http://localhost:3000/connections/datasources). Replace its `Prometheus server URL` so it points to locally running one. On Mac or Windows, use `http://host.docker.internal:9090`. On Linux, try `http://172.17.0.1:9090`. diff --git a/docs/vocs/docs/pages/installation/overview.mdx b/docs/vocs/docs/pages/installation/overview.mdx new file mode 100644 index 00000000000..2a5c21522e2 --- /dev/null +++ b/docs/vocs/docs/pages/installation/overview.mdx @@ -0,0 +1,18 @@ +--- +description: Installation instructions for Reth and hardware recommendations. +--- + +# Installation + +Reth runs on Linux and macOS (Windows tracked). 
+ +There are three core methods to obtain Reth: + +- [Pre-built binaries](/installation/binaries) +- [Docker images](/installation/docker) +- [Building from source.](/installation/source) + +:::note +If you have Docker installed, we recommend using the [Docker Compose](/installation/docker#using-docker-compose) configuration +that will get you Reth, Lighthouse (Consensus Client), Prometheus and Grafana running and syncing with just one command. +::: diff --git a/docs/vocs/docs/pages/installation/priorities.mdx b/docs/vocs/docs/pages/installation/priorities.mdx new file mode 100644 index 00000000000..4494083e399 --- /dev/null +++ b/docs/vocs/docs/pages/installation/priorities.mdx @@ -0,0 +1,22 @@ +--- +description: Explains Reth update priorities for user classes such as payload builders and non-payload builders. +--- + +# Update Priorities + +When publishing releases, reth will include an "Update Priority" section in the release notes, in the same manner Lighthouse does. + +The "Update Priority" section will include a table which may appear like so: + +| User Class | Priority | +| -------------------- | --------------- | +| Payload Builders | Medium Priority | +| Non-Payload Builders | Low Priority | + +To understand this table, the following terms are important: + +- _Payload builders_ are those who use reth to build and validate payloads. +- _Non-payload builders_ are those who run reth for other purposes (e.g., data analysis, RPC or applications). +- _High priority_ updates should be completed as soon as possible (e.g., hours or days). +- _Medium priority_ updates should be completed at the next convenience (e.g., days or a week). +- _Low priority_ updates should be completed in the next routine update cycle (e.g., two weeks). diff --git a/book/installation/source.md b/docs/vocs/docs/pages/installation/source.mdx similarity index 72% rename from book/installation/source.md rename to docs/vocs/docs/pages/installation/source.mdx index d9642c4bc48..a7e1a2c33cc 100644 --- a/book/installation/source.md +++ b/docs/vocs/docs/pages/installation/source.mdx @@ -1,14 +1,18 @@ +--- +description: How to build, update, and troubleshoot Reth from source. +--- + # Build from Source You can build Reth on Linux, macOS, Windows, and Windows WSL2. -> **Note** -> -> Reth does **not** work on Windows WSL1. +:::note +Reth does **not** work on Windows WSL1. +::: ## Dependencies -First, **install Rust** using [rustup](https://rustup.rs/): +First, **install Rust** using [rustup](https://rustup.rs/): ```bash curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh @@ -16,19 +20,20 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh The rustup installer provides an easy way to update the Rust compiler, and works on all platforms. -> **Tips** -> -> - During installation, when prompted, enter `1` for the default installation. -> - After Rust installation completes, try running `cargo version` . If it cannot -> be found, run `source $HOME/.cargo/env`. After that, running `cargo version` should return the version, for example `cargo 1.68.2`. -> - It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`. +:::tip + +- During installation, when prompted, enter `1` for the default installation. +- After Rust installation completes, try running `cargo version` . If it cannot + be found, run `source $HOME/.cargo/env`. After that, running `cargo version` should return the version, for example `cargo 1.68.2`. +- It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`. 
+ ::: With Rust installed, follow the instructions below to install dependencies relevant to your operating system: -- **Ubuntu**: `apt-get install libclang-dev pkg-config build-essential` -- **macOS**: `brew install llvm pkg-config` -- **Windows**: `choco install llvm` or `winget install LLVM.LLVM` +- **Ubuntu**: `apt-get install libclang-dev pkg-config build-essential` +- **macOS**: `brew install llvm pkg-config` +- **Windows**: `choco install llvm` or `winget install LLVM.LLVM` These are needed to build bindings for Reth's database. @@ -60,7 +65,7 @@ cargo build --release This will place the reth binary under `./target/release/reth`, and you can copy it to your directory of preference after that. -Compilation may take around 10 minutes. Installation was successful if `reth --help` displays the [command-line documentation](../cli/cli.md). +Compilation may take around 10 minutes. Installation was successful if `reth --help` displays the [command-line documentation](/cli/reth). If you run into any issues, please check the [Troubleshooting](#troubleshooting) section, or reach out to us on [Telegram](https://t.me/paradigm_reth). @@ -88,11 +93,11 @@ You can customise the compiler settings used to compile Reth via Reth includes several profiles which can be selected via the Cargo flag `--profile`. -* `release`: default for source builds, enables most optimisations while not taking too long to - compile. -* `maxperf`: default for binary releases, enables aggressive optimisations including full LTO. - Although compiling with this profile improves some benchmarks by around 20% compared to `release`, - it imposes a _significant_ cost at compile time and is only recommended if you have a fast CPU. +- `release`: default for source builds, enables most optimisations while not taking too long to + compile. +- `maxperf`: default for binary releases, enables aggressive optimisations including full LTO. + Although compiling with this profile improves some benchmarks by around 20% compared to `release`, + it imposes a _significant_ cost at compile time and is only recommended if you have a fast CPU. **Rust compiler flags** @@ -107,9 +112,10 @@ RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf Finally, some optional features are present that may improve performance, but may not very portable, and as such might not compile on your particular system. 
These are currently: -- `jemalloc`: replaces the default system memory allocator with [`jemalloc`](https://jemalloc.net/); this feature is unstable on Windows -- `asm-keccak`: replaces the default, pure-Rust implementation of Keccak256 with one implemented in assembly; see [the `keccak-asm` crate](https://github.com/DaniPopes/keccak-asm) for more details and supported targets -- `min-LEVEL-logs`, where `LEVEL` is one of `error`, `warn`, `info`, `debug`, `trace`: disables compilation of logs of lower level than the given one; this in general isn't that significant, and is not recommended due to the loss of debugging that the logs would provide + +- `jemalloc`: replaces the default system memory allocator with [`jemalloc`](https://jemalloc.net/); this feature is unstable on Windows +- `asm-keccak`: replaces the default, pure-Rust implementation of Keccak256 with one implemented in assembly; see [the `keccak-asm` crate](https://github.com/DaniPopes/keccak-asm) for more details and supported targets +- `min-LEVEL-logs`, where `LEVEL` is one of `error`, `warn`, `info`, `debug`, `trace`: disables compilation of logs of lower level than the given one; this in general isn't that significant, and is not recommended due to the loss of debugging that the logs would provide You can activate features by passing them to the `--features` or `-F` Cargo flag; multiple features can be activated with a space- or comma-separated list to the flag: @@ -136,7 +142,7 @@ Rust Version (MSRV) which is listed under the `rust-version` key in Reth's If compilation fails with `(signal: 9, SIGKILL: kill)`, this could mean your machine ran out of memory during compilation. If you are on Docker, consider increasing the memory of the container, or use a [pre-built -binary](../installation/binaries.md). +binary](/installation/binaries). If compilation fails in either the `keccak-asm` or `sha3-asm` crates, it is likely that your current system configuration is not supported. See the [`keccak-asm` target table](https://github.com/DaniPopes/keccak-asm?tab=readme-ov-file#support) for supported targets. @@ -147,7 +153,7 @@ _(Thanks to Sigma Prime for this section from [their Lighthouse book](https://li ### Bus error (WSL2) -In WSL 2 on Windows, the default virtual disk size is set to 1TB. +In WSL 2 on Windows, the default virtual disk size is set to 1TB. You must increase the allocated disk size for your WSL2 instance before syncing reth. diff --git a/docs/vocs/docs/pages/introduction/contributing.mdx b/docs/vocs/docs/pages/introduction/contributing.mdx new file mode 100644 index 00000000000..aa30ee5faf2 --- /dev/null +++ b/docs/vocs/docs/pages/introduction/contributing.mdx @@ -0,0 +1,258 @@ +# Contributing to Reth + +Reth has docs specifically geared for developers and contributors, including documentation on the structure and architecture of reth, the general workflow we employ, and other useful tips. + +## Getting Help + +Need support or have questions? Open a github issue and/or join the TG chat: + +- **GitHub Issues**: [Open an issue](https://github.com/paradigmxyz/reth/issues/new) for bugs or feature requests +- **Telegram Chat**: [Join our Telegram](https://t.me/paradigm_reth) for real-time support and discussions + +## Repository and Project Structure + +Reth is organized as a modular codebase with clear separation and a contributor friendly architecture, you can read about it in detail [here](https://github.com/paradigmxyz/reth/tree/main/docs). 
Here's the TL;DR: + +### Design + +Reth follows a modular architecture where each component can be used independently: + +- **Consensus**: Block validation and consensus rules +- **Storage**: Hybrid database with MDBX + static files +- **Networking**: P2P networking stack +- **RPC**: JSON-RPC server implementation +- **Engine**: Consensus layer integration +- **EVM**: Transaction execution +- **Node Builder**: High-level orchestration + +### Crates + +The repository is organized into focused crates under `/crates/`: + +``` +crates/ +├── consensus/ # Consensus and validation logic +├── storage/ # Database and storage implementations +├── net/ # Networking components +├── rpc/ # JSON-RPC server and APIs +├── engine/ # Engine API and consensus integration +├── evm/ # EVM execution +├── node/ # Node building and orchestration +├── ethereum/ # Ethereum-specific implementations +├── optimism/ # Optimism L2 support +└── ... +``` + +## Workflow: The Lifecycle of PRs + +### 1. Before You Start + +- Check existing issues to avoid duplicate work +- For large features, open an issue first to discuss the approach +- Fork the repository and create a feature branch + +### 2. Development Process + +#### Setting Up Your Environment + +```bash +# Clone your fork +git clone https://github.com/YOUR_USERNAME/reth.git +cd reth + +# Install dependencies and tools +# Use nightly Rust for formatting +rustup install nightly +rustup component add rustfmt --toolchain nightly + +# Run the validation suite +make pr +``` + +#### Code Style and Standards + +- **Formatting**: Use nightly rustfmt (`cargo +nightly fmt`) +- **Linting**: All clippy warnings must be addressed +- **Documentation**: Add doc comments for public APIs +- **Testing**: Include appropriate tests for your changes + +#### Recommended VS Code Settings + +Install the `rust-analyzer` extension and use these settings for the best development experience: + +```json +{ + "rust-analyzer.rustfmt.overrideCommand": ["rustfmt", "+nightly"], + "rust-analyzer.check.overrideCommand": [ + "cargo", + "clippy", + "--workspace", + "--message-format=json", + "--all-targets", + "--all-features" + ] +} +``` + +### 3. Testing Your Changes + +Reth uses comprehensive testing at multiple levels: + +#### Unit Tests + +Test specific functions and components: + +```bash +cargo test --package reth-ethereum-consensus +``` + +#### Integration Tests + +Test component interactions: + +```bash +cargo test --test integration_tests +``` + +#### Full Test Suite + +Run all tests including Ethereum Foundation tests: + +```bash +make test +``` + +#### Validation Suite + +Before submitting, always run: + +```bash +make pr +``` + +This runs: + +- Code formatting checks +- Clippy linting +- Documentation generation +- Full test suite + +### 4. Submitting Your PR + +#### Draft PRs for Large Features + +For substantial changes, open a draft PR early to get feedback on the approach. + +#### PR Requirements + +- [ ] Clear, descriptive title and description +- [ ] Tests for new functionality +- [ ] Documentation updates if needed +- [ ] All CI checks passing +- [ ] Commit messages follow conventional format + +#### Commit Message Format + +``` +type: brief description + +More detailed explanation if needed. + +- feat: new feature +- fix: bug fix +- docs: documentation changes +- refactor: code refactoring +- test: adding tests +- chore: maintenance tasks +``` + +### 5. Review Process + +#### Who Can Review + +Any community member can review PRs. We encourage participation from all skill levels. 
+ +#### What Reviewers Look For + +- **Does the change improve Reth?** +- **Are there clear bugs or issues?** +- **Are commit messages clear and descriptive?** +- **Is the code well-tested?** +- **Is documentation updated appropriately?** + +#### Review Guidelines + +- Be constructive and respectful +- Provide specific, actionable feedback +- Focus on significant issues first +- Acknowledge good work and improvements + +## Releases: How Reth is Released + +### Release Schedule + +- **Regular releases**: Following semantic versioning +- **Security releases**: As needed for critical vulnerabilities +- **Pre-releases**: For testing major changes + +### Release Process + +1. **Version bump**: Update version numbers across crates +2. **Changelog**: Update `CHANGELOG.md` with notable changes +3. **Testing**: Final validation on testnet and mainnet +4. **Tagging**: Create release tags and GitHub releases +5. **Distribution**: Update package registries and Docker images + +### Release Criteria + +- All CI checks passing +- No known critical bugs +- Documentation up to date +- Backwards compatibility considerations addressed + +## Ways to Contribute + +### 💡 Feature Requests + +For feature requests, please include: + +- **Detailed explanation**: What should the feature do? +- **Context and motivation**: Why is this feature needed? +- **Examples**: How would it be used? +- **Similar tools**: References to similar functionality elsewhere + +### 📝 Documentation + +Documentation improvements are always welcome: + +- Add missing documentation +- Improve code examples +- Create tutorials or guides + +### 🔧 Code Contributions + +Contributing code changes: + +- Fix bugs identified in issues +- Implement requested features +- Improve performance +- Refactor for better maintainability + +## Code of Conduct + +Reth follows the [Rust Code of Conduct](https://www.rust-lang.org/conduct.html). We are committed to providing a welcoming and inclusive environment for all contributors. + +### Our Standards + +- Be respectful and constructive +- Focus on what's best for the community +- Show empathy towards other contributors +- Accept constructive criticism gracefully + +### Reporting Issues + +If you experience or witness behavior that violates our code of conduct, please report it to [georgios@paradigm.xyz](mailto:georgios@paradigm.xyz). + +:::note +Also read [CONTRIBUTING.md](https://github.com/paradigmxyz/reth/blob/main/CONTRIBUTING.md) for in-depth guidelines. +::: diff --git a/docs/vocs/docs/pages/introduction/why-reth.mdx b/docs/vocs/docs/pages/introduction/why-reth.mdx new file mode 100644 index 00000000000..f140c0e3128 --- /dev/null +++ b/docs/vocs/docs/pages/introduction/why-reth.mdx @@ -0,0 +1,50 @@ +--- +description: Why Reth is the future of Ethereum infrastructure - powering everything from production staking to cutting-edge L2s and ZK applications. +--- + +# Why Reth? + +Reth is more than just another Ethereum client—it's the foundation upon which the next generation of blockchain infrastructure is being built. From powering production staking environments at institutions like Coinbase to enabling cutting-edge L2 sequencers and ZK applications, Reth represents the convergence of security, performance, and extensibility that the ecosystem demands. + +Every piece of crypto infrastructure will be touching Reth one way or another. Here's why the world's leading developers and institutions are choosing Reth as their node of choice. 
+
+## Institutional-Grade Security
+
+Reth secures real value on Ethereum mainnet today, trusted by institutions like [Coinbase](https://x.com/CoinbasePltfrm/status/1933546893742579890) for production staking infrastructure. It powers RPC providers such as Alchemy.
+
+## Future Proof Performance
+
+Reth pushes the performance frontier across every dimension, from L2 sequencers to MEV block building.
+
+- **L2 Sequencer Performance**: Used by [Base](https://www.base.org/), other production L2s, and rollup-as-a-service providers such as [Conduit](https://conduit.xyz), which require high throughput and fast block times.
+- **MEV & Block Building**: [rbuilder](https://github.com/flashbots/rbuilder) is an open-source block builder built on Reth, chosen for its developer friendliness and blazing-fast performance.
+
+## Infinitely Customizable
+
+Reth's modular architecture means you are not locked into someone else's design decisions—build exactly the chain you need.
+
+- **Component-Based Design**: Swap out consensus, execution, mempool, or networking modules independently
+- **Custom Transaction Types**: Build specialized DeFi chains and unique economic models
+- **Rapid Development**: Reth SDK accelerates custom blockchain development with pre-built components
+
+## ZK & Stateless Ready
+
+Reth is designed from the ground up to excel in the zero-knowledge future with stateless execution and modular architecture.
+
+[SP1](https://github.com/succinctlabs/sp1), a zkVM for proving arbitrary Rust programs, and [Ress](https://www.paradigm.xyz/2025/03/stateless-reth-nodes), an experimental stateless node, demonstrate how Reth enables scalable zero-knowledge applications for Ethereum.
+
+## Thriving Open Source Ecosystem
+
+The most important factor in Reth's success is our vibrant open source community building the future together.
+
+500+ geo-distributed developers from leading companies and academia have played a role in building Reth into what it is today.
+
+## Join the community
+
+Reth isn't just a tool—it's a movement toward better blockchain infrastructure. Whether you're running a validator, building the next generation of L2s, or creating cutting-edge ZK applications, Reth provides the foundation you need to succeed.
+
+**Ready to build the future?**
+
+- [Get Started](/run/ethereum) with running your first Reth node
+- [Explore the SDK](/sdk/overview) to build custom blockchain infrastructure
+- [Join the Community](https://github.com/paradigmxyz/reth) and contribute to the future of Ethereum
diff --git a/book/jsonrpc/admin.md b/docs/vocs/docs/pages/jsonrpc/admin.mdx
similarity index 79%
rename from book/jsonrpc/admin.md
rename to docs/vocs/docs/pages/jsonrpc/admin.mdx
index b85cd194b6d..cf1ef29c05b 100644
--- a/book/jsonrpc/admin.md
+++ b/docs/vocs/docs/pages/jsonrpc/admin.mdx
@@ -1,10 +1,13 @@
+---
+description: Admin API for node configuration and peer management.
+---
 # `admin` Namespace
 
 The `admin` API allows you to configure your node, including adding and removing peers.
 
-> **Note**
->
-> As this namespace can configure your node at runtime, it is generally **not advised** to expose it publicly.
+:::note
+As this namespace can configure your node at runtime, it is generally **not advised** to expose it publicly.
+:::
 
 ## `admin_addPeer`
 
 Add the given peer to the current peer set of the node.
 
 The method accepts a single argument, the [`enode`][enode] URL of the remote peer to connect to, and returns a `bool` indicating whether the peer was accepted or not.
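If you want to issue that call programmatically rather than with `curl`, here is a minimal Rust sketch. It assumes the node exposes the `admin` namespace over HTTP on the default `localhost:8545` (e.g. started with `--http --http.api admin`), that the `reqwest` crate (with the `blocking` and `json` features) and `serde_json` are available as dependencies, and that the enode URL below is a placeholder to be replaced with a real peer:

```rust
use serde_json::{json, Value};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder enode URL; substitute a real peer's enode string.
    let enode = "enode://pubkey@10.3.58.6:30303";

    // Standard JSON-RPC 2.0 envelope for admin_addPeer.
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "admin_addPeer",
        "params": [enode]
    });

    let response: Value = reqwest::blocking::Client::new()
        .post("http://localhost:8545")
        .json(&request)
        .send()?
        .json()?;

    // `result` is the bool described above: whether the peer was accepted.
    println!("peer accepted: {}", response["result"]);
    Ok(())
}
```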
| Client | Method invocation | -|--------|------------------------------------------------| +| ------ | ---------------------------------------------- | | RPC | `{"method": "admin_addPeer", "params": [url]}` | ### Example @@ -27,9 +30,9 @@ The method accepts a single argument, the [`enode`][enode] URL of the remote pee Disconnects from a peer if the connection exists. Returns a `bool` indicating whether the peer was successfully removed or not. -| Client | Method invocation | -|--------|----------------------------------------------------| -| RPC | `{"method": "admin_removePeer", "params": [url]}` | +| Client | Method invocation | +| ------ | ------------------------------------------------- | +| RPC | `{"method": "admin_removePeer", "params": [url]}` | ### Example @@ -45,7 +48,7 @@ Adds the given peer to a list of trusted peers, which allows the peer to always It returns a `bool` indicating whether the peer was added to the list or not. | Client | Method invocation | -|--------|-------------------------------------------------------| +| ------ | ----------------------------------------------------- | | RPC | `{"method": "admin_addTrustedPeer", "params": [url]}` | ### Example @@ -62,7 +65,7 @@ Removes a remote node from the trusted peer set, but it does not disconnect it a Returns true if the peer was successfully removed. | Client | Method invocation | -|--------|----------------------------------------------------------| +| ------ | -------------------------------------------------------- | | RPC | `{"method": "admin_removeTrustedPeer", "params": [url]}` | ### Example @@ -79,7 +82,7 @@ Returns all information known about the running node. These include general information about the node itself, as well as what protocols it participates in, its IP and ports. | Client | Method invocation | -|--------|--------------------------------| +| ------ | ------------------------------ | | RPC | `{"method": "admin_nodeInfo"}` | ### Example @@ -121,9 +124,9 @@ Like other subscription methods, this returns the ID of the subscription, which To unsubscribe from peer events, call `admin_peerEvents_unsubscribe` with the subscription ID. 
-| Client | Method invocation | -|--------|-------------------------------------------------------| -| RPC | `{"method": "admin_peerEvents", "params": []}` | +| Client | Method invocation | +| ------ | ------------------------------------------------------------ | +| RPC | `{"method": "admin_peerEvents", "params": []}` | | RPC | `{"method": "admin_peerEvents_unsubscribe", "params": [id]}` | ### Event Types @@ -132,20 +135,20 @@ The subscription emits events with the following structure: ```json { - "jsonrpc": "2.0", - "method": "admin_subscription", - "params": { - "subscription": "0xcd0c3e8af590364c09d0fa6a1210faf5", - "result": { - "type": "add", // or "drop", "error" - "peer": { - "id": "44826a5d6a55f88a18298bca4773fca5749cdc3a5c9f308aa7d810e9b31123f3e7c5fba0b1d70aac5308426f47df2a128a6747040a3815cc7dd7167d03be320d", - "enode": "enode://44826a5d6a55f88a18298bca4773fca5749cdc3a5c9f308aa7d810e9b31123f3e7c5fba0b1d70aac5308426f47df2a128a6747040a3815cc7dd7167d03be320d@192.168.1.1:30303", - "addr": "192.168.1.1:30303" - }, - "error": "reason for disconnect or error" // only present for "drop" and "error" events + "jsonrpc": "2.0", + "method": "admin_subscription", + "params": { + "subscription": "0xcd0c3e8af590364c09d0fa6a1210faf5", + "result": { + "type": "add", // or "drop", "error" + "peer": { + "id": "44826a5d6a55f88a18298bca4773fca5749cdc3a5c9f308aa7d810e9b31123f3e7c5fba0b1d70aac5308426f47df2a128a6747040a3815cc7dd7167d03be320d", + "enode": "enode://44826a5d6a55f88a18298bca4773fca5749cdc3a5c9f308aa7d810e9b31123f3e7c5fba0b1d70aac5308426f47df2a128a6747040a3815cc7dd7167d03be320d@192.168.1.1:30303", + "addr": "192.168.1.1:30303" + }, + "error": "reason for disconnect or error" // only present for "drop" and "error" events + } } - } } ``` diff --git a/book/jsonrpc/debug.md b/docs/vocs/docs/pages/jsonrpc/debug.mdx similarity index 80% rename from book/jsonrpc/debug.md rename to docs/vocs/docs/pages/jsonrpc/debug.mdx index 7965e2e0d50..aa3a47685c6 100644 --- a/book/jsonrpc/debug.md +++ b/docs/vocs/docs/pages/jsonrpc/debug.mdx @@ -1,3 +1,6 @@ +--- +description: Debug API for inspecting Ethereum state and traces. +--- # `debug` Namespace The `debug` API provides several methods to inspect the Ethereum state, including Geth-style traces. @@ -7,7 +10,7 @@ The `debug` API provides several methods to inspect the Ethereum state, includin Returns an RLP-encoded header. | Client | Method invocation | -|--------|-------------------------------------------------------| +| ------ | ----------------------------------------------------- | | RPC | `{"method": "debug_getRawHeader", "params": [block]}` | ## `debug_getRawBlock` @@ -15,7 +18,7 @@ Returns an RLP-encoded header. Retrieves and returns the RLP encoded block by number, hash or tag. | Client | Method invocation | -|--------|------------------------------------------------------| +| ------ | ---------------------------------------------------- | | RPC | `{"method": "debug_getRawBlock", "params": [block]}` | ## `debug_getRawTransaction` @@ -23,7 +26,7 @@ Retrieves and returns the RLP encoded block by number, hash or tag. Returns an EIP-2718 binary-encoded transaction. | Client | Method invocation | -|--------|--------------------------------------------------------------| +| ------ | ------------------------------------------------------------ | | RPC | `{"method": "debug_getRawTransaction", "params": [tx_hash]}` | ## `debug_getRawReceipts` @@ -31,7 +34,7 @@ Returns an EIP-2718 binary-encoded transaction. 
Returns an array of EIP-2718 binary-encoded receipts. | Client | Method invocation | -|--------|---------------------------------------------------------| +| ------ | ------------------------------------------------------- | | RPC | `{"method": "debug_getRawReceipts", "params": [block]}` | ## `debug_getBadBlocks` @@ -39,7 +42,7 @@ Returns an array of EIP-2718 binary-encoded receipts. Returns an array of recent bad blocks that the client has seen on the network. | Client | Method invocation | -|--------|--------------------------------------------------| +| ------ | ------------------------------------------------ | | RPC | `{"method": "debug_getBadBlocks", "params": []}` | ## `debug_traceChain` @@ -47,7 +50,7 @@ Returns an array of recent bad blocks that the client has seen on the network. Returns the structured logs created during the execution of EVM between two blocks (excluding start) as a JSON object. | Client | Method invocation | -|--------|----------------------------------------------------------------------| +| ------ | -------------------------------------------------------------------- | | RPC | `{"method": "debug_traceChain", "params": [start_block, end_block]}` | ## `debug_traceBlock` @@ -57,11 +60,11 @@ The `debug_traceBlock` method will return a full stack trace of all invoked opco This expects an RLP-encoded block. > **Note** -> +> > The parent of this block must be present, or it will fail. | Client | Method invocation | -|--------|---------------------------------------------------------| +| ------ | ------------------------------------------------------- | | RPC | `{"method": "debug_traceBlock", "params": [rlp, opts]}` | ## `debug_traceBlockByHash` @@ -69,7 +72,7 @@ This expects an RLP-encoded block. Similar to [`debug_traceBlock`](#debug_traceblock), `debug_traceBlockByHash` accepts a block hash and will replay the block that is already present in the database. | Client | Method invocation | -|--------|----------------------------------------------------------------------| +| ------ | -------------------------------------------------------------------- | | RPC | `{"method": "debug_traceBlockByHash", "params": [block_hash, opts]}` | ## `debug_traceBlockByNumber` @@ -77,15 +80,15 @@ Similar to [`debug_traceBlock`](#debug_traceblock), `debug_traceBlockByHash` acc Similar to [`debug_traceBlockByHash`](#debug_traceblockbyhash), `debug_traceBlockByNumber` accepts a block number and will replay the block that is already present in the database. | Client | Method invocation | -|--------|--------------------------------------------------------------------------| +| ------ | ------------------------------------------------------------------------ | | RPC | `{"method": "debug_traceBlockByNumber", "params": [block_number, opts]}` | ## `debug_traceTransaction` The `debug_traceTransaction` debugging method will attempt to run the transaction in the exact same manner as it was executed on the network. It will replay any transaction that may have been executed prior to this one before it will finally attempt to execute the transaction that corresponds to the given hash. -| Client | Method invocation | -|--------|-------------------------------------------------------------| +| Client | Method invocation | +| ------ | ----------------------------------------------------------------- | | RPC | `{"method": "debug_traceTransaction", "params": [tx_hash, opts]}` | ## `debug_traceCall` @@ -97,5 +100,5 @@ The first argument (just as in `eth_call`) is a transaction request. 
The block can optionally be specified either by hash or by number as the second argument. | Client | Method invocation | -|--------|-----------------------------------------------------------------------| +| ------ | --------------------------------------------------------------------- | | RPC | `{"method": "debug_traceCall", "params": [call, block_number, opts]}` | diff --git a/book/jsonrpc/eth.md b/docs/vocs/docs/pages/jsonrpc/eth.mdx similarity index 72% rename from book/jsonrpc/eth.md rename to docs/vocs/docs/pages/jsonrpc/eth.mdx index 0a3003c4052..052beb4c7b9 100644 --- a/book/jsonrpc/eth.md +++ b/docs/vocs/docs/pages/jsonrpc/eth.mdx @@ -1,3 +1,7 @@ +--- +description: Standard Ethereum JSON-RPC API methods. +--- + # `eth` Namespace Documentation for the API methods in the `eth` namespace can be found on [ethereum.org](https://ethereum.org/en/developers/docs/apis/json-rpc/). diff --git a/book/jsonrpc/intro.md b/docs/vocs/docs/pages/jsonrpc/intro.mdx similarity index 69% rename from book/jsonrpc/intro.md rename to docs/vocs/docs/pages/jsonrpc/intro.mdx index 6f9b894988d..93cccf46921 100644 --- a/book/jsonrpc/intro.md +++ b/docs/vocs/docs/pages/jsonrpc/intro.mdx @@ -1,3 +1,7 @@ +--- +description: Overview of Reth's JSON-RPC API and namespaces. +--- + # JSON-RPC You can interact with Reth over JSON-RPC. Reth supports all standard Ethereum JSON-RPC API methods. @@ -12,22 +16,21 @@ Each namespace must be explicitly enabled. The methods are grouped into namespaces, which are listed below: -| Namespace | Description | Sensitive | -|-------------------------|--------------------------------------------------------------------------------------------------------|-----------| -| [`eth`](./eth.md) | The `eth` API allows you to interact with Ethereum. | Maybe | -| [`web3`](./web3.md) | The `web3` API provides utility functions for the web3 client. | No | -| [`net`](./net.md) | The `net` API provides access to network information of the node. | No | -| [`txpool`](./txpool.md) | The `txpool` API allows you to inspect the transaction pool. | No | -| [`debug`](./debug.md) | The `debug` API provides several methods to inspect the Ethereum state, including Geth-style traces. | No | -| [`trace`](./trace.md) | The `trace` API provides several methods to inspect the Ethereum state, including Parity-style traces. | No | -| [`admin`](./admin.md) | The `admin` API allows you to configure your node. | **Yes** | -| [`rpc`](./rpc.md) | The `rpc` API provides information about the RPC server and its modules. | No | +| Namespace | Description | Sensitive | +| -------------------- | ------------------------------------------------------------------------------------------------------ | --------- | +| [`eth`](/jsonrpc/eth) | The `eth` API allows you to interact with Ethereum. | Maybe | +| [`web3`](/jsonrpc/web3) | The `web3` API provides utility functions for the web3 client. | No | +| [`net`](/jsonrpc/net) | The `net` API provides access to network information of the node. | No | +| [`txpool`](/jsonrpc/txpool) | The `txpool` API allows you to inspect the transaction pool. | No | +| [`debug`](/jsonrpc/debug) | The `debug` API provides several methods to inspect the Ethereum state, including Geth-style traces. | No | +| [`trace`](/jsonrpc/trace) | The `trace` API provides several methods to inspect the Ethereum state, including Parity-style traces. | No | +| [`admin`](/jsonrpc/admin) | The `admin` API allows you to configure your node. 
| **Yes** | +| [`rpc`](/jsonrpc/rpc) | The `rpc` API provides information about the RPC server and its modules. | No | Note that some APIs are sensitive, since they can be used to configure your node (`admin`), or access accounts stored on the node (`eth`). Generally, it is advisable to not expose any JSONRPC namespace publicly, unless you know what you are doing. - ## Transports Reth supports HTTP, WebSockets and IPC. @@ -90,10 +93,10 @@ Because WebSockets are bidirectional, nodes can push events to clients, which en The configuration of the WebSocket server follows the same pattern as the HTTP server: -- Enable it using `--ws` -- Configure the server address by passing `--ws.addr` and `--ws.port` (default `8546`) -- Configure cross-origin requests using `--ws.origins` -- Enable APIs using `--ws.api` +- Enable it using `--ws` +- Configure the server address by passing `--ws.addr` and `--ws.port` (default `8546`) +- Configure cross-origin requests using `--ws.origins` +- Enable APIs using `--ws.api` ### IPC diff --git a/book/jsonrpc/net.md b/docs/vocs/docs/pages/jsonrpc/net.mdx similarity index 82% rename from book/jsonrpc/net.md rename to docs/vocs/docs/pages/jsonrpc/net.mdx index ac40c75b2ab..145b9c27676 100644 --- a/book/jsonrpc/net.md +++ b/docs/vocs/docs/pages/jsonrpc/net.mdx @@ -1,3 +1,7 @@ +--- +description: net_ namespace for Ethereum nodes. +--- + # `net` Namespace The `net` API provides information about the networking component of the node. @@ -7,7 +11,7 @@ The `net` API provides information about the networking component of the node. Returns a `bool` indicating whether or not the node is listening for network connections. | Client | Method invocation | -|--------|---------------------------------------------| +| ------ | ------------------------------------------- | | RPC | `{"method": "net_listening", "params": []}` | ### Example @@ -22,7 +26,7 @@ Returns a `bool` indicating whether or not the node is listening for network con Returns the number of peers connected to the node. | Client | Method invocation | -|--------|---------------------------------------------| +| ------ | ------------------------------------------- | | RPC | `{"method": "net_peerCount", "params": []}` | ### Example @@ -37,7 +41,7 @@ Returns the number of peers connected to the node. Returns the network ID (e.g. 1 for mainnet) | Client | Method invocation | -|--------|-------------------------------------------| +| ------ | ----------------------------------------- | | RPC | `{"method": "net_version", "params": []}` | ### Example @@ -45,4 +49,4 @@ Returns the network ID (e.g. 1 for mainnet) ```js // > {"jsonrpc":"2.0","id":1,"method":"net_version","params":[]} {"jsonrpc":"2.0","id":1,"result":1} -``` \ No newline at end of file +``` diff --git a/book/jsonrpc/rpc.md b/docs/vocs/docs/pages/jsonrpc/rpc.mdx similarity index 91% rename from book/jsonrpc/rpc.md rename to docs/vocs/docs/pages/jsonrpc/rpc.mdx index 0a4739718be..c85babcfe3c 100644 --- a/book/jsonrpc/rpc.md +++ b/docs/vocs/docs/pages/jsonrpc/rpc.mdx @@ -1,3 +1,7 @@ +--- +description: rpc_ namespace for retrieving server information such as enabled namespaces +--- + # `rpc` Namespace The `rpc` API provides methods to get information about the RPC server itself, such as the enabled namespaces. @@ -7,7 +11,7 @@ The `rpc` API provides methods to get information about the RPC server itself, s Lists the enabled RPC namespaces and the versions of each. 
| Client | Method invocation | -|--------|-------------------------------------------| +| ------ | ----------------------------------------- | | RPC | `{"method": "rpc_modules", "params": []}` | ### Example diff --git a/book/jsonrpc/trace.md b/docs/vocs/docs/pages/jsonrpc/trace.mdx similarity index 86% rename from book/jsonrpc/trace.md rename to docs/vocs/docs/pages/jsonrpc/trace.mdx index ba0f2490b57..464832db70e 100644 --- a/book/jsonrpc/trace.md +++ b/docs/vocs/docs/pages/jsonrpc/trace.mdx @@ -1,33 +1,37 @@ +--- +description: Trace API for inspecting Ethereum state and transactions. +--- + # `trace` Namespace - +{/* TODO: We should probably document the format of the traces themselves, OE does not do that */} The `trace` API provides several methods to inspect the Ethereum state, including Parity-style traces. -A similar module exists (with other debug functions) with Geth-style traces ([`debug`](./debug.md)). +A similar module exists (with other debug functions) with Geth-style traces ([`debug`](/jsonrpc/debug)). The `trace` API gives deeper insight into transaction processing. There are two types of methods in this API: -- **Ad-hoc tracing APIs** for performing diagnostics on calls or transactions (historical or hypothetical). -- **Transaction-trace filtering APIs** for getting full externality traces on any transaction executed by reth. +- **Ad-hoc tracing APIs** for performing diagnostics on calls or transactions (historical or hypothetical). +- **Transaction-trace filtering APIs** for getting full externality traces on any transaction executed by reth. ## Ad-hoc tracing APIs Ad-hoc tracing APIs allow you to perform diagnostics on calls or transactions (historical or hypothetical), including: -- Transaction traces (`trace`) -- VM traces (`vmTrace`) -- State difference traces (`stateDiff`) +- Transaction traces (`trace`) +- VM traces (`vmTrace`) +- State difference traces (`stateDiff`) The ad-hoc tracing APIs are: -- [`trace_call`](#trace_call) -- [`trace_callMany`](#trace_callmany) -- [`trace_rawTransaction`](#trace_rawtransaction) -- [`trace_replayBlockTransactions`](#trace_replayblocktransactions) -- [`trace_replayTransaction`](#trace_replaytransaction) +- [`trace_call`](#trace_call) +- [`trace_callMany`](#trace_callmany) +- [`trace_rawTransaction`](#trace_rawtransaction) +- [`trace_replayBlockTransactions`](#trace_replayblocktransactions) +- [`trace_replayTransaction`](#trace_replaytransaction) ## Transaction-trace filtering APIs @@ -37,10 +41,10 @@ Information returned includes the execution of all contract creations, destructi The transaction trace filtering APIs are: -- [`trace_block`](#trace_block) -- [`trace_filter`](#trace_filter) -- [`trace_get`](#trace_get) -- [`trace_transaction`](#trace_transaction) +- [`trace_block`](#trace_block) +- [`trace_filter`](#trace_filter) +- [`trace_get`](#trace_get) +- [`trace_transaction`](#trace_transaction) ## `trace_call` @@ -53,7 +57,7 @@ The second parameter is an array of one or more trace types (`vmTrace`, `trace`, The third and optional parameter is a block number, block hash, or a block tag (`latest`, `finalized`, `safe`, `earliest`, `pending`). 
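To make the parameter layout concrete, the following sketch assembles a `trace_call` request body with the `serde_json` crate; the `from`/`to` addresses and calldata are placeholders, not real values:

```rust
use serde_json::json;

fn main() {
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "trace_call",
        "params": [
            // 1. The transaction request to simulate (placeholder fields).
            {
                "from": "0x0000000000000000000000000000000000000001",
                "to": "0x0000000000000000000000000000000000000002",
                "data": "0x"
            },
            // 2. One or more trace types.
            ["trace", "vmTrace", "stateDiff"],
            // 3. Optional block number, hash, or tag.
            "latest"
        ]
    });
    println!("{request}");
}
```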
| Client | Method invocation | -|--------|-----------------------------------------------------------| +| ------ | --------------------------------------------------------- | | RPC | `{"method": "trace_call", "params": [tx, type[], block]}` | ### Example @@ -90,7 +94,7 @@ The first parameter is a list of call traces, where each call trace is of the fo The second and optional parameter is a block number, block hash, or a block tag (`latest`, `finalized`, `safe`, `earliest`, `pending`). | Client | Method invocation | -|--------|--------------------------------------------------------| +| ------ | ------------------------------------------------------ | | RPC | `{"method": "trace_call", "params": [trace[], block]}` | ### Example @@ -154,7 +158,7 @@ The second and optional parameter is a block number, block hash, or a block tag Traces a call to `eth_sendRawTransaction` without making the call, returning the traces. | Client | Method invocation | -|--------|--------------------------------------------------------| +| ------ | ------------------------------------------------------ | | RPC | `{"method": "trace_call", "params": [raw_tx, type[]]}` | ### Example @@ -187,7 +191,7 @@ Traces a call to `eth_sendRawTransaction` without making the call, returning the Replays all transactions in a block returning the requested traces for each transaction. | Client | Method invocation | -|--------|--------------------------------------------------------------------------| +| ------ | ------------------------------------------------------------------------ | | RPC | `{"method": "trace_replayBlockTransactions", "params": [block, type[]]}` | ### Example @@ -224,7 +228,7 @@ Replays all transactions in a block returning the requested traces for each tran Replays a transaction, returning the traces. | Client | Method invocation | -|--------|----------------------------------------------------------------------| +| ------ | -------------------------------------------------------------------- | | RPC | `{"method": "trace_replayTransaction", "params": [tx_hash, type[]]}` | ### Example @@ -257,7 +261,7 @@ Replays a transaction, returning the traces. Returns traces created at given block. | Client | Method invocation | -|--------|------------------------------------------------| +| ------ | ---------------------------------------------- | | RPC | `{"method": "trace_block", "params": [block]}` | ### Example @@ -300,17 +304,17 @@ Returns traces matching given filter. Filters are objects with the following properties: -- `fromBlock`: Returns traces from the given block (a number, hash, or a tag like `latest`). -- `toBlock`: Returns traces to the given block. -- `fromAddress`: Sent from these addresses -- `toAddress`: Sent to these addresses -- `after`: The offset trace number -- `count`: The number of traces to display in a batch +- `fromBlock`: Returns traces from the given block (a number, hash, or a tag like `latest`). +- `toBlock`: Returns traces to the given block. +- `fromAddress`: Sent from these addresses +- `toAddress`: Sent to these addresses +- `after`: The offset trace number +- `count`: The number of traces to display in a batch All properties are optional. | Client | Method invocation | -|--------|--------------------------------------------------| +| ------ | ------------------------------------------------ | | RPC | `{"method": "trace_filter", "params": [filter]}` | ### Example @@ -352,7 +356,7 @@ All properties are optional. Returns trace at given position. 
| Client | Method invocation | -|--------|----------------------------------------------------------| +| ------ | -------------------------------------------------------- | | RPC | `{"method": "trace_get", "params": [tx_hash,indices[]]}` | ### Example @@ -393,7 +397,7 @@ Returns trace at given position. Returns all traces of given transaction | Client | Method invocation | -|--------|--------------------------------------------------------| +| ------ | ------------------------------------------------------ | | RPC | `{"method": "trace_transaction", "params": [tx_hash]}` | ### Example @@ -430,4 +434,4 @@ Returns all traces of given transaction ... ] } -``` \ No newline at end of file +``` diff --git a/book/jsonrpc/txpool.md b/docs/vocs/docs/pages/jsonrpc/txpool.mdx similarity index 81% rename from book/jsonrpc/txpool.md rename to docs/vocs/docs/pages/jsonrpc/txpool.mdx index cb9e9c0e69d..57f89c643c6 100644 --- a/book/jsonrpc/txpool.md +++ b/docs/vocs/docs/pages/jsonrpc/txpool.mdx @@ -1,3 +1,7 @@ +--- +description: API for inspecting the transaction pool. +--- + # `txpool` Namespace The `txpool` API allows you to inspect the transaction pool. @@ -9,7 +13,7 @@ Returns the details of all transactions currently pending for inclusion in the n See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool-content) for more details | Client | Method invocation | -|--------|----------------------------------------------| +| ------ | -------------------------------------------- | | RPC | `{"method": "txpool_content", "params": []}` | ## `txpool_contentFrom` @@ -19,7 +23,7 @@ Retrieves the transactions contained within the txpool, returning pending as wel See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool-contentfrom) for more details | Client | Method invocation | -|--------|---------------------------------------------------------| +| ------ | ------------------------------------------------------- | | RPC | `{"method": "txpool_contentFrom", "params": [address]}` | ## `txpool_inspect` @@ -29,7 +33,7 @@ Returns a summary of all the transactions currently pending for inclusion in the See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool-inspect) for more details | Client | Method invocation | -|--------|----------------------------------------------| +| ------ | -------------------------------------------- | | RPC | `{"method": "txpool_inspect", "params": []}` | ## `txpool_status` @@ -39,5 +43,5 @@ Returns the number of transactions currently pending for inclusion in the next b See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool-status) for more details | Client | Method invocation | -|--------|---------------------------------------------| -| RPC | `{"method": "txpool_status", "params": []}` | \ No newline at end of file +| ------ | ------------------------------------------- | +| RPC | `{"method": "txpool_status", "params": []}` | diff --git a/book/jsonrpc/web3.md b/docs/vocs/docs/pages/jsonrpc/web3.mdx similarity index 83% rename from book/jsonrpc/web3.md rename to docs/vocs/docs/pages/jsonrpc/web3.mdx index 8221e5c2507..f1eb68bcafe 100644 --- a/book/jsonrpc/web3.md +++ b/docs/vocs/docs/pages/jsonrpc/web3.mdx @@ -1,3 +1,7 @@ +--- +description: Web3 API utility methods for Ethereum clients. +--- + # `web3` Namespace The `web3` API provides utility functions for the web3 client. @@ -6,9 +10,8 @@ The `web3` API provides utility functions for the web3 client. Get the web3 client version. 
- | Client | Method invocation | -|--------|------------------------------------| +| ------ | ---------------------------------- | | RPC | `{"method": "web3_clientVersion"}` | ### Example @@ -23,7 +26,7 @@ Get the web3 client version. Get the Keccak-256 hash of the given data. | Client | Method invocation | -|--------|----------------------------------------------| +| ------ | -------------------------------------------- | | RPC | `{"method": "web3_sha3", "params": [bytes]}` | ### Example @@ -36,4 +39,4 @@ Get the Keccak-256 hash of the given data. ```js // > {"jsonrpc":"2.0","id":1,"method":"web3_sha3","params":["0x7275737420697320617765736f6d65"]} {"jsonrpc":"2.0","id":1,"result":"0xe421b3428564a5c509ac118bad93a3b84485ec3f927e214b0c4c23076d4bc4e0"} -``` \ No newline at end of file +``` diff --git a/book/intro.md b/docs/vocs/docs/pages/overview.mdx similarity index 71% rename from book/intro.md rename to docs/vocs/docs/pages/overview.mdx index 6abd3da7acf..e467dacc03f 100644 --- a/book/intro.md +++ b/docs/vocs/docs/pages/overview.mdx @@ -1,15 +1,14 @@ -# Reth Book -_Documentation for Reth users and developers._ +--- +description: Reth - A secure, performant, and modular blockchain SDK and Ethereum node. +--- -[![Telegram Chat][tg-badge]][tg-url] +# Reth [Documentation for Reth users and developers] Reth (short for Rust Ethereum, [pronunciation](https://twitter.com/kelvinfichter/status/1597653609411268608)) is an **Ethereum full node implementation that is focused on being user-friendly, highly modular, as well as being fast and efficient.** Reth is production ready, and suitable for usage in mission-critical environments such as staking or high-uptime services. We also actively recommend that professional node operators switch to Reth in production for performance and cost reasons in use cases where high performance with great margins is required, such as RPC, MEV, Indexing, Simulations, and P2P activities. - - - +![Reth](https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-prod.png) ## What is this about? @@ -60,8 +59,9 @@ We envision that Reth will be configurable enough for the tradeoffs that each te ## Who is this for? Reth is a new Ethereum full node that allows users to sync and interact with the entire blockchain, including its historical state if in archive mode. -- Full node: It can be used as a full node, which stores and processes the entire blockchain, validates blocks and transactions, and participates in the consensus process. -- Archive node: It can also be used as an archive node, which stores the entire history of the blockchain and is useful for applications that need access to historical data. + +- Full node: It can be used as a full node, which stores and processes the entire blockchain, validates blocks and transactions, and participates in the consensus process. +- Archive node: It can also be used as an archive node, which stores the entire history of the blockchain and is useful for applications that need access to historical data. As a data engineer/analyst, or as a data indexer, you'll want to use Archive mode. For all other use cases where historical access is not needed, you can use Full mode. @@ -79,22 +79,36 @@ We have completed an audit of the [Reth v1.0.0-rc.2](https://github.com/paradigm [Revm](https://github.com/bluealloy/revm) (the EVM used in Reth) underwent an audit with [Guido Vranken](https://twitter.com/guidovranken) (#1 [Ethereum Bug Bounty](https://ethereum.org/en/bug-bounty)). We will publish the results soon.
+## Reth Metrics + +We operate several public Reth nodes across different networks. You can monitor their performance metrics through our public Grafana dashboards: + +| Name | Chain ID | Type | Grafana | +| -------- | -------- | ------- | ---------------------------------------------------------------------------------- | +| Ethereum | 1 | Full | [View](https://reth.ithaca.xyz/public-dashboards/23ceb3bd26594e349aaaf2bcf336d0d4) | +| Ethereum | 1 | Archive | [View](https://reth.ithaca.xyz/public-dashboards/a49fa110dc9149298fa6763d5c89c8c0) | +| Base | 8453 | Archive | [View](https://reth.ithaca.xyz/public-dashboards/b3e9f2e668ee4b86960b7fac691b5e64) | +| OP | 10 | Archive | [View](https://reth.ithaca.xyz/public-dashboards/aa32f6c39a664f9aa371399b59622527) | + +:::tip +Want to set up metrics for your own Reth node? Check out our [monitoring guide](/run/monitoring) to learn how to configure Prometheus metrics and build your own dashboards. +::: ## Sections Here are some useful sections to jump to: -- Install Reth by following the [guide](./installation/installation.md). -- Sync your node on any [official network](./run/run-a-node.md). -- View [statistics and metrics](./run/observability.md) about your node. -- Query the [JSON-RPC](./jsonrpc/intro.md) using Foundry's `cast` or `curl`. -- Set up your [development environment and contribute](./developers/contribute.md)! +- Install Reth by following the [guide](/installation/overview). +- Sync your node on any [official network](/run/overview). +- View [statistics and metrics](/run/monitoring) about your node. +- Query the [JSON-RPC](/jsonrpc/intro) using Foundry's `cast` or `curl`. +- Set up your [development environment and contribute](/introduction/contributing)! -> 📖 **About this book** -> -> The book is continuously rendered [here](https://paradigmxyz.github.io/reth/)! -> You can contribute to this book on [GitHub][gh-book]. +:::note +The documentation is continuously rendered [here](https://reth.rs)! +You can contribute to the docs on [GitHub][gh-docs]. +::: [tg-badge]: https://img.shields.io/endpoint?color=neon&logo=telegram&label=chat&url=https%3A%2F%2Ftg.sumanjay.workers.dev%2Fparadigm%5Freth [tg-url]: https://t.me/paradigm_reth -[gh-book]: https://github.com/paradigmxyz/reth/tree/main/book +[gh-docs]: https://github.com/paradigmxyz/reth/tree/main/docs/vocs diff --git a/book/run/config.md b/docs/vocs/docs/pages/run/configuration.mdx similarity index 90% rename from book/run/config.md rename to docs/vocs/docs/pages/run/configuration.mdx index bb28d855de8..8f34cfc691f 100644 --- a/book/run/config.md +++ b/docs/vocs/docs/pages/run/configuration.mdx @@ -1,32 +1,36 @@ +--- +description: How to configure Reth using reth.toml and its options. +--- + # Configuring Reth Reth places a configuration file named `reth.toml` in the data directory specified when starting the node. It is written in the [TOML] format.
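For orientation, the file can also be supplied explicitly when starting the node (a minimal sketch; `--config` and `--datadir` are standard `reth node` flags, and the paths shown are illustrative placeholders):

```bash
# Point reth at an explicit data directory and configuration file
# (both paths below are illustrative; adjust them to your setup)
reth node \
    --datadir /data/reth \
    --config /data/reth/reth.toml
```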
The default data directory is platform dependent: -- Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` -- Windows: `{FOLDERID_RoamingAppData}/reth/` -- macOS: `$HOME/Library/Application Support/reth/` +- Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` +- Windows: `{FOLDERID_RoamingAppData}/reth/` +- macOS: `$HOME/Library/Application Support/reth/` The configuration file contains the following sections: -- [`[stages]`](#the-stages-section) -- Configuration of the individual sync stages - - [`headers`](#headers) - - [`bodies`](#bodies) - - [`sender_recovery`](#sender_recovery) - - [`execution`](#execution) - - [`account_hashing`](#account_hashing) - - [`storage_hashing`](#storage_hashing) - - [`merkle`](#merkle) - - [`transaction_lookup`](#transaction_lookup) - - [`index_account_history`](#index_account_history) - - [`index_storage_history`](#index_storage_history) -- [`[peers]`](#the-peers-section) - - [`connection_info`](#connection_info) - - [`reputation_weights`](#reputation_weights) - - [`backoff_durations`](#backoff_durations) -- [`[sessions]`](#the-sessions-section) -- [`[prune]`](#the-prune-section) +- [`[stages]`](#the-stages-section) -- Configuration of the individual sync stages + - [`headers`](#headers) + - [`bodies`](#bodies) + - [`sender_recovery`](#sender_recovery) + - [`execution`](#execution) + - [`account_hashing`](#account_hashing) + - [`storage_hashing`](#storage_hashing) + - [`merkle`](#merkle) + - [`transaction_lookup`](#transaction_lookup) + - [`index_account_history`](#index_account_history) + - [`index_storage_history`](#index_storage_history) +- [`[peers]`](#the-peers-section) + - [`connection_info`](#connection_info) + - [`reputation_weights`](#reputation_weights) + - [`backoff_durations`](#backoff_durations) +- [`[sessions]`](#the-sessions-section) +- [`[prune]`](#the-prune-section) ## The `[stages]` section @@ -305,8 +309,8 @@ The sessions section configures the internal behavior of a single peer-to-peer c You can configure the session buffer sizes, which limit the number of pending events (incoming messages) and commands (outgoing messages) each session can hold before it starts to ignore messages. > **Note** -> -> These buffers are allocated *per peer*, which means that increasing the buffer sizes can have large impact on memory consumption. +> +> These buffers are allocated _per peer_, which means that increasing the buffer sizes can have a large impact on memory consumption. ```toml [sessions] @@ -342,10 +346,11 @@ No pruning, run as archive node. ### Example of the custom pruning configuration This configuration will: -- Run pruning every 5 blocks -- Continuously prune all transaction senders, account history and storage history before the block `head-100_000`, -i.e. keep the data for the last `100_000` blocks -- Prune all receipts before the block 1920000, i.e. keep receipts from the block 1920000 + +- Run pruning every 5 blocks +- Continuously prune all transaction senders, account history and storage history before the block `head-100_000`, + i.e. keep the data for the last `100_000` blocks +- Prune all receipts before the block 1920000, i.e. keep receipts from the block 1920000 ```toml [prune] @@ -370,6 +375,7 @@ storage_history = { distance = 100_000 } # Prune all historical storage states b ``` We can also prune receipts more granularly, using the logs filtering: + ```toml # Receipts pruning configuration by retaining only those receipts that contain logs emitted # by the specified addresses, discarding all others.
This setting is overridden by `receipts`. diff --git a/book/run/mainnet.md b/docs/vocs/docs/pages/run/ethereum.mdx similarity index 73% rename from book/run/mainnet.md rename to docs/vocs/docs/pages/run/ethereum.mdx index c4908971f69..3c488416ec9 100644 --- a/book/run/mainnet.md +++ b/docs/vocs/docs/pages/run/ethereum.mdx @@ -1,3 +1,7 @@ +--- +description: How to run Reth on Ethereum mainnet and testnets. +--- + # Running Reth on Ethereum Mainnet or testnets Reth is an [_execution client_](https://ethereum.org/en/developers/docs/nodes-and-clients/#execution-clients). After Ethereum's transition to Proof of Stake (aka the Merge) it became required to run a [_consensus client_](https://ethereum.org/en/developers/docs/nodes-and-clients/#consensus-clients) alongside your execution client in order to sync into any "post-Merge" network. This is because the Ethereum execution layer now outsources consensus to a separate component, known as the consensus client. @@ -6,12 +10,12 @@ Consensus clients decide what blocks are part of the chain, while execution clie By running both an execution client like Reth and a consensus client, such as Lighthouse 🦀 (which we will assume for this guide), you can effectively contribute to the Ethereum network and participate in the consensus process, even if you don't intend to run validators. -| Client | Role | -|-------------|--------------------------------------------------| -| Execution | Validates transactions and blocks | -| | (checks their validity and global state) | -| Consensus | Determines which blocks are part of the chain | -| | (makes consensus decisions) | +| Client | Role | +| --------- | --------------------------------------------- | +| Execution | Validates transactions and blocks | +| | (checks their validity and global state) | +| Consensus | Determines which blocks are part of the chain | +| | (makes consensus decisions) | ## Running the Reth Node @@ -24,15 +28,22 @@ reth node ``` And to start the full node, run: + ```bash reth node --full ``` -On differences between archive and full nodes, see [Pruning & Full Node](./pruning.md#basic-concepts) section. +On differences between archive and full nodes, see the [Pruning & Full Node](/run/faq/pruning#basic-concepts) section. + +:::note +These commands will not open any HTTP/WS ports by default. + +You can change this by adding the `--http`, `--ws` flags, respectively, and using the `--http.api` and `--ws.api` flags to enable various [JSON-RPC APIs](/jsonrpc/intro). -> Note that these commands will not open any HTTP/WS ports by default. You can change this by adding the `--http`, `--ws` flags, respectively and using the `--http.api` and `--ws.api` flags to enable various [JSON-RPC APIs](../jsonrpc/intro.md). For more commands, see the [`reth node` CLI reference](../cli/reth/node.md). +For more commands, see the [`reth node` CLI reference](/cli/cli). +::: -The EL <> CL communication happens over the [Engine API](https://github.com/ethereum/execution-apis/blob/main/src/engine/common.md), which is by default exposed at `http://localhost:8551`. The connection is authenticated over JWT using a JWT secret which is auto-generated by Reth and placed in a file called `jwt.hex` in the data directory, which on Linux by default is `$HOME/.local/share/reth/` (`/Users//Library/Application Support/reth/mainnet/jwt.hex` in Mac). +The EL \<> CL communication happens over the [Engine API](https://github.com/ethereum/execution-apis/blob/main/src/engine/common), which is by default exposed at `http://localhost:8551`.
The connection is authenticated over JWT using a JWT secret which is auto-generated by Reth and placed in a file called `jwt.hex` in the data directory, which on Linux by default is `$HOME/.local/share/reth/` (`/Users//Library/Application Support/reth/mainnet/jwt.hex` in Mac). You can override this path using the `--authrpc.jwtsecret` option. You MUST use the same JWT secret in BOTH Reth and the chosen Consensus Layer. If you want to override the address or port, you can use the `--authrpc.addr` and `--authrpc.port` options, respectively. @@ -62,24 +73,24 @@ lighthouse bn \ If you don't intend on running validators on your node you can add: -``` bash +```bash --disable-deposit-contract-sync ``` -The `--checkpoint-sync-url` argument value can be replaced with any checkpoint sync endpoint from a [community maintained list](https://eth-clients.github.io/checkpoint-sync-endpoints/#mainnet). +The `--checkpoint-sync-url` argument value can be replaced with any checkpoint sync endpoint from a [community maintained list](https://eth-clients.github.io/checkpoint-sync-endpoints/#mainnet). Your Reth node should start receiving "fork choice updated" messages, and begin syncing the chain. ## Verify the chain is growing You can easily verify that by inspecting the logs, and seeing that headers are arriving in Reth. Sit back now and wait for the stages to run! -In the meantime, consider setting up [observability](./observability.md) to monitor your node's health or [test the JSON RPC API](../jsonrpc/intro.md). +In the meantime, consider setting up [observability](/run/monitoring) to monitor your node's health or [test the JSON RPC API](/jsonrpc/intro). - +{/* TODO: Add more logs to help node operators debug any weird CL to EL messages! */} -[installation]: ./../installation/installation.md +[installation]: ./../installation/installation [docs]: https://github.com/paradigmxyz/reth/tree/main/docs -[metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics.md#current-metrics +[metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics#current-metrics ## Running without a Consensus Layer @@ -90,7 +101,8 @@ We provide a method for running Reth without a Consensus Layer via the `--debug. You can use `--debug.etherscan` to run Reth with a fake consensus client that advances the chain using recent blocks on Etherscan. This requires an Etherscan API key (set via `ETHERSCAN_API_KEY` environment variable). Optionally, specify a custom API URL with `--debug.etherscan `. Example: + ```bash export ETHERSCAN_API_KEY=your_api_key_here reth node --debug.etherscan -``` \ No newline at end of file +``` diff --git a/docs/vocs/docs/pages/run/ethereum/snapshots.mdx b/docs/vocs/docs/pages/run/ethereum/snapshots.mdx new file mode 100644 index 00000000000..116d4359e53 --- /dev/null +++ b/docs/vocs/docs/pages/run/ethereum/snapshots.mdx @@ -0,0 +1 @@ +# Snapshots \ No newline at end of file diff --git a/docs/vocs/docs/pages/run/faq.mdx b/docs/vocs/docs/pages/run/faq.mdx new file mode 100644 index 00000000000..bdd0a9f68e7 --- /dev/null +++ b/docs/vocs/docs/pages/run/faq.mdx @@ -0,0 +1,11 @@ +# FAQ + +1. [Transaction Types](/run/faq/transactions) - Learn about the transaction types supported by Reth. + +2. [Pruning & Full Node](/run/faq/pruning) - Understand the differences between archive nodes, full nodes, and pruned nodes. Learn how to configure pruning options and what RPC methods are available for each node type. + +3. 
[Ports](/run/faq/ports) - Information about the network ports used by Reth for P2P communication, JSON-RPC APIs, and the Engine API for consensus layer communication. + +4. [Profiling](/run/faq/profiling) - Performance profiling techniques and tools for analyzing Reth node performance, including CPU profiling, memory analysis, and bottleneck identification. + +5. [Sync OP Mainnet](/run/faq/sync-op-mainnet) - Detailed guide for syncing a Reth node with OP Mainnet, including specific configuration requirements and considerations for the Optimism ecosystem. diff --git a/docs/vocs/docs/pages/run/faq/ports.mdx b/docs/vocs/docs/pages/run/faq/ports.mdx new file mode 100644 index 00000000000..f9a3ba9950d --- /dev/null +++ b/docs/vocs/docs/pages/run/faq/ports.mdx @@ -0,0 +1,42 @@ +--- +description: Ports used by Reth. +--- + +# Ports + +This section provides essential information about the ports used by the system, their primary purposes, and recommendations for exposure settings. + +## Peering Ports + +- **Port:** `30303` +- **Protocol:** TCP and UDP +- **Purpose:** Peering with other nodes for synchronization of blockchain data. Nodes communicate through this port to maintain network consensus and share updated information. +- **Exposure Recommendation:** This port should be exposed to enable seamless interaction and synchronization with other nodes in the network. + +## Metrics Port + +- **Port:** `9001` +- **Protocol:** TCP +- **Purpose:** This port is designated for serving metrics related to the system's performance and operation. It allows internal monitoring and data collection for analysis. +- **Exposure Recommendation:** By default, this port should not be exposed to the public. It is intended for internal monitoring and analysis purposes. + +## HTTP RPC Port + +- **Port:** `8545` +- **Protocol:** TCP +- **Purpose:** Port 8545 provides an HTTP-based Remote Procedure Call (RPC) interface. It enables external applications to interact with the blockchain by sending requests over HTTP. +- **Exposure Recommendation:** Similar to the metrics port, exposing this port to the public is not recommended by default due to security considerations. + +## WS RPC Port + +- **Port:** `8546` +- **Protocol:** TCP +- **Purpose:** Port 8546 offers a WebSocket-based Remote Procedure Call (RPC) interface. It allows real-time communication between external applications and the blockchain. +- **Exposure Recommendation:** As with the HTTP RPC port, the WS RPC port should not be exposed by default for security reasons. + +## Engine API Port + +- **Port:** `8551` +- **Protocol:** TCP +- **Purpose:** Port 8551 facilitates communication between the execution client (Reth) and the consensus layer (CL) client over the Engine API. It enables essential internal processes. +- **Exposure Recommendation:** This port is not meant to be exposed to the public by default. It should be reserved for internal communication between vital components of the system. diff --git a/book/developers/profiling.md b/docs/vocs/docs/pages/run/faq/profiling.mdx similarity index 84% rename from book/developers/profiling.md rename to docs/vocs/docs/pages/run/faq/profiling.mdx index fdae94e2d4a..123808ad2d3 100644 --- a/book/developers/profiling.md +++ b/docs/vocs/docs/pages/run/faq/profiling.mdx @@ -1,11 +1,8 @@ -# Profiling reth +--- +description: Profiling and debugging memory usage in Reth.
+--- -#### Table of Contents - - [Memory profiling](#memory-profiling) - - [Jemalloc](#jemalloc) - - [Monitoring memory usage](#monitoring-memory-usage) - - [Limiting process memory](#limiting-process-memory) - - [Understanding allocation with jeprof](#understanding-allocation-with-jeprof) +# Profiling Reth ## Memory profiling @@ -16,10 +13,11 @@ Reth is also a complex program, with many moving pieces, and it can be difficult Understanding how to profile memory usage is an extremely valuable skill when faced with this type of problem, and can quickly help shed light on the root cause of a memory leak. In this tutorial, we will be reviewing: - * How to monitor reth's memory usage, - * How to emulate a low-memory environment to lab-reproduce OOM crashes, - * How to enable `jemalloc` and its built-in memory profiling, and - * How to use `jeprof` to interpret heap profiles and identify potential root causes for a memory leak. + +- How to monitor reth's memory usage, +- How to emulate a low-memory environment to lab-reproduce OOM crashes, +- How to enable `jemalloc` and its built-in memory profiling, and +- How to use `jeprof` to interpret heap profiles and identify potential root causes for a memory leak. ### Jemalloc @@ -27,21 +25,24 @@ In this tutorial, we will be reviewing: We've seen significant performance benefits in reth when using jemalloc, but will be primarily focusing on its profiling capabilities. Jemalloc also provides tools for analyzing and visualizing the allocation profiles it generates, notably `jeprof`. - #### Enabling jemalloc in reth + Reth includes a `jemalloc` feature to explicitly use jemalloc instead of the system allocator: + ``` cargo build --features jemalloc ``` While the `jemalloc` feature does enable jemalloc, reth has an additional feature, `jemalloc-prof`, that must be used to enable heap profiling. This feature implicitly enables the `jemalloc` feature as well: + ``` cargo build --features jemalloc-prof ``` When performing a longer-running or performance-sensitive task with reth, such as a sync test or load benchmark, it's usually recommended to use the `maxperf` profile. However, the `maxperf` profile does not enable debug symbols, which are required for tools like `perf` and `jemalloc` to produce results that a human can interpret. Reth includes a performance profile with debug symbols called `profiling`. To compile reth with debug symbols, jemalloc, profiling, and a performance profile: + ``` cargo build --features jemalloc-prof --profile profiling @@ -51,19 +52,39 @@ RUSTFLAGS="-C target-cpu=native" cargo build --features jemalloc-prof --profile ### Monitoring memory usage -Reth's dashboard has a few metrics that are important when monitoring memory usage. The **Jemalloc memory** graph shows reth's memory usage. The *allocated* label shows the memory used by the reth process which cannot be reclaimed unless reth frees that memory. This metric exceeding the available system memory would cause reth to be killed by the OOM killer. -Jemalloc memory +Reth's dashboard has a few metrics that are important when monitoring memory usage. The _allocated_ label shows the memory used by the reth process which cannot be reclaimed unless reth frees that memory. This metric exceeding the available system memory would cause reth to be killed by the OOM killer.
+ +Jemalloc memory Some of reth's internal components also have metrics for the memory usage of certain data structures, usually data structures that are likely to contain many elements or may consume a lot of memory at peak load. **The bodies downloader buffer**: + +The bodies downloader buffer graph **The blockchain tree block buffer**: + +The blockchain tree block buffer graph **The transaction pool subpools**: + +The transaction pool subpool size graph One of these metrics growing beyond 2GB, for example, is likely a bug and could lead to an OOM on a low-memory machine. It isn't likely for that to happen frequently, so in the best case these metrics can be used to rule out these components from having a leak, if an OOM is occurring. @@ -81,28 +102,37 @@ See the [canonical documentation for cgroups](https://git.kernel.org/pub/scm/lin In order to use cgroups to limit process memory, sometimes it must be explicitly enabled as a kernel parameter. For example, the following line is sometimes necessary to enable cgroup memory limits on Ubuntu machines that use GRUB: + ``` GRUB_CMDLINE_LINUX_DEFAULT="cgroup_enable=memory" ``` + Then, create a named cgroup: + ``` sudo cgcreate -t $USER:$USER -a $USER:$USER -g memory:rethMemory ``` + The memory limit for the named cgroup can be set in `/sys/fs/cgroup/memory`. This, for example, sets an 8-gigabyte memory limit: + ``` echo 8G > /sys/fs/cgroup/memory/rethMemory/memory.limit_in_bytes ``` + If the intention of setting up the cgroup is to strictly limit memory and simulate OOMs, a high amount of swap may prevent those OOMs from happening. To check swap, use `free -m`: + ``` ubuntu@bench-box:~/reth$ free -m total used free shared buff/cache available Mem: 257668 10695 218760 12 28213 244761 Swap: 8191 159 8032 ``` + If this is a problem, it may be worth either adjusting the system swappiness or disabling swap overall. Finally, `cgexec` can be used to run reth under the cgroup: + ``` cgexec -g memory:rethMemory reth node ``` @@ -111,11 +141,13 @@ cgexec -g memory:rethMemory reth node When reth is built with the `jemalloc-prof` feature and debug symbols, the profiling still needs to be configured and enabled at runtime. This is done with the `_RJEM_MALLOC_CONF` environment variable. Take the following command to launch reth with jemalloc profiling enabled: + ``` _RJEM_MALLOC_CONF=prof:true,lg_prof_interval:32,lg_prof_sample:19 reth node ``` If reth is not built properly, you will see this when you try to run reth: + ``` ~/p/reth (dan/managing-memory)> _RJEM_MALLOC_CONF=prof:true,lg_prof_interval:32,lg_prof_sample:19 reth node : Invalid conf pair: prof:true diff --git a/book/run/pruning.md b/docs/vocs/docs/pages/run/faq/pruning.mdx similarity index 92% rename from book/run/pruning.md rename to docs/vocs/docs/pages/run/faq/pruning.mdx index 25d11b4e46e..2a800b7bae8 100644 --- a/book/run/pruning.md +++ b/docs/vocs/docs/pages/run/faq/pruning.mdx @@ -1,8 +1,14 @@ +--- +description: Pruning and full node options in Reth. +--- + # Pruning & Full Node -> Pruning and full node are new features of Reth, -> and we will be happy to hear about your experience using them either -> on [GitHub](https://github.com/paradigmxyz/reth/issues) or in the [Telegram group](https://t.me/paradigm_reth).
+:::info +Pruning and full node are new features of Reth, +and we will be happy to hear about your experience using them either +on [GitHub](https://github.com/paradigmxyz/reth/issues) or in the [Telegram group](https://t.me/paradigm_reth). +::: By default, Reth runs as an archive node. Such nodes have all historical blocks and the state at each of these blocks available for querying and tracing. @@ -12,31 +18,31 @@ the steps for running Reth as a full node, what caveats to expect and how to con ## Basic concepts -- Archive node – Reth node that has all historical data from genesis. -- Pruned node – Reth node that has its historical data pruned partially or fully through - a [custom configuration](./config.md#the-prune-section). -- Full Node – Reth node that has the latest state and historical data for only the last 10064 blocks available - for querying in the same way as an archive node. +- Archive node – Reth node that has all historical data from genesis. +- Pruned node – Reth node that has its historical data pruned partially or fully through + a [custom configuration](/run/configuration#the-prune-section). +- Full Node – Reth node that has the latest state and historical data for only the last 10064 blocks available + for querying in the same way as an archive node. -The node type that was chosen when first [running a node](./run-a-node.md) **cannot** be changed after +The node type that was chosen when first [running a node](/run/overview) **cannot** be changed after the initial sync. Turning Archive into Pruned, or Pruned into Full is not supported. ## Modes ### Archive Node -Default mode, follow the steps from the previous chapter on [how to run on mainnet or official testnets](./mainnet.md). +Default mode, follow the steps from the previous chapter on [how to run on mainnet or official testnets](/run/ethereum). ### Pruned Node -To run Reth as a pruned node configured through a [custom configuration](./config.md#the-prune-section), +To run Reth as a pruned node configured through a [custom configuration](/run/configuration#the-prune-section), modify the `reth.toml` file and run Reth in the same way as archive node by following the steps from -the previous chapter on [how to run on mainnet or official testnets](./mainnet.md). +the previous chapter on [how to run on mainnet or official testnets](/run/ethereum). ### Full Node To run Reth as a full node, follow the steps from the previous chapter on -[how to run on mainnet or official testnets](./mainnet.md), and add a `--full` flag. For example: +[how to run on mainnet or official testnets](/run/ethereum), and add a `--full` flag. For example: ```bash reth node \ @@ -95,21 +101,21 @@ storage_history = { distance = 10_064 } Meaning, it prunes: -- Account History and Storage History up to the last 10064 blocks -- All of Sender Recovery data. The caveat is that it's pruned gradually after the initial sync - is completed, so the disk space is reclaimed slowly. -- Receipts up to the last 10064 blocks, preserving all receipts with the logs from Beacon Deposit Contract +- Account History and Storage History up to the last 10064 blocks +- All of Sender Recovery data. The caveat is that it's pruned gradually after the initial sync + is completed, so the disk space is reclaimed slowly. 
+- Receipts up to the last 10064 blocks, preserving all receipts with the logs from Beacon Deposit Contract ## RPC support -As it was mentioned in the [pruning configuration chapter](./config.md#the-prune-section), there are several segments which can be pruned +As it was mentioned in the [pruning configuration chapter](/run/configuration#the-prune-section), there are several segments which can be pruned independently of each other: -- Sender Recovery -- Transaction Lookup -- Receipts -- Account History -- Storage History +- Sender Recovery +- Transaction Lookup +- Receipts +- Account History +- Storage History Pruning of each of these segments disables different RPC methods, because the historical data or lookup indexes become unavailable. @@ -215,8 +221,8 @@ The following tables describe RPC methods available in the full node. The following tables describe the requirements for prune segments, per RPC method: -- ✅ – if the segment is pruned, the RPC method still works -- ❌ - if the segment is pruned, the RPC method doesn't work anymore +- ✅ – if the segment is pruned, the RPC method still works +- ❌ - if the segment is pruned, the RPC method doesn't work anymore #### `debug` namespace diff --git a/book/run/sync-op-mainnet.md b/docs/vocs/docs/pages/run/faq/sync-op-mainnet.mdx similarity index 70% rename from book/run/sync-op-mainnet.md rename to docs/vocs/docs/pages/run/faq/sync-op-mainnet.mdx index 0e2090acbcb..e895331288e 100644 --- a/book/run/sync-op-mainnet.md +++ b/docs/vocs/docs/pages/run/faq/sync-op-mainnet.mdx @@ -1,13 +1,17 @@ +--- +description: Syncing Reth with OP Mainnet and Bedrock state. +--- + # Sync OP Mainnet To sync OP mainnet, Bedrock state needs to be imported as a starting point. There are currently two ways: -* Minimal bootstrap **(recommended)**: only state snapshot at Bedrock block is imported without any OVM historical data. -* Full bootstrap **(not recommended)**: state, blocks and receipts are imported. *Not recommended for now: [storage consistency issue](https://github.com/paradigmxyz/reth/pull/11099) tldr: sudden crash may break the node +- Minimal bootstrap **(recommended)**: only state snapshot at Bedrock block is imported without any OVM historical data. +- Full bootstrap **(not recommended)**: state, blocks and receipts are imported. \*Not recommended for now: [storage consistency issue](https://github.com/paradigmxyz/reth/pull/11099) tldr: sudden crash may break the node ## Minimal bootstrap (recommended) -**The state snapshot at Bedrock block is required.** It can be exported from [op-geth](https://github.com/testinprod-io/op-erigon/blob/pcw109550/bedrock-db-migration/bedrock-migration.md#export-state) (**.jsonl**) or downloaded directly from [here](https://mega.nz/file/GdZ1xbAT#a9cBv3AqzsTGXYgX7nZc_3fl--tcBmOAIwIA5ND6kwc). +**The state snapshot at Bedrock block is required.** It can be exported from [op-geth](https://github.com/testinprod-io/op-erigon/blob/pcw109550/bedrock-db-migration/bedrock-migration#export-state) (**.jsonl**) or downloaded directly from [here](https://mega.nz/file/GdZ1xbAT#a9cBv3AqzsTGXYgX7nZc_3fl--tcBmOAIwIA5ND6kwc). Import the state snapshot @@ -21,12 +25,11 @@ Sync the node to a recent finalized block (e.g. 
125200000) to catch up close to $ op-reth node --chain optimism --datadir op-mainnet --debug.tip 0x098f87b75c8b861c775984f9d5dbe7b70cbbbc30fc15adb03a5044de0144f2d0 # block #125200000 ``` - ## Full bootstrap (not recommended) **Not recommended for now**: [storage consistency issue](https://github.com/paradigmxyz/reth/pull/11099) tldr: sudden crash may break the node. -### Import state +### Import state To sync OP mainnet, the Bedrock datadir needs to be imported to use as a starting point. Blocks lower than the OP mainnet Bedrock fork are built on the OVM and cannot be executed on the EVM. @@ -35,15 +38,15 @@ execution in reth's sync pipeline. Importing OP mainnet Bedrock datadir requires exported data: -- Blocks [and receipts] below Bedrock -- State snapshot at first Bedrock block +- Blocks [and receipts] below Bedrock +- State snapshot at first Bedrock block ### Manual Export Steps -The `op-geth` Bedrock datadir can be downloaded from . +The `op-geth` Bedrock datadir can be downloaded from [https://datadirs.optimism.io](https://datadirs.optimism.io). To export the OVM chain from `op-geth`, clone the `testinprod-io/op-geth` repo and checkout -. Commands to export blocks, receipts and state dump can be +[testinprod-io/op-geth#1](https://github.com/testinprod-io/op-geth/pull/1). Commands to export blocks, receipts and state dump can be found in `op-geth/migrate.sh`. ### Manual Import Steps @@ -64,7 +67,7 @@ This step is optional. To run a full node, skip this step. If however receipts a corresponding transactions must already be imported (see [step 1](#1-import-blocks)). Imports a `.rlp` file of receipts that has been exported with the command specified in - (command for exporting receipts uses custom RLP-encoding). +[testinprod-io/op-geth#1](https://github.com/testinprod-io/op-geth/pull/1) (command for exporting receipts uses custom RLP-encoding). Import of >100 million OVM receipts, from genesis to Bedrock, completes in 30 minutes. @@ -86,7 +89,7 @@ $ op-reth init-state --chain optimism ## Sync from Bedrock to tip Running the node with `--debug.tip `syncs the node without help from CL until a fixed tip. The -block hash can be taken from the latest block on . +block hash can be taken from the latest block on [https://optimistic.etherscan.io](https://optimistic.etherscan.io). Use `op-node` to track the tip. Start `op-node` with `--syncmode=execution-layer` and `--l2.enginekind=reth`. If `op-node`'s RPC connection to L1 is over localhost, `--l1.trustrpc` can be set to improve performance. diff --git a/book/run/transactions.md b/docs/vocs/docs/pages/run/faq/transactions.mdx similarity index 97% rename from book/run/transactions.md rename to docs/vocs/docs/pages/run/faq/transactions.mdx index edb3a24d76f..a4d19df38d5 100644 --- a/book/run/transactions.md +++ b/docs/vocs/docs/pages/run/faq/transactions.mdx @@ -1,3 +1,7 @@ +--- +description: Overview of Ethereum transaction types in Reth. +--- + # Transaction types Over time, the Ethereum network has undergone various upgrades and improvements to enhance transaction efficiency, security, and user experience.
Four significant transaction types that have evolved are: diff --git a/book/run/troubleshooting.md b/docs/vocs/docs/pages/run/faq/troubleshooting.mdx similarity index 50% rename from book/run/troubleshooting.md rename to docs/vocs/docs/pages/run/faq/troubleshooting.mdx index 7b8ec6ba19c..08b9c6fbe5d 100644 --- a/book/run/troubleshooting.md +++ b/docs/vocs/docs/pages/run/faq/troubleshooting.mdx @@ -1,102 +1,107 @@ +--- +description: Troubleshooting common Reth node and database issues. +--- + # Troubleshooting This page explains how to deal with the most common issues. -- [Troubleshooting](#troubleshooting) - - [Database](#database) - - [Docker](#docker) - - [Error code 13](#error-code-13) - - [Slow database inserts and updates](#slow-database-inserts-and-updates) - - [Compact the database](#compact-the-database) - - [Re-sync from scratch](#re-sync-from-scratch) - - [Database write error](#database-write-error) - - [Concurrent database access error (using containers/Docker)](#concurrent-database-access-error-using-containersdocker) - - [Hardware Performance Testing](#hardware-performance-testing) - - [Disk Speed Testing with IOzone](#disk-speed-testing-with-iozone) - +- [Troubleshooting](#troubleshooting) + - [Database](#database) + - [Docker](#docker) + - [Error code 13](#error-code-13) + - [Slow database inserts and updates](#slow-database-inserts-and-updates) + - [Compact the database](#compact-the-database) + - [Re-sync from scratch](#re-sync-from-scratch) + - [Database write error](#database-write-error) + - [Concurrent database access error (using containers/Docker)](#concurrent-database-access-error-using-containersdocker) + - [Hardware Performance Testing](#hardware-performance-testing) + - [Disk Speed Testing with IOzone](#disk-speed-testing-with-iozone) ## Database -### Docker +### Docker Externally accessing a `datadir` inside a named docker volume will usually come with folder/file ownership/permissions issues. **It is not recommended** to use the path to the named volume as it will trigger an error code 13. `RETH_DB_PATH: /var/lib/docker/volumes/named_volume/_data/eth/db cargo r --examples db-access --path ` is **DISCOURAGED** and a mounted volume with the right permissions should be used instead. -### Error code 13 +### Error code 13 `the environment opened in read-only code: 13` Externally accessing a database in a read-only folder is not supported, **UNLESS** there's no `mdbx.lck` present, and it's called with `exclusive` on calling `open_db_read_only`, meaning that there's no node syncing concurrently. -If the error persists, ensure that you have the right `rx` permissions on the `datadir` **and its parent** folders. Eg. the following command should succeed: +If the error persists, ensure that you have the right `rx` permissions on the `datadir` **and its parent** folders. E.g. the following command should succeed: ```bash,ignore stat /full/path/datadir ``` - ### Slow database inserts and updates If you're: + 1. Running behind the tip -2. Have slow canonical commit time according to the `Canonical Commit Latency Time` chart on [Grafana dashboard](./observability.md#prometheus--grafana) (more than 2-3 seconds) -3. Seeing warnings in your logs such as - ```console - 2023-11-08T15:17:24.789731Z WARN providers::db: Transaction insertion took too long block_number=18528075 tx_num=2150227643 hash=0xb7de1d6620efbdd3aa8547c47a0ff09a7fd3e48ba3fd2c53ce94c6683ed66e7c elapsed=6.793759034s - ```
+2. Have slow canonical commit time according to the `Canonical Commit Latency Time` chart on [Grafana dashboard](/run/monitoring#prometheus--grafana) (more than 2-3 seconds) +3. Seeing warnings in your logs such as + ```console + 2023-11-08T15:17:24.789731Z WARN providers::db: Transaction insertion took too long block_number=18528075 tx_num=2150227643 hash=0xb7de1d6620efbdd3aa8547c47a0ff09a7fd3e48ba3fd2c53ce94c6683ed66e7c elapsed=6.793759034s + ``` then most likely you're experiencing issues with the [database freelist](https://github.com/paradigmxyz/reth/issues/5228). -To confirm it, check if the values on the `Freelist` chart on [Grafana dashboard](./observability.md#prometheus--grafana) +To confirm it, check if the values on the `Freelist` chart on [Grafana dashboard](/run/monitoring#prometheus--grafana) are greater than 10M. Currently, there are two main ways to fix this issue. - #### Compact the database + It will take around 5-6 hours and require **additional** disk space located on the same or different drive -equal to the [freshly synced node](../installation/installation.md#hardware-requirements). +equal to the [freshly synced node](/installation/overview#hardware-requirements). 1. Clone Reth - ```bash - git clone https://github.com/paradigmxyz/reth - cd reth - ``` + ```bash + git clone https://github.com/paradigmxyz/reth + cd reth + ``` 2. Build database debug tools - ```bash - make db-tools - ``` + ```bash + make db-tools + ``` 3. Run compaction (this step will take 5-6 hours, depending on the I/O speed) - ```bash - ./db-tools/mdbx_copy -c $(reth db path) reth_compact.dat - ``` + ```bash + ./db-tools/mdbx_copy -c $(reth db path) reth_compact.dat + ``` 4. Stop Reth 5. Backup original database - ```bash - mv $(reth db path)/mdbx.dat reth_old.dat - ``` + ```bash + mv $(reth db path)/mdbx.dat reth_old.dat + ``` 6. Move compacted database in place of the original database - ```bash - mv reth_compact.dat $(reth db path)/mdbx.dat - ``` + ```bash + mv reth_compact.dat $(reth db path)/mdbx.dat + ``` 7. Start Reth 8. Confirm that the values on the `Freelist` chart are near zero and the values on the `Canonical Commit Latency Time` chart -is less than 1 second. + are less than 1 second. 9. Delete original database - ```bash - rm reth_old.dat - ``` + ```bash + rm reth_old.dat + ``` #### Re-sync from scratch + It will take the same time as initial sync. 1. Stop Reth -2. Drop the database using [`reth db drop`](../cli/reth/db/drop.md) +2. Drop the database using [`reth db drop`](/cli/reth/db/drop) 3. Start reth ### Database write error -If you encounter an irrecoverable database-related errors, in most of the cases it's related to the RAM/NVMe/SSD you use. For example: +If you encounter irrecoverable database-related errors, in most cases it's related to the RAM/NVMe/SSD you use. For example: + ```console Error: A stage encountered an irrecoverable error. @@ -132,6 +137,7 @@ If you encounter an error while accessing the database from multiple processes a ```console mdbx:0: panic: Assertion `osal_rdt_unlock() failed: err 1' failed. ``` + or ```console @@ -151,61 +157,71 @@ If your hardware performance is significantly lower than these reference numbers ### Disk Speed Testing with [IOzone](https://linux.die.net/man/1/iozone) 1.
Test disk speed: - ```bash - iozone -e -t1 -i0 -i2 -r1k -s1g /tmp - ``` - Reference numbers (on Latitude c3.large.x86): - - ```console - Children see throughput for 1 initial writers = 907733.81 kB/sec - Parent sees throughput for 1 initial writers = 907239.68 kB/sec - Children see throughput for 1 rewriters = 1765222.62 kB/sec - Parent sees throughput for 1 rewriters = 1763433.35 kB/sec - Children see throughput for 1 random readers = 1557497.38 kB/sec - Parent sees throughput for 1 random readers = 1554846.58 kB/sec - Children see throughput for 1 random writers = 984428.69 kB/sec - Parent sees throughput for 1 random writers = 983476.67 kB/sec - ``` + + ```bash + iozone -e -t1 -i0 -i2 -r1k -s1g /tmp + ``` + + Reference numbers (on Latitude c3.large.x86): + + ```console + Children see throughput for 1 initial writers = 907733.81 kB/sec + Parent sees throughput for 1 initial writers = 907239.68 kB/sec + Children see throughput for 1 rewriters = 1765222.62 kB/sec + Parent sees throughput for 1 rewriters = 1763433.35 kB/sec + Children see throughput for 1 random readers = 1557497.38 kB/sec + Parent sees throughput for 1 random readers = 1554846.58 kB/sec + Children see throughput for 1 random writers = 984428.69 kB/sec + Parent sees throughput for 1 random writers = 983476.67 kB/sec + ``` + 2. Test disk speed with memory-mapped files: - ```bash - iozone -B -G -e -t1 -i0 -i2 -r1k -s1g /tmp - ``` - Reference numbers (on Latitude c3.large.x86): - - ```console - Children see throughput for 1 initial writers = 56471.06 kB/sec - Parent sees throughput for 1 initial writers = 56365.14 kB/sec - Children see throughput for 1 rewriters = 241650.69 kB/sec - Parent sees throughput for 1 rewriters = 239067.96 kB/sec - Children see throughput for 1 random readers = 6833161.00 kB/sec - Parent sees throughput for 1 random readers = 5597659.65 kB/sec - Children see throughput for 1 random writers = 220248.53 kB/sec - Parent sees throughput for 1 random writers = 219112.26 kB/sec + + ```bash + iozone -B -G -e -t1 -i0 -i2 -r1k -s1g /tmp + ``` + + Reference numbers (on Latitude c3.large.x86): + + ```console + Children see throughput for 1 initial writers = 56471.06 kB/sec + Parent sees throughput for 1 initial writers = 56365.14 kB/sec + Children see throughput for 1 rewriters = 241650.69 kB/sec + Parent sees throughput for 1 rewriters = 239067.96 kB/sec + Children see throughput for 1 random readers = 6833161.00 kB/sec + Parent sees throughput for 1 random readers = 5597659.65 kB/sec + Children see throughput for 1 random writers = 220248.53 kB/sec + Parent sees throughput for 1 random writers = 219112.26 kB/sec ``` ### RAM Speed and Health Testing 1. Check RAM speed with [lshw](https://linux.die.net/man/1/lshw): - ```bash - sudo lshw -short -C memory - ``` - Look for the frequency in the output. Reference output: - - ```console - H/W path Device Class Description - ================================================================ - /0/24/0 memory 64GiB DIMM DDR4 Synchronous Registered (Buffered) 3200 MHz (0.3 ns) - /0/24/1 memory 64GiB DIMM DDR4 Synchronous Registered (Buffered) 3200 MHz (0.3 ns) - ... - ``` + + ```bash + sudo lshw -short -C memory + ``` + + Look for the frequency in the output. Reference output: + + ```console + H/W path Device Class Description + ================================================================ + /0/24/0 memory 64GiB DIMM DDR4 Synchronous Registered (Buffered) 3200 MHz (0.3 ns) + /0/24/1 memory 64GiB DIMM DDR4 Synchronous Registered (Buffered) 3200 MHz (0.3 ns) + ... 
+ ``` 2. Test RAM health with [memtester](https://linux.die.net/man/8/memtester): - ```bash - sudo memtester 10G - ``` - This will take a while. You can test with a smaller amount first: - - ```bash - sudo memtester 1G 1 - ``` - All checks should report "ok". + + ```bash + sudo memtester 10G + ``` + + This will take a while. You can test with a smaller amount first: + + ```bash + sudo memtester 1G 1 + ``` + + All checks should report "ok". diff --git a/book/run/observability.md b/docs/vocs/docs/pages/run/monitoring.mdx similarity index 92% rename from book/run/observability.md rename to docs/vocs/docs/pages/run/monitoring.mdx index aa4e9387a0b..d09b795dc4b 100644 --- a/book/run/observability.md +++ b/docs/vocs/docs/pages/run/monitoring.mdx @@ -1,3 +1,7 @@ +--- +description: Reth observability and metrics with Prometheus and Grafana. +--- + # Observability with Prometheus & Grafana Reth exposes a number of metrics which can be enabled by adding the `--metrics` flag: @@ -41,6 +45,7 @@ brew install grafana ### Linux #### Debian/Ubuntu + ```bash # Install Prometheus # Visit https://prometheus.io/download/ for the latest version @@ -58,6 +63,7 @@ sudo apt-get install grafana ``` #### Fedora/RHEL/CentOS + ```bash # Install Prometheus # Visit https://prometheus.io/download/ for the latest version @@ -74,16 +80,18 @@ sudo dnf install -y https://dl.grafana.com/oss/release/grafana-latest-1.x86_64.r ### Windows #### Using Chocolatey + ```powershell choco install prometheus choco install grafana ``` #### Manual installation + 1. Download the latest Prometheus from [prometheus.io/download](https://prometheus.io/download/) - - Select the Windows binary (.zip) for your architecture (typically windows-amd64) + - Select the Windows binary (.zip) for your architecture (typically windows-amd64) 2. Download the latest Grafana from [grafana.com/grafana/download](https://grafana.com/grafana/download) - - Choose the Windows installer (.msi) or standalone version + - Choose the Windows installer (.msi) or standalone version 3. Extract Prometheus to a location of your choice (e.g., `C:\prometheus`) 4. Install Grafana by running the installer or extracting the standalone version 5. Configure Prometheus and Grafana to run as services if needed @@ -95,7 +103,7 @@ Then, kick off the Prometheus and Grafana services: brew services start prometheus brew services start grafana -# For Linux (systemd-based distributions) +# For Linux (systemd-based distributions) sudo systemctl start prometheus sudo systemctl start grafana-server @@ -110,9 +118,9 @@ You can find an example config for the Prometheus service in the repo here: [`et Depending on your installation you may find the config for your Prometheus service at: -- OSX: `/opt/homebrew/etc/prometheus.yml` -- Linuxbrew: `/home/linuxbrew/.linuxbrew/etc/prometheus.yml` -- Others: `/usr/local/etc/prometheus/prometheus.yml` +- OSX: `/opt/homebrew/etc/prometheus.yml` +- Linuxbrew: `/home/linuxbrew/.linuxbrew/etc/prometheus.yml` +- Others: `/usr/local/etc/prometheus/prometheus.yml` Next, open up "localhost:3000" in your browser, which is the default URL for Grafana. Here, "admin" is the default for both the username and password. @@ -130,7 +138,7 @@ In this runbook, we took you through starting the node, exposing different log l This will all be very useful to you, whether you're simply running a home node and want to keep an eye on its performance, or if you're a contributor and want to see the effect that your (or others') changes have on Reth's operations.
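Before building dashboards, it can be useful to confirm that metrics are actually being served (a small sketch; `--metrics` is the flag described on this page, and `127.0.0.1:9001` is the listen address these docs use elsewhere):

```bash
# Start reth with the metrics endpoint enabled
reth node --metrics 127.0.0.1:9001

# In another shell, spot-check the Prometheus-format output
curl -s 127.0.0.1:9001 | head -n 20
```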
-[installation]: ../installation/installation.md +[installation]: ../installation/installation [release-profile]: https://doc.rust-lang.org/cargo/reference/profiles.html#release [docs]: https://github.com/paradigmxyz/reth/tree/main/docs -[metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics.md#current-metrics +[metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics#current-metrics diff --git a/docs/vocs/docs/pages/run/networks.mdx b/docs/vocs/docs/pages/run/networks.mdx new file mode 100644 index 00000000000..1bb6593b2e4 --- /dev/null +++ b/docs/vocs/docs/pages/run/networks.mdx @@ -0,0 +1 @@ +# Networks diff --git a/book/run/optimism.md b/docs/vocs/docs/pages/run/opstack.mdx similarity index 92% rename from book/run/optimism.md rename to docs/vocs/docs/pages/run/opstack.mdx index aa85d1aa93b..86e9ad72438 100644 --- a/book/run/optimism.md +++ b/docs/vocs/docs/pages/run/opstack.mdx @@ -1,7 +1,12 @@ +--- +description: Running Reth on Optimism and OP Stack chains. +--- + # Running Reth on OP Stack chains `reth` ships with the `optimism` feature flag in several crates, including the binary, enabling support for OP Stack chains out of the box. Optimism has a small diff from the [L1 EELS][l1-el-spec], comprising the following key changes: + 1. A new transaction type, [`0x7E (Deposit)`][deposit-spec], which is used to deposit funds from L1 to L2. 1. Modifications to the `PayloadAttributes` that allow the [sequencer][sequencer] to submit transactions to the EL through the Engine API. Payloads will be built with deposit transactions at the top of the block, with the first deposit transaction always being the "L1 Info Transaction." @@ -19,6 +24,7 @@ Since 1.4.0 op-reth has built in support for all chains in the [superchain regis ## Running on Optimism You will need three things to run `op-reth`: + 1. An archival L1 node, synced to the settlement layer of the OP Stack chain you want to sync (e.g. `reth`, `geth`, `besu`, `nethermind`, etc.) 1. A rollup node (e.g. `op-node`, `magi`, `hildr`, etc.) 1. An instance of `op-reth`. @@ -40,6 +46,7 @@ This will install the `op-reth` binary to `~/.cargo/bin/op-reth`. ### Installing a Rollup Node Next, you'll need to install a [Rollup Node][rollup-node-spec], which is the equivalent of the Consensus Client on the OP Stack. Available options include: + 1. [`op-node`][op-node] 1. [`magi`][magi] 1. [`hildr`][hildr] @@ -49,12 +56,13 @@ For the sake of this tutorial, we'll use the reference implementation of the Rol ### Running `op-reth` op-reth supports additional OP Stack specific CLI arguments: + 1. `--rollup.sequencer-http ` - The sequencer endpoint to connect to. Transactions sent to the `op-reth` EL are also forwarded to this sequencer endpoint for inclusion, as the sequencer is the entity that builds blocks on OP Stack chains. 1. `--rollup.disable-tx-pool-gossip` - Disables gossiping of transactions in the mempool to peers. This can be omitted for personal nodes, though providers should always opt to enable this flag. -1. `--rollup.enable-genesis-walkback` - Disables setting the forkchoice status to tip on startup, making the `op-node` walk back to genesis and verify the integrity of the chain before starting to sync. This can be omitted unless a corruption of local chainstate is suspected. 1. `--rollup.discovery.v4` - Enables the discovery v4 protocol for peer discovery.
By default, op-reth, similar to op-geth, has discovery v5 enabled and discovery v4 disabled, whereas regular reth has discovery v4 enabled and discovery v5 disabled. First, ensure that your L1 archival node is running and synced to tip. Also make sure that the beacon node / consensus layer client is running and has http APIs enabled. Then, start `op-reth` with the `--rollup.sequencer-http` flag set to the `Base Mainnet` sequencer endpoint: + ```sh op-reth node \ --chain base \ --rollup.sequencer-http https://mainnet-sequencer.base.org \ --http \ --ws \ --authrpc.port 9551 \ --authrpc.jwtsecret /path/to/jwt.hex ``` Then, once `op-reth` has been started, start up the `op-node`: + ```sh op-node \ --network="base-mainnet" \ --l1= \ --l2=http://localhost:9551 \ --l2.jwt-secret=/path/to/jwt.hex \ --rpc.addr=0.0.0.0 \ --rpc.port=7000 \ --l1.beacon= \ --syncmode=execution-layer \ --l2.enginekind=reth ``` @@ -82,17 +91,15 @@ Consider adding the `--l1.trustrpc` flag to improve performance if the connection to L1 is over localhost. [l1-el-spec]: https://github.com/ethereum/execution-specs -[rollup-node-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node.md +[rollup-node-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node [op-geth-forkdiff]: https://op-geth.optimism.io -[sequencer]: https://github.com/ethereum-optimism/specs/blob/main/specs/background.md#sequencers +[sequencer]: https://github.com/ethereum-optimism/specs/blob/main/specs/background#sequencers [op-stack-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs -[l2-el-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md -[deposit-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/deposits.md -[derivation-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/derivation.md +[l2-el-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine +[deposit-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/deposits +[derivation-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/derivation [superchain-registry]: https://github.com/ethereum-optimism/superchain-registry - [op-node-docker]: https://console.cloud.google.com/artifacts/docker/oplabs-tools-artifacts/us/images/op-node - [reth]: https://github.com/paradigmxyz/reth [op-node]: https://github.com/ethereum-optimism/optimism/tree/develop/op-node [magi]: https://github.com/a16z/magi diff --git a/docs/vocs/docs/pages/run/opstack/op-mainnet-caveats.mdx b/docs/vocs/docs/pages/run/opstack/op-mainnet-caveats.mdx new file mode 100644 index 00000000000..94f1024dfca --- /dev/null +++ b/docs/vocs/docs/pages/run/opstack/op-mainnet-caveats.mdx @@ -0,0 +1 @@ +# Caveats OP-Mainnet \ No newline at end of file diff --git a/docs/vocs/docs/pages/run/overview.mdx b/docs/vocs/docs/pages/run/overview.mdx new file mode 100644 index 00000000000..06b595ad482 --- /dev/null +++ b/docs/vocs/docs/pages/run/overview.mdx @@ -0,0 +1,47 @@ +--- +description: Guide to running a Reth node. +--- + +# Run a Node + +Congratulations, now that you have installed Reth, it's time to run it! + +In this section, we'll guide you through running a Reth node on various networks and configurations.
+ +## Networks + +Choose the network you want to run your node on: + +- **[Ethereum](/run/ethereum)** - Run a node on Ethereum mainnet or testnets +- **[OP Stack](/run/opstack)** - Run a node on OP Stack chains like Base, Optimism, and others +- **[Private testnets](/run/private-testnets)** - Set up and run private test networks + +## Configuration & Monitoring + +Learn how to configure and monitor your node: + +- **[Configuration](/run/configuration)** - Configure your node using reth.toml +- **[Monitoring](/run/monitoring)** - Set up logs, metrics, and observability + +## Frequently Asked Questions + +Find answers to common questions and troubleshooting tips: + +- **[Transaction Types](/run/faq/transactions)** - Understanding different transaction types +- **[Pruning & Full Node](/run/faq/pruning)** - Storage management and node types +- **[Ports](/run/faq/ports)** - Network port configuration +- **[Profiling](/run/faq/profiling)** - Performance profiling and optimization +- **[Sync OP Mainnet](/run/faq/sync-op-mainnet)** - Tips for syncing OP Mainnet + +## List of Supported Networks + +| Network | Chain ID | RPC URL | | --------------- | -------- | ------------------------------------ | | Ethereum | 1 | https://reth-ethereum.ithaca.xyz/rpc | | Sepolia Testnet | 11155111 | https://sepolia.drpc.org | | Base | 8453 | https://base-mainnet.rpc.ithaca.xyz | | Base Sepolia | 84532 | https://base-sepolia.rpc.ithaca.xyz | + +:::tip +Want to add more networks to this table? Feel free to [contribute](https://github.com/paradigmxyz/reth/edit/main/docs/vocs/docs/pages/run/overview.mdx) by submitting a PR with additional networks that Reth supports! ::: diff --git a/book/run/private-testnet.md b/docs/vocs/docs/pages/run/private-testnets.mdx similarity index 90% rename from book/run/private-testnet.md rename to docs/vocs/docs/pages/run/private-testnets.mdx index 28253ca9f01..af281fc5127 100644 --- a/book/run/private-testnet.md +++ b/docs/vocs/docs/pages/run/private-testnets.mdx @@ -1,10 +1,17 @@ +--- +description: Running Reth in a private testnet using Kurtosis. +--- + # Run Reth in a private testnet using Kurtosis + For those who need a private testnet to validate functionality or scale with Reth. ## Using Docker locally + This guide uses [Kurtosis' ethereum-package](https://github.com/ethpandaops/ethereum-package) and assumes you have Kurtosis and Docker installed and have Docker already running on your machine. -* Go [here](https://docs.kurtosis.com/install/) to install Kurtosis -* Go [here](https://docs.docker.com/get-docker/) to install Docker + +- Go [here](https://docs.kurtosis.com/install/) to install Kurtosis +- Go [here](https://docs.docker.com/get-docker/) to install Docker The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) is a [package](https://docs.kurtosis.com/advanced-concepts/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth and various CL clients locally. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations. @@ -13,17 +20,19 @@ To see all possible configurations and flags you can use, including metrics and Genesis data will be generated using this [genesis-generator](https://github.com/ethpandaops/ethereum-genesis-generator) to be used to bootstrap the EL and CL clients for each node.
The end result will be a private testnet with nodes deployed as Docker containers in an ephemeral, isolated environment on your machine called an [enclave](https://docs.kurtosis.com/advanced-concepts/enclaves/). Read more about how the `ethereum-package` works by going [here](https://github.com/ethpandaops/ethereum-package/). ### Step 1: Define the parameters and shape of your private network + First, in your home directory, create a file with the name `network_params.yaml` with the following contents: + ```yaml participants: - - el_type: reth - el_image: ghcr.io/paradigmxyz/reth - cl_type: lighthouse - cl_image: sigp/lighthouse:latest - - el_type: reth - el_image: ghcr.io/paradigmxyz/reth - cl_type: teku - cl_image: consensys/teku:latest + - el_type: reth + el_image: ghcr.io/paradigmxyz/reth + cl_type: lighthouse + cl_image: sigp/lighthouse:latest + - el_type: reth + el_image: ghcr.io/paradigmxyz/reth + cl_type: teku + cl_image: consensys/teku:latest ``` > [!TIP] @@ -32,10 +41,13 @@ participants: ### Step 2: Spin up your network Next, run the following command from your command line: + ```bash kurtosis run github.com/ethpandaops/ethereum-package --args-file ~/network_params.yaml --image-download always ``` + Kurtosis will spin up an [enclave](https://docs.kurtosis.com/advanced-concepts/enclaves/) (i.e. an ephemeral, isolated environment) and begin to configure and instantiate the nodes in your network. In the end, Kurtosis will print the services running in your enclave that form your private testnet alongside all the container ports and files that were generated & used to start up the private testnet. Here is a sample output: + ```console INFO[2024-07-09T12:01:35+02:00] ======================================================== INFO[2024-07-09T12:01:35+02:00] || Created enclave: silent-mountain || INFO[2024-07-09T12:01:35+02:00] ======================================================== @@ -88,14 +100,18 @@ f0a7d5343346 vc-1-reth-lighthouse metrics: 8080/tc Great! You now have a private network with 2 full Ethereum nodes on your local machine over Docker - one that is a Reth/Lighthouse pair and another that is Reth/Teku. Check out the [Kurtosis docs](https://docs.kurtosis.com/cli) to learn about the various ways you can interact with and inspect your network. ## Using Kurtosis on Kubernetes + Kurtosis packages are portable and reproducible, meaning they will work the same way over Docker or Kubernetes, locally or on remote infrastructure. For use cases that require a larger scale, Kurtosis can be deployed on Kubernetes by following these docs [here](https://docs.kurtosis.com/k8s/). ## Running the network with additional services + The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) comes with many optional flags and arguments you can enable for your private network. Some include: -- A Grafana + Prometheus instance -- A transaction spammer called [`tx-fuzz`](https://github.com/MariusVanDerWijden/tx-fuzz) -- [A network metrics collector](https://github.com/dapplion/beacon-metrics-gazer) -- Flashbot's `mev-boost` implementation of PBS (to test/simulate MEV workflows) + +- A Grafana + Prometheus instance +- A transaction spammer called [`tx-fuzz`](https://github.com/MariusVanDerWijden/tx-fuzz) +- [A network metrics collector](https://github.com/dapplion/beacon-metrics-gazer) +- Flashbots' `mev-boost` implementation of PBS (to test/simulate MEV workflows) ### Questions? + Please reach out to the [Kurtosis discord](https://discord.com/invite/6Jjp9c89z9) should you have any questions about how to use the `ethereum-package` for your private testnet needs. Thanks!
diff --git a/book/installation/installation.md b/docs/vocs/docs/pages/run/system-requirements.mdx similarity index 50% rename from book/installation/installation.md rename to docs/vocs/docs/pages/run/system-requirements.mdx index 602601b9f30..60e30189f6a 100644 --- a/book/installation/installation.md +++ b/docs/vocs/docs/pages/run/system-requirements.mdx @@ -1,72 +1,71 @@ -# Installation +# System Requirements -Reth runs on Linux and macOS (Windows tracked). - -There are three core methods to obtain Reth: - -* [Pre-built binaries](./binaries.md) -* [Docker images](./docker.md) -* [Building from source.](./source.md) +The hardware requirements for running Reth depend on the node configuration and can change over time as the network grows or new features are implemented. -> **Note** -> -> If you have Docker installed, we recommend using the [Docker Compose](./docker.md#using-docker-compose) configuration -> that will get you Reth, Lighthouse (Consensus Client), Prometheus and Grafana running and syncing with just one command. +The most important requirement is by far the disk, whereas CPU and RAM requirements are relatively flexible. -## Hardware Requirements +## Chain Specific Requirements -The hardware requirements for running Reth depend on the node configuration and can change over time as the network grows or new features are implemented. +### Ethereum Mainnet -The most important requirement is by far the disk, whereas CPU and RAM requirements are relatively flexible. +Below are the requirements for running an Ethereum Mainnet node as of 2025-06-23 block number `22700000`: | | Archive Node | Full Node | -|-----------|---------------------------------------|---------------------------------------| -| Disk | At least 2.8TB (TLC NVMe recommended) | At least 1.8TB (TLC NVMe recommended) | +| --------- | ------------------------------------- | ------------------------------------- | +| Disk | At least 2.8TB (TLC NVMe recommended) | At least 1.2TB (TLC NVMe recommended) | | Memory | 16GB+ | 8GB+ | | CPU | Higher clock speed over core count | Higher clock speeds over core count | | Bandwidth | Stable 24Mbps+ | Stable 24Mbps+ | -#### QLC and TLC +### Base Mainnet -It is crucial to understand the difference between QLC and TLC NVMe drives when considering the disk requirement. +Below are the minimum system requirements for running a Base Mainnet node as of 2025-06-23, block number `31900000`: -QLC (Quad-Level Cell) NVMe drives utilize four bits of data per cell, allowing for higher storage density and lower manufacturing costs. However, this increased density comes at the expense of performance. QLC drives have slower read and write speeds compared to TLC drives. They also have a lower endurance, meaning they may have a shorter lifespan and be less suitable for heavy workloads or constant data rewriting. +| | Archive Node | Full Node | +| --------- | -------------------------------------------- | -------------------------------------------- | +| Disk | At least 4.1TB (TLC NVMe recommended) | At least 2TB (TLC NVMe recommended) | +| Memory | 128GB+ | 128GB+ | +| CPU | 6 cores+, Higher clock speed over core count | 6 cores+, Higher clock speed over core count | +| Bandwidth | Stable 24Mbps+ | Stable 24Mbps+ | -TLC (Triple-Level Cell) NVMe drives, on the other hand, use three bits of data per cell. While they have a slightly lower storage density compared to QLC drives, TLC drives offer faster performance. 
They typically have higher read and write speeds, making them more suitable for demanding tasks such as data-intensive applications, gaming, and multimedia editing. TLC drives also tend to have a higher endurance, making them more durable and longer-lasting. +:::note +**On CPU clock speeds**: The AMD EPYC 4005/4004 series is a cost-effective high-clock speed option with support for up to 192GB memory. + +**On CPU cores for Base**: 5+ cores are needed because the state root task splits work into separate threads that run in parallel with each other. The state root task is generally more performant and can scale with the number of CPU cores, while regular state root always uses only one core. This is not a requirement for Mainnet, but for Base you may encounter block processing latencies of more than 2s, which can lead to lagging behind the head of the chain. +::: -Prior to purchasing an NVMe drive, it is advisable to research and determine whether the disk will be based on QLC or TLC technology. An overview of recommended and not-so-recommended NVMe boards can be found at [here]( https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). +## Disk -### Disk +Simplest approach: Use a [good TLC NVMe](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038) drive for everything. -There are multiple types of disks to sync Reth, with varying size requirements, depending on the syncing mode. -As of April 2025 at block number 22.1M: +Advanced Storage Optimization (Optional): -* Archive Node: At least 2.8TB is required -* Full Node: At least 1.8TB is required +- TLC NVMe: All application data except static files (`--datadir`) +- SATA SSD/HDD: Static files can be stored on slower & cheaper storage (`--datadir.static-files`) -NVMe based SSD drives are recommended for the best performance, with SATA SSDs being a cheaper alternative. HDDs are the cheapest option, but they will take the longest to sync, and are not recommended. +### QLC and TLC + +It is crucial to understand the difference between QLC and TLC NVMe drives when considering the disk requirement. -As of February 2024, syncing an Ethereum mainnet node to block 19.3M on NVMe drives takes about 50 hours, while on a GCP "Persistent SSD" it takes around 5 days. +QLC (Quad-Level Cell) NVMe drives utilize four bits of data per cell, allowing for higher storage density and lower manufacturing costs. However, this increased density comes at the expense of performance. QLC drives have slower read and write speeds compared to TLC drives. They also have a lower endurance, meaning they may have a shorter lifespan and be less suitable for heavy workloads or constant data rewriting. -> **Note** -> -> It is highly recommended to choose a TLC drive when using an NVMe drive, and not a QLC drive. See [the note](#qlc-and-tlc) above. A list of recommended drives can be found [here]( https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). +TLC (Triple-Level Cell) NVMe drives, on the other hand, use three bits of data per cell. While they have a slightly lower storage density compared to QLC drives, TLC drives offer faster performance. They typically have higher read and write speeds, making them more suitable for demanding tasks such as data-intensive applications, gaming, and multimedia editing. TLC drives also tend to have a higher endurance, making them more durable and longer-lasting. 
-### CPU +## CPU Most of the time during syncing is spent executing transactions, which is a single-threaded operation due to potential state dependencies of a transaction on previous ones. -As a result, the number of cores matters less, but in general higher clock speeds are better. More cores are better for parallelizable [stages](https://github.com/paradigmxyz/reth/blob/main/docs/crates/stages.md) (like sender recovery or bodies downloading), but these stages are not the primary bottleneck for syncing. +As a result, the number of cores matters less, but in general higher clock speeds are better. More cores are better for parallelizable [stages](https://github.com/paradigmxyz/reth/blob/main/docs/crates/stages) (like sender recovery or bodies downloading), but these stages are not the primary bottleneck for syncing. -### Memory +## Memory -It is recommended to use at least 8GB of RAM. +It is recommended to use at least 16GB of RAM. Most of Reth's components tend to consume a low amount of memory, unless you are under heavy RPC load, so this should matter less than the other requirements. Higher memory is generally better as it allows for better caching, resulting in less stress on the disk. -### Bandwidth +## Bandwidth A stable and dependable internet connection is crucial for both syncing a node from genesis and for keeping up with the chain's tip. @@ -76,6 +75,13 @@ Once you're synced to the tip you will need a reliable connection, especially if ## What hardware can I get? -If you are buying your own NVMe SSD, please consult [this hardware comparison](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038) which is being actively maintained. We recommend against buying DRAM-less or QLC devices as these are noticeably slower. +### Build your own + +- Storage: Consult the [Great and less great SSDs for Ethereum nodes](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038) gist. The Seagate Firecuda 530 and WD Black SN850(X) are popular TLC NVMe options. Ensure proper cooling via heatsinks or active fans. +- CPU: AMD Ryzen 5000/7000/9000 series, AMD EPYC 4004/4005 or Intel Core i5/i7 (11th gen or newer) with at least 6 cores. The AMD Ryzen 9000 series and the AMD EPYC 4005 series offer good value. +- Memory: 32GB DDR4 or DDR5 (ECC if your motherboard & CPU support it). + +### Hosted -All our benchmarks have been produced on [Latitude.sh](https://www.latitude.sh/), a bare metal provider. We use `c3.large.x86` boxes, and also recommend trying the `c3.small.x86` box for pruned/full nodes. So far our experience has been smooth with some users reporting that the NVMEs there outperform AWS NVMEs by 3x or more. We're excited for more Reth nodes on Latitude.sh, so for a limited time you can use `RETH400` for a $250 discount.
[Run a node now!](https://metal.new/reth) +- [Latitude.sh](https://www.latitude.sh): `f4.metal.small`, `c3.large.x86` or better +- [OVH](https://www.ovhcloud.com/en/bare-metal/advance/): `Advance-1` or better diff --git a/docs/vocs/docs/pages/sdk/custom-node/modifications.mdx b/docs/vocs/docs/pages/sdk/custom-node/modifications.mdx new file mode 100644 index 00000000000..b375feb901b --- /dev/null +++ b/docs/vocs/docs/pages/sdk/custom-node/modifications.mdx @@ -0,0 +1 @@ +# Modifying Node Components diff --git a/docs/vocs/docs/pages/sdk/custom-node/prerequisites.mdx b/docs/vocs/docs/pages/sdk/custom-node/prerequisites.mdx new file mode 100644 index 00000000000..8dbf0a1bf48 --- /dev/null +++ b/docs/vocs/docs/pages/sdk/custom-node/prerequisites.mdx @@ -0,0 +1 @@ +# Prerequisites and Considerations diff --git a/docs/vocs/docs/pages/sdk/examples/modify-node.mdx b/docs/vocs/docs/pages/sdk/examples/modify-node.mdx new file mode 100644 index 00000000000..b8f21a06bbf --- /dev/null +++ b/docs/vocs/docs/pages/sdk/examples/modify-node.mdx @@ -0,0 +1,16 @@ +# How to Modify an Existing Node + +This guide demonstrates how to extend a Reth node with custom functionality, including adding RPC endpoints, modifying transaction validation, and implementing custom services. + +## Adding Custom RPC Endpoints + +One of the most common modifications is adding custom RPC methods to expose additional functionality. + +### Basic Custom RPC Module + + +## Next Steps + +- Explore [Standalone Components](/sdk/examples/standalone-components) for direct blockchain interaction +- Learn about [Custom Node Building](/sdk/custom-node/prerequisites) for production deployments +- Review [Type System](/sdk/typesystem/block) for working with blockchain data diff --git a/docs/vocs/docs/pages/sdk/examples/standalone-components.mdx b/docs/vocs/docs/pages/sdk/examples/standalone-components.mdx new file mode 100644 index 00000000000..3c16e1cf123 --- /dev/null +++ b/docs/vocs/docs/pages/sdk/examples/standalone-components.mdx @@ -0,0 +1,12 @@ +# Using Standalone Components + +This guide demonstrates how to use Reth components independently without running a full node. This is useful for building tools, analyzers, indexers, or any application that needs direct access to blockchain data. + +## Direct Database Access + + +## Next Steps + +- Learn about [Modifying Nodes](/sdk/examples/modify-node) to add functionality +- Explore the [Type System](/sdk/typesystem/block) for working with data +- Check [Custom Node Building](/sdk/custom-node/prerequisites) for production use diff --git a/docs/vocs/docs/pages/sdk/node-components.mdx b/docs/vocs/docs/pages/sdk/node-components.mdx new file mode 100644 index 00000000000..d569d499dd9 --- /dev/null +++ b/docs/vocs/docs/pages/sdk/node-components.mdx @@ -0,0 +1,112 @@ +# Node Components + +Reth's modular architecture allows developers to customize and extend individual components of the node. Each component serves a specific purpose and can be replaced or modified to suit your needs. + +## Architecture Overview + +A Reth node consists of several key components that work together and can interact with each other: + +```mermaid +graph LR + Network[Network] --> Pool[Transaction Pool] + Network --> Consensus[Consensus] + Pool --> DB[(Database)] + Consensus --> EVM + EVM --> DB[(Database)] + RPC[RPC Server] --> Pool + RPC --> DB + RPC --> EVM +``` + +## Core Components + +### [Network](/sdk/node-components/network) +Handles P2P communication, peer discovery, and block/transaction propagation. 
The network component is responsible for: +- Peer discovery and management +- Transaction gossip +- State synchronization (downloading blocks) +- Protocol message handling + +### [Transaction Pool](/sdk/node-components/pool) +Manages pending transactions before they're included in blocks: +- Transaction validation +- Ordering and prioritization +- Transaction replacement logic +- Pool size management and eviction + +### [Consensus](/sdk/node-components/consensus) +Validates blocks according to protocol rules: +- Header validation (e.g. gas limit, base fee) +- Block body validation (e.g. transaction root) + +### [EVM](/sdk/node-components/evm) +Executes transactions and manages state transitions: +- Block execution +- Transaction execution +- Block building + +### [RPC](/sdk/node-components/rpc) +Provides external API access to the node: +- Standard Ethereum JSON-RPC methods +- Custom endpoints +- WebSocket subscriptions + +## Component Customization + +Each component can be customized through Reth's builder pattern: + +```rust +use reth_ethereum::node::{EthereumNode, NodeBuilder}; + +let node = NodeBuilder::new(config) + .with_types::<EthereumNode>() + .with_components(|ctx| { + // Use the ComponentBuilder to customize components + ctx.components_builder() + // Custom network configuration + .network(|network_builder| { + network_builder + .peer_manager(custom_peer_manager) + .build() + }) + // Custom transaction pool + .pool(|pool_builder| { + pool_builder + .validator(custom_validator) + .ordering(custom_ordering) + .build() + }) + // Custom consensus + .consensus(custom_consensus) + // Custom EVM configuration + .evm(|evm_builder| { + evm_builder + .with_precompiles(custom_precompiles) + .build() + }) + // Build all components + .build() + }) + .build() + .await?; +``` + +## Component Lifecycle + +Components follow a specific lifecycle, from node builder initialization to shutdown: + +1. **Initialization**: Components are created with their dependencies +2. **Configuration**: Settings and parameters are applied +3. **Startup**: Components begin their main operations +4. **Runtime**: Components process requests and events +5. **Shutdown**: Graceful cleanup and resource release + + +## Next Steps + +Explore each component in detail: +- [Network Component](/sdk/node-components/network) - P2P and synchronization +- [Transaction Pool](/sdk/node-components/pool) - Mempool management +- [Consensus](/sdk/node-components/consensus) - Block validation +- [EVM](/sdk/node-components/evm) - Transaction execution +- [RPC](/sdk/node-components/rpc) - External APIs diff --git a/docs/vocs/docs/pages/sdk/node-components/consensus.mdx b/docs/vocs/docs/pages/sdk/node-components/consensus.mdx new file mode 100644 index 00000000000..1541d351d5f --- /dev/null +++ b/docs/vocs/docs/pages/sdk/node-components/consensus.mdx @@ -0,0 +1,45 @@ +# Consensus Component + +The consensus component validates blocks according to Ethereum protocol rules, handles chain reorganizations, and manages the canonical chain state. + +## Overview + +The consensus component is responsible for: +- Validating block headers and bodies +- Verifying state transitions +- Managing fork choice rules +- Handling chain reorganizations +- Tracking finalized and safe blocks +- Validating blob transactions (EIP-4844) + +## Key Concepts + +### Block Validation +The consensus component performs multiple validation steps: +1. **Pre-execution validation**: Header and body checks before running transactions +2.
**Post-execution validation**: State root and receipts verification after execution + +### Header Validation +Headers must pass several checks: +- **Timestamp**: Must be greater than parent's timestamp +- **Gas limit**: Changes must be within protocol limits (1/1024 of parent) +- **Extra data**: Size restrictions based on network rules +- **Difficulty/PoS**: Appropriate validation for pre/post-merge + +### Body Validation +Block bodies are validated against headers: +- **Transaction root**: Merkle root must match header +- **Withdrawals root**: For post-Shanghai blocks +- **Blob validation**: For EIP-4844 transactions + +### Fork Choice +The consensus engine determines the canonical chain: +- Tracks multiple chain branches +- Applies fork choice rules (longest chain, most work, etc.) +- Handles reorganizations when better chains are found + +## Next Steps + +- Explore [EVM](/sdk/node-components/evm) execution +- Learn about [RPC](/sdk/node-components/rpc) server integration +- Understand [Transaction Pool](/sdk/node-components/pool) interaction \ No newline at end of file diff --git a/docs/vocs/docs/pages/sdk/node-components/evm.mdx b/docs/vocs/docs/pages/sdk/node-components/evm.mdx new file mode 100644 index 00000000000..6047f69bd73 --- /dev/null +++ b/docs/vocs/docs/pages/sdk/node-components/evm.mdx @@ -0,0 +1,45 @@ +# EVM Component + +The EVM (Ethereum Virtual Machine) component handles transaction execution and state transitions. It's responsible for processing transactions and updating the blockchain state. + +## Overview + +The EVM component manages: +- Transaction execution +- State transitions and updates +- Gas calculation and metering +- Custom precompiles and opcodes +- Block execution and validation +- State management and caching + +## Architecture + + +## Key Concepts + +### Transaction Execution +The EVM executes transactions in a deterministic way: +1. **Environment Setup**: Configure block and transaction context +2. **State Access**: Load accounts and storage from the database +3. **Execution**: Run EVM bytecode with gas metering +4. **State Updates**: Apply changes to accounts and storage +5. **Receipt Generation**: Create execution receipts with logs + +### Block Execution +Block executors process all transactions in a block: +- Validate pre-state conditions +- Execute transactions sequentially +- Apply block rewards +- Verify post-state (state root, receipts root) + +### Block Building +Block builders construct new blocks for proposal: +- Select transactions (e.g. mempool) +- Order and execute transactions +- Seal the block with a header (state root) + +## Next Steps + +- Learn about [RPC](/sdk/node-components/rpc) server integration +- Explore [Transaction Pool](/sdk/node-components/pool) interaction +- Review [Consensus](/sdk/node-components/consensus) validation \ No newline at end of file diff --git a/docs/vocs/docs/pages/sdk/node-components/network.mdx b/docs/vocs/docs/pages/sdk/node-components/network.mdx new file mode 100644 index 00000000000..308087305ac --- /dev/null +++ b/docs/vocs/docs/pages/sdk/node-components/network.mdx @@ -0,0 +1,55 @@ +# Network Component + +The network component handles all peer-to-peer communication in Reth, including peer discovery, connection management, and protocol message handling.
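+For a feel of the peer-scoring idea covered under connection management below, here is a toy sketch in plain Rust. The names and threshold are illustrative only, not reth's actual API; the real logic lives in the `reth-network` crate:
+
+```rust
+use std::collections::HashMap;
+
+/// Illustrative reputation floor below which a peer gets disconnected.
+const BAN_THRESHOLD: i32 = -100;
+
+#[derive(Default)]
+struct PeerReputations(HashMap<String, i32>);
+
+impl PeerReputations {
+    /// Applies a reputation change and reports whether the peer should now be banned.
+    fn apply(&mut self, peer_id: &str, delta: i32) -> bool {
+        let rep = self.0.entry(peer_id.to_owned()).or_insert(0);
+        *rep += delta;
+        *rep <= BAN_THRESHOLD
+    }
+}
+
+fn main() {
+    let mut reps = PeerReputations::default();
+    // Two protocol violations: only the second one crosses the ban threshold.
+    assert!(!reps.apply("peer-a", -60));
+    assert!(reps.apply("peer-a", -60));
+}
+```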
+ +## Overview + +The network stack implements the Ethereum Wire Protocol (ETH) and provides: +- Peer discovery via discv4 and discv5 +- Connection management with configurable peer limits +- Transaction propagation +- State synchronization +- Request/response protocols (e.g. GetBlockHeaders, GetBlockBodies) + +## Architecture + +```mermaid +graph TD + NetworkManager[Network Manager] --> Discovery[Discovery] + NetworkManager --> Sessions[Session Manager] + NetworkManager --> Swarm[Swarm] + + Discovery --> discv4[discv4] + Discovery --> discv5[discv5] + Discovery --> DNS[DNS Discovery] + + Sessions --> ETH[ETH Protocol] +``` + +## Key Concepts + +### Peer Discovery +The network uses multiple discovery mechanisms to find and connect to peers: +- **discv4**: UDP-based discovery protocol for finding peers +- **discv5**: Improved discovery protocol with better security +- **DNS Discovery**: Peer lists published via DNS for bootstrap + +### Connection Management +- Maintains separate limits for inbound and outbound connections +- Implements peer scoring and reputation tracking +- Handles connection lifecycle and graceful disconnections + +### Protocol Support +- **ETH Protocol**: Core Ethereum wire protocol for blocks and transactions + +### Message Broadcasting +The network efficiently propagates new blocks and transactions to peers using: +- Transaction pooling and deduplication +- Block announcement strategies +- Bandwidth management + +## Next Steps + +- Learn about the [Transaction Pool](/sdk/node-components/pool) +- Understand [Consensus](/sdk/node-components/consensus) integration +- Explore [RPC](/sdk/node-components/rpc) server setup \ No newline at end of file diff --git a/docs/vocs/docs/pages/sdk/node-components/pool.mdx b/docs/vocs/docs/pages/sdk/node-components/pool.mdx new file mode 100644 index 00000000000..301d794b3fd --- /dev/null +++ b/docs/vocs/docs/pages/sdk/node-components/pool.mdx @@ -0,0 +1,80 @@ +# Transaction Pool Component + +The transaction pool (mempool) manages pending transactions before they are included in blocks. It handles validation, ordering, replacement, and eviction of transactions. + +## Overview + +The transaction pool is responsible for: +- Validating incoming transactions +- Maintaining transaction ordering (e.g. by fees) +- Handling transaction replacement +- Managing pool size limits +- Broadcasting transactions to peers +- Providing transactions for block building + +## Architecture + +```mermaid +graph TD + API[Pool API] --> Validator[Transaction Validator] + API --> Pool[Transaction Pool] + + Pool --> SubPools[Sub-Pools] + SubPools --> Pending[Pending Pool] + SubPools --> Queued[Queued Pool] + SubPools --> Base[Base Fee Pool] + + Pool --> Ordering[Transaction Ordering] + Pool --> Listeners[Event Listeners] + + Validator --> Checks[Validation Checks] + Checks --> Nonce[Nonce Check] + Checks --> Balance[Balance Check] +``` + +## Key Concepts + +### Transaction Validation +The pool validates transactions before accepting them, checking: +- Sender has sufficient balance for gas and value +- Nonce is correct (either next expected or future) +- Gas price meets minimum requirements +- Transaction size is within limits +- Signature is valid + +### Transaction Ordering +Transactions are ordered by their effective tip per gas to maximize block rewards. Custom ordering strategies can prioritize certain addresses or implement MEV protection.
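+A minimal sketch of such a custom strategy, using a simplified stand-in for a pooled transaction (reth's real `TransactionOrdering` abstraction in the transaction-pool crate differs in detail):
+
+```rust
+use std::cmp::Reverse;
+
+/// Simplified view of a pending transaction, for ordering purposes only.
+struct PendingTx {
+    /// Whether the sender is on a hypothetical priority allowlist.
+    priority_sender: bool,
+    /// Tip per gas the transaction effectively pays at the current base fee.
+    effective_tip_per_gas: u128,
+}
+
+/// Sort key: allowlisted senders first, then highest effective tip first.
+fn ordering_key(tx: &PendingTx) -> (Reverse<bool>, Reverse<u128>) {
+    (Reverse(tx.priority_sender), Reverse(tx.effective_tip_per_gas))
+}
+
+fn main() {
+    let mut pending = vec![
+        PendingTx { priority_sender: false, effective_tip_per_gas: 30 },
+        PendingTx { priority_sender: true, effective_tip_per_gas: 5 },
+        PendingTx { priority_sender: false, effective_tip_per_gas: 50 },
+    ];
+    pending.sort_by_key(ordering_key);
+    // Resulting order: the allowlisted sender, then the 50-tip tx, then the 30-tip tx.
+}
+```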
+ +### Sub-Pools +- **Pending**: Transactions ready for inclusion (correct nonce) +- **Queued**: Future transactions (nonce gap exists) +- **Base Fee**: Transactions priced below current base fee + +### Pool Maintenance +The pool requires periodic maintenance to: +- Remove stale transactions +- Revalidate after chain reorganizations +- Update base fee thresholds +- Enforce size limits + +## Advanced Features + +### Blob Transaction Support +EIP-4844 introduces blob transactions with separate blob storage and special validation rules. + +### Transaction Filters +Custom filters can block specific addresses, limit gas prices, or implement custom acceptance criteria. + +### Event System +The pool supports an event system that allows other components to listen for transaction lifecycle events such as: +- Transaction added +- Transaction removed +- Transaction replaced +- Transaction promoted to pending state + + +## Next Steps + +- Learn about [Consensus](/sdk/node-components/consensus) validation +- Explore [EVM](/sdk/node-components/evm) execution +- Understand [RPC](/sdk/node-components/rpc) server integration \ No newline at end of file diff --git a/docs/vocs/docs/pages/sdk/node-components/rpc.mdx b/docs/vocs/docs/pages/sdk/node-components/rpc.mdx new file mode 100644 index 00000000000..4f9fa1e3d7b --- /dev/null +++ b/docs/vocs/docs/pages/sdk/node-components/rpc.mdx @@ -0,0 +1,20 @@ +# RPC Component + +The RPC component provides external API access to the node, implementing the Ethereum JSON-RPC specification and allowing custom extensions. + +## Overview + +The RPC component provides: +- Standard Ethereum JSON-RPC methods +- WebSocket subscriptions +- Custom method extensions +- Rate limiting and access control +- Request batching support +- Multiple transport protocols (HTTP, WebSocket, IPC) + + +## Next Steps + +- Explore [Network](/sdk/node-components/network) component integration +- Learn about [Transaction Pool](/sdk/node-components/pool) APIs +- Understand [EVM](/sdk/node-components/evm) execution context \ No newline at end of file diff --git a/docs/vocs/docs/pages/sdk/overview.mdx b/docs/vocs/docs/pages/sdk/overview.mdx new file mode 100644 index 00000000000..b308dee77ae --- /dev/null +++ b/docs/vocs/docs/pages/sdk/overview.mdx @@ -0,0 +1,125 @@ +# Reth for Developers + +Reth can be used as a library to build custom Ethereum nodes, interact with blockchain data, or create specialized tools for blockchain analysis and indexing. + +## What is the Reth SDK? + +The Reth SDK allows developers to: + +- Use components of the Reth node as libraries +- Build custom Ethereum execution nodes with modified behavior (e.g. payload building) +- Access blockchain data directly from the database +- Create high-performance indexing solutions +- Extend a node with new RPC endpoints and functionality +- Implement custom consensus mechanisms +- Build specialized tools for blockchain analysis + +## Quick Start + +Add Reth to your project: + +### Ethereum + +```toml +[dependencies] +# Ethereum meta crate +reth-ethereum = { git = "https://github.com/paradigmxyz/reth" } +``` + +### OP Stack + +```toml +[dependencies] +reth-op = { git = "https://github.com/paradigmxyz/reth" } +``` + +## Key Concepts + +### Node Architecture + +Reth is built with modularity in mind.
The main components include: + +- **Primitives**: Core data type abstractions like `Block` +- **Node Builder**: Constructs and configures node instances +- **Database**: Efficient storage using MDBX and static files +- **Network**: P2P communication and block synchronization +- **Consensus**: Block validation and chain management +- **EVM**: Transaction execution and state transitions +- **RPC**: JSON-RPC server for external communication +- **Transaction Pool**: Pending transaction management + +### Dependency Management + +Reth is primarily built on top of the [alloy](https://github.com/alloy-rs/alloy) ecosystem, which provides the necessary abstractions and implementations for core Ethereum blockchain data types, transaction handling, and EVM execution. + +### Type System + +Reth uses its own type system to handle different representations of blockchain data: + +- **Primitives**: Core types like `B256`, `Address`, `U256` +- **Transactions**: Multiple representations for different contexts (pooled, consensus, RPC) +- **Blocks**: Headers, bodies, and sealed blocks with proven properties +- **State**: Accounts, storage, and state transitions + +### Building Custom Nodes + +The node builder pattern allows you to customize every aspect of node behavior: + +```rust +use reth_ethereum::node::{EthereumNode, NodeBuilder}; + +// Build a custom node with modified components +let node = NodeBuilder::new(config) + // install the Ethereum-specific node primitives + .with_types::<EthereumNode>() + .with_components(|components| { + // Customize components here + components + }) + .build() + .await?; +``` + +## Architecture Overview + +```mermaid +graph TD + A[Node Builder] --> B[Database] + A --> C[Network] + A --> D[Consensus] + A --> E[EVM] + A --> F[RPC Server] + A --> G[Transaction Pool] + + B --> H[DB Storage] + B --> I[Static Files] + + C --> J[Discovery] + C --> K[ETH Protocol] + + E --> L[State Provider] + E --> M[Block Executor] +``` + +## Nodes Built with Reth + +Several production networks have been built using Reth's node builder pattern: + +| Node | Company | Description | Lines of Code | +|------|---------|-------------|---------------| +| [Base Node](https://github.com/base/node-reth) | Coinbase | Coinbase's L2 scaling solution node implementation | ~3K | +| [Bera Reth](https://github.com/berachain/bera-reth) | Berachain | Berachain's high-performance EVM node with custom features | ~1K | +| [Reth Gnosis](https://github.com/gnosischain/reth_gnosis) | Gnosis | Gnosis Chain's xDai-compatible execution client | ~5K | +| [Reth BSC](https://github.com/loocapro/reth-bsc) | Binance Smart Chain | BNB Smart Chain execution client implementation | ~6K | + +## Next Steps + +- **[Node Components](/sdk/node-components)**: Deep dive into each component +- **[Type System](/sdk/typesystem/block)**: Understanding Reth's type system +- **[Custom Nodes](/sdk/custom-node/prerequisites)**: Building production nodes +- **[Examples](/sdk/examples/modify-node)**: Real-world implementations + +## Resources + +- [API Documentation](https://docs.rs/reth/latest/reth/) +- [GitHub Repository](https://github.com/paradigmxyz/reth) diff --git a/docs/vocs/docs/pages/sdk/typesystem/block.mdx b/docs/vocs/docs/pages/sdk/typesystem/block.mdx new file mode 100644 index 00000000000..450b4f93d1a --- /dev/null +++ b/docs/vocs/docs/pages/sdk/typesystem/block.mdx @@ -0,0 +1,26 @@ +# Block Types + +The Reth type system provides a flexible abstraction for blocks through traits, allowing different implementations while maintaining type safety and
consistency. + +## Type Relationships + +```mermaid +graph TD + Block[Block Trait] --> Header[BlockHeader Trait] + Block --> Body[BlockBody Trait] + + SealedBlock -.-> Block + SealedBlock --> SealedHeader + RecoveredBlock --> SealedBlock + + SealedHeader --> Header + + Body --> Transaction[Transactions] + Body --> Withdrawals[Withdrawals] +``` + +## Next Steps + +- Learn about [Transaction Types](/sdk/typesystem/transaction-types) +- Understand [Consensus](/sdk/node-components/consensus) validation +- Explore [EVM](/sdk/node-components/evm) execution diff --git a/docs/vocs/docs/pages/sdk/typesystem/transaction-types.mdx b/docs/vocs/docs/pages/sdk/typesystem/transaction-types.mdx new file mode 100644 index 00000000000..e541727da87 --- /dev/null +++ b/docs/vocs/docs/pages/sdk/typesystem/transaction-types.mdx @@ -0,0 +1,92 @@ +# Transaction Types and Representations + +Reth provides multiple transaction representations optimized for different stages of the transaction lifecycle. Understanding these types is crucial for working with the node's transaction handling pipeline. + +## Transaction Lifecycle + +Transactions go through several stages, each with its own optimized representation: + +```mermaid +graph LR + RPC[RPC Transaction] --> Pool[Pooled Transaction] + Pool --> Consensus[Consensus Transaction] + Consensus --> Executed[Executed Transaction] + + Pool -.-> RPC + Consensus -.-> Pool +``` + +## Transaction Representations + +### RPC Transaction + +The RPC representation is designed for JSON-RPC communication with external clients. It uses JSON-compatible types and includes all information clients need to understand transaction status. + +Key characteristics: +- **JSON-compatible types**: Uses U256 for numbers, hex strings for binary data +- **Optional fields**: Supports both legacy and EIP-1559 transactions with appropriate fields +- **Block context**: Includes block hash, number, and index when transaction is mined +- **Human-readable**: Optimized for external consumption and debugging +- **Complete information**: Contains all transaction details including signature components + +Use cases: +- Sending transactions via `eth_sendTransaction` +- Querying transaction details via `eth_getTransactionByHash` +- Transaction receipts and history +- Block explorer displays + +### Pooled Transaction + +The pooled representation is optimized for mempool storage and validation. It pre-computes expensive values and includes additional data needed for pool management. + +Key characteristics: +- **Cached values**: Pre-computed sender address and transaction cost to avoid repeated calculations +- **Validation ready**: Includes all data needed for quick pool validation +- **Blob support**: Handles EIP-4844 blob sidecars separately from the core transaction +- **Memory efficient**: Optimized structure for storing thousands of pending transactions +- **Priority ordering**: Structured for efficient sorting by gas price/priority fee + +Use cases: +- Transaction pool storage and management +- Gas price ordering and replacement logic +- Validation against account state +- Broadcasting to peers + +### Consensus Transaction + +The consensus representation is the canonical format used in blocks and for network propagation. It's the most compact representation and follows Ethereum's wire protocol. 
+ +Key characteristics: +- **Type safety**: Enum variants for different transaction types (Legacy, EIP-2930, EIP-1559, EIP-4844) +- **Compact encoding**: For storage on disk +- **No redundancy**: Minimal data, with values like sender recovered from signature when needed + +Use cases: +- Block construction and validation +- Network propagation between nodes +- Persistent storage in the database +- State transition execution + +## Representation Conversions + +### RPC → Pooled +When transactions arrive via RPC: +1. Validate JSON format and fields +2. Convert to consensus format +3. Recover sender from signature +4. Create pooled representation + +### Pooled → Consensus +When including in a block: +1. Extract core transaction consensus data +2. Remove cached values (sender, cost) + +### Consensus → RPC +When serving RPC requests: +1. Add block context (hash, number, index) + +## Next Steps + +- Learn about [Block Types](/sdk/typesystem/block) and how transactions fit in blocks +- Understand [Transaction Pool](/sdk/node-components/pool) management +- Explore [EVM](/sdk/node-components/evm) transaction execution \ No newline at end of file diff --git a/docs/vocs/docs/public/alchemy.png b/docs/vocs/docs/public/alchemy.png new file mode 100644 index 00000000000..422feb03277 Binary files /dev/null and b/docs/vocs/docs/public/alchemy.png differ diff --git a/docs/vocs/docs/public/coinbase.png b/docs/vocs/docs/public/coinbase.png new file mode 100644 index 00000000000..2e71f9ec3a1 Binary files /dev/null and b/docs/vocs/docs/public/coinbase.png differ diff --git a/docs/vocs/docs/public/flashbots.png b/docs/vocs/docs/public/flashbots.png new file mode 100644 index 00000000000..1a4622becd2 Binary files /dev/null and b/docs/vocs/docs/public/flashbots.png differ diff --git a/docs/vocs/docs/public/logo.png b/docs/vocs/docs/public/logo.png new file mode 100644 index 00000000000..fa113d2a674 Binary files /dev/null and b/docs/vocs/docs/public/logo.png differ diff --git a/book/developers/exex/assets/remote_exex.png b/docs/vocs/docs/public/remote_exex.png similarity index 100% rename from book/developers/exex/assets/remote_exex.png rename to docs/vocs/docs/public/remote_exex.png diff --git a/docs/vocs/docs/public/reth-prod.png b/docs/vocs/docs/public/reth-prod.png new file mode 100644 index 00000000000..5b31a569a36 Binary files /dev/null and b/docs/vocs/docs/public/reth-prod.png differ diff --git a/docs/vocs/docs/public/succinct.png b/docs/vocs/docs/public/succinct.png new file mode 100644 index 00000000000..1261974aa8a Binary files /dev/null and b/docs/vocs/docs/public/succinct.png differ diff --git a/docs/vocs/docs/snippets/sources/Cargo.toml b/docs/vocs/docs/snippets/sources/Cargo.toml new file mode 100644 index 00000000000..245734ce83a --- /dev/null +++ b/docs/vocs/docs/snippets/sources/Cargo.toml @@ -0,0 +1,42 @@ +[workspace] +members = ["exex/hello-world", "exex/remote", "exex/tracking-state"] + +# Explicitly set the resolver to version 2, which is the default for packages with edition >= 2021 +# https://doc.rust-lang.org/edition-guide/rust-2021/default-cargo-resolver.html +resolver = "2" + +[patch.'https://github.com/paradigmxyz/reth'] +reth = { path = "../../bin/reth" } +reth-exex = { path = "../../crates/exex/exex" } +reth-node-ethereum = { path = "../../crates/ethereum/node" } +reth-tracing = { path = "../../crates/tracing" } +reth-node-api = { path = "../../crates/node/api" } + +[patch.crates-io] +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = 
"08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-network-primitives = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-rpc-types-admin = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-rpc-types-debug = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-rpc-types-eth = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-rpc-types-mev = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-rpc-types-txpool = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-serde = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-signer-local = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-transport-http = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } +alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "08fa016ed950b6e65f810fc9cdef7cf38fbc63f6" } diff --git a/book/sources/exex/hello-world/Cargo.toml b/docs/vocs/docs/snippets/sources/exex/hello-world/Cargo.toml similarity index 100% rename from book/sources/exex/hello-world/Cargo.toml rename to docs/vocs/docs/snippets/sources/exex/hello-world/Cargo.toml diff --git a/book/sources/exex/hello-world/src/bin/1.rs b/docs/vocs/docs/snippets/sources/exex/hello-world/src/bin/1.rs similarity index 100% rename from book/sources/exex/hello-world/src/bin/1.rs 
rename to docs/vocs/docs/snippets/sources/exex/hello-world/src/bin/1.rs diff --git a/book/sources/exex/hello-world/src/bin/2.rs b/docs/vocs/docs/snippets/sources/exex/hello-world/src/bin/2.rs similarity index 100% rename from book/sources/exex/hello-world/src/bin/2.rs rename to docs/vocs/docs/snippets/sources/exex/hello-world/src/bin/2.rs diff --git a/book/sources/exex/hello-world/src/bin/3.rs b/docs/vocs/docs/snippets/sources/exex/hello-world/src/bin/3.rs similarity index 100% rename from book/sources/exex/hello-world/src/bin/3.rs rename to docs/vocs/docs/snippets/sources/exex/hello-world/src/bin/3.rs diff --git a/book/sources/exex/remote/Cargo.toml b/docs/vocs/docs/snippets/sources/exex/remote/Cargo.toml similarity index 100% rename from book/sources/exex/remote/Cargo.toml rename to docs/vocs/docs/snippets/sources/exex/remote/Cargo.toml diff --git a/book/sources/exex/remote/build.rs b/docs/vocs/docs/snippets/sources/exex/remote/build.rs similarity index 100% rename from book/sources/exex/remote/build.rs rename to docs/vocs/docs/snippets/sources/exex/remote/build.rs diff --git a/book/sources/exex/remote/proto/exex.proto b/docs/vocs/docs/snippets/sources/exex/remote/proto/exex.proto similarity index 100% rename from book/sources/exex/remote/proto/exex.proto rename to docs/vocs/docs/snippets/sources/exex/remote/proto/exex.proto diff --git a/book/sources/exex/remote/src/consumer.rs b/docs/vocs/docs/snippets/sources/exex/remote/src/consumer.rs similarity index 100% rename from book/sources/exex/remote/src/consumer.rs rename to docs/vocs/docs/snippets/sources/exex/remote/src/consumer.rs diff --git a/book/sources/exex/remote/src/exex.rs b/docs/vocs/docs/snippets/sources/exex/remote/src/exex.rs similarity index 100% rename from book/sources/exex/remote/src/exex.rs rename to docs/vocs/docs/snippets/sources/exex/remote/src/exex.rs diff --git a/book/sources/exex/remote/src/exex_1.rs b/docs/vocs/docs/snippets/sources/exex/remote/src/exex_1.rs similarity index 100% rename from book/sources/exex/remote/src/exex_1.rs rename to docs/vocs/docs/snippets/sources/exex/remote/src/exex_1.rs diff --git a/book/sources/exex/remote/src/exex_2.rs b/docs/vocs/docs/snippets/sources/exex/remote/src/exex_2.rs similarity index 100% rename from book/sources/exex/remote/src/exex_2.rs rename to docs/vocs/docs/snippets/sources/exex/remote/src/exex_2.rs diff --git a/book/sources/exex/remote/src/exex_3.rs b/docs/vocs/docs/snippets/sources/exex/remote/src/exex_3.rs similarity index 100% rename from book/sources/exex/remote/src/exex_3.rs rename to docs/vocs/docs/snippets/sources/exex/remote/src/exex_3.rs diff --git a/book/sources/exex/remote/src/exex_4.rs b/docs/vocs/docs/snippets/sources/exex/remote/src/exex_4.rs similarity index 100% rename from book/sources/exex/remote/src/exex_4.rs rename to docs/vocs/docs/snippets/sources/exex/remote/src/exex_4.rs diff --git a/book/sources/exex/remote/src/lib.rs b/docs/vocs/docs/snippets/sources/exex/remote/src/lib.rs similarity index 100% rename from book/sources/exex/remote/src/lib.rs rename to docs/vocs/docs/snippets/sources/exex/remote/src/lib.rs diff --git a/book/sources/exex/tracking-state/Cargo.toml b/docs/vocs/docs/snippets/sources/exex/tracking-state/Cargo.toml similarity index 100% rename from book/sources/exex/tracking-state/Cargo.toml rename to docs/vocs/docs/snippets/sources/exex/tracking-state/Cargo.toml diff --git a/book/sources/exex/tracking-state/src/bin/1.rs b/docs/vocs/docs/snippets/sources/exex/tracking-state/src/bin/1.rs similarity index 100% rename from 
book/sources/exex/tracking-state/src/bin/1.rs rename to docs/vocs/docs/snippets/sources/exex/tracking-state/src/bin/1.rs diff --git a/book/sources/exex/tracking-state/src/bin/2.rs b/docs/vocs/docs/snippets/sources/exex/tracking-state/src/bin/2.rs similarity index 100% rename from book/sources/exex/tracking-state/src/bin/2.rs rename to docs/vocs/docs/snippets/sources/exex/tracking-state/src/bin/2.rs diff --git a/docs/vocs/docs/styles.css b/docs/vocs/docs/styles.css new file mode 100644 index 00000000000..fcfc8cf2cd6 --- /dev/null +++ b/docs/vocs/docs/styles.css @@ -0,0 +1,31 @@ +@import "tailwindcss" important; + +@custom-variant dark (&:where(.dark, .dark *)); + +[data-layout="landing"] .vocs_Button_button { + border-radius: 4px !important; + height: 36px !important; + padding: 0 16px !important; +} + +[data-layout="landing"] .vocs_Content { + position: inherit; +} + +#home-install .vocs_CodeGroup { + display: flex; + height: 100%; + flex-direction: column; +} + +#home-install .vocs_Tabs_content { + flex: 1; +} + +#home-install .vocs_Code { + font-size: 18px; +} + +.border-accent { + border: 1px solid var(--vocs-color_borderAccent) !important; +} diff --git a/docs/vocs/links-report.json b/docs/vocs/links-report.json new file mode 100644 index 00000000000..830568362a2 --- /dev/null +++ b/docs/vocs/links-report.json @@ -0,0 +1,17 @@ +{ + "timestamp": "2025-06-23T11:20:27.303Z", + "totalFiles": 106, + "totalLinks": 150, + "brokenLinks": [ + { + "file": "docs/pages/index.mdx", + "link": "/introduction/benchmarks", + "line": 110, + "reason": "Absolute path not found: /introduction/benchmarks" + } + ], + "summary": { + "brokenCount": 1, + "validCount": 149 + } +} \ No newline at end of file diff --git a/docs/vocs/package.json b/docs/vocs/package.json new file mode 100644 index 00000000000..f8d43111c51 --- /dev/null +++ b/docs/vocs/package.json @@ -0,0 +1,26 @@ +{ + "name": "vocs", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vocs dev", + "build": "bash scripts/build-cargo-docs.sh && vocs build && bun scripts/generate-redirects.ts && bun scripts/inject-cargo-docs.ts", + "preview": "vocs preview", + "check-links": "bun scripts/check-links.ts", + "generate-redirects": "bun scripts/generate-redirects.ts", + "build-cargo-docs": "bash scripts/build-cargo-docs.sh", + "inject-cargo-docs": "bun scripts/inject-cargo-docs.ts" + }, + "dependencies": { + "react": "latest", + "react-dom": "latest", + "vocs": "latest" + }, + "devDependencies": { + "@types/node": "latest", + "@types/react": "latest", + "glob": "^11.0.3", + "typescript": "latest" + } +} \ No newline at end of file diff --git a/docs/vocs/redirects.config.ts b/docs/vocs/redirects.config.ts new file mode 100644 index 00000000000..6d30c882a14 --- /dev/null +++ b/docs/vocs/redirects.config.ts @@ -0,0 +1,30 @@ +export const redirects: Record<string, string> = { + '/intro': '/overview', + // Installation redirects + '/installation/installation': '/installation/overview', + '/binaries': '/installation/binaries', + '/docker': '/installation/docker', + '/source': '/installation/source', + // Run a node redirects + '/run/run-a-node': '/run/overview', + '/run/mainnet': '/run/ethereum', + '/run/optimism': '/run/opstack', + '/run/sync-op-mainnet': '/run/faq/sync-op-mainnet', + '/run/private-testnet': '/run/private-testnets', + '/run/observability': '/run/monitoring', + '/run/config': '/run/configuration', + '/run/transactions': '/run/faq/transactions', + '/run/pruning': '/run/faq/pruning', + '/run/ports': '/run/faq/ports', +
'/run/troubleshooting': '/run/faq/troubleshooting', + // Exex + '/developers/exex': '/exex/overview', + '/developers/exex/how-it-works': '/exex/how-it-works', + '/developers/exex/hello-world': '/exex/hello-world', + '/developers/exex/tracking-state': '/exex/tracking-state', + '/developers/exex/remote': '/exex/remote', + // Contributing + '/developers/contribute': '/introduction/contributing', +} + +export const basePath = '/'; \ No newline at end of file diff --git a/docs/vocs/scripts/build-cargo-docs.sh b/docs/vocs/scripts/build-cargo-docs.sh new file mode 100755 index 00000000000..a1a8eeec0a7 --- /dev/null +++ b/docs/vocs/scripts/build-cargo-docs.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Script to build cargo docs with the same flags as used in CI + +# Navigate to the reth root directory (two levels up from docs/vocs) +cd ../.. || exit 1 + +echo "Building cargo docs..." + +# Build the documentation +export RUSTDOCFLAGS="--cfg docsrs --show-type-layout --generate-link-to-definition --enable-index-page -Zunstable-options" +cargo docs --exclude "example-*" + +echo "Cargo docs built successfully at ./target/doc" \ No newline at end of file diff --git a/docs/vocs/scripts/check-links.ts b/docs/vocs/scripts/check-links.ts new file mode 100644 index 00000000000..e6bf42c8cb5 --- /dev/null +++ b/docs/vocs/scripts/check-links.ts @@ -0,0 +1,316 @@ +#!/usr/bin/env bun +import { Glob } from "bun"; +import { readFileSync } from "node:fs"; +import { join, dirname, resolve, relative } from "node:path"; + +const CONFIG = { + DOCS_DIR: "./docs/pages", + PUBLIC_DIR: "./docs/public", + REPORT_PATH: "links-report.json", + FILE_PATTERNS: "**/*.{md,mdx}", + MARKDOWN_EXTENSIONS: /\.(md|mdx)$/, +} as const; + +interface BrokenLink { + file: string; + link: string; + line: number; + reason: string; +} + +interface LinkCheckReport { + timestamp: string; + totalFiles: number; + totalLinks: number; + brokenLinks: Array<BrokenLink>; + summary: { + brokenCount: number; + validCount: number; + }; +} + +main(); + +async function main() { + try { + const report = await checkLinks(); + await saveReport(report); + displayResults(report); + + process.exit(report.summary.brokenCount > 0 ?
1 : 0); + } catch (error) { + console.error("\n❌ Fatal error during link checking:"); + + if (error instanceof Error) { + console.error(` ${error.message}`); + if (error.stack) { + console.error("\nStack trace:"); + console.error(error.stack); + } + } else console.error(error); + + process.exit(2); + } +} + +async function checkLinks(): Promise<LinkCheckReport> { + console.log("🔍 Finding markdown files..."); + const files = await getAllMarkdownFiles(); + console.log(`📄 Found ${files.length} markdown files`); + + console.log("🔍 Finding public assets..."); + const publicAssets = await getAllPublicAssets(); + console.log(`🖼️ Found ${publicAssets.length} public assets`); + + console.log("🗺️ Building file path map..."); + const pathMap = buildFilePathMap(files, publicAssets); + console.log(`📍 Mapped ${pathMap.size} possible paths`); + + const brokenLinks: BrokenLink[] = []; + let totalLinks = 0; + + console.log("🔗 Checking links in files..."); + + for (let index = 0; index < files.length; index++) { + const file = files[index]; + + try { + const content = readFileSync(file, "utf-8"); + const links = extractLinksFromMarkdown(content); + + for (const { link, line } of links) { + totalLinks++; + const error = validateLink(link, file, pathMap); + + if (error) { + brokenLinks.push({ + file: relative(process.cwd(), file), + link, + line, + reason: error, + }); + } + } + } catch (error) { + console.error(`\nError reading ${file}:`, error); + } + } + + console.log("\n✅ Link checking complete!"); + + return { + timestamp: new Date().toISOString(), + totalFiles: files.length, + totalLinks, + brokenLinks, + summary: { + brokenCount: brokenLinks.length, + validCount: totalLinks - brokenLinks.length, + }, + }; +} + +async function getAllMarkdownFiles(): Promise<string[]> { + const glob = new Glob(CONFIG.FILE_PATTERNS); + const files = await Array.fromAsync(glob.scan({ cwd: CONFIG.DOCS_DIR })); + return files.map((file) => join(CONFIG.DOCS_DIR, file)); +} + +async function getAllPublicAssets(): Promise<string[]> { + const glob = new Glob("**/*"); + const files = await Array.fromAsync(glob.scan({ cwd: CONFIG.PUBLIC_DIR })); + return files; +} + +function buildFilePathMap( + files: Array<string>, + publicAssets: Array<string>, +): Set<string> { + const pathMap = new Set<string>(); + + const addPath = (path: string) => { + if (path && typeof path === "string") pathMap.add(path); + }; + + for (const file of files) { + const relativePath = relative(CONFIG.DOCS_DIR, file); + + addPath(relativePath); + + const withoutExt = relativePath.replace(CONFIG.MARKDOWN_EXTENSIONS, ""); + addPath(withoutExt); + + if (withoutExt.endsWith("/index")) + addPath(withoutExt.replace("/index", "")); + + addPath(`/${withoutExt}`); + if (withoutExt.endsWith("/index")) + addPath(`/${withoutExt.replace("/index", "")}`); + } + + for (const asset of publicAssets) addPath(`/${asset}`); + + return pathMap; +} + +function extractLinksFromMarkdown( + content: string, +): Array<{ link: string; line: number }> { + const lines = content.split("\n"); + const links: Array<{ link: string; line: number }> = []; + let inCodeBlock = false; + + for (let lineIndex = 0; lineIndex < lines.length; lineIndex++) { + const line = lines[lineIndex]; + const lineNumber = lineIndex + 1; + + // Toggle code block state + if (line.trim().startsWith("```")) { + inCodeBlock = !inCodeBlock; + continue; + } + + if (inCodeBlock) continue; + + // Strip inline code spans (the odd-indexed segments between backticks) so links inside them are ignored + const processedLine = line + .split("`") + .filter((_, index) => index % 2 === 0) + .join(""); + + links.push(...extractMarkdownLinks(processedLine, lineNumber)); +
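// Also scan raw HTML anchors, since MDX pages can link with <a href> as well as markdown syntax +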
links.push(...extractHtmlLinks(processedLine, lineNumber)); + } + + return links; +} + +function extractMarkdownLinks( + line: string, + lineNumber: number, +): Array<{ link: string; line: number }> { + const regex = /\[([^\]]*)\]\(([^)]+)\)/g; + return [...line.matchAll(regex)] + .map(([, , url]) => ({ link: url, line: lineNumber })) + .filter(({ link }) => isInternalLink(link)); +} + +function extractHtmlLinks( + line: string, + lineNumber: number, +): Array<{ link: string; line: number }> { + const regex = /<a[^>]+href=["']([^"']+)["'][^>]*>/g; + return [...line.matchAll(regex)] + .map(([, url]) => ({ link: url, line: lineNumber })) + .filter(({ link }) => isInternalLink(link)); +} + +function isInternalLink(url: string): boolean { + return ( + !url.startsWith("http") && + !url.startsWith("mailto:") && + !url.startsWith("#") + ); +} + +function validateLink( + link: string, + sourceFile: string, + pathMap: Set<string>, +): string | null { + const [linkPath] = link.split("#"); + if (!linkPath) return null; // Pure anchor link + + if (linkPath.startsWith("/")) return validateAbsolutePath(linkPath, pathMap); + return validateRelativePath(linkPath, sourceFile, pathMap); +} + +function validateAbsolutePath( + linkPath: string, + pathMap: Set<string>, +): string | null { + const variations = [ + linkPath, + linkPath.slice(1), // Remove leading slash + linkPath.replace(/\/$/, ""), // Remove trailing slash + linkPath + .slice(1) + .replace(/\/$/, ""), // Remove both + ]; + + return variations.some((path) => pathMap.has(path)) + ? null + : `Absolute path not found: ${linkPath}`; +} + +function validateRelativePath( + linkPath: string, + sourceFile: string, + pathMap: Set<string>, +): string | null { + const sourceDir = dirname(relative(CONFIG.DOCS_DIR, sourceFile)); + const resolvedPath = resolve(sourceDir, linkPath); + const normalizedPath = relative(".", resolvedPath); + + const variations = [ + linkPath, + normalizedPath, + `/${normalizedPath}`, + normalizedPath.replace(CONFIG.MARKDOWN_EXTENSIONS, ""), + `/${normalizedPath.replace(CONFIG.MARKDOWN_EXTENSIONS, "")}`, + ]; + + return variations.some((path) => pathMap.has(path)) + ?
null + : `Relative path not found: ${linkPath} (resolved to: ${normalizedPath})`; +} + +async function saveReport(report: LinkCheckReport) { + try { + await Bun.write(CONFIG.REPORT_PATH, JSON.stringify(report, null, 2)); + console.log(`\n📝 Report saved to: ${CONFIG.REPORT_PATH}`); + } catch (error) { + console.error( + `\n⚠️ Warning: Failed to save report to ${CONFIG.REPORT_PATH}`, + ); + console.error(error); + } +} + +function displayResults(report: LinkCheckReport) { + LinkCheckReporter.printSummary(report); + + if (report.brokenLinks.length > 0) + LinkCheckReporter.printBrokenLinks(report.brokenLinks); + else console.log("\n✅ All links are valid!"); +} + +const LinkCheckReporter = { + printSummary: (report: LinkCheckReport) => { + console.log("\n📊 Link Check Summary:"); + console.log(` 📄 Files checked: ${report.totalFiles}`); + console.log(` 🔗 Total links: ${report.totalLinks}`); + console.log(` ✅ Valid links: ${report.summary.validCount}`); + console.log(` ❌ Broken links: ${report.summary.brokenCount}`); + }, + printBrokenLinks: (brokenLinks: Array<BrokenLink>) => { + if (brokenLinks.length === 0) return; + + console.log("\n❌ Broken Links Found:\n"); + + const byFile = brokenLinks.reduce( + (acc, broken) => { + if (!acc[broken.file]) acc[broken.file] = []; + acc[broken.file].push(broken); + return acc; + }, + {} as Record<string, BrokenLink[]>, + ); + + for (const [file, links] of Object.entries(byFile)) { + console.log(`📄 ${file}:`); + for (const broken of links) { + console.log(` Line ${broken.line}: ${broken.link}`); + console.log(` └─ ${broken.reason}\n`); + } + } + }, +}; \ No newline at end of file diff --git a/docs/vocs/scripts/generate-redirects.ts b/docs/vocs/scripts/generate-redirects.ts new file mode 100644 index 00000000000..c56861a5a90 --- /dev/null +++ b/docs/vocs/scripts/generate-redirects.ts @@ -0,0 +1,54 @@ +#!/usr/bin/env bun +import { writeFileSync, mkdirSync } from 'fs' +import { join, dirname } from 'path' +import { redirects, basePath } from '../redirects.config' +// Base path for the site + +function generateRedirectHtml(targetPath: string): string { + return `<!DOCTYPE html> +<html> + <head> + <meta charset="utf-8"> + <title>Redirecting...</title> + <meta http-equiv="refresh" content="0; url=${targetPath}"> + <link rel="canonical" href="${targetPath}"> + </head> + <body>
+    <p>Reth mdbook has been migrated to new docs. If you are not redirected please <a href="${targetPath}">click here</a>.</p>
+  </body> +</html>` +} + +// Generate redirect files +Object.entries(redirects).forEach(([from, to]) => { + // Add base path to target if it doesn't already have it + const finalTarget = to.startsWith(basePath) ? to : `${basePath}${to}` + + // Remove base path if present in from path + const fromPath = from.replace(/^\/reth\//, '') + + // Generate both with and without .html + const paths = [fromPath] + if (!fromPath.endsWith('.html')) { + paths.push(`${fromPath}.html`) + } + + paths.forEach(path => { + const filePath = join('./docs/dist', path) + if (!path.includes('.')) { + // It's a directory path, create index.html + const indexPath = join('./docs/dist', path, 'index.html') + mkdirSync(dirname(indexPath), { recursive: true }) + writeFileSync(indexPath, generateRedirectHtml(finalTarget)) + } else { + // It's a file path + mkdirSync(dirname(filePath), { recursive: true }) + writeFileSync(filePath, generateRedirectHtml(finalTarget)) + } + }) +}) + +console.log('Redirects generated successfully!') \ No newline at end of file diff --git a/docs/vocs/scripts/inject-cargo-docs.ts b/docs/vocs/scripts/inject-cargo-docs.ts new file mode 100644 index 00000000000..1f8fee260d9 --- /dev/null +++ b/docs/vocs/scripts/inject-cargo-docs.ts @@ -0,0 +1,105 @@ +import { promises as fs } from 'fs'; +import { join, relative } from 'path'; +import { glob } from 'glob'; + +const CARGO_DOCS_PATH = '../../target/doc'; +const VOCS_DIST_PATH = './docs/dist/docs'; +const BASE_PATH = '/docs'; + +async function injectCargoDocs() { + console.log('Injecting cargo docs into Vocs dist...'); + + // Check if cargo docs exist + try { + await fs.access(CARGO_DOCS_PATH); + } catch { + console.error(`Error: Cargo docs not found at ${CARGO_DOCS_PATH}`); + console.error("Please run: cargo doc --no-deps --workspace --exclude 'example-*'"); + process.exit(1); + } + + // Check if Vocs dist exists + try { + await fs.access('./docs/dist'); + } catch { + console.error('Error: Vocs dist not found. 
Please run: bun run build'); + process.exit(1); + } + + // Create docs directory in dist if it doesn't exist + await fs.mkdir(VOCS_DIST_PATH, { recursive: true }); + + // Copy all cargo docs to the dist/docs folder + console.log(`Copying cargo docs to ${VOCS_DIST_PATH}...`); + await fs.cp(CARGO_DOCS_PATH, VOCS_DIST_PATH, { recursive: true }); + + // Fix relative paths in HTML files to work from /reth/docs + console.log('Fixing relative paths in HTML files...'); + + const htmlFiles = await glob(`${VOCS_DIST_PATH}/**/*.html`); + + for (const file of htmlFiles) { + let content = await fs.readFile(file, 'utf-8'); + + // Fix static file references + content = content + // CSS and JS in static.files + .replace(/href="\.\/static\.files\//g, `href="${BASE_PATH}/static.files/`) + .replace(/src="\.\/static\.files\//g, `src="${BASE_PATH}/static.files/`) + .replace(/href="\.\.\/static\.files\//g, `href="${BASE_PATH}/static.files/`) + .replace(/src="\.\.\/static\.files\//g, `src="${BASE_PATH}/static.files/`) + + // Fix the dynamic font loading in the script tag + .replace(/href="\$\{f\}"/g, `href="${BASE_PATH}/static.files/\${f}"`) + .replace(/href="\.\/static\.files\/\$\{f\}"/g, `href="${BASE_PATH}/static.files/\${f}"`) + + // Fix crate navigation links + .replace(/href="\.\/([^/]+)\/index\.html"/g, `href="${BASE_PATH}/$1/index.html"`) + .replace(/href="\.\.\/([^/]+)\/index\.html"/g, `href="${BASE_PATH}/$1/index.html"`) + // Fix simple crate links (without ./ or ../) + .replace(/href="([^/:"]+)\/index\.html"/g, `href="${BASE_PATH}/$1/index.html"`) + + // Fix root index.html links + .replace(/href="\.\/index\.html"/g, `href="${BASE_PATH}/index.html"`) + .replace(/href="\.\.\/index\.html"/g, `href="${BASE_PATH}/index.html"`) + + // Fix rustdoc data attributes + .replace(/data-root-path="\.\/"/g, `data-root-path="${BASE_PATH}/"`) + .replace(/data-root-path="\.\.\/"/g, `data-root-path="${BASE_PATH}/"`) + .replace(/data-static-root-path="\.\/static\.files\/"/g, `data-static-root-path="${BASE_PATH}/static.files/"`) + .replace(/data-static-root-path="\.\.\/static\.files\/"/g, `data-static-root-path="${BASE_PATH}/static.files/"`) + + // Fix search index paths + .replace(/data-search-index-js="([^"]+)"/g, `data-search-index-js="${BASE_PATH}/static.files/$1"`) + .replace(/data-search-js="([^"]+)"/g, `data-search-js="${BASE_PATH}/static.files/$1"`) + .replace(/data-settings-js="([^"]+)"/g, `data-settings-js="${BASE_PATH}/static.files/$1"`) + + // Fix logo paths + .replace(/src="\.\/static\.files\/rust-logo/g, `src="${BASE_PATH}/static.files/rust-logo`) + .replace(/src="\.\.\/static\.files\/rust-logo/g, `src="${BASE_PATH}/static.files/rust-logo`); + + await fs.writeFile(file, content, 'utf-8'); + } + + // Also fix paths in JavaScript files + const jsFiles = await glob(`${VOCS_DIST_PATH}/**/*.js`); + + for (const file of jsFiles) { + let content = await fs.readFile(file, 'utf-8'); + + // Fix any hardcoded paths in JS files + content = content + .replace(/"\.\/static\.files\//g, `"${BASE_PATH}/static.files/`) + .replace(/"\.\.\/static\.files\//g, `"${BASE_PATH}/static.files/`) + .replace(/"\.\/([^/]+)\/index\.html"/g, `"${BASE_PATH}/$1/index.html"`) + .replace(/"\.\.\/([^/]+)\/index\.html"/g, `"${BASE_PATH}/$1/index.html"`); + + await fs.writeFile(file, content, 'utf-8'); + } + + console.log('Cargo docs successfully injected!'); + console.log(`The crate documentation will be available at ${BASE_PATH}`); +} + +// Run the script +injectCargoDocs().catch(console.error); \ No newline at end of file diff --git 
a/docs/vocs/sidebar.ts b/docs/vocs/sidebar.ts new file mode 100644 index 00000000000..65829d8e48c --- /dev/null +++ b/docs/vocs/sidebar.ts @@ -0,0 +1,514 @@ +import { SidebarItem } from "vocs"; + +export const sidebar: SidebarItem[] = [ + { + text: "Introduction", + items: [ + { + text: "Overview", + link: "/overview" + }, + { + text: "Why Reth?", + link: "/introduction/why-reth" + }, + { + text: "Contributing", + link: "/introduction/contributing" + } + ] + }, + { + text: "Reth for Node Operators", + items: [ + { + text: "System Requirements", + link: "/run/system-requirements" + }, + { + text: "Installation", + collapsed: true, + items: [ + { + text: "Overview", + link: "/installation/overview" + }, + { + text: "Pre-Built Binaries", + link: "/installation/binaries" + }, + { + text: "Docker", + link: "/installation/docker" + }, + { + text: "Build from Source", + link: "/installation/source" + }, + { + text: "Build for ARM devices", + link: "/installation/build-for-arm-devices" + }, + { + text: "Update Priorities", + link: "/installation/priorities" + } + ] + }, + { + text: "Running a Node", + items: [ + { + text: "Overview", + link: "/run/overview", + }, + { + text: "Networks", + // link: "/run/networks", + items: [ + { + text: "Ethereum", + link: "/run/ethereum", + // items: [ + // { + // text: "Snapshots", + // link: "/run/ethereum/snapshots" + // } + // ] + }, + { + text: "OP-stack", + link: "/run/opstack", + // items: [ + // { + // text: "Caveats OP-Mainnet", + // link: "/run/opstack/op-mainnet-caveats" + // } + // ] + }, + { + text: "Private testnets", + link: "/run/private-testnets" + } + ] + }, + ] + }, + { + text: "Configuration", + link: "/run/configuration" + }, + { + text: "Monitoring", + link: "/run/monitoring" + }, + { + text: "FAQ", + link: "/run/faq", + collapsed: true, + items: [ + { + text: "Transaction Types", + link: "/run/faq/transactions" + }, + { + text: "Pruning & Full Node", + link: "/run/faq/pruning" + }, + { + text: "Ports", + link: "/run/faq/ports" + }, + { + text: "Profiling", + link: "/run/faq/profiling" + }, + { + text: "Sync OP Mainnet", + link: "/run/faq/sync-op-mainnet" + } + ] + } + ] + }, + { + text: "Reth as a library", + items: [ + { + text: "Overview", + link: "/sdk/overview" + }, + { + text: "Typesystem", + items: [ + { + text: "Block", + link: "/sdk/typesystem/block" + }, + { + text: "Transaction types", + link: "/sdk/typesystem/transaction-types" + } + ] + }, + { + text: "What is in a node?", + collapsed: false, + items: [ + { + text: "Network", + link: "/sdk/node-components/network" + }, + { + text: "Pool", + link: "/sdk/node-components/pool" + }, + { + text: "Consensus", + link: "/sdk/node-components/consensus" + }, + { + text: "EVM", + link: "/sdk/node-components/evm" + }, + { + text: "RPC", + link: "/sdk/node-components/rpc" + } + ] + }, + // TODO + // { + // text: "Build a custom node", + // items: [ + // { + // text: "Prerequisites and Considerations", + // link: "/sdk/custom-node/prerequisites" + // }, + // { + // text: "What modifications and how", + // link: "/sdk/custom-node/modifications" + // } + // ] + // }, + // { + // text: "Examples", + // items: [ + // { + // text: "How to modify an existing node", + // items: [ + // { + // text: "Additional features: RPC endpoints, services", + // link: "/sdk/examples/modify-node" + // } + // ] + // }, + // { + // text: "How to use standalone components", + // items: [ + // { + // text: "Interact with the disk directly + caveats", + // link: "/sdk/examples/standalone-components" + // } + // ] + // 
} + // ] + // } + ] + }, + { + text: "Execution Extensions", + items: [ + { + text: "Overview", + link: "/exex/overview" + }, + { + text: "How do ExExes work?", + link: "/exex/how-it-works" + }, + { + text: "Hello World", + link: "/exex/hello-world" + }, + { + text: "Tracking State", + link: "/exex/tracking-state" + }, + { + text: "Remote", + link: "/exex/remote" + } + ] + }, + { + text: "Interacting with Reth over JSON-RPC", + + items: [ + { + text: "Overview", + link: "/jsonrpc/intro", + }, + { + text: "eth", + link: "/jsonrpc/eth" + }, + { + text: "web3", + link: "/jsonrpc/web3" + }, + { + text: "net", + link: "/jsonrpc/net" + }, + { + text: "txpool", + link: "/jsonrpc/txpool" + }, + { + text: "debug", + link: "/jsonrpc/debug" + }, + { + text: "trace", + link: "/jsonrpc/trace" + }, + { + text: "admin", + link: "/jsonrpc/admin" + }, + { + text: "rpc", + link: "/jsonrpc/rpc" + } + ] + }, + { + text: "CLI Reference", + link: "/cli/cli", + collapsed: false, + items: [ + { + text: "reth", + link: "/cli/reth", + collapsed: false, + items: [ + { + text: "reth node", + link: "/cli/reth/node" + }, + { + text: "reth init", + link: "/cli/reth/init" + }, + { + text: "reth init-state", + link: "/cli/reth/init-state" + }, + { + text: "reth import", + link: "/cli/reth/import" + }, + { + text: "reth import-era", + link: "/cli/reth/import-era" + }, + { + text: "reth dump-genesis", + link: "/cli/reth/dump-genesis" + }, + { + text: "reth db", + link: "/cli/reth/db", + collapsed: true, + items: [ + { + text: "reth db stats", + link: "/cli/reth/db/stats" + }, + { + text: "reth db list", + link: "/cli/reth/db/list" + }, + { + text: "reth db checksum", + link: "/cli/reth/db/checksum" + }, + { + text: "reth db diff", + link: "/cli/reth/db/diff" + }, + { + text: "reth db get", + link: "/cli/reth/db/get", + collapsed: true, + items: [ + { + text: "reth db get mdbx", + link: "/cli/reth/db/get/mdbx" + }, + { + text: "reth db get static-file", + link: "/cli/reth/db/get/static-file" + } + ] + }, + { + text: "reth db drop", + link: "/cli/reth/db/drop" + }, + { + text: "reth db clear", + link: "/cli/reth/db/clear", + collapsed: true, + items: [ + { + text: "reth db clear mdbx", + link: "/cli/reth/db/clear/mdbx" + }, + { + text: "reth db clear static-file", + link: "/cli/reth/db/clear/static-file" + } + ] + }, + { + text: "reth db version", + link: "/cli/reth/db/version" + }, + { + text: "reth db path", + link: "/cli/reth/db/path" + } + ] + }, + { + text: "reth download", + link: "/cli/reth/download" + }, + { + text: "reth stage", + link: "/cli/reth/stage", + collapsed: true, + items: [ + { + text: "reth stage run", + link: "/cli/reth/stage/run" + }, + { + text: "reth stage drop", + link: "/cli/reth/stage/drop" + }, + { + text: "reth stage dump", + link: "/cli/reth/stage/dump", + collapsed: true, + items: [ + { + text: "reth stage dump execution", + link: "/cli/reth/stage/dump/execution" + }, + { + text: "reth stage dump storage-hashing", + link: "/cli/reth/stage/dump/storage-hashing" + }, + { + text: "reth stage dump account-hashing", + link: "/cli/reth/stage/dump/account-hashing" + }, + { + text: "reth stage dump merkle", + link: "/cli/reth/stage/dump/merkle" + } + ] + }, + { + text: "reth stage unwind", + link: "/cli/reth/stage/unwind", + collapsed: true, + items: [ + { + text: "reth stage unwind to-block", + link: "/cli/reth/stage/unwind/to-block" + }, + { + text: "reth stage unwind num-blocks", + link: "/cli/reth/stage/unwind/num-blocks" + } + ] + } + ] + }, + { + text: "reth p2p", + link: "/cli/reth/p2p", + 
collapsed: true, + items: [ + { + text: "reth p2p header", + link: "/cli/reth/p2p/header" + }, + { + text: "reth p2p body", + link: "/cli/reth/p2p/body" + }, + { + text: "reth p2p rlpx", + link: "/cli/reth/p2p/rlpx", + collapsed: true, + items: [ + { + text: "reth p2p rlpx ping", + link: "/cli/reth/p2p/rlpx/ping" + } + ] + } + ] + }, + { + text: "reth config", + link: "/cli/reth/config" + }, + { + text: "reth debug", + link: "/cli/reth/debug", + collapsed: true, + items: [ + { + text: "reth debug execution", + link: "/cli/reth/debug/execution" + }, + { + text: "reth debug merkle", + link: "/cli/reth/debug/merkle" + }, + { + text: "reth debug in-memory-merkle", + link: "/cli/reth/debug/in-memory-merkle" + }, + { + text: "reth debug build-block", + link: "/cli/reth/debug/build-block" + } + ] + }, + { + text: "reth recover", + link: "/cli/reth/recover", + collapsed: true, + items: [ + { + text: "reth recover storage-tries", + link: "/cli/reth/recover/storage-tries" + } + ] + }, + { + text: "reth prune", + link: "/cli/reth/prune" + } + ] + } + ] + }, +] \ No newline at end of file diff --git a/docs/vocs/tsconfig.json b/docs/vocs/tsconfig.json new file mode 100644 index 00000000000..d2636aac47e --- /dev/null +++ b/docs/vocs/tsconfig.json @@ -0,0 +1,24 @@ +{ + "compilerOptions": { + "target": "ES2020", + "useDefineForClassFields": true, + "lib": ["ES2020", "DOM", "DOM.Iterable"], + "module": "ESNext", + "skipLibCheck": true, + + /* Bundler mode */ + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + "jsx": "react-jsx", + + /* Linting */ + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true + }, + "include": ["**/*.ts", "**/*.tsx"] +} diff --git a/docs/vocs/vocs.config.ts b/docs/vocs/vocs.config.ts new file mode 100644 index 00000000000..1f1b76f6a70 --- /dev/null +++ b/docs/vocs/vocs.config.ts @@ -0,0 +1,70 @@ +import { defineConfig } from 'vocs' +import { sidebar } from './sidebar' +import { basePath } from './redirects.config' + +export default defineConfig({ + title: 'Reth', + logoUrl: '/logo.png', + iconUrl: '/logo.png', + ogImageUrl: '/reth-prod.png', + sidebar, + basePath, + topNav: [ + { text: 'Run', link: '/run/ethereum' }, + { text: 'SDK', link: '/sdk/overview' }, + { text: 'Rustdocs', link: '/docs' }, + { text: 'GitHub', link: 'https://github.com/paradigmxyz/reth' }, + { + text: 'v1.5.0', + items: [ + { + text: 'Releases', + link: 'https://github.com/paradigmxyz/reth/releases' + }, + { + text: 'Contributing', + link: 'https://github.com/paradigmxyz/reth/blob/main/CONTRIBUTING.md' + } + ] + } + ], + socials: [ + { + icon: 'github', + link: 'https://github.com/paradigmxyz/reth', + }, + { + icon: 'telegram', + link: 'https://t.me/paradigm_reth', + }, + ], + sponsors: [ + { + name: 'Collaborators', + height: 120, + items: [ + [ + { + name: 'Paradigm', + link: 'https://paradigm.xyz', + image: 'https://raw.githubusercontent.com/wevm/.github/main/content/sponsors/paradigm-light.svg', + }, + { + name: 'Ithaca', + link: 'https://ithaca.xyz', + image: 'https://raw.githubusercontent.com/wevm/.github/main/content/sponsors/ithaca-light.svg', + } + ] + ] + } + ], + theme: { + accentColor: { + light: '#1f1f1f', + dark: '#ffffff', + } + }, + editLink: { + pattern: "https://github.com/paradigmxyz/reth/edit/main/docs/vocs/docs/pages/:path", + } +}) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index
edfabec7f17..af794a3b1c5 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -14,6 +14,13 @@ "description": "", "type": "datasource", "pluginId": "__expr__" + }, + { + "name": "VAR_INSTANCE_LABEL", + "type": "constant", + "label": "Instance Label", + "value": "job", + "description": "" } ], "__elements": {}, @@ -177,7 +184,7 @@ }, "editorMode": "builder", "exemplar": false, - "expr": "reth_info{job=\"$job\"}", + "expr": "reth_info{$instance_label=\"$instance\"}", "instant": true, "legendFormat": "{{version}}", "range": false, @@ -245,7 +252,7 @@ }, "editorMode": "builder", "exemplar": false, - "expr": "reth_info{job=\"$job\"}", + "expr": "reth_info{$instance_label=\"$instance\"}", "instant": true, "legendFormat": "{{build_timestamp}}", "range": false, @@ -313,7 +320,7 @@ }, "editorMode": "builder", "exemplar": false, - "expr": "reth_info{job=\"$job\"}", + "expr": "reth_info{$instance_label=\"$instance\"}", "instant": true, "legendFormat": "{{git_sha}}", "range": false, @@ -381,7 +388,7 @@ }, "editorMode": "builder", "exemplar": false, - "expr": "reth_info{job=\"$job\"}", + "expr": "reth_info{$instance_label=\"$instance\"}", "instant": true, "legendFormat": "{{build_profile}}", "range": false, @@ -449,7 +456,7 @@ }, "editorMode": "builder", "exemplar": false, - "expr": "reth_info{job=\"$job\"}", + "expr": "reth_info{$instance_label=\"$instance\"}", "instant": true, "legendFormat": "{{target_triple}}", "range": false, @@ -517,7 +524,7 @@ }, "editorMode": "builder", "exemplar": false, - "expr": "reth_info{job=\"$job\"}", + "expr": "reth_info{$instance_label=\"$instance\"}", "instant": true, "legendFormat": "{{cargo_features}}", "range": false, @@ -594,7 +601,7 @@ }, "editorMode": "builder", "exemplar": false, - "expr": "reth_network_connected_peers{job=\"$job\"}", + "expr": "reth_network_connected_peers{$instance_label=\"$instance\"}", "instant": true, "legendFormat": "__auto", "range": false, @@ -668,7 +675,7 @@ }, "editorMode": "builder", "exemplar": false, - "expr": "reth_sync_checkpoint{job=\"$job\"}", + "expr": "reth_sync_checkpoint{$instance_label=\"$instance\"}", "instant": true, "legendFormat": "{{stage}}", "range": false, @@ -767,7 +774,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "sum(reth_db_table_size{job=\"$job\"})", + "expr": "sum(reth_db_table_size{$instance_label=\"$instance\"})", "legendFormat": "Database", "range": true, "refId": "A" @@ -778,7 +785,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum(reth_db_freelist{job=\"$job\"} * reth_db_page_size{job=\"$job\"})", + "expr": "sum(reth_db_freelist{$instance_label=\"$instance\"} * reth_db_page_size{$instance_label=\"$instance\"})", "hide": false, "instant": false, "legendFormat": "Freelist", @@ -791,7 +798,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "sum(reth_static_files_segment_size{job=\"$job\"})", + "expr": "sum(reth_static_files_segment_size{$instance_label=\"$instance\"})", "hide": false, "instant": false, "legendFormat": "Static Files", @@ -804,7 +811,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum(reth_db_table_size{job=\"$job\"}) + sum(reth_db_freelist{job=\"$job\"} * reth_db_page_size{job=\"$job\"}) + sum(reth_static_files_segment_size{job=\"$job\"})", + "expr": "sum(reth_db_table_size{$instance_label=\"$instance\"}) + sum(reth_db_freelist{$instance_label=\"$instance\"} * reth_db_page_size{$instance_label=\"$instance\"}) + sum(reth_static_files_segment_size{$instance_label=\"$instance\"})", 
"hide": false, "instant": false, "legendFormat": "Total", @@ -903,7 +910,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_sync_entities_processed{job=\"$job\"} / reth_sync_entities_total{job=\"$job\"}", + "expr": "reth_sync_entities_processed{$instance_label=\"$instance\"} / reth_sync_entities_total{$instance_label=\"$instance\"}", "legendFormat": "{{stage}}", "range": true, "refId": "A" @@ -1000,7 +1007,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_sync_checkpoint{job=\"$job\"}", + "expr": "reth_sync_checkpoint{$instance_label=\"$instance\"}", "legendFormat": "{{stage}}", "range": true, "refId": "A" @@ -1121,7 +1128,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{job=\"$job\", quantile=\"0\"}", + "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "includeNullMetadata": true, "legendFormat": "engine_forkchoiceUpdatedV1 min", @@ -1136,7 +1143,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{job=\"$job\", quantile=\"0.5\"}", + "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1152,7 +1159,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{job=\"$job\", quantile=\"0.9\"}", + "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1168,7 +1175,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{job=\"$job\", quantile=\"0.95\"}", + "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1184,7 +1191,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{job=\"$job\", quantile=\"0.99\"}", + "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1200,7 +1207,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{job=\"$job\", quantile=\"0\"}", + "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1216,7 +1223,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{job=\"$job\", quantile=\"0.5\"}", + "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1232,7 +1239,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{job=\"$job\", quantile=\"0.9\"}", + "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1248,7 +1255,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{job=\"$job\", quantile=\"0.95\"}", + "expr": 
"reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1264,7 +1271,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{job=\"$job\", quantile=\"0.99\"}", + "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1280,7 +1287,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{job=\"$job\", quantile=\"0\"}", + "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1296,7 +1303,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{job=\"$job\", quantile=\"0.5\"}", + "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1312,7 +1319,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{job=\"$job\", quantile=\"0.9\"}", + "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1328,7 +1335,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{job=\"$job\", quantile=\"0.95\"}", + "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1344,7 +1351,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{job=\"$job\", quantile=\"0.99\"}", + "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1469,7 +1476,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{job=\"$job\", quantile=\"0\"}", + "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "includeNullMetadata": true, "legendFormat": "engine_newPayloadV1 min", @@ -1484,7 +1491,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{job=\"$job\", quantile=\"0.5\"}", + "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1500,7 +1507,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{job=\"$job\", quantile=\"0.9\"}", + "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1516,7 +1523,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{job=\"$job\", quantile=\"0.95\"}", + "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1532,7 +1539,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": 
"reth_engine_rpc_new_payload_v1{job=\"$job\", quantile=\"0.99\"}", + "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1548,7 +1555,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{job=\"$job\", quantile=\"0\"}", + "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1564,7 +1571,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{job=\"$job\", quantile=\"0.5\"}", + "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1580,7 +1587,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{job=\"$job\", quantile=\"0.9\"}", + "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1596,7 +1603,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{job=\"$job\", quantile=\"0.95\"}", + "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1612,7 +1619,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{job=\"$job\", quantile=\"0.99\"}", + "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1628,7 +1635,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{job=\"$job\", quantile=\"0\"}", + "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1644,7 +1651,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{job=\"$job\", quantile=\"0.5\"}", + "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1660,7 +1667,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{job=\"$job\", quantile=\"0.9\"}", + "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1676,7 +1683,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{job=\"$job\", quantile=\"0.95\"}", + "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1692,7 +1699,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{job=\"$job\", quantile=\"0.99\"}", + "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1708,7 +1715,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{job=\"$job\", quantile=\"0\"}", + "expr": 
"reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1724,7 +1731,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{job=\"$job\", quantile=\"0.5\"}", + "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1740,7 +1747,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{job=\"$job\", quantile=\"0.9\"}", + "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1756,7 +1763,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{job=\"$job\", quantile=\"0.95\"}", + "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1772,7 +1779,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{job=\"$job\", quantile=\"0.99\"}", + "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1785,6 +1792,268 @@ "title": "Engine API newPayload Latency", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "The metric is the amount of gas processed in a block", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "sishort" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 29 + }, + "id": 1004, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.5\"}", + "legendFormat": "p50", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.9\"}", + "hide": false, + "legendFormat": "p90", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": 
"reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.95\"}", + "hide": false, + "legendFormat": "p95", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.99\"}", + "hide": false, + "legendFormat": "p99", + "range": true, + "refId": "D" + } + ], + "title": "Engine API newPayload Total Gas", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "The throughput of the Engine API newPayload method. The metric is the amount of gas processed in a block, divided by the time it took to process the newPayload request.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "si: gas/s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 29 + }, + "id": 1003, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.5\"}", + "legendFormat": "p50", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.9\"}", + "hide": false, + "legendFormat": "p90", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.95\"}", + "hide": false, + "legendFormat": "p95", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.99\"}", + "hide": false, + "legendFormat": "p99", + "range": true, + "refId": "D" + } + ], + "title": "Engine API newPayload Throughput", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", @@ -1847,7 +2116,7 @@ "h": 8, "w": 12, "x": 0, - "y": 29 + "y": 37 }, "id": 56, "options": { @@ -1871,7 +2140,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_sync_execution_gas_per_second{job=\"$job\"}", + "expr": 
"reth_sync_execution_gas_per_second{$instance_label=\"$instance\"}", "legendFormat": "Gas/s", "range": true, "refId": "A" @@ -1883,7 +2152,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg_over_time(reth_sync_execution_gas_per_second{job=\"$job\"}[1m])", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{$instance_label=\"$instance\"}[1m])", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1899,7 +2168,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg_over_time(reth_sync_execution_gas_per_second{job=\"$job\"}[5m])", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{$instance_label=\"$instance\"}[5m])", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1915,7 +2184,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg_over_time(reth_sync_execution_gas_per_second{job=\"$job\"}[10m])", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{$instance_label=\"$instance\"}[10m])", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1931,7 +2200,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg_over_time(reth_sync_execution_gas_per_second{job=\"$job\"}[30m])", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{$instance_label=\"$instance\"}[30m])", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1947,7 +2216,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg_over_time(reth_sync_execution_gas_per_second{job=\"$job\"}[1h])", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{$instance_label=\"$instance\"}[1h])", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -1963,7 +2232,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg_over_time(reth_sync_execution_gas_per_second{job=\"$job\"}[24h])", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{$instance_label=\"$instance\"}[24h])", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2041,7 +2310,7 @@ "h": 8, "w": 12, "x": 12, - "y": 29 + "y": 37 }, "id": 240, "options": { @@ -2066,7 +2335,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_sync_block_validation_state_root_duration{job=\"$job\"}", + "expr": "reth_sync_block_validation_state_root_duration{$instance_label=\"$instance\"}", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -2082,7 +2351,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_sync_execution_execution_duration{job=\"$job\"}", + "expr": "reth_sync_execution_execution_duration{$instance_label=\"$instance\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2102,7 +2371,7 @@ "h": 1, "w": 24, "x": 0, - "y": 37 + "y": 45 }, "id": 87, "panels": [], @@ -2175,7 +2444,7 @@ "h": 8, "w": 12, "x": 0, - "y": 38 + "y": 46 }, "id": 84, "options": { @@ -2199,7 +2468,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "rate(reth_consensus_engine_beacon_forkchoice_updated_messages{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_consensus_engine_beacon_forkchoice_updated_messages{$instance_label=\"$instance\"}[$__rate_interval])", "legendFormat": "forkchoiceUpdated", "range": true, "refId": "A" @@ -2210,7 +2479,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "rate(reth_consensus_engine_beacon_new_payload_messages{job=\"$job\"}[$__rate_interval])", + "expr": 
"rate(reth_consensus_engine_beacon_new_payload_messages{$instance_label=\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "newPayload", "range": true, @@ -2286,7 +2555,7 @@ "h": 8, "w": 12, "x": 12, - "y": 38 + "y": 46 }, "id": 249, "options": { @@ -2310,7 +2579,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "rate(reth_consensus_engine_beacon_failed_new_payload_response_deliveries{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_consensus_engine_beacon_failed_new_payload_response_deliveries{$instance_label=\"$instance\"}[$__rate_interval])", "legendFormat": "newPayload", "range": true, "refId": "A" @@ -2321,7 +2590,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "rate(reth_consensus_engine_beacon_failed_forkchoice_updated_response_deliveries{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_consensus_engine_beacon_failed_forkchoice_updated_response_deliveries{$instance_label=\"$instance\"}[$__rate_interval])", "legendFormat": "forkchoiceUpdated", "range": true, "refId": "B" @@ -2396,7 +2665,7 @@ "h": 8, "w": 12, "x": 0, - "y": 46 + "y": 54 }, "id": 213, "options": { @@ -2420,7 +2689,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_forkchoice_updated_time_diff{job=\"$job\"}", + "expr": "reth_engine_rpc_new_payload_forkchoice_updated_time_diff{$instance_label=\"$instance\"}", "legendFormat": "p{{quantile}}", "range": true, "refId": "A" @@ -2495,7 +2764,7 @@ "h": 8, "w": 12, "x": 12, - "y": 46 + "y": 54 }, "id": 212, "options": { @@ -2520,7 +2789,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{job=\"$job\", quantile=\"0\"}", + "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2536,7 +2805,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{job=\"$job\", quantile=\"0.5\"}", + "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2552,7 +2821,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{job=\"$job\", quantile=\"0.9\"}", + "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{$instance_label=\"$instance\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2568,7 +2837,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{job=\"$job\", quantile=\"0.95\"}", + "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2584,7 +2853,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{job=\"$job\", quantile=\"0.99\"}", + "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2600,7 +2869,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{job=\"$job\", quantile=\"0\"}", + "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{$instance_label=\"$instance\", quantile=\"0\"}", 
"fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2616,7 +2885,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{job=\"$job\", quantile=\"0.5\"}", + "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2632,7 +2901,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{job=\"$job\", quantile=\"0.9\"}", + "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{$instance_label=\"$instance\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2648,7 +2917,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{job=\"$job\", quantile=\"0.95\"}", + "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2664,7 +2933,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{job=\"$job\", quantile=\"0.99\"}", + "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2738,7 +3007,7 @@ "h": 8, "w": 12, "x": 0, - "y": 54 + "y": 62 }, "id": 1000, "options": { @@ -2762,7 +3031,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "rate(reth_engine_rpc_blobs_blob_count{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_engine_rpc_blobs_blob_count{$instance_label=\"$instance\"}[$__rate_interval])", "legendFormat": "Found", "range": true, "refId": "A" @@ -2773,7 +3042,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "rate(reth_engine_rpc_blobs_blob_misses{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_engine_rpc_blobs_blob_misses{$instance_label=\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Missed", "range": true, @@ -2848,7 +3117,7 @@ "h": 8, "w": 12, "x": 12, - "y": 54 + "y": 62 }, "id": 258, "options": { @@ -2873,7 +3142,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_blobs_v1{job=\"$job\", quantile=\"0.5\"}", + "expr": "reth_engine_rpc_get_blobs_v1{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2889,7 +3158,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_blobs_v1{job=\"$job\", quantile=\"0.95\"}", + "expr": "reth_engine_rpc_get_blobs_v1{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2905,7 +3174,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_blobs_v1{job=\"$job\", quantile=\"0.99\"}", + "expr": "reth_engine_rpc_get_blobs_v1{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2921,7 +3190,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_blobs_v1{job=\"$job\", quantile=\"0\"}", + "expr": "reth_engine_rpc_get_blobs_v1{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -2937,7 +3206,7 @@ }, "disableTextWrap": 
false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_blobs_v1{job=\"$job\", quantile=\"1\"}", + "expr": "reth_engine_rpc_get_blobs_v1{$instance_label=\"$instance\", quantile=\"1\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -3015,7 +3284,7 @@ "h": 8, "w": 12, "x": 0, - "y": 62 + "y": 70 }, "id": 85, "options": { @@ -3039,7 +3308,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_consensus_engine_beacon_pipeline_runs{job=\"$job\"}", + "expr": "reth_consensus_engine_beacon_pipeline_runs{$instance_label=\"$instance\"}", "legendFormat": "Pipeline runs", "range": true, "refId": "A" @@ -3113,7 +3382,7 @@ "h": 8, "w": 12, "x": 12, - "y": 62 + "y": 70 }, "id": 83, "options": { @@ -3137,7 +3406,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_consensus_engine_beacon_active_block_downloads{job=\"$job\"}", + "expr": "reth_consensus_engine_beacon_active_block_downloads{$instance_label=\"$instance\"}", "legendFormat": "Active block downloads", "range": true, "refId": "A" @@ -3152,7 +3421,7 @@ "h": 1, "w": 24, "x": 0, - "y": 70 + "y": 78 }, "id": 46, "panels": [], @@ -3225,7 +3494,7 @@ "h": 8, "w": 12, "x": 0, - "y": 71 + "y": 79 }, "id": 1001, "options": { @@ -3250,7 +3519,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_sync_block_validation_state_root_duration{job=\"$job\"}", + "expr": "reth_sync_block_validation_state_root_duration{$instance_label=\"$instance\"}", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -3266,7 +3535,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_sync_execution_execution_duration{job=\"$job\"}", + "expr": "reth_sync_execution_execution_duration{$instance_label=\"$instance\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -3345,7 +3614,7 @@ "h": 8, "w": 12, "x": 12, - "y": 71 + "y": 79 }, "id": 251, "options": { @@ -3370,7 +3639,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "reth_sync_caching_account_cache_hits{job=\"$job\"} / (reth_sync_caching_account_cache_hits{job=\"$job\"} + reth_sync_caching_account_cache_misses{job=\"$job\"})", + "expr": "reth_sync_caching_account_cache_hits{$instance_label=\"$instance\"} / (reth_sync_caching_account_cache_hits{$instance_label=\"$instance\"} + reth_sync_caching_account_cache_misses{$instance_label=\"$instance\"})", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -3387,7 +3656,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "reth_sync_caching_storage_cache_hits{job=\"$job\"} / (reth_sync_caching_storage_cache_hits{job=\"$job\"} + reth_sync_caching_storage_cache_misses{job=\"$job\"})", + "expr": "reth_sync_caching_storage_cache_hits{$instance_label=\"$instance\"} / (reth_sync_caching_storage_cache_hits{$instance_label=\"$instance\"} + reth_sync_caching_storage_cache_misses{$instance_label=\"$instance\"})", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -3404,7 +3673,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "reth_sync_caching_code_cache_hits{job=\"$job\"} / (reth_sync_caching_code_cache_hits{job=\"$job\"} + reth_sync_caching_code_cache_misses{job=\"$job\"})", + "expr": "reth_sync_caching_code_cache_hits{$instance_label=\"$instance\"} / (reth_sync_caching_code_cache_hits{$instance_label=\"$instance\"} + reth_sync_caching_code_cache_misses{$instance_label=\"$instance\"})", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, 
@@ -3484,7 +3753,7 @@ "h": 8, "w": 12, "x": 0, - "y": 79 + "y": 87 }, "id": 252, "options": { @@ -3509,7 +3778,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "reth_sync_block_validation_trie_input_duration{job=\"$job\", quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_sync_block_validation_trie_input_duration{$instance_label=\"$instance\", quantile=~\"(0|0.5|0.9|0.95|1)\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -3523,13 +3792,117 @@ "title": "Block validation overhead", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 1, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 87 + }, + "id": 1005, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_sync_caching_precompile_cache_hits{$instance_label=\"$instance\"} / (reth_sync_caching_precompile_cache_hits{$instance_label=\"$instance\"} + reth_sync_caching_precompile_cache_misses{$instance_label=\"$instance\"})", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Precompile cache hits", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Precompile cache hitrate", + "type": "timeseries" + }, { "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 87 + "y": 95 }, "id": 214, "panels": [], @@ -3600,7 +3973,7 @@ "h": 8, "w": 12, "x": 0, - "y": 88 + "y": 96 }, "id": 255, "options": { @@ -3624,7 +3997,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_tree_root_proofs_processed_histogram{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_tree_root_proofs_processed_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "instant": false, "legendFormat": "{{quantile}} percentile", "range": true, @@ -3699,7 +4072,7 @@ "h": 8, "w": 12, "x": 12, - "y": 88 + "y": 96 }, "id": 254, "options": { @@ -3723,7 +4096,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_tree_root_proof_calculation_duration_histogram{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_tree_root_proof_calculation_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "instant": false, "legendFormat": "{{quantile}} percentile", "range": true, 
@@ -3798,7 +4171,7 @@ "h": 8, "w": 12, "x": 0, - "y": 96 + "y": 104 }, "id": 257, "options": { @@ -3822,7 +4195,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_tree_root_pending_multiproofs_histogram{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_tree_root_pending_multiproofs_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "instant": false, "legendFormat": "{{quantile}} percentile", "range": true, @@ -3897,7 +4270,7 @@ "h": 8, "w": 12, "x": 12, - "y": 96 + "y": 104 }, "id": 256, "options": { @@ -3921,7 +4294,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_tree_root_inflight_multiproofs_histogram{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_tree_root_inflight_multiproofs_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "instant": false, "legendFormat": "{{quantile}} percentile", "range": true, @@ -3996,7 +4369,7 @@ "h": 8, "w": 12, "x": 0, - "y": 104 + "y": 112 }, "id": 260, "options": { @@ -4020,7 +4393,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_sparse_state_trie_multiproof_skipped_storage_nodes{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_sparse_state_trie_multiproof_skipped_storage_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "instant": false, "legendFormat": "Storage {{quantile}} percentile", "range": true, @@ -4095,7 +4468,7 @@ "h": 8, "w": 12, "x": 12, - "y": 104 + "y": 112 }, "id": 259, "options": { @@ -4119,7 +4492,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_sparse_state_trie_multiproof_skipped_account_nodes{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_sparse_state_trie_multiproof_skipped_account_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "instant": false, "legendFormat": "Account {{quantile}} percentile", "range": true, @@ -4194,7 +4567,7 @@ "h": 8, "w": 12, "x": 0, - "y": 112 + "y": 120 }, "id": 262, "options": { @@ -4218,7 +4591,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_sparse_state_trie_multiproof_total_account_nodes{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_sparse_state_trie_multiproof_total_account_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "hide": false, "instant": false, "legendFormat": "Account {{quantile}} percentile", @@ -4294,7 +4667,7 @@ "h": 8, "w": 12, "x": 12, - "y": 112 + "y": 120 }, "id": 261, "options": { @@ -4318,7 +4691,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_sparse_state_trie_multiproof_total_storage_nodes{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_sparse_state_trie_multiproof_total_storage_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "instant": false, "legendFormat": "Storage {{quantile}} percentile", "range": true, @@ -4394,7 +4767,7 @@ "h": 8, "w": 12, "x": 12, - "y": 120 + "y": 128 }, "id": 263, "options": { @@ -4418,7 +4791,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_tree_root_multiproof_task_total_duration_histogram{job=\"$job\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_tree_root_multiproof_task_total_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "hide": false, "instant": false, "legendFormat": "Task duration {{quantile}} percentile", @@ -4435,7 +4808,7 @@ "h": 1, "w": 24, "x": 0, - "y": 128 + "y": 136 }, "id": 38, "panels": [], @@ 
-4506,7 +4879,7 @@ "h": 8, "w": 12, "x": 0, - "y": 129 + "y": 137 }, "id": 40, "options": { @@ -4531,7 +4904,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "avg(rate(reth_database_transaction_close_duration_seconds_sum{job=\"$job\", outcome=\"commit\"}[$__rate_interval]) / rate(reth_database_transaction_close_duration_seconds_count{job=\"$job\", outcome=\"commit\"}[$__rate_interval]) >= 0)", + "expr": "avg(rate(reth_database_transaction_close_duration_seconds_sum{$instance_label=\"$instance\", outcome=\"commit\"}[$__rate_interval]) / rate(reth_database_transaction_close_duration_seconds_count{$instance_label=\"$instance\", outcome=\"commit\"}[$__rate_interval]) >= 0)", "format": "time_series", "instant": false, "legendFormat": "Commit time", @@ -4567,7 +4940,7 @@ "h": 8, "w": 12, "x": 12, - "y": 129 + "y": 137 }, "id": 42, "maxDataPoints": 25, @@ -4621,7 +4994,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "avg(max_over_time(reth_database_transaction_close_duration_seconds{job=\"$job\", outcome=\"commit\"}[$__rate_interval])) by (quantile)", + "expr": "avg(max_over_time(reth_database_transaction_close_duration_seconds{$instance_label=\"$instance\", outcome=\"commit\"}[$__rate_interval])) by (quantile)", "format": "time_series", "instant": false, "legendFormat": "{{quantile}}", @@ -4695,7 +5068,7 @@ "h": 8, "w": 12, "x": 0, - "y": 137 + "y": 145 }, "id": 117, "options": { @@ -4720,7 +5093,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum(rate(reth_database_transaction_open_duration_seconds_sum{job=\"$job\", outcome!=\"\"}[$__rate_interval]) / rate(reth_database_transaction_open_duration_seconds_count{job=\"$job\", outcome!=\"\"}[$__rate_interval])) by (outcome, mode)", + "expr": "sum(rate(reth_database_transaction_open_duration_seconds_sum{$instance_label=\"$instance\", outcome!=\"\"}[$__rate_interval]) / rate(reth_database_transaction_open_duration_seconds_count{$instance_label=\"$instance\", outcome!=\"\"}[$__rate_interval])) by (outcome, mode)", "format": "time_series", "instant": false, "legendFormat": "{{mode}}, {{outcome}}", @@ -4793,7 +5166,7 @@ "h": 8, "w": 12, "x": 12, - "y": 137 + "y": 145 }, "id": 116, "options": { @@ -4818,7 +5191,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "max(max_over_time(reth_database_transaction_open_duration_seconds{job=\"$job\", outcome!=\"\", quantile=\"1\"}[$__interval])) by (outcome, mode)", + "expr": "max(max_over_time(reth_database_transaction_open_duration_seconds{$instance_label=\"$instance\", outcome!=\"\", quantile=\"1\"}[$__interval])) by (outcome, mode)", "format": "time_series", "instant": false, "legendFormat": "{{mode}}, {{outcome}}", @@ -4922,7 +5295,7 @@ "h": 8, "w": 12, "x": 0, - "y": 145 + "y": 153 }, "id": 119, "options": { @@ -4948,7 +5321,7 @@ "disableTextWrap": false, "editorMode": "code", "exemplar": false, - "expr": "sum(reth_database_transaction_opened_total{job=\"$job\", mode=\"read-write\"})", + "expr": "sum(reth_database_transaction_opened_total{$instance_label=\"$instance\", mode=\"read-write\"})", "format": "time_series", "fullMetaSearch": false, "includeNullMetadata": true, @@ -4965,7 +5338,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum(reth_database_transaction_closed_total{job=\"$job\", mode=\"read-write\"})", + "expr": "sum(reth_database_transaction_closed_total{$instance_label=\"$instance\", mode=\"read-write\"})", "format": "time_series", "instant": false, "legendFormat": "Closed {{mode}}", @@ -5079,7 +5452,7 @@ "h": 8, "w": 12, "x": 12, - "y": 145 + 
"y": 153 }, "id": 250, "options": { @@ -5105,7 +5478,7 @@ "disableTextWrap": false, "editorMode": "builder", "exemplar": false, - "expr": "reth_database_transaction_opened_total{job=\"$job\", mode=\"read-only\"}", + "expr": "reth_database_transaction_opened_total{$instance_label=\"$instance\", mode=\"read-only\"}", "format": "time_series", "fullMetaSearch": false, "includeNullMetadata": true, @@ -5123,7 +5496,7 @@ "disableTextWrap": false, "editorMode": "builder", "exemplar": false, - "expr": "sum(reth_database_transaction_closed_total{job=\"$job\", mode=\"read-only\"})", + "expr": "sum(reth_database_transaction_closed_total{$instance_label=\"$instance\", mode=\"read-only\"})", "format": "time_series", "fullMetaSearch": false, "includeNullMetadata": true, @@ -5174,7 +5547,7 @@ "h": 8, "w": 12, "x": 0, - "y": 153 + "y": 161 }, "id": 48, "options": { @@ -5205,7 +5578,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_db_table_size{job=\"$job\"}", + "expr": "reth_db_table_size{$instance_label=\"$instance\"}", "interval": "", "legendFormat": "{{table}}", "range": true, @@ -5281,7 +5654,7 @@ "h": 8, "w": 12, "x": 12, - "y": 153 + "y": 161 }, "id": 118, "options": { @@ -5306,7 +5679,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "max(max_over_time(reth_database_operation_large_value_duration_seconds{job=\"$job\", quantile=\"1\"}[$__interval]) > 0) by (table)", + "expr": "max(max_over_time(reth_database_operation_large_value_duration_seconds{$instance_label=\"$instance\", quantile=\"1\"}[$__interval]) > 0) by (table)", "format": "time_series", "instant": false, "legendFormat": "{{table}}", @@ -5344,7 +5717,7 @@ "h": 8, "w": 12, "x": 0, - "y": 161 + "y": 169 }, "id": 50, "options": { @@ -5374,7 +5747,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "sum by (type) ( reth_db_table_pages{job=\"$job\"} )", + "expr": "sum by (type) ( reth_db_table_pages{$instance_label=\"$instance\"} )", "legendFormat": "__auto", "range": true, "refId": "A" @@ -5450,7 +5823,7 @@ "h": 8, "w": 12, "x": 12, - "y": 161 + "y": 169 }, "id": 52, "options": { @@ -5474,7 +5847,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "sum by (job) ( reth_db_table_size{job=\"$job\"} )", + "expr": "sum by (job) ( reth_db_table_size{$instance_label=\"$instance\"} )", "legendFormat": "Size ({{job}})", "range": true, "refId": "A" @@ -5549,7 +5922,7 @@ "h": 8, "w": 12, "x": 0, - "y": 169 + "y": 177 }, "id": 113, "options": { @@ -5573,7 +5946,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "sum(reth_db_freelist{job=\"$job\"}) by (job)", + "expr": "sum(reth_db_freelist{$instance_label=\"$instance\"}) by (job)", "legendFormat": "Pages ({{job}})", "range": true, "refId": "A" @@ -5709,7 +6082,7 @@ "h": 8, "w": 12, "x": 12, - "y": 169 + "y": 177 }, "id": 58, "options": { @@ -5731,7 +6104,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sort_desc(reth_db_table_pages{job=\"$job\", type=\"overflow\"} != 0)", + "expr": "sort_desc(reth_db_table_pages{$instance_label=\"$instance\", type=\"overflow\"} != 0)", "format": "table", "instant": true, "legendFormat": "__auto", @@ -5748,7 +6121,7 @@ "h": 1, "w": 24, "x": 0, - "y": 177 + "y": 185 }, "id": 203, "panels": [], @@ -5782,7 +6155,7 @@ "h": 8, "w": 8, "x": 0, - "y": 178 + "y": 186 }, "id": 202, "options": { @@ -5813,7 +6186,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_static_files_segment_size{job=\"$job\"}", + "expr": 
"reth_static_files_segment_size{$instance_label=\"$instance\"}", "interval": "", "legendFormat": "{{segment}}", "range": true, @@ -5938,7 +6311,7 @@ "h": 8, "w": 8, "x": 8, - "y": 178 + "y": 186 }, "id": 204, "options": { @@ -5960,7 +6333,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "reth_static_files_segment_entries{job=\"$job\"}", + "expr": "reth_static_files_segment_entries{$instance_label=\"$instance\"}", "format": "table", "instant": true, "legendFormat": "__auto", @@ -6086,7 +6459,7 @@ "h": 8, "w": 8, "x": 16, - "y": 178 + "y": 186 }, "id": 205, "options": { @@ -6108,7 +6481,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "reth_static_files_segment_files{job=\"$job\"}", + "expr": "reth_static_files_segment_files{$instance_label=\"$instance\"}", "format": "table", "instant": true, "legendFormat": "__auto", @@ -6185,7 +6558,7 @@ "h": 8, "w": 12, "x": 0, - "y": 186 + "y": 194 }, "id": 206, "options": { @@ -6209,7 +6582,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "sum by (job) ( reth_static_files_segment_size{job=\"$job\"} )", + "expr": "sum by (job) ( reth_static_files_segment_size{$instance_label=\"$instance\"} )", "legendFormat": "__auto", "range": true, "refId": "A" @@ -6284,7 +6657,7 @@ "h": 8, "w": 12, "x": 12, - "y": 186 + "y": 194 }, "id": 207, "options": { @@ -6308,7 +6681,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "max(max_over_time(reth_static_files_jar_provider_write_duration_seconds{job=\"$job\", operation=\"commit-writer\", quantile=\"1\"}[$__interval]) > 0) by (segment)", + "expr": "max(max_over_time(reth_static_files_jar_provider_write_duration_seconds{$instance_label=\"$instance\", operation=\"commit-writer\", quantile=\"1\"}[$__interval]) > 0) by (segment)", "legendFormat": "{{segment}}", "range": true, "refId": "A" @@ -6323,7 +6696,7 @@ "h": 1, "w": 24, "x": 0, - "y": 194 + "y": 202 }, "id": 79, "panels": [], @@ -6396,7 +6769,7 @@ "h": 8, "w": 12, "x": 0, - "y": 195 + "y": 203 }, "id": 74, "options": { @@ -6420,7 +6793,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_blockchain_tree_canonical_chain_height{job=\"$job\"}", + "expr": "reth_blockchain_tree_canonical_chain_height{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Canonical chain height", "range": true, @@ -6495,7 +6868,7 @@ "h": 8, "w": 12, "x": 12, - "y": 195 + "y": 203 }, "id": 80, "options": { @@ -6519,7 +6892,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_blockchain_tree_block_buffer_blocks{job=\"$job\"}", + "expr": "reth_blockchain_tree_block_buffer_blocks{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Buffered blocks", "range": true, @@ -6594,7 +6967,7 @@ "h": 8, "w": 12, "x": 0, - "y": 203 + "y": 211 }, "id": 1002, "options": { @@ -6618,7 +6991,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "increase(reth_blockchain_tree_reorgs{job=\"$job\"}[$__rate_interval])", + "expr": "increase(reth_blockchain_tree_reorgs{$instance_label=\"$instance\"}[$__rate_interval])", "instant": false, "legendFormat": "__auto", "range": true, @@ -6693,7 +7066,7 @@ "h": 8, "w": 12, "x": 12, - "y": 203 + "y": 211 }, "id": 190, "options": { @@ -6717,7 +7090,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_blockchain_tree_latest_reorg_depth{job=\"$job\"}", + "expr": "reth_blockchain_tree_latest_reorg_depth{$instance_label=\"$instance\"}", "instant": false, "legendFormat": "__auto", "range": true, @@ -6733,7 +7106,7 @@ "h": 1, "w": 24, 
"x": 0, - "y": 211 + "y": 219 }, "id": 108, "panels": [], @@ -6831,7 +7204,7 @@ "h": 8, "w": 12, "x": 0, - "y": 212 + "y": 220 }, "id": 109, "options": { @@ -6856,7 +7229,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "sum(reth_rpc_server_connections_connections_opened_total{job=\"$job\"} - reth_rpc_server_connections_connections_closed_total{job=\"$job\"}) by (transport)", + "expr": "sum(reth_rpc_server_connections_connections_opened_total{$instance_label=\"$instance\"} - reth_rpc_server_connections_connections_closed_total{$instance_label=\"$instance\"}) by (transport)", "format": "time_series", "fullMetaSearch": false, "includeNullMetadata": true, @@ -6894,7 +7267,7 @@ "h": 8, "w": 12, "x": 12, - "y": 212 + "y": 220 }, "id": 111, "maxDataPoints": 25, @@ -6948,7 +7321,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "avg(max_over_time(reth_rpc_server_connections_request_time_seconds{job=\"$job\"}[$__rate_interval]) > 0) by (quantile)", + "expr": "avg(max_over_time(reth_rpc_server_connections_request_time_seconds{$instance_label=\"$instance\"}[$__rate_interval]) > 0) by (quantile)", "format": "time_series", "instant": false, "legendFormat": "__auto", @@ -7024,7 +7397,7 @@ "h": 8, "w": 12, "x": 0, - "y": 220 + "y": 228 }, "id": 120, "options": { @@ -7048,7 +7421,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "max(max_over_time(reth_rpc_server_calls_time_seconds{job=\"$job\"}[$__rate_interval])) by (method) > 0", + "expr": "max(max_over_time(reth_rpc_server_calls_time_seconds{$instance_label=\"$instance\"}[$__rate_interval])) by (method) > 0", "instant": false, "legendFormat": "__auto", "range": true, @@ -7083,7 +7456,7 @@ "h": 8, "w": 12, "x": 12, - "y": 220 + "y": 228 }, "id": 112, "maxDataPoints": 25, @@ -7137,7 +7510,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "avg(max_over_time(reth_rpc_server_calls_time_seconds{job=\"$job\"}[$__rate_interval]) > 0) by (quantile)", + "expr": "avg(max_over_time(reth_rpc_server_calls_time_seconds{$instance_label=\"$instance\"}[$__rate_interval]) > 0) by (quantile)", "format": "time_series", "instant": false, "legendFormat": "{{quantile}}", @@ -7196,8 +7569,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7249,7 +7621,7 @@ "h": 8, "w": 12, "x": 0, - "y": 228 + "y": 236 }, "id": 198, "options": { @@ -7274,7 +7646,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_rpc_eth_cache_cached_count{job=\"$job\", cache=\"headers\"}", + "expr": "reth_rpc_eth_cache_cached_count{$instance_label=\"$instance\", cache=\"headers\"}", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -7290,7 +7662,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_rpc_eth_cache_queued_consumers_count{job=\"$job\", cache=\"receipts\"}", + "expr": "reth_rpc_eth_cache_queued_consumers_count{$instance_label=\"$instance\", cache=\"receipts\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -7307,7 +7679,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_rpc_eth_cache_queued_consumers_count{job=\"$job\", cache=\"headers\"}", + "expr": "reth_rpc_eth_cache_queued_consumers_count{$instance_label=\"$instance\", cache=\"headers\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -7324,7 +7696,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_rpc_eth_cache_queued_consumers_count{job=\"$job\", 
cache=\"blocks\"}", + "expr": "reth_rpc_eth_cache_queued_consumers_count{$instance_label=\"$instance\", cache=\"blocks\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -7341,7 +7713,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_rpc_eth_cache_memory_usage{job=\"$job\", cache=\"blocks\"}", + "expr": "reth_rpc_eth_cache_memory_usage{$instance_label=\"$instance\", cache=\"blocks\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -7358,7 +7730,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_rpc_eth_cache_cached_count{job=\"$job\", cache=\"receipts\"}", + "expr": "reth_rpc_eth_cache_cached_count{$instance_label=\"$instance\", cache=\"receipts\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -7375,7 +7747,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_rpc_eth_cache_memory_usage{job=\"$job\", cache=\"receipts\"}", + "expr": "reth_rpc_eth_cache_memory_usage{$instance_label=\"$instance\", cache=\"receipts\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -7392,7 +7764,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_rpc_eth_cache_cached_count{job=\"$job\", cache=\"blocks\"}", + "expr": "reth_rpc_eth_cache_cached_count{$instance_label=\"$instance\", cache=\"blocks\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -7454,8 +7826,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7471,7 +7842,7 @@ "h": 8, "w": 12, "x": 12, - "y": 228 + "y": 236 }, "id": 246, "options": { @@ -7511,7 +7882,7 @@ "h": 1, "w": 24, "x": 0, - "y": 236 + "y": 244 }, "id": 24, "panels": [], @@ -7567,8 +7938,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7608,7 +7978,7 @@ "h": 8, "w": 12, "x": 0, - "y": 237 + "y": 245 }, "id": 26, "options": { @@ -7632,7 +8002,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_downloaders_headers_total_downloaded{job=\"$job\"}", + "expr": "reth_downloaders_headers_total_downloaded{$instance_label=\"$instance\"}", "legendFormat": "Downloaded", "range": true, "refId": "A" @@ -7643,7 +8013,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_downloaders_headers_total_flushed{job=\"$job\"}", + "expr": "reth_downloaders_headers_total_flushed{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Flushed", "range": true, @@ -7655,7 +8025,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "rate(reth_downloaders_headers_total_downloaded{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_downloaders_headers_total_downloaded{$instance_label=\"$instance\"}[$__rate_interval])", "hide": false, "instant": false, "legendFormat": "Downloaded/s", @@ -7668,7 +8038,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "rate(reth_downloaders_headers_total_flushed{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_downloaders_headers_total_flushed{$instance_label=\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Flushed/s", "range": true, @@ -7727,8 +8097,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7744,7 +8113,7 @@ "h": 8, "w": 12, "x": 12, - "y": 237 + "y": 245 }, "id": 33, "options": { @@ -7768,7 +8137,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": 
"builder", - "expr": "rate(reth_downloaders_headers_timeout_errors{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_downloaders_headers_timeout_errors{$instance_label=\"$instance\"}[$__rate_interval])", "legendFormat": "Request timed out", "range": true, "refId": "A" @@ -7779,7 +8148,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "rate(reth_downloaders_headers_unexpected_errors{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_downloaders_headers_unexpected_errors{$instance_label=\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Unexpected error", "range": true, @@ -7791,7 +8160,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "rate(reth_downloaders_headers_validation_errors{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_downloaders_headers_validation_errors{$instance_label=\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Invalid response", "range": true, @@ -7850,8 +8219,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7866,7 +8234,7 @@ "h": 8, "w": 12, "x": 0, - "y": 245 + "y": 253 }, "id": 36, "options": { @@ -7890,7 +8258,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_downloaders_headers_in_flight_requests{job=\"$job\"}", + "expr": "reth_downloaders_headers_in_flight_requests{$instance_label=\"$instance\"}", "legendFormat": "In flight requests", "range": true, "refId": "A" @@ -7901,7 +8269,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_network_connected_peers{job=\"$job\"}", + "expr": "reth_network_connected_peers{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Connected peers", "range": true, @@ -7917,7 +8285,7 @@ "h": 1, "w": 24, "x": 0, - "y": 253 + "y": 261 }, "id": 32, "panels": [], @@ -8022,7 +8390,7 @@ "h": 8, "w": 12, "x": 0, - "y": 254 + "y": 262 }, "id": 30, "options": { @@ -8045,7 +8413,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_downloaders_bodies_total_downloaded{job=\"$job\"}", + "expr": "reth_downloaders_bodies_total_downloaded{$instance_label=\"$instance\"}", "legendFormat": "Downloaded", "range": true, "refId": "A" @@ -8056,7 +8424,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_downloaders_bodies_total_flushed{job=\"$job\"}", + "expr": "reth_downloaders_bodies_total_flushed{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Flushed", "range": true, @@ -8068,7 +8436,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "rate(reth_downloaders_bodies_total_flushed{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_downloaders_bodies_total_flushed{$instance_label=\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Flushed/s", "range": true, @@ -8080,7 +8448,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "rate(reth_downloaders_bodies_total_downloaded{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_downloaders_bodies_total_downloaded{$instance_label=\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Downloaded/s", "range": true, @@ -8092,7 +8460,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_downloaders_bodies_buffered_responses{job=\"$job\"}", + "expr": "reth_downloaders_bodies_buffered_responses{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Buffered responses", "range": true, @@ -8104,7 +8472,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", 
- "expr": "reth_downloaders_bodies_buffered_blocks{job=\"$job\"}", + "expr": "reth_downloaders_bodies_buffered_blocks{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Buffered blocks", "range": true, @@ -8116,7 +8484,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_downloaders_bodies_queued_blocks{job=\"$job\"}", + "expr": "reth_downloaders_bodies_queued_blocks{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Queued blocks", "range": true, @@ -8187,7 +8555,7 @@ "h": 8, "w": 12, "x": 12, - "y": 254 + "y": 262 }, "id": 28, "options": { @@ -8210,7 +8578,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "rate(reth_downloaders_bodies_timeout_errors{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_downloaders_bodies_timeout_errors{$instance_label=\"$instance\"}[$__rate_interval])", "legendFormat": "Request timed out", "range": true, "refId": "A" @@ -8221,7 +8589,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "rate(reth_downloaders_bodies_unexpected_errors{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_downloaders_bodies_unexpected_errors{$instance_label=\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Unexpected error", "range": true, @@ -8233,7 +8601,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "rate(reth_downloaders_bodies_validation_errors{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_downloaders_bodies_validation_errors{$instance_label=\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Invalid response", "range": true, @@ -8306,7 +8674,7 @@ "h": 8, "w": 12, "x": 0, - "y": 262 + "y": 270 }, "id": 35, "options": { @@ -8329,7 +8697,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_downloaders_bodies_in_flight_requests{job=\"$job\"}", + "expr": "reth_downloaders_bodies_in_flight_requests{$instance_label=\"$instance\"}", "legendFormat": "In flight requests", "range": true, "refId": "A" @@ -8340,7 +8708,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_network_connected_peers{job=\"$job\"}", + "expr": "reth_network_connected_peers{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Connected peers", "range": true, @@ -8431,7 +8799,7 @@ "h": 8, "w": 12, "x": 12, - "y": 262 + "y": 270 }, "id": 73, "options": { @@ -8454,7 +8822,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_downloaders_bodies_buffered_blocks_size_bytes{job=\"$job\"}", + "expr": "reth_downloaders_bodies_buffered_blocks_size_bytes{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Buffered blocks size ", "range": true, @@ -8466,7 +8834,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_downloaders_bodies_buffered_blocks{job=\"$job\"}", + "expr": "reth_downloaders_bodies_buffered_blocks{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Buffered blocks", "range": true, @@ -8557,7 +8925,7 @@ "h": 8, "w": 12, "x": 0, - "y": 270 + "y": 278 }, "id": 102, "options": { @@ -8580,7 +8948,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_downloaders_bodies_response_response_size_bytes{job=\"$job\"}", + "expr": "reth_downloaders_bodies_response_response_size_bytes{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Response size", "range": true, @@ -8592,7 +8960,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_downloaders_bodies_response_response_length{job=\"$job\"}", + "expr": 
"reth_downloaders_bodies_response_response_length{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Individual response length (number of bodies in response)", "range": true, @@ -8604,7 +8972,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_downloaders_bodies_response_response_size_bytes / reth_downloaders_bodies_response_response_length{job=\"$job\"}", + "expr": "reth_downloaders_bodies_response_response_size_bytes / reth_downloaders_bodies_response_response_length{$instance_label=\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Mean body size in response", @@ -8621,7 +8989,7 @@ "h": 1, "w": 24, "x": 0, - "y": 278 + "y": 286 }, "id": 226, "panels": [], @@ -8717,7 +9085,7 @@ "h": 8, "w": 12, "x": 0, - "y": 279 + "y": 287 }, "id": 225, "options": { @@ -8742,7 +9110,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_network_eth_headers_requests_received_total{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_network_eth_headers_requests_received_total{$instance_label=\"$instance\"}[$__rate_interval])", "format": "time_series", "fullMetaSearch": false, "includeNullMetadata": true, @@ -8844,7 +9212,7 @@ "h": 8, "w": 12, "x": 12, - "y": 279 + "y": 287 }, "id": 227, "options": { @@ -8869,7 +9237,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_network_eth_receipts_requests_received_total{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_network_eth_receipts_requests_received_total{$instance_label=\"$instance\"}[$__rate_interval])", "format": "time_series", "fullMetaSearch": false, "includeNullMetadata": true, @@ -8971,7 +9339,7 @@ "h": 8, "w": 12, "x": 0, - "y": 287 + "y": 295 }, "id": 235, "options": { @@ -8996,7 +9364,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_network_eth_bodies_requests_received_total{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_network_eth_bodies_requests_received_total{$instance_label=\"$instance\"}[$__rate_interval])", "format": "time_series", "fullMetaSearch": false, "includeNullMetadata": true, @@ -9098,7 +9466,7 @@ "h": 8, "w": 12, "x": 12, - "y": 287 + "y": 295 }, "id": 234, "options": { @@ -9123,7 +9491,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_network_eth_node_data_requests_received_total{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_network_eth_node_data_requests_received_total{$instance_label=\"$instance\"}[$__rate_interval])", "format": "time_series", "fullMetaSearch": false, "includeNullMetadata": true, @@ -9142,7 +9510,7 @@ "h": 1, "w": 24, "x": 0, - "y": 295 + "y": 303 }, "id": 68, "panels": [], @@ -9213,7 +9581,7 @@ "h": 8, "w": 12, "x": 0, - "y": 296 + "y": 304 }, "id": 60, "options": { @@ -9236,7 +9604,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_payloads_active_jobs{job=\"$job\"}", + "expr": "reth_payloads_active_jobs{$instance_label=\"$instance\"}", "legendFormat": "Active Jobs", "range": true, "refId": "A" @@ -9308,7 +9676,7 @@ "h": 8, "w": 12, "x": 12, - "y": 296 + "y": 304 }, "id": 62, "options": { @@ -9331,7 +9699,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_payloads_initiated_jobs{job=\"$job\"}", + "expr": "reth_payloads_initiated_jobs{$instance_label=\"$instance\"}", "legendFormat": "Initiated Jobs", "range": true, "refId": "A" @@ -9403,7 +9771,7 @@ "h": 8, "w": 12, "x": 0, - "y": 304 + "y": 312 }, "id": 64, "options": { @@ -9426,7 +9794,7 @@ "uid": "${datasource}" }, 
"editorMode": "builder", - "expr": "reth_payloads_failed_jobs{job=\"$job\"}", + "expr": "reth_payloads_failed_jobs{$instance_label=\"$instance\"}", "legendFormat": "Failed Jobs", "range": true, "refId": "A" @@ -9441,7 +9809,7 @@ "h": 1, "w": 24, "x": 0, - "y": 312 + "y": 320 }, "id": 105, "panels": [], @@ -9512,7 +9880,7 @@ "h": 8, "w": 12, "x": 0, - "y": 313 + "y": 321 }, "id": 106, "options": { @@ -9535,7 +9903,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "rate(reth_pruner_duration_seconds_sum{job=\"$job\"}[$__rate_interval]) / rate(reth_pruner_duration_seconds_count{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_pruner_duration_seconds_sum{$instance_label=\"$instance\"}[$__rate_interval]) / rate(reth_pruner_duration_seconds_count{$instance_label=\"$instance\"}[$__rate_interval])", "instant": false, "legendFormat": "__auto", "range": true, @@ -9609,7 +9977,7 @@ "h": 8, "w": 12, "x": 12, - "y": 313 + "y": 321 }, "id": 107, "options": { @@ -9632,7 +10000,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "rate(reth_pruner_segments_duration_seconds_sum{job=\"$job\"}[$__rate_interval]) / rate(reth_pruner_segments_duration_seconds_count{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_pruner_segments_duration_seconds_sum{$instance_label=\"$instance\"}[$__rate_interval]) / rate(reth_pruner_segments_duration_seconds_count{$instance_label=\"$instance\"}[$__rate_interval])", "instant": false, "legendFormat": "{{segment}}", "range": true, @@ -9705,7 +10073,7 @@ "h": 8, "w": 12, "x": 0, - "y": 321 + "y": 329 }, "id": 217, "options": { @@ -9728,7 +10096,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_pruner_segments_highest_pruned_block{job=\"$job\"}", + "expr": "reth_pruner_segments_highest_pruned_block{$instance_label=\"$instance\"}", "instant": false, "legendFormat": "{{segment}}", "range": true, @@ -9744,7 +10112,7 @@ "h": 1, "w": 24, "x": 0, - "y": 329 + "y": 337 }, "id": 97, "panels": [], @@ -9827,7 +10195,7 @@ "h": 8, "w": 12, "x": 0, - "y": 330 + "y": 338 }, "id": 98, "options": { @@ -9850,7 +10218,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_jemalloc_active{job=\"$job\"}", + "expr": "reth_jemalloc_active{$instance_label=\"$instance\"}", "instant": false, "legendFormat": "Active", "range": true, @@ -9862,7 +10230,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_allocated{job=\"$job\"}", + "expr": "reth_jemalloc_allocated{$instance_label=\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Allocated", @@ -9875,7 +10243,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_jemalloc_mapped{job=\"$job\"}", + "expr": "reth_jemalloc_mapped{$instance_label=\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Mapped", @@ -9888,7 +10256,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_metadata{job=\"$job\"}", + "expr": "reth_jemalloc_metadata{$instance_label=\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Metadata", @@ -9901,7 +10269,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_jemalloc_resident{job=\"$job\"}", + "expr": "reth_jemalloc_resident{$instance_label=\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Resident", @@ -9914,7 +10282,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_retained{job=\"$job\"}", + "expr": "reth_jemalloc_retained{$instance_label=\"$instance\"}", "hide": false, 
"instant": false, "legendFormat": "Retained", @@ -9989,7 +10357,7 @@ "h": 8, "w": 12, "x": 12, - "y": 330 + "y": 338 }, "id": 101, "options": { @@ -10012,7 +10380,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_process_resident_memory_bytes{job=\"$job\"}", + "expr": "reth_process_resident_memory_bytes{$instance_label=\"$instance\"}", "instant": false, "legendFormat": "Resident", "range": true, @@ -10086,7 +10454,7 @@ "h": 8, "w": 12, "x": 0, - "y": 338 + "y": 346 }, "id": 99, "options": { @@ -10109,7 +10477,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "avg(rate(reth_process_cpu_seconds_total{job=\"$job\"}[1m]))", + "expr": "avg(rate(reth_process_cpu_seconds_total{$instance_label=\"$instance\"}[1m]))", "instant": false, "legendFormat": "Process", "range": true, @@ -10183,7 +10551,7 @@ "h": 8, "w": 12, "x": 12, - "y": 338 + "y": 346 }, "id": 100, "options": { @@ -10206,7 +10574,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_process_open_fds{job=\"$job\"}", + "expr": "reth_process_open_fds{$instance_label=\"$instance\"}", "instant": false, "legendFormat": "Open", "range": true, @@ -10280,7 +10648,7 @@ "h": 8, "w": 12, "x": 0, - "y": 346 + "y": 354 }, "id": 248, "options": { @@ -10304,7 +10672,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_executor_spawn_critical_tasks_total{job=\"$job\"}- reth_executor_spawn_finished_critical_tasks_total{job=\"$job\"}", + "expr": "reth_executor_spawn_critical_tasks_total{$instance_label=\"$instance\"}- reth_executor_spawn_finished_critical_tasks_total{$instance_label=\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Tasks running", @@ -10392,7 +10760,7 @@ "h": 8, "w": 12, "x": 12, - "y": 346 + "y": 354 }, "id": 247, "options": { @@ -10418,7 +10786,7 @@ "disableTextWrap": false, "editorMode": "code", "exemplar": false, - "expr": "rate(reth_executor_spawn_regular_tasks_total{job=\"$job\"}[$__rate_interval])", + "expr": "rate(reth_executor_spawn_regular_tasks_total{$instance_label=\"$instance\"}[$__rate_interval])", "fullMetaSearch": false, "hide": false, "includeNullMetadata": false, @@ -10434,7 +10802,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_executor_spawn_regular_tasks_total{job=\"$job\"} - reth_executor_spawn_finished_regular_tasks_total{job=\"$job\"}", + "expr": "reth_executor_spawn_regular_tasks_total{$instance_label=\"$instance\"} - reth_executor_spawn_finished_regular_tasks_total{$instance_label=\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Tasks running", @@ -10451,7 +10819,7 @@ "h": 1, "w": 24, "x": 0, - "y": 354 + "y": 362 }, "id": 236, "panels": [ @@ -10541,7 +10909,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_exex_notifications_sent_total{job=\"$job\"}", + "expr": "reth_exex_notifications_sent_total{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Total Notifications Sent", "range": true, @@ -10637,7 +11005,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_exex_events_sent_total{job=\"$job\"}", + "expr": "reth_exex_events_sent_total{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Total Events Sent", "range": true, @@ -10733,7 +11101,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_exex_manager_current_capacity{job=\"$job\"}", + "expr": "reth_exex_manager_current_capacity{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Current size", "range": true, @@ -10745,7 
+11113,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "max_over_time(reth_exex_manager_max_capacity{job=\"$job\"}[1h])", + "expr": "max_over_time(reth_exex_manager_max_capacity{$instance_label=\"$instance\"}[1h])", "hide": false, "legendFormat": "Max size", "range": true, @@ -10841,7 +11209,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_exex_manager_buffer_size{job=\"$job\"}", + "expr": "reth_exex_manager_buffer_size{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Max size", "range": true, @@ -10909,7 +11277,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_exex_manager_num_exexs{job=\"$job\"}", + "expr": "reth_exex_manager_num_exexs{$instance_label=\"$instance\"}", "hide": false, "legendFormat": "Number of ExExs", "range": true, @@ -10929,7 +11297,7 @@ "h": 1, "w": 24, "x": 0, - "y": 355 + "y": 363 }, "id": 241, "panels": [ @@ -11020,7 +11388,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_exex_wal_lowest_committed_block_height{job=\"$job\"}", + "expr": "reth_exex_wal_lowest_committed_block_height{$instance_label=\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Lowest Block", @@ -11033,7 +11401,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_exex_wal_highest_committed_block_height{job=\"$job\"}", + "expr": "reth_exex_wal_highest_committed_block_height{$instance_label=\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Highest Block", @@ -11131,7 +11499,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_exex_wal_committed_blocks_count{job=\"$job\"}", + "expr": "reth_exex_wal_committed_blocks_count{$instance_label=\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Committed Blocks", @@ -11144,7 +11512,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_exex_wal_notifications_count{job=\"$job\"}", + "expr": "reth_exex_wal_notifications_count{$instance_label=\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Notifications", @@ -11243,7 +11611,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_exex_wal_size_bytes{job=\"$job\"}", + "expr": "reth_exex_wal_size_bytes{$instance_label=\"$instance\"}", "hide": false, "instant": false, "legendFormat": "__auto", @@ -11270,18 +11638,19 @@ "type": "prometheus", "uid": "${datasource}" }, - "definition": "query_result(reth_info)", + "definition": "label_values(reth_info,$instance_label)", "includeAll": false, - "label": "Job", - "name": "job", + "label": "Instance", + "name": "instance", "options": [], "query": { - "qryType": 3, - "query": "query_result(reth_info)", + "qryType": 1, + "query": "label_values(reth_info,$instance_label)", "refId": "PrometheusVariableQueryEditor-VariableQuery" }, "refresh": 1, - "regex": "/.*job=\\\"([^\\\"]*).*/", + "regex": "", + "sort": 1, "type": "query" }, { @@ -11294,6 +11663,26 @@ "refresh": 1, "regex": "", "type": "datasource" + }, + { + "hide": 2, + "label": "Instance Label", + "name": "instance_label", + "query": "${VAR_INSTANCE_LABEL}", + "skipUrlSync": true, + "type": "constant", + "current": { + "value": "${VAR_INSTANCE_LABEL}", + "text": "${VAR_INSTANCE_LABEL}", + "selected": false + }, + "options": [ + { + "value": "${VAR_INSTANCE_LABEL}", + "text": "${VAR_INSTANCE_LABEL}", + "selected": false + } + ] } ] }, @@ -11305,6 +11694,6 @@ "timezone": "", "title": "Reth", "uid": "2k8BXz24x", - "version": 4, + "version": 10, "weekStart": "" } diff --git 
a/etc/grafana/dashboards/reth-performance.json b/etc/grafana/dashboards/reth-performance.json deleted file mode 100644 index 02d890dceef..00000000000 --- a/etc/grafana/dashboards/reth-performance.json +++ /dev/null @@ -1,346 +0,0 @@ -{ - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__elements": {}, - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "11.1.0" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": null, - "links": [], - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 3, - "panels": [], - "title": "Block Validation", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "This tracks the proportion of various tasks that take up time during block validation", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 25, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "percent" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 1 - }, - "id": 1, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_sync_block_validation_state_root_duration{instance=\"$instance\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "State Root Duration", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_sync_execution_execution_duration{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Execution Duration", - "range": true, - "refId": "B", - "useBackend": false - } - ], - "title": "Block Validation Overview", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": 
"${DS_PROMETHEUS}" - }, - "description": "This tracks the total block validation latency, as well as the latency for validation sub-tasks ", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 25, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "normal" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 1 - }, - "id": 2, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_sync_block_validation_state_root_duration{instance=\"$instance\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "State Root Duration", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_sync_execution_execution_duration{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Execution Duration", - "range": true, - "refId": "B", - "useBackend": false - } - ], - "title": "Block Validation Latency", - "type": "timeseries" - } - ], - "refresh": "30s", - "schemaVersion": 39, - "tags": [], - "templating": { - "list": [ - { - "current": {}, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "definition": "query_result(reth_info)", - "hide": 0, - "includeAll": false, - "label": "instance", - "multi": false, - "name": "instance", - "options": [], - "query": { - "qryType": 3, - "query": "query_result(reth_info)", - "refId": "PrometheusVariableQueryEditor-VariableQuery" - }, - "refresh": 1, - "regex": "/.*instance=\\\"([^\\\"]*).*/", - "skipUrlSync": false, - "sort": 0, - "type": "query" - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": {}, - "timezone": "browser", - "title": "Reth - Performance", - "uid": "bdywb3xjphfy8a", - "version": 2, - "weekStart": "" -} diff --git a/examples/beacon-api-sidecar-fetcher/src/main.rs b/examples/beacon-api-sidecar-fetcher/src/main.rs index 61cf1ce3410..382261a39d2 100644 --- a/examples/beacon-api-sidecar-fetcher/src/main.rs +++ b/examples/beacon-api-sidecar-fetcher/src/main.rs @@ -1,7 +1,7 @@ //! Run with //! //! ```sh -//! cargo run -p beacon-api-beacon-sidecar-fetcher --node --full +//! cargo run -p example-beacon-api-sidecar-fetcher -- node --full //! ``` //! //! This launches a regular reth instance and subscribes to payload attributes event stream. 
diff --git a/examples/beacon-api-sse/src/main.rs b/examples/beacon-api-sse/src/main.rs index 46bb0ddd444..fee20e09b1f 100644 --- a/examples/beacon-api-sse/src/main.rs +++ b/examples/beacon-api-sse/src/main.rs @@ -5,7 +5,7 @@ //! Run with //! //! ```sh -//! cargo run -p beacon-api-sse -- node +//! cargo run -p example-beacon-api-sse -- node //! ``` //! //! This launches a regular reth instance and subscribes to payload attributes event stream. diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 7e41b17aad7..93127bbc91b 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -84,7 +84,7 @@ impl ExecutorBuilder for MyExecutorBuilder where Node: FullNodeTypes>, { - type EVM = EthEvmConfig; + type EVM = EthEvmConfig; async fn build_evm(self, ctx: &BuilderContext) -> eyre::Result { let evm_config = diff --git a/examples/custom-node/Cargo.toml b/examples/custom-node/Cargo.toml index 4821ef54f40..d190fef9f85 100644 --- a/examples/custom-node/Cargo.toml +++ b/examples/custom-node/Cargo.toml @@ -15,9 +15,10 @@ reth-optimism-forks.workspace = true reth-db-api.workspace = true reth-op = { workspace = true, features = ["node", "pool"] } reth-payload-builder.workspace = true +reth-primitives-traits.workspace = true reth-rpc-api.workspace = true reth-rpc-engine-api.workspace = true -reth-ethereum = { workspace = true, features = ["node-api", "network", "evm", "pool"] } +reth-ethereum = { workspace = true, features = ["node-api", "network", "evm", "pool", "trie", "storage-api"] } # revm revm.workspace = true @@ -43,7 +44,7 @@ derive_more.workspace = true eyre.workspace = true jsonrpsee.workspace = true serde.workspace = true - +thiserror.workspace = true modular-bitfield.workspace = true [dev-dependencies] @@ -64,5 +65,6 @@ arbitrary = [ "reth-ethereum/arbitrary", "alloy-rpc-types-engine/arbitrary", "reth-db-api/arbitrary", + "reth-primitives-traits/arbitrary", ] default = [] diff --git a/examples/custom-node/src/engine.rs b/examples/custom-node/src/engine.rs index ab938be82d4..e3bc6019d7b 100644 --- a/examples/custom-node/src/engine.rs +++ b/examples/custom-node/src/engine.rs @@ -1,19 +1,32 @@ -use crate::primitives::CustomNodePrimitives; +use crate::{ + chainspec::CustomChainSpec, + primitives::{CustomHeader, CustomNodePrimitives, CustomTransaction}, +}; use op_alloy_rpc_types_engine::{OpExecutionData, OpExecutionPayload}; use reth_chain_state::ExecutedBlockWithTrieUpdates; use reth_ethereum::{ node::api::{ - BuiltPayload, ExecutionPayload, NodePrimitives, PayloadAttributes, - PayloadBuilderAttributes, PayloadTypes, + validate_version_specific_fields, AddOnsContext, BuiltPayload, EngineApiMessageVersion, + EngineObjectValidationError, EngineValidator, ExecutionPayload, FullNodeComponents, + InvalidPayloadAttributesError, NewPayloadError, NodePrimitives, NodeTypes, + PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, PayloadTypes, + PayloadValidator, }, - primitives::SealedBlock, + primitives::{RecoveredBlock, SealedBlock}, + storage::StateProviderFactory, + trie::{KeccakKeyHasher, KeyHasher}, }; +use reth_node_builder::rpc::EngineValidatorBuilder; use reth_op::{ - node::{OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes}, + node::{ + engine::OpEngineValidator, OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes, + }, OpTransactionSigned, }; use revm_primitives::U256; use serde::{Deserialize, Serialize}; +use std::sync::Arc; +use thiserror::Error; #[derive(Debug, Clone, Copy, Serialize, Deserialize)] 
pub struct CustomPayloadTypes; @@ -25,6 +38,10 @@ pub struct CustomExecutionData { } impl ExecutionPayload for CustomExecutionData { + fn parent_hash(&self) -> revm_primitives::B256 { + self.inner.parent_hash() + } + fn block_hash(&self) -> revm_primitives::B256 { self.inner.block_hash() } @@ -33,24 +50,20 @@ impl ExecutionPayload for CustomExecutionData { self.inner.block_number() } - fn parent_hash(&self) -> revm_primitives::B256 { - self.inner.parent_hash() + fn withdrawals(&self) -> Option<&Vec<alloy_eips::eip4895::Withdrawal>> { + None } - fn gas_used(&self) -> u64 { - self.inner.gas_used() + fn parent_beacon_block_root(&self) -> Option<revm_primitives::B256> { + self.inner.parent_beacon_block_root() } fn timestamp(&self) -> u64 { self.inner.timestamp() } - fn parent_beacon_block_root(&self) -> Option<revm_primitives::B256> { - self.inner.parent_beacon_block_root() - } - - fn withdrawals(&self) -> Option<&Vec<alloy_eips::eip4895::Withdrawal>> { - None + fn gas_used(&self) -> u64 { + self.inner.gas_used() } } @@ -62,10 +75,6 @@ pub struct CustomPayloadAttributes { } impl PayloadAttributes for CustomPayloadAttributes { - fn parent_beacon_block_root(&self) -> Option<revm_primitives::B256> { - self.inner.parent_beacon_block_root() - } - fn timestamp(&self) -> u64 { self.inner.timestamp() } @@ -73,6 +82,10 @@ impl PayloadAttributes for CustomPayloadAttributes { fn withdrawals(&self) -> Option<&Vec<alloy_eips::eip4895::Withdrawal>> { self.inner.withdrawals() } + + fn parent_beacon_block_root(&self) -> Option<revm_primitives::B256> { + self.inner.parent_beacon_block_root() + } } #[derive(Debug, Clone)] @@ -101,28 +114,28 @@ impl PayloadBuilderAttributes for CustomPayloadBuilderAttributes { }) } - fn parent(&self) -> revm_primitives::B256 { - self.inner.parent() + fn payload_id(&self) -> alloy_rpc_types_engine::PayloadId { + self.inner.payload_id() } - fn parent_beacon_block_root(&self) -> Option<revm_primitives::B256> { - self.inner.parent_beacon_block_root() + fn parent(&self) -> revm_primitives::B256 { + self.inner.parent() } - fn payload_id(&self) -> alloy_rpc_types_engine::PayloadId { - self.inner.payload_id() + fn timestamp(&self) -> u64 { + self.inner.timestamp() } - fn prev_randao(&self) -> revm_primitives::B256 { - self.inner.prev_randao() + fn parent_beacon_block_root(&self) -> Option<revm_primitives::B256> { + self.inner.parent_beacon_block_root() } fn suggested_fee_recipient(&self) -> revm_primitives::Address { self.inner.suggested_fee_recipient() } - fn timestamp(&self) -> u64 { - self.inner.timestamp() + fn prev_randao(&self) -> revm_primitives::B256 { + self.inner.prev_randao() } fn withdrawals(&self) -> &alloy_eips::eip4895::Withdrawals { @@ -140,14 +153,14 @@ impl BuiltPayload for CustomBuiltPayload { self.0.block() } - fn executed_block(&self) -> Option<ExecutedBlockWithTrieUpdates<CustomNodePrimitives>> { - self.0.executed_block() - } - fn fees(&self) -> U256 { self.0.fees() } + fn executed_block(&self) -> Option<ExecutedBlockWithTrieUpdates<CustomNodePrimitives>> { + self.0.executed_block() + } + fn requests(&self) -> Option<Requests> { self.0.requests() } @@ -162,10 +175,10 @@ impl From<CustomBuiltPayload> } impl PayloadTypes for CustomPayloadTypes { + type ExecutionData = CustomExecutionData; type BuiltPayload = CustomBuiltPayload; type PayloadAttributes = CustomPayloadAttributes; type PayloadBuilderAttributes = CustomPayloadBuilderAttributes; - type ExecutionData = CustomExecutionData; fn block_to_payload( block: SealedBlock< @@ -179,3 +192,128 @@ impl PayloadTypes for CustomPayloadTypes { CustomExecutionData { inner: OpExecutionData { payload, sidecar }, extension } } } + +/// Custom engine validator +#[derive(Debug, Clone)] +pub struct CustomEngineValidator<P> { + inner: OpEngineValidator<P, CustomTransaction, CustomChainSpec>, +} + +impl<P> CustomEngineValidator<P> +where + P: Send + Sync + Unpin + 'static, +{ + /// Instantiates a new validator. + pub fn new<KH: KeyHasher>(chain_spec: Arc<CustomChainSpec>, provider: P) -> Self { + Self { inner: OpEngineValidator::new::<KH>(chain_spec, provider) } + } + + /// Returns the chain spec used by the validator. + #[inline] + fn chain_spec(&self) -> &CustomChainSpec { + self.inner.chain_spec() + } +} + +impl<P> PayloadValidator for CustomEngineValidator<P>
PayloadValidator for CustomEngineValidator

+where + P: StateProviderFactory + Send + Sync + Unpin + 'static, +{ + type Block = crate::primitives::block::Block; + type ExecutionData = CustomExecutionData; + + fn ensure_well_formed_payload( + &self, + payload: CustomExecutionData, + ) -> Result, NewPayloadError> { + let sealed_block = self.inner.ensure_well_formed_payload(payload.inner)?; + let (block, senders) = sealed_block.split_sealed(); + let (header, body) = block.split_sealed_header_body(); + let header = CustomHeader { inner: header.into_header(), extension: payload.extension }; + let body = body.map_ommers(|_| CustomHeader::default()); + let block = SealedBlock::::from_parts_unhashed(header, body); + + Ok(block.with_senders(senders)) + } +} + +impl EngineValidator for CustomEngineValidator

+where + P: StateProviderFactory + Send + Sync + Unpin + 'static, + T: PayloadTypes< + PayloadAttributes = CustomPayloadAttributes, + ExecutionData = CustomExecutionData, + >, +{ + fn validate_version_specific_fields( + &self, + version: EngineApiMessageVersion, + payload_or_attrs: PayloadOrAttributes<'_, Self::ExecutionData, T::PayloadAttributes>, + ) -> Result<(), EngineObjectValidationError> { + validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs) + } + + fn ensure_well_formed_attributes( + &self, + version: EngineApiMessageVersion, + attributes: &T::PayloadAttributes, + ) -> Result<(), EngineObjectValidationError> { + validate_version_specific_fields( + self.chain_spec(), + version, + PayloadOrAttributes::::PayloadAttributes( + attributes, + ), + )?; + + // custom validation logic - ensure that the custom field is not zero + if attributes.extension == 0 { + return Err(EngineObjectValidationError::invalid_params( + CustomError::CustomFieldIsNotZero, + )) + } + + Ok(()) + } + + fn validate_payload_attributes_against_header( + &self, + _attr: &::PayloadAttributes, + _header: &::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + // skip default timestamp validation + Ok(()) + } +} + +/// Custom error type used in payload attributes validation +#[derive(Debug, Error)] +pub enum CustomError { + #[error("Custom field is not zero")] + CustomFieldIsNotZero, +} + +/// Custom engine validator builder +#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct CustomEngineValidatorBuilder; + +impl EngineValidatorBuilder for CustomEngineValidatorBuilder +where + N: FullNodeComponents< + Types: NodeTypes< + Payload = CustomPayloadTypes, + ChainSpec = CustomChainSpec, + Primitives = CustomNodePrimitives, + >, + >, +{ + type Validator = CustomEngineValidator; + + async fn build(self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { + Ok(CustomEngineValidator::new::( + ctx.config.chain.clone(), + ctx.node.provider().clone(), + )) + } +} diff --git a/examples/custom-node/src/engine_api.rs b/examples/custom-node/src/engine_api.rs index 0484be19d45..bc92ffb8a99 100644 --- a/examples/custom-node/src/engine_api.rs +++ b/examples/custom-node/src/engine_api.rs @@ -27,15 +27,20 @@ pub struct CustomExecutionPayloadInput {} #[derive(Clone, serde::Serialize)] pub struct CustomExecutionPayloadEnvelope { execution_payload: ExecutionPayloadV3, + extension: u64, } impl From for CustomExecutionPayloadEnvelope { fn from(value: CustomBuiltPayload) -> Self { let sealed_block = value.0.into_sealed_block(); let hash = sealed_block.hash(); + let extension = sealed_block.header().extension; let block = sealed_block.into_block(); - Self { execution_payload: ExecutionPayloadV3::from_block_unchecked(hash, &block.clone()) } + Self { + execution_payload: ExecutionPayloadV3::from_block_unchecked(hash, &block.clone()), + extension, + } } } diff --git a/examples/custom-node/src/evm/env.rs b/examples/custom-node/src/evm/env.rs index 91e7293b92c..dfe0fe93f9c 100644 --- a/examples/custom-node/src/evm/env.rs +++ b/examples/custom-node/src/evm/env.rs @@ -257,28 +257,6 @@ impl TransactionEnv for CustomTxEnv { } } -impl FromRecoveredTx for PaymentTxEnv { - fn from_recovered_tx(tx: &CustomTransaction, sender: Address) -> Self { - PaymentTxEnv(match tx { - CustomTransaction::BuiltIn(tx) => { - OpTransaction::::from_recovered_tx(tx, sender).base - } - CustomTransaction::Other(tx) => TxEnv::from_recovered_tx(tx, sender), - }) - } -} - -impl FromTxWithEncoded for PaymentTxEnv { - fn 
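Aside (editor's note, not part of the diff): the `EngineValidator` impl above is generic over `T: PayloadTypes`, so calling its methods requires pinning `T`. A minimal sketch of driving the custom attribute rule, assuming `CustomPayloadTypes` as the `T`; the helper name `validate_attrs` is hypothetical, everything else comes from `engine.rs`:

```rust
use crate::engine::{CustomEngineValidator, CustomPayloadAttributes, CustomPayloadTypes};
use reth_ethereum::{
    node::api::{EngineApiMessageVersion, EngineObjectValidationError, EngineValidator},
    storage::StateProviderFactory,
};

/// Hypothetical helper: runs the version-specific checks plus the custom
/// rule above that rejects `extension == 0`, the way the engine API would.
fn validate_attrs<P>(
    validator: &CustomEngineValidator<P>,
    attrs: &CustomPayloadAttributes,
) -> Result<(), EngineObjectValidationError>
where
    P: StateProviderFactory + Send + Sync + Unpin + 'static,
{
    <CustomEngineValidator<P> as EngineValidator<CustomPayloadTypes>>::ensure_well_formed_attributes(
        validator,
        EngineApiMessageVersion::V3,
        attrs,
    )
}
```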
diff --git a/examples/custom-node/src/engine_api.rs b/examples/custom-node/src/engine_api.rs
index 0484be19d45..bc92ffb8a99 100644
--- a/examples/custom-node/src/engine_api.rs
+++ b/examples/custom-node/src/engine_api.rs
@@ -27,15 +27,20 @@ pub struct CustomExecutionPayloadInput {}
 #[derive(Clone, serde::Serialize)]
 pub struct CustomExecutionPayloadEnvelope {
     execution_payload: ExecutionPayloadV3,
+    extension: u64,
 }
 
 impl From<CustomBuiltPayload> for CustomExecutionPayloadEnvelope {
     fn from(value: CustomBuiltPayload) -> Self {
         let sealed_block = value.0.into_sealed_block();
         let hash = sealed_block.hash();
+        let extension = sealed_block.header().extension;
         let block = sealed_block.into_block();
 
-        Self { execution_payload: ExecutionPayloadV3::from_block_unchecked(hash, &block.clone()) }
+        Self {
+            execution_payload: ExecutionPayloadV3::from_block_unchecked(hash, &block.clone()),
+            extension,
+        }
     }
 }
diff --git a/examples/custom-node/src/evm/env.rs b/examples/custom-node/src/evm/env.rs
index 91e7293b92c..dfe0fe93f9c 100644
--- a/examples/custom-node/src/evm/env.rs
+++ b/examples/custom-node/src/evm/env.rs
@@ -257,28 +257,6 @@ impl TransactionEnv for CustomTxEnv {
     }
 }
 
-impl FromRecoveredTx<CustomTransaction> for PaymentTxEnv {
-    fn from_recovered_tx(tx: &CustomTransaction, sender: Address) -> Self {
-        PaymentTxEnv(match tx {
-            CustomTransaction::BuiltIn(tx) => {
-                OpTransaction::<TxEnv>::from_recovered_tx(tx, sender).base
-            }
-            CustomTransaction::Other(tx) => TxEnv::from_recovered_tx(tx, sender),
-        })
-    }
-}
-
-impl FromTxWithEncoded<CustomTransaction> for PaymentTxEnv {
-    fn from_encoded_tx(tx: &CustomTransaction, sender: Address, encoded: Bytes) -> Self {
-        PaymentTxEnv(match tx {
-            CustomTransaction::BuiltIn(tx) => {
-                OpTransaction::<TxEnv>::from_encoded_tx(tx, sender, encoded).base
-            }
-            CustomTransaction::Other(tx) => TxEnv::from_encoded_tx(tx, sender, encoded),
-        })
-    }
-}
-
 impl FromRecoveredTx<TxPayment> for TxEnv {
     fn from_recovered_tx(tx: &TxPayment, caller: Address) -> Self {
         let TxPayment {
@@ -317,6 +295,12 @@ impl FromTxWithEncoded for TxEnv {
     }
 }
 
+impl FromTxWithEncoded<TxPayment> for TxEnv {
+    fn from_encoded_tx(tx: &TxPayment, sender: Address, _encoded: Bytes) -> Self {
+        Self::from_recovered_tx(tx, sender)
+    }
+}
+
 impl FromRecoveredTx<OpTxEnvelope> for CustomTxEnv {
     fn from_recovered_tx(tx: &OpTxEnvelope, sender: Address) -> Self {
         Self::Op(OpTransaction::from_recovered_tx(tx, sender))
@@ -332,8 +316,8 @@ impl FromTxWithEncoded for CustomTxEnv {
 impl FromRecoveredTx<CustomTransaction> for CustomTxEnv {
     fn from_recovered_tx(tx: &CustomTransaction, sender: Address) -> Self {
         match tx {
-            CustomTransaction::BuiltIn(tx) => Self::from_recovered_tx(tx, sender),
-            CustomTransaction::Other(tx) => {
+            CustomTransaction::Op(tx) => Self::from_recovered_tx(tx, sender),
+            CustomTransaction::Payment(tx) => {
                 Self::Payment(PaymentTxEnv(TxEnv::from_recovered_tx(tx, sender)))
             }
         }
@@ -343,8 +327,8 @@ impl FromTxWithEncoded for CustomTxEnv {
     fn from_encoded_tx(tx: &CustomTransaction, sender: Address, encoded: Bytes) -> Self {
         match tx {
-            CustomTransaction::BuiltIn(tx) => Self::from_encoded_tx(tx, sender, encoded),
-            CustomTransaction::Other(tx) => {
+            CustomTransaction::Op(tx) => Self::from_encoded_tx(tx, sender, encoded),
+            CustomTransaction::Payment(tx) => {
                 Self::Payment(PaymentTxEnv(TxEnv::from_encoded_tx(tx, sender, encoded)))
             }
         }
diff --git a/examples/custom-node/src/evm/executor.rs b/examples/custom-node/src/evm/executor.rs
index 976c45ef528..2c5a58d7584 100644
--- a/examples/custom-node/src/evm/executor.rs
+++ b/examples/custom-node/src/evm/executor.rs
@@ -43,13 +43,11 @@ where
         f: impl FnOnce(&ExecutionResult<<Self::Evm as Evm>::HaltReason>) -> CommitChanges,
     ) -> Result<Option<u64>, BlockExecutionError> {
         match tx.tx() {
-            CustomTransaction::BuiltIn(op_tx) => {
-                self.inner.execute_transaction_with_commit_condition(
-                    Recovered::new_unchecked(op_tx, *tx.signer()),
-                    f,
-                )
-            }
-            CustomTransaction::Other(..) => todo!(),
+            CustomTransaction::Op(op_tx) => self.inner.execute_transaction_with_commit_condition(
+                Recovered::new_unchecked(op_tx, *tx.signer()),
+                f,
+            ),
+            CustomTransaction::Payment(..) => todo!(),
         }
     }
diff --git a/examples/custom-node/src/pool.rs b/examples/custom-node/src/pool.rs
index 8fda09d7129..09f0b667c79 100644
--- a/examples/custom-node/src/pool.rs
+++ b/examples/custom-node/src/pool.rs
@@ -1,5 +1,28 @@
-use crate::primitives::CustomTransactionEnvelope;
+use crate::primitives::{CustomTransaction, CustomTransactionEnvelope};
+use alloy_consensus::error::ValueError;
 use op_alloy_consensus::OpPooledTransaction;
 use reth_ethereum::primitives::Extended;
 
 pub type CustomPooledTransaction = Extended<OpPooledTransaction, CustomTransactionEnvelope>;
+
+impl From<CustomPooledTransaction> for CustomTransaction {
+    fn from(tx: CustomPooledTransaction) -> Self {
+        match tx {
+            CustomPooledTransaction::BuiltIn(tx) => Self::Op(tx.into()),
+            CustomPooledTransaction::Other(tx) => Self::Payment(tx),
+        }
+    }
+}
+
+impl TryFrom<CustomTransaction> for CustomPooledTransaction {
+    type Error = ValueError<CustomTransaction>;
+
+    fn try_from(tx: CustomTransaction) -> Result<Self, Self::Error> {
+        match tx {
+            CustomTransaction::Op(op) => Ok(Self::BuiltIn(
+                OpPooledTransaction::try_from(op).map_err(|op| op.map(CustomTransaction::Op))?,
+            )),
+            CustomTransaction::Payment(payment) => Ok(Self::Other(payment)),
+        }
+    }
+}
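Aside (not part of the diff): the pair of conversions above is deliberately asymmetric - `From` is total, while `TryFrom` fails for transactions with no pooled form (notably OP deposits). A small hypothetical helper to illustrate, using only the types defined in `pool.rs`:

```rust
use crate::{pool::CustomPooledTransaction, primitives::CustomTransaction};

/// Hypothetical: promote a pooled transaction to consensus form and back.
/// The round trip succeeds for payment and regular OP transactions; a
/// deposit would make `try_from` return the `ValueError` instead.
fn roundtrip(tx: CustomPooledTransaction) -> Option<CustomPooledTransaction> {
    let consensus: CustomTransaction = tx.into();
    CustomPooledTransaction::try_from(consensus).ok()
}
```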
diff --git a/examples/custom-node/src/primitives/header.rs b/examples/custom-node/src/primitives/header.rs
index 884c9c4cb1c..acf80bb26e5 100644
--- a/examples/custom-node/src/primitives/header.rs
+++ b/examples/custom-node/src/primitives/header.rs
@@ -4,8 +4,7 @@ use alloy_primitives::{
 };
 use alloy_rlp::{Encodable, RlpDecodable, RlpEncodable};
 use reth_codecs::Compact;
-use reth_ethereum::primitives::{BlockHeader, InMemorySize};
-use reth_op::primitives::block::header::BlockHeaderMut;
+use reth_ethereum::primitives::{serde_bincode_compat::RlpBincode, BlockHeader, InMemorySize};
 use revm_primitives::keccak256;
 use serde::{Deserialize, Serialize};
 
@@ -182,40 +181,12 @@ impl reth_db_api::table::Decompress for CustomHeader {
     }
 }
 
-impl BlockHeader for CustomHeader {}
-
-impl BlockHeaderMut for CustomHeader {
+impl reth_primitives_traits::block::header::BlockHeaderMut for CustomHeader {
     fn extra_data_mut(&mut self) -> &mut Bytes {
         &mut self.inner.extra_data
     }
 }
 
-mod serde_bincode_compat {
-    use alloy_consensus::serde_bincode_compat::Header;
-    use reth_ethereum::primitives::serde_bincode_compat::SerdeBincodeCompat;
-    use serde::{Deserialize, Serialize};
-
-    #[derive(Serialize, Deserialize, Debug)]
-    pub struct CustomHeader<'a> {
-        inner: Header<'a>,
-        extension: u64,
-    }
-
-    impl From<CustomHeader<'_>> for super::CustomHeader {
-        fn from(value: CustomHeader) -> Self {
-            Self { inner: value.inner.into(), extension: value.extension }
-        }
-    }
-
-    impl SerdeBincodeCompat for super::CustomHeader {
-        type BincodeRepr<'a> = CustomHeader<'a>;
-
-        fn as_repr(&self) -> Self::BincodeRepr<'_> {
-            CustomHeader { inner: self.inner.as_repr(), extension: self.extension }
-        }
+impl BlockHeader for CustomHeader {}
-
-        fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
-            repr.into()
-        }
-    }
-}
+impl RlpBincode for CustomHeader {}
diff --git a/examples/custom-node/src/primitives/tx.rs b/examples/custom-node/src/primitives/tx.rs
index ce365b2c405..48348f6839a 100644
--- a/examples/custom-node/src/primitives/tx.rs
+++ b/examples/custom-node/src/primitives/tx.rs
@@ -1,34 +1,39 @@
-use super::{TxPayment, TxTypeCustom};
+use super::TxPayment;
 use alloy_consensus::{
     crypto::{
         secp256k1::{recover_signer, recover_signer_unchecked},
         RecoveryError,
     },
     transaction::SignerRecoverable,
-    SignableTransaction, Signed, Transaction,
+    SignableTransaction, Signed, Transaction, TransactionEnvelope,
 };
-use alloy_eips::{eip2718::Eip2718Result, Decodable2718, Encodable2718, Typed2718};
-use alloy_primitives::{keccak256, Signature, TxHash};
+use alloy_eips::{
+    eip2718::{Eip2718Result, IsTyped2718},
+    Decodable2718, Encodable2718, Typed2718,
+};
+use alloy_primitives::{bytes::Buf, keccak256, Sealed, Signature, TxHash, B256};
 use alloy_rlp::{BufMut, Decodable, Encodable, Result as RlpResult};
-use op_alloy_consensus::OpTxEnvelope;
+use op_alloy_consensus::{OpTxEnvelope, TxDeposit};
 use reth_codecs::{
     alloy::transaction::{FromTxCompact, ToTxCompact},
     Compact,
 };
-use reth_ethereum::primitives::{serde_bincode_compat::SerdeBincodeCompat, InMemorySize};
-use reth_op::{
-    primitives::{Extended, SignedTransaction},
-    OpTransaction,
-};
+use reth_ethereum::primitives::{serde_bincode_compat::RlpBincode, InMemorySize};
+use reth_op::{primitives::SignedTransaction, OpTransaction};
 use revm_primitives::{Address, Bytes};
 use serde::{Deserialize, Serialize};
 
-/// An [`OpTxEnvelope`] that is [`Extended`] by one more variant of [`CustomTransactionEnvelope`].
-pub type CustomTransaction = ExtendedOpTxEnvelope<CustomTransactionEnvelope>;
-
-/// A [`SignedTransaction`] implementation that combines the [`OpTxEnvelope`] and another
-/// transaction type.
-pub type ExtendedOpTxEnvelope<T> = Extended<OpTxEnvelope, T>;
+/// Either [`OpTxEnvelope`] or [`CustomTransactionEnvelope`].
+#[derive(Debug, Clone, TransactionEnvelope)]
+#[envelope(tx_type_name = TxTypeCustom)]
+pub enum CustomTransaction {
+    /// A regular Optimism transaction as defined by [`OpTxEnvelope`].
+    #[envelope(flatten)]
+    Op(OpTxEnvelope),
+    /// A [`TxPayment`] tagged with type 42.
+    #[envelope(ty = 42)]
+    Payment(CustomTransactionEnvelope),
+}
 
 #[derive(Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialEq)]
 pub struct CustomTransactionEnvelope {
@@ -98,7 +103,7 @@ impl Transaction for CustomTransactionEnvelope {
         self.inner.tx().access_list()
     }
 
-    fn blob_versioned_hashes(&self) -> Option<&[revm_primitives::B256]> {
+    fn blob_versioned_hashes(&self) -> Option<&[B256]> {
         self.inner.tx().blob_versioned_hashes()
     }
 
@@ -198,20 +203,8 @@ impl ToTxCompact for CustomTransactionEnvelope {
     }
 }
 
-#[derive(Debug, Serialize, Deserialize)]
-pub struct BincodeCompatSignedTxCustom(pub Signed<TxPayment>);
-
-impl SerdeBincodeCompat for CustomTransactionEnvelope {
-    type BincodeRepr<'a> = BincodeCompatSignedTxCustom;
-
-    fn as_repr(&self) -> Self::BincodeRepr<'_> {
-        BincodeCompatSignedTxCustom(self.inner.clone())
-    }
-
-    fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
-        Self { inner: repr.0.clone() }
-    }
-}
+impl RlpBincode for CustomTransactionEnvelope {}
+impl RlpBincode for CustomTransaction {}
 
 impl reth_codecs::alloy::transaction::Envelope for CustomTransactionEnvelope {
     fn signature(&self) -> &Signature {
@@ -219,14 +212,14 @@ impl reth_codecs::alloy::transaction::Envelope for CustomTransactionEnvelope {
     }
 
     fn tx_type(&self) -> Self::TxType {
-        TxTypeCustom::Custom
+        TxTypeCustom::Payment
     }
 }
 
 impl Compact for CustomTransactionEnvelope {
     fn to_compact<B>(&self, buf: &mut B) -> usize
     where
-        B: alloy_rlp::bytes::BufMut + AsMut<[u8]>,
+        B: BufMut + AsMut<[u8]>,
     {
         self.inner.tx().to_compact(buf)
     }
@@ -239,8 +232,101 @@ impl Compact for CustomTransactionEnvelope {
     }
 }
 
+impl reth_codecs::Compact for CustomTransaction {
+    fn to_compact<Buf>(&self, buf: &mut Buf) -> usize
+    where
+        Buf: BufMut + AsMut<[u8]>,
+    {
+        buf.put_u8(self.ty());
+        match self {
+            Self::Op(tx) => tx.to_compact(buf),
+            Self::Payment(tx) => tx.to_compact(buf),
+        }
+    }
+
+    fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) {
+        let type_byte = buf.get_u8();
+
+        if <OpTxEnvelope as IsTyped2718>::is_type(type_byte) {
+            let (tx, remaining) = OpTxEnvelope::from_compact(buf, len);
+            return (Self::Op(tx), remaining);
+        }
+
+        let (tx, remaining) = CustomTransactionEnvelope::from_compact(buf, len);
+        (Self::Payment(tx), remaining)
+    }
+}
+
 impl OpTransaction for CustomTransactionEnvelope {
     fn is_deposit(&self) -> bool {
         false
     }
+
+    fn as_deposit(&self) -> Option<&Sealed<TxDeposit>> {
+        None
+    }
+}
+
+impl OpTransaction for CustomTransaction {
+    fn is_deposit(&self) -> bool {
+        match self {
+            CustomTransaction::Op(op) => op.is_deposit(),
+            CustomTransaction::Payment(payment) => payment.is_deposit(),
+        }
+    }
+
+    fn as_deposit(&self) -> Option<&Sealed<TxDeposit>> {
+        match self {
+            CustomTransaction::Op(op) => op.as_deposit(),
+            CustomTransaction::Payment(payment) => payment.as_deposit(),
+        }
+    }
+}
+
+impl SignerRecoverable for CustomTransaction {
+    fn recover_signer(&self) -> Result<Address, RecoveryError> {
+        match self {
+            CustomTransaction::Op(tx) => SignerRecoverable::recover_signer(tx),
+            CustomTransaction::Payment(tx) => SignerRecoverable::recover_signer(tx),
+        }
+    }
+
+    fn recover_signer_unchecked(&self) -> Result<Address, RecoveryError> {
+        match self {
+            CustomTransaction::Op(tx) => SignerRecoverable::recover_signer_unchecked(tx),
+            CustomTransaction::Payment(tx) => SignerRecoverable::recover_signer_unchecked(tx),
+        }
+    }
+}
+
+impl SignedTransaction for CustomTransaction {
+    fn recover_signer_unchecked_with_buf(
+        &self,
+        buf: &mut Vec<u8>,
+    ) -> Result<Address, RecoveryError> {
+        match self {
+            CustomTransaction::Op(tx) => {
+                SignedTransaction::recover_signer_unchecked_with_buf(tx, buf)
+            }
+            CustomTransaction::Payment(tx) => {
+                SignedTransaction::recover_signer_unchecked_with_buf(tx, buf)
+            }
+        }
+    }
+
+    fn tx_hash(&self) -> &B256 {
+        match self {
+            CustomTransaction::Op(tx) => SignedTransaction::tx_hash(tx),
+            CustomTransaction::Payment(tx) => SignedTransaction::tx_hash(tx),
+        }
+    }
+}
+
+impl InMemorySize for CustomTransaction {
+    fn size(&self) -> usize {
+        match self {
+            CustomTransaction::Op(tx) => InMemorySize::size(tx),
+            CustomTransaction::Payment(tx) => InMemorySize::size(tx),
+        }
+    }
 }
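Aside (not part of the diff): in the `Compact` impl above, `to_compact` writes one leading type byte and returns only the inner payload length, and `from_compact` consumes that byte before dispatching via `IsTyped2718`. A sketch of the implied round trip; the helper itself is hypothetical:

```rust
use crate::primitives::CustomTransaction;
use reth_codecs::Compact;

/// Hypothetical round trip: `len` is the inner length returned by
/// `to_compact`, while `buf` additionally holds the leading type byte
/// that `from_compact` strips before dispatching on the variant.
fn compact_roundtrip(tx: &CustomTransaction) -> CustomTransaction {
    let mut buf = Vec::new();
    let len = tx.to_compact(&mut buf);
    let (decoded, _rest) = CustomTransaction::from_compact(&buf, len);
    decoded
}
```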
diff --git a/examples/custom-node/src/primitives/tx_custom.rs b/examples/custom-node/src/primitives/tx_custom.rs
index c44a5e9c4db..8729378bd59 100644
--- a/examples/custom-node/src/primitives/tx_custom.rs
+++ b/examples/custom-node/src/primitives/tx_custom.rs
@@ -1,4 +1,4 @@
-use crate::primitives::{TxTypeCustom, TRANSFER_TX_TYPE_ID};
+use crate::primitives::PAYMENT_TX_TYPE_ID;
 use alloy_consensus::{
     transaction::{RlpEcdsaDecodableTx, RlpEcdsaEncodableTx},
     SignableTransaction, Transaction,
@@ -7,7 +7,7 @@ use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization, Typed2718};
 use alloy_primitives::{Address, Bytes, ChainId, Signature, TxKind, B256, U256};
 use alloy_rlp::{BufMut, Decodable, Encodable};
 use core::mem;
-use reth_ethereum::primitives::{serde_bincode_compat::SerdeBincodeCompat, InMemorySize};
+use reth_ethereum::primitives::{serde_bincode_compat::RlpBincode, InMemorySize};
 
 /// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)).
 #[derive(
@@ -71,8 +71,8 @@ pub struct TxPayment {
 impl TxPayment {
     /// Get the transaction type
     #[doc(alias = "transaction_type")]
-    pub const fn tx_type() -> TxTypeCustom {
-        TxTypeCustom::Custom
+    pub const fn tx_type() -> super::tx::TxTypeCustom {
+        super::tx::TxTypeCustom::Payment
     }
 
     /// Calculates a heuristic for the in-memory size of the [TxPayment]
@@ -115,7 +115,7 @@ impl RlpEcdsaEncodableTx for TxPayment {
 }
 
 impl RlpEcdsaDecodableTx for TxPayment {
-    const DEFAULT_TX_TYPE: u8 = { Self::tx_type() as u8 };
+    const DEFAULT_TX_TYPE: u8 = { PAYMENT_TX_TYPE_ID };
 
     /// Decodes the inner [TxPayment] fields from RLP bytes.
     ///
@@ -244,7 +244,7 @@ impl Transaction for TxPayment {
 
 impl Typed2718 for TxPayment {
     fn ty(&self) -> u8 {
-        TRANSFER_TX_TYPE_ID
+        PAYMENT_TX_TYPE_ID
     }
 }
 
@@ -254,7 +254,7 @@ impl SignableTransaction<Signature> for TxPayment {
     }
 
     fn encode_for_signing(&self, out: &mut dyn alloy_rlp::BufMut) {
-        out.put_u8(Self::tx_type() as u8);
+        out.put_u8(Self::tx_type().ty());
         self.encode(out)
     }
 
@@ -285,17 +285,4 @@ impl InMemorySize for TxPayment {
     }
 }
 
-#[derive(Debug, serde::Serialize, serde::Deserialize)]
-pub struct BincodeCompatTxCustom(pub TxPayment);
-
-impl SerdeBincodeCompat for TxPayment {
-    type BincodeRepr<'a> = BincodeCompatTxCustom;
-
-    fn as_repr(&self) -> Self::BincodeRepr<'_> {
-        BincodeCompatTxCustom(self.clone())
-    }
-
-    fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
-        repr.0.clone()
-    }
-}
+impl RlpBincode for TxPayment {}
diff --git a/examples/custom-node/src/primitives/tx_type.rs b/examples/custom-node/src/primitives/tx_type.rs
index 36160024792..20b9e4be4cd 100644
--- a/examples/custom-node/src/primitives/tx_type.rs
+++ b/examples/custom-node/src/primitives/tx_type.rs
@@ -1,21 +1,8 @@
+use crate::primitives::TxTypeCustom;
 use alloy_primitives::bytes::{Buf, BufMut};
 use reth_codecs::{txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG, Compact};
-use serde::{Deserialize, Serialize};
 
-pub const TRANSFER_TX_TYPE_ID: u8 = 42;
-
-/// An enum for the custom transaction type(s)
-#[repr(u8)]
-#[derive(Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialEq)]
-pub enum TxTypeCustom {
-    Custom = TRANSFER_TX_TYPE_ID,
-}
-
-impl From<TxTypeCustom> for u8 {
-    fn from(value: TxTypeCustom) -> Self {
-        value as Self
-    }
-}
+pub const PAYMENT_TX_TYPE_ID: u8 = 42;
 
 impl Compact for TxTypeCustom {
     fn to_compact<B>(&self, buf: &mut B) -> usize
@@ -23,26 +10,27 @@ impl Compact for TxTypeCustom {
         B: BufMut + AsMut<[u8]>,
     {
         match self {
-            Self::Custom => {
-                buf.put_u8(TRANSFER_TX_TYPE_ID);
+            Self::Op(ty) => ty.to_compact(buf),
+            Self::Payment => {
+                buf.put_u8(PAYMENT_TX_TYPE_ID);
                 COMPACT_EXTENDED_IDENTIFIER_FLAG
             }
         }
     }
 
     fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) {
-        (
-            match identifier {
-                COMPACT_EXTENDED_IDENTIFIER_FLAG => {
+        match identifier {
+            COMPACT_EXTENDED_IDENTIFIER_FLAG => (
+                {
                     let extended_identifier = buf.get_u8();
                     match extended_identifier {
-                        TRANSFER_TX_TYPE_ID => Self::Custom,
+                        PAYMENT_TX_TYPE_ID => Self::Payment,
                         _ => panic!("Unsupported TxType identifier: {extended_identifier}"),
                     }
-                }
-                _ => panic!("Unknown identifier for TxType: {identifier}"),
-            },
-            buf,
-        )
+                },
+                buf,
+            ),
+            v => Self::from_compact(buf, v),
+        }
     }
 }
diff --git a/examples/network-proxy/src/main.rs b/examples/network-proxy/src/main.rs
index 461fe348360..51ba8e2b4a4 100644
--- a/examples/network-proxy/src/main.rs
+++ b/examples/network-proxy/src/main.rs
@@ -81,6 +81,7 @@ async fn main() -> eyre::Result<()> {
                         IncomingEthRequest::GetBlockBodies { .. } => {}
                         IncomingEthRequest::GetNodeData { .. } => {}
                         IncomingEthRequest::GetReceipts { .. } => {}
+                        IncomingEthRequest::GetReceipts69 { .. } => {}
                     }
                 }
                 transaction_message = transactions_rx.recv() => {
diff --git a/examples/precompile-cache/src/main.rs b/examples/precompile-cache/src/main.rs
index ed8143b36bf..e72fee598cc 100644
--- a/examples/precompile-cache/src/main.rs
+++ b/examples/precompile-cache/src/main.rs
@@ -4,7 +4,7 @@
 use alloy_evm::{
     eth::EthEvmContext,
-    precompiles::{DynPrecompile, Precompile, PrecompilesMap},
+    precompiles::{DynPrecompile, Precompile, PrecompileInput, PrecompilesMap},
     Evm, EvmFactory,
 };
 use alloy_genesis::Genesis;
@@ -45,11 +45,9 @@ type PrecompileLRUCache = LruMap<(Bytes, u64), PrecompileResult>;
 
 /// A cache for precompile inputs / outputs.
 ///
-/// This assumes that the precompile is a standard precompile, as in `StandardPrecompileFn`, meaning
-/// its inputs are only `(Bytes, u64)`.
-///
-/// NOTE: This does not work with "context stateful precompiles", ie `ContextStatefulPrecompile` or
-/// `ContextStatefulPrecompileMut`. They are explicitly banned.
+/// This cache works with standard precompiles that take input data and gas limit as parameters.
+/// The cache key is composed of the input bytes and gas limit, and the cached value is the
+/// precompile execution result.
 #[derive(Debug)]
 pub struct PrecompileCache {
     /// Caches for each precompile input / output.
@@ -120,15 +118,14 @@ impl WrappedPrecompile {
     /// wrapper that can be used inside Evm.
     fn wrap(precompile: DynPrecompile, cache: Arc<RwLock<PrecompileCache>>) -> DynPrecompile {
         let wrapped = Self::new(precompile, cache);
-        move |data: &[u8], gas_limit: u64| -> PrecompileResult { wrapped.call(data, gas_limit) }
-            .into()
+        move |input: PrecompileInput<'_>| -> PrecompileResult { wrapped.call(input) }.into()
     }
 }
 
 impl Precompile for WrappedPrecompile {
-    fn call(&self, data: &[u8], gas: u64) -> PrecompileResult {
+    fn call(&self, input: PrecompileInput<'_>) -> PrecompileResult {
         let mut cache = self.cache.write();
-        let key = (Bytes::copy_from_slice(data), gas);
+        let key = (Bytes::copy_from_slice(input.data), input.gas);
 
         // get the result if it exists
         if let Some(result) = cache.cache.get(&key) {
@@ -136,7 +133,7 @@ impl Precompile for WrappedPrecompile {
         }
 
         // call the precompile if cache miss
-        let output = self.precompile.call(data, gas);
+        let output = self.precompile.call(input);
 
         // insert the result into the cache
         cache.cache.insert(key, output.clone());
@@ -166,7 +163,7 @@ impl ExecutorBuilder for MyExecutorBuilder
 where
     Node: FullNodeTypes>,
 {
-    type EVM = EthEvmConfig;
+    type EVM = EthEvmConfig;
 
     async fn build_evm(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> {
         let evm_config = EthEvmConfig::new_with_evm_factory(
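Aside (not part of the diff): after this change a precompile closure receives a single `PrecompileInput` carrying the calldata and gas limit instead of separate `(data, gas)` arguments, as the `wrap` function above shows. A minimal sketch of a precompile in the new shape; the exact `PrecompileOutput` constructor path is an assumption of this sketch, and gas accounting is ignored for brevity:

```rust
use alloy_evm::precompiles::{DynPrecompile, PrecompileInput};
use alloy_primitives::Bytes;
use revm::precompile::{PrecompileOutput, PrecompileResult};

/// Hypothetical identity precompile: echoes the input data back and
/// charges a flat 15 gas (an arbitrary figure for illustration).
fn identity_precompile() -> DynPrecompile {
    (|input: PrecompileInput<'_>| -> PrecompileResult {
        Ok(PrecompileOutput::new(15, Bytes::copy_from_slice(input.data)))
    })
    .into()
}
```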
diff --git a/fork.yaml b/fork.yaml
index c3db7adb2f5..4c171211256 100644
--- a/fork.yaml
+++ b/fork.yaml
@@ -4,7 +4,7 @@ footer: |
 base:
   name: reth
   url: https://github.com/paradigmxyz/reth
-  hash: 41ed7e0b7961ae5d9cb29de66eed2992ca5528d4
+  hash: f67629fe918fcb90697b08e1d2b4d9dfafbfef49
 fork:
   name: scroll-reth
   url: https://github.com/scroll-tech/reth
diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs
index 1cf905ff2d1..4c463c612a6 100644
--- a/testing/ef-tests/src/cases/blockchain_test.rs
+++ b/testing/ef-tests/src/cases/blockchain_test.rs
@@ -108,7 +108,7 @@ impl BlockchainTestCase {
             }
 
             // A block processing failure occurred.
-            Err(Error::BlockProcessingFailed { block_number }) => match expectation {
+            err @ Err(Error::BlockProcessingFailed { block_number, .. }) => match expectation {
                 // It happened on exactly the block we were told to fail on
                 Some((expected, _)) if block_number == expected => Ok(()),
 
@@ -122,7 +122,7 @@ impl BlockchainTestCase {
                 ))),
 
                 // No failure expected at all - bubble up original error.
-                None => Err(Error::BlockProcessingFailed { block_number }),
+                None => err,
             },
 
             // Non‑processing error – forward as‑is.
@@ -199,15 +199,15 @@ fn run_case(case: &BlockchainTest) -> Result<(), Error> {
 
     provider
         .insert_block(genesis_block.clone(), StorageLocation::Database)
-        .map_err(|_| Error::BlockProcessingFailed { block_number: 0 })?;
+        .map_err(|err| Error::block_failed(0, err))?;
     let genesis_state = case.pre.clone().into_genesis_state();
     insert_genesis_state(&provider, genesis_state.iter())
-        .map_err(|_| Error::BlockProcessingFailed { block_number: 0 })?;
+        .map_err(|err| Error::block_failed(0, err))?;
     insert_genesis_hashes(&provider, genesis_state.iter())
-        .map_err(|_| Error::BlockProcessingFailed { block_number: 0 })?;
+        .map_err(|err| Error::block_failed(0, err))?;
     insert_genesis_history(&provider, genesis_state.iter())
-        .map_err(|_| Error::BlockProcessingFailed { block_number: 0 })?;
+        .map_err(|err| Error::block_failed(0, err))?;
 
     // Decode blocks
     let blocks = decode_blocks(&case.blocks)?;
@@ -223,11 +223,11 @@ fn run_case(case: &BlockchainTest) -> Result<(), Error> {
         // Insert the block into the database
         provider
             .insert_block(block.clone(), StorageLocation::Database)
-            .map_err(|_| Error::BlockProcessingFailed { block_number })?;
+            .map_err(|err| Error::block_failed(block_number, err))?;
 
         // Consensus checks before block execution
         pre_execution_checks(chain_spec.clone(), &parent, block)
-            .map_err(|_| Error::BlockProcessingFailed { block_number })?;
+            .map_err(|err| Error::block_failed(block_number, err))?;
 
         let mut witness_record = ExecutionWitnessRecord::default();
 
@@ -240,11 +240,11 @@ fn run_case(case: &BlockchainTest) -> Result<(), Error> {
             .execute_with_state_closure(&(*block).clone(), |statedb: &State<_>| {
                 witness_record.record_executed_state(statedb);
             })
-            .map_err(|_| Error::BlockProcessingFailed { block_number })?;
+            .map_err(|err| Error::block_failed(block_number, err))?;
 
         // Consensus checks after block execution
         validate_block_post_execution(block, &chain_spec, &output.receipts, &output.requests)
-            .map_err(|_| Error::BlockProcessingFailed { block_number })?;
+            .map_err(|err| Error::block_failed(block_number, err))?;
 
         // Generate the stateless witness
         // TODO: Most of this code is copy-pasted from debug_executionWitness
@@ -278,9 +278,12 @@ fn run_case(case: &BlockchainTest) -> Result<(), Error> {
             HashedPostState::from_bundle_state::<KeccakKeyHasher>(output.state.state());
         let (computed_state_root, _) =
             StateRoot::overlay_root_with_updates(provider.tx_ref(), hashed_state.clone())
-                .map_err(|_| Error::BlockProcessingFailed { block_number })?;
+                .map_err(|err| Error::block_failed(block_number, err))?;
         if computed_state_root != block.state_root {
-            return Err(Error::BlockProcessingFailed { block_number })
+            return Err(Error::block_failed(
+                block_number,
+                Error::Assertion("state root mismatch".to_string()),
+            ))
         }
 
         // Commit the post state/state diff to the database
@@ -290,14 +293,14 @@ fn run_case(case: &BlockchainTest) -> Result<(), Error> {
                 OriginalValuesKnown::Yes,
                 StorageLocation::Database,
             )
-            .map_err(|_| Error::BlockProcessingFailed { block_number })?;
+            .map_err(|err| Error::block_failed(block_number, err))?;
 
         provider
             .write_hashed_state(&hashed_state.into_sorted())
-            .map_err(|_| Error::BlockProcessingFailed { block_number })?;
+            .map_err(|err| Error::block_failed(block_number, err))?;
         provider
             .update_history_indices(block.number..=block.number)
-            .map_err(|_| Error::BlockProcessingFailed { block_number })?;
+            .map_err(|err| Error::block_failed(block_number, err))?;
 
         // Since there were no errors, update the parent block
         parent = block.clone()
@@ -341,12 +344,10 @@ fn decode_blocks(
         let block_number = (block_index + 1) as u64;
 
         let decoded = SealedBlock::<Block>::decode(&mut block.rlp.as_ref())
-            .map_err(|_| Error::BlockProcessingFailed { block_number })?;
+            .map_err(|err| Error::block_failed(block_number, err))?;
 
-        let recovered_block = decoded
-            .clone()
-            .try_recover()
-            .map_err(|_| Error::BlockProcessingFailed { block_number })?;
+        let recovered_block =
+            decoded.clone().try_recover().map_err(|err| Error::block_failed(block_number, err))?;
 
         blocks.push(recovered_block);
     }
diff --git a/testing/ef-tests/src/result.rs b/testing/ef-tests/src/result.rs
index a1bed359b07..f53a4fab256 100644
--- a/testing/ef-tests/src/result.rs
+++ b/testing/ef-tests/src/result.rs
@@ -23,10 +23,13 @@ pub enum Error {
     /// Block processing failed
     /// Note: This includes but is not limited to execution.
     /// For example, the header number could be incorrect.
-    #[error("block {block_number} failed to process")]
+    #[error("block {block_number} failed to process: {err}")]
     BlockProcessingFailed {
         /// The block number for the block that failed
        block_number: u64,
+        /// The specific error
+        #[source]
+        err: Box<dyn std::error::Error + Send + Sync>,
     },
     /// An IO error occurred
     #[error("an error occurred interacting with the file system at {path}: {error}")]
@@ -63,6 +66,16 @@ pub enum Error {
     ConsensusError(#[from] reth_consensus::ConsensusError),
 }
 
+impl Error {
+    /// Create a new [`Error::BlockProcessingFailed`] error.
+    pub fn block_failed(
+        block_number: u64,
+        err: impl std::error::Error + Send + Sync + 'static,
+    ) -> Self {
+        Self::BlockProcessingFailed { block_number, err: Box::new(err) }
+    }
+}
+
 /// The result of running a test.
 #[derive(Debug)]
 pub struct CaseResult {
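Aside (not part of the diff): `Error::block_failed` accepts any `std::error::Error + Send + Sync + 'static`, so call sites can forward the concrete cause instead of discarding it, and failures now print as "block N failed to process: &lt;cause&gt;". A hypothetical call site, assuming the crate exposes the module as `ef_tests::result`:

```rust
use ef_tests::result::Error;

/// Hypothetical: a fallible per-block step whose concrete error (here a
/// `ParseIntError`) is preserved as the `#[source]` of the test failure.
fn parse_block_gas(block_number: u64, raw: &str) -> Result<u64, Error> {
    raw.parse::<u64>().map_err(|err| Error::block_failed(block_number, err))
}
```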