From 72908f9142d2a4e1ed9b4658f638a707474a3db6 Mon Sep 17 00:00:00 2001 From: clabby Date: Fri, 20 Sep 2024 12:53:17 -0400 Subject: [PATCH] feat(derive): Typed error handling (#540) * checkpoint * checkpoint * compiling + passing tests * rename error * lint * fix test-utils feature * add resets * codecov * clean * update codecov rules * upload test results * nextest cfg --- .config/nextest.toml | 2 + .github/codecov.yml | 5 +- .github/workflows/coverage.yaml | 11 +- Cargo.lock | 42 +- Cargo.toml | 1 + bin/client/src/l1/blob_provider.rs | 13 +- bin/client/src/l1/chain_provider.rs | 2 + bin/client/src/l1/driver.rs | 21 +- bin/client/src/l2/chain_provider.rs | 6 +- crates/derive/Cargo.toml | 5 +- crates/derive/src/batch/mod.rs | 14 +- crates/derive/src/batch/span_batch/bits.rs | 4 +- crates/derive/src/batch/span_batch/errors.rs | 86 +--- crates/derive/src/errors.rs | 405 ++++++------------ crates/derive/src/online/alloy_providers.rs | 100 +++-- crates/derive/src/online/beacon_client.rs | 53 +-- crates/derive/src/online/blob_provider.rs | 123 +++--- crates/derive/src/online/test_utils.rs | 2 + crates/derive/src/pipeline/core.rs | 48 ++- crates/derive/src/pipeline/mod.rs | 2 +- crates/derive/src/sources/blobs.rs | 37 +- crates/derive/src/sources/calldata.rs | 24 +- crates/derive/src/sources/ethereum.rs | 4 +- crates/derive/src/sources/variant.rs | 4 +- crates/derive/src/stages/attributes_queue.rs | 51 +-- .../src/stages/attributes_queue/builder.rs | 80 ++-- .../src/stages/attributes_queue/deposits.rs | 22 +- crates/derive/src/stages/batch_queue.rs | 102 ++--- crates/derive/src/stages/channel_bank.rs | 64 +-- crates/derive/src/stages/channel_reader.rs | 35 +- crates/derive/src/stages/frame_queue.rs | 31 +- crates/derive/src/stages/l1_retrieval.rs | 32 +- crates/derive/src/stages/l1_traversal.rs | 47 +- .../src/stages/test_utils/attributes_queue.rs | 24 +- .../src/stages/test_utils/batch_queue.rs | 14 +- .../src/stages/test_utils/channel_bank.rs | 14 +- .../src/stages/test_utils/channel_reader.rs | 14 +- .../src/stages/test_utils/frame_queue.rs | 14 +- .../stages/test_utils/sys_config_fetcher.rs | 2 + crates/derive/src/stages/utils.rs | 10 +- crates/derive/src/traits/attributes.rs | 4 +- crates/derive/src/traits/data_sources.rs | 18 +- crates/derive/src/traits/pipeline.rs | 11 +- crates/derive/src/traits/providers.rs | 28 +- crates/derive/src/traits/stages.rs | 6 +- crates/derive/src/traits/test_utils.rs | 35 +- crates/primitives/Cargo.toml | 1 + crates/primitives/src/blob.rs | 14 +- crates/primitives/src/payload.rs | 78 ++-- examples/trusted-sync/src/main.rs | 23 +- 50 files changed, 894 insertions(+), 894 deletions(-) create mode 100644 .config/nextest.toml diff --git a/.config/nextest.toml b/.config/nextest.toml new file mode 100644 index 000000000..76fd74b5d --- /dev/null +++ b/.config/nextest.toml @@ -0,0 +1,2 @@ +[profile.ci.junit] +path = "junit.xml" diff --git a/.github/codecov.yml b/.github/codecov.yml index e79b3c7e5..d53af6a17 100644 --- a/.github/codecov.yml +++ b/.github/codecov.yml @@ -19,8 +19,9 @@ coverage: ignore: - "tests" - - "test_util*" - - "test_utils" + - "test_util.rs" + - "test_utils.rs" + - "crates/derive/src/stages/test_utils" - "bin/" # Make comments less noisy diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 9cdd4d41e..bbab1da37 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -25,13 +25,20 @@ jobs: if: hashFiles('Cargo.lock') == '' run: cargo generate-lockfile - name: cargo llvm-cov - run: cargo 
llvm-cov nextest --locked --workspace --lcov --output-path lcov.info --features test-utils + run: | + cargo llvm-cov nextest --locked --workspace --lcov --output-path lcov.info --features test-utils --profile ci && \ + mv ./target/nextest/ci/junit.xml ./junit.xml - name: Record Rust version run: echo "RUST=$(rustc --version)" >> "$GITHUB_ENV" - - name: Upload to codecov.io + - name: Upload coverage to codecov.io uses: codecov/codecov-action@v4 with: fail_ci_if_error: true token: ${{ secrets.CODECOV_TOKEN }} env_vars: OS,RUST files: lcov.info + - name: Upload test results to codecov.io + if: ${{ !cancelled() }} + uses: codecov/test-results-action@v1 + with: + token: ${{ secrets.CODECOV_TOKEN }} diff --git a/Cargo.lock b/Cargo.lock index 932905488..e81ad3524 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -340,7 +340,7 @@ dependencies = [ "alloy-sol-types", "serde", "serde_json", - "thiserror", + "thiserror 1.0.63 (registry+https://github.com/rust-lang/crates.io-index)", "tracing", ] @@ -362,7 +362,7 @@ dependencies = [ "async-trait", "auto_impl", "futures-utils-wasm", - "thiserror", + "thiserror 1.0.63 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -389,7 +389,7 @@ dependencies = [ "rand", "serde_json", "tempfile", - "thiserror", + "thiserror 1.0.63 (registry+https://github.com/rust-lang/crates.io-index)", "tracing", "url", ] @@ -444,7 +444,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror", + "thiserror 1.0.63 (registry+https://github.com/rust-lang/crates.io-index)", "tokio", "tracing", "url", @@ -562,7 +562,7 @@ dependencies = [ "auto_impl", "elliptic-curve", "k256", - "thiserror", + "thiserror 1.0.63 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -635,7 +635,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror", + "thiserror 1.0.63 (registry+https://github.com/rust-lang/crates.io-index)", "tokio", "tower 0.5.1", "tracing", @@ -1276,7 +1276,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7bb11bd1378bf3731b182997b40cefe00aba6a6cc74042c8318c1b271d3badf7" dependencies = [ "nix 0.27.1", - "thiserror", + "thiserror 1.0.63 (registry+https://github.com/rust-lang/crates.io-index)", "tokio", ] @@ -2367,6 +2367,7 @@ dependencies = [ "serde", "serde_json", "spin", + "thiserror 1.0.63 (git+https://github.com/quartiq/thiserror?branch=no-std)", "tokio", "tracing", "tracing-subscriber", @@ -2489,6 +2490,7 @@ dependencies = [ "serde", "serde_json", "sha2", + "thiserror 1.0.63 (git+https://github.com/quartiq/thiserror?branch=no-std)", "tracing", ] @@ -3090,7 +3092,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c73c26c01b8c87956cea613c907c9d6ecffd8d18a2a5908e5de0adfaa185cea" dependencies = [ "memchr", - "thiserror", + "thiserror 1.0.63 (registry+https://github.com/rust-lang/crates.io-index)", "ucd-trie", ] @@ -3195,7 +3197,7 @@ dependencies = [ "smallvec", "symbolic-demangle", "tempfile", - "thiserror", + "thiserror 1.0.63 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3304,7 +3306,7 @@ dependencies = [ "parking_lot", "procfs", "protobuf", - "thiserror", + "thiserror 1.0.63 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4364,7 +4366,15 @@ version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.63 
(registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "thiserror"
+version = "1.0.63"
+source = "git+https://github.com/quartiq/thiserror?branch=no-std#44737a516b7fd0cc9dabcab07e7b1f927f8f5636"
+dependencies = [
+ "thiserror-impl 1.0.63 (git+https://github.com/quartiq/thiserror?branch=no-std)",
 ]

 [[package]]
@@ -4378,6 +4388,16 @@ dependencies = [
  "syn 2.0.77",
 ]

+[[package]]
+name = "thiserror-impl"
+version = "1.0.63"
+source = "git+https://github.com/quartiq/thiserror?branch=no-std#44737a516b7fd0cc9dabcab07e7b1f927f8f5636"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.77",
+]
+
 [[package]]
 name = "thread_local"
 version = "1.1.8"
diff --git a/Cargo.toml b/Cargo.toml
index f3daa4b99..3b380861e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -48,6 +48,7 @@ kona-primitives = { path = "crates/primitives", version = "0.0.2", default-featu

 # General
 anyhow = { version = "1.0.86", default-features = false }
+thiserror = { git = "https://github.com/quartiq/thiserror", branch = "no-std", default-features = false }
 cfg-if = "1.0.0"
 hashbrown = "0.14.5"
 spin = { version = "0.9.8", features = ["mutex"] }
diff --git a/bin/client/src/l1/blob_provider.rs b/bin/client/src/l1/blob_provider.rs
index 8152e260e..5fe636339 100644
--- a/bin/client/src/l1/blob_provider.rs
+++ b/bin/client/src/l1/blob_provider.rs
@@ -5,8 +5,9 @@ use alloc::{boxed::Box, sync::Arc, vec::Vec};
 use alloy_consensus::Blob;
 use alloy_eips::eip4844::FIELD_ELEMENTS_PER_BLOB;
 use alloy_primitives::keccak256;
+use anyhow::Result;
 use async_trait::async_trait;
-use kona_derive::{errors::BlobProviderError, traits::BlobProvider};
+use kona_derive::traits::BlobProvider;
 use kona_preimage::{CommsClient, PreimageKey, PreimageKeyType};
 use kona_primitives::IndexedBlobHash;
 use op_alloy_protocol::BlockInfo;
@@ -32,11 +33,7 @@ impl<T: CommsClient> OracleBlobProvider<T> {
     /// ## Returns
     /// - `Ok(blob)`: The blob.
     /// - `Err(e)`: The blob could not be retrieved.
-    async fn get_blob(
-        &self,
-        block_ref: &BlockInfo,
-        blob_hash: &IndexedBlobHash,
-    ) -> Result<Blob, BlobProviderError> {
+    async fn get_blob(&self, block_ref: &BlockInfo, blob_hash: &IndexedBlobHash) -> Result<Blob> {
         let mut blob_req_meta = [0u8; 48];
         blob_req_meta[0..32].copy_from_slice(blob_hash.hash.as_ref());
         blob_req_meta[32..40].copy_from_slice((blob_hash.index as u64).to_be_bytes().as_ref());
@@ -76,11 +73,13 @@ impl<T: CommsClient> OracleBlobProvider<T> {
 #[async_trait]
 impl<T: CommsClient + Sync + Send> BlobProvider for OracleBlobProvider<T> {
+    type Error = anyhow::Error;
+
     async fn get_blobs(
         &mut self,
         block_ref: &BlockInfo,
         blob_hashes: &[IndexedBlobHash],
-    ) -> Result<Vec<Blob>, BlobProviderError> {
+    ) -> Result<Vec<Blob>, Self::Error> {
         let mut blobs = Vec::with_capacity(blob_hashes.len());
         for hash in blob_hashes {
             blobs.push(self.get_blob(block_ref, hash).await?);
diff --git a/bin/client/src/l1/chain_provider.rs b/bin/client/src/l1/chain_provider.rs
index 70c3484cc..dd0d0e7bf 100644
--- a/bin/client/src/l1/chain_provider.rs
+++ b/bin/client/src/l1/chain_provider.rs
@@ -31,6 +31,8 @@ impl<T: CommsClient> OracleL1ChainProvider<T> {
 #[async_trait]
 impl<T: CommsClient + Sync + Send> ChainProvider for OracleL1ChainProvider<T> {
+    type Error = anyhow::Error;
+
     async fn header_by_hash(&mut self, hash: B256) -> Result<Header>
{ // Send a hint for the block header. self.oracle.write(&HintType::L1BlockHeader.encode_with(&[hash.as_ref()])).await?; diff --git a/bin/client/src/l1/driver.rs b/bin/client/src/l1/driver.rs index 31bc4e72e..fbe5c0ccc 100644 --- a/bin/client/src/l1/driver.rs +++ b/bin/client/src/l1/driver.rs @@ -10,14 +10,14 @@ use alloy_consensus::{Header, Sealed}; use anyhow::{anyhow, Result}; use core::fmt::Debug; use kona_derive::{ - errors::StageError, + errors::PipelineErrorKind, pipeline::{DerivationPipeline, Pipeline, PipelineBuilder, StepResult}, sources::EthereumDataSource, stages::{ AttributesQueue, BatchQueue, ChannelBank, ChannelReader, FrameQueue, L1Retrieval, L1Traversal, StatefulAttributesBuilder, }, - traits::{BlobProvider, ChainProvider, L2ChainProvider}, + traits::{BlobProvider, ChainProvider, L2ChainProvider, OriginProvider}, }; use kona_mpt::TrieDBFetcher; use kona_preimage::{CommsClient, PreimageKey, PreimageKeyType}; @@ -171,8 +171,21 @@ where // Break the loop unless the error signifies that there is not enough data to // complete the current step. In this case, we retry the step to see if other // stages can make progress. - if !matches!(e, StageError::NotEnoughData | StageError::Temporary(_)) { - break; + match e { + PipelineErrorKind::Temporary(_) => { /* continue */ } + PipelineErrorKind::Reset(_) => { + // Reset the pipeline to the initial L2 safe head and L1 origin, + // and try again. + self.pipeline + .reset( + self.l2_safe_head.block_info, + self.pipeline + .origin() + .ok_or_else(|| anyhow!("Missing L1 origin"))?, + ) + .await?; + } + PipelineErrorKind::Critical(_) => return Err(e.into()), } } } diff --git a/bin/client/src/l2/chain_provider.rs b/bin/client/src/l2/chain_provider.rs index 6304f38ec..796945393 100644 --- a/bin/client/src/l2/chain_provider.rs +++ b/bin/client/src/l2/chain_provider.rs @@ -69,12 +69,14 @@ impl OracleL2ChainProvider { #[async_trait] impl L2ChainProvider for OracleL2ChainProvider { + type Error = anyhow::Error; + async fn l2_block_info_by_number(&mut self, number: u64) -> Result { // Get the payload at the given block number. let payload = self.payload_by_number(number).await?; // Construct the system config from the payload. - payload.to_l2_block_ref(&self.boot_info.rollup_config) + payload.to_l2_block_ref(&self.boot_info.rollup_config).map_err(Into::into) } async fn payload_by_number(&mut self, number: u64) -> Result { @@ -114,7 +116,7 @@ impl L2ChainProvider for OracleL2ChainProvider let payload = self.payload_by_number(number).await?; // Construct the system config from the payload. 
- payload.to_system_config(rollup_config.as_ref()) + payload.to_system_config(rollup_config.as_ref()).map_err(Into::into) } } diff --git a/crates/derive/Cargo.toml b/crates/derive/Cargo.toml index 23cde6238..36572e9a1 100644 --- a/crates/derive/Cargo.toml +++ b/crates/derive/Cargo.toml @@ -26,7 +26,7 @@ unsigned-varint.workspace = true miniz_oxide.workspace = true brotli.workspace = true alloc-no-stdlib.workspace = true -anyhow.workspace = true +thiserror.workspace = true tracing.workspace = true async-trait.workspace = true @@ -52,9 +52,11 @@ alloy-rpc-client = { workspace = true, optional = true } tracing-subscriber = { workspace = true, optional = true } alloy-node-bindings = { workspace = true, optional = true } alloy-transport-http = { workspace = true, optional = true } +anyhow = { workspace = true, optional = true } [dev-dependencies] spin.workspace = true +anyhow.workspace = true alloy-rpc-client.workspace = true alloy-transport-http.workspace = true tokio.workspace = true @@ -91,6 +93,7 @@ online = [ ] test-utils = [ "dep:spin", + "dep:anyhow", "dep:alloy-transport-http", "dep:alloy-node-bindings", "dep:tracing-subscriber", diff --git a/crates/derive/src/batch/mod.rs b/crates/derive/src/batch/mod.rs index f26058c57..2aa9d4faa 100644 --- a/crates/derive/src/batch/mod.rs +++ b/crates/derive/src/batch/mod.rs @@ -5,7 +5,7 @@ use alloy_rlp::{Buf, Decodable}; use op_alloy_genesis::RollupConfig; use op_alloy_protocol::{BlockInfo, L2BlockInfo}; -use crate::{errors::DecodeError, traits::L2ChainProvider}; +use crate::{errors::PipelineEncodingError, traits::L2ChainProvider}; mod batch_type; pub use batch_type::BatchType; @@ -79,9 +79,9 @@ impl Batch { } /// Attempts to decode a batch from a reader. - pub fn decode(r: &mut &[u8], cfg: &RollupConfig) -> Result { + pub fn decode(r: &mut &[u8], cfg: &RollupConfig) -> Result { if r.is_empty() { - return Err(DecodeError::EmptyBuffer); + return Err(PipelineEncodingError::EmptyBuffer); } // Read the batch type @@ -90,15 +90,15 @@ impl Batch { match batch_type { BatchType::Single => { - let single_batch = SingleBatch::decode(r)?; + let single_batch = + SingleBatch::decode(r).map_err(PipelineEncodingError::AlloyRlpError)?; Ok(Batch::Single(single_batch)) } BatchType::Span => { - let mut raw_span_batch = - RawSpanBatch::decode(r, cfg).map_err(DecodeError::SpanBatchError)?; + let mut raw_span_batch = RawSpanBatch::decode(r, cfg)?; let span_batch = raw_span_batch .derive(cfg.block_time, cfg.genesis.l2_time, cfg.l2_chain_id) - .map_err(DecodeError::SpanBatchError)?; + .map_err(PipelineEncodingError::SpanBatchError)?; Ok(Batch::Span(span_batch)) } } diff --git a/crates/derive/src/batch/span_batch/bits.rs b/crates/derive/src/batch/span_batch/bits.rs index b8e1e2251..4bb0a864d 100644 --- a/crates/derive/src/batch/span_batch/bits.rs +++ b/crates/derive/src/batch/span_batch/bits.rs @@ -1,12 +1,10 @@ //! Module for working with span batch bits. +use super::{errors::SpanBatchError, FJORD_MAX_SPAN_BATCH_BYTES, MAX_SPAN_BATCH_BYTES}; use alloc::{vec, vec::Vec}; use alloy_rlp::Buf; -use anyhow::Result; use core::cmp::Ordering; -use super::{errors::SpanBatchError, FJORD_MAX_SPAN_BATCH_BYTES, MAX_SPAN_BATCH_BYTES}; - /// Type for span batch bits. 
#[derive(Debug, Default, Clone, PartialEq, Eq)] pub struct SpanBatchBits(pub Vec); diff --git a/crates/derive/src/batch/span_batch/errors.rs b/crates/derive/src/batch/span_batch/errors.rs index 7245a41ab..23feb603d 100644 --- a/crates/derive/src/batch/span_batch/errors.rs +++ b/crates/derive/src/batch/span_batch/errors.rs @@ -1,108 +1,62 @@ //! Span Batch Errors -use core::fmt::Display; +use thiserror::Error; /// Span Batch Errors -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Error, Debug, Clone, PartialEq, Eq)] pub enum SpanBatchError { /// The span batch is too big + #[error("The span batch is too big.")] TooBigSpanBatchSize, /// The bit field is too long + #[error("The bit field is too long")] BitfieldTooLong, - /// Failed to set [alloy_primitives::U256] from big-endian slice - InvalidBitSlice, /// Empty Span Batch + #[error("Empty span batch")] EmptySpanBatch, /// Missing L1 origin + #[error("Missing L1 origin")] MissingL1Origin, - /// Encoding errors - Encoding(EncodingError), /// Decoding errors - Decoding(SpanDecodingError), -} - -impl Display for SpanBatchError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - SpanBatchError::TooBigSpanBatchSize => write!(f, "The span batch is too big"), - SpanBatchError::BitfieldTooLong => write!(f, "The bit field is too long"), - SpanBatchError::InvalidBitSlice => { - write!(f, "Failed to set [alloy_primitives::U256] from big-endian slice") - } - SpanBatchError::EmptySpanBatch => write!(f, "Empty Span Batch"), - SpanBatchError::MissingL1Origin => write!(f, "Missing L1 origin"), - SpanBatchError::Encoding(e) => write!(f, "Encoding error: {:?}", e), - SpanBatchError::Decoding(e) => write!(f, "Decoding error: {:?}", e), - } - } -} - -/// Encoding Error -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum EncodingError { - /// Failed to encode span batch - SpanBatch, - /// Failed to encode span batch bits - SpanBatchBits, -} - -impl Display for EncodingError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - EncodingError::SpanBatch => write!(f, "Failed to encode span batch"), - EncodingError::SpanBatchBits => write!(f, "Failed to encode span batch bits"), - } - } + #[error("Span batch decoding error: {0}")] + Decoding(#[from] SpanDecodingError), } /// Decoding Error -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Error, Debug, Clone, PartialEq, Eq)] pub enum SpanDecodingError { /// Failed to decode relative timestamp + #[error("Failed to decode relative timestamp")] RelativeTimestamp, /// Failed to decode L1 origin number + #[error("Failed to decode L1 origin number")] L1OriginNumber, /// Failed to decode parent check + #[error("Failed to decode parent check")] ParentCheck, /// Failed to decode L1 origin check + #[error("Failed to decode L1 origin check")] L1OriginCheck, /// Failed to decode block count + #[error("Failed to decode block count")] BlockCount, /// Failed to decode block tx counts + #[error("Failed to decode block tx counts")] BlockTxCounts, /// Failed to decode transaction nonces + #[error("Failed to decode transaction nonces")] TxNonces, /// Mismatch in length between the transaction type and signature arrays in a span batch /// transaction payload. 
+    #[error("Mismatch in length between the transaction type and signature arrays")]
     TypeSignatureLenMismatch,
     /// Invalid transaction type
+    #[error("Invalid transaction type")]
     InvalidTransactionType,
     /// Invalid transaction data
+    #[error("Invalid transaction data")]
     InvalidTransactionData,
     /// Invalid transaction signature
+    #[error("Invalid transaction signature")]
     InvalidTransactionSignature,
 }
-
-impl Display for SpanDecodingError {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        match self {
-            SpanDecodingError::RelativeTimestamp => {
-                write!(f, "Failed to decode relative timestamp")
-            }
-            SpanDecodingError::L1OriginNumber => write!(f, "Failed to decode L1 origin number"),
-            SpanDecodingError::ParentCheck => write!(f, "Failed to decode parent check"),
-            SpanDecodingError::L1OriginCheck => write!(f, "Failed to decode L1 origin check"),
-            SpanDecodingError::BlockCount => write!(f, "Failed to decode block count"),
-            SpanDecodingError::BlockTxCounts => write!(f, "Failed to decode block tx counts"),
-            SpanDecodingError::TxNonces => write!(f, "Failed to decode transaction nonces"),
-            SpanDecodingError::TypeSignatureLenMismatch => {
-                write!(f, "Mismatch in length between the transaction type and signature arrays in a span batch transaction payload")
-            }
-            SpanDecodingError::InvalidTransactionType => write!(f, "Invalid transaction type"),
-            SpanDecodingError::InvalidTransactionData => write!(f, "Invalid transaction data"),
-            SpanDecodingError::InvalidTransactionSignature => {
-                write!(f, "Invalid transaction signature")
-            }
-        }
-    }
-}
diff --git a/crates/derive/src/errors.rs b/crates/derive/src/errors.rs
index 25cb5152e..1e6579ceb 100644
--- a/crates/derive/src/errors.rs
+++ b/crates/derive/src/errors.rs
@@ -1,324 +1,201 @@
 //! This module contains derivation errors thrown within the pipeline.

 use crate::batch::SpanBatchError;
-use alloc::vec::Vec;
+use alloc::string::String;
 use alloy_eips::BlockNumHash;
-use alloy_primitives::{Bytes, B256};
-use core::fmt::Display;
+use alloy_primitives::B256;
+use kona_primitives::BlobDecodingError;
 use op_alloy_genesis::system::SystemConfigUpdateError;
-use op_alloy_protocol::Frame;
+use op_alloy_protocol::DepositError;
+use thiserror::Error;

 /// A result type for the derivation pipeline stages.
-pub type StageResult<T> = Result<T, StageError>;
+pub type PipelineResult<T> = Result<T, PipelineErrorKind>;
+
+/// [crate::ensure] is a short-hand for bubbling up errors in the case of a condition not being met.
+#[macro_export]
+macro_rules! ensure {
+    ($cond:expr, $err:expr) => {
+        if !($cond) {
+            return Err($err);
+        }
+    };
+}
+
+/// A top level filter for [PipelineError] that sorts by severity.
+#[derive(Error, Debug, PartialEq, Eq)]
+pub enum PipelineErrorKind {
+    /// A temporary error.
+    #[error("Temporary error: {0}")]
+    Temporary(#[source] PipelineError),
+    /// A critical error.
+    #[error("Critical error: {0}")]
+    Critical(#[source] PipelineError),
+    /// A reset error.
+    #[error("Pipeline reset: {0}")]
+    Reset(#[from] ResetError),
+}

-/// An error that is thrown within the stages of the derivation pipeline.
-#[derive(Debug)]
-pub enum StageError {
+/// An error encountered during the processing.
+#[derive(Error, Debug, PartialEq, Eq)]
+pub enum PipelineError {
     /// There is no data to read from the channel bank.
+    #[error("EOF")]
     Eof,
-    /// A temporary error that allows the operation to be retried.
-    Temporary(anyhow::Error),
-    /// A critical error.
- Critical(anyhow::Error), - /// There is not enough data progress, but if we wait, the stage will eventually return data - /// or produce an EOF error. + /// There is not enough data to complete the processing of the stage. If the operation is + /// re-tried, more data will come in allowing the pipeline to progress, or eventually a + /// [PipelineError::Eof] will be encountered. + #[error("Not enough data")] NotEnoughData, - /// Failed to fetch block info and transactions by hash. - BlockFetch(B256), - /// No item returned from the previous stage iterator. - Empty, - /// No channels are available in the channel bank. - NoChannelsAvailable, - /// No channel returned by the [crate::stages::ChannelReader] stage. - NoChannel, - /// Failed to find channel. + /// No channels are available in the [ChannelBank]. + /// + /// [ChannelBank]: crate::stages::ChannelBank + #[error("The channel bank is empty")] + ChannelBankEmpty, + /// Failed to find channel in the [ChannelBank]. + /// + /// [ChannelBank]: crate::stages::ChannelBank + #[error("Channel not found in channel bank")] ChannelNotFound, + /// No channel returned by the [ChannelReader] stage. + /// + /// [ChannelReader]: crate::stages::ChannelReader + #[error("The channel reader has no channel available")] + ChannelReaderEmpty, + /// The [BatchQueue] is empty. + /// + /// [BatchQueue]: crate::stages::BatchQueue + #[error("The batch queue has no batches available")] + BatchQueueEmpty, /// Missing L1 origin. + #[error("Missing L1 origin from previous stage")] MissingOrigin, - /// Failed to build the [OptimismPayloadAttributes] for the next batch. + /// Missing data from [L1Retrieval]. /// - /// [OptimismPayloadAttributes]: op_alloy_rpc_types_engine::OptimismPayloadAttributes - AttributesBuild(BuilderError), - /// Reset the pipeline. - Reset(ResetError), - /// The stage detected a block reorg. - /// The first argument is the expected block hash. - /// The second argument is the paren_hash of the next l1 origin block. - ReorgDetected(B256, B256), - /// Receipt fetching error. - ReceiptFetch(anyhow::Error), - /// [op_alloy_protocol::BlockInfo] fetching error. - BlockInfoFetch(anyhow::Error), - /// [op_alloy_genesis::SystemConfig] update error. + /// [L1Retrieval]: crate::stages::L1Retrieval + #[error("L1 Retrieval missing data")] + MissingL1Data, + /// [SystemConfig] update error. + /// + /// [SystemConfig]: op_alloy_genesis::SystemConfig + #[error("Error updating system config: {0}")] SystemConfigUpdate(SystemConfigUpdateError), - /// Other wildcard error. 
- Custom(anyhow::Error), -} - -impl PartialEq for StageError { - fn eq(&self, other: &StageError) -> bool { - // if it's a reorg detected check the block hashes - if let (StageError::ReorgDetected(a, b), StageError::ReorgDetected(c, d)) = (self, other) { - return a == c && b == d; - } - if let (StageError::Reset(a), StageError::Reset(b)) = (self, other) { - return a == b; - } - matches!( - (self, other), - (StageError::Eof, StageError::Eof) | - (StageError::Temporary(_), StageError::Temporary(_)) | - (StageError::Critical(_), StageError::Critical(_)) | - (StageError::NotEnoughData, StageError::NotEnoughData) | - (StageError::NoChannelsAvailable, StageError::NoChannelsAvailable) | - (StageError::NoChannel, StageError::NoChannel) | - (StageError::ChannelNotFound, StageError::ChannelNotFound) | - (StageError::MissingOrigin, StageError::MissingOrigin) | - (StageError::AttributesBuild(_), StageError::AttributesBuild(_)) | - (StageError::ReceiptFetch(_), StageError::ReceiptFetch(_)) | - (StageError::BlockInfoFetch(_), StageError::BlockInfoFetch(_)) | - (StageError::SystemConfigUpdate(_), StageError::SystemConfigUpdate(_)) | - (StageError::Custom(_), StageError::Custom(_)) - ) - } -} - -/// Converts a stage result into a vector of frames. -pub fn into_frames>(result: StageResult) -> anyhow::Result> { - match result { - Ok(data) => Ok(Frame::parse_frames(&data.into()).map_err(|e| anyhow::anyhow!(e))?), - Err(e) => Err(anyhow::anyhow!(e)), - } -} - -impl From for StageError { - fn from(e: anyhow::Error) -> Self { - StageError::Custom(e) + /// Attributes builder error variant, with [BuilderError]. + #[error("Attributes builder error: {0}")] + AttributesBuilder(#[from] BuilderError), + /// [PipelineEncodingError] variant. + #[error("Decode error: {0}")] + BadEncoding(#[from] PipelineEncodingError), + /// Provider error variant. + #[error("Blob provider error: {0}")] + Provider(String), +} + +impl PipelineError { + /// Wrap [self] as a [PipelineErrorKind::Critical]. + pub fn crit(self) -> PipelineErrorKind { + PipelineErrorKind::Critical(self) } -} - -impl Display for StageError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - StageError::Eof => write!(f, "End of file"), - StageError::Temporary(e) => write!(f, "Temporary error: {}", e), - StageError::Critical(e) => write!(f, "Critical error: {}", e), - StageError::NotEnoughData => write!(f, "Not enough data"), - StageError::BlockFetch(hash) => { - write!(f, "Failed to fetch block info and transactions by hash: {}", hash) - } - StageError::Empty => write!(f, "Empty"), - StageError::NoChannelsAvailable => write!(f, "No channels available"), - StageError::NoChannel => write!(f, "No channel"), - StageError::ChannelNotFound => write!(f, "Channel not found"), - StageError::MissingOrigin => write!(f, "Missing L1 origin"), - StageError::AttributesBuild(e) => write!(f, "Attributes build error: {}", e), - StageError::Reset(e) => write!(f, "Reset error: {}", e), - StageError::ReceiptFetch(e) => write!(f, "Receipt fetch error: {}", e), - StageError::SystemConfigUpdate(e) => write!(f, "System config update error: {}", e), - StageError::ReorgDetected(current, next) => { - write!(f, "Block reorg detected: {} -> {}", current, next) - } - StageError::BlockInfoFetch(e) => write!(f, "Block info fetch error: {}", e), - StageError::Custom(e) => write!(f, "Custom error: {}", e), - } - } -} -/// An error returned by the [BlobProviderError]. 
-#[derive(Debug)] -pub enum BlobProviderError { - /// The number of specified blob hashes did not match the number of returned sidecars. - SidecarLengthMismatch(usize, usize), - /// Slot derivation error. - Slot(anyhow::Error), - /// A custom [anyhow::Error] occurred. - Custom(anyhow::Error), -} - -impl PartialEq for BlobProviderError { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Self::SidecarLengthMismatch(a, b), Self::SidecarLengthMismatch(c, d)) => { - a == c && b == d - } - (Self::Slot(_), Self::Slot(_)) | (Self::Custom(_), Self::Custom(_)) => true, - _ => false, - } - } -} - -impl Display for BlobProviderError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - Self::SidecarLengthMismatch(a, b) => write!(f, "expected {} sidecars but got {}", a, b), - Self::Slot(e) => { - write!(f, "Slot Derivation Error: {}", e) - } - Self::Custom(err) => write!(f, "{}", err), - } - } -} - -impl From for BlobProviderError { - fn from(err: anyhow::Error) -> Self { - Self::Custom(err) + /// Wrap [self] as a [PipelineErrorKind::Temporary]. + pub fn temp(self) -> PipelineErrorKind { + PipelineErrorKind::Temporary(self) } } /// A reset error -#[derive(Debug)] +#[derive(Error, Debug, Eq, PartialEq)] pub enum ResetError { /// The batch has a bad parent hash. /// The first argument is the expected parent hash, and the second argument is the actual /// parent hash. + #[error("Bad parent hash: expected {0}, got {1}")] BadParentHash(B256, B256), /// The batch has a bad timestamp. /// The first argument is the expected timestamp, and the second argument is the actual /// timestamp. + #[error("Bad timestamp: expected {0}, got {1}")] BadTimestamp(u64, u64), - /// A reorg is required. - ReorgRequired, - /// A new expired challenge. - NewExpiredChallenge, -} - -impl PartialEq for ResetError { - fn eq(&self, other: &ResetError) -> bool { - match (self, other) { - (ResetError::BadParentHash(e1, a1), ResetError::BadParentHash(e2, a2)) => { - e1 == e2 && a1 == a2 - } - (ResetError::BadTimestamp(e1, a1), ResetError::BadTimestamp(e2, a2)) => { - e1 == e2 && a1 == a2 - } - (ResetError::ReorgRequired, ResetError::ReorgRequired) => true, - (ResetError::NewExpiredChallenge, ResetError::NewExpiredChallenge) => true, - _ => false, - } - } -} - -impl Display for ResetError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - ResetError::BadParentHash(expected, actual) => { - write!(f, "Bad parent hash: expected {}, got {}", expected, actual) - } - ResetError::BadTimestamp(expected, actual) => { - write!(f, "Bad timestamp: expected {}, got {}", expected, actual) - } - ResetError::ReorgRequired => write!(f, "Reorg required"), - ResetError::NewExpiredChallenge => write!(f, "New expired challenge"), - } - } + /// L1 origin mismatch. + #[error("L1 origin mismatch. Expected {0:?}, got {1:?}")] + L1OriginMismatch(u64, u64), + /// The stage detected a block reorg. + /// The first argument is the expected block hash. + /// The second argument is the parent_hash of the next l1 origin block. + #[error("L1 reorg detected: expected {0}, got {1}")] + ReorgDetected(B256, B256), + /// Attributes builder error variant, with [BuilderError]. + #[error("Attributes builder error: {0}")] + AttributesBuilder(#[from] BuilderError), } /// A decoding error. -#[derive(Debug)] -pub enum DecodeError { +#[derive(Error, Debug, PartialEq, Eq)] +pub enum PipelineEncodingError { /// The buffer is empty. 
+ #[error("Empty buffer")] EmptyBuffer, + /// Deposit decoding error. + #[error("Error decoding deposit: {0}")] + DepositError(#[from] DepositError), /// Alloy RLP Encoding Error. + #[error("RLP error: {0}")] AlloyRlpError(alloy_rlp::Error), /// Span Batch Error. - SpanBatchError(SpanBatchError), + #[error(transparent)] + SpanBatchError(#[from] SpanBatchError), } -impl From for DecodeError { - fn from(e: alloy_rlp::Error) -> Self { - DecodeError::AlloyRlpError(e) - } -} - -impl PartialEq for DecodeError { - fn eq(&self, other: &DecodeError) -> bool { - matches!( - (self, other), - (DecodeError::EmptyBuffer, DecodeError::EmptyBuffer) | - (DecodeError::AlloyRlpError(_), DecodeError::AlloyRlpError(_)) - ) - } -} - -impl Display for DecodeError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - DecodeError::EmptyBuffer => write!(f, "Empty buffer"), - DecodeError::AlloyRlpError(e) => write!(f, "Alloy RLP Decoding Error: {}", e), - DecodeError::SpanBatchError(e) => write!(f, "Span Batch Decoding Error: {:?}", e), - } - } +/// A frame decompression error. +#[derive(Error, Debug, PartialEq, Eq)] +pub enum BatchDecompressionError { + /// The buffer exceeds the [FJORD_MAX_SPAN_BATCH_BYTES] protocol parameter. + /// + /// [FJORD_MAX_SPAN_BATCH_BYTES]: crate::batch::FJORD_MAX_SPAN_BATCH_BYTES + #[error("The batch exceeds the maximum size of {max_size} bytes", max_size = crate::batch::FJORD_MAX_SPAN_BATCH_BYTES)] + BatchTooLarge, } /// An [AttributesBuilder] Error. /// /// [AttributesBuilder]: crate::stages::AttributesBuilder -#[derive(Debug)] +#[derive(Error, Debug, PartialEq, Eq)] pub enum BuilderError { /// Mismatched blocks. + #[error("Block mismatch. Expected {0:?}, got {1:?}")] BlockMismatch(BlockNumHash, BlockNumHash), /// Mismatched blocks for the start of an Epoch. + #[error("Block mismatch on epoch reset. Expected {0:?}, got {1:?}")] BlockMismatchEpochReset(BlockNumHash, BlockNumHash, B256), /// [SystemConfig] update failed. /// /// [SystemConfig]: op_alloy_genesis::SystemConfig + #[error("System config update failed")] SystemConfigUpdate, /// Broken time invariant between L2 and L1. + #[error("Time invariant broken. L1 origin: {0:?} | Next L2 time: {1} | L1 block: {2:?} | L1 timestamp {3:?}")] BrokenTimeInvariant(BlockNumHash, u64, BlockNumHash, u64), - /// A custom error wrapping [anyhow::Error]. - Custom(anyhow::Error), + /// Attributes unavailable. + #[error("Attributes unavailable")] + AttributesUnavailable, + /// A custom error. 
+ #[error("Error in attributes builder: {0}")] + Custom(String), } -impl PartialEq for BuilderError { - fn eq(&self, other: &BuilderError) -> bool { - match (self, other) { - (BuilderError::BlockMismatch(b1, e1), BuilderError::BlockMismatch(b2, e2)) => { - b1 == b2 && e1 == e2 - } - ( - BuilderError::BlockMismatchEpochReset(b1, e1, e2), - BuilderError::BlockMismatchEpochReset(b2, e3, e4), - ) => e1 == e3 && e2 == e4 && b1 == b2, - ( - BuilderError::BrokenTimeInvariant(b1, t1, b2, t2), - BuilderError::BrokenTimeInvariant(b3, t3, b4, t4), - ) => b1 == b3 && t1 == t3 && b2 == b4 && t2 == t4, - (BuilderError::SystemConfigUpdate, BuilderError::SystemConfigUpdate) | - (BuilderError::Custom(_), BuilderError::Custom(_)) => true, - _ => false, - } - } -} - -impl From for BuilderError { - fn from(e: anyhow::Error) -> Self { - BuilderError::Custom(e) - } -} - -impl Display for BuilderError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - BuilderError::BlockMismatch(block_id, parent) => { - write!(f, "Block mismatch with L1 origin {:?} (parent {:?})", block_id, parent) - } - BuilderError::BlockMismatchEpochReset(block_id, parent, origin) => { - write!( - f, - "Block mismatch with L1 origin {:?} (parent {:?}) on top of L1 origin {}", - block_id, parent, origin - ) - } - BuilderError::SystemConfigUpdate => write!(f, "System config update failed"), - BuilderError::BrokenTimeInvariant(block_id, l2_time, parent, l1_time) => { - write!( - f, - "Cannot build L2 block on top {:?} (time {}) before L1 origin {:?} (time {})", - block_id, l2_time, parent, l1_time - ) - } - BuilderError::Custom(e) => write!(f, "Custom error: {}", e), - } - } +/// An error returned by the [BlobProviderError]. +#[derive(Error, Debug, PartialEq, Eq)] +pub enum BlobProviderError { + /// The number of specified blob hashes did not match the number of returned sidecars. + #[error("Blob sidecar length mismatch: expected {0}, got {1}")] + SidecarLengthMismatch(usize, usize), + /// Slot derivation error. + #[error("Failed to derive slot")] + SlotDerivation, + /// Blob decoding error. + #[error("Blob decoding error: {0}")] + BlobDecoding(#[from] BlobDecodingError), + /// Error pertaining to the backend transport. + #[error("{0}")] + Backend(String), } diff --git a/crates/derive/src/online/alloy_providers.rs b/crates/derive/src/online/alloy_providers.rs index ba94c3bdb..0a6e4cafc 100644 --- a/crates/derive/src/online/alloy_providers.rs +++ b/crates/derive/src/online/alloy_providers.rs @@ -6,10 +6,9 @@ use alloy_consensus::{Header, Receipt, ReceiptWithBloom, TxEnvelope, TxType}; use alloy_primitives::{Bytes, B256, U64}; use alloy_provider::{Provider, ReqwestProvider}; use alloy_rlp::{Buf, Decodable}; -use alloy_transport::TransportResult; -use anyhow::{anyhow, Result}; +use alloy_transport::{RpcError, TransportErrorKind, TransportResult}; use async_trait::async_trait; -use core::num::NonZeroUsize; +use core::{num::NonZeroUsize, str::FromStr}; use kona_primitives::{Block, L2ExecutionPayloadEnvelope, OpBlock}; use lru::LruCache; use op_alloy_genesis::{RollupConfig, SystemConfig}; @@ -60,20 +59,24 @@ impl AlloyChainProvider { } /// Returns the chain ID. 
-    pub async fn chain_id(&mut self) -> Result<u64> {
+    pub async fn chain_id(&mut self) -> Result<u64, RpcError<TransportErrorKind>> {
         let chain_id: TransportResult<alloc::string::String> =
             self.inner.raw_request("eth_chainId".into(), ()).await;
         let chain_id = match chain_id {
-            Ok(s) => alloc::string::String::from(s.trim_start_matches("0x")),
-            Err(e) => return Err(anyhow!(e)),
+            Ok(s) => {
+                U64::from_str(s.as_str()).map_err(|e| RpcError::LocalUsageError(Box::new(e)))?
+            }
+            Err(e) => return Err(e),
         };
-        u64::from_str_radix(&chain_id, 16).map_err(|e| anyhow!(e))
+        Ok(chain_id.to::<u64>())
     }
 }

 #[async_trait]
 impl ChainProvider for AlloyChainProvider {
-    async fn header_by_hash(&mut self, hash: B256) -> Result<Header>
{ + type Error = RpcError; + + async fn header_by_hash(&mut self, hash: B256) -> Result { crate::inc!(PROVIDER_CALLS, &["chain_provider", "header_by_hash"]); crate::timer!(START, PROVIDER_RESPONSE_TIME, &["chain_provider", "header_by_hash"], timer); if let Some(header) = self.header_by_hash_cache.get(&hash) { @@ -82,7 +85,7 @@ impl ChainProvider for AlloyChainProvider { let raw_header: TransportResult = self.inner.raw_request("debug_getRawHeader".into(), [hash]).await; - let raw_header: Bytes = match raw_header.map_err(|e| anyhow!(e)) { + let raw_header: Bytes = match raw_header { Ok(b) => b, Err(e) => { crate::timer!(DISCARD, timer); @@ -93,7 +96,7 @@ impl ChainProvider for AlloyChainProvider { return Err(e); } }; - match Header::decode(&mut raw_header.as_ref()).map_err(|e| anyhow!(e)) { + match Header::decode(&mut raw_header.as_ref()) { Ok(header) => { self.header_by_hash_cache.put(hash, header.clone()); Ok(header) @@ -101,12 +104,12 @@ impl ChainProvider for AlloyChainProvider { Err(e) => { crate::timer!(DISCARD, timer); crate::inc!(PROVIDER_ERRORS, &["chain_provider", "header_by_hash", "decode"]); - Err(e) + Err(RpcError::LocalUsageError(Box::new(e))) } } } - async fn block_info_by_number(&mut self, number: u64) -> Result { + async fn block_info_by_number(&mut self, number: u64) -> Result { crate::inc!(PROVIDER_CALLS, &["chain_provider", "block_info_by_number"]); crate::timer!( START, @@ -120,7 +123,7 @@ impl ChainProvider for AlloyChainProvider { let raw_header: TransportResult = self.inner.raw_request("debug_getRawHeader".into(), [U64::from(number)]).await; - let raw_header: Bytes = match raw_header.map_err(|e| anyhow!(e)) { + let raw_header: Bytes = match raw_header { Ok(b) => b, Err(e) => { crate::timer!(DISCARD, timer); @@ -131,12 +134,12 @@ impl ChainProvider for AlloyChainProvider { return Err(e); } }; - let header = match Header::decode(&mut raw_header.as_ref()).map_err(|e| anyhow!(e)) { + let header = match Header::decode(&mut raw_header.as_ref()) { Ok(h) => h, Err(e) => { crate::timer!(DISCARD, timer); crate::inc!(PROVIDER_ERRORS, &["chain_provider", "block_info_by_number", "decode"]); - return Err(e); + return Err(RpcError::LocalUsageError(Box::new(e))); } }; @@ -150,7 +153,7 @@ impl ChainProvider for AlloyChainProvider { Ok(block_info) } - async fn receipts_by_hash(&mut self, hash: B256) -> Result> { + async fn receipts_by_hash(&mut self, hash: B256) -> Result, Self::Error> { crate::inc!(PROVIDER_CALLS, &["chain_provider", "receipts_by_hash"]); crate::timer!( START, @@ -164,7 +167,7 @@ impl ChainProvider for AlloyChainProvider { let raw_receipts: TransportResult> = self.inner.raw_request("debug_getRawReceipts".into(), [hash]).await; - let raw_receipts: Vec = match raw_receipts.map_err(|e| anyhow!(e)) { + let raw_receipts: Vec = match raw_receipts { Ok(r) => r, Err(e) => { crate::timer!(DISCARD, timer); @@ -186,9 +189,11 @@ impl ChainProvider for AlloyChainProvider { r.advance(1); } - Ok(ReceiptWithBloom::decode(r).map_err(|e| anyhow!(e))?.receipt) + Ok(ReceiptWithBloom::decode(r) + .map_err(|e| RpcError::LocalUsageError(Box::new(e)))? 
+ .receipt) }) - .collect::>>() + .collect::, Self::Error>>() { Ok(r) => r, Err(e) => { @@ -204,7 +209,7 @@ impl ChainProvider for AlloyChainProvider { async fn block_info_and_transactions_by_hash( &mut self, hash: B256, - ) -> Result<(BlockInfo, Vec)> { + ) -> Result<(BlockInfo, Vec), Self::Error> { crate::inc!(PROVIDER_CALLS, &["chain_provider", "block_info_and_transactions_by_hash"]); crate::timer!( START, @@ -219,7 +224,7 @@ impl ChainProvider for AlloyChainProvider { let raw_block: TransportResult = self.inner.raw_request("debug_getRawBlock".into(), [hash]).await; - let raw_block: Bytes = match raw_block.map_err(|e| anyhow!(e)) { + let raw_block: Bytes = match raw_block { Ok(b) => b, Err(e) => { crate::timer!(DISCARD, timer); @@ -230,7 +235,7 @@ impl ChainProvider for AlloyChainProvider { return Err(e); } }; - let block = match Block::decode(&mut raw_block.as_ref()).map_err(|e| anyhow!(e)) { + let block = match Block::decode(&mut raw_block.as_ref()) { Ok(b) => b, Err(e) => { crate::timer!(DISCARD, timer); @@ -238,7 +243,7 @@ impl ChainProvider for AlloyChainProvider { PROVIDER_ERRORS, &["chain_provider", "block_info_and_transactions_by_hash", "decode"] ); - return Err(e); + return Err(RpcError::LocalUsageError(Box::new(e))); } }; @@ -286,27 +291,27 @@ impl AlloyL2ChainProvider { } /// Returns the chain ID. - pub async fn chain_id(&mut self) -> Result { + pub async fn chain_id(&mut self) -> Result> { let chain_id: TransportResult = self.inner.raw_request("eth_chainId".into(), ()).await; let chain_id = match chain_id { - Ok(s) => alloc::string::String::from(s.trim_start_matches("0x")), - Err(e) => return Err(anyhow!(e)), + Ok(s) => { + U64::from_str(s.as_str()).map_err(|e| RpcError::LocalUsageError(Box::new(e)))? + } + Err(e) => return Err(e), }; - u64::from_str_radix(&chain_id, 16).map_err(|e| anyhow!(e)) + Ok(chain_id.to::()) } /// Returns the latest L2 block number. - pub async fn latest_block_number(&mut self) -> Result { - let b: TransportResult = - self.inner.raw_request("eth_blockNumber".into(), ()).await; - match b { - Ok(s) => { - let s = alloc::string::String::from(s.trim_start_matches("0x")); - u64::from_str_radix(&s, 16).map_err(|e| anyhow!(e)) - } - Err(e) => Err(anyhow!(e)), - } + pub async fn latest_block_number(&mut self) -> Result> { + let s = self + .inner + .raw_request::<(), alloc::string::String>("eth_blockNumber".into(), ()) + .await?; + U64::from_str(s.as_str()) + .map_err(|e| RpcError::LocalUsageError(Box::new(e))) + .map(|u| u.to::()) } /// Creates a new [AlloyL2ChainProvider] from the provided [reqwest::Url]. 
@@ -318,7 +323,9 @@ impl AlloyL2ChainProvider { #[async_trait] impl L2ChainProvider for AlloyL2ChainProvider { - async fn l2_block_info_by_number(&mut self, number: u64) -> Result { + type Error = RpcError; + + async fn l2_block_info_by_number(&mut self, number: u64) -> Result { crate::inc!(PROVIDER_CALLS, &["l2_chain_provider", "l2_block_info_by_number"]); crate::timer!( START, @@ -349,14 +356,17 @@ impl L2ChainProvider for AlloyL2ChainProvider { PROVIDER_ERRORS, &["l2_chain_provider", "l2_block_info_by_number", "to_l2_block_ref"] ); - return Err(e); + return Err(RpcError::LocalUsageError(Box::new(e))); } }; self.l2_block_info_by_number_cache.put(number, l2_block_info); Ok(l2_block_info) } - async fn payload_by_number(&mut self, number: u64) -> Result { + async fn payload_by_number( + &mut self, + number: u64, + ) -> Result { crate::inc!(PROVIDER_CALLS, &["l2_chain_provider", "payload_by_number"]); crate::timer!( START, @@ -370,7 +380,7 @@ impl L2ChainProvider for AlloyL2ChainProvider { let raw_block: TransportResult = self.inner.raw_request("debug_getRawBlock".into(), [U64::from(number)]).await; - let raw_block: Bytes = match raw_block.map_err(|e| anyhow!(e)) { + let raw_block: Bytes = match raw_block { Ok(b) => b, Err(e) => { crate::timer!(DISCARD, timer); @@ -381,12 +391,12 @@ impl L2ChainProvider for AlloyL2ChainProvider { return Err(e); } }; - let block = match OpBlock::decode(&mut raw_block.as_ref()).map_err(|e| anyhow!(e)) { + let block = match OpBlock::decode(&mut raw_block.as_ref()) { Ok(b) => b, Err(e) => { crate::timer!(DISCARD, timer); crate::inc!(PROVIDER_ERRORS, &["l2_chain_provider", "payload_by_number", "decode"]); - return Err(e); + return Err(RpcError::LocalUsageError(Box::new(e))); } }; let payload_envelope: L2ExecutionPayloadEnvelope = block.into(); @@ -399,7 +409,7 @@ impl L2ChainProvider for AlloyL2ChainProvider { &mut self, number: u64, rollup_config: Arc, - ) -> Result { + ) -> Result { crate::inc!(PROVIDER_CALLS, &["l2_chain_provider", "system_config_by_number"]); crate::timer!( START, @@ -430,7 +440,7 @@ impl L2ChainProvider for AlloyL2ChainProvider { PROVIDER_ERRORS, &["l2_chain_provider", "system_config_by_number", "to_system_config"] ); - return Err(e); + return Err(RpcError::LocalUsageError(Box::new(e))); } }; self.system_config_by_number_cache.put(number, sys_config); diff --git a/crates/derive/src/online/beacon_client.rs b/crates/derive/src/online/beacon_client.rs index f0a8cd41c..8aa97a3ec 100644 --- a/crates/derive/src/online/beacon_client.rs +++ b/crates/derive/src/online/beacon_client.rs @@ -1,8 +1,13 @@ //! Contains an online implementation of the [BeaconClient] trait. -use alloc::{boxed::Box, format, string::String, vec::Vec}; -use anyhow::{anyhow, Result}; +use alloc::{ + boxed::Box, + format, + string::{String, ToString}, + vec::Vec, +}; use async_trait::async_trait; +use core::fmt::Display; use reqwest::Client; use kona_primitives::{ @@ -22,11 +27,14 @@ pub(crate) const SIDECARS_METHOD_PREFIX: &str = "eth/v1/beacon/blob_sidecars"; /// The [BeaconClient] is a thin wrapper around the Beacon API. #[async_trait] pub trait BeaconClient { + /// The error type for [BeaconClient] implementations. + type Error: Display + ToString; + /// Returns the config spec. - async fn config_spec(&self) -> Result; + async fn config_spec(&self) -> Result; /// Returns the beacon genesis. 
-    async fn beacon_genesis(&self) -> Result<APIGenesisResponse>;
+    async fn beacon_genesis(&self) -> Result<APIGenesisResponse, Self::Error>;

     /// Fetches blob sidecars that were confirmed in the specified L1 block with the given indexed
     /// hashes. Order of the returned sidecars is guaranteed to be that of the hashes. Blob data is
@@ -35,7 +43,7 @@ pub trait BeaconClient {
         &self,
         slot: u64,
         hashes: &[IndexedBlobHash],
-    ) -> Result<Vec<APIBlobSidecar>>;
+    ) -> Result<Vec<APIBlobSidecar>, Self::Error>;
 }

 /// An online implementation of the [BeaconClient] trait.
@@ -56,16 +64,12 @@ impl OnlineBeaconClient {

 #[async_trait]
 impl BeaconClient for OnlineBeaconClient {
-    async fn config_spec(&self) -> Result<APIConfigResponse> {
+    type Error = reqwest::Error;
+
+    async fn config_spec(&self) -> Result<APIConfigResponse, Self::Error> {
         crate::inc!(PROVIDER_CALLS, &["beacon_client", "config_spec"]);
         crate::timer!(START, PROVIDER_RESPONSE_TIME, &["beacon_client", "config_spec"], timer);
-        let first = match self
-            .inner
-            .get(format!("{}/{}", self.base, SPEC_METHOD))
-            .send()
-            .await
-            .map_err(|e| anyhow!(e))
-        {
+        let first = match self.inner.get(format!("{}/{}", self.base, SPEC_METHOD)).send().await {
             Ok(response) => response,
             Err(e) => {
                 crate::timer!(DISCARD, timer);
@@ -73,7 +77,7 @@ impl BeaconClient for OnlineBeaconClient {
                 return Err(e);
             }
         };
-        match first.json::<APIConfigResponse>().await.map_err(|e| anyhow!(e)) {
+        match first.json::<APIConfigResponse>().await {
             Ok(response) => Ok(response),
             Err(e) => {
                 crate::timer!(DISCARD, timer);
@@ -83,16 +87,10 @@ impl BeaconClient for OnlineBeaconClient {
         }
     }

-    async fn beacon_genesis(&self) -> Result<APIGenesisResponse> {
+    async fn beacon_genesis(&self) -> Result<APIGenesisResponse, Self::Error> {
         crate::inc!(PROVIDER_CALLS, &["beacon_client", "beacon_genesis"]);
         crate::timer!(START, PROVIDER_RESPONSE_TIME, &["beacon_client", "beacon_genesis"], timer);
-        let first = match self
-            .inner
-            .get(format!("{}/{}", self.base, GENESIS_METHOD))
-            .send()
-            .await
-            .map_err(|e| anyhow!(e))
-        {
+        let first = match self.inner.get(format!("{}/{}", self.base, GENESIS_METHOD)).send().await {
             Ok(response) => response,
             Err(e) => {
                 crate::timer!(DISCARD, timer);
@@ -100,7 +98,7 @@ impl BeaconClient for OnlineBeaconClient {
                 return Err(e);
             }
         };
-        match first.json::<APIGenesisResponse>().await.map_err(|e| anyhow!(e)) {
+        match first.json::<APIGenesisResponse>().await {
             Ok(response) => Ok(response),
             Err(e) => {
                 crate::timer!(DISCARD, timer);
@@ -114,7 +112,7 @@ impl BeaconClient for OnlineBeaconClient {
         &self,
         slot: u64,
         hashes: &[IndexedBlobHash],
-    ) -> Result<Vec<APIBlobSidecar>> {
+    ) -> Result<Vec<APIBlobSidecar>, Self::Error> {
         crate::inc!(PROVIDER_CALLS, &["beacon_client", "beacon_blob_side_cars"]);
         crate::timer!(
             START,
@@ -127,7 +125,6 @@ impl BeaconClient for OnlineBeaconClient {
             .get(format!("{}/{}/{}", self.base, SIDECARS_METHOD_PREFIX, slot))
             .send()
             .await
-            .map_err(|e| anyhow!(e))
         {
             Ok(response) => response,
             Err(e) => {
@@ -139,11 +136,7 @@ impl BeaconClient for OnlineBeaconClient {
                 return Err(e);
             }
         };
-        let raw_response = match raw_response
-            .json::<APIGetBlobSidecarsResponse>()
-            .await
-            .map_err(|e| anyhow!(e))
-        {
+        let raw_response = match raw_response.json::<APIGetBlobSidecarsResponse>().await {
             Ok(response) => response,
             Err(e) => {
                 crate::timer!(DISCARD, timer);
diff --git a/crates/derive/src/online/blob_provider.rs b/crates/derive/src/online/blob_provider.rs
index 1ce989116..f1e2d7dcf 100644
--- a/crates/derive/src/online/blob_provider.rs
+++ b/crates/derive/src/online/blob_provider.rs
@@ -1,24 +1,27 @@
 //! Contains an online implementation of the [BlobProvider] trait.
-use alloc::{boxed::Box, string::String, vec::Vec}; +use crate::{ + ensure, + errors::BlobProviderError, + online::{BeaconClient, OnlineBeaconClient}, + traits::BlobProvider, +}; +use alloc::{ + boxed::Box, + string::{String, ToString}, + vec::Vec, +}; use alloy_eips::eip4844::Blob; -use anyhow::{anyhow, ensure}; use async_trait::async_trait; use core::marker::PhantomData; use kona_primitives::{APIBlobSidecar, BlobSidecar, IndexedBlobHash}; use op_alloy_protocol::BlockInfo; use tracing::warn; -use crate::{ - errors::BlobProviderError, - online::{BeaconClient, OnlineBeaconClient}, - traits::BlobProvider, -}; - /// Specifies the derivation of a slot from a timestamp. pub trait SlotDerivation { /// Converts a timestamp to a slot number. - fn slot(genesis: u64, slot_time: u64, timestamp: u64) -> anyhow::Result; + fn slot(genesis: u64, slot_time: u64, timestamp: u64) -> Result; } /// An online implementation of the [BlobProvider] trait. @@ -47,11 +50,24 @@ impl OnlineBlobProvider { /// Loads the beacon genesis and config spec pub async fn load_configs(&mut self) -> Result<(), BlobProviderError> { if self.genesis_time.is_none() { - self.genesis_time = Some(self.beacon_client.beacon_genesis().await?.data.genesis_time); + self.genesis_time = Some( + self.beacon_client + .beacon_genesis() + .await + .map_err(|e| BlobProviderError::Backend(e.to_string()))? + .data + .genesis_time, + ); } if self.slot_interval.is_none() { - self.slot_interval = - Some(self.beacon_client.config_spec().await?.data.seconds_per_slot); + self.slot_interval = Some( + self.beacon_client + .config_spec() + .await + .map_err(|e| BlobProviderError::Backend(e.to_string()))? + .data + .seconds_per_slot, + ); } Ok(()) } @@ -65,7 +81,7 @@ impl OnlineBlobProvider { self.beacon_client .beacon_blob_side_cars(slot, hashes) .await - .map_err(BlobProviderError::Custom) + .map_err(|e| BlobProviderError::Backend(e.to_string())) } /// Fetches blob sidecars for the given block reference and blob hashes. @@ -83,8 +99,7 @@ impl OnlineBlobProvider { let interval = self.slot_interval.expect("Config Spec Loaded"); // Calculate the slot for the given timestamp. - let slot = - S::slot(genesis, interval, block_ref.timestamp).map_err(BlobProviderError::Slot)?; + let slot = S::slot(genesis, interval, block_ref.timestamp)?; // Fetch blob sidecars for the slot using the given blob hashes. let sidecars = self.fetch_sidecars(slot, blob_hashes).await?; @@ -110,11 +125,8 @@ impl OnlineBlobProvider { pub struct SimpleSlotDerivation; impl SlotDerivation for SimpleSlotDerivation { - fn slot(genesis: u64, slot_time: u64, timestamp: u64) -> anyhow::Result { - ensure!( - timestamp >= genesis, - "provided timestamp ({timestamp}) precedes genesis time ({genesis})" - ); + fn slot(genesis: u64, slot_time: u64, timestamp: u64) -> Result { + ensure!(timestamp >= genesis, BlobProviderError::SlotDerivation); Ok((timestamp - genesis) / slot_time) } } @@ -125,6 +137,8 @@ where B: BeaconClient + Send + Sync, S: SlotDerivation + Send + Sync, { + type Error = BlobProviderError; + /// Fetches blob sidecars that were confirmed in the specified L1 block with the given indexed /// hashes. The blobs are validated for their index and hashes using the specified /// [IndexedBlobHash]. 
@@ -132,7 +146,7 @@ where &mut self, block_ref: &BlockInfo, blob_hashes: &[IndexedBlobHash], - ) -> Result, BlobProviderError> { + ) -> Result, Self::Error> { crate::inc!(PROVIDER_CALLS, &["blob_provider", "get_blobs"]); crate::timer!(START, PROVIDER_RESPONSE_TIME, &["blob_provider", "get_blobs"], timer); // Fetches the genesis timestamp and slot interval from the @@ -161,19 +175,21 @@ where .into_iter() .enumerate() .map(|(i, sidecar)| { - let hash = blob_hashes.get(i).ok_or(anyhow!("failed to get blob hash"))?; + let hash = blob_hashes + .get(i) + .ok_or(BlobProviderError::Backend("Missing blob hash".to_string()))?; match sidecar.verify_blob(hash) { Ok(_) => Ok(sidecar.blob), - Err(e) => Err(e), + Err(e) => Err(BlobProviderError::Backend(e.to_string())), } }) - .collect::>>() + .collect::, BlobProviderError>>() { Ok(blobs) => blobs, Err(e) => { crate::timer!(DISCARD, timer); crate::inc!(PROVIDER_ERRORS, &["blob_provider", "get_blobs", "verify_blob"]); - return Err(BlobProviderError::Custom(e)); + return Err(BlobProviderError::Backend(e.to_string())); } }; @@ -193,7 +209,7 @@ pub trait BlobSidecarProvider { &self, slot: u64, hashes: &[IndexedBlobHash], - ) -> anyhow::Result>; + ) -> Result, BlobProviderError>; } /// Blanket implementation of the [BlobSidecarProvider] trait for all types that @@ -204,8 +220,10 @@ impl BlobSidecarProvider for B { &self, slot: u64, hashes: &[IndexedBlobHash], - ) -> anyhow::Result> { - self.beacon_blob_side_cars(slot, hashes).await + ) -> Result, BlobProviderError> { + self.beacon_blob_side_cars(slot, hashes) + .await + .map_err(|e| BlobProviderError::Backend(e.to_string())) } } @@ -246,9 +264,9 @@ impl blob_hashes: &[IndexedBlobHash], ) -> Result, BlobProviderError> { let Some(fallback) = self.fallback.as_ref() else { - return Err(BlobProviderError::Custom(anyhow::anyhow!( - "cannot fetch blobs: the primary blob provider failed, and no fallback is configured" - ))); + return Err(BlobProviderError::Backend( + "cannot fetch blobs: the primary blob provider failed, and no fallback is configured".to_string() + )); }; if blob_hashes.is_empty() { @@ -260,8 +278,7 @@ impl self.primary.genesis_time.expect("Genesis Config Loaded"), self.primary.slot_interval.expect("Config Spec Loaded"), block_ref.timestamp, - ) - .map_err(BlobProviderError::Slot)?; + )?; // Fetch blob sidecars for the given block reference and blob hashes. let sidecars = fallback.beacon_blob_side_cars(slot, blob_hashes).await?; @@ -289,6 +306,8 @@ where F: BlobSidecarProvider + Send + Sync, S: SlotDerivation + Send + Sync, { + type Error = BlobProviderError; + /// Fetches blob sidecars that were confirmed in the specified L1 block with the given indexed /// hashes. The blobs are validated for their index and hashes using the specified /// [IndexedBlobHash]. 
@@ -322,15 +341,15 @@ where
             .into_iter()
             .enumerate()
             .map(|(i, sidecar)| {
-                let hash = blob_hashes
-                    .get(i)
-                    .ok_or(anyhow!("fallback: failed to get blob hash"))?;
+                let hash = blob_hashes.get(i).ok_or(BlobProviderError::Backend(
+                    "fallback: failed to get blob hash".to_string(),
+                ))?;
                 match sidecar.verify_blob(hash) {
                     Ok(_) => Ok(sidecar.blob),
-                    Err(e) => Err(e),
+                    Err(e) => Err(BlobProviderError::Backend(e.to_string())),
                 }
             })
-            .collect::<anyhow::Result<Vec<Blob>>>()
+            .collect::<Result<Vec<Blob>, BlobProviderError>>()
         {
             Ok(blobs) => blobs,
             Err(e) => {
@@ -338,7 +357,7 @@ where
                     PROVIDER_ERRORS,
                     &["blob_provider", "get_blobs", "fallback_verify_blob"]
                 );
-                return Err(BlobProviderError::Custom(e));
+                return Err(e);
             }
         };
 
@@ -544,7 +563,7 @@ mod tests {
         let result = blob_provider.get_blobs(&block_ref, &blob_hashes).await;
         assert_eq!(
             result.unwrap_err(),
-            BlobProviderError::Custom(anyhow::anyhow!("failed to get beacon genesis"))
+            BlobProviderError::Backend("beacon_genesis not set".to_string())
         );
     }
 
@@ -561,7 +580,7 @@ mod tests {
         let result = blob_provider.get_blobs(&block_ref, &blob_hashes).await;
         assert_eq!(
             result.unwrap_err(),
-            BlobProviderError::Custom(anyhow::anyhow!("failed to get config spec"))
+            BlobProviderError::Backend("config_spec not set".to_string())
         );
     }
 
@@ -577,12 +596,7 @@ mod tests {
         let block_ref = BlockInfo { timestamp: 5, ..Default::default() };
         let blob_hashes = vec![IndexedBlobHash::default()];
         let result = blob_provider.get_blobs(&block_ref, &blob_hashes).await;
-        assert_eq!(
-            result.unwrap_err(),
-            BlobProviderError::Slot(anyhow::anyhow!(
-                "provided timestamp (5) precedes genesis time (10)"
-            ))
-        );
+        assert_eq!(result.unwrap_err(), BlobProviderError::SlotDerivation);
     }
 
     #[tokio::test]
@@ -599,7 +613,7 @@ mod tests {
         let result = blob_provider.get_blobs(&block_ref, &blob_hashes).await;
         assert_eq!(
             result.unwrap_err(),
-            BlobProviderError::Custom(anyhow::anyhow!("blob_sidecars not set"))
+            BlobProviderError::Backend("blob_sidecars not set".to_string())
         );
     }
 
@@ -659,9 +673,10 @@ mod tests {
         let result = blob_provider.get_blobs(&block_ref, &blob_hashes).await;
         assert_eq!(
             result.unwrap_err(),
-            BlobProviderError::Custom(anyhow::anyhow!(
+            BlobProviderError::Backend(
                 "invalid sidecar ordering, blob hash index 4 does not match sidecar index 0"
-            ))
+                    .to_string()
+            )
         );
     }
 
@@ -683,10 +698,10 @@ mod tests {
             ..Default::default()
         }];
         let result = blob_provider.get_blobs(&block_ref, &blob_hashes).await;
-        assert_eq!(result.unwrap_err(), BlobProviderError::Custom(anyhow::anyhow!("expected hash 0x0101010101010101010101010101010101010101010101010101010101010101 for blob at index 0 but got 0x01b0761f87b081d5cf10757ccc89f12be355c70e2e29df288b65b30710dcbcd1")));
+        assert_eq!(result.unwrap_err(), BlobProviderError::Backend("expected hash 0x0101010101010101010101010101010101010101010101010101010101010101 for blob at index 0 but got 0x01b0761f87b081d5cf10757ccc89f12be355c70e2e29df288b65b30710dcbcd1".to_string()));
     }
 
-    #[tokio::test]
+    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
     async fn test_get_blobs_failed_verification() {
         let beacon_client = MockBeaconClient {
             beacon_genesis: Some(APIGenesisResponse::new(10)),
@@ -705,8 +720,8 @@
         }];
         let result = blob_provider.get_blobs(&block_ref, &blob_hashes).await;
         assert_eq!(
-            result.unwrap_err(),
-            BlobProviderError::Custom(anyhow::anyhow!("blob at index 0 failed verification"))
+            result,
+            Err(BlobProviderError::Backend("blob at index 0 failed verification".to_string()))
         );
     }
 
diff --git a/crates/derive/src/online/test_utils.rs
b/crates/derive/src/online/test_utils.rs index ef2a59b08..668fcf9d3 100644 --- a/crates/derive/src/online/test_utils.rs +++ b/crates/derive/src/online/test_utils.rs @@ -48,6 +48,8 @@ pub struct MockBeaconClient { #[async_trait] impl BeaconClient for MockBeaconClient { + type Error = anyhow::Error; + async fn config_spec(&self) -> Result { self.config_spec.clone().ok_or_else(|| anyhow!("config_spec not set")) } diff --git a/crates/derive/src/pipeline/core.rs b/crates/derive/src/pipeline/core.rs index c5069b9ec..d82b489c1 100644 --- a/crates/derive/src/pipeline/core.rs +++ b/crates/derive/src/pipeline/core.rs @@ -1,11 +1,11 @@ //! Contains the core derivation pipeline. use super::{ - L2ChainProvider, NextAttributes, OriginAdvancer, OriginProvider, Pipeline, ResettableStage, - StageError, StepResult, + L2ChainProvider, NextAttributes, OriginAdvancer, OriginProvider, Pipeline, PipelineError, + PipelineResult, ResettableStage, StepResult, }; -use alloc::{boxed::Box, collections::VecDeque, sync::Arc}; -use anyhow::bail; +use crate::errors::PipelineErrorKind; +use alloc::{boxed::Box, collections::VecDeque, string::ToString, sync::Arc}; use async_trait::async_trait; use core::fmt::Debug; use op_alloy_genesis::RollupConfig; @@ -94,17 +94,21 @@ where &mut self, l2_block_info: BlockInfo, l1_block_info: BlockInfo, - ) -> anyhow::Result<()> { + ) -> PipelineResult<()> { let system_config = self .l2_chain_provider .system_config_by_number(l2_block_info.number, Arc::clone(&self.rollup_config)) - .await?; + .await + .map_err(|e| PipelineError::Provider(e.to_string()).temp())?; match self.attributes.reset(l1_block_info, &system_config).await { Ok(()) => trace!(target: "pipeline", "Stages reset"), - Err(StageError::Eof) => trace!(target: "pipeline", "Stages reset with EOF"), Err(err) => { - error!(target: "pipeline", "Stage reset errored: {:?}", err); - bail!(err); + if let PipelineErrorKind::Temporary(PipelineError::Eof) = err { + trace!(target: "pipeline", "Stages reset with EOF"); + } else { + error!(target: "pipeline", "Stage reset errored: {:?}", err); + return Err(err); + } } } Ok(()) @@ -114,12 +118,14 @@ where /// /// ## Returns /// - /// A [StageError::Eof] is returned if the pipeline is blocked by waiting for new L1 data. + /// A [PipelineError::Eof] is returned if the pipeline is blocked by waiting for new L1 data. /// Any other error is critical and the derivation pipeline should be reset. /// An error is expected when the underlying source closes. /// /// When [DerivationPipeline::step] returns [Ok(())], it should be called again, to continue the /// derivation process. 
+ /// + /// [PipelineError]: crate::errors::PipelineError async fn step(&mut self, cursor: L2BlockInfo) -> StepResult { match self.attributes.next_attributes(cursor).await { Ok(a) => { @@ -127,17 +133,19 @@ where self.prepared.push_back(a); StepResult::PreparedAttributes } - Err(StageError::Eof) => { - trace!(target: "pipeline", "Pipeline advancing origin"); - if let Err(e) = self.attributes.advance_origin().await { - return StepResult::OriginAdvanceErr(e); + Err(err) => match err { + PipelineErrorKind::Temporary(PipelineError::Eof) => { + trace!(target: "pipeline", "Pipeline advancing origin"); + if let Err(e) = self.attributes.advance_origin().await { + return StepResult::OriginAdvanceErr(e); + } + StepResult::AdvancedOrigin } - StepResult::AdvancedOrigin - } - Err(err) => { - warn!(target: "pipeline", "Attributes queue step failed: {:?}", err); - StepResult::StepFailed(err) - } + _ => { + warn!(target: "pipeline", "Attributes queue step failed: {:?}", err); + StepResult::StepFailed(err) + } + }, } } } diff --git a/crates/derive/src/pipeline/mod.rs b/crates/derive/src/pipeline/mod.rs index e360e0e3c..ede4c1212 100644 --- a/crates/derive/src/pipeline/mod.rs +++ b/crates/derive/src/pipeline/mod.rs @@ -10,7 +10,7 @@ pub use crate::traits::{ pub use crate::stages::AttributesBuilder; /// Re-export error types. -pub use crate::errors::{StageError, StageResult}; +pub use crate::errors::{PipelineError, PipelineResult}; mod builder; pub use builder::PipelineBuilder; diff --git a/crates/derive/src/sources/blobs.rs b/crates/derive/src/sources/blobs.rs index eb0982ef6..9f28debca 100644 --- a/crates/derive/src/sources/blobs.rs +++ b/crates/derive/src/sources/blobs.rs @@ -1,19 +1,17 @@ //! Blob Data Source -use alloc::{boxed::Box, vec::Vec}; +use crate::{ + errors::{BlobProviderError, PipelineError, PipelineResult}, + traits::{AsyncIterator, BlobProvider, ChainProvider}, +}; +use alloc::{boxed::Box, format, string::ToString, vec::Vec}; use alloy_consensus::{Transaction, TxEip4844Variant, TxEnvelope, TxType}; use alloy_primitives::{Address, Bytes, TxKind}; -use anyhow::{anyhow, Result}; use async_trait::async_trait; use kona_primitives::{BlobData, IndexedBlobHash}; use op_alloy_protocol::BlockInfo; use tracing::warn; -use crate::{ - errors::{StageError, StageResult}, - traits::{AsyncIterator, BlobProvider, ChainProvider}, -}; - /// A data iterator that reads from a blob. #[derive(Debug, Clone)] pub struct BlobSource @@ -122,13 +120,16 @@ where } /// Loads blob data into the source if it is not open. - async fn load_blobs(&mut self) -> Result<()> { + async fn load_blobs(&mut self) -> Result<(), BlobProviderError> { if self.open { return Ok(()); } - let info = - self.chain_provider.block_info_and_transactions_by_hash(self.block_ref.hash).await?; + let info = self + .chain_provider + .block_info_and_transactions_by_hash(self.block_ref.hash) + .await + .map_err(|e| BlobProviderError::Backend(e.to_string()))?; let (mut data, blob_hashes) = self.extract_blob_data(info.1); @@ -142,7 +143,7 @@ where let blobs = self.blob_fetcher.get_blobs(&self.block_ref, &blob_hashes).await.map_err(|e| { warn!(target: "blob-source", "Failed to fetch blobs: {e}"); - anyhow!("Failed to fetch blobs: {e}") + BlobProviderError::Backend(e.to_string()) })?; // Fill the blob pointers. @@ -153,7 +154,7 @@ where blob_index += 1; } Err(e) => { - return Err(e); + return Err(e.into()); } } } @@ -164,9 +165,9 @@ where } /// Extracts the next data from the source. 
- fn next_data(&mut self) -> Result> { + fn next_data(&mut self) -> Result> { if self.data.is_empty() { - return Err(Err(StageError::Eof)); + return Err(Err(PipelineError::Eof.temp())); } Ok(self.data.remove(0)) @@ -181,9 +182,13 @@ where { type Item = Bytes; - async fn next(&mut self) -> StageResult { + async fn next(&mut self) -> PipelineResult { if self.load_blobs().await.is_err() { - return Err(StageError::BlockFetch(self.block_ref.hash)); + return Err(PipelineError::Provider(format!( + "Failed to load blobs from stream: {}", + self.block_ref.hash + )) + .temp()); } let next_data = match self.next_data() { diff --git a/crates/derive/src/sources/calldata.rs b/crates/derive/src/sources/calldata.rs index 38a42b8fe..4fd75f629 100644 --- a/crates/derive/src/sources/calldata.rs +++ b/crates/derive/src/sources/calldata.rs @@ -1,17 +1,15 @@ //! CallData Source -use alloc::{boxed::Box, collections::VecDeque}; +use crate::{ + errors::{PipelineError, PipelineResult}, + traits::{AsyncIterator, ChainProvider}, +}; +use alloc::{boxed::Box, collections::VecDeque, format}; use alloy_consensus::{Transaction, TxEnvelope}; use alloy_primitives::{Address, Bytes, TxKind}; -use anyhow::Result; use async_trait::async_trait; use op_alloy_protocol::BlockInfo; -use crate::{ - errors::{StageError, StageResult}, - traits::{AsyncIterator, ChainProvider}, -}; - /// A data iterator that reads from calldata. #[derive(Debug, Clone)] pub struct CalldataSource @@ -51,7 +49,7 @@ impl CalldataSource { } /// Loads the calldata into the source if it is not open. - async fn load_calldata(&mut self) -> Result<()> { + async fn load_calldata(&mut self) -> Result<(), CP::Error> { if self.open { return Ok(()); } @@ -90,10 +88,14 @@ impl CalldataSource { impl AsyncIterator for CalldataSource { type Item = Bytes; - async fn next(&mut self) -> StageResult { + async fn next(&mut self) -> PipelineResult { if self.load_calldata().await.is_err() { - return Err(StageError::BlockFetch(self.block_ref.hash)); + return Err(PipelineError::Provider(format!( + "Failed to load calldata for block {}", + self.block_ref.hash + )) + .temp()); } - self.calldata.pop_front().ok_or(StageError::Eof) + self.calldata.pop_front().ok_or(PipelineError::Eof.temp()) } } diff --git a/crates/derive/src/sources/ethereum.rs b/crates/derive/src/sources/ethereum.rs index 3f39744c2..7ca5dd55f 100644 --- a/crates/derive/src/sources/ethereum.rs +++ b/crates/derive/src/sources/ethereum.rs @@ -2,12 +2,12 @@ //! [DataAvailabilityProvider] trait for the Ethereum protocol. 
use crate::{ + errors::PipelineResult, sources::{BlobSource, CalldataSource, EthereumDataSourceVariant}, traits::{BlobProvider, ChainProvider, DataAvailabilityProvider}, }; use alloc::{boxed::Box, fmt::Debug}; use alloy_primitives::{Address, Bytes}; -use anyhow::Result; use async_trait::async_trait; use op_alloy_genesis::RollupConfig; use op_alloy_protocol::BlockInfo; @@ -62,7 +62,7 @@ where type Item = Bytes; type DataIter = EthereumDataSourceVariant; - async fn open_data(&self, block_ref: &BlockInfo) -> Result { + async fn open_data(&self, block_ref: &BlockInfo) -> PipelineResult { let ecotone_enabled = self.ecotone_timestamp.map(|e| block_ref.timestamp >= e).unwrap_or(false); if ecotone_enabled { diff --git a/crates/derive/src/sources/variant.rs b/crates/derive/src/sources/variant.rs index 25a5e185c..f4c6b9b4d 100644 --- a/crates/derive/src/sources/variant.rs +++ b/crates/derive/src/sources/variant.rs @@ -5,7 +5,7 @@ use alloy_primitives::Bytes; use async_trait::async_trait; use crate::{ - errors::StageResult, + errors::PipelineResult, sources::{BlobSource, CalldataSource}, traits::{AsyncIterator, BlobProvider, ChainProvider}, }; @@ -31,7 +31,7 @@ where { type Item = Bytes; - async fn next(&mut self) -> StageResult { + async fn next(&mut self) -> PipelineResult { match self { EthereumDataSourceVariant::Calldata(c) => c.next().await, EthereumDataSourceVariant::Blob(b) => b.next().await, diff --git a/crates/derive/src/stages/attributes_queue.rs b/crates/derive/src/stages/attributes_queue.rs index 0ed7854f5..c683903ed 100644 --- a/crates/derive/src/stages/attributes_queue.rs +++ b/crates/derive/src/stages/attributes_queue.rs @@ -10,7 +10,7 @@ use tracing::info; use crate::{ batch::SingleBatch, - errors::{ResetError, StageError, StageResult}, + errors::{PipelineError, PipelineResult, ResetError}, traits::{NextAttributes, OriginAdvancer, OriginProvider, ResettableStage}, }; @@ -26,7 +26,7 @@ pub use builder::{AttributesBuilder, StatefulAttributesBuilder}; #[async_trait] pub trait AttributesProvider { /// Returns the next valid batch upon the given safe head. - async fn next_batch(&mut self, parent: L2BlockInfo) -> StageResult; + async fn next_batch(&mut self, parent: L2BlockInfo) -> PipelineResult; /// Returns whether the current batch is the last in its span. fn is_last_in_span(&self) -> bool; @@ -75,20 +75,20 @@ where } /// Loads a [SingleBatch] from the [AttributesProvider] if needed. - pub async fn load_batch(&mut self, parent: L2BlockInfo) -> StageResult { + pub async fn load_batch(&mut self, parent: L2BlockInfo) -> PipelineResult { if self.batch.is_none() { let batch = self.prev.next_batch(parent).await?; self.batch = Some(batch); self.is_last_in_span = self.prev.is_last_in_span(); } - self.batch.as_ref().cloned().ok_or(StageError::Eof) + self.batch.as_ref().cloned().ok_or(PipelineError::Eof.temp()) } /// Returns the next [OptimismAttributesWithParent] from the current batch. 
pub async fn next_attributes( &mut self, parent: L2BlockInfo, - ) -> StageResult { + ) -> PipelineResult { crate::timer!(START, STAGE_ADVANCE_RESPONSE_TIME, &["attributes_queue"], timer); let batch = match self.load_batch(parent).await { Ok(batch) => batch, @@ -124,28 +124,21 @@ where &mut self, batch: SingleBatch, parent: L2BlockInfo, - ) -> StageResult { + ) -> PipelineResult { // Sanity check parent hash if batch.parent_hash != parent.block_info.hash { - return Err(StageError::Reset(ResetError::BadParentHash( - batch.parent_hash, - parent.block_info.hash, - ))); + return Err(ResetError::BadParentHash(batch.parent_hash, parent.block_info.hash).into()); } // Sanity check timestamp let actual = parent.block_info.timestamp + self.cfg.block_time; if actual != batch.timestamp { - return Err(StageError::Reset(ResetError::BadTimestamp(batch.timestamp, actual))); + return Err(ResetError::BadTimestamp(batch.timestamp, actual).into()); } // Prepare the payload attributes let tx_count = batch.transactions.len(); - let mut attributes = self - .builder - .prepare_payload_attributes(parent, batch.epoch()) - .await - .map_err(StageError::AttributesBuild)?; + let mut attributes = self.builder.prepare_payload_attributes(parent, batch.epoch()).await?; attributes.no_tx_pool = Some(true); match attributes.transactions { Some(ref mut txs) => txs.extend(batch.transactions), @@ -172,7 +165,7 @@ where P: AttributesProvider + OriginAdvancer + OriginProvider + ResettableStage + Debug + Send, AB: AttributesBuilder + Debug + Send, { - async fn advance_origin(&mut self) -> StageResult<()> { + async fn advance_origin(&mut self) -> PipelineResult<()> { self.prev.advance_origin().await } } @@ -186,7 +179,7 @@ where async fn next_attributes( &mut self, parent: L2BlockInfo, - ) -> StageResult { + ) -> PipelineResult { self.next_attributes(parent).await } } @@ -211,7 +204,7 @@ where &mut self, block_info: BlockInfo, system_config: &SystemConfig, - ) -> StageResult<()> { + ) -> PipelineResult<()> { self.prev.reset(block_info, system_config).await?; self.batch = None; self.is_last_in_span = false; @@ -224,7 +217,7 @@ where mod tests { use super::*; use crate::{ - errors::BuilderError, + errors::{BuilderError, PipelineErrorKind}, stages::test_utils::{ new_attributes_provider, MockAttributesBuilder, MockAttributesProvider, }, @@ -251,7 +244,7 @@ mod tests { fn new_attributes_queue( cfg: Option, origin: Option, - batches: Vec>, + batches: Vec>, ) -> AttributesQueue { let cfg = cfg.unwrap_or_default(); let mock_batch_queue = new_attributes_provider(origin, batches); @@ -264,7 +257,7 @@ mod tests { let mut attributes_queue = new_attributes_queue(None, None, vec![]); let parent = L2BlockInfo::default(); let result = attributes_queue.load_batch(parent).await.unwrap_err(); - assert_eq!(result, StageError::Eof); + assert_eq!(result, PipelineError::Eof.temp()); } #[tokio::test] @@ -288,7 +281,7 @@ mod tests { let result = attributes_queue.create_next_attributes(batch, parent).await.unwrap_err(); assert_eq!( result, - StageError::Reset(super::ResetError::BadParentHash(Default::default(), bad_hash)) + PipelineErrorKind::Reset(ResetError::BadParentHash(Default::default(), bad_hash)) ); } @@ -298,7 +291,7 @@ mod tests { let parent = L2BlockInfo::default(); let batch = SingleBatch { timestamp: 1, ..Default::default() }; let result = attributes_queue.create_next_attributes(batch, parent).await.unwrap_err(); - assert_eq!(result, StageError::Reset(super::ResetError::BadTimestamp(1, 0))); + assert_eq!(result, 
PipelineErrorKind::Reset(ResetError::BadTimestamp(1, 0))); } #[tokio::test] @@ -310,7 +303,7 @@ mod tests { }; let batch = SingleBatch { timestamp: 1, ..Default::default() }; let result = attributes_queue.create_next_attributes(batch, parent).await.unwrap_err(); - assert_eq!(result, StageError::Reset(super::ResetError::BadTimestamp(1, 2))); + assert_eq!(result, PipelineErrorKind::Reset(ResetError::BadTimestamp(1, 2))); } #[tokio::test] @@ -323,7 +316,7 @@ mod tests { }; let batch = SingleBatch { timestamp: 1, ..Default::default() }; let result = attributes_queue.create_next_attributes(batch, parent).await.unwrap_err(); - assert_eq!(result, StageError::Reset(super::ResetError::BadTimestamp(1, 2))); + assert_eq!(result, PipelineErrorKind::Reset(ResetError::BadTimestamp(1, 2))); } #[tokio::test] @@ -334,9 +327,7 @@ mod tests { let result = attributes_queue.create_next_attributes(batch, parent).await.unwrap_err(); assert_eq!( result, - StageError::AttributesBuild(BuilderError::Custom(anyhow::anyhow!( - "missing payload attribute" - ))) + PipelineError::AttributesBuilder(BuilderError::AttributesUnavailable).crit() ); } @@ -366,7 +357,7 @@ mod tests { let mut attributes_queue = new_attributes_queue(None, None, vec![]); let parent = L2BlockInfo::default(); let result = attributes_queue.next_attributes(parent).await.unwrap_err(); - assert_eq!(result, StageError::Eof); + assert_eq!(result, PipelineError::Eof.temp()); } #[tokio::test] diff --git a/crates/derive/src/stages/attributes_queue/builder.rs b/crates/derive/src/stages/attributes_queue/builder.rs index a4db614d0..09e8b67d1 100644 --- a/crates/derive/src/stages/attributes_queue/builder.rs +++ b/crates/derive/src/stages/attributes_queue/builder.rs @@ -2,11 +2,11 @@ use super::derive_deposits; use crate::{ - errors::BuilderError, + errors::{BuilderError, PipelineError, PipelineErrorKind, PipelineResult}, params::SEQUENCER_FEE_VAULT_ADDRESS, traits::{ChainProvider, L2ChainProvider}, }; -use alloc::{boxed::Box, fmt::Debug, sync::Arc, vec, vec::Vec}; +use alloc::{boxed::Box, fmt::Debug, string::ToString, sync::Arc, vec, vec::Vec}; use alloy_eips::{eip2718::Encodable2718, BlockNumHash}; use alloy_primitives::Bytes; use alloy_rlp::Encodable; @@ -32,7 +32,7 @@ pub trait AttributesBuilder { &mut self, l2_parent: L2BlockInfo, epoch: BlockNumHash, - ) -> Result; + ) -> PipelineResult; } /// A stateful implementation of the [AttributesBuilder]. @@ -71,43 +71,63 @@ where &mut self, l2_parent: L2BlockInfo, epoch: BlockNumHash, - ) -> Result { + ) -> PipelineResult { let l1_header; let deposit_transactions: Vec; + let mut sys_config = self .config_fetcher .system_config_by_number(l2_parent.block_info.number, self.rollup_cfg.clone()) - .await?; + .await + .map_err(|e| PipelineError::Provider(e.to_string()).temp())?; // If the L1 origin changed in this block, then we are in the first block of the epoch. // In this case we need to fetch all transaction receipts from the L1 origin block so // we can scan for user deposits. 
let sequence_number = if l2_parent.l1_origin.number != epoch.number { - let header = self.receipts_fetcher.header_by_hash(epoch.hash).await?; + let header = self + .receipts_fetcher + .header_by_hash(epoch.hash) + .await + .map_err(|e| PipelineError::Provider(e.to_string()).temp())?; if l2_parent.l1_origin.hash != header.parent_hash { - return Err(BuilderError::BlockMismatchEpochReset( - epoch, - l2_parent.l1_origin, - header.parent_hash, + return Err(PipelineErrorKind::Reset( + BuilderError::BlockMismatchEpochReset( + epoch, + l2_parent.l1_origin, + header.parent_hash, + ) + .into(), )); } - let receipts = self.receipts_fetcher.receipts_by_hash(epoch.hash).await?; + let receipts = self + .receipts_fetcher + .receipts_by_hash(epoch.hash) + .await + .map_err(|e| PipelineError::Provider(e.to_string()).temp())?; + let deposits = + derive_deposits(epoch.hash, &receipts, self.rollup_cfg.deposit_contract_address) + .await + .map_err(|e| PipelineError::BadEncoding(e).crit())?; sys_config .update_with_receipts(&receipts, &self.rollup_cfg, header.timestamp) - .map_err(|e| BuilderError::Custom(anyhow::anyhow!(e)))?; - let deposits = - derive_deposits(epoch.hash, receipts, self.rollup_cfg.deposit_contract_address) - .await?; + .map_err(|e| PipelineError::SystemConfigUpdate(e).crit())?; l1_header = header; deposit_transactions = deposits; 0 } else { #[allow(clippy::collapsible_else_if)] if l2_parent.l1_origin.hash != epoch.hash { - return Err(BuilderError::BlockMismatch(epoch, l2_parent.l1_origin)); + return Err(PipelineErrorKind::Reset( + BuilderError::BlockMismatch(epoch, l2_parent.l1_origin).into(), + )); } - let header = self.receipts_fetcher.header_by_hash(epoch.hash).await?; + let header = self + .receipts_fetcher + .header_by_hash(epoch.hash) + .await + .map_err(|e| PipelineError::Provider(e.to_string()).temp())?; l1_header = header; deposit_transactions = vec![]; l2_parent.seq_num + 1 @@ -117,11 +137,14 @@ where // between L1 and L2. 
let next_l2_time = l2_parent.block_info.timestamp + self.rollup_cfg.block_time; if next_l2_time < l1_header.timestamp { - return Err(BuilderError::BrokenTimeInvariant( - l2_parent.l1_origin, - next_l2_time, - BlockNumHash { hash: l1_header.hash_slow(), number: l1_header.number }, - l1_header.timestamp, + return Err(PipelineErrorKind::Reset( + BuilderError::BrokenTimeInvariant( + l2_parent.l1_origin, + next_l2_time, + BlockNumHash { hash: l1_header.hash_slow(), number: l1_header.number }, + l1_header.timestamp, + ) + .into(), )); } @@ -145,7 +168,9 @@ where &l1_header, next_l2_time, ) - .map_err(|e| BuilderError::Custom(anyhow::anyhow!(e)))?; + .map_err(|e| { + PipelineError::AttributesBuilder(BuilderError::Custom(e.to_string())).crit() + })?; let mut encoded_l1_info_tx = Vec::with_capacity(l1_info_tx_envelope.length()); l1_info_tx_envelope.encode_2718(&mut encoded_l1_info_tx); @@ -187,7 +212,8 @@ where mod tests { use super::*; use crate::{ - stages::test_utils::MockSystemConfigL2Fetcher, traits::test_utils::TestChainProvider, + errors::ResetError, stages::test_utils::MockSystemConfigL2Fetcher, + traits::test_utils::TestChainProvider, }; use alloy_consensus::Header; use alloy_primitives::B256; @@ -216,7 +242,7 @@ mod tests { let expected = BuilderError::BlockMismatchEpochReset(epoch, l2_parent.l1_origin, B256::default()); let err = builder.prepare_payload_attributes(l2_parent, epoch).await.unwrap_err(); - assert_eq!(err, expected); + assert_eq!(err, PipelineErrorKind::Reset(expected.into())); } #[tokio::test] @@ -240,7 +266,7 @@ mod tests { // Here the default header is used whose hash will not equal the custom `l2_hash` above. let expected = BuilderError::BlockMismatch(epoch, l2_parent.l1_origin); let err = builder.prepare_payload_attributes(l2_parent, epoch).await.unwrap_err(); - assert_eq!(err, expected); + assert_eq!(err, PipelineErrorKind::Reset(ResetError::AttributesBuilder(expected))); } #[tokio::test] @@ -271,7 +297,7 @@ mod tests { timestamp, ); let err = builder.prepare_payload_attributes(l2_parent, epoch).await.unwrap_err(); - assert_eq!(err, expected); + assert_eq!(err, PipelineErrorKind::Reset(ResetError::AttributesBuilder(expected))); } #[tokio::test] diff --git a/crates/derive/src/stages/attributes_queue/deposits.rs b/crates/derive/src/stages/attributes_queue/deposits.rs index 7420c8bb2..3a635d6db 100644 --- a/crates/derive/src/stages/attributes_queue/deposits.rs +++ b/crates/derive/src/stages/attributes_queue/deposits.rs @@ -1,5 +1,6 @@ //! Contains a helper method to derive deposit transactions from L1 Receipts. +use crate::errors::PipelineEncodingError; use alloc::vec::Vec; use alloy_consensus::{Eip658Value, Receipt}; use alloy_primitives::{Address, Bytes, B256}; @@ -12,9 +13,9 @@ use op_alloy_protocol::{decode_deposit, DEPOSIT_EVENT_ABI_HASH}; /// must be the [DEPOSIT_EVENT_ABI_HASH]. 
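// Reviewer note (illustrative, not part of the patch): the signature below now
// borrows `&[Receipt]` rather than consuming `Vec<Receipt>`, because the
// attributes builder above re-uses the same receipts for
// `SystemConfig::update_with_receipts` after deriving the deposits.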
 pub(crate) async fn derive_deposits(
     block_hash: B256,
-    receipts: Vec<Receipt>,
+    receipts: &[Receipt],
     deposit_contract: Address,
-) -> anyhow::Result<Vec<Bytes>> {
+) -> Result<Vec<Bytes>, PipelineEncodingError> {
     let mut global_index = 0;
     let mut res = Vec::new();
     for r in receipts.iter() {
@@ -30,8 +31,7 @@ pub(crate) async fn derive_deposits(
             if l.address != deposit_contract {
                 continue;
             }
-            let decoded =
-                decode_deposit(block_hash, curr_index, l).map_err(|e| anyhow::anyhow!(e))?;
+            let decoded = decode_deposit(block_hash, curr_index, l)?;
             res.push(decoded);
         }
     }
@@ -99,7 +99,7 @@ mod tests {
     async fn test_derive_deposits_empty() {
         let receipts = vec![];
         let deposit_contract = Address::default();
-        let result = derive_deposits(B256::default(), receipts, deposit_contract).await;
+        let result = derive_deposits(B256::default(), &receipts, deposit_contract).await;
         assert!(result.unwrap().is_empty());
     }
 
@@ -109,7 +109,7 @@ mod tests {
         let mut invalid = generate_valid_receipt();
         invalid.logs[0].data = LogData::new_unchecked(vec![], Bytes::default());
         let receipts = vec![generate_valid_receipt(), generate_valid_receipt(), invalid];
-        let result = derive_deposits(B256::default(), receipts, deposit_contract).await;
+        let result = derive_deposits(B256::default(), &receipts, deposit_contract).await;
         assert_eq!(result.unwrap().len(), 5);
     }
 
@@ -119,7 +119,7 @@ mod tests {
         let mut invalid = generate_valid_receipt();
         invalid.logs[0].address = Address::default();
         let receipts = vec![generate_valid_receipt(), generate_valid_receipt(), invalid];
-        let result = derive_deposits(B256::default(), receipts, deposit_contract).await;
+        let result = derive_deposits(B256::default(), &receipts, deposit_contract).await;
         assert_eq!(result.unwrap().len(), 5);
     }
 
@@ -130,16 +130,16 @@ mod tests {
         invalid.logs[0].data =
             LogData::new_unchecked(vec![DEPOSIT_EVENT_ABI_HASH], Bytes::default());
         let receipts = vec![generate_valid_receipt(), generate_valid_receipt(), invalid];
-        let result = derive_deposits(B256::default(), receipts, deposit_contract).await;
-        let downcasted = result.unwrap_err().downcast::<DepositError>().unwrap();
-        assert_eq!(downcasted, DepositError::UnexpectedTopicsLen(1));
+        let result = derive_deposits(B256::default(), &receipts, deposit_contract).await;
+        let downcasted = result.unwrap_err();
+        assert_eq!(downcasted, DepositError::UnexpectedTopicsLen(1).into());
     }
 
     #[tokio::test]
     async fn test_derive_deposits_succeeds() {
         let deposit_contract = address!("1111111111111111111111111111111111111111");
         let receipts = vec![generate_valid_receipt(), generate_valid_receipt()];
-        let result = derive_deposits(B256::default(), receipts, deposit_contract).await;
+        let result = derive_deposits(B256::default(), &receipts, deposit_contract).await;
         assert_eq!(result.unwrap().len(), 4);
     }
 }
diff --git a/crates/derive/src/stages/batch_queue.rs b/crates/derive/src/stages/batch_queue.rs
index 85cb51750..3ab203132 100644
--- a/crates/derive/src/stages/batch_queue.rs
+++ b/crates/derive/src/stages/batch_queue.rs
@@ -1,30 +1,28 @@
 //! This module contains the `BatchQueue` stage implementation.
+use crate::{
+    batch::{Batch, BatchValidity, BatchWithInclusionBlock, SingleBatch},
+    errors::{PipelineEncodingError, PipelineError, PipelineErrorKind, PipelineResult, ResetError},
+    stages::attributes_queue::AttributesProvider,
+    traits::{L2ChainProvider, OriginAdvancer, OriginProvider, ResettableStage},
+};
 use alloc::{boxed::Box, sync::Arc, vec::Vec};
-use anyhow::anyhow;
 use async_trait::async_trait;
 use core::fmt::Debug;
 use op_alloy_genesis::{RollupConfig, SystemConfig};
 use op_alloy_protocol::{BlockInfo, L2BlockInfo};
 use tracing::{error, info, warn};
 
-use crate::{
-    batch::{Batch, BatchValidity, BatchWithInclusionBlock, SingleBatch},
-    errors::{StageError, StageResult},
-    stages::attributes_queue::AttributesProvider,
-    traits::{L2ChainProvider, OriginAdvancer, OriginProvider, ResettableStage},
-};
-
 /// Provides [Batch]es for the [BatchQueue] stage.
 #[async_trait]
 pub trait BatchQueueProvider {
     /// Returns the next [Batch] in the [ChannelReader] stage, if the stage is not complete.
     /// This function can only be called once while the stage is in progress, and will return
     /// [`None`] on subsequent calls unless the stage is reset or complete. If the stage is
-    /// complete and the batch has been consumed, a [StageError::Eof] error is returned.
+    /// complete and the batch has been consumed, a [PipelineError::Eof] error is returned.
     ///
     /// [ChannelReader]: crate::stages::ChannelReader
-    async fn next_batch(&mut self) -> StageResult<Batch>;
+    async fn next_batch(&mut self) -> PipelineResult<Batch>;
 }
 
 /// [BatchQueue] is responsible for ordering unordered batches
@@ -53,7 +51,7 @@ where
     prev: P,
     /// The l1 block ref
     origin: Option<BlockInfo>,
-
     /// A consecutive, time-centric window of L1 Blocks.
     /// Every L1 origin of unsafe L2 Blocks must be included in this list.
    /// If every L2 Block corresponding to a single L1 Block becomes safe, it is removed from this list.
     /// If new L2 Block's L1 origin is not included in this list, fetch and
     /// push it to the list.
     l1_blocks: Vec<BlockInfo>,
-
     /// A set of batches in order from when we've seen them.
     batches: Vec<BatchWithInclusionBlock>,
-
     /// A set of cached [SingleBatch]es derived from [SpanBatch]es.
     ///
     /// [SpanBatch]: crate::batch::SpanBatch
     next_spans: Vec<SingleBatch>,
-
     /// Used to validate the batches.
     fetcher: BF,
 }
@@ -108,15 +102,15 @@ where
     /// Derives the next batch to apply on top of the current L2 safe head.
     /// Follows the validity rules imposed on consecutive batches.
     /// Based on currently available buffered batch and L1 origin information.
-    /// A [StageError::Eof] is returned if no batch can be derived yet.
+    /// A [PipelineError::Eof] is returned if no batch can be derived yet.
     pub async fn derive_next_batch(
         &mut self,
         empty: bool,
         parent: L2BlockInfo,
-    ) -> StageResult<Batch> {
+    ) -> PipelineResult<Batch> {
         // Cannot derive a batch if no origin was prepared.
if self.l1_blocks.is_empty() { - return Err(StageError::MissingOrigin); + return Err(PipelineError::MissingOrigin.crit()); } // Get the epoch @@ -127,10 +121,9 @@ where // This is in the case where we auto generate all batches in an epoch & advance the epoch // but don't advance the L2 Safe Head's epoch if parent.l1_origin != epoch.id() && parent.l1_origin.number != epoch.number - 1 { - return Err(StageError::Custom(anyhow!( - "buffered L1 chain epoch {} in batch queue does not match safe head origin {:?}", - epoch, - parent.l1_origin + return Err(PipelineErrorKind::Reset(ResetError::L1OriginMismatch( + parent.l1_origin.number, + epoch.number - 1, ))); } @@ -166,7 +159,7 @@ where BatchValidity::Undecided => { remaining.extend_from_slice(&self.batches[i..]); self.batches = remaining; - return Err(StageError::Eof); + return Err(PipelineError::Eof.temp()); } } } @@ -180,7 +173,7 @@ where // If the current epoch is too old compared to the L1 block we are at, // i.e. if the sequence window expired, we create empty batches for the current epoch let expiry_epoch = epoch.number + self.cfg.seq_window_size; - let bq_origin = self.origin.ok_or(StageError::MissingOrigin)?; + let bq_origin = self.origin.ok_or(PipelineError::MissingOrigin.crit())?; let force_empty_batches = (expiry_epoch == bq_origin.number && empty) || expiry_epoch < bq_origin.number; let first_of_epoch = epoch.number == parent.l1_origin.number + 1; @@ -189,7 +182,7 @@ where // there is still room to receive batches for the current epoch. // No need to force-create empty batch(es) towards the next epoch yet. if !force_empty_batches { - return Err(StageError::Eof); + return Err(PipelineError::Eof.temp()); } info!( @@ -200,7 +193,7 @@ where // The next L1 block is needed to proceed towards the next epoch. if self.l1_blocks.len() < 2 { - return Err(StageError::Eof); + return Err(PipelineError::Eof.temp()); } let next_epoch = self.l1_blocks[1]; @@ -227,16 +220,16 @@ where next_epoch.number, next_timestamp, next_epoch.timestamp ); self.l1_blocks.remove(0); - Err(StageError::Eof) + Err(PipelineError::Eof.temp()) } /// Adds a batch to the queue. - pub async fn add_batch(&mut self, batch: Batch, parent: L2BlockInfo) -> StageResult<()> { + pub async fn add_batch(&mut self, batch: Batch, parent: L2BlockInfo) -> PipelineResult<()> { if self.l1_blocks.is_empty() { error!(target: "batch-queue", "Cannot add batch without an origin"); panic!("Cannot add batch without an origin"); } - let origin = self.origin.ok_or(StageError::MissingOrigin)?; + let origin = self.origin.ok_or(PipelineError::MissingOrigin.crit())?; let data = BatchWithInclusionBlock { inclusion_block: origin, batch }; // If we drop the batch, validation logs the drop reason with WARN level. if data.check_batch(&self.cfg, &self.l1_blocks, parent, &mut self.fetcher).await.is_drop() { @@ -253,7 +246,7 @@ where P: BatchQueueProvider + OriginAdvancer + OriginProvider + ResettableStage + Send + Debug, BF: L2ChainProvider + Send + Debug, { - async fn advance_origin(&mut self) -> StageResult<()> { + async fn advance_origin(&mut self) -> PipelineResult<()> { self.prev.advance_origin().await } } @@ -266,16 +259,14 @@ where { /// Returns the next valid batch upon the given safe head. /// Also returns the boolean that indicates if the batch is the last block in the batch. 
- async fn next_batch(&mut self, parent: L2BlockInfo) -> StageResult { + async fn next_batch(&mut self, parent: L2BlockInfo) -> PipelineResult { crate::timer!(START, STAGE_ADVANCE_RESPONSE_TIME, &["batch_queue"], timer); if !self.next_spans.is_empty() { // There are cached singular batches derived from the span batch. // Check if the next cached batch matches the given parent block. if self.next_spans[0].timestamp == parent.block_info.timestamp + self.cfg.block_time { crate::timer!(DISCARD, timer); - return self - .pop_next_batch(parent) - .ok_or(anyhow!("failed to pop next batch from span batch").into()); + return self.pop_next_batch(parent).ok_or(PipelineError::BatchQueueEmpty.crit()); } // Parent block does not match the next batch. // Means the previously returned batch is invalid. @@ -317,11 +308,11 @@ where if self.origin != self.prev.origin() { self.origin = self.prev.origin(); if !origin_behind { - let origin = match self.origin.as_ref().ok_or_else(|| anyhow!("missing origin")) { + let origin = match self.origin.as_ref().ok_or(PipelineError::MissingOrigin.crit()) { Ok(o) => o, Err(e) => { crate::timer!(DISCARD, timer); - return Err(StageError::Custom(e)); + return Err(e); } }; self.l1_blocks.push(*origin); @@ -345,10 +336,13 @@ where warn!(target: "batch-queue", "Dropping batch: Origin is behind"); } } - Err(StageError::Eof) => out_of_data = true, Err(e) => { - crate::timer!(DISCARD, timer); - return Err(e); + if let PipelineErrorKind::Temporary(PipelineError::Eof) = e { + out_of_data = true; + } else { + crate::timer!(DISCARD, timer); + return Err(e); + } } } @@ -357,9 +351,9 @@ where if origin_behind { crate::timer!(DISCARD, timer); if out_of_data { - return Err(StageError::Eof); + return Err(PipelineError::Eof.temp()); } - return Err(StageError::NotEnoughData); + return Err(PipelineError::NotEnoughData.temp()); } // Attempt to derive more batches. 
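// Reviewer note (illustrative, not part of the patch): `.temp()` and `.crit()`
// are the severity helpers this stage leans on: `.temp()` wraps a
// `PipelineError` in `PipelineErrorKind::Temporary` (retry later, e.g. `Eof`
// or `NotEnoughData`), while `.crit()` marks it critical (e.g.
// `MissingOrigin`). Callers then discriminate with a single pattern match:
//
//     fn is_out_of_data(e: &PipelineErrorKind) -> bool {
//         matches!(e, PipelineErrorKind::Temporary(PipelineError::Eof))
//     }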
@@ -368,11 +362,11 @@ where Err(e) => { crate::timer!(DISCARD, timer); match e { - StageError::Eof => { + PipelineErrorKind::Temporary(PipelineError::Eof) => { if out_of_data { - return Err(StageError::Eof); + return Err(PipelineError::Eof.temp()); } - return Err(StageError::NotEnoughData); + return Err(PipelineError::NotEnoughData.temp()); } _ => return Err(e), } @@ -385,9 +379,7 @@ where Batch::Single(sb) => Ok(sb), Batch::Span(sb) => { let batches = match sb.get_singular_batches(&self.l1_blocks, parent).map_err(|e| { - StageError::Custom(anyhow!( - "Could not get singular batches from span batch: {e}" - )) + PipelineError::BadEncoding(PipelineEncodingError::SpanBatchError(e)).crit() }) { Ok(b) => b, Err(e) => { @@ -398,12 +390,12 @@ where self.next_spans = batches; let nb = match self .pop_next_batch(parent) - .ok_or_else(|| anyhow!("failed to pop next batch from span batch")) + .ok_or(PipelineError::BatchQueueEmpty.crit()) { Ok(b) => b, Err(e) => { crate::timer!(DISCARD, timer); - return Err(StageError::Custom(e)); + return Err(e); } }; Ok(nb) @@ -433,7 +425,7 @@ where P: BatchQueueProvider + OriginAdvancer + OriginProvider + ResettableStage + Send + Debug, BF: L2ChainProvider + Send + Debug, { - async fn reset(&mut self, base: BlockInfo, system_config: &SystemConfig) -> StageResult<()> { + async fn reset(&mut self, base: BlockInfo, system_config: &SystemConfig) -> PipelineResult<()> { self.prev.reset(base, system_config).await?; self.origin = Some(base); self.batches.clear(); @@ -487,7 +479,7 @@ mod tests { let mut bq = BatchQueue::new(cfg, mock, fetcher); let parent = L2BlockInfo::default(); let result = bq.derive_next_batch(false, parent).await.unwrap_err(); - assert_eq!(result, StageError::MissingOrigin); + assert_eq!(result, PipelineError::MissingOrigin.crit()); } #[tokio::test] @@ -499,7 +491,7 @@ mod tests { let fetcher = TestL2ChainProvider::default(); let mut bq = BatchQueue::new(cfg, mock, fetcher); let res = bq.next_batch(L2BlockInfo::default()).await.unwrap_err(); - assert_eq!(res, StageError::NotEnoughData); + assert_eq!(res, PipelineError::NotEnoughData.temp()); assert!(bq.is_last_in_span()); } @@ -507,7 +499,7 @@ mod tests { async fn test_next_batch_origin_behind() { let mut reader = new_batch_reader(); let cfg = Arc::new(RollupConfig::default()); - let mut batch_vec: Vec> = vec![]; + let mut batch_vec: Vec> = vec![]; while let Some(batch) = reader.next_batch(cfg.as_ref()) { batch_vec.push(Ok(batch)); } @@ -520,7 +512,7 @@ mod tests { ..Default::default() }; let res = bq.next_batch(parent).await.unwrap_err(); - assert_eq!(res, StageError::NotEnoughData); + assert_eq!(res, PipelineError::NotEnoughData.temp()); } #[tokio::test] @@ -545,7 +537,7 @@ mod tests { batch_inbox_address: address!("6887246668a3b87f54deb3b94ba47a6f63f32985"), ..Default::default() }); - let mut batch_vec: Vec> = vec![]; + let mut batch_vec: Vec> = vec![]; let mut batch_txs: Vec = vec![]; let mut second_batch_txs: Vec = vec![]; while let Some(batch) = reader.next_batch(cfg.as_ref()) { @@ -655,7 +647,7 @@ mod tests { let warns = trace_store.get_by_level(Level::WARN); assert_eq!(warns.len(), 1); assert!(warns[0].contains("span batch has no new blocks after safe head")); - assert_eq!(res, StageError::NotEnoughData); + assert_eq!(res, PipelineError::NotEnoughData.temp()); } #[tokio::test] diff --git a/crates/derive/src/stages/channel_bank.rs b/crates/derive/src/stages/channel_bank.rs index d1272be94..5f949c699 100644 --- a/crates/derive/src/stages/channel_bank.rs +++ 
b/crates/derive/src/stages/channel_bank.rs @@ -1,14 +1,13 @@ //! This module contains the `ChannelBank` struct. use crate::{ - errors::{StageError, StageResult}, + errors::{PipelineError, PipelineErrorKind, PipelineResult}, params::MAX_CHANNEL_BANK_SIZE, stages::ChannelReaderProvider, traits::{OriginAdvancer, OriginProvider, ResettableStage}, }; use alloc::{boxed::Box, collections::VecDeque, sync::Arc}; use alloy_primitives::{hex, Bytes}; -use anyhow::anyhow; use async_trait::async_trait; use core::fmt::Debug; use hashbrown::HashMap; @@ -22,7 +21,7 @@ pub trait ChannelBankProvider { /// Retrieves the next [Frame] from the [FrameQueue] stage. /// /// [FrameQueue]: crate::stages::FrameQueue - async fn next_frame(&mut self) -> StageResult; + async fn next_frame(&mut self) -> PipelineResult; } /// [ChannelBank] is a stateful stage that does the following: @@ -68,19 +67,20 @@ where /// Prunes the Channel bank, until it is below [MAX_CHANNEL_BANK_SIZE]. /// Prunes from the high-priority channel since it failed to be read. - pub fn prune(&mut self) -> StageResult<()> { + pub fn prune(&mut self) -> PipelineResult<()> { let mut total_size = self.size(); while total_size > MAX_CHANNEL_BANK_SIZE { - let id = self.channel_queue.pop_front().ok_or(StageError::NoChannelsAvailable)?; - let channel = self.channels.remove(&id).ok_or(StageError::ChannelNotFound)?; + let id = + self.channel_queue.pop_front().ok_or(PipelineError::ChannelBankEmpty.crit())?; + let channel = self.channels.remove(&id).ok_or(PipelineError::ChannelNotFound.crit())?; total_size -= channel.size(); } Ok(()) } /// Adds new L1 data to the channel bank. Should only be called after all data has been read. - pub fn ingest_frame(&mut self, frame: Frame) -> StageResult<()> { - let origin = self.origin().ok_or(StageError::MissingOrigin)?; + pub fn ingest_frame(&mut self, frame: Frame) -> PipelineResult<()> { + let origin = self.origin().ok_or(PipelineError::MissingOrigin.crit())?; // Get the channel for the frame, or create a new one if it doesn't exist. let current_channel = self.channels.entry(frame.id).or_insert_with(|| { @@ -125,18 +125,18 @@ where /// Read the raw data of the first channel, if it's timed-out or closed. /// /// Returns an error if there is nothing new to read. - pub fn read(&mut self) -> StageResult> { + pub fn read(&mut self) -> PipelineResult> { // Bail if there are no channels to read from. if self.channel_queue.is_empty() { trace!(target: "channel-bank", "No channels to read from"); - return Err(StageError::Eof); + return Err(PipelineError::Eof.temp()); } // Return an `Ok(None)` if the first channel is timed out. There may be more timed // out channels at the head of the queue and we want to remove them all. let first = self.channel_queue[0]; - let channel = self.channels.get(&first).ok_or(StageError::ChannelNotFound)?; - let origin = self.origin().ok_or(StageError::MissingOrigin)?; + let channel = self.channels.get(&first).ok_or(PipelineError::ChannelBankEmpty.crit())?; + let origin = self.origin().ok_or(PipelineError::ChannelBankEmpty.crit())?; if channel.open_block_number() + self.cfg.channel_timeout(origin.timestamp) < origin.number { warn!( @@ -160,7 +160,7 @@ where // At this point we have removed all timed out channels from the front of the // `channel_queue`. Pre-Canyon we simply check the first index. // Post-Canyon we read the entire channelQueue for the first ready channel. - // If no channel is available, we return StageError::Eof. + // If no channel is available, we return `PipelineError::Eof`. 
         // Canyon is activated when the first L1 block whose time >= CanyonTime, not on the L2
         // timestamp.
         if !self.cfg.is_canyon_active(origin.timestamp) {
             return self.try_read_channel_at_index(0).map(Some);
         }
 
         let channel_data =
             (0..self.channel_queue.len()).find_map(|i| self.try_read_channel_at_index(i).ok());
         match channel_data {
             Some(data) => Ok(Some(data)),
-            None => Err(StageError::Eof),
+            None => Err(PipelineError::Eof.temp()),
         }
     }
 
     /// Attempts to read the channel at the specified index. If the channel is not ready or timed
     /// out, it will return an error.
     /// If the channel read was successful, it will remove the channel from the channel queue.
-    fn try_read_channel_at_index(&mut self, index: usize) -> StageResult<Bytes> {
+    fn try_read_channel_at_index(&mut self, index: usize) -> PipelineResult<Bytes> {
         let channel_id = self.channel_queue[index];
-        let channel = self.channels.get(&channel_id).ok_or(StageError::ChannelNotFound)?;
-        let origin = self.origin().ok_or(StageError::MissingOrigin)?;
+        let channel =
+            self.channels.get(&channel_id).ok_or(PipelineError::ChannelBankEmpty.crit())?;
+        let origin = self.origin().ok_or(PipelineError::MissingOrigin.crit())?;
 
         let timed_out = channel.open_block_number() + self.cfg.channel_timeout(origin.timestamp) <
             origin.number;
         if timed_out || !channel.is_ready() {
-            return Err(StageError::Eof);
+            return Err(PipelineError::Eof.temp());
         }
 
         let frame_data = channel.frame_data();
         self.channels.remove(&channel_id);
         self.channel_queue.remove(index);
 
-        frame_data.ok_or_else(|| StageError::Custom(anyhow!("Channel data is empty")))
+        frame_data.ok_or(PipelineError::ChannelBankEmpty.crit())
     }
 }
 
@@ -202,7 +203,7 @@ impl
<P> OriginAdvancer for ChannelBank<P>
where P: ChannelBankProvider + OriginAdvancer + OriginProvider + ResettableStage + Send + Debug, { - async fn advance_origin(&mut self) -> StageResult<()> { + async fn advance_origin(&mut self) -> PipelineResult<()> { self.prev.advance_origin().await } } @@ -212,15 +213,14 @@ impl
<P> ChannelReaderProvider for ChannelBank<P>
where P: ChannelBankProvider + OriginAdvancer + OriginProvider + ResettableStage + Send + Debug, { - async fn next_data(&mut self) -> StageResult> { + async fn next_data(&mut self) -> PipelineResult> { crate::timer!(START, STAGE_ADVANCE_RESPONSE_TIME, &["channel_bank"], timer); match self.read() { - Err(StageError::Eof) => { - // continue - we will attempt to load data into the channel bank - } Err(e) => { - crate::timer!(DISCARD, timer); - return Err(anyhow!("Error fetching next data from channel bank: {:?}", e).into()); + if !matches!(e, PipelineErrorKind::Temporary(PipelineError::Eof)) { + crate::timer!(DISCARD, timer); + return Err(PipelineError::ChannelBankEmpty.crit()); + } } data => return data, }; @@ -236,7 +236,7 @@ where let res = self.ingest_frame(frame); crate::timer!(DISCARD, timer); res?; - Err(StageError::NotEnoughData) + Err(PipelineError::NotEnoughData.temp()) } } @@ -258,7 +258,7 @@ where &mut self, block_info: BlockInfo, system_config: &SystemConfig, - ) -> StageResult<()> { + ) -> PipelineResult<()> { self.prev.reset(block_info, system_config).await?; self.channels.clear(); self.channel_queue = VecDeque::with_capacity(10); @@ -287,7 +287,7 @@ mod tests { let mut channel_bank = ChannelBank::new(cfg, mock); let frame = Frame::default(); let err = channel_bank.ingest_frame(frame).unwrap_err(); - assert_eq!(err, StageError::MissingOrigin); + assert_eq!(err, PipelineError::MissingOrigin.crit()); } #[test] @@ -346,9 +346,9 @@ mod tests { let cfg = Arc::new(RollupConfig::default()); let mut channel_bank = ChannelBank::new(cfg, mock); let err = channel_bank.read().unwrap_err(); - assert_eq!(err, StageError::Eof); + assert_eq!(err, PipelineError::Eof.temp()); let err = channel_bank.next_data().await.unwrap_err(); - assert_eq!(err, StageError::NotEnoughData); + assert_eq!(err, PipelineError::NotEnoughData.temp()); } #[tokio::test] @@ -367,7 +367,7 @@ mod tests { // Ingest first frame let err = channel_bank.next_data().await.unwrap_err(); - assert_eq!(err, StageError::NotEnoughData); + assert_eq!(err, PipelineError::NotEnoughData.temp()); for _ in 0..cfg.channel_timeout + 1 { channel_bank.advance_origin().await.unwrap(); diff --git a/crates/derive/src/stages/channel_reader.rs b/crates/derive/src/stages/channel_reader.rs index c6189b4e0..347837b87 100644 --- a/crates/derive/src/stages/channel_reader.rs +++ b/crates/derive/src/stages/channel_reader.rs @@ -2,20 +2,18 @@ use crate::{ batch::Batch, - errors::{StageError, StageResult}, + errors::{PipelineError, PipelineResult}, stages::{decompress_brotli, BatchQueueProvider}, traits::{OriginAdvancer, OriginProvider, ResettableStage}, }; -use anyhow::anyhow; -use op_alloy_genesis::{RollupConfig, SystemConfig}; -use op_alloy_protocol::BlockInfo; - use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_primitives::Bytes; use alloy_rlp::Decodable; use async_trait::async_trait; use core::fmt::Debug; use miniz_oxide::inflate::decompress_to_vec_zlib; +use op_alloy_genesis::{RollupConfig, SystemConfig}; +use op_alloy_protocol::BlockInfo; use tracing::{debug, error, warn}; /// ZLIB Deflate Compression Method. @@ -34,7 +32,7 @@ pub trait ChannelReaderProvider { /// of the channel bank prior to loading data in (unlike most other stages). This is to /// ensure maintain consistency around channel bank pruning which depends upon the order /// of operations. 
-    async fn next_data(&mut self) -> StageResult<Option<Bytes>>;
+    async fn next_data(&mut self) -> PipelineResult<Option<Bytes>>;
 }
 
 /// [ChannelReader] is a stateful stage that does the following:
@@ -62,10 +60,10 @@ where
     }
 
     /// Creates the batch reader from available channel data.
-    async fn set_batch_reader(&mut self) -> StageResult<()> {
+    async fn set_batch_reader(&mut self) -> PipelineResult<()> {
         if self.next_batch.is_none() {
             let channel =
-                self.prev.next_data().await?.ok_or(StageError::Temporary(anyhow!("No channel")))?;
+                self.prev.next_data().await?.ok_or(PipelineError::ChannelReaderEmpty.temp())?;
             self.next_batch = Some(BatchReader::from(&channel[..]));
         }
         Ok(())
     }
@@ -83,7 +81,7 @@ impl
<P> OriginAdvancer for ChannelReader<P>
where P: ChannelReaderProvider + OriginAdvancer + OriginProvider + ResettableStage + Send + Debug, { - async fn advance_origin(&mut self) -> StageResult<()> { + async fn advance_origin(&mut self) -> PipelineResult<()> { self.prev.advance_origin().await } } @@ -93,7 +91,7 @@ impl
<P> BatchQueueProvider for ChannelReader<P>
 where
     P: ChannelReaderProvider + OriginAdvancer + OriginProvider + ResettableStage + Send + Debug,
 {
-    async fn next_batch(&mut self) -> StageResult<Batch> {
+    async fn next_batch(&mut self) -> PipelineResult<Batch> {
         crate::timer!(START, STAGE_ADVANCE_RESPONSE_TIME, &["channel_reader"], timer);
         if let Err(e) = self.set_batch_reader().await {
             debug!(target: "channel-reader", "Failed to set batch reader: {:?}", e);
@@ -106,7 +104,7 @@
             .as_mut()
             .expect("Cannot be None")
             .next_batch(self.cfg.as_ref())
-            .ok_or(StageError::NotEnoughData)
+            .ok_or(PipelineError::NotEnoughData.temp())
         {
             Ok(batch) => Ok(batch),
             Err(e) => {
@@ -132,7 +130,7 @@ impl
<P> ResettableStage for ChannelReader<P>
 where
     P: ChannelReaderProvider + OriginAdvancer + OriginProvider + ResettableStage + Debug + Send,
 {
-    async fn reset(&mut self, base: BlockInfo, cfg: &SystemConfig) -> StageResult<()> {
+    async fn reset(&mut self, base: BlockInfo, cfg: &SystemConfig) -> PipelineResult<()> {
         self.prev.reset(base, cfg).await?;
         self.next_channel();
         crate::inc!(STAGE_RESETS, &["channel-reader"]);
@@ -223,7 +221,7 @@ impl<T: Into<Vec<u8>>> From<T> for BatchReader {
 #[cfg(test)]
 mod test {
     use super::*;
-    use crate::stages::test_utils::MockChannelReaderProvider;
+    use crate::{errors::PipelineErrorKind, stages::test_utils::MockChannelReaderProvider};
     use alloc::vec;
 
     fn new_compressed_batch_data() -> Bytes {
@@ -236,9 +234,9 @@ mod test {
 
     #[tokio::test]
     async fn test_next_batch_batch_reader_set_fails() {
-        let mock = MockChannelReaderProvider::new(vec![Err(StageError::Eof)]);
+        let mock = MockChannelReaderProvider::new(vec![Err(PipelineError::Eof.temp())]);
         let mut reader = ChannelReader::new(mock, Arc::new(RollupConfig::default()));
-        assert_eq!(reader.next_batch().await, Err(StageError::Eof));
+        assert_eq!(reader.next_batch().await, Err(PipelineError::Eof.temp()));
         assert!(reader.next_batch.is_none());
     }
 
@@ -246,7 +244,10 @@ mod test {
     async fn test_next_batch_batch_reader_no_data() {
         let mock = MockChannelReaderProvider::new(vec![Ok(None)]);
         let mut reader = ChannelReader::new(mock, Arc::new(RollupConfig::default()));
-        assert!(matches!(reader.next_batch().await.unwrap_err(), StageError::Temporary(_)));
+        assert!(matches!(
+            reader.next_batch().await.unwrap_err(),
+            PipelineErrorKind::Temporary(PipelineError::ChannelReaderEmpty)
+        ));
         assert!(reader.next_batch.is_none());
     }
 
@@ -256,7 +257,7 @@ mod test {
         let second = first.split_to(first.len() / 2);
         let mock = MockChannelReaderProvider::new(vec![Ok(Some(first)), Ok(Some(second))]);
         let mut reader = ChannelReader::new(mock, Arc::new(RollupConfig::default()));
-        assert_eq!(reader.next_batch().await, Err(StageError::NotEnoughData));
+        assert_eq!(reader.next_batch().await, Err(PipelineError::NotEnoughData.temp()));
         assert!(reader.next_batch.is_none());
     }
 
diff --git a/crates/derive/src/stages/frame_queue.rs b/crates/derive/src/stages/frame_queue.rs
index 067f2eebf..d7886e378 100644
--- a/crates/derive/src/stages/frame_queue.rs
+++ b/crates/derive/src/stages/frame_queue.rs
@@ -1,13 +1,12 @@
 //! This module contains the [FrameQueue] stage of the derivation pipeline.
 
 use crate::{
-    errors::{into_frames, StageError, StageResult},
+    errors::{PipelineError, PipelineResult},
     stages::ChannelBankProvider,
     traits::{OriginAdvancer, OriginProvider, ResettableStage},
 };
 use alloc::{boxed::Box, collections::VecDeque};
 use alloy_primitives::Bytes;
-use anyhow::anyhow;
 use async_trait::async_trait;
 use core::fmt::Debug;
 use op_alloy_genesis::SystemConfig;
@@ -23,7 +22,7 @@ pub trait FrameQueueProvider {
     /// Retrieves the next data item from the L1 retrieval stage.
     /// If there is data, it pushes it into the next stage.
     /// If there is no data, it returns an error.
-    async fn next_data(&mut self) -> StageResult<Self::Item>;
+    async fn next_data(&mut self) -> PipelineResult<Self::Item>;
 }
 
 /// The [FrameQueue] stage of the derivation pipeline.
@@ -59,7 +58,7 @@ impl
<P> OriginAdvancer for FrameQueue<P>
where P: FrameQueueProvider + OriginAdvancer + OriginProvider + ResettableStage + Send + Debug, { - async fn advance_origin(&mut self) -> StageResult<()> { + async fn advance_origin(&mut self) -> PipelineResult<()> { self.prev.advance_origin().await } } @@ -69,11 +68,11 @@ impl
<P> ChannelBankProvider for FrameQueue<P>
where P: FrameQueueProvider + OriginAdvancer + OriginProvider + ResettableStage + Send + Debug, { - async fn next_frame(&mut self) -> StageResult { + async fn next_frame(&mut self) -> PipelineResult { if self.queue.is_empty() { match self.prev.next_data().await { Ok(data) => { - if let Ok(frames) = into_frames(Ok(data)) { + if let Ok(frames) = Frame::parse_frames(&data.into()) { crate::inc!(DERIVED_FRAMES_COUNT, frames.len() as f64, &["success"]); self.queue.extend(frames); } else { @@ -93,10 +92,10 @@ where // If we did not add more frames but still have more data, retry this function. if self.queue.is_empty() { trace!(target: "frame-queue", "Queue is empty after fetching data. Retrying next_frame."); - return Err(StageError::NotEnoughData); + return Err(PipelineError::NotEnoughData.temp()); } - self.queue.pop_front().ok_or_else(|| anyhow!("Frame queue is impossibly empty.").into()) + Ok(self.queue.pop_front().expect("Frame queue impossibly empty")) } } @@ -118,7 +117,7 @@ where &mut self, block_info: BlockInfo, system_config: &SystemConfig, - ) -> StageResult<()> { + ) -> PipelineResult<()> { self.prev.reset(block_info, system_config).await?; self.queue = VecDeque::default(); crate::inc!(STAGE_RESETS, &["frame-queue"]); @@ -160,16 +159,16 @@ pub(crate) mod tests { let mock = MockFrameQueueProvider { data }; let mut frame_queue = FrameQueue::new(mock); let err = frame_queue.next_frame().await.unwrap_err(); - assert_eq!(err, StageError::NotEnoughData); + assert_eq!(err, PipelineError::NotEnoughData.temp()); } #[tokio::test] async fn test_frame_queue_no_frames_decoded() { - let data = vec![Err(StageError::Eof), Ok(Bytes::default())]; + let data = vec![Err(PipelineError::Eof.temp()), Ok(Bytes::default())]; let mock = MockFrameQueueProvider { data }; let mut frame_queue = FrameQueue::new(mock); let err = frame_queue.next_frame().await.unwrap_err(); - assert_eq!(err, StageError::NotEnoughData); + assert_eq!(err, PipelineError::NotEnoughData.temp()); } #[tokio::test] @@ -178,7 +177,7 @@ pub(crate) mod tests { let mock = MockFrameQueueProvider { data }; let mut frame_queue = FrameQueue::new(mock); let err = frame_queue.next_frame().await.unwrap_err(); - assert_eq!(err, StageError::NotEnoughData); + assert_eq!(err, PipelineError::NotEnoughData.temp()); } #[tokio::test] @@ -187,7 +186,7 @@ pub(crate) mod tests { let mock = MockFrameQueueProvider { data }; let mut frame_queue = FrameQueue::new(mock); let err = frame_queue.next_frame().await.unwrap_err(); - assert_eq!(err, StageError::NotEnoughData); + assert_eq!(err, PipelineError::NotEnoughData.temp()); } #[tokio::test] @@ -199,7 +198,7 @@ pub(crate) mod tests { let frame = new_test_frames(1); assert_eq!(frame[0], frame_decoded); let err = frame_queue.next_frame().await.unwrap_err(); - assert_eq!(err, StageError::Eof); + assert_eq!(err, PipelineError::Eof.temp()); } #[tokio::test] @@ -212,6 +211,6 @@ pub(crate) mod tests { assert_eq!(frame_decoded.number, i); } let err = frame_queue.next_frame().await.unwrap_err(); - assert_eq!(err, StageError::Eof); + assert_eq!(err, PipelineError::Eof.temp()); } } diff --git a/crates/derive/src/stages/l1_retrieval.rs b/crates/derive/src/stages/l1_retrieval.rs index f98e1fa31..e25ec64fe 100644 --- a/crates/derive/src/stages/l1_retrieval.rs +++ b/crates/derive/src/stages/l1_retrieval.rs @@ -1,7 +1,7 @@ //! Contains the [L1Retrieval] stage of the derivation pipeline. 
use crate::{ - errors::{StageError, StageResult}, + errors::{PipelineError, PipelineErrorKind, PipelineResult}, stages::FrameQueueProvider, traits::{ AsyncIterator, DataAvailabilityProvider, OriginAdvancer, OriginProvider, ResettableStage, @@ -9,7 +9,6 @@ use crate::{ }; use alloc::boxed::Box; use alloy_primitives::Address; -use anyhow::anyhow; use async_trait::async_trait; use op_alloy_genesis::SystemConfig; use op_alloy_protocol::BlockInfo; @@ -21,10 +20,10 @@ pub trait L1RetrievalProvider { /// Returns the next L1 [BlockInfo] in the [L1Traversal] stage, if the stage is not complete. /// This function can only be called once while the stage is in progress, and will return /// [`None`] on subsequent calls unless the stage is reset or complete. If the stage is - /// complete and the [BlockInfo] has been consumed, an [StageError::Eof] error is returned. + /// complete and the [BlockInfo] has been consumed, an [PipelineError::Eof] error is returned. /// /// [L1Traversal]: crate::stages::L1Traversal - async fn next_l1_block(&mut self) -> StageResult>; + async fn next_l1_block(&mut self) -> PipelineResult>; /// Returns the batcher [Address] from the [op_alloy_genesis::SystemConfig]. fn batcher_addr(&self) -> Address; @@ -73,7 +72,7 @@ where DAP: DataAvailabilityProvider + Send, P: L1RetrievalProvider + OriginAdvancer + OriginProvider + ResettableStage + Send, { - async fn advance_origin(&mut self) -> StageResult<()> { + async fn advance_origin(&mut self) -> PipelineResult<()> { self.prev.advance_origin().await } } @@ -86,23 +85,24 @@ where { type Item = DAP::Item; - async fn next_data(&mut self) -> StageResult { + async fn next_data(&mut self) -> PipelineResult { if self.data.is_none() { let next = self .prev .next_l1_block() .await? // SAFETY: This question mark bubbles up the Eof error. - .ok_or_else(|| anyhow!("No block to retrieve data from"))?; + .ok_or(PipelineError::MissingL1Data.temp())?; self.data = Some(self.provider.open_data(&next).await?); } match self.data.as_mut().expect("Cannot be None").next().await { Ok(data) => Ok(data), - Err(StageError::Eof) => { - self.data = None; - Err(StageError::Eof) + Err(e) => { + if let PipelineErrorKind::Temporary(PipelineError::Eof) = e { + self.data = None; + } + Err(e) } - Err(e) => Err(e), } } } @@ -123,7 +123,7 @@ where DAP: DataAvailabilityProvider + Send, P: L1RetrievalProvider + OriginAdvancer + OriginProvider + ResettableStage + Send, { - async fn reset(&mut self, base: BlockInfo, cfg: &SystemConfig) -> StageResult<()> { + async fn reset(&mut self, base: BlockInfo, cfg: &SystemConfig) -> PipelineResult<()> { self.prev.reset(base, cfg).await?; self.data = Some(self.provider.open_data(&base).await?); crate::inc!(STAGE_RESETS, &["l1-retrieval"]); @@ -153,7 +153,7 @@ mod tests { #[tokio::test] async fn test_l1_retrieval_next_data() { let traversal = new_populated_test_traversal(); - let results = vec![Err(StageError::Eof), Ok(Bytes::default())]; + let results = vec![Err(PipelineError::Eof.temp()), Ok(Bytes::default())]; let dap = TestDAP { results, batch_inbox_address: Address::default() }; let mut retrieval = L1Retrieval::new(traversal, dap); assert_eq!(retrieval.data, None); @@ -166,7 +166,7 @@ mod tests { assert_eq!(retrieval_data.open_data_calls[0].1, Address::default()); // Data should be reset to none and the error should be bubbled up. 
let data = retrieval.next_data().await.unwrap_err(); - assert_eq!(data, StageError::Eof); + assert_eq!(data, PipelineError::Eof.temp()); assert!(retrieval.data.is_none()); } @@ -193,13 +193,13 @@ mod tests { async fn test_l1_retrieval_existing_data_errors() { let data = TestIter { open_data_calls: vec![(BlockInfo::default(), Address::default())], - results: vec![Err(StageError::Eof)], + results: vec![Err(PipelineError::Eof.temp())], }; let traversal = new_populated_test_traversal(); let dap = TestDAP { results: vec![], batch_inbox_address: Address::default() }; let mut retrieval = L1Retrieval { prev: traversal, provider: dap, data: Some(data) }; let data = retrieval.next_data().await.unwrap_err(); - assert_eq!(data, StageError::Eof); + assert_eq!(data, PipelineError::Eof.temp()); assert!(retrieval.data.is_none()); } } diff --git a/crates/derive/src/stages/l1_traversal.rs b/crates/derive/src/stages/l1_traversal.rs index 92d17aa2c..2f0a6fab6 100644 --- a/crates/derive/src/stages/l1_traversal.rs +++ b/crates/derive/src/stages/l1_traversal.rs @@ -1,11 +1,11 @@ //! Contains the [L1Traversal] stage of the derivation pipeline. use crate::{ - errors::{StageError, StageResult}, + errors::{PipelineError, PipelineResult, ResetError}, stages::L1RetrievalProvider, traits::{ChainProvider, OriginAdvancer, OriginProvider, ResettableStage}, }; -use alloc::{boxed::Box, sync::Arc}; +use alloc::{boxed::Box, string::ToString, sync::Arc}; use alloy_primitives::Address; use async_trait::async_trait; use op_alloy_genesis::{RollupConfig, SystemConfig}; @@ -39,12 +39,12 @@ impl L1RetrievalProvider for L1Traversal { self.system_config.batcher_address } - async fn next_l1_block(&mut self) -> StageResult> { + async fn next_l1_block(&mut self) -> PipelineResult> { if !self.done { self.done = true; Ok(self.block) } else { - Err(StageError::Eof) + Err(PipelineError::Eof.temp()) } } } @@ -73,30 +73,30 @@ impl OriginAdvancer for L1Traversal { /// Advances the internal state of the [L1Traversal] stage to the next L1 block. /// This function fetches the next L1 [BlockInfo] from the data source and updates the /// [SystemConfig] with the receipts from the block. - async fn advance_origin(&mut self) -> StageResult<()> { + async fn advance_origin(&mut self) -> PipelineResult<()> { // Pull the next block or return EOF. - // StageError::EOF has special handling further up the pipeline. + // PipelineError::EOF has special handling further up the pipeline. let block = match self.block { Some(block) => block, None => { warn!(target: "l1-traversal", "Missing current block, can't advance origin with no reference."); - return Err(StageError::Eof); + return Err(PipelineError::Eof.temp()); } }; let next_l1_origin = match self.data_source.block_info_by_number(block.number + 1).await { Ok(block) => block, - Err(e) => return Err(StageError::BlockInfoFetch(e)), + Err(e) => return Err(PipelineError::Provider(e.to_string()).temp()), }; // Check block hashes for reorgs. if block.hash != next_l1_origin.parent_hash { - return Err(StageError::ReorgDetected(block.hash, next_l1_origin.parent_hash)); + return Err(ResetError::ReorgDetected(block.hash, next_l1_origin.parent_hash).into()); } // Fetch receipts for the next l1 block and update the system config. 
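One behavioral detail worth calling out in the `next_data` rewrite above: the cached data iterator is dropped only on a temporary EOF, while every other error leaves it in place and is surfaced unchanged. A condensed sketch of that control flow, reusing the enums sketched earlier (`cached` is a stand-in for the stage's `data` field):

```rust
/// Drop the cached source only on a *temporary* EOF so the next call
/// re-opens data for the next L1 block; all other errors pass through.
fn handle_iter_error<I>(cached: &mut Option<I>, err: PipelineErrorKind) -> PipelineErrorKind {
    if let PipelineErrorKind::Temporary(PipelineError::Eof) = err {
        *cached = None;
    }
    err
}
```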
let receipts = match self.data_source.receipts_by_hash(next_l1_origin.hash).await { Ok(receipts) => receipts, - Err(e) => return Err(StageError::ReceiptFetch(e)), + Err(e) => return Err(PipelineError::Provider(e.to_string()).temp()), }; if let Err(e) = self.system_config.update_with_receipts( @@ -104,7 +104,7 @@ impl OriginAdvancer for L1Traversal { &self.rollup_config, next_l1_origin.timestamp, ) { - return Err(StageError::SystemConfigUpdate(e)); + return Err(PipelineError::SystemConfigUpdate(e).crit()); } crate::set!(ORIGIN_GAUGE, next_l1_origin.number as i64); @@ -122,7 +122,7 @@ impl OriginProvider for L1Traversal { #[async_trait] impl ResettableStage for L1Traversal { - async fn reset(&mut self, base: BlockInfo, cfg: &SystemConfig) -> StageResult<()> { + async fn reset(&mut self, base: BlockInfo, cfg: &SystemConfig) -> PipelineResult<()> { self.block = Some(base); self.done = false; self.system_config = *cfg; @@ -135,6 +135,7 @@ impl ResettableStage for L1Traversal { pub(crate) mod tests { use super::*; use crate::{ + errors::PipelineErrorKind, params::{CONFIG_UPDATE_EVENT_VERSION_0, CONFIG_UPDATE_TOPIC}, traits::test_utils::TestChainProvider, }; @@ -204,7 +205,7 @@ pub(crate) mod tests { let receipts = new_receipts(); let mut traversal = new_test_traversal(blocks, receipts); assert_eq!(traversal.next_l1_block().await.unwrap(), Some(BlockInfo::default())); - assert_eq!(traversal.next_l1_block().await.unwrap_err(), StageError::Eof); + assert_eq!(traversal.next_l1_block().await.unwrap_err(), PipelineError::Eof.temp()); assert!(traversal.advance_origin().await.is_ok()); } @@ -213,8 +214,11 @@ pub(crate) mod tests { let blocks = vec![BlockInfo::default(), BlockInfo::default()]; let mut traversal = new_test_traversal(blocks, vec![]); assert_eq!(traversal.next_l1_block().await.unwrap(), Some(BlockInfo::default())); - assert_eq!(traversal.next_l1_block().await.unwrap_err(), StageError::Eof); - matches!(traversal.advance_origin().await.unwrap_err(), StageError::ReceiptFetch(_)); + assert_eq!(traversal.next_l1_block().await.unwrap_err(), PipelineError::Eof.temp()); + matches!( + traversal.advance_origin().await.unwrap_err(), + PipelineErrorKind::Temporary(PipelineError::Provider(_)) + ); } #[tokio::test] @@ -226,15 +230,18 @@ pub(crate) mod tests { let mut traversal = new_test_traversal(blocks, receipts); assert!(traversal.advance_origin().await.is_ok()); let err = traversal.advance_origin().await.unwrap_err(); - assert_eq!(err, StageError::ReorgDetected(block.hash, block.parent_hash)); + assert_eq!(err, ResetError::ReorgDetected(block.hash, block.parent_hash).into()); } #[tokio::test] async fn test_l1_traversal_missing_blocks() { let mut traversal = new_test_traversal(vec![], vec![]); assert_eq!(traversal.next_l1_block().await.unwrap(), Some(BlockInfo::default())); - assert_eq!(traversal.next_l1_block().await.unwrap_err(), StageError::Eof); - matches!(traversal.advance_origin().await.unwrap_err(), StageError::BlockInfoFetch(_)); + assert_eq!(traversal.next_l1_block().await.unwrap_err(), PipelineError::Eof.temp()); + matches!( + traversal.advance_origin().await.unwrap_err(), + PipelineErrorKind::Temporary(PipelineError::Provider(_)) + ); } #[tokio::test] @@ -250,7 +257,7 @@ pub(crate) mod tests { // Only the second block should fail since the second receipt // contains invalid logs that will error for a system config update. 
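Taken together, `advance_origin` above settles on one severity per failure site: provider and RPC errors are temporary, a parent-hash mismatch is a reset, and a failed `SystemConfig` update is critical, since it would fail identically on every retry. A hedged sketch of the first two mappings; `FetchOutcome` and `classify` are illustrative stand-ins, not crate items:

```rust
use alloy_primitives::B256;
use core::fmt::Display;

/// Illustrative stand-in for the failure sites in `advance_origin`.
enum FetchOutcome<E> {
    /// The data source failed to serve the request.
    ProviderFailure(E),
    /// The fetched block does not build on the current head.
    ParentMismatch { current: B256, expected: B256 },
}

fn classify<E: Display>(outcome: FetchOutcome<E>) -> PipelineErrorKind {
    match outcome {
        // Transient: stringify via `Display` and retry on the next tick.
        FetchOutcome::ProviderFailure(e) => PipelineError::Provider(e.to_string()).temp(),
        // The L1 chain reorganized underneath us: derivation must reset.
        FetchOutcome::ParentMismatch { current, expected } => {
            ResetError::ReorgDetected(current, expected).into()
        }
    }
}
```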
let err = traversal.advance_origin().await.unwrap_err(); - matches!(err, StageError::SystemConfigUpdate(_)); + matches!(err, PipelineErrorKind::Critical(PipelineError::SystemConfigUpdate(_))); } #[tokio::test] @@ -259,7 +266,7 @@ pub(crate) mod tests { let receipts = new_receipts(); let mut traversal = new_test_traversal(blocks, receipts); assert_eq!(traversal.next_l1_block().await.unwrap(), Some(BlockInfo::default())); - assert_eq!(traversal.next_l1_block().await.unwrap_err(), StageError::Eof); + assert_eq!(traversal.next_l1_block().await.unwrap_err(), PipelineError::Eof.temp()); assert!(traversal.advance_origin().await.is_ok()); let expected = address!("000000000000000000000000000000000000bEEF"); assert_eq!(traversal.system_config.batcher_address, expected); diff --git a/crates/derive/src/stages/test_utils/attributes_queue.rs b/crates/derive/src/stages/test_utils/attributes_queue.rs index 93729a816..6ad6fe07d 100644 --- a/crates/derive/src/stages/test_utils/attributes_queue.rs +++ b/crates/derive/src/stages/test_utils/attributes_queue.rs @@ -2,11 +2,11 @@ use crate::{ batch::SingleBatch, - errors::{BuilderError, StageError, StageResult}, + errors::{BuilderError, PipelineError, PipelineErrorKind, PipelineResult}, stages::attributes_queue::{AttributesBuilder, AttributesProvider}, traits::{OriginAdvancer, OriginProvider, ResettableStage}, }; -use alloc::{boxed::Box, vec::Vec}; +use alloc::{boxed::Box, string::ToString, vec::Vec}; use alloy_eips::BlockNumHash; use async_trait::async_trait; use op_alloy_genesis::SystemConfig; @@ -27,11 +27,13 @@ impl AttributesBuilder for MockAttributesBuilder { &mut self, _l2_parent: L2BlockInfo, _epoch: BlockNumHash, - ) -> Result { + ) -> PipelineResult { match self.attributes.pop() { Some(Ok(attrs)) => Ok(attrs), - Some(Err(err)) => Err(BuilderError::Custom(err)), - None => Err(BuilderError::Custom(anyhow::anyhow!("no attributes available"))), + Some(Err(err)) => { + Err(PipelineErrorKind::Temporary(BuilderError::Custom(err.to_string()).into())) + } + None => Err(PipelineErrorKind::Critical(BuilderError::AttributesUnavailable.into())), } } } @@ -42,7 +44,7 @@ pub struct MockAttributesProvider { /// The origin of the L1 block. origin: Option, /// A list of batches to return. - batches: Vec>, + batches: Vec>, } impl OriginProvider for MockAttributesProvider { @@ -53,22 +55,22 @@ impl OriginProvider for MockAttributesProvider { #[async_trait] impl OriginAdvancer for MockAttributesProvider { - async fn advance_origin(&mut self) -> StageResult<()> { + async fn advance_origin(&mut self) -> PipelineResult<()> { Ok(()) } } #[async_trait] impl ResettableStage for MockAttributesProvider { - async fn reset(&mut self, _base: BlockInfo, _cfg: &SystemConfig) -> StageResult<()> { + async fn reset(&mut self, _base: BlockInfo, _cfg: &SystemConfig) -> PipelineResult<()> { Ok(()) } } #[async_trait] impl AttributesProvider for MockAttributesProvider { - async fn next_batch(&mut self, _parent: L2BlockInfo) -> StageResult { - self.batches.pop().ok_or(StageError::Eof)? + async fn next_batch(&mut self, _parent: L2BlockInfo) -> PipelineResult { + self.batches.pop().ok_or(PipelineError::Eof.temp())? } fn is_last_in_span(&self) -> bool { @@ -79,7 +81,7 @@ impl AttributesProvider for MockAttributesProvider { /// Creates a new [`MockAttributesProvider`] with the given origin and batches. 
pub fn new_attributes_provider( origin: Option, - batches: Vec>, + batches: Vec>, ) -> MockAttributesProvider { MockAttributesProvider { origin, batches } } diff --git a/crates/derive/src/stages/test_utils/batch_queue.rs b/crates/derive/src/stages/test_utils/batch_queue.rs index 1baa39348..036c97e3c 100644 --- a/crates/derive/src/stages/test_utils/batch_queue.rs +++ b/crates/derive/src/stages/test_utils/batch_queue.rs @@ -2,7 +2,7 @@ use crate::{ batch::Batch, - errors::{StageError, StageResult}, + errors::{PipelineError, PipelineResult}, stages::batch_queue::BatchQueueProvider, traits::{OriginAdvancer, OriginProvider, ResettableStage}, }; @@ -17,12 +17,12 @@ pub struct MockBatchQueueProvider { /// The origin of the L1 block. pub origin: Option, /// A list of batches to return. - pub batches: Vec>, + pub batches: Vec>, } impl MockBatchQueueProvider { /// Creates a new [MockBatchQueueProvider] with the given origin and batches. - pub fn new(batches: Vec>) -> Self { + pub fn new(batches: Vec>) -> Self { Self { origin: Some(BlockInfo::default()), batches } } } @@ -35,21 +35,21 @@ impl OriginProvider for MockBatchQueueProvider { #[async_trait] impl BatchQueueProvider for MockBatchQueueProvider { - async fn next_batch(&mut self) -> StageResult { - self.batches.pop().ok_or(StageError::Eof)? + async fn next_batch(&mut self) -> PipelineResult { + self.batches.pop().ok_or(PipelineError::Eof.temp())? } } #[async_trait] impl OriginAdvancer for MockBatchQueueProvider { - async fn advance_origin(&mut self) -> StageResult<()> { + async fn advance_origin(&mut self) -> PipelineResult<()> { Ok(()) } } #[async_trait] impl ResettableStage for MockBatchQueueProvider { - async fn reset(&mut self, _base: BlockInfo, _cfg: &SystemConfig) -> StageResult<()> { + async fn reset(&mut self, _base: BlockInfo, _cfg: &SystemConfig) -> PipelineResult<()> { Ok(()) } } diff --git a/crates/derive/src/stages/test_utils/channel_bank.rs b/crates/derive/src/stages/test_utils/channel_bank.rs index 5da07b721..18efec24d 100644 --- a/crates/derive/src/stages/test_utils/channel_bank.rs +++ b/crates/derive/src/stages/test_utils/channel_bank.rs @@ -1,7 +1,7 @@ //! Mock testing utilities for the [ChannelBank] stage. use crate::{ - errors::{StageError, StageResult}, + errors::{PipelineError, PipelineResult}, stages::ChannelBankProvider, traits::{OriginAdvancer, OriginProvider, ResettableStage}, }; @@ -14,14 +14,14 @@ use op_alloy_protocol::{BlockInfo, Frame}; #[derive(Debug, Default)] pub struct MockChannelBankProvider { /// The data to return. - pub data: Vec>, + pub data: Vec>, /// The block info pub block_info: Option, } impl MockChannelBankProvider { /// Creates a new [MockChannelBankProvider] with the given data. 
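All of these mocks follow the same convention: queued results pop from the back of the `Vec`, and a drained queue falls back to a temporary EOF, as in the `next_frame` impl just below. A small illustrative test, assuming `PipelineErrorKind` derives `PartialEq` as sketched earlier:

```rust
#[tokio::test]
async fn mock_channel_bank_drains_then_eofs() {
    // An empty queue immediately yields EOF via `unwrap_or`.
    let mut mock = MockChannelBankProvider::new(vec![]);
    assert_eq!(mock.next_frame().await.unwrap_err(), PipelineError::Eof.temp());
}
```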
- pub fn new(data: Vec>) -> Self { + pub fn new(data: Vec>) -> Self { Self { data, block_info: Some(BlockInfo::default()) } } } @@ -34,7 +34,7 @@ impl OriginProvider for MockChannelBankProvider { #[async_trait] impl OriginAdvancer for MockChannelBankProvider { - async fn advance_origin(&mut self) -> StageResult<()> { + async fn advance_origin(&mut self) -> PipelineResult<()> { self.block_info = self.block_info.map(|mut bi| { bi.number += 1; bi @@ -45,14 +45,14 @@ impl OriginAdvancer for MockChannelBankProvider { #[async_trait] impl ChannelBankProvider for MockChannelBankProvider { - async fn next_frame(&mut self) -> StageResult { - self.data.pop().unwrap_or(Err(StageError::Eof)) + async fn next_frame(&mut self) -> PipelineResult { + self.data.pop().unwrap_or(Err(PipelineError::Eof.temp())) } } #[async_trait] impl ResettableStage for MockChannelBankProvider { - async fn reset(&mut self, _base: BlockInfo, _cfg: &SystemConfig) -> StageResult<()> { + async fn reset(&mut self, _base: BlockInfo, _cfg: &SystemConfig) -> PipelineResult<()> { Ok(()) } } diff --git a/crates/derive/src/stages/test_utils/channel_reader.rs b/crates/derive/src/stages/test_utils/channel_reader.rs index ce1fa7e68..df7461d49 100644 --- a/crates/derive/src/stages/test_utils/channel_reader.rs +++ b/crates/derive/src/stages/test_utils/channel_reader.rs @@ -1,7 +1,7 @@ //! Test utilities for the [ChannelReader] stage. use crate::{ - errors::{StageError, StageResult}, + errors::{PipelineError, PipelineResult}, stages::ChannelReaderProvider, traits::{OriginAdvancer, OriginProvider, ResettableStage}, }; @@ -15,14 +15,14 @@ use op_alloy_protocol::BlockInfo; #[derive(Debug, Default)] pub struct MockChannelReaderProvider { /// The data to return. - pub data: Vec>>, + pub data: Vec>>, /// The origin block info pub block_info: Option, } impl MockChannelReaderProvider { /// Creates a new [MockChannelReaderProvider] with the given data. - pub fn new(data: Vec>>) -> Self { + pub fn new(data: Vec>>) -> Self { Self { data, block_info: Some(BlockInfo::default()) } } } @@ -35,21 +35,21 @@ impl OriginProvider for MockChannelReaderProvider { #[async_trait] impl OriginAdvancer for MockChannelReaderProvider { - async fn advance_origin(&mut self) -> StageResult<()> { + async fn advance_origin(&mut self) -> PipelineResult<()> { Ok(()) } } #[async_trait] impl ChannelReaderProvider for MockChannelReaderProvider { - async fn next_data(&mut self) -> StageResult> { - self.data.pop().unwrap_or(Err(StageError::Eof)) + async fn next_data(&mut self) -> PipelineResult> { + self.data.pop().unwrap_or(Err(PipelineError::Eof.temp())) } } #[async_trait] impl ResettableStage for MockChannelReaderProvider { - async fn reset(&mut self, _base: BlockInfo, _cfg: &SystemConfig) -> StageResult<()> { + async fn reset(&mut self, _base: BlockInfo, _cfg: &SystemConfig) -> PipelineResult<()> { Ok(()) } } diff --git a/crates/derive/src/stages/test_utils/frame_queue.rs b/crates/derive/src/stages/test_utils/frame_queue.rs index 6d8491003..f31f6dd74 100644 --- a/crates/derive/src/stages/test_utils/frame_queue.rs +++ b/crates/derive/src/stages/test_utils/frame_queue.rs @@ -1,7 +1,7 @@ //! Mock types for the [FrameQueue] stage. use crate::{ - errors::{StageError, StageResult}, + errors::{PipelineError, PipelineResult}, stages::FrameQueueProvider, traits::{OriginAdvancer, OriginProvider, ResettableStage}, }; @@ -15,12 +15,12 @@ use op_alloy_protocol::BlockInfo; #[derive(Debug, Default)] pub struct MockFrameQueueProvider { /// The data to return. 
- pub data: Vec>, + pub data: Vec>, } impl MockFrameQueueProvider { /// Creates a new [MockFrameQueueProvider] with the given data. - pub fn new(data: Vec>) -> Self { + pub fn new(data: Vec>) -> Self { Self { data } } } @@ -33,7 +33,7 @@ impl OriginProvider for MockFrameQueueProvider { #[async_trait] impl OriginAdvancer for MockFrameQueueProvider { - async fn advance_origin(&mut self) -> StageResult<()> { + async fn advance_origin(&mut self) -> PipelineResult<()> { Ok(()) } } @@ -42,14 +42,14 @@ impl OriginAdvancer for MockFrameQueueProvider { impl FrameQueueProvider for MockFrameQueueProvider { type Item = Bytes; - async fn next_data(&mut self) -> StageResult { - self.data.pop().unwrap_or(Err(StageError::Eof)) + async fn next_data(&mut self) -> PipelineResult { + self.data.pop().unwrap_or(Err(PipelineError::Eof.temp())) } } #[async_trait] impl ResettableStage for MockFrameQueueProvider { - async fn reset(&mut self, _base: BlockInfo, _cfg: &SystemConfig) -> StageResult<()> { + async fn reset(&mut self, _base: BlockInfo, _cfg: &SystemConfig) -> PipelineResult<()> { Ok(()) } } diff --git a/crates/derive/src/stages/test_utils/sys_config_fetcher.rs b/crates/derive/src/stages/test_utils/sys_config_fetcher.rs index 5829d442e..b11dd054a 100644 --- a/crates/derive/src/stages/test_utils/sys_config_fetcher.rs +++ b/crates/derive/src/stages/test_utils/sys_config_fetcher.rs @@ -30,6 +30,8 @@ impl MockSystemConfigL2Fetcher { #[async_trait] impl L2ChainProvider for MockSystemConfigL2Fetcher { + type Error = anyhow::Error; + async fn system_config_by_number( &mut self, number: u64, diff --git a/crates/derive/src/stages/utils.rs b/crates/derive/src/stages/utils.rs index ac549146d..7d23a271b 100644 --- a/crates/derive/src/stages/utils.rs +++ b/crates/derive/src/stages/utils.rs @@ -1,15 +1,14 @@ //! Stage Utilities -use crate::batch::FJORD_MAX_SPAN_BATCH_BYTES; +use crate::{batch::FJORD_MAX_SPAN_BATCH_BYTES, ensure, errors::BatchDecompressionError}; use alloc::{vec, vec::Vec}; use alloc_no_stdlib::*; -use anyhow::{ensure, Result}; use brotli::*; use core::ops; /// Decompresses the given bytes data using the Brotli decompressor implemented /// in the [`brotli`](https://crates.io/crates/brotli) crate. -pub fn decompress_brotli(data: &[u8]) -> Result> { +pub fn decompress_brotli(data: &[u8]) -> Result, BatchDecompressionError> { declare_stack_allocator_struct!(MemPool, 4096, stack); let mut u8_buffer = vec![0; 32 * 1024 * 1024].into_boxed_slice(); @@ -47,7 +46,10 @@ pub fn decompress_brotli(data: &[u8]) -> Result> { let old_len = output.len(); let new_len = old_len * 2; - ensure!(new_len as u64 <= FJORD_MAX_SPAN_BATCH_BYTES, "Output buffer too large"); + ensure!( + new_len as u64 <= FJORD_MAX_SPAN_BATCH_BYTES, + BatchDecompressionError::BatchTooLarge + ); output.resize(new_len, 0); available_out += old_len; diff --git a/crates/derive/src/traits/attributes.rs b/crates/derive/src/traits/attributes.rs index 8bf79ad09..1b6751556 100644 --- a/crates/derive/src/traits/attributes.rs +++ b/crates/derive/src/traits/attributes.rs @@ -5,7 +5,7 @@ use async_trait::async_trait; use op_alloy_protocol::L2BlockInfo; use op_alloy_rpc_types_engine::OptimismAttributesWithParent; -use crate::errors::StageResult; +use crate::errors::PipelineResult; /// [NextAttributes] defines the interface for pulling attributes from /// the top level `AttributesQueue` stage of the pipeline. 
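Since `anyhow::ensure!` is gone, the `ensure` imported above must now be a crate-local macro that early-returns a caller-supplied typed error. A minimal sketch of such a macro; the crate's actual definition may differ in attributes or spelling:

```rust
/// Early-returns `Err($err)` when `$cond` is false, mirroring
/// `anyhow::ensure!` but with a caller-chosen error type.
#[macro_export]
macro_rules! ensure {
    ($cond:expr, $err:expr) => {
        if !($cond) {
            return Err($err);
        }
    };
}
```

Under that definition, the `ensure!` call in `decompress_brotli` expands to a plain early `return Err(BatchDecompressionError::BatchTooLarge)` once the output buffer would exceed `FJORD_MAX_SPAN_BATCH_BYTES`.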
@@ -15,5 +15,5 @@ pub trait NextAttributes { async fn next_attributes( &mut self, parent: L2BlockInfo, - ) -> StageResult; + ) -> PipelineResult; } diff --git a/crates/derive/src/traits/data_sources.rs b/crates/derive/src/traits/data_sources.rs index 40cc8e0fc..265324d2b 100644 --- a/crates/derive/src/traits/data_sources.rs +++ b/crates/derive/src/traits/data_sources.rs @@ -1,25 +1,27 @@ //! Contains traits that describe the functionality of various data sources used in the derivation //! pipeline's stages. +use crate::errors::PipelineResult; use alloc::{boxed::Box, fmt::Debug, vec::Vec}; use alloy_eips::eip4844::Blob; use alloy_primitives::Bytes; -use anyhow::Result; use async_trait::async_trait; +use core::fmt::Display; use kona_primitives::IndexedBlobHash; use op_alloy_protocol::BlockInfo; -use crate::errors::{BlobProviderError, StageResult}; - /// The BlobProvider trait specifies the functionality of a data source that can provide blobs. #[async_trait] pub trait BlobProvider { + /// The error type for the [BlobProvider]. + type Error: Display; + /// Fetches blobs for a given block ref and the blob hashes. async fn get_blobs( &mut self, block_ref: &BlockInfo, blob_hashes: &[IndexedBlobHash], - ) -> Result, BlobProviderError>; + ) -> Result, Self::Error>; } /// Describes the functionality of a data source that can provide data availability information. @@ -32,7 +34,7 @@ pub trait DataAvailabilityProvider { /// Returns the data availability for the block with the given hash, or an error if the block /// does not exist in the data source. - async fn open_data(&self, block_ref: &BlockInfo) -> Result; + async fn open_data(&self, block_ref: &BlockInfo) -> PipelineResult; } /// A simple asynchronous iterator trait. @@ -42,7 +44,7 @@ pub trait AsyncIterator { /// The item type of the iterator. type Item: Send + Sync + Debug + Into; - /// Returns the next item in the iterator, or [crate::errors::StageError::Eof] if the iterator - /// is exhausted. - async fn next(&mut self) -> StageResult; + /// Returns the next item in the iterator, or [crate::errors::PipelineError::Eof] if the + /// iterator is exhausted. + async fn next(&mut self) -> PipelineResult; } diff --git a/crates/derive/src/traits/pipeline.rs b/crates/derive/src/traits/pipeline.rs index 06012def1..3732d81eb 100644 --- a/crates/derive/src/traits/pipeline.rs +++ b/crates/derive/src/traits/pipeline.rs @@ -1,14 +1,13 @@ //! Defines the interface for the core derivation pipeline. +use super::OriginProvider; +use crate::errors::{PipelineErrorKind, PipelineResult}; use alloc::boxed::Box; use async_trait::async_trait; use core::iter::Iterator; use op_alloy_protocol::{BlockInfo, L2BlockInfo}; use op_alloy_rpc_types_engine::OptimismAttributesWithParent; -use super::OriginProvider; -use crate::errors::StageError; - /// A pipeline error. #[derive(Debug)] pub enum StepResult { @@ -17,9 +16,9 @@ pub enum StepResult { /// Origin was advanced. AdvancedOrigin, /// Origin advance failed. - OriginAdvanceErr(StageError), + OriginAdvanceErr(PipelineErrorKind), /// Step failed. - StepFailed(StageError), + StepFailed(PipelineErrorKind), } /// This trait defines the interface for interacting with the derivation pipeline. @@ -29,7 +28,7 @@ pub trait Pipeline: OriginProvider + Iterator Option<&OptimismAttributesWithParent>; /// Resets the pipeline on the next [Pipeline::step] call. 
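Because `StepResult` now carries a full `PipelineErrorKind` rather than a flat `StageError`, a driver can dispatch on severity alone. A hedged sketch of that dispatch, using `StepResult` from the hunk above and the kinds sketched earlier; the `trusted-sync` example at the end of this patch does the same with metrics attached:

```rust
/// Route a pipeline step outcome by severity (sketch; bodies elided).
fn on_step(result: StepResult) {
    match result {
        StepResult::PreparedAttributes | StepResult::AdvancedOrigin => { /* made progress */ }
        StepResult::OriginAdvanceErr(e) | StepResult::StepFailed(e) => match e {
            PipelineErrorKind::Temporary(_) => { /* retry on the next tick */ }
            PipelineErrorKind::Reset(_) => { /* re-seat the pipeline via `Pipeline::reset` */ }
            PipelineErrorKind::Critical(_) => { /* surface to the operator */ }
        },
    }
}
```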
- async fn reset(&mut self, l2_block_info: BlockInfo, origin: BlockInfo) -> anyhow::Result<()>; + async fn reset(&mut self, l2_block_info: BlockInfo, origin: BlockInfo) -> PipelineResult<()>; /// Attempts to progress the pipeline. async fn step(&mut self, cursor: L2BlockInfo) -> StepResult; diff --git a/crates/derive/src/traits/providers.rs b/crates/derive/src/traits/providers.rs index 98c265888..282067ee5 100644 --- a/crates/derive/src/traits/providers.rs +++ b/crates/derive/src/traits/providers.rs @@ -1,7 +1,8 @@ -use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use core::fmt::Display; + +use alloc::{boxed::Box, string::ToString, sync::Arc, vec::Vec}; use alloy_consensus::{Header, Receipt, TxEnvelope}; use alloy_primitives::B256; -use anyhow::Result; use async_trait::async_trait; use kona_primitives::L2ExecutionPayloadEnvelope; use op_alloy_genesis::{RollupConfig, SystemConfig}; @@ -10,39 +11,48 @@ use op_alloy_protocol::{BlockInfo, L2BlockInfo}; /// Describes the functionality of a data source that can provide information from the blockchain. #[async_trait] pub trait ChainProvider { + /// The error type for the [ChainProvider]. + type Error: Display + ToString; + /// Fetch the L1 [Header] for the given [B256] hash. - async fn header_by_hash(&mut self, hash: B256) -> Result
<Header>
; + async fn header_by_hash(&mut self, hash: B256) -> Result<Header, Self::Error>; /// Returns the block at the given number, or an error if the block does not exist in the data /// source. - async fn block_info_by_number(&mut self, number: u64) -> Result<BlockInfo>; + async fn block_info_by_number(&mut self, number: u64) -> Result<BlockInfo, Self::Error>; /// Returns all receipts in the block with the given hash, or an error if the block does not /// exist in the data source. - async fn receipts_by_hash(&mut self, hash: B256) -> Result<Vec<Receipt>>; + async fn receipts_by_hash(&mut self, hash: B256) -> Result<Vec<Receipt>, Self::Error>; /// Returns the [BlockInfo] and list of [TxEnvelope]s from the given block hash. async fn block_info_and_transactions_by_hash( &mut self, hash: B256, - ) -> Result<(BlockInfo, Vec<TxEnvelope>)>; + ) -> Result<(BlockInfo, Vec<TxEnvelope>), Self::Error>; } /// Describes the functionality of a data source that fetches safe blocks. #[async_trait] pub trait L2ChainProvider { + /// The error type for the [L2ChainProvider]. + type Error: Display + ToString; + /// Returns the L2 block info given a block number. /// Errors if the block does not exist. - async fn l2_block_info_by_number(&mut self, number: u64) -> Result<L2BlockInfo>; + async fn l2_block_info_by_number(&mut self, number: u64) -> Result<L2BlockInfo, Self::Error>; /// Returns an execution payload for a given number. /// Errors if the execution payload does not exist. - async fn payload_by_number(&mut self, number: u64) -> Result<L2ExecutionPayloadEnvelope>; + async fn payload_by_number( + &mut self, + number: u64, + ) -> Result<L2ExecutionPayloadEnvelope, Self::Error>; /// Returns the [SystemConfig] by L2 number. async fn system_config_by_number( &mut self, number: u64, rollup_config: Arc<RollupConfig>, - ) -> Result<SystemConfig>; + ) -> Result<SystemConfig, Self::Error>; } diff --git a/crates/derive/src/traits/stages.rs index 8055b12e8..f32ce2117 100644 --- a/crates/derive/src/traits/stages.rs +++ b/crates/derive/src/traits/stages.rs @@ -5,13 +5,13 @@ use async_trait::async_trait; use op_alloy_genesis::SystemConfig; use op_alloy_protocol::BlockInfo; -use crate::errors::StageResult; +use crate::errors::PipelineResult; /// Describes the functionality of a resettable stage within the derivation pipeline. #[async_trait] pub trait ResettableStage { /// Resets the derivation stage to its initial state. - async fn reset(&mut self, base: BlockInfo, cfg: &SystemConfig) -> StageResult<()>; + async fn reset(&mut self, base: BlockInfo, cfg: &SystemConfig) -> PipelineResult<()>; } /// Provides a method for accessing the pipeline's current L1 origin. @@ -25,5 +25,5 @@ pub trait OriginProvider { pub trait OriginAdvancer { /// Advances the internal state of the lowest stage to the next l1 origin. /// This method is the equivalent of the reference implementation `advance_l1_block`. - async fn advance_origin(&mut self) -> StageResult<()>; + async fn advance_origin(&mut self) -> PipelineResult<()>; } diff --git a/crates/derive/src/traits/test_utils.rs index e16fbf503..2ed33b35d 100644 --- a/crates/derive/src/traits/test_utils.rs +++ b/crates/derive/src/traits/test_utils.rs @@ -1,5 +1,11 @@ //!
Test Utilities for derive traits +use crate::{ + errors::{BlobProviderError, PipelineError, PipelineResult}, + traits::{ + AsyncIterator, BlobProvider, ChainProvider, DataAvailabilityProvider, L2ChainProvider, + }, +}; use alloc::{boxed::Box, sync::Arc, vec, vec::Vec}; use alloy_consensus::{Header, Receipt, TxEnvelope}; use alloy_eips::eip4844::Blob; @@ -12,28 +18,21 @@ use kona_primitives::{IndexedBlobHash, L2ExecutionPayloadEnvelope}; use op_alloy_genesis::{RollupConfig, SystemConfig}; use op_alloy_protocol::{BlockInfo, L2BlockInfo}; -use crate::{ - errors::{BlobProviderError, StageError, StageResult}, - traits::{ - AsyncIterator, BlobProvider, ChainProvider, DataAvailabilityProvider, L2ChainProvider, - }, -}; - /// Mock data iterator #[derive(Debug, Default, PartialEq)] pub struct TestIter { /// Holds open data calls with args for assertions. pub(crate) open_data_calls: Vec<(BlockInfo, Address)>, /// A queue of results to return as the next iterated data. - pub(crate) results: Vec>, + pub(crate) results: Vec>, } #[async_trait] impl AsyncIterator for TestIter { type Item = Bytes; - async fn next(&mut self) -> StageResult { - self.results.pop().unwrap_or_else(|| Err(StageError::Eof)) + async fn next(&mut self) -> PipelineResult { + self.results.pop().unwrap_or(Err(PipelineError::Eof.temp())) } } @@ -43,7 +42,7 @@ pub struct TestDAP { /// The batch inbox address. pub batch_inbox_address: Address, /// Specifies the stage results the test iter returns as data. - pub(crate) results: Vec>, + pub(crate) results: Vec>, } #[async_trait] @@ -51,16 +50,16 @@ impl DataAvailabilityProvider for TestDAP { type Item = Bytes; type DataIter = TestIter; - async fn open_data(&self, block_ref: &BlockInfo) -> Result { + async fn open_data(&self, block_ref: &BlockInfo) -> PipelineResult { // Construct a new vec of results to return. let results = self .results .iter() .map(|i| match i { Ok(r) => Ok(r.clone()), - Err(_) => Err(StageError::Eof), + Err(_) => Err(PipelineError::Eof.temp()), }) - .collect::>>(); + .collect::>>(); Ok(TestIter { open_data_calls: vec![(*block_ref, self.batch_inbox_address)], results }) } } @@ -130,6 +129,8 @@ impl TestChainProvider { #[async_trait] impl ChainProvider for TestChainProvider { + type Error = anyhow::Error; + async fn header_by_hash(&mut self, hash: B256) -> Result
<Header> { if let Some((_, header)) = self.headers.iter().find(|(_, b)| b.hash_slow() == hash) { Ok(header.clone()) } else { @@ -195,11 +196,13 @@ impl TestBlobProvider { #[async_trait] impl BlobProvider for TestBlobProvider { + type Error = BlobProviderError; + async fn get_blobs( &mut self, _block_ref: &BlockInfo, blob_hashes: &[IndexedBlobHash], - ) -> Result<Vec<Blob>, BlobProviderError> { + ) -> Result<Vec<Blob>, Self::Error> { let mut blobs = Vec::new(); for blob_hash in blob_hashes { if let Some(data) = self.blobs.get(&blob_hash.hash) { @@ -236,6 +239,8 @@ impl TestL2ChainProvider { #[async_trait] impl L2ChainProvider for TestL2ChainProvider { + type Error = anyhow::Error; + async fn l2_block_info_by_number(&mut self, number: u64) -> Result<L2BlockInfo> { if self.short_circuit { return self.blocks.first().copied().ok_or_else(|| anyhow::anyhow!("Block not found")); diff --git a/crates/primitives/Cargo.toml index 1277afdaf..e86406fd4 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -11,6 +11,7 @@ homepage.workspace = true [dependencies] # General anyhow.workspace = true +thiserror.workspace = true # Alloy alloy-eips.workspace = true diff --git a/crates/primitives/src/blob.rs index 089c8e9a4..20c3b4415 100644 --- a/crates/primitives/src/blob.rs +++ b/crates/primitives/src/blob.rs @@ -3,7 +3,7 @@ use alloc::vec; use alloy_eips::eip4844::{Blob, BYTES_PER_BLOB, VERSIONED_HASH_VERSION_KZG}; use alloy_primitives::{Bytes, B256}; -use anyhow::Result; +use thiserror::Error; /// The blob encoding version pub(crate) const BLOB_ENCODING_VERSION: u8 = 0; @@ -31,15 +31,19 @@ impl PartialEq for IndexedBlobHash { } /// Blob Decoding Error -#[derive(Debug)] +#[derive(Error, Debug, PartialEq, Eq)] pub enum BlobDecodingError { /// Invalid field element + #[error("Invalid field element")] InvalidFieldElement, /// Invalid encoding version + #[error("Invalid encoding version")] InvalidEncodingVersion, /// Invalid length + #[error("Invalid length")] InvalidLength, /// Missing Data + #[error("Missing data")] MissingData, } @@ -171,18 +175,18 @@ impl BlobData { /// Fills in the pointers to the fetched blob bodies. /// There should be exactly one placeholder blobOrCalldata /// element for each blob, otherwise an error is returned.
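With `thiserror` deriving `Display` and `PartialEq`, callers can assert on exact decoding variants instead of string-matching `anyhow` messages. An illustrative in-crate check against the `fill` contract from the hunk below, assuming `BlobData`'s two `Option<Bytes>` fields are constructible as in the crate's own tests:

```rust
#[test]
fn fill_rejects_out_of_range_index() {
    // No calldata and no data: `fill` proceeds to the bounds check,
    // and an index past the blob slice yields the typed error.
    let mut data = BlobData { data: None, calldata: None };
    let blobs: [Blob; 0] = [];
    assert_eq!(data.fill(&blobs, 0), Err(BlobDecodingError::InvalidLength));
}
```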
- pub fn fill(&mut self, blobs: &[Blob], index: usize) -> Result<()> { + pub fn fill(&mut self, blobs: &[Blob], index: usize) -> Result<(), BlobDecodingError> { // Do not fill if there is no calldata to fill if self.calldata.as_ref().map_or(false, |data| data.is_empty()) { return Ok(()); } if index >= blobs.len() { - return Err(anyhow::anyhow!("Insufficient blob count")); + return Err(BlobDecodingError::InvalidLength); } if blobs[index].is_empty() { - return Err(anyhow::anyhow!("Empty blob")); + return Err(BlobDecodingError::MissingData); } self.data = Some(Bytes::from(blobs[index])); diff --git a/crates/primitives/src/payload.rs b/crates/primitives/src/payload.rs index 843d54ed3..2b2aab8b3 100644 --- a/crates/primitives/src/payload.rs +++ b/crates/primitives/src/payload.rs @@ -7,12 +7,13 @@ use alloy_eips::{ }; use alloy_primitives::{Address, Bloom, Bytes, B256}; use alloy_rlp::Encodable; -use anyhow::Result; use op_alloy_consensus::{OpTxEnvelope, OpTxType}; use op_alloy_genesis::{RollupConfig, SystemConfig}; use op_alloy_protocol::{ - BlockInfo, L1BlockInfoBedrock, L1BlockInfoEcotone, L1BlockInfoTx, L2BlockInfo, + block_info::DecodeError, BlockInfo, L1BlockInfoBedrock, L1BlockInfoEcotone, L1BlockInfoTx, + L2BlockInfo, }; +use thiserror::Error; /// Fixed and variable memory costs for a payload. /// ~1000 bytes per payload, with some margin for overhead like map data. @@ -122,40 +123,65 @@ pub struct L2ExecutionPayload { pub excess_blob_gas: Option, } +/// An error encountered during [L2ExecutionPayloadEnvelope] conversion. +#[derive(Error, Debug)] +pub enum PayloadConversionError { + /// Invalid genesis hash. + #[error("Invalid genesis hash. Expected {0}, got {1}")] + InvalidGenesisHash(B256, B256), + /// Invalid transaction type. + #[error("First payload transaction has unexpected type: {0}")] + InvalidTxType(u8), + /// L1 Info error + #[error(transparent)] + L1InfoError(#[from] DecodeError), + /// EIP 2718 RLP error + #[error("EIP 2718 RLP error: {0}")] + Eip2718Error(alloy_eips::eip2718::Eip2718Error), + /// Missing system config in genesis block. + #[error("Missing system config in genesis block")] + MissingSystemConfigGenesis, + /// Empty transactions. + #[error("Empty transactions in payload. Block hash: {0}")] + EmptyTransactions(B256), +} + impl L2ExecutionPayloadEnvelope { /// Converts the [L2ExecutionPayloadEnvelope] to an [L2BlockInfo], by checking against the L1 /// information transaction or the genesis block. - pub fn to_l2_block_ref(&self, rollup_config: &RollupConfig) -> Result { + pub fn to_l2_block_ref( + &self, + rollup_config: &RollupConfig, + ) -> Result { let L2ExecutionPayloadEnvelope { execution_payload, .. 
} = self; let (l1_origin, sequence_number) = if execution_payload.block_number == rollup_config.genesis.l2.number { if execution_payload.block_hash != rollup_config.genesis.l2.hash { - anyhow::bail!("Invalid genesis hash"); + return Err(PayloadConversionError::InvalidGenesisHash( + rollup_config.genesis.l2.hash, + execution_payload.block_hash, + )); } (rollup_config.genesis.l1, 0) } else { if execution_payload.transactions.is_empty() { - anyhow::bail!( - "L2 block is missing L1 info deposit transaction, block hash: {}", - execution_payload.block_hash - ); + return Err(PayloadConversionError::EmptyTransactions(execution_payload.block_hash)); } let ty = execution_payload.transactions[0][0]; if ty != OpTxType::Deposit as u8 { - anyhow::bail!("First payload transaction has unexpected type: {:?}", ty); + return Err(PayloadConversionError::InvalidTxType(ty)); } let tx = OpTxEnvelope::decode_2718(&mut execution_payload.transactions[0].as_ref()) - .map_err(|e| anyhow::anyhow!(e))?; + .map_err(PayloadConversionError::Eip2718Error)?; let OpTxEnvelope::Deposit(tx) = tx else { - anyhow::bail!("First payload transaction has unexpected type: {:?}", tx.tx_type()); + return Err(PayloadConversionError::InvalidTxType(tx.tx_type() as u8)); }; - let l1_info = L1BlockInfoTx::decode_calldata(tx.input.as_ref()) - .map_err(|e| anyhow::anyhow!(e))?; + let l1_info = L1BlockInfoTx::decode_calldata(tx.input.as_ref())?; (l1_info.id(), l1_info.sequence_number()) }; @@ -172,38 +198,40 @@ impl L2ExecutionPayloadEnvelope { } /// Converts the [L2ExecutionPayloadEnvelope] to a partial [SystemConfig]. - pub fn to_system_config(&self, rollup_config: &RollupConfig) -> Result { + pub fn to_system_config( + &self, + rollup_config: &RollupConfig, + ) -> Result { let L2ExecutionPayloadEnvelope { execution_payload, .. } = self; if execution_payload.block_number == rollup_config.genesis.l2.number { if execution_payload.block_hash != rollup_config.genesis.l2.hash { - anyhow::bail!("Invalid genesis hash"); + return Err(PayloadConversionError::InvalidGenesisHash( + rollup_config.genesis.l2.hash, + execution_payload.block_hash, + )); } return rollup_config .genesis .system_config - .ok_or_else(|| anyhow::anyhow!("Missing system config in genesis block")); + .ok_or(PayloadConversionError::MissingSystemConfigGenesis); } if execution_payload.transactions.is_empty() { - anyhow::bail!( - "L2 block is missing L1 info deposit transaction, block hash: {}", - execution_payload.block_hash - ); + return Err(PayloadConversionError::EmptyTransactions(execution_payload.block_hash)); } let ty = execution_payload.transactions[0][0]; if ty != OpTxType::Deposit as u8 { - anyhow::bail!("First payload transaction has unexpected type: {:?}", ty); + return Err(PayloadConversionError::InvalidTxType(ty)); } let tx = OpTxEnvelope::decode_2718(&mut execution_payload.transactions[0].as_ref()) - .map_err(|e| anyhow::anyhow!(e))?; + .map_err(PayloadConversionError::Eip2718Error)?; let OpTxEnvelope::Deposit(tx) = tx else { - anyhow::bail!("First payload transaction has unexpected type: {:?}", tx.tx_type()); + return Err(PayloadConversionError::InvalidTxType(tx.tx_type() as u8)); }; - let l1_info = - L1BlockInfoTx::decode_calldata(tx.input.as_ref()).map_err(|e| anyhow::anyhow!(e))?; + let l1_info = L1BlockInfoTx::decode_calldata(tx.input.as_ref())?; let l1_fee_scalar = match l1_info { L1BlockInfoTx::Bedrock(L1BlockInfoBedrock { l1_fee_scalar, .. 
}) => l1_fee_scalar, L1BlockInfoTx::Ecotone(L1BlockInfoEcotone { diff --git a/examples/trusted-sync/src/main.rs b/examples/trusted-sync/src/main.rs index fb4a8756d..da226785c 100644 --- a/examples/trusted-sync/src/main.rs +++ b/examples/trusted-sync/src/main.rs @@ -1,6 +1,9 @@ use anyhow::Result; use clap::Parser; -use kona_derive::{errors::StageError, online::*}; +use kona_derive::{ + errors::{PipelineError, PipelineErrorKind}, + online::*, +}; use std::sync::Arc; use superchain::ROLLUP_CONFIGS; use tracing::{debug, error, info, trace, warn}; @@ -216,9 +219,21 @@ async fn sync(cli: cli::Cli) -> Result<()> { warn!(target: "loop", "Could not advance origin: {:?}", e); } StepResult::StepFailed(e) => match e { - StageError::NotEnoughData => { - metrics::PIPELINE_STEPS.with_label_values(&["not_enough_data"]).inc(); - debug!(target: "loop", "Not enough data to step derivation pipeline"); + PipelineErrorKind::Temporary(e) => { + if matches!(e, PipelineError::NotEnoughData) { + metrics::PIPELINE_STEPS.with_label_values(&["not_enough_data"]).inc(); + debug!(target: "loop", "Not enough data to step derivation pipeline"); + } + } + PipelineErrorKind::Reset(_) => { + metrics::PIPELINE_STEPS.with_label_values(&["reset"]).inc(); + warn!(target: "loop", "Resetting pipeline: {:?}", e); + pipeline + .reset( + cursor.block_info, + pipeline.origin().ok_or(anyhow::anyhow!("Missing origin"))?, + ) + .await?; } _ => { metrics::PIPELINE_STEPS.with_label_values(&["failure"]).inc();