From a0de2ac8a1f60f003eadf6f721fefb8b8ea092e7 Mon Sep 17 00:00:00 2001 From: clabby Date: Fri, 19 Apr 2024 12:56:41 -0400 Subject: [PATCH 1/4] fix(derive): Span batch bitlist encoding (#122) ## Overview Fixes the spanbatch bitlist encoding by following the spec of Golang's `big.Int`. Internally, this type uses a big-endian, zero-padded array of u8s, and when the number grows, it extends itself left in memory. **Metadata** closes https://github.com/ethereum-optimism/kona/issues/112 --- .../derive/src/types/batch/span_batch/bits.rs | 117 ++++++++++++++---- 1 file changed, 91 insertions(+), 26 deletions(-) diff --git a/crates/derive/src/types/batch/span_batch/bits.rs b/crates/derive/src/types/batch/span_batch/bits.rs index 7a986afaf..32cf8fe3d 100644 --- a/crates/derive/src/types/batch/span_batch/bits.rs +++ b/crates/derive/src/types/batch/span_batch/bits.rs @@ -4,6 +4,7 @@ use crate::types::{SpanBatchError, MAX_SPAN_BATCH_SIZE}; use alloc::{vec, vec::Vec}; use alloy_rlp::Buf; use anyhow::Result; +use core::cmp::Ordering; /// Type for span batch bits. #[derive(Debug, Default, Clone, PartialEq, Eq)] @@ -47,12 +48,11 @@ impl SpanBatchBits { b.advance(buffer_len); v }; - let sb_bits = SpanBatchBits(bits.to_vec()); + let sb_bits = SpanBatchBits(bits); - // TODO(clabby): Why doesn't this check work? - // if sb_bits.bit_len() > bit_length { - // return Err(SpanBatchError::BitfieldTooLong); - // } + if sb_bits.bit_len() > bit_length { + return Err(SpanBatchError::BitfieldTooLong); + } Ok(sb_bits) } @@ -65,10 +65,9 @@ impl SpanBatchBits { bit_length: usize, bits: &SpanBatchBits, ) -> Result<(), SpanBatchError> { - // TODO(clabby): Why doesn't this check work? - // if bits.bit_len() > bit_length { - // return Err(SpanBatchError::BitfieldTooLong); - // } + if bits.bit_len() > bit_length { + return Err(SpanBatchError::BitfieldTooLong); + } // Round up, ensure enough bytes when number of bits is not a multiple of 8. // Alternative of (L+7)/8 is not overflow-safe. 
@@ -90,12 +89,12 @@ impl SpanBatchBits { // Check if the byte index is within the bounds of the bitlist if byte_index < self.0.len() { // Retrieve the specific byte that contains the bit we're interested in - let byte = self.0[byte_index]; + let byte = self.0[self.0.len() - byte_index - 1]; // Shift the bits of the byte to the right, based on the bit index, and // mask it with 1 to isolate the bit we're interested in. // If the result is not zero, the bit is set to 1, otherwise it's 0. - Some(if byte & (1 << (8 - bit_index)) != 0 { 1 } else { 0 }) + Some(if byte & (1 << bit_index) != 0 { 1 } else { 0 }) } else { // Return None if the index is out of bounds None @@ -110,34 +109,58 @@ impl SpanBatchBits { // Ensure the vector is large enough to contain the bit at 'index'. // If not, resize the vector, filling with 0s. if byte_index >= self.0.len() { - self.0.resize(byte_index + 1, 0); + Self::resize_from_right(&mut self.0, byte_index + 1); } // Retrieve the specific byte to modify - let byte = &mut self.0[byte_index]; + let len = self.0.len(); + let byte = &mut self.0[len - byte_index - 1]; if value { // Set the bit to 1 - *byte |= 1 << (8 - bit_index); + *byte |= 1 << bit_index; } else { // Set the bit to 0 - *byte &= !(1 << (8 - bit_index)); + *byte &= !(1 << bit_index); } } /// Calculates the bit length of the [SpanBatchBits] bitfield. pub fn bit_len(&self) -> usize { - if let Some((top_word, rest)) = self.0.split_last() { - // Calculate bit length. Rust's leading_zeros counts zeros from the MSB, so subtract - // from total bits. - let significant_bits = 8 - top_word.leading_zeros() as usize; - - // Return total bits, taking into account the full words in `rest` and the significant - // bits in `top`. - rest.len() * 8 + significant_bits - } else { - // If the slice is empty, return 0. 
- 0 + // Iterate over the bytes from left to right to find the first non-zero byte + for (i, &byte) in self.0.iter().enumerate() { + if byte != 0 { + // Calculate the index of the most significant bit in the byte + let msb_index = 7 - byte.leading_zeros() as usize; // 0-based index + + // Calculate the total bit length + let total_bit_length = msb_index + 1 + ((self.0.len() - i - 1) * 8); + return total_bit_length; + } + } + + // If all bytes are zero, the bitlist is considered to have a length of 0 + 0 + } + + /// Resizes an array from the right. Useful for big-endian zero extension. + fn resize_from_right(vec: &mut Vec, new_size: usize) { + let current_size = vec.len(); + match new_size.cmp(¤t_size) { + Ordering::Less => { + // Remove elements from the beginning. + let remove_count = current_size - new_size; + vec.drain(0..remove_count); + } + Ordering::Greater => { + // Calculate how many new elements to add. + let additional = new_size - current_size; + // Prepend new elements with default values. + let mut prepend_elements = vec![T::default(); additional]; + prepend_elements.append(vec); + *vec = prepend_elements; + } + Ordering::Equal => { /* If new_size == current_size, do nothing. */ } } } } @@ -156,6 +179,48 @@ mod test { SpanBatchBits::encode(&mut encoded, bits.0.len() * 8, &bits).unwrap(); assert_eq!(encoded, bits.0); } + + #[test] + fn test_span_bitlist_bitlen(index in 0usize..65536) { + let mut bits = SpanBatchBits::default(); + bits.set_bit(index, true); + assert_eq!(bits.0.len(), (index / 8) + 1); + assert_eq!(bits.bit_len(), index + 1); + } + + #[test] + fn test_span_bitlist_bitlen_shrink(first_index in 8usize..65536) { + let second_index = first_index.clamp(0, first_index - 8); + let mut bits = SpanBatchBits::default(); + + // Set and clear first index. 
+ bits.set_bit(first_index, true); + assert_eq!(bits.0.len(), (first_index / 8) + 1); + assert_eq!(bits.bit_len(), first_index + 1); + bits.set_bit(first_index, false); + assert_eq!(bits.0.len(), (first_index / 8) + 1); + assert_eq!(bits.bit_len(), 0); + + // Set second bit. Even though the array is larger, as it was originally allocated with more words, + // the bitlength should still be lowered as the higher-order words are 0'd out. + bits.set_bit(second_index, true); + assert_eq!(bits.0.len(), (first_index / 8) + 1); + assert_eq!(bits.bit_len(), second_index + 1); + } + } + + #[test] + fn bitlist_big_endian_zero_extended() { + let mut bits = SpanBatchBits::default(); + + bits.set_bit(1, true); + bits.set_bit(6, true); + bits.set_bit(8, true); + bits.set_bit(15, true); + assert_eq!(bits.0[0], 0b1000_0001); + assert_eq!(bits.0[1], 0b0100_0010); + assert_eq!(bits.0.len(), 2); + assert_eq!(bits.bit_len(), 16); } #[test] From 0587b4df88432fb84d5c774677f06f423e548231 Mon Sep 17 00:00:00 2001 From: clabby Date: Fri, 19 Apr 2024 13:22:20 -0400 Subject: [PATCH 2/4] feat(derive): Use `L2ChainProvider` for system config fetching in attributes builder (#123) Reuses the `L2ChainProvider` for fetching the `SystemConfig` at a given block, rather than having an exclusive trait that extends the `L2ChainProvider`.
Fix tests --- crates/derive/src/online/alloy_providers.rs | 21 ++- crates/derive/src/sources/factory.rs | 2 +- crates/derive/src/stages/attributes_queue.rs | 2 +- .../src/stages/attributes_queue/builder.rs | 125 ++++++++++-------- crates/derive/src/stages/mod.rs | 1 - .../stages/test_utils/sys_config_fetcher.rs | 40 ++++-- crates/derive/src/traits/data_sources.rs | 12 +- .../src/traits/test_utils/data_sources.rs | 26 +++- crates/derive/src/types/l1_block_info.rs | 20 ++- crates/derive/src/types/payload.rs | 59 ++++++++- crates/derive/src/types/rollup_config.rs | 5 - crates/derive/src/types/system_config.rs | 2 - 12 files changed, 226 insertions(+), 89 deletions(-) diff --git a/crates/derive/src/online/alloy_providers.rs b/crates/derive/src/online/alloy_providers.rs index 75e11b7b6..ce7b11ddc 100644 --- a/crates/derive/src/online/alloy_providers.rs +++ b/crates/derive/src/online/alloy_providers.rs @@ -3,7 +3,10 @@ use crate::{ traits::{ChainProvider, L2ChainProvider}, - types::{Block, BlockInfo, L2BlockInfo, L2ExecutionPayloadEnvelope, OpBlock, RollupConfig}, + types::{ + Block, BlockInfo, L2BlockInfo, L2ExecutionPayloadEnvelope, OpBlock, RollupConfig, + SystemConfig, + }, }; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::{Header, Receipt, ReceiptWithBloom, TxEnvelope, TxType}; @@ -165,6 +168,8 @@ pub struct AlloyL2ChainProvider>> { payload_by_number_cache: LruCache, /// `l2_block_info_by_number` LRU cache. l2_block_info_by_number_cache: LruCache, + /// `system_config_by_l2_hash` LRU cache. 
+ system_config_by_number_cache: LruCache, } impl>> AlloyL2ChainProvider { @@ -175,6 +180,7 @@ impl>> AlloyL2ChainProvider { rollup_config, payload_by_number_cache: LruCache::new(NonZeroUsize::new(CACHE_SIZE).unwrap()), l2_block_info_by_number_cache: LruCache::new(NonZeroUsize::new(CACHE_SIZE).unwrap()), + system_config_by_number_cache: LruCache::new(NonZeroUsize::new(CACHE_SIZE).unwrap()), } } } @@ -209,4 +215,17 @@ impl>> L2ChainProvider for AlloyL2ChainProvide self.payload_by_number_cache.put(number, payload_envelope.clone()); Ok(payload_envelope) } + + async fn system_config_by_number( + &mut self, + number: u64, + rollup_config: Arc, + ) -> Result { + if let Some(system_config) = self.system_config_by_number_cache.get(&number) { + return Ok(*system_config); + } + + let envelope = self.payload_by_number(number).await?; + envelope.to_system_config(&rollup_config) + } } diff --git a/crates/derive/src/sources/factory.rs b/crates/derive/src/sources/factory.rs index 231d42c2e..1d39d4b27 100644 --- a/crates/derive/src/sources/factory.rs +++ b/crates/derive/src/sources/factory.rs @@ -41,7 +41,7 @@ where blob_provider: blobs, ecotone_timestamp: cfg.ecotone_time, plasma_enabled: cfg.is_plasma_enabled(), - signer: cfg.l1_signer_address(), + signer: cfg.genesis.system_config.batcher_addr, } } } diff --git a/crates/derive/src/stages/attributes_queue.rs b/crates/derive/src/stages/attributes_queue.rs index fe549fae7..98383de83 100644 --- a/crates/derive/src/stages/attributes_queue.rs +++ b/crates/derive/src/stages/attributes_queue.rs @@ -16,7 +16,7 @@ mod deposits; pub(crate) use deposits::derive_deposits; mod builder; -pub use builder::{AttributesBuilder, StatefulAttributesBuilder, SystemConfigL2Fetcher}; +pub use builder::{AttributesBuilder, StatefulAttributesBuilder}; /// [AttributesProvider] is a trait abstraction that generalizes the [BatchQueue] stage. 
/// diff --git a/crates/derive/src/stages/attributes_queue/builder.rs b/crates/derive/src/stages/attributes_queue/builder.rs index ee0d9ac6e..4282671da 100644 --- a/crates/derive/src/stages/attributes_queue/builder.rs +++ b/crates/derive/src/stages/attributes_queue/builder.rs @@ -3,14 +3,13 @@ use super::derive_deposits; use crate::{ params::SEQUENCER_FEE_VAULT_ADDRESS, - traits::ChainProvider, + traits::{ChainProvider, L2ChainProvider}, types::{ BlockID, BuilderError, EcotoneTransactionBuilder, L1BlockInfoTx, L2BlockInfo, - L2PayloadAttributes, RawTransaction, RollupConfig, SystemConfig, + L2PayloadAttributes, RawTransaction, RollupConfig, }, }; use alloc::{boxed::Box, fmt::Debug, sync::Arc, vec, vec::Vec}; -use alloy_primitives::B256; use alloy_rlp::Encodable; use async_trait::async_trait; @@ -32,43 +31,37 @@ pub trait AttributesBuilder { ) -> Result; } -/// The [SystemConfigL2Fetcher] fetches the system config by L2 hash. -pub trait SystemConfigL2Fetcher { - /// Fetch the system config by L2 hash. - fn system_config_by_l2_hash(&self, hash: B256) -> anyhow::Result; -} - /// A stateful implementation of the [AttributesBuilder]. #[derive(Debug, Default)] -pub struct StatefulAttributesBuilder +pub struct StatefulAttributesBuilder where - S: SystemConfigL2Fetcher + Debug, - R: ChainProvider + Debug, + L1P: ChainProvider + Debug, + L2P: L2ChainProvider + Debug, { /// The rollup config. rollup_cfg: Arc, /// The system config fetcher. - config_fetcher: S, + config_fetcher: L2P, /// The L1 receipts fetcher. - receipts_fetcher: R, + receipts_fetcher: L1P, } -impl StatefulAttributesBuilder +impl StatefulAttributesBuilder where - S: SystemConfigL2Fetcher + Debug, - R: ChainProvider + Debug, + L1P: ChainProvider + Debug, + L2P: L2ChainProvider + Debug, { /// Create a new [StatefulAttributesBuilder] with the given epoch. 
- pub fn new(rcfg: Arc, cfg: S, receipts: R) -> Self { - Self { rollup_cfg: rcfg, config_fetcher: cfg, receipts_fetcher: receipts } + pub fn new(rcfg: Arc, sys_cfg_fetcher: L2P, receipts: L1P) -> Self { + Self { rollup_cfg: rcfg, config_fetcher: sys_cfg_fetcher, receipts_fetcher: receipts } } } #[async_trait] -impl AttributesBuilder for StatefulAttributesBuilder +impl AttributesBuilder for StatefulAttributesBuilder where - S: SystemConfigL2Fetcher + Send + Debug, - R: ChainProvider + Send + Debug, + L1P: ChainProvider + Debug + Send, + L2P: L2ChainProvider + Debug + Send, { async fn prepare_payload_attributes( &mut self, @@ -77,8 +70,10 @@ where ) -> Result { let l1_header; let deposit_transactions: Vec; - let mut sys_config = - self.config_fetcher.system_config_by_l2_hash(l2_parent.block_info.hash)?; + let mut sys_config = self + .config_fetcher + .system_config_by_number(l2_parent.block_info.number, self.rollup_cfg.clone()) + .await?; // If the L1 origin changed in this block, then we are in the first block of the epoch. 
// In this case we need to fetch all transaction receipts from the L1 origin block so @@ -177,27 +172,28 @@ where mod tests { use super::*; use crate::{ - stages::test_utils::MockSystemConfigL2Fetcher, traits::test_utils::TestChainProvider, - types::BlockInfo, + stages::test_utils::MockSystemConfigL2Fetcher, + traits::test_utils::TestChainProvider, + types::{BlockInfo, SystemConfig}, }; use alloy_consensus::Header; - use alloy_primitives::b256; + use alloy_primitives::B256; #[tokio::test] async fn test_prepare_payload_block_mismatch_epoch_reset() { let cfg = Arc::new(RollupConfig::default()); - let l2_hash = b256!("0000000000000000000000000000000000000000000000000000000000000002"); + let l2_number = 1; let mut fetcher = MockSystemConfigL2Fetcher::default(); - fetcher.insert(l2_hash, SystemConfig::default()); + fetcher.insert(l2_number, SystemConfig::default()); let mut provider = TestChainProvider::default(); let header = Header::default(); let hash = header.hash_slow(); provider.insert_header(hash, header); let mut builder = StatefulAttributesBuilder::new(cfg, fetcher, provider); - let epoch = BlockID { hash, number: 1 }; + let epoch = BlockID { hash, number: l2_number }; let l2_parent = L2BlockInfo { - block_info: BlockInfo { hash: l2_hash, number: 1, ..Default::default() }, - l1_origin: BlockID { hash: l2_hash, number: 2 }, + block_info: BlockInfo { hash: B256::ZERO, number: l2_number, ..Default::default() }, + l1_origin: BlockID { hash: B256::left_padding_from(&[0xFF]), number: 2 }, seq_num: 0, }; // This should error because the l2 parent's l1_origin.hash should equal the epoch header @@ -211,18 +207,18 @@ mod tests { #[tokio::test] async fn test_prepare_payload_block_mismatch() { let cfg = Arc::new(RollupConfig::default()); - let l2_hash = b256!("0000000000000000000000000000000000000000000000000000000000000002"); + let l2_number = 1; let mut fetcher = MockSystemConfigL2Fetcher::default(); - fetcher.insert(l2_hash, SystemConfig::default()); + 
fetcher.insert(l2_number, SystemConfig::default()); let mut provider = TestChainProvider::default(); let header = Header::default(); let hash = header.hash_slow(); provider.insert_header(hash, header); let mut builder = StatefulAttributesBuilder::new(cfg, fetcher, provider); - let epoch = BlockID { hash, number: 1 }; + let epoch = BlockID { hash, number: l2_number }; let l2_parent = L2BlockInfo { - block_info: BlockInfo { hash: l2_hash, number: 1, ..Default::default() }, - l1_origin: BlockID { hash: l2_hash, number: 1 }, + block_info: BlockInfo { hash: B256::ZERO, number: l2_number, ..Default::default() }, + l1_origin: BlockID { hash: B256::ZERO, number: l2_number }, seq_num: 0, }; // This should error because the l2 parent's l1_origin.hash should equal the epoch hash @@ -237,18 +233,18 @@ mod tests { let block_time = 10; let timestamp = 100; let cfg = Arc::new(RollupConfig { block_time, ..Default::default() }); - let l2_hash = b256!("0000000000000000000000000000000000000000000000000000000000000002"); + let l2_number = 1; let mut fetcher = MockSystemConfigL2Fetcher::default(); - fetcher.insert(l2_hash, SystemConfig::default()); + fetcher.insert(l2_number, SystemConfig::default()); let mut provider = TestChainProvider::default(); let header = Header { timestamp, ..Default::default() }; let hash = header.hash_slow(); provider.insert_header(hash, header); let mut builder = StatefulAttributesBuilder::new(cfg, fetcher, provider); - let epoch = BlockID { hash, number: 1 }; + let epoch = BlockID { hash, number: l2_number }; let l2_parent = L2BlockInfo { - block_info: BlockInfo { hash: l2_hash, number: 1, ..Default::default() }, - l1_origin: BlockID { hash, number: 1 }, + block_info: BlockInfo { hash: B256::ZERO, number: l2_number, ..Default::default() }, + l1_origin: BlockID { hash, number: l2_number }, seq_num: 0, }; let next_l2_time = l2_parent.block_info.timestamp + block_time; @@ -268,19 +264,24 @@ mod tests { let block_time = 10; let timestamp = 100; let cfg = 
Arc::new(RollupConfig { block_time, ..Default::default() }); - let l2_hash = b256!("0000000000000000000000000000000000000000000000000000000000000002"); + let l2_number = 1; let mut fetcher = MockSystemConfigL2Fetcher::default(); - fetcher.insert(l2_hash, SystemConfig::default()); + fetcher.insert(l2_number, SystemConfig::default()); let mut provider = TestChainProvider::default(); let header = Header { timestamp, ..Default::default() }; let prev_randao = header.mix_hash; let hash = header.hash_slow(); provider.insert_header(hash, header); let mut builder = StatefulAttributesBuilder::new(cfg, fetcher, provider); - let epoch = BlockID { hash, number: 1 }; + let epoch = BlockID { hash, number: l2_number }; let l2_parent = L2BlockInfo { - block_info: BlockInfo { hash: l2_hash, number: 1, timestamp, parent_hash: hash }, - l1_origin: BlockID { hash, number: 1 }, + block_info: BlockInfo { + hash: B256::ZERO, + number: l2_number, + timestamp, + parent_hash: hash, + }, + l1_origin: BlockID { hash, number: l2_number }, seq_num: 0, }; let next_l2_time = l2_parent.block_info.timestamp + block_time; @@ -306,19 +307,24 @@ mod tests { let block_time = 10; let timestamp = 100; let cfg = Arc::new(RollupConfig { block_time, canyon_time: Some(0), ..Default::default() }); - let l2_hash = b256!("0000000000000000000000000000000000000000000000000000000000000002"); + let l2_number = 1; let mut fetcher = MockSystemConfigL2Fetcher::default(); - fetcher.insert(l2_hash, SystemConfig::default()); + fetcher.insert(l2_number, SystemConfig::default()); let mut provider = TestChainProvider::default(); let header = Header { timestamp, ..Default::default() }; let prev_randao = header.mix_hash; let hash = header.hash_slow(); provider.insert_header(hash, header); let mut builder = StatefulAttributesBuilder::new(cfg, fetcher, provider); - let epoch = BlockID { hash, number: 1 }; + let epoch = BlockID { hash, number: l2_number }; let l2_parent = L2BlockInfo { - block_info: BlockInfo { hash: l2_hash, 
number: 1, timestamp, parent_hash: hash }, - l1_origin: BlockID { hash, number: 1 }, + block_info: BlockInfo { + hash: B256::ZERO, + number: l2_number, + timestamp, + parent_hash: hash, + }, + l1_origin: BlockID { hash, number: l2_number }, seq_num: 0, }; let next_l2_time = l2_parent.block_info.timestamp + block_time; @@ -345,9 +351,9 @@ mod tests { let timestamp = 100; let cfg = Arc::new(RollupConfig { block_time, ecotone_time: Some(0), ..Default::default() }); - let l2_hash = b256!("0000000000000000000000000000000000000000000000000000000000000002"); + let l2_number = 1; let mut fetcher = MockSystemConfigL2Fetcher::default(); - fetcher.insert(l2_hash, SystemConfig::default()); + fetcher.insert(l2_number, SystemConfig::default()); let mut provider = TestChainProvider::default(); let header = Header { timestamp, ..Default::default() }; let parent_beacon_block_root = Some(header.parent_beacon_block_root.unwrap_or_default()); @@ -355,10 +361,15 @@ mod tests { let hash = header.hash_slow(); provider.insert_header(hash, header); let mut builder = StatefulAttributesBuilder::new(cfg, fetcher, provider); - let epoch = BlockID { hash, number: 1 }; + let epoch = BlockID { hash, number: l2_number }; let l2_parent = L2BlockInfo { - block_info: BlockInfo { hash: l2_hash, number: 1, timestamp, parent_hash: hash }, - l1_origin: BlockID { hash, number: 1 }, + block_info: BlockInfo { + hash: B256::ZERO, + number: l2_number, + timestamp, + parent_hash: hash, + }, + l1_origin: BlockID { hash, number: l2_number }, seq_num: 0, }; let next_l2_time = l2_parent.block_info.timestamp + block_time; diff --git a/crates/derive/src/stages/mod.rs b/crates/derive/src/stages/mod.rs index 9517d3435..a428572e7 100644 --- a/crates/derive/src/stages/mod.rs +++ b/crates/derive/src/stages/mod.rs @@ -34,7 +34,6 @@ pub use batch_queue::{BatchQueue, BatchQueueProvider}; mod attributes_queue; pub use attributes_queue::{ AttributesBuilder, AttributesProvider, AttributesQueue, StatefulAttributesBuilder, - 
SystemConfigL2Fetcher, }; #[cfg(test)] diff --git a/crates/derive/src/stages/test_utils/sys_config_fetcher.rs b/crates/derive/src/stages/test_utils/sys_config_fetcher.rs index 2a34ff691..53489faa9 100644 --- a/crates/derive/src/stages/test_utils/sys_config_fetcher.rs +++ b/crates/derive/src/stages/test_utils/sys_config_fetcher.rs @@ -1,20 +1,25 @@ //! Implements a mock [L2SystemConfigFetcher] for testing. -use crate::{stages::attributes_queue::SystemConfigL2Fetcher, types::SystemConfig}; -use alloy_primitives::B256; +use crate::{ + traits::L2ChainProvider, + types::{L2BlockInfo, L2ExecutionPayloadEnvelope, RollupConfig, SystemConfig}, +}; +use alloc::{boxed::Box, sync::Arc}; +use anyhow::Result; +use async_trait::async_trait; use hashbrown::HashMap; /// A mock implementation of the [`SystemConfigL2Fetcher`] for testing. #[derive(Debug, Default)] pub struct MockSystemConfigL2Fetcher { - /// A map from [B256] block hash to a [SystemConfig]. - pub system_configs: HashMap, + /// A map from [u64] block number to a [SystemConfig]. + pub system_configs: HashMap, } impl MockSystemConfigL2Fetcher { - /// Inserts a new system config into the mock fetcher with the given hash. - pub fn insert(&mut self, hash: B256, config: SystemConfig) { - self.system_configs.insert(hash, config); + /// Inserts a new system config into the mock fetcher with the given block number. + pub fn insert(&mut self, number: u64, config: SystemConfig) { + self.system_configs.insert(number, config); } /// Clears all system configs from the mock fetcher. 
@@ -23,11 +28,24 @@ impl MockSystemConfigL2Fetcher { } } -impl SystemConfigL2Fetcher for MockSystemConfigL2Fetcher { - fn system_config_by_l2_hash(&self, hash: B256) -> anyhow::Result { +#[async_trait] +impl L2ChainProvider for MockSystemConfigL2Fetcher { + async fn system_config_by_number( + &mut self, + number: u64, + _: Arc, + ) -> Result { self.system_configs - .get(&hash) + .get(&number) .cloned() - .ok_or_else(|| anyhow::anyhow!("system config not found")) + .ok_or_else(|| anyhow::anyhow!("system config not found: {number}")) + } + + async fn l2_block_info_by_number(&mut self, _: u64) -> Result { + unimplemented!() + } + + async fn payload_by_number(&mut self, _: u64) -> Result { + unimplemented!() } } diff --git a/crates/derive/src/traits/data_sources.rs b/crates/derive/src/traits/data_sources.rs index 6c61b0b22..1061b84ea 100644 --- a/crates/derive/src/traits/data_sources.rs +++ b/crates/derive/src/traits/data_sources.rs @@ -2,9 +2,10 @@ //! pipeline's stages. use crate::types::{ - Blob, BlockInfo, IndexedBlobHash, L2BlockInfo, L2ExecutionPayloadEnvelope, StageResult, + Blob, BlockInfo, IndexedBlobHash, L2BlockInfo, L2ExecutionPayloadEnvelope, RollupConfig, + StageResult, SystemConfig, }; -use alloc::{boxed::Box, fmt::Debug, vec::Vec}; +use alloc::{boxed::Box, fmt::Debug, sync::Arc, vec::Vec}; use alloy_consensus::{Header, Receipt, TxEnvelope}; use alloy_primitives::{Address, Bytes, B256}; use anyhow::Result; @@ -41,6 +42,13 @@ pub trait L2ChainProvider { /// Returns an execution payload for a given number. /// Errors if the execution payload does not exist. async fn payload_by_number(&mut self, number: u64) -> Result; + + /// Returns the [SystemConfig] by L2 number. + async fn system_config_by_number( + &mut self, + number: u64, + rollup_config: Arc, + ) -> Result; } /// The BlobProvider trait specifies the functionality of a data source that can provide blobs. 
diff --git a/crates/derive/src/traits/test_utils/data_sources.rs b/crates/derive/src/traits/test_utils/data_sources.rs index ae122c568..1fb473f3e 100644 --- a/crates/derive/src/traits/test_utils/data_sources.rs +++ b/crates/derive/src/traits/test_utils/data_sources.rs @@ -2,13 +2,14 @@ use crate::{ traits::{ChainProvider, L2ChainProvider}, - types::{BlockInfo, L2BlockInfo, L2ExecutionPayloadEnvelope}, + types::{BlockInfo, L2BlockInfo, L2ExecutionPayloadEnvelope, RollupConfig, SystemConfig}, }; -use alloc::{boxed::Box, vec::Vec}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::{Header, Receipt, TxEnvelope}; use alloy_primitives::B256; use anyhow::Result; use async_trait::async_trait; +use hashbrown::HashMap; /// A mock block fetcher. #[derive(Debug, Default)] @@ -17,12 +18,18 @@ pub struct MockBlockFetcher { pub blocks: Vec, /// Payloads pub payloads: Vec, + /// System configs + pub system_configs: HashMap, } impl MockBlockFetcher { /// Creates a new [MockBlockFetcher] with the given origin and batches. - pub fn new(blocks: Vec, payloads: Vec) -> Self { - Self { blocks, payloads } + pub fn new( + blocks: Vec, + payloads: Vec, + system_configs: HashMap, + ) -> Self { + Self { blocks, payloads, system_configs } } } @@ -43,6 +50,17 @@ impl L2ChainProvider for MockBlockFetcher { .cloned() .ok_or_else(|| anyhow::anyhow!("Payload not found")) } + + async fn system_config_by_number( + &mut self, + number: u64, + _: Arc, + ) -> Result { + self.system_configs + .get(&number) + .ok_or_else(|| anyhow::anyhow!("System config not found")) + .cloned() + } } /// A mock chain provider for testing. diff --git a/crates/derive/src/types/l1_block_info.rs b/crates/derive/src/types/l1_block_info.rs index 11f3d9386..053f7117e 100644 --- a/crates/derive/src/types/l1_block_info.rs +++ b/crates/derive/src/types/l1_block_info.rs @@ -226,8 +226,8 @@ impl L1BlockInfoTx { /// Encodes the [L1BlockInfoTx] object into Ethereum transaction calldata. 
pub fn encode_calldata(&self) -> Bytes { match self { - Self::Ecotone(ecotone_tx) => ecotone_tx.encode_calldata(), Self::Bedrock(bedrock_tx) => bedrock_tx.encode_calldata(), + Self::Ecotone(ecotone_tx) => ecotone_tx.encode_calldata(), } } @@ -243,11 +243,27 @@ impl L1BlockInfoTx { } } + /// Returns the L1 fee overhead for the info transaction. After ecotone, this value is ignored. + pub fn l1_fee_overhead(&self) -> U256 { + match self { + Self::Bedrock(L1BlockInfoBedrock { l1_fee_overhead, .. }) => *l1_fee_overhead, + Self::Ecotone(_) => U256::ZERO, + } + } + + /// Returns the batcher address for the info transaction + pub fn batcher_address(&self) -> Address { + match self { + Self::Bedrock(L1BlockInfoBedrock { batcher_address, .. }) => *batcher_address, + Self::Ecotone(L1BlockInfoEcotone { batcher_address, .. }) => *batcher_address, + } + } + /// Returns the sequence number for the info transaction pub fn sequence_number(&self) -> u64 { match self { - Self::Ecotone(L1BlockInfoEcotone { sequence_number, .. }) => *sequence_number, Self::Bedrock(L1BlockInfoBedrock { sequence_number, .. }) => *sequence_number, + Self::Ecotone(L1BlockInfoEcotone { sequence_number, .. }) => *sequence_number, } } } diff --git a/crates/derive/src/types/payload.rs b/crates/derive/src/types/payload.rs index 6e762a95a..347646f67 100644 --- a/crates/derive/src/types/payload.rs +++ b/crates/derive/src/types/payload.rs @@ -1,7 +1,7 @@ //! Contains the execution payload type. use alloc::vec::Vec; -use alloy_primitives::{Address, Bloom, Bytes, B256}; +use alloy_primitives::{Address, Bloom, Bytes, B256, U256}; use anyhow::Result; use op_alloy_consensus::OpTxEnvelope; @@ -13,7 +13,11 @@ pub const PAYLOAD_MEM_FIXED_COST: u64 = 1000; /// 24 bytes per tx overhead (size of slice header in memory). 
pub const PAYLOAD_TX_MEM_OVERHEAD: u64 = 24; -use super::{Block, BlockInfo, L1BlockInfoTx, L2BlockInfo, OpBlock, RollupConfig, Withdrawal}; +use crate::types::{L1BlockInfoBedrock, L1BlockInfoEcotone}; + +use super::{ + Block, BlockInfo, L1BlockInfoTx, L2BlockInfo, OpBlock, RollupConfig, SystemConfig, Withdrawal, +}; use alloy_rlp::{Decodable, Encodable}; #[cfg(feature = "serde")] @@ -153,6 +157,57 @@ impl L2ExecutionPayloadEnvelope { seq_num: sequence_number, }) } + + /// Converts the [L2ExecutionPayloadEnvelope] to a partial [SystemConfig]. + pub fn to_system_config(&self, rollup_config: &RollupConfig) -> Result { + let L2ExecutionPayloadEnvelope { execution_payload, .. } = self; + + if execution_payload.block_number == rollup_config.genesis.l2.number { + if execution_payload.block_hash != rollup_config.genesis.l2.hash { + anyhow::bail!("Invalid genesis hash"); + } + return Ok(rollup_config.genesis.system_config); + } + + if execution_payload.transactions.is_empty() { + anyhow::bail!( + "L2 block is missing L1 info deposit transaction, block hash: {}", + execution_payload.block_hash + ); + } + let tx = OpTxEnvelope::decode(&mut execution_payload.transactions[0].as_ref()) + .map_err(|e| anyhow::anyhow!(e))?; + + let OpTxEnvelope::Deposit(tx) = tx else { + anyhow::bail!("First payload transaction has unexpected type: {:?}", tx.tx_type()); + }; + + let l1_info = L1BlockInfoTx::decode_calldata(tx.input.as_ref())?; + let l1_fee_scalar = match l1_info { + L1BlockInfoTx::Bedrock(L1BlockInfoBedrock { l1_fee_scalar, .. }) => l1_fee_scalar, + L1BlockInfoTx::Ecotone(L1BlockInfoEcotone { + blob_base_fee, + blob_base_fee_scalar, + .. + }) => { + // Translate Ecotone values back into encoded scalar if needed. + // We do not know if it was derived from a v0 or v1 scalar, + // but v1 is fine, a 0 blob base fee has the same effect. 
+ let mut buf = B256::ZERO; + buf[0] = 0x01; + buf[24..28].copy_from_slice(blob_base_fee_scalar.to_be_bytes().as_ref()); + buf[28..32].copy_from_slice(blob_base_fee.to_be_bytes().as_ref()); + buf.into() + } + }; + + Ok(SystemConfig { + batcher_addr: l1_info.batcher_address(), + l1_fee_overhead: l1_info.l1_fee_overhead(), + l1_fee_scalar, + gas_limit: U256::from(execution_payload.gas_limit), + }) + } } impl From for L2ExecutionPayloadEnvelope { diff --git a/crates/derive/src/types/rollup_config.rs b/crates/derive/src/types/rollup_config.rs index 751f4413d..c27739f73 100644 --- a/crates/derive/src/types/rollup_config.rs +++ b/crates/derive/src/types/rollup_config.rs @@ -84,11 +84,6 @@ impl RollupConfig { self.regolith_time.map_or(false, |t| timestamp >= t) } - /// Returns the L1 Signer Address. - pub fn l1_signer_address(&self) -> Address { - self.genesis.system_config.unsafe_block_signer - } - /// Returns true if Canyon is active at the given timestamp. pub fn is_canyon_active(&self, timestamp: u64) -> bool { self.canyon_time.map_or(false, |t| timestamp >= t) diff --git a/crates/derive/src/types/system_config.rs b/crates/derive/src/types/system_config.rs index 7a3d83631..2f5391fe8 100644 --- a/crates/derive/src/types/system_config.rs +++ b/crates/derive/src/types/system_config.rs @@ -22,8 +22,6 @@ pub struct SystemConfig { /// Fee scalar #[cfg_attr(feature = "serde", serde(rename = "scalar"))] pub l1_fee_scalar: U256, - /// Sequencer's signer for unsafe blocks - pub unsafe_block_signer: Address, } /// Represents type of update to the system config. 
From 5a60f8db2dd86488f5488b25d4ee02c1732f4aa7 Mon Sep 17 00:00:00 2001 From: refcell Date: Fri, 19 Apr 2024 13:37:56 -0400 Subject: [PATCH 3/4] feat(derive): Span Batch Validation (#121) * feat(derive): span batch validation * feat(derive): span batch validity unit tests * feat(derive): span batch unit tests with acceptance test * fix(derive): unit tests * fix(derive): add more unit tests * feat(derive): span batch validity unit tests for txs --- crates/derive/src/stages/batch_queue.rs | 23 +- .../derive/src/stages/test_utils/tracing.rs | 26 +- crates/derive/src/types/batch/mod.rs | 8 +- .../src/types/batch/span_batch/batch.rs | 1347 ++++++++++++++++- .../src/types/batch/span_batch/element.rs | 2 +- crates/derive/src/types/payload.rs | 2 +- 6 files changed, 1385 insertions(+), 23 deletions(-) diff --git a/crates/derive/src/stages/batch_queue.rs b/crates/derive/src/stages/batch_queue.rs index ca6d80ae9..9a66fb20a 100644 --- a/crates/derive/src/stages/batch_queue.rs +++ b/crates/derive/src/stages/batch_queue.rs @@ -107,7 +107,11 @@ where /// Follows the validity rules imposed on consecutive batches. /// Based on currently available buffered batch and L1 origin information. /// A [StageError::Eof] is returned if no batch can be derived yet. - pub fn derive_next_batch(&mut self, empty: bool, parent: L2BlockInfo) -> StageResult { + pub async fn derive_next_batch( + &mut self, + empty: bool, + parent: L2BlockInfo, + ) -> StageResult { // Cannot derive a batch if no origin was prepared. 
if self.l1_blocks.is_empty() { return Err(StageError::MissingOrigin); @@ -140,7 +144,8 @@ where let mut remaining = Vec::new(); for i in 0..self.batches.len() { let batch = &self.batches[i]; - let validity = batch.check_batch(&self.cfg, &self.l1_blocks, parent, &self.fetcher); + let validity = + batch.check_batch(&self.cfg, &self.l1_blocks, parent, &mut self.fetcher).await; match validity { BatchValidity::Future => { remaining.push(batch.clone()); @@ -221,7 +226,7 @@ where } /// Adds a batch to the queue. - pub fn add_batch(&mut self, batch: Batch, parent: L2BlockInfo) -> StageResult<()> { + pub async fn add_batch(&mut self, batch: Batch, parent: L2BlockInfo) -> StageResult<()> { if self.l1_blocks.is_empty() { error!("Cannot add batch without an origin"); panic!("Cannot add batch without an origin"); @@ -229,7 +234,7 @@ where let origin = self.origin.ok_or_else(|| anyhow!("cannot add batch with missing origin"))?; let data = BatchWithInclusionBlock { inclusion_block: origin, batch }; // If we drop the batch, validation logs the drop reason with WARN level. - if data.check_batch(&self.cfg, &self.l1_blocks, parent, &self.fetcher).is_drop() { + if data.check_batch(&self.cfg, &self.l1_blocks, parent, &mut self.fetcher).await.is_drop() { return Ok(()); } self.batches.push(data); @@ -310,7 +315,7 @@ where match self.prev.next_batch().await { Ok(b) => { if !origin_behind { - self.add_batch(b, parent).ok(); + self.add_batch(b, parent).await.ok(); } else { warn!("Dropping batch: Origin is behind"); } @@ -329,7 +334,7 @@ where } // Attempt to derive more batches. 
- let batch = match self.derive_next_batch(out_of_data, parent) { + let batch = match self.derive_next_batch(out_of_data, parent).await { Ok(b) => b, Err(e) => match e { StageError::Eof => { @@ -418,15 +423,15 @@ mod tests { BatchReader::from(compressed) } - #[test] - fn test_derive_next_batch_missing_origin() { + #[tokio::test] + async fn test_derive_next_batch_missing_origin() { let data = vec![Ok(Batch::Single(SingleBatch::default()))]; let cfg = Arc::new(RollupConfig::default()); let mock = MockBatchQueueProvider::new(data); let fetcher = MockBlockFetcher::default(); let mut bq = BatchQueue::new(cfg, mock, fetcher); let parent = L2BlockInfo::default(); - let result = bq.derive_next_batch(false, parent).unwrap_err(); + let result = bq.derive_next_batch(false, parent).await.unwrap_err(); assert_eq!(result, StageError::MissingOrigin); } diff --git a/crates/derive/src/stages/test_utils/tracing.rs b/crates/derive/src/stages/test_utils/tracing.rs index a73910e9b..eb065c203 100644 --- a/crates/derive/src/stages/test_utils/tracing.rs +++ b/crates/derive/src/stages/test_utils/tracing.rs @@ -7,7 +7,29 @@ use tracing::{Event, Level, Subscriber}; use tracing_subscriber::{layer::Context, Layer}; /// The storage for the collected traces. -pub type TraceStorage = Arc>>; +#[derive(Debug, Default, Clone)] +pub struct TraceStorage(pub Arc>>); + +impl TraceStorage { + /// Returns the items in the storage that match the specified level. + pub fn get_by_level(&self, level: Level) -> Vec { + self.0 + .lock() + .iter() + .filter_map(|(l, message)| if *l == level { Some(message.clone()) } else { None }) + .collect() + } + + /// Locks the storage and returns the items. + pub fn lock(&self) -> spin::MutexGuard<'_, Vec<(Level, String)>> { + self.0.lock() + } + + /// Returns if the storage is empty. 
+ pub fn is_empty(&self) -> bool { + self.0.lock().is_empty() + } +} #[derive(Debug, Default)] pub struct CollectingLayer { @@ -26,7 +48,7 @@ impl Layer for CollectingLayer { let level = *metadata.level(); let message = format!("{:?}", event); - let mut storage = self.storage.lock(); + let mut storage = self.storage.0.lock(); storage.push((level, message)); } } diff --git a/crates/derive/src/types/batch/mod.rs b/crates/derive/src/types/batch/mod.rs index e01be15b7..877fae810 100644 --- a/crates/derive/src/types/batch/mod.rs +++ b/crates/derive/src/types/batch/mod.rs @@ -40,19 +40,21 @@ impl BatchWithInclusionBlock { /// One or more consecutive l1_blocks should be provided. /// In case of only a single L1 block, the decision whether a batch is valid may have to stay /// undecided. - pub fn check_batch( + pub async fn check_batch( &self, cfg: &RollupConfig, l1_blocks: &[BlockInfo], l2_safe_head: L2BlockInfo, - fetcher: &BF, + fetcher: &mut BF, ) -> BatchValidity { match &self.batch { Batch::Single(single_batch) => { single_batch.check_batch(cfg, l1_blocks, l2_safe_head, &self.inclusion_block) } Batch::Span(span_batch) => { - span_batch.check_batch(cfg, l1_blocks, l2_safe_head, &self.inclusion_block, fetcher) + span_batch + .check_batch(cfg, l1_blocks, l2_safe_head, &self.inclusion_block, fetcher) + .await } } } diff --git a/crates/derive/src/types/batch/span_batch/batch.rs b/crates/derive/src/types/batch/span_batch/batch.rs index 050ba2892..f82288465 100644 --- a/crates/derive/src/types/batch/span_batch/batch.rs +++ b/crates/derive/src/types/batch/span_batch/batch.rs @@ -12,6 +12,8 @@ use crate::{ }; use alloc::{vec, vec::Vec}; use alloy_primitives::FixedBytes; +use op_alloy_consensus::OpTxType; +use tracing::{info, warn}; /// The span batch contains the input to build a span of L2 blocks in derived form. 
#[derive(Debug, Default, Clone, PartialEq, Eq)] @@ -40,16 +42,280 @@ impl SpanBatch { self.batches[0].timestamp } + /// Returns the epoch number for the first batch in the span. + pub fn starting_epoch_num(&self) -> u64 { + self.batches[0].epoch_num + } + + /// Checks if the first 20 bytes of the given hash match the L1 origin check. + pub fn check_origin_hash(&self, hash: FixedBytes<32>) -> bool { + self.l1_origin_check == hash[..20] + } + + /// Checks if the first 20 bytes of the given hash match the parent check. + pub fn check_parent_hash(&self, hash: FixedBytes<32>) -> bool { + self.parent_check == hash[..20] + } + /// Checks if the span batch is valid. - pub fn check_batch( + pub async fn check_batch( &self, - _cfg: &RollupConfig, - _l1_blocks: &[BlockInfo], - _l2_safe_head: L2BlockInfo, - _inclusion_block: &BlockInfo, - _fetcher: &BF, + cfg: &RollupConfig, + l1_blocks: &[BlockInfo], + l2_safe_head: L2BlockInfo, + inclusion_block: &BlockInfo, + fetcher: &mut BF, ) -> BatchValidity { - unimplemented!() + if l1_blocks.is_empty() { + warn!("missing L1 block input, cannot proceed with batch checking"); + return BatchValidity::Undecided; + } + if self.batches.is_empty() { + warn!("empty span batch, cannot proceed with batch checking"); + return BatchValidity::Undecided; + } + let epoch = l1_blocks[0]; + let mut batch_origin = epoch; + let starting_epoch_num = self.starting_epoch_num(); + if starting_epoch_num == batch_origin.number + 1 { + if l1_blocks.len() < 2 { + info!("eager batch wants to advance current epoch {}, but could not without more L1 blocks", epoch.id()); + return BatchValidity::Undecided; + } + batch_origin = l1_blocks[1]; + } + + // Span batches are only valid after the Delta hard fork. + if !cfg.is_delta_active(batch_origin.timestamp) { + warn!( + "received SpanBatch (id {}) with L1 origin (timestamp {}) before Delta hard fork", + batch_origin.id(), + batch_origin.timestamp + ); + return BatchValidity::Drop; + } + + // Skip out of order batches. 
+ let next_timestamp = l2_safe_head.block_info.timestamp + cfg.block_time; + if self.timestamp() > next_timestamp { + warn!( + "received out-of-order batch for future processing after next batch ({} > {})", + self.timestamp(), + next_timestamp + ); + return BatchValidity::Future; + } + // SAFETY: The span batch is not empty so the last element exists. + if self.batches.last().unwrap().timestamp < next_timestamp { + warn!("span batch has no new blocks after safe head"); + return BatchValidity::Drop; + } + + // Find the parent block of the span batch. + // If the span batch does not overlap the current safe chain, parent block should be the L2 + // safe head. + let mut parent_num = l2_safe_head.block_info.number; + let parent_block = l2_safe_head; + if self.timestamp() < next_timestamp { + if self.timestamp() > l2_safe_head.block_info.timestamp { + // Batch timestamp cannot be between safe head and next timestamp. + warn!("batch has misaligned timestamp, block time is too short"); + return BatchValidity::Drop; + } + if (l2_safe_head.block_info.timestamp - self.timestamp()) % cfg.block_time != 0 { + warn!("batch has misaligned timestamp, not overlapped exactly"); + return BatchValidity::Drop; + } + parent_num = l2_safe_head.block_info.number - + (l2_safe_head.block_info.timestamp - self.timestamp()) / cfg.block_time - + 1; + let parent_block = match fetcher.l2_block_info_by_number(parent_num).await { + Ok(block) => block, + Err(e) => { + warn!("failed to fetch L2 block number {parent_num}: {e}"); + // Unable to validate the batch for now. Retry later. + return BatchValidity::Undecided; + } + }; + } + if !self.check_parent_hash(parent_block.block_info.parent_hash) { + warn!( + "parent block number mismatch, expected: {parent_num}, received: {}", + parent_block.block_info.number + ); + return BatchValidity::Drop; + } + + // Filter out batches that were included too late. 
+ if starting_epoch_num + cfg.seq_window_size < inclusion_block.number { + warn!("batch was included too late, sequence window expired"); + return BatchValidity::Drop; + } + + // Check the L1 origin of the batch + if starting_epoch_num > parent_block.l1_origin.number + 1 { + warn!( + "batch is for future epoch too far ahead, while it has the next timestamp, so it must be invalid, current_epoch: {}", + epoch.id() + ); + return BatchValidity::Drop; + } + + // Verify the l1 origin hash for each l1 block. + // SAFETY: The span batch is not empty so the last element exists. + let end_epoch_num = self.batches.last().unwrap().epoch_num; + let mut origin_checked = false; + // l1Blocks is supplied from batch queue and its length is limited to SequencerWindowSize. + for l1_block in l1_blocks { + if l1_block.number == end_epoch_num { + if !self.check_origin_hash(l1_block.hash) { + warn!( + "batch is for different L1 chain, epoch hash does not match, expected: {}", + l1_block.hash + ); + return BatchValidity::Drop; + } + origin_checked = true; + break; + } + } + if !origin_checked { + info!("need more l1 blocks to check entire origins of span batch"); + return BatchValidity::Undecided; + } + + // Check if the batch is too old. + if starting_epoch_num < parent_block.l1_origin.number { + warn!("dropped batch, epoch is too old, minimum: {}", parent_block.block_info.id()); + return BatchValidity::Drop; + } + + let mut origin_index = 0; + let mut origin_advanced = starting_epoch_num == parent_block.l1_origin.number + 1; + for (i, batch) in self.batches.iter().enumerate() { + if batch.timestamp <= l2_safe_head.block_info.timestamp { + continue; + } + // Find the L1 origin for the batch. 
+ for (j, j_block) in l1_blocks.iter().enumerate().skip(origin_index) { + if batch.epoch_num == j_block.number { + origin_index = j; + break; + } + } + let l1_origin = l1_blocks[origin_index]; + if i > 0 { + origin_advanced = false; + if batch.epoch_num > self.batches[i - 1].epoch_num { + origin_advanced = true; + } + } + let block_timestamp = batch.timestamp; + if block_timestamp < l1_origin.timestamp { + warn!( + "block timestamp is less than L1 origin timestamp, l2_timestamp: {}, l1_timestamp: {}, origin: {}", + block_timestamp, + l1_origin.timestamp, + l1_origin.id() + ); + return BatchValidity::Drop; + } + // Check if we ran out of sequencer time drift + if block_timestamp > l1_origin.timestamp + cfg.max_sequencer_drift { + if batch.transactions.is_empty() { + // If the sequencer is co-operating by producing an empty batch, + // then allow the batch if it was the right thing to do to maintain the L2 time + // >= L1 time invariant. We only check batches that do not + // advance the epoch, to ensure epoch advancement regardless of time drift is + // allowed. + if !origin_advanced { + if origin_index + 1 >= l1_blocks.len() { + info!("without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid"); + return BatchValidity::Undecided; + } + if block_timestamp >= l1_blocks[origin_index + 1].timestamp { + // check if the next L1 origin could have been adopted + info!("batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid"); + return BatchValidity::Drop; + } else { + info!("continuing with empty batch before late L1 block to preserve L2 time invariant"); + } + } + } else { + // If the sequencer is ignoring the time drift rule, then drop the batch and + // force an empty batch instead, as the sequencer is not + // allowed to include anything past this point without moving to the next epoch. 
+ warn!( + "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again, max_time: {}", + l1_origin.timestamp + cfg.max_sequencer_drift + ); + return BatchValidity::Drop; + } + } + + // Check that the transactions are not empty and do not contain any deposits. + for (tx_index, tx_bytes) in batch.transactions.iter().enumerate() { + if tx_bytes.is_empty() { + warn!( + "transaction data must not be empty, but found empty tx, tx_index: {}", + tx_index + ); + return BatchValidity::Drop; + } + if tx_bytes.0[0] == OpTxType::Deposit as u8 { + warn!("sequencers may not embed any deposits into batch data, but found tx that has one, tx_index: {}", tx_index); + return BatchValidity::Drop; + } + } + } + + // Check overlapped blocks + if self.timestamp() < next_timestamp { + for i in 0..(l2_safe_head.block_info.number - parent_num) { + let safe_block_num = parent_num + i + 1; + let safe_block_payload = match fetcher.payload_by_number(safe_block_num).await { + Ok(p) => p, + Err(e) => { + warn!("failed to fetch payload for block number {safe_block_num}: {e}"); + return BatchValidity::Undecided; + } + }; + let safe_block_txs = &safe_block_payload.execution_payload.transactions; + let batch_txs = &self.batches[i as usize].transactions; + // Execution payload has deposit txs but batch does not. 
+ let deposit_count: usize = safe_block_txs + .iter() + .map(|tx| if tx.0[0] == OpTxType::Deposit as u8 { 1 } else { 0 }) + .sum(); + if safe_block_txs.len() - deposit_count != batch_txs.len() { + warn!( + "overlapped block's tx count does not match, safe_block_txs: {}, batch_txs: {}", + safe_block_txs.len(), + batch_txs.len() + ); + return BatchValidity::Drop; + } + for j in 0..batch_txs.len() { + if safe_block_txs[j + deposit_count] != batch_txs[j].0 { + warn!("overlapped block's transaction does not match"); + return BatchValidity::Drop; + } + } + let safe_block_ref = match safe_block_payload.to_l2_block_ref(cfg) { + Ok(r) => r, + Err(e) => { + warn!("failed to extract L2BlockInfo from execution payload, hash: {}, err: {e}", safe_block_payload.execution_payload.block_hash); + return BatchValidity::Drop; + } + }; + if safe_block_ref.l1_origin.number != self.batches[i as usize].epoch_num { + warn!("overlapped block's L1 origin number does not match"); + return BatchValidity::Drop; + } + } + } + + BatchValidity::Accept } /// Converts the span batch to a raw span batch. 
@@ -162,3 +428,1070 @@ impl SpanBatch { &self.batches[self.batches.len() - 1 - n] } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + stages::test_utils::{CollectingLayer, TraceStorage}, + traits::test_utils::MockBlockFetcher, + types::{BlockID, Genesis, L2ExecutionPayload, L2ExecutionPayloadEnvelope, RawTransaction}, + }; + use alloy_primitives::{b256, Bytes, B256}; + use op_alloy_consensus::OpTxType; + use tracing::Level; + use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; + + #[test] + fn test_timestamp() { + let timestamp = 10; + let first_element = SpanBatchElement { timestamp, ..Default::default() }; + let mut batch = + SpanBatch { batches: vec![first_element, Default::default()], ..Default::default() }; + assert_eq!(batch.timestamp(), timestamp); + } + + #[test] + fn test_starting_epoch_num() { + let epoch_num = 10; + let first_element = SpanBatchElement { epoch_num, ..Default::default() }; + let mut batch = + SpanBatch { batches: vec![first_element, Default::default()], ..Default::default() }; + assert_eq!(batch.starting_epoch_num(), epoch_num); + } + + #[test] + fn test_check_origin_hash() { + let l1_origin_check = FixedBytes::from([17u8; 20]); + let hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let batch = SpanBatch { l1_origin_check, ..Default::default() }; + assert!(batch.check_origin_hash(hash)); + // This hash has 19 matching bytes, the other 13 are zeros. + let invalid = b256!("1111111111111111111111111111111111111100000000000000000000000000"); + assert!(!batch.check_origin_hash(invalid)); + } + + #[test] + fn test_check_parent_hash() { + let parent_check = FixedBytes::from([17u8; 20]); + let hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let batch = SpanBatch { parent_check, ..Default::default() }; + assert!(batch.check_parent_hash(hash)); + // This hash has 19 matching bytes, the other 13 are zeros. 
+ let invalid = b256!("1111111111111111111111111111111111111100000000000000000000000000"); + assert!(!batch.check_parent_hash(invalid)); + } + + #[tokio::test] + async fn test_check_batch_missing_l1_block_input() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig::default(); + let l1_blocks = vec![]; + let l2_safe_head = L2BlockInfo::default(); + let inclusion_block = BlockInfo::default(); + let mut fetcher = MockBlockFetcher::default(); + let batch = SpanBatch::default(); + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Undecided + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("missing L1 block input, cannot proceed with batch checking")); + } + + #[tokio::test] + async fn test_check_batches_is_empty() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig::default(); + let l1_blocks = vec![BlockInfo::default()]; + let l2_safe_head = L2BlockInfo::default(); + let inclusion_block = BlockInfo::default(); + let mut fetcher = MockBlockFetcher::default(); + let batch = SpanBatch::default(); + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Undecided + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("empty span batch, cannot proceed with batch checking")); + } + + #[tokio::test] + async fn test_eager_block_missing_origins() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = 
RollupConfig::default(); + let block = BlockInfo { number: 9, ..Default::default() }; + let l1_blocks = vec![block]; + let l2_safe_head = L2BlockInfo::default(); + let inclusion_block = BlockInfo::default(); + let mut fetcher = MockBlockFetcher::default(); + let first = SpanBatchElement { epoch_num: 10, ..Default::default() }; + let batch = SpanBatch { batches: vec![first], ..Default::default() }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Undecided + ); + let logs = trace_store.get_by_level(Level::INFO); + assert_eq!(logs.len(), 1); + let str = alloc::format!( + "eager batch wants to advance current epoch {}, but could not without more L1 blocks", + block.id() + ); + assert!(logs[0].contains(&str)); + } + + #[tokio::test] + async fn test_check_batch_delta_inactive() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { delta_time: Some(10), ..Default::default() }; + let block = BlockInfo { number: 10, timestamp: 9, ..Default::default() }; + let l1_blocks = vec![block]; + let l2_safe_head = L2BlockInfo::default(); + let inclusion_block = BlockInfo::default(); + let mut fetcher = MockBlockFetcher::default(); + let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; + let batch = SpanBatch { batches: vec![first], ..Default::default() }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Drop + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + let str = alloc::format!( + "received SpanBatch (id {}) with L1 origin (timestamp {}) before Delta hard fork", + block.id(), + block.timestamp + ); + assert!(logs[0].contains(&str)); + } + + #[tokio::test] + async fn test_check_batch_out_of_order() { + let trace_store: 
TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { delta_time: Some(0), block_time: 10, ..Default::default() }; + let block = BlockInfo { number: 10, timestamp: 10, ..Default::default() }; + let l1_blocks = vec![block]; + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { timestamp: 10, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo::default(); + let mut fetcher = MockBlockFetcher::default(); + let first = SpanBatchElement { epoch_num: 10, timestamp: 21, ..Default::default() }; + let batch = SpanBatch { batches: vec![first], ..Default::default() }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Future + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains( + "received out-of-order batch for future processing after next batch (21 > 20)" + )); + } + + #[tokio::test] + async fn test_check_batch_no_new_blocks() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { delta_time: Some(0), block_time: 10, ..Default::default() }; + let block = BlockInfo { number: 10, timestamp: 10, ..Default::default() }; + let l1_blocks = vec![block]; + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { timestamp: 10, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo::default(); + let mut fetcher = MockBlockFetcher::default(); + let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; + let batch = SpanBatch { batches: vec![first], ..Default::default() }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + 
BatchValidity::Drop + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("span batch has no new blocks after safe head")); + } + + #[tokio::test] + async fn test_check_batch_misaligned_timestamp() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { delta_time: Some(0), block_time: 10, ..Default::default() }; + let block = BlockInfo { number: 10, timestamp: 10, ..Default::default() }; + let l1_blocks = vec![block]; + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { timestamp: 10, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo::default(); + let mut fetcher = MockBlockFetcher::default(); + let first = SpanBatchElement { epoch_num: 10, timestamp: 11, ..Default::default() }; + let second = SpanBatchElement { epoch_num: 11, timestamp: 21, ..Default::default() }; + let batch = SpanBatch { batches: vec![first, second], ..Default::default() }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Drop + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("batch has misaligned timestamp, block time is too short")); + } + + #[tokio::test] + async fn test_check_batch_misaligned_without_overlap() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { delta_time: Some(0), block_time: 10, ..Default::default() }; + let block = BlockInfo { number: 10, timestamp: 10, ..Default::default() }; + let l1_blocks = vec![block]; + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { timestamp: 10, ..Default::default() }, + ..Default::default() + }; + let 
inclusion_block = BlockInfo::default(); + let mut fetcher = MockBlockFetcher::default(); + let first = SpanBatchElement { epoch_num: 10, timestamp: 8, ..Default::default() }; + let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; + let batch = SpanBatch { batches: vec![first, second], ..Default::default() }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Drop + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("batch has misaligned timestamp, not overlapped exactly")); + } + + #[tokio::test] + async fn test_check_batch_failed_to_fetch_l2_block() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { delta_time: Some(0), block_time: 10, ..Default::default() }; + let block = BlockInfo { number: 10, timestamp: 10, ..Default::default() }; + let l1_blocks = vec![block]; + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo::default(); + let mut fetcher = MockBlockFetcher::default(); + let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; + let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; + let batch = SpanBatch { batches: vec![first, second], ..Default::default() }; + // parent number = 41 - (10 - 10) / 10 - 1 = 40 + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Undecided + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("failed to fetch L2 block number 40: Block not found")); + } + + #[tokio::test] + async fn 
test_check_batch_parent_hash_fail() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { delta_time: Some(0), block_time: 10, ..Default::default() }; + let block = BlockInfo { number: 10, timestamp: 10, ..Default::default() }; + let l1_blocks = vec![block]; + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo::default(); + let l2_block = L2BlockInfo { + block_info: BlockInfo { number: 40, ..Default::default() }, + ..Default::default() + }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; + let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; + let batch = SpanBatch { + batches: vec![first, second], + parent_check: FixedBytes::<20>::from_slice( + &b256!("1111111111111111111111111111111111111111000000000000000000000000")[..20], + ), + ..Default::default() + }; + // parent number = 41 - (10 - 10) / 10 - 1 = 40 + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Drop + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("parent block number mismatch, expected: 40, received: 41")); + } + + #[tokio::test] + async fn test_check_sequence_window_expired() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { delta_time: Some(0), block_time: 10, ..Default::default() }; + let block = BlockInfo { number: 10, timestamp: 10, ..Default::default() }; + let l1_blocks = vec![block]; + let 
parent_hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, parent_hash, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo { number: 50, ..Default::default() }; + let l2_block = L2BlockInfo { + block_info: BlockInfo { number: 40, ..Default::default() }, + ..Default::default() + }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; + let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; + let batch = SpanBatch { + batches: vec![first, second], + parent_check: FixedBytes::<20>::from_slice(&parent_hash[..20]), + ..Default::default() + }; + // parent number = 41 - (10 - 10) / 10 - 1 = 40 + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Drop + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("batch was included too late, sequence window expired")); + } + + #[tokio::test] + async fn test_starting_epoch_too_far_ahead() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { + seq_window_size: 100, + delta_time: Some(0), + block_time: 10, + ..Default::default() + }; + let block = BlockInfo { number: 10, timestamp: 10, ..Default::default() }; + let l1_blocks = vec![block]; + let parent_hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, parent_hash, ..Default::default() }, + l1_origin: BlockID { number: 8, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = 
BlockInfo { number: 50, ..Default::default() }; + let l2_block = L2BlockInfo { + block_info: BlockInfo { number: 40, ..Default::default() }, + ..Default::default() + }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; + let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; + let batch = SpanBatch { + batches: vec![first, second], + parent_check: FixedBytes::<20>::from_slice(&parent_hash[..20]), + ..Default::default() + }; + // parent number = 41 - (10 - 10) / 10 - 1 = 40 + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Drop + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + let str = alloc::format!( + "batch is for future epoch too far ahead, while it has the next timestamp, so it must be invalid, current_epoch: {}", + block.id(), + ); + assert!(logs[0].contains(&str)); + } + + #[tokio::test] + async fn test_check_batch_epoch_hash_mismatch() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { + seq_window_size: 100, + delta_time: Some(0), + block_time: 10, + ..Default::default() + }; + let l1_block_hash = + b256!("3333333333333333333333333333333333333333000000000000000000000000"); + let block = + BlockInfo { number: 11, timestamp: 10, hash: l1_block_hash, ..Default::default() }; + let l1_blocks = vec![block]; + let parent_hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, parent_hash, ..Default::default() }, + l1_origin: BlockID { number: 9, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo { number: 50, 
..Default::default() }; + let l2_block = L2BlockInfo { + block_info: BlockInfo { number: 40, ..Default::default() }, + ..Default::default() + }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; + let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; + let batch = SpanBatch { + batches: vec![first, second], + parent_check: FixedBytes::<20>::from_slice(&parent_hash[..20]), + ..Default::default() + }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Drop + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + let str = alloc::format!( + "batch is for different L1 chain, epoch hash does not match, expected: {}", + l1_block_hash, + ); + assert!(logs[0].contains(&str)); + } + + #[tokio::test] + async fn test_need_more_l1_blocks() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { + seq_window_size: 100, + delta_time: Some(0), + block_time: 10, + ..Default::default() + }; + let l1_block_hash = + b256!("3333333333333333333333333333333333333333000000000000000000000000"); + let block = + BlockInfo { number: 10, timestamp: 10, hash: l1_block_hash, ..Default::default() }; + let l1_blocks = vec![block]; + let parent_hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, parent_hash, ..Default::default() }, + l1_origin: BlockID { number: 9, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo { number: 50, ..Default::default() }; + let l2_block = L2BlockInfo { + block_info: BlockInfo { number: 40, ..Default::default() }, + 
..Default::default() + }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; + let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; + let batch = SpanBatch { + batches: vec![first, second], + parent_check: FixedBytes::<20>::from_slice(&parent_hash[..20]), + l1_origin_check: FixedBytes::<20>::from_slice(&l1_block_hash[..20]), + ..Default::default() + }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Undecided + ); + let logs = trace_store.get_by_level(Level::INFO); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("need more l1 blocks to check entire origins of span batch")); + } + + #[tokio::test] + async fn test_drop_batch_epoch_too_old() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { + seq_window_size: 100, + delta_time: Some(0), + block_time: 10, + ..Default::default() + }; + let l1_block_hash = + b256!("3333333333333333333333333333333333333333000000000000000000000000"); + let block = + BlockInfo { number: 11, timestamp: 10, hash: l1_block_hash, ..Default::default() }; + let l1_blocks = vec![block]; + let parent_hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, parent_hash, ..Default::default() }, + l1_origin: BlockID { number: 13, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo { number: 50, ..Default::default() }; + let l2_block = L2BlockInfo { + block_info: BlockInfo { number: 40, ..Default::default() }, + ..Default::default() + }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let 
first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; + let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; + let batch = SpanBatch { + batches: vec![first, second], + parent_check: FixedBytes::<20>::from_slice(&parent_hash[..20]), + l1_origin_check: FixedBytes::<20>::from_slice(&l1_block_hash[..20]), + ..Default::default() + }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Drop + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + let str = alloc::format!( + "dropped batch, epoch is too old, minimum: {}", + l2_safe_head.block_info.id(), + ); + assert!(logs[0].contains(&str)); + } + + // TODO: Test block timestamp less than L1 origin + + // TODO: Test missing l1 origin for empty batch + + #[tokio::test] + async fn test_check_batch_exceeds_max_seq_drif() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { + seq_window_size: 100, + max_sequencer_drift: 0, + delta_time: Some(0), + block_time: 10, + ..Default::default() + }; + let l1_block_hash = + b256!("3333333333333333333333333333333333333333000000000000000000000000"); + let block = + BlockInfo { number: 11, timestamp: 10, hash: l1_block_hash, ..Default::default() }; + let second_block = + BlockInfo { number: 12, timestamp: 10, hash: l1_block_hash, ..Default::default() }; + let l1_blocks = vec![block, second_block]; + let parent_hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, parent_hash, ..Default::default() }, + l1_origin: BlockID { number: 9, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo { number: 50, ..Default::default() }; + let 
l2_block = L2BlockInfo { + block_info: BlockInfo { number: 40, ..Default::default() }, + ..Default::default() + }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let first = SpanBatchElement { epoch_num: 10, timestamp: 20, ..Default::default() }; + let second = SpanBatchElement { epoch_num: 10, timestamp: 20, ..Default::default() }; + let third = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; + let batch = SpanBatch { + batches: vec![first, second, third], + parent_check: FixedBytes::<20>::from_slice(&parent_hash[..20]), + l1_origin_check: FixedBytes::<20>::from_slice(&l1_block_hash[..20]), + ..Default::default() + }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Drop + ); + let logs = trace_store.get_by_level(Level::INFO); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid")); + } + + #[tokio::test] + async fn test_continuing_with_empty_batch() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { + seq_window_size: 100, + max_sequencer_drift: 0, + delta_time: Some(0), + block_time: 10, + ..Default::default() + }; + let l1_block_hash = + b256!("3333333333333333333333333333333333333333000000000000000000000000"); + let block = + BlockInfo { number: 11, timestamp: 10, hash: l1_block_hash, ..Default::default() }; + let second_block = + BlockInfo { number: 12, timestamp: 21, hash: l1_block_hash, ..Default::default() }; + let l1_blocks = vec![block, second_block]; + let parent_hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, parent_hash, 
..Default::default() }, + l1_origin: BlockID { number: 9, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo { number: 50, ..Default::default() }; + let l2_block = L2BlockInfo { + block_info: BlockInfo { number: 40, ..Default::default() }, + ..Default::default() + }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let first = SpanBatchElement { epoch_num: 10, timestamp: 20, transactions: vec![] }; + let second = SpanBatchElement { epoch_num: 10, timestamp: 20, transactions: vec![] }; + let third = SpanBatchElement { epoch_num: 11, timestamp: 20, transactions: vec![] }; + let batch = SpanBatch { + batches: vec![first, second, third], + parent_check: FixedBytes::<20>::from_slice(&parent_hash[..20]), + l1_origin_check: FixedBytes::<20>::from_slice(&l1_block_hash[..20]), + txs: SpanBatchTransactions::default(), + ..Default::default() + }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Accept + ); + let infos = trace_store.get_by_level(Level::INFO); + assert_eq!(infos.len(), 1); + assert!(infos[0].contains( + "continuing with empty batch before late L1 block to preserve L2 time invariant" + )); + } + + #[tokio::test] + async fn test_check_batch_exceeds_sequencer_time_drift() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { + seq_window_size: 100, + max_sequencer_drift: 0, + delta_time: Some(0), + block_time: 10, + ..Default::default() + }; + let l1_block_hash = + b256!("3333333333333333333333333333333333333333000000000000000000000000"); + let block = + BlockInfo { number: 11, timestamp: 10, hash: l1_block_hash, ..Default::default() }; + let second_block = + BlockInfo { number: 12, timestamp: 10, hash: l1_block_hash, ..Default::default() }; + let l1_blocks = vec![block, 
second_block]; + let parent_hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, parent_hash, ..Default::default() }, + l1_origin: BlockID { number: 9, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo { number: 50, ..Default::default() }; + let l2_block = L2BlockInfo { + block_info: BlockInfo { number: 40, ..Default::default() }, + ..Default::default() + }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let first = SpanBatchElement { + epoch_num: 10, + timestamp: 20, + transactions: vec![Default::default()], + }; + let second = SpanBatchElement { + epoch_num: 10, + timestamp: 20, + transactions: vec![Default::default()], + }; + let third = SpanBatchElement { + epoch_num: 11, + timestamp: 20, + transactions: vec![Default::default()], + }; + let batch = SpanBatch { + batches: vec![first, second, third], + parent_check: FixedBytes::<20>::from_slice(&parent_hash[..20]), + l1_origin_check: FixedBytes::<20>::from_slice(&l1_block_hash[..20]), + txs: SpanBatchTransactions::default(), + ..Default::default() + }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Drop + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again, max_time: 10")); + } + + #[tokio::test] + async fn test_check_batch_empty_txs() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { + seq_window_size: 100, + max_sequencer_drift: 100, + delta_time: Some(0), + block_time: 10, + ..Default::default() + }; + let l1_block_hash = + 
b256!("3333333333333333333333333333333333333333000000000000000000000000"); + let block = + BlockInfo { number: 11, timestamp: 10, hash: l1_block_hash, ..Default::default() }; + let second_block = + BlockInfo { number: 12, timestamp: 21, hash: l1_block_hash, ..Default::default() }; + let l1_blocks = vec![block, second_block]; + let parent_hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, parent_hash, ..Default::default() }, + l1_origin: BlockID { number: 9, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo { number: 50, ..Default::default() }; + let l2_block = L2BlockInfo { + block_info: BlockInfo { number: 40, ..Default::default() }, + ..Default::default() + }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let first = SpanBatchElement { + epoch_num: 10, + timestamp: 20, + transactions: vec![Default::default()], + }; + let second = SpanBatchElement { + epoch_num: 10, + timestamp: 20, + transactions: vec![Default::default()], + }; + let third = SpanBatchElement { epoch_num: 11, timestamp: 20, transactions: vec![] }; + let batch = SpanBatch { + batches: vec![first, second, third], + parent_check: FixedBytes::<20>::from_slice(&parent_hash[..20]), + l1_origin_check: FixedBytes::<20>::from_slice(&l1_block_hash[..20]), + txs: SpanBatchTransactions::default(), + ..Default::default() + }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Drop + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("transaction data must not be empty, but found empty tx")); + } + + #[tokio::test] + async fn test_check_batch_with_deposit_tx() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + 
tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { + seq_window_size: 100, + max_sequencer_drift: 100, + delta_time: Some(0), + block_time: 10, + ..Default::default() + }; + let l1_block_hash = + b256!("3333333333333333333333333333333333333333000000000000000000000000"); + let block = + BlockInfo { number: 11, timestamp: 10, hash: l1_block_hash, ..Default::default() }; + let second_block = + BlockInfo { number: 12, timestamp: 21, hash: l1_block_hash, ..Default::default() }; + let l1_blocks = vec![block, second_block]; + let parent_hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, parent_hash, ..Default::default() }, + l1_origin: BlockID { number: 9, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo { number: 50, ..Default::default() }; + let l2_block = L2BlockInfo { + block_info: BlockInfo { number: 40, ..Default::default() }, + ..Default::default() + }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let filler_bytes = RawTransaction(Bytes::copy_from_slice(&[OpTxType::Eip1559 as u8])); + let first = SpanBatchElement { + epoch_num: 10, + timestamp: 20, + transactions: vec![filler_bytes.clone()], + }; + let second = SpanBatchElement { + epoch_num: 10, + timestamp: 20, + transactions: vec![RawTransaction(Bytes::copy_from_slice(&[OpTxType::Deposit as u8]))], + }; + let third = + SpanBatchElement { epoch_num: 11, timestamp: 20, transactions: vec![filler_bytes] }; + let batch = SpanBatch { + batches: vec![first, second, third], + parent_check: FixedBytes::<20>::from_slice(&parent_hash[..20]), + l1_origin_check: FixedBytes::<20>::from_slice(&l1_block_hash[..20]), + txs: SpanBatchTransactions::default(), + ..Default::default() + }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + 
BatchValidity::Drop + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("sequencers may not embed any deposits into batch data, but found tx that has one, tx_index: 0")); + } + + #[tokio::test] + async fn test_check_batch_failed_to_fetch_payload() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { + seq_window_size: 100, + delta_time: Some(0), + block_time: 10, + ..Default::default() + }; + let l1_block_hash = + b256!("3333333333333333333333333333333333333333000000000000000000000000"); + let block = + BlockInfo { number: 11, timestamp: 10, hash: l1_block_hash, ..Default::default() }; + let l1_blocks = vec![block]; + let parent_hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, parent_hash, ..Default::default() }, + l1_origin: BlockID { number: 9, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo { number: 50, ..Default::default() }; + let l2_block = L2BlockInfo { + block_info: BlockInfo { number: 40, ..Default::default() }, + ..Default::default() + }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; + let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; + let batch = SpanBatch { + batches: vec![first, second], + parent_check: FixedBytes::<20>::from_slice(&parent_hash[..20]), + l1_origin_check: FixedBytes::<20>::from_slice(&l1_block_hash[..20]), + ..Default::default() + }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Undecided + ); + let logs = 
trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("failed to fetch payload for block number 41: Payload not found")); + } + + // TODO: Test overlap block tx count mismatch + + // TODO: Test overlap block tx doesn't match + + #[tokio::test] + async fn test_check_batch_failed_to_extract_l2_block_info() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let cfg = RollupConfig { + seq_window_size: 100, + delta_time: Some(0), + block_time: 10, + ..Default::default() + }; + let l1_block_hash = + b256!("3333333333333333333333333333333333333333000000000000000000000000"); + let block = + BlockInfo { number: 11, timestamp: 10, hash: l1_block_hash, ..Default::default() }; + let l1_blocks = vec![block]; + let parent_hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, parent_hash, ..Default::default() }, + l1_origin: BlockID { number: 9, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo { number: 50, ..Default::default() }; + let l2_block = L2BlockInfo { + block_info: BlockInfo { number: 40, ..Default::default() }, + ..Default::default() + }; + let payload = L2ExecutionPayloadEnvelope { + parent_beacon_block_root: None, + execution_payload: L2ExecutionPayload { block_number: 41, ..Default::default() }, + }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![payload] }; + let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; + let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; + let batch = SpanBatch { + batches: vec![first, second], + parent_check: FixedBytes::<20>::from_slice(&parent_hash[..20]), + l1_origin_check: 
FixedBytes::<20>::from_slice(&l1_block_hash[..20]), + ..Default::default() + }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Drop + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + let str = alloc::format!( + "failed to extract L2BlockInfo from execution payload, hash: {}", + B256::default(), + ); + assert!(logs[0].contains(&str)); + } + + #[tokio::test] + async fn test_overlapped_blocks_origin_mismatch() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let payload_block_hash = + b256!("4444444444444444444444444444444444444444444444444444444444444444"); + let cfg = RollupConfig { + seq_window_size: 100, + delta_time: Some(0), + block_time: 10, + genesis: Genesis { + l2: BlockID { number: 41, hash: payload_block_hash }, + ..Default::default() + }, + ..Default::default() + }; + let l1_block_hash = + b256!("3333333333333333333333333333333333333333000000000000000000000000"); + let block = + BlockInfo { number: 11, timestamp: 10, hash: l1_block_hash, ..Default::default() }; + let l1_blocks = vec![block]; + let parent_hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, parent_hash, ..Default::default() }, + l1_origin: BlockID { number: 9, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo { number: 50, ..Default::default() }; + let l2_block = L2BlockInfo { + block_info: BlockInfo { number: 40, ..Default::default() }, + ..Default::default() + }; + let payload = L2ExecutionPayloadEnvelope { + parent_beacon_block_root: None, + execution_payload: L2ExecutionPayload { + block_number: 41, + block_hash: payload_block_hash, + ..Default::default() + }, + }; + let mut fetcher = 
MockBlockFetcher { blocks: vec![l2_block], payloads: vec![payload] }; + let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; + let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; + let batch = SpanBatch { + batches: vec![first, second], + parent_check: FixedBytes::<20>::from_slice(&parent_hash[..20]), + l1_origin_check: FixedBytes::<20>::from_slice(&l1_block_hash[..20]), + ..Default::default() + }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Drop + ); + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("overlapped block's L1 origin number does not match")); + } + + #[tokio::test] + async fn test_check_batch_valid_with_genesis_epoch() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let payload_block_hash = + b256!("4444444444444444444444444444444444444444444444444444444444444444"); + let cfg = RollupConfig { + seq_window_size: 100, + delta_time: Some(0), + block_time: 10, + genesis: Genesis { + l2: BlockID { number: 41, hash: payload_block_hash }, + l1: BlockID { number: 10, ..Default::default() }, + ..Default::default() + }, + ..Default::default() + }; + let l1_block_hash = + b256!("3333333333333333333333333333333333333333000000000000000000000000"); + let block = + BlockInfo { number: 11, timestamp: 10, hash: l1_block_hash, ..Default::default() }; + let l1_blocks = vec![block]; + let parent_hash = b256!("1111111111111111111111111111111111111111000000000000000000000000"); + let l2_safe_head = L2BlockInfo { + block_info: BlockInfo { number: 41, timestamp: 10, parent_hash, ..Default::default() }, + l1_origin: BlockID { number: 9, ..Default::default() }, + ..Default::default() + }; + let inclusion_block = BlockInfo { number: 50, 
..Default::default() }; + let l2_block = L2BlockInfo { + block_info: BlockInfo { number: 40, ..Default::default() }, + ..Default::default() + }; + let payload = L2ExecutionPayloadEnvelope { + parent_beacon_block_root: None, + execution_payload: L2ExecutionPayload { + block_number: 41, + block_hash: payload_block_hash, + ..Default::default() + }, + }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![payload] }; + let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; + let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; + let batch = SpanBatch { + batches: vec![first, second], + parent_check: FixedBytes::<20>::from_slice(&parent_hash[..20]), + l1_origin_check: FixedBytes::<20>::from_slice(&l1_block_hash[..20]), + ..Default::default() + }; + assert_eq!( + batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, + BatchValidity::Accept + ); + assert!(trace_store.is_empty()); + } +} diff --git a/crates/derive/src/types/batch/span_batch/element.rs b/crates/derive/src/types/batch/span_batch/element.rs index 4be577660..0ff115694 100644 --- a/crates/derive/src/types/batch/span_batch/element.rs +++ b/crates/derive/src/types/batch/span_batch/element.rs @@ -6,7 +6,7 @@ use alloc::vec::Vec; /// A single batch element is similar to the [SingleBatch] type /// but does not contain the parent hash and epoch hash since spans /// do not contain this data for every block in the span. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Default, Clone, PartialEq, Eq)] pub struct SpanBatchElement { /// The epoch number of the L1 block pub epoch_num: u64, diff --git a/crates/derive/src/types/payload.rs b/crates/derive/src/types/payload.rs index 347646f67..2dda40aed 100644 --- a/crates/derive/src/types/payload.rs +++ b/crates/derive/src/types/payload.rs @@ -48,7 +48,7 @@ impl L2ExecutionPayloadEnvelope { /// The execution payload. 
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Default, Clone, PartialEq, Eq)] pub struct L2ExecutionPayload { /// The parent hash. #[cfg_attr(feature = "serde", serde(rename = "parentHash"))] From 72896979e7f9a58e1889e04c67bc70f2529306bf Mon Sep 17 00:00:00 2001 From: clabby Date: Fri, 19 Apr 2024 14:46:30 -0400 Subject: [PATCH 4/4] fix(derive): Rebase span batch validation tests (#125) ## Overview Rebases the span batch validation tests on top of #123 --- .../src/types/batch/span_batch/batch.rs | 42 ++++++++++++------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/crates/derive/src/types/batch/span_batch/batch.rs b/crates/derive/src/types/batch/span_batch/batch.rs index f82288465..ac50a064f 100644 --- a/crates/derive/src/types/batch/span_batch/batch.rs +++ b/crates/derive/src/types/batch/span_batch/batch.rs @@ -733,7 +733,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { @@ -772,7 +772,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { @@ -815,7 +815,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut 
fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { @@ -865,7 +865,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { @@ -914,7 +914,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { @@ -960,7 +960,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { @@ -1017,7 +1017,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; 
+ let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 20, ..Default::default() }; let second = SpanBatchElement { epoch_num: 10, timestamp: 20, ..Default::default() }; let third = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; @@ -1067,7 +1067,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 20, transactions: vec![] }; let second = SpanBatchElement { epoch_num: 10, timestamp: 20, transactions: vec![] }; let third = SpanBatchElement { epoch_num: 11, timestamp: 20, transactions: vec![] }; @@ -1120,7 +1120,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 20, @@ -1183,7 +1183,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 20, @@ -1242,7 +1242,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; let filler_bytes = RawTransaction(Bytes::copy_from_slice(&[OpTxType::Eip1559 as u8])); let first = SpanBatchElement { 
epoch_num: 10, @@ -1300,7 +1300,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![] }; + let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { @@ -1354,7 +1354,11 @@ mod tests { parent_beacon_block_root: None, execution_payload: L2ExecutionPayload { block_number: 41, ..Default::default() }, }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![payload] }; + let mut fetcher = MockBlockFetcher { + blocks: vec![l2_block], + payloads: vec![payload], + ..Default::default() + }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { @@ -1418,7 +1422,11 @@ mod tests { ..Default::default() }, }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![payload] }; + let mut fetcher = MockBlockFetcher { + blocks: vec![l2_block], + payloads: vec![payload], + ..Default::default() + }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { @@ -1479,7 +1487,11 @@ mod tests { ..Default::default() }, }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], payloads: vec![payload] }; + let mut fetcher = MockBlockFetcher { + blocks: vec![l2_block], + payloads: vec![payload], + ..Default::default() + }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch {