fix(derive): so close :sadge
refcell committed Apr 19, 2024
1 parent f75eb5e commit 1f7fb9e
Showing 4 changed files with 184 additions and 41 deletions.
155 changes: 143 additions & 12 deletions crates/derive/src/stages/batch_queue.rs
@@ -334,7 +334,6 @@ where
}

// Attempt to derive more batches.
assert!(self.l1_blocks.is_empty());
let batch = match self.derive_next_batch(out_of_data, parent).await {
Ok(b) => b,
Err(e) => match e {
@@ -414,10 +413,16 @@ mod tests {
test_utils::{CollectingLayer, MockBatchQueueProvider, TraceStorage},
},
traits::test_utils::MockBlockFetcher,
types::{BatchType, BlockID},
types::{
BatchType, BlockID, Genesis, L1BlockInfoBedrock, L1BlockInfoTx, L2ExecutionPayload,
L2ExecutionPayloadEnvelope,
},
};
use alloc::vec;
use alloy_primitives::{address, b256, Address, Bytes, TxKind, B256, U256};
use alloy_rlp::{BytesMut, Encodable};
use miniz_oxide::deflate::compress_to_vec_zlib;
use op_alloy_consensus::{OpTxType, TxDeposit};
use tracing::Level;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};

@@ -474,27 +479,152 @@ mod tests {
assert_eq!(res, StageError::NotEnoughData);
}

// TODO: fix
#[tokio::test]
async fn test_next_batch_succeeds() {
async fn test_next_batch_missing_origin() {
let trace_store: TraceStorage = Default::default();
let layer = CollectingLayer::new(trace_store.clone());
tracing_subscriber::Registry::default().with(layer).init();

let mut reader = new_batch_reader();
let cfg = Arc::new(RollupConfig::default());
let payload_block_hash =
b256!("4444444444444444444444444444444444444444444444444444444444444444");
let cfg = Arc::new(RollupConfig {
delta_time: Some(0),
block_time: 100,
max_sequencer_drift: 10000000,
seq_window_size: 10000000,
genesis: Genesis {
l2: BlockID { number: 8, hash: payload_block_hash },
l1: BlockID { number: 16988980031808077784, ..Default::default() },
..Default::default()
},
..Default::default()
});
let mut batch_vec: Vec<StageResult<Batch>> = vec![];
let mut batch_txs: Vec<Bytes> = vec![];
let mut second_batch_txs: Vec<Bytes> = vec![];
while let Some(batch) = reader.next_batch(cfg.as_ref()) {
// assert_eq!(batch, Batch::Span(Default::default()));
if let Batch::Span(span) = &batch {
let bys = span.batches[0]
.transactions
.iter()
.cloned()
.map(|tx| tx.0)
.collect::<Vec<Bytes>>();
let sbys = span.batches[1]
.transactions
.iter()
.cloned()
.map(|tx| tx.0)
.collect::<Vec<Bytes>>();
second_batch_txs.extend(sbys);
batch_txs.extend(bys);
}
batch_vec.push(Ok(batch));
}
// Insert a deposit transaction in the front of the second batch txs
let expected = L1BlockInfoBedrock {
number: 16988980031808077784,
time: 1697121143,
base_fee: 10419034451,
block_hash: b256!("392012032675be9f94aae5ab442de73c5f4fb1bf30fa7dd0d2442239899a40fc"),
sequence_number: 4,
batcher_address: address!("6887246668a3b87f54deb3b94ba47a6f63f32985"),
l1_fee_overhead: U256::from(0xbc),
l1_fee_scalar: U256::from(0xa6fe0),
};
let deposit_tx_calldata: Bytes = L1BlockInfoTx::Bedrock(expected).encode_calldata();
let tx = TxDeposit {
source_hash: B256::left_padding_from(&[0xde, 0xad]),
from: Address::left_padding_from(&[0xbe, 0xef]),
mint: Some(1),
gas_limit: 2,
to: TxKind::Call(Address::left_padding_from(&[3])),
value: U256::from(4_u64),
input: deposit_tx_calldata,
is_system_transaction: false,
};
let mut buf = BytesMut::new();
tx.encode(&mut buf);
let prefixed = [&[OpTxType::Deposit as u8], &buf[..]].concat();
second_batch_txs.insert(0, Bytes::copy_from_slice(&prefixed));
let mut mock = MockBatchQueueProvider::new(batch_vec);
mock.origin = Some(BlockInfo::default());
let fetcher = MockBlockFetcher::default();
let origin_check =
b256!("8527cdb6f601acf9b483817abd1da92790c92b19000000000000000000000000");
mock.origin = Some(BlockInfo {
number: 16988980031808077784,
// 1639845645
timestamp: 1639845845,
parent_hash: Default::default(),
hash: origin_check,
});
let origin = mock.origin;

let parent_check =
b256!("01ddf682e2f8a6f10c2207e02322897e65317196000000000000000000000000");
let block_nine = L2BlockInfo {
block_info: BlockInfo {
number: 9,
timestamp: 1639845645,
parent_hash: parent_check,
hash: origin_check,
},
..Default::default()
};
let block_seven = L2BlockInfo {
block_info: BlockInfo {
number: 7,
timestamp: 1639845745,
parent_hash: parent_check,
hash: origin_check,
},
..Default::default()
};
let payload = L2ExecutionPayloadEnvelope {
parent_beacon_block_root: None,
execution_payload: L2ExecutionPayload {
block_number: 8,
block_hash: payload_block_hash,
transactions: batch_txs,
..Default::default()
},
};
let second = L2ExecutionPayloadEnvelope {
parent_beacon_block_root: None,
execution_payload: L2ExecutionPayload {
block_number: 9,
block_hash: payload_block_hash,
transactions: second_batch_txs,
..Default::default()
},
};
let fetcher = MockBlockFetcher {
blocks: vec![block_nine, block_seven],
payloads: vec![payload, second],
};
let mut bq = BatchQueue::new(cfg, mock, fetcher);
let res = bq.next_batch(L2BlockInfo::default()).await.unwrap_err();
let logs = trace_store.get_by_level(Level::WARN);
let str = "Deriving next batch for epoch: 1";
assert_eq!(logs[0], str);
let parent = L2BlockInfo {
block_info: BlockInfo {
number: 9,
timestamp: 1639845745,
parent_hash: parent_check,
hash: origin_check,
},
l1_origin: BlockID { number: 16988980031808077784, hash: origin_check },
..Default::default()
};
let res = bq.next_batch(parent).await.unwrap_err();
let logs = trace_store.get_by_level(Level::INFO);
assert_eq!(logs.len(), 5);
let str = alloc::format!("Advancing batch queue origin: {:?}", origin);
assert!(logs[0].contains(&str));
assert!(logs[1].contains("Deriving next batch for epoch: 16988980031808077784"));
assert!(logs[2].contains("Next batch found:"));
let warns = trace_store.get_by_level(Level::WARN);
assert_eq!(warns.len(), 0);
let str = "Could not get singular batches from span batch: Missing L1 origin";
assert_eq!(res, StageError::Custom(anyhow::anyhow!(str)));
}

#[tokio::test]
@@ -506,6 +636,7 @@
let mut bq = BatchQueue::new(cfg, mock, fetcher);
let parent = L2BlockInfo::default();
let result = bq.next_batch(parent).await;
assert!(result.is_err());
assert!(result.is_ok());
// assert_eq!(result, Err(StageError::NotEnoughData));
}
}
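
The deposit transaction the test prepends is a typed envelope: a single type byte followed by the RLP-encoded body, which is why the concatenation with `OpTxType::Deposit as u8` appears above and why `payload.rs` later slices the first byte off before decoding. A minimal sketch of that prefix/strip round trip over plain bytes; the constant value and helper names are assumptions for illustration, not taken from this diff:

```rust
/// Sketch of the typed-envelope framing used for the deposit transaction in
/// the test above: one type byte followed by the encoded body. Names and the
/// constant are illustrative; only the framing shape mirrors the diff
/// ("type byte + RLP body", stripped again with `[1..]` on decode).
const DEPOSIT_TX_TYPE: u8 = 0x7e; // assumed OP Stack deposit transaction type byte

/// Prefix an already-encoded transaction body with its envelope type byte.
fn to_typed_envelope(tx_type: u8, encoded_body: &[u8]) -> Vec<u8> {
    [&[tx_type][..], encoded_body].concat()
}

/// Split a typed envelope back into its type byte and body, rejecting empty
/// input instead of panicking.
fn from_typed_envelope(envelope: &[u8]) -> Option<(u8, &[u8])> {
    let (&tx_type, body) = envelope.split_first()?;
    Some((tx_type, body))
}

fn main() {
    let body = vec![0xde, 0xad, 0xbe, 0xef]; // stand-in for an RLP-encoded TxDeposit
    let envelope = to_typed_envelope(DEPOSIT_TX_TYPE, &body);
    assert_eq!(envelope[0], DEPOSIT_TX_TYPE);

    // The reader side drops the type byte before decoding, matching the
    // `transactions[0][1..]` slice in `payload.rs`.
    let (tx_type, decoded_body) = from_typed_envelope(&envelope).expect("non-empty envelope");
    assert_eq!(tx_type, DEPOSIT_TX_TYPE);
    assert_eq!(decoded_body, &body[..]);
}
```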
2 changes: 1 addition & 1 deletion crates/derive/src/stages/l1_retrieval.rs
@@ -72,7 +72,7 @@ where
if self.data.is_none() {
let next = self
.prev
.next_l1_block()?
.next_l1_block()? // SAFETY: This question mark bubbles up the Eof error.
.ok_or_else(|| anyhow!("No block to retrieve data from"))?;
self.data = Some(self.provider.open_data(&next, self.prev.batcher_addr()).await?);
}
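The new SAFETY comment calls out that the `?` on `next_l1_block()` is what propagates the end-of-data signal up the stage stack. A minimal sketch of that pattern, using a hypothetical `StageError` enum and stage shapes rather than the crate's real definitions:

```rust
/// Illustrative-only error and stage types; the real `StageError` lives in
/// the `derive` crate and carries more variants.
#[derive(Debug, PartialEq)]
enum StageError {
    Eof,
    Custom(String),
}

struct Prev {
    blocks: Vec<u64>,
}

impl Prev {
    /// Returns the next L1 block number, or `Err(StageError::Eof)` once the
    /// previous stage has nothing left to yield.
    fn next_l1_block(&mut self) -> Result<Option<u64>, StageError> {
        self.blocks.pop().map(Some).ok_or(StageError::Eof)
    }
}

struct Retrieval {
    prev: Prev,
}

impl Retrieval {
    /// The `?` is what forwards `Eof` to the caller: no explicit match on the
    /// error variant is needed at this level.
    fn next_data(&mut self) -> Result<u64, StageError> {
        let next = self
            .prev
            .next_l1_block()? // Eof from `prev` short-circuits this function.
            .ok_or_else(|| StageError::Custom("no block to retrieve data from".into()))?;
        Ok(next)
    }
}

fn main() {
    let mut stage = Retrieval { prev: Prev { blocks: vec![10] } };
    assert_eq!(stage.next_data(), Ok(10));
    // Once `prev` is exhausted, Eof bubbles straight through `next_data`.
    assert_eq!(stage.next_data(), Err(StageError::Eof));
}
```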
20 changes: 16 additions & 4 deletions crates/derive/src/types/batch/span_batch/batch.rs
@@ -140,8 +140,10 @@ impl SpanBatch {
}
if !self.check_parent_hash(parent_block.block_info.parent_hash) {
warn!(
"parent block number mismatch, expected: {parent_num}, received: {}",
parent_block.block_info.number
"parent block number mismatch, expected: {parent_num}, received: {}, parent hash: {}, self hash: {}",
parent_block.block_info.number,
parent_block.block_info.parent_hash,
self.parent_check,
);
return BatchValidity::Drop;
}
@@ -156,7 +158,7 @@
if starting_epoch_num > parent_block.l1_origin.number + 1 {
warn!(
"batch is for future epoch too far ahead, while it has the next timestamp, so it must be invalid, current_epoch: {}",
epoch.id()
epoch.id(),
);
return BatchValidity::Drop;
}
@@ -309,7 +311,10 @@ impl SpanBatch {
}
};
if safe_block_ref.l1_origin.number != self.batches[i as usize].epoch_num {
warn!("overlapped block's L1 origin number does not match");
warn!(
"overlapped block's L1 origin number does not match {}, {}",
safe_block_ref.l1_origin.number, self.batches[i as usize].epoch_num
);
return BatchValidity::Drop;
}
}
@@ -362,6 +367,13 @@ impl SpanBatch {
if batch.timestamp <= l2_safe_head.block_info.timestamp {
continue;
}
tracing::info!(
"checking {} l1 origins with first timestamp: {}, batch timestamp: {}, {}",
l1_origins.len(),
l1_origins[0].timestamp,
batch.timestamp,
batch.epoch_num
);
let origin_epoch_hash = l1_origins[origin_index..l1_origins.len()]
.iter()
.enumerate()
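The `tracing::info!` call added above is there to surface which L1 origins are being scanned when a singular batch's epoch number is resolved against the window of origins. A rough sketch of that kind of lookup with simplified stand-in types (nothing here is the crate's real API):

```rust
/// Simplified stand-ins for the epoch lookup the span-batch code performs:
/// given a singular batch's epoch number, find the matching L1 origin in the
/// slice of origins we were handed. Types and field names are illustrative.
#[derive(Debug, Clone, Copy, PartialEq)]
struct BlockInfo {
    number: u64,
    timestamp: u64,
}

fn find_origin(
    l1_origins: &[BlockInfo],
    origin_index: usize,
    epoch_num: u64,
) -> Option<(usize, BlockInfo)> {
    // Scan forward from the last origin we matched; origins are ordered by
    // number, so earlier entries can never match a later batch.
    l1_origins[origin_index..]
        .iter()
        .enumerate()
        .find(|(_, origin)| origin.number == epoch_num)
        .map(|(offset, origin)| (origin_index + offset, *origin))
}

fn main() {
    let origins = vec![
        BlockInfo { number: 10, timestamp: 100 },
        BlockInfo { number: 11, timestamp: 112 },
        BlockInfo { number: 12, timestamp: 124 },
    ];
    // A batch claiming epoch 11 resolves to the second origin.
    assert_eq!(find_origin(&origins, 0, 11), Some((1, origins[1])));
    // A batch claiming an epoch outside the window yields no origin, which is
    // the "Missing L1 origin" style of failure the new test exercises.
    assert_eq!(find_origin(&origins, 1, 9), None);
}
```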
48 changes: 24 additions & 24 deletions crates/derive/src/types/payload.rs
@@ -3,7 +3,7 @@
use alloc::vec::Vec;
use alloy_primitives::{Address, Bloom, Bytes, B256};
use anyhow::Result;
use op_alloy_consensus::OpTxEnvelope;
use op_alloy_consensus::TxDeposit;

/// Fixed and variable memory costs for a payload.
/// ~1000 bytes per payload, with some margin for overhead like map data.
@@ -117,30 +117,30 @@ impl L2ExecutionPayloadEnvelope {
pub fn to_l2_block_ref(&self, rollup_config: &RollupConfig) -> Result<L2BlockInfo> {
let L2ExecutionPayloadEnvelope { execution_payload, .. } = self;

let (l1_origin, sequence_number) = if execution_payload.block_number ==
rollup_config.genesis.l2.number
{
if execution_payload.block_hash != rollup_config.genesis.l2.hash {
anyhow::bail!("Invalid genesis hash");
}
(rollup_config.genesis.l1, 0)
} else {
if execution_payload.transactions.is_empty() {
anyhow::bail!(
"L2 block is missing L1 info deposit transaction, block hash: {}",
execution_payload.block_hash
);
}
let tx = OpTxEnvelope::decode(&mut execution_payload.transactions[0].as_ref())
.map_err(|e| anyhow::anyhow!(e))?;

let OpTxEnvelope::Deposit(tx) = tx else {
anyhow::bail!("First payload transaction has unexpected type: {:?}", tx.tx_type());
};
let (l1_origin, sequence_number) =
if execution_payload.block_number == rollup_config.genesis.l2.number {
if execution_payload.block_hash != rollup_config.genesis.l2.hash {
anyhow::bail!("Invalid genesis hash");
}
(rollup_config.genesis.l1, 0)
} else {
if execution_payload.transactions.is_empty() {
anyhow::bail!(
"L2 block is missing L1 info deposit transaction, block hash: {}",
execution_payload.block_hash
);
}

let tx = TxDeposit::decode(&mut execution_payload.transactions[0][1..].as_ref())
.map_err(|e| anyhow::anyhow!(e))?;

let l1_info = L1BlockInfoTx::decode_calldata(tx.input.as_ref())?;
(l1_info.id(), l1_info.sequence_number())
};
// let OpTxEnvelope::Deposit(tx) = tx else {
// anyhow::bail!("First payload transaction has unexpected type: {:?}",
// tx.tx_type()); };

let l1_info = L1BlockInfoTx::decode_calldata(tx.input.as_ref())?;
(l1_info.id(), l1_info.sequence_number())
};

Ok(L2BlockInfo {
block_info: BlockInfo {
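The reworked branch in `to_l2_block_ref` first handles the genesis block, where the L1 origin comes straight from the rollup config, and otherwise reads the L1 info out of the first (deposit) transaction in the payload. A stripped-down sketch of that control flow with stand-in types; the field names and the `decode_l1_info` helper are assumptions, not the crate's API:

```rust
/// Stand-in types for sketching the branch in `to_l2_block_ref`: at genesis
/// the L1 origin and sequence number are pinned by the rollup config,
/// otherwise they are read out of the first transaction's calldata.
/// Every name here is illustrative only.
#[derive(Debug, Clone, Copy, PartialEq)]
struct BlockId {
    number: u64,
}

struct Genesis {
    l1: BlockId,
    l2_number: u64,
}

struct Payload {
    block_number: u64,
    transactions: Vec<Vec<u8>>,
}

/// Hypothetical decoder for the L1 info carried in the deposit calldata; it
/// pretends the second byte is the origin number and the third the sequence
/// number, just to keep the sketch self-contained.
fn decode_l1_info(deposit: &[u8]) -> Result<(BlockId, u64), String> {
    match deposit {
        [_, origin, seq, ..] => Ok((BlockId { number: *origin as u64 }, *seq as u64)),
        _ => Err("deposit transaction too short".to_string()),
    }
}

fn l1_origin_and_seq(genesis: &Genesis, payload: &Payload) -> Result<(BlockId, u64), String> {
    if payload.block_number == genesis.l2_number {
        // Genesis block: the origin is fixed by the rollup config.
        Ok((genesis.l1, 0))
    } else {
        // Any other block must carry the L1 info deposit as its first tx.
        let first = payload
            .transactions
            .first()
            .ok_or_else(|| "L2 block is missing the L1 info deposit transaction".to_string())?;
        decode_l1_info(first)
    }
}

fn main() {
    let genesis = Genesis { l1: BlockId { number: 100 }, l2_number: 0 };
    let payload = Payload { block_number: 5, transactions: vec![vec![0x7e, 101, 4]] };
    assert_eq!(l1_origin_and_seq(&genesis, &payload), Ok((BlockId { number: 101 }, 4)));
}
```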
