From da627d1d90c35f79c6048c272d1c0d08453e2c29 Mon Sep 17 00:00:00 2001 From: Gustavo Madeira Krieger Date: Fri, 24 Jan 2025 17:35:54 -0300 Subject: [PATCH] test(client): add tests to block_production_task --- Cargo.lock | 3 + .../madara/client/block_production/Cargo.toml | 7 + .../madara/client/block_production/src/lib.rs | 910 +++++++++++++++++- 3 files changed, 906 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 50ce03c38..fd5d4ee92 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5512,9 +5512,11 @@ dependencies = [ "bitvec", "blockifier", "lazy_static", + "m-cairo-test-contracts", "mc-analytics", "mc-block-import", "mc-db", + "mc-devnet", "mc-exec", "mc-mempool", "mockall", @@ -5539,6 +5541,7 @@ dependencies = [ "serde_json", "starknet-core", "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", + "starknet-types-rpc", "starknet_api", "thiserror 2.0.3", "tokio", diff --git a/crates/madara/client/block_production/Cargo.toml b/crates/madara/client/block_production/Cargo.toml index ca4311a1a..5543a9970 100644 --- a/crates/madara/client/block_production/Cargo.toml +++ b/crates/madara/client/block_production/Cargo.toml @@ -18,6 +18,9 @@ targets = ["x86_64-unknown-linux-gnu"] rstest = { workspace = true } mc-db = { workspace = true, features = ["testing"] } +mc-mempool = { workspace = true, features = ["testing"] } +mc-devnet.workspace = true +mp-utils = { workspace = true, features = ["testing"] } tokio = { workspace = true, features = ["rt-multi-thread"] } proptest.workspace = true proptest-derive.workspace = true @@ -27,6 +30,10 @@ mockall.workspace = true assert_matches.workspace = true lazy_static.workspace = true serde_json.workspace = true +starknet-types-rpc.workspace = true + +# Compile the test contracts in test cfg. 
+m-cairo-test-contracts.workspace = true [features] testing = ["blockifier/testing", "mc-db/testing", "mockall"] diff --git a/crates/madara/client/block_production/src/lib.rs b/crates/madara/client/block_production/src/lib.rs index b5783b8dd..94f6183d8 100644 --- a/crates/madara/client/block_production/src/lib.rs +++ b/crates/madara/client/block_production/src/lib.rs @@ -624,36 +624,50 @@ impl BlockProductionTask { #[cfg(test)] mod tests { - use std::{collections::HashMap, sync::Arc}; + use std::{collections::HashMap, sync::Arc, time::Duration}; use blockifier::{ - bouncer::BouncerWeights, compiled_class_hash, nonce, state::cached_state::StateMaps, storage_key, + bouncer::{BouncerConfig, BouncerWeights}, + compiled_class_hash, nonce, + state::cached_state::StateMaps, + storage_key, + }; + use mc_block_import::{BlockImporter, BlockValidationContext}; + use mc_db::{db_block_id::DbBlockId, MadaraBackend}; + use mc_devnet::{Call, ChainGenesisDescription, DevnetKeys, DevnetPredeployedContract, Multicall, Selector}; + use mc_mempool::{Mempool, MempoolLimits, MempoolProvider, MockL1DataProvider}; + use mp_block::{ + header::{GasPrices, L1DataAvailabilityMode}, + MadaraBlockInner, MadaraPendingBlock, VisitedSegments, }; - use mc_db::MadaraBackend; - use mc_mempool::Mempool; - use mp_block::VisitedSegments; use mp_chain_config::ChainConfig; + use mp_class::ConvertedClass; use mp_convert::ToFelt; use mp_state_update::{ ContractStorageDiffItem, DeclaredClassItem, DeployedContractItem, NonceUpdate, ReplacedClassItem, StateDiff, StorageEntry, }; + use mp_transactions::{BroadcastedTransactionExt, Transaction}; use starknet_api::{ class_hash, contract_address, core::{ClassHash, ContractAddress, PatriciaKey}, felt, patricia_key, }; use starknet_types_core::felt::Felt; + use starknet_types_rpc::{ + BroadcastedDeclareTxn, BroadcastedDeclareTxnV3, BroadcastedInvokeTxn, BroadcastedTxn, DaMode, InvokeTxnV3, + ResourceBounds, ResourceBoundsMapping, + }; use crate::{ finalize_execution_state::state_map_to_state_diff, metrics::BlockProductionMetrics, BlockProductionTask, }; - type TxFixtureInfo = (mp_transactions::Transaction, mp_receipt::TransactionReceipt); + type TxFixtureInfo = (Transaction, mp_receipt::TransactionReceipt); #[rstest::fixture] fn backend() -> Arc { - MadaraBackend::open_for_testing(Arc::new(ChainConfig::madara_test())) + MadaraBackend::open_for_testing(Arc::new(ChainConfig::madara_devnet())) } #[rstest::fixture] @@ -667,6 +681,72 @@ mod tests { ) } + #[rstest::fixture] + async fn devnet_setup( + #[default(16)] execution_batch_size: usize, + #[default(Duration::from_secs(30))] block_time: Duration, + #[default(Duration::from_secs(2))] pending_block_update_time: Duration, + #[default(false)] use_bouncer_weights: bool, + ) -> ( + Arc, + Arc, + Arc, + Arc, + Arc, + DevnetKeys, + ) { + let mut g = ChainGenesisDescription::base_config().unwrap(); + let contracts = g.add_devnet_contracts(10).unwrap(); + + let chain_config: Arc; + + if use_bouncer_weights { + let bouncer_weights = bouncer_weights(); + + chain_config = Arc::new(ChainConfig { + execution_batch_size, + block_time, + pending_block_update_time, + bouncer_config: BouncerConfig { block_max_capacity: bouncer_weights }, + ..ChainConfig::madara_devnet() + }); + } else { + chain_config = Arc::new(ChainConfig { + execution_batch_size, + block_time, + pending_block_update_time, + ..ChainConfig::madara_devnet() + }); + } + + let block = g.build(&chain_config).unwrap(); + let backend = MadaraBackend::open_for_testing(Arc::clone(&chain_config)); + 
let importer = Arc::new(BlockImporter::new(Arc::clone(&backend), None).unwrap()); + + let _ = importer + .add_block(block, BlockValidationContext::new(chain_config.chain_id.clone()).trust_class_hashes(true)) + .await; + + let mut l1_data_provider = MockL1DataProvider::new(); + l1_data_provider.expect_get_da_mode().return_const(L1DataAvailabilityMode::Blob); + l1_data_provider.expect_get_gas_prices().return_const(GasPrices { + eth_l1_gas_price: 128, + strk_l1_gas_price: 128, + eth_l1_data_gas_price: 128, + strk_l1_data_gas_price: 128, + }); + let l1_data_provider = Arc::new(l1_data_provider); + + ( + Arc::clone(&backend), + Arc::clone(&importer), + Arc::new(BlockProductionMetrics::register()), + Arc::clone(&l1_data_provider), + Arc::new(Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing())), + contracts, + ) + } + #[rstest::fixture] fn tx_invoke_v0(#[default(Felt::ZERO)] contract_address: Felt) -> TxFixtureInfo { ( @@ -716,6 +796,33 @@ mod tests { ) } + #[rstest::fixture] + fn pending_block_inner( + // Transactions + #[from(tx_invoke_v0)] + #[with(Felt::ONE)] + tx_invoke_v0_1: TxFixtureInfo, + #[from(tx_l1_handler)] + #[with(Felt::TWO)] + tx_l1_handler_2: TxFixtureInfo, + #[from(tx_declare_v0)] + #[with(Felt::THREE)] + tx_declare_v0_3: TxFixtureInfo, + tx_deploy: TxFixtureInfo, + tx_deploy_account: TxFixtureInfo, + ) -> MadaraBlockInner { + MadaraBlockInner { + transactions: vec![ + tx_invoke_v0_1.0, + tx_l1_handler_2.0, + tx_declare_v0_3.0, + tx_deploy.0, + tx_deploy_account.0, + ], + receipts: vec![tx_invoke_v0_1.1, tx_l1_handler_2.1, tx_declare_v0_3.1, tx_deploy.1, tx_deploy_account.1], + } + } + #[rstest::fixture] fn converted_class_legacy(#[default(Felt::ZERO)] class_hash: Felt) -> mp_class::ConvertedClass { mp_class::ConvertedClass::Legacy(mp_class::LegacyConvertedClass { @@ -758,6 +865,66 @@ mod tests { }) } + #[rstest::fixture] + fn pending_block_converted_classes( + // Converted classes + #[from(converted_class_legacy)] + #[with(Felt::ZERO)] + converted_class_legacy_0: mp_class::ConvertedClass, + #[from(converted_class_sierra)] + #[with(Felt::ONE, Felt::ONE)] + converted_class_sierra_1: mp_class::ConvertedClass, + #[from(converted_class_sierra)] + #[with(Felt::TWO, Felt::TWO)] + converted_class_sierra_2: mp_class::ConvertedClass, + ) -> Vec { + vec![converted_class_legacy_0.clone(), converted_class_sierra_1.clone(), converted_class_sierra_2.clone()] + } + + #[rstest::fixture] + fn pending_block_state_diff() -> StateDiff { + StateDiff { + storage_diffs: vec![ + ContractStorageDiffItem { + address: Felt::ONE, + storage_entries: vec![ + StorageEntry { key: Felt::ZERO, value: Felt::ZERO }, + StorageEntry { key: Felt::ONE, value: Felt::ONE }, + StorageEntry { key: Felt::TWO, value: Felt::TWO }, + ], + }, + ContractStorageDiffItem { + address: Felt::TWO, + storage_entries: vec![ + StorageEntry { key: Felt::ZERO, value: Felt::ZERO }, + StorageEntry { key: Felt::ONE, value: Felt::ONE }, + StorageEntry { key: Felt::TWO, value: Felt::TWO }, + ], + }, + ContractStorageDiffItem { + address: Felt::THREE, + storage_entries: vec![ + StorageEntry { key: Felt::ZERO, value: Felt::ZERO }, + StorageEntry { key: Felt::ONE, value: Felt::ONE }, + StorageEntry { key: Felt::TWO, value: Felt::TWO }, + ], + }, + ], + deprecated_declared_classes: vec![Felt::ZERO], + declared_classes: vec![ + DeclaredClassItem { class_hash: Felt::ONE, compiled_class_hash: Felt::ONE }, + DeclaredClassItem { class_hash: Felt::TWO, compiled_class_hash: Felt::TWO }, + ], + deployed_contracts: 
vec![DeployedContractItem { address: Felt::THREE, class_hash: Felt::THREE }], + replaced_classes: vec![ReplacedClassItem { contract_address: Felt::TWO, class_hash: Felt::TWO }], + nonces: vec![ + NonceUpdate { contract_address: Felt::ONE, nonce: Felt::ONE }, + NonceUpdate { contract_address: Felt::TWO, nonce: Felt::TWO }, + NonceUpdate { contract_address: Felt::THREE, nonce: Felt::THREE }, + ], + } + } + #[rstest::fixture] fn visited_segments() -> mp_block::VisitedSegments { mp_block::VisitedSegments(vec![ @@ -790,8 +957,108 @@ mod tests { } } + fn sign_and_add_declare_tx( + contract: &DevnetPredeployedContract, + backend: &Arc, + mempool: &Arc, + nonce: Felt, + ) { + let sierra_class: starknet_core::types::contract::SierraClass = + serde_json::from_slice(m_cairo_test_contracts::TEST_CONTRACT_SIERRA).unwrap(); + let flattened_class: mp_class::FlattenedSierraClass = sierra_class.clone().flatten().unwrap().into(); + + // starkli class-hash target/dev/madara_contracts_TestContract.compiled_contract_class.json + let compiled_contract_class_hash = + Felt::from_hex("0x0138105ded3d2e4ea1939a0bc106fb80fd8774c9eb89c1890d4aeac88e6a1b27").unwrap(); + + let mut declare_txn: BroadcastedDeclareTxn = BroadcastedDeclareTxn::V3(BroadcastedDeclareTxnV3 { + sender_address: contract.address, + compiled_class_hash: compiled_contract_class_hash, + signature: vec![], + nonce, + contract_class: flattened_class.into(), + resource_bounds: ResourceBoundsMapping { + l1_gas: ResourceBounds { max_amount: 210000, max_price_per_unit: 10000 }, + l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + }, + tip: 0, + paymaster_data: vec![], + account_deployment_data: vec![], + nonce_data_availability_mode: DaMode::L1, + fee_data_availability_mode: DaMode::L1, + }); + + let (blockifier_tx, _class) = BroadcastedTxn::Declare(declare_txn.clone()) + .into_blockifier(backend.chain_config().chain_id.to_felt(), backend.chain_config().latest_protocol_version) + .unwrap(); + let signature = contract.secret.sign(&mc_mempool::transaction_hash(&blockifier_tx)).unwrap(); + + let tx_signature = match &mut declare_txn { + BroadcastedDeclareTxn::V1(tx) => &mut tx.signature, + BroadcastedDeclareTxn::V2(tx) => &mut tx.signature, + BroadcastedDeclareTxn::V3(tx) => &mut tx.signature, + _ => unreachable!("the declare tx is not query only"), + }; + *tx_signature = vec![signature.r, signature.s]; + + let _ = mempool.tx_accept_declare(declare_txn); + } + + fn sign_and_add_invoke_tx( + contract_sender: &DevnetPredeployedContract, + contract_receiver: &DevnetPredeployedContract, + backend: &Arc, + mempool: &Arc, + nonce: Felt, + ) { + let erc20_contract_address = + Felt::from_hex_unchecked("0x04718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d"); + + let mut invoke_txn: BroadcastedInvokeTxn = BroadcastedInvokeTxn::V3(InvokeTxnV3 { + sender_address: contract_sender.address, + calldata: Multicall::default() + .with(Call { + to: erc20_contract_address, + selector: Selector::from("transfer"), + calldata: vec![ + contract_receiver.address, + (9_999u128 * 1_000_000_000_000_000_000).into(), + Felt::ZERO, + ], + }) + .flatten() + .collect(), + signature: vec![], + nonce, + resource_bounds: ResourceBoundsMapping { + l1_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + }, + tip: 0, + paymaster_data: vec![], + account_deployment_data: vec![], + nonce_data_availability_mode: DaMode::L1, + fee_data_availability_mode: DaMode::L1, + }); + 
+ let (blockifier_tx, _classes) = BroadcastedTxn::Invoke(invoke_txn.clone()) + .into_blockifier(backend.chain_config().chain_id.to_felt(), backend.chain_config().latest_protocol_version) + .unwrap(); + let signature = contract_sender.secret.sign(&mc_mempool::transaction_hash(&blockifier_tx)).unwrap(); + + let tx_signature = match &mut invoke_txn { + BroadcastedInvokeTxn::V0(tx) => &mut tx.signature, + BroadcastedInvokeTxn::V1(tx) => &mut tx.signature, + BroadcastedInvokeTxn::V3(tx) => &mut tx.signature, + _ => unreachable!("the invoke tx is not query only"), + }; + *tx_signature = vec![signature.r, signature.s]; + + let _ = mempool.tx_accept_invoke(invoke_txn); + } + #[rstest::rstest] - fn block_prod_state_map_to_state_diff(backend: Arc) { + fn test_block_prod_state_map_to_state_diff(backend: Arc) { let mut nonces = HashMap::new(); nonces.insert(contract_address!(1u32), nonce!(1)); nonces.insert(contract_address!(2u32), nonce!(2)); @@ -918,7 +1185,7 @@ mod tests { #[rstest::rstest] #[tokio::test] #[allow(clippy::too_many_arguments)] - async fn block_prod_pending_close_on_startup_pass( + async fn test_block_prod_pending_close_on_startup_pass( setup: (Arc, Arc, Arc), #[with(Felt::ONE)] tx_invoke_v0: TxFixtureInfo, #[with(Felt::TWO)] tx_l1_handler: TxFixtureInfo, @@ -1086,7 +1353,7 @@ mod tests { #[rstest::rstest] #[tokio::test] #[allow(clippy::too_many_arguments)] - async fn block_prod_pending_close_on_startup_pass_on_top( + async fn test_block_prod_pending_close_on_startup_pass_on_top( setup: (Arc, Arc, Arc), // Transactions @@ -1349,7 +1616,7 @@ mod tests { /// task even if there is no pending block in db at the time of startup. #[rstest::rstest] #[tokio::test] - async fn block_prod_pending_close_on_startup_no_pending( + async fn test_block_prod_pending_close_on_startup_no_pending( setup: (Arc, Arc, Arc), ) { let (backend, importer, metrics) = setup; @@ -1372,7 +1639,7 @@ mod tests { #[rstest::rstest] #[tokio::test] #[allow(clippy::too_many_arguments)] - async fn block_prod_pending_close_on_startup_no_visited_segments( + async fn test_block_prod_pending_close_on_startup_no_visited_segments( setup: (Arc, Arc, Arc), #[with(Felt::ONE)] tx_invoke_v0: TxFixtureInfo, #[with(Felt::TWO)] tx_l1_handler: TxFixtureInfo, @@ -1519,7 +1786,7 @@ mod tests { #[rstest::rstest] #[tokio::test] #[allow(clippy::too_many_arguments)] - async fn block_prod_pending_close_on_startup_fail_missing_class( + async fn test_block_prod_pending_close_on_startup_fail_missing_class( setup: (Arc, Arc, Arc), #[with(Felt::ONE)] tx_invoke_v0: TxFixtureInfo, #[with(Felt::TWO)] tx_l1_handler: TxFixtureInfo, @@ -1619,7 +1886,7 @@ mod tests { #[rstest::rstest] #[tokio::test] #[allow(clippy::too_many_arguments)] - async fn block_prod_pending_close_on_startup_fail_missing_class_legacy( + async fn test_block_prod_pending_close_on_startup_fail_missing_class_legacy( setup: (Arc, Arc, Arc), #[with(Felt::ONE)] tx_invoke_v0: TxFixtureInfo, #[with(Felt::TWO)] tx_l1_handler: TxFixtureInfo, @@ -1713,4 +1980,619 @@ mod tests { assert!(err.contains("Failed to retrieve pending declared class at hash")); assert!(err.contains("not found in db")); } + + #[rstest::rstest] + #[tokio::test] + #[allow(clippy::too_many_arguments)] + async fn test_block_prod_on_pending_block_tick_block_still_pending( + #[future] devnet_setup: ( + Arc, + Arc, + Arc, + Arc, + Arc, + DevnetKeys, + ), + ) { + let (backend, importer, metrics, l1_data_provider, mempool, contracts) = devnet_setup.await; + + // ================================================================== // 
+ // PART 1: add a transaction to the mempool // + // ================================================================== // + + // The transaction itself is meaningless, it's just to check + // if the task correctly reads it and process it + sign_and_add_declare_tx(&contracts.0[0], &backend, &mempool, Felt::ZERO); + assert!(!mempool.is_empty()); + + // ================================================================== // + // PART 2: create block production task // + // ================================================================== // + + // Since there are no new pending blocks, this shouldn't + // seal any blocks + let mut block_production_task = + BlockProductionTask::new(Arc::clone(&backend), importer, Arc::clone(&mempool), metrics, l1_data_provider) + .await + .unwrap(); + + let pending_block: mp_block::MadaraMaybePendingBlock = backend.get_block(&DbBlockId::Pending).unwrap().unwrap(); + + assert_eq!(pending_block.inner.transactions.len(), 0); + assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0); + + // ================================================================== // + // PART 3: call on pending time tick // + // ================================================================== // + + // The block should still be pending since we haven't + // reached the block limit and there should be no new + // finalized blocks + block_production_task.set_current_pending_tick(1); + block_production_task.on_pending_time_tick().await.unwrap(); + + let pending_block: mp_block::MadaraMaybePendingBlock = backend.get_block(&DbBlockId::Pending).unwrap().unwrap(); + + assert!(mempool.is_empty()); + assert_eq!(pending_block.inner.transactions.len(), 1); + assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0); + } + + #[rstest::rstest] + #[tokio::test] + #[allow(clippy::too_many_arguments)] + async fn test_block_prod_on_pending_block_tick_updates_correct_block( + #[future] devnet_setup: ( + Arc, + Arc, + Arc, + Arc, + Arc, + DevnetKeys, + ), + + // Pending block info + pending_block_inner: MadaraBlockInner, + pending_block_converted_classes: Vec, + pending_block_state_diff: StateDiff, + + // Pending data + visited_segments: VisitedSegments, + bouncer_weights: BouncerWeights, + ) { + let (backend, importer, metrics, l1_data_provider, mempool, contracts) = devnet_setup.await; + + // ================================================================== // + // PART 1: add a transaction to the mempool // + // ================================================================== // + + // The transaction itself is meaningless, it's just to check + // if the task correctly reads it and process it + sign_and_add_declare_tx(&contracts.0[0], &backend, &mempool, Felt::ZERO); + assert!(!mempool.is_empty()); + + // ================================================================== // + // PART 2: create block production task // + // ================================================================== // + + // Since there are no new pending blocks, this shouldn't + // seal any blocks + let mut block_production_task = + BlockProductionTask::new(Arc::clone(&backend), importer, Arc::clone(&mempool), metrics, l1_data_provider) + .await + .unwrap(); + + let pending_block: mp_block::MadaraMaybePendingBlock = backend.get_block(&DbBlockId::Pending).unwrap().unwrap(); + + assert_eq!(pending_block.inner.transactions.len(), 0); + assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0); + + // ================================================================== // + // PART 3: call on pending time tick once 
// + // ================================================================== // + + block_production_task.set_current_pending_tick(1); + block_production_task.on_pending_time_tick().await.unwrap(); + + let pending_block: mp_block::MadaraMaybePendingBlock = backend.get_block(&DbBlockId::Pending).unwrap().unwrap(); + + assert!(mempool.is_empty()); + assert_eq!(pending_block.inner.transactions.len(), 1); + assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0); + + // ================================================================== // + // PART 4: storing the pending block // + // ================================================================== // + + // We insert a pending block to check if the block production task + // keeps a consistent state + backend + .store_block( + mp_block::MadaraMaybePendingBlock { + info: mp_block::MadaraMaybePendingBlockInfo::Pending(mp_block::MadaraPendingBlockInfo { + header: mp_block::header::PendingHeader::default(), + tx_hashes: vec![Felt::ONE, Felt::TWO, Felt::THREE], + }), + inner: pending_block_inner.clone(), + }, + pending_block_state_diff.clone(), + pending_block_converted_classes.clone(), + Some(visited_segments.clone()), + Some(bouncer_weights), + ) + .expect("Failed to store pending block"); + + // ================================================================== // + // PART 5: add more transactions to the mempool // + // ================================================================== // + + sign_and_add_invoke_tx(&contracts.0[0], &contracts.0[1], &backend, &mempool, Felt::ONE); + assert!(!mempool.is_empty()); + + // ================================================================== // + // PART 6: call on pending time tick again // + // ================================================================== // + + block_production_task.on_pending_time_tick().await.unwrap(); + + let pending_block = backend.get_block(&DbBlockId::Pending).unwrap().unwrap(); + + assert!(mempool.is_empty()); + assert_eq!(pending_block.inner.transactions.len(), 2); + assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0); + } + + // THIS TEST IS CURRENTLY NOT WORKING PROPERLY DUE TO A BUG + // THAT NEEDS TO BE INVESTIGATED + #[rstest::rstest] + #[tokio::test] + #[allow(clippy::too_many_arguments)] + async fn test_block_prod_on_pending_block_tick_closes_block( + #[future] + #[with(16, Duration::from_secs(60000), Duration::from_millis(3), true)] + devnet_setup: ( + Arc, + Arc, + Arc, + Arc, + Arc, + DevnetKeys, + ), + ) { + let (backend, importer, metrics, l1_data_provider, mempool, contracts) = devnet_setup.await; + + // ================================================================== // + // PART 1: add transactions to the mempool // + // ================================================================== // + + // The transaction itself is meaningless, it's just to check + // if the task correctly reads it and process it + sign_and_add_declare_tx(&contracts.0[0], &backend, &mempool, Felt::ZERO); + sign_and_add_invoke_tx(&contracts.0[0], &contracts.0[1], &backend, &mempool, Felt::ONE); + assert!(!mempool.is_empty()); + + // ================================================================== // + // PART 2: create block production task // + // ================================================================== // + + let mut block_production_task = + BlockProductionTask::new(Arc::clone(&backend), importer, Arc::clone(&mempool), metrics, l1_data_provider) + .await + .unwrap(); + + let pending_block: mp_block::MadaraMaybePendingBlock = 
backend.get_block(&DbBlockId::Pending).unwrap().unwrap(); + + assert_eq!(pending_block.inner.transactions.len(), 0); + assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0); + + // ================================================================== // + // PART 3: call on pending time tick // + // ================================================================== // + + // The small bouncer weights should make the tick close + // the block when called + block_production_task.set_current_pending_tick(1); + block_production_task.on_pending_time_tick().await.unwrap(); + + let pending_block: mp_block::MadaraMaybePendingBlock = backend.get_block(&DbBlockId::Pending).unwrap().unwrap(); + + assert!(mempool.is_empty()); + assert_eq!(pending_block.inner.transactions.len(), 0); + assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0); + } + + #[rstest::rstest] + #[tokio::test] + #[allow(clippy::too_many_arguments)] + async fn test_block_prod_on_block_time_tick_closes_block( + #[future] devnet_setup: ( + Arc, + Arc, + Arc, + Arc, + Arc, + DevnetKeys, + ), + ) { + let (backend, importer, metrics, l1_data_provider, mempool, contracts) = devnet_setup.await; + + // ================================================================== // + // PART 1: add a transaction to the mempool // + // ================================================================== // + + // The transaction itself is meaningless, it's just to check + // if the task correctly reads it and process it + sign_and_add_declare_tx(&contracts.0[0], &backend, &mempool, Felt::ZERO); + assert!(!mempool.is_empty()); + + // ================================================================== // + // PART 2: create block production task // + // ================================================================== // + + // Since there are no new pending blocks, this shouldn't + // seal any blocks + let mut block_production_task = + BlockProductionTask::new(Arc::clone(&backend), importer, Arc::clone(&mempool), metrics, l1_data_provider) + .await + .unwrap(); + + let pending_block: mp_block::MadaraMaybePendingBlock = backend.get_block(&DbBlockId::Pending).unwrap().unwrap(); + + assert_eq!(pending_block.inner.transactions.len(), 0); + assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0); + + // ================================================================== // + // PART 3: call on block time // + // ================================================================== // + + block_production_task.on_block_time().await.unwrap(); + + let pending_block = backend.get_block(&DbBlockId::Pending).unwrap().unwrap(); + + assert!(mempool.is_empty()); + assert!(pending_block.inner.transactions.is_empty()); + assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 1); + } + + #[rstest::rstest] + #[tokio::test] + #[allow(clippy::too_many_arguments)] + async fn test_block_prod_on_block_time_fails_inconsistent_state( + #[future] devnet_setup: ( + Arc, + Arc, + Arc, + Arc, + Arc, + DevnetKeys, + ), + + // Pending block info + pending_block_inner: MadaraBlockInner, + pending_block_converted_classes: Vec, + pending_block_state_diff: StateDiff, + + // Pending data + visited_segments: VisitedSegments, + bouncer_weights: BouncerWeights, + ) { + let (backend, importer, metrics, l1_data_provider, mempool, contracts) = devnet_setup.await; + + // ================================================================== // + // PART 1: add a transaction to the mempool // + // ================================================================== // + + // The 
transaction itself is meaningless, it's just to check + // if the task correctly reads it and process it + sign_and_add_declare_tx(&contracts.0[0], &backend, &mempool, Felt::ZERO); + assert!(!mempool.is_empty()); + + // ================================================================== // + // PART 2: create block production task // + // ================================================================== // + + // Since there are no new pending blocks, this shouldn't + // seal any blocks + let mut block_production_task = + BlockProductionTask::new(Arc::clone(&backend), importer, Arc::clone(&mempool), metrics, l1_data_provider) + .await + .unwrap(); + + let pending_block: mp_block::MadaraMaybePendingBlock = backend.get_block(&DbBlockId::Pending).unwrap().unwrap(); + + assert_eq!(pending_block.inner.transactions.len(), 0); + assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0); + + // ================================================================== // + // PART 3: storing the pending block // + // ================================================================== // + + backend + .store_block( + mp_block::MadaraMaybePendingBlock { + info: mp_block::MadaraMaybePendingBlockInfo::Pending(mp_block::MadaraPendingBlockInfo { + header: mp_block::header::PendingHeader::default(), + tx_hashes: vec![Felt::ONE, Felt::TWO, Felt::THREE], + }), + inner: pending_block_inner.clone(), + }, + pending_block_state_diff.clone(), + pending_block_converted_classes.clone(), + Some(visited_segments.clone()), + Some(bouncer_weights), + ) + .expect("Failed to store pending block"); + + // ================================================================== // + // PART 4: changing the task pending block // + // ================================================================== // + + // Here we purposefully change the block being worked on + let pending_block = MadaraPendingBlock { + info: mp_block::MadaraPendingBlockInfo { + header: mp_block::header::PendingHeader::default(), + tx_hashes: vec![Felt::ONE, Felt::TWO, Felt::THREE], + }, + inner: pending_block_inner.clone(), + }; + + block_production_task.block = pending_block; + + // ================================================================== // + // PART 5: call on block time // + // ================================================================== // + + // If the program ran correctly, the pending block should + // have no transactions on it after the method ran for at + // least block_time + + let result = block_production_task.on_block_time().await; + + assert!(result.is_err(), "Expected an error due to mismatched parent hash"); + assert!(mempool.is_empty()); + assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0); + } + + #[rstest::rstest] + #[tokio::test] + #[allow(clippy::too_many_arguments)] + async fn test_block_prod_start_block_production_task_normal_setup( + #[future] devnet_setup: ( + Arc, + Arc, + Arc, + Arc, + Arc, + DevnetKeys, + ), + ) { + let (backend, importer, metrics, l1_data_provider, mempool, contracts) = devnet_setup.await; + + // ================================================================== // + // PART 1: add a transaction to the mempool // + // ================================================================== // + + // The transaction itself is meaningless, it's just to check + // if the task correctly reads it and process it + sign_and_add_declare_tx(&contracts.0[0], &backend, &mempool, Felt::ZERO); + assert!(!mempool.is_empty()); + + // ================================================================== // + // 
PART 2: create block production task // + // ================================================================== // + + // If the program ran correctly, the pending block should + // have no transactions on it after the method ran for at + // least block_time + + let block_production_task = + BlockProductionTask::new(Arc::clone(&backend), importer, Arc::clone(&mempool), metrics, l1_data_provider) + .await + .unwrap(); + + assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0); + + // ================================================================== // + // PART 3: init block production task // + // ================================================================== // + + let task_handle = tokio::spawn(async move { + block_production_task.block_production_task(mp_utils::service::ServiceContext::new_for_testing()).await + }); + + // We force a timeout after the minimum execution time + // plus a little bit to guarantee + let _ = + tokio::time::timeout(backend.chain_config().block_time + std::time::Duration::from_secs(1), task_handle) + .await; + + let pending_block = backend.get_block(&DbBlockId::Pending).unwrap().unwrap(); + + assert!(mempool.is_empty()); + assert!(pending_block.inner.transactions.is_empty()); + assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 1); + } + + #[rstest::rstest] + #[tokio::test] + #[allow(clippy::too_many_arguments)] + async fn test_block_prod_start_block_production_task_pending_tick_too_small( + #[future] + #[with(16, Duration::from_secs(30), Duration::from_micros(1), false)] + devnet_setup: ( + Arc, + Arc, + Arc, + Arc, + Arc, + DevnetKeys, + ), + ) { + let (backend, importer, metrics, l1_data_provider, mempool, contracts) = devnet_setup.await; + + // ================================================================== // + // PART 1: we add a transaction to the mempool // + // ================================================================== // + + // The transaction itself is meaningless, it's just to check + // if the task correctly reads it and process it + sign_and_add_declare_tx(&contracts.0[0], &backend, &mempool, Felt::ZERO); + assert!(!mempool.is_empty()); + + // ================================================================== // + // PART 2: create block production task // + // ================================================================== // + + // If the program ran correctly, the pending block should + // have no transactions on it after the method ran for at + // least block_time + + let block_production_task = + BlockProductionTask::new(Arc::clone(&backend), importer, Arc::clone(&mempool), metrics, l1_data_provider) + .await + .unwrap(); + + assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0); + + // ================================================================== // + // PART 3: init block production task // + // ================================================================== // + + let result = + block_production_task.block_production_task(mp_utils::service::ServiceContext::new_for_testing()).await; + + assert!(result.is_err(), "Expected an error due to very small pending block time"); + assert!(!mempool.is_empty()); + } + + #[rstest::rstest] + #[tokio::test] + #[allow(clippy::too_many_arguments)] + async fn test_block_prod_start_block_production_task_closes_block_right_after_pending( + #[future] + #[with(1, Duration::from_micros(1002), Duration::from_micros(1001), false)] + devnet_setup: ( + Arc, + Arc, + Arc, + Arc, + Arc, + DevnetKeys, + ), + ) { + let (backend, importer, metrics, l1_data_provider, 
mempool, contracts) = devnet_setup.await;
+
+        // ================================================================== //
+        // PART 1: add transactions to the mempool                            //
+        // ================================================================== //
+
+        // The transactions themselves are meaningless, they are only here to
+        // check that the task correctly reads and processes them
+        sign_and_add_declare_tx(&contracts.0[0], &backend, &mempool, Felt::ZERO);
+        sign_and_add_invoke_tx(&contracts.0[0], &contracts.0[1], &backend, &mempool, Felt::ONE);
+        assert!(!mempool.is_empty());
+
+        // ================================================================== //
+        // PART 2: create block production task                               //
+        // ================================================================== //
+
+        let mut block_production_task =
+            BlockProductionTask::new(Arc::clone(&backend), importer, Arc::clone(&mempool), metrics, l1_data_provider)
+                .await
+                .unwrap();
+        block_production_task.set_current_pending_tick(1);
+
+        assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0);
+
+        // ================================================================== //
+        // PART 3: init block production task                                 //
+        // ================================================================== //
+
+        let task_handle = tokio::spawn(async move {
+            block_production_task.block_production_task(mp_utils::service::ServiceContext::new_for_testing()).await
+        });
+
+        let _ = tokio::time::timeout(std::time::Duration::from_millis(100), task_handle).await;
+
+        let pending_block = backend.get_block(&DbBlockId::Pending).unwrap().unwrap();
+
+        assert!(mempool.is_empty());
+        assert!(pending_block.inner.transactions.is_empty());
+    }
+
+    #[rstest::rstest]
+    #[tokio::test]
+    #[allow(clippy::too_many_arguments)]
+    async fn test_block_prod_start_block_production_task_ungracious_shutdown_and_restart(
+        #[future] devnet_setup: (
+            Arc<MadaraBackend>,
+            Arc<BlockImporter>,
+            Arc<BlockProductionMetrics>,
+            Arc<MockL1DataProvider>,
+            Arc<Mempool>,
+            DevnetKeys,
+        ),
+    ) {
+        let (backend, importer, metrics, l1_data_provider, mempool, contracts) = devnet_setup.await;
+
+        // ================================================================== //
+        // PART 1: add transactions to the mempool                            //
+        // ================================================================== //
+
+        // The transactions themselves are meaningless, they are only here to
+        // check that the task correctly reads and processes them
+        sign_and_add_declare_tx(&contracts.0[0], &backend, &mempool, Felt::ZERO);
+        sign_and_add_invoke_tx(&contracts.0[0], &contracts.0[1], &backend, &mempool, Felt::ONE);
+        assert!(!mempool.is_empty());
+
+        // ================================================================== //
+        // PART 2: create block production task                               //
+        // ================================================================== //
+
+        // I don't really understand why Arc::clone doesn't work with dyn,
+        // but .clone() does, so I had to make do
+        let block_production_task = BlockProductionTask::new(
+            Arc::clone(&backend),
+            Arc::clone(&importer),
+            Arc::clone(&mempool),
+            Arc::clone(&metrics),
+            l1_data_provider.clone(),
+        )
+        .await
+        .unwrap();
+
+        assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0);
+
+        // ================================================================== //
+        // PART 3: init block production task                                 //
+        // ================================================================== //
+
+        let task_handle = tokio::spawn(async move {
+            block_production_task.block_production_task(mp_utils::service::ServiceContext::new_for_testing()).await
+        });
+
+        let _ = tokio::time::timeout(std::time::Duration::from_secs(3), task_handle).await;
+
+        // ================================================================== //
+        // PART 4: add more transactions to the mempool                       //
+        // ================================================================== //
+
+        // The transactions themselves are meaningless, they are only here to
+        // check that the task correctly reads and processes them
+        sign_and_add_declare_tx(&contracts.0[0], &backend, &mempool, Felt::TWO);
+        sign_and_add_invoke_tx(&contracts.0[0], &contracts.0[1], &backend, &mempool, Felt::THREE);
+        assert!(!mempool.is_empty());
+
+        // ================================================================== //
+        // PART 5: create block production task                               //
+        // ================================================================== //
+
+        // This should seal the previous pending block
+        let _block_production_task =
+            BlockProductionTask::new(Arc::clone(&backend), importer, Arc::clone(&mempool), metrics, l1_data_provider)
+                .await
+                .unwrap();
+
+        assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 1);
+    }
 }
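
The new tests all share the same skeleton: build the devnet fixtures, queue one or more signed transactions in the mempool, drive the BlockProductionTask (tick by tick or as a spawned task), then assert on the pending block and the latest block number. The condensed sketch below restates that pattern outside the patch, reusing the devnet_setup fixture, the sign_and_add_declare_tx helper and the imports of the test module above; the test name is hypothetical and the Arc<...> type parameters are inferred from what the fixture returns, so treat it as illustrative rather than as part of the change.

    // Illustrative only: mirrors test_block_prod_on_pending_block_tick_block_still_pending.
    #[rstest::rstest]
    #[tokio::test]
    async fn example_pending_tick_flow(
        #[future] devnet_setup: (
            Arc<MadaraBackend>,         // backend opened for testing, with the devnet genesis block imported
            Arc<BlockImporter>,         // importer used to seal blocks
            Arc<BlockProductionMetrics>,
            Arc<MockL1DataProvider>,    // mocked gas prices and DA mode
            Arc<Mempool>,
            DevnetKeys,                 // predeployed, funded devnet accounts
        ),
    ) {
        let (backend, importer, metrics, l1_data_provider, mempool, contracts) = devnet_setup.await;

        // Queue a signed declare transaction from the first predeployed account.
        sign_and_add_declare_tx(&contracts.0[0], &backend, &mempool, Felt::ZERO);
        assert!(!mempool.is_empty());

        // Build the task on top of the (empty) pending block.
        let mut task =
            BlockProductionTask::new(Arc::clone(&backend), importer, Arc::clone(&mempool), metrics, l1_data_provider)
                .await
                .unwrap();

        // One pending-block tick should pull the transaction into the pending
        // block without sealing a new block.
        task.set_current_pending_tick(1);
        task.on_pending_time_tick().await.unwrap();

        let pending = backend.get_block(&DbBlockId::Pending).unwrap().unwrap();
        assert_eq!(pending.inner.transactions.len(), 1);
        assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0);
    }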