diff --git a/.db-versions.yml b/.db-versions.yml index db6802319..9a9dea695 100644 --- a/.db-versions.yml +++ b/.db-versions.yml @@ -1,4 +1,6 @@ -current_version: 0 +current_version: 1 versions: + - version: 1 + pr: 450 - version: 0 pr: 372 diff --git a/.gitignore b/.gitignore index a256a077c..162dbe853 100644 --- a/.gitignore +++ b/.gitignore @@ -48,9 +48,6 @@ madara.log starknet-e2e-test/contracts/cache starknet-e2e-test/contracts/build -# proptest report -**/proptest-regressions/ - # vscode settings .vscode/settings.json diff --git a/Cargo.lock b/Cargo.lock index 5f4ad8a5b..8e1c5c0b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -650,6 +650,12 @@ version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8" +[[package]] +name = "arbitrary" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" + [[package]] name = "ark-ec" version = "0.4.2" @@ -5803,6 +5809,7 @@ dependencies = [ "opentelemetry_sdk", "proptest", "proptest-derive", + "proptest-state-machine", "reqwest 0.12.8", "rstest 0.18.2", "serde", @@ -7008,6 +7015,15 @@ dependencies = [ "syn 2.0.89", ] +[[package]] +name = "proptest-state-machine" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e943d140e09d07740fb496487c51fb8eb31c70389ac4a2e9dcd8a0d9fdf228d4" +dependencies = [ + "proptest", +] + [[package]] name = "prost" version = "0.13.3" @@ -8424,6 +8440,7 @@ name = "starknet-types-core" version = "0.1.7" source = "git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7#e5f3769150684d3c726951dd3f075815cdcdba5d" dependencies = [ + "arbitrary", "lambdaworks-crypto", "lambdaworks-math", "lazy_static", diff --git a/Cargo.toml b/Cargo.toml index 4f5e29d95..1aa14a013 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -217,6 +217,7 @@ bincode = 
"1.3" fdlimit = "0.3.0" proptest = "1.5.0" proptest-derive = "0.5.0" +proptest-state-machine = "0.3.1" dotenv = "0.15.0" httpmock = "0.7.0" tempfile = "3.10.1" diff --git a/crates/madara/client/block_production/src/lib.rs b/crates/madara/client/block_production/src/lib.rs index e7355e1d4..131620c9b 100644 --- a/crates/madara/client/block_production/src/lib.rs +++ b/crates/madara/client/block_production/src/lib.rs @@ -32,7 +32,7 @@ use mp_class::compile::ClassCompilationError; use mp_class::ConvertedClass; use mp_convert::ToFelt; use mp_receipt::from_blockifier_execution_info; -use mp_state_update::{ContractStorageDiffItem, StateDiff, StorageEntry}; +use mp_state_update::{ContractStorageDiffItem, NonceUpdate, StateDiff, StorageEntry}; use mp_transactions::TransactionWithHash; use mp_utils::service::ServiceContext; use opentelemetry::KeyValue; @@ -207,7 +207,7 @@ impl BlockProductionTask { let to_take = batch_size.saturating_sub(txs_to_process.len()); let cur_len = txs_to_process.len(); if to_take > 0 { - self.mempool.take_txs_chunk(/* extend */ &mut txs_to_process, batch_size); + self.mempool.txs_take_chunk(/* extend */ &mut txs_to_process, batch_size); txs_to_process_blockifier.extend(txs_to_process.iter().skip(cur_len).map(|tx| tx.clone_tx())); } @@ -284,7 +284,7 @@ impl BlockProductionTask { // Add back the unexecuted transactions to the mempool. stats.n_re_added_to_mempool = txs_to_process.len(); self.mempool - .re_add_txs(txs_to_process, executed_txs) + .txs_re_add(txs_to_process, executed_txs) .map_err(|err| Error::Unexpected(format!("Mempool error: {err:#}").into()))?; tracing::debug!( @@ -331,6 +331,12 @@ impl BlockProductionTask { ) .await?; + // Removes nonces in the mempool nonce cache which have been included + // into the current block. + for NonceUpdate { contract_address, .. 
} in state_diff.nonces.iter() { + self.mempool.tx_mark_included(contract_address); + } + // Flush changes to disk self.backend.flush().map_err(|err| BlockImportError::Internal(format!("DB flushing error: {err:#}").into()))?; diff --git a/crates/madara/client/block_production/src/re_add_finalized_to_blockifier.rs b/crates/madara/client/block_production/src/re_add_finalized_to_blockifier.rs index 1895bf27d..ce7defaa3 100644 --- a/crates/madara/client/block_production/src/re_add_finalized_to_blockifier.rs +++ b/crates/madara/client/block_production/src/re_add_finalized_to_blockifier.rs @@ -4,6 +4,7 @@ use mc_db::{MadaraBackend, MadaraStorageError}; use mc_mempool::{MempoolProvider, MempoolTransaction}; use mp_block::{header::BlockTimestamp, BlockId, BlockTag, MadaraMaybePendingBlock}; use mp_transactions::{ToBlockifierError, TransactionWithHash}; +use starknet_api::StarknetApiError; use starknet_core::types::Felt; #[derive(Debug, thiserror::Error)] @@ -18,6 +19,9 @@ pub enum ReAddTxsToMempoolError { #[error("Converting transaction with hash {tx_hash:#x}: Blockifier conversion error: {err:#}")] ToBlockifierError { tx_hash: Felt, err: ToBlockifierError }, + #[error("Error converting to a MempoolTransaction: {0:#}")] + ConvertToMempoolError(#[from] StarknetApiError), + /// This error should never happen unless we are running on a platform where SystemTime cannot represent the timestamp we are making. 
#[error("Converting transaction with hash {tx_hash:#x}: Could not create arrived_at timestamp with block_timestamp={block_timestamp} and tx_index={tx_index}")] MakingArrivedAtTimestamp { tx_hash: Felt, block_timestamp: BlockTimestamp, tx_index: usize }, @@ -68,14 +72,19 @@ pub fn re_add_txs_to_mempool( let arrived_at = make_arrived_at(block_timestamp, tx_index).ok_or_else(|| { ReAddTxsToMempoolError::MakingArrivedAtTimestamp { tx_hash, block_timestamp, tx_index } })?; - Ok(MempoolTransaction { tx, arrived_at, converted_class }) + + Ok::<_, ReAddTxsToMempoolError>(MempoolTransaction::new_from_blockifier_tx( + tx, + arrived_at, + converted_class, + )?) }) .collect::>()?; let n = txs_to_reexec.len(); mempool - .insert_txs_no_validation(txs_to_reexec, /* force insertion */ true) + .txs_insert_no_validation(txs_to_reexec, /* force insertion */ true) .expect("Mempool force insertion should never fail"); Ok(n) diff --git a/crates/madara/client/db/build.rs b/crates/madara/client/db/build.rs index 899d25809..be19bce37 100644 --- a/crates/madara/client/db/build.rs +++ b/crates/madara/client/db/build.rs @@ -95,10 +95,10 @@ fn parse_version(content: &str) -> Result { content .lines() .find(|line| line.starts_with("current_version:")) - .ok_or_else(|| BuildError::Parse(Cow::Borrowed("Could not find current_version")))? + .ok_or(BuildError::Parse(Cow::Borrowed("Could not find current_version")))? .split(':') .nth(1) - .ok_or_else(|| BuildError::Parse(Cow::Borrowed("Invalid current_version format")))? + .ok_or(BuildError::Parse(Cow::Borrowed("Invalid current_version format")))? 
.trim() .parse() .map_err(|_| BuildError::Parse(Cow::Borrowed("Could not parse current_version as u32"))) diff --git a/crates/madara/client/db/src/l1_db.rs b/crates/madara/client/db/src/l1_db.rs index 10c235091..eb7bf4fd6 100644 --- a/crates/madara/client/db/src/l1_db.rs +++ b/crates/madara/client/db/src/l1_db.rs @@ -1,4 +1,4 @@ -use rocksdb::WriteOptions; +use rocksdb::{IteratorMode, WriteOptions}; use serde::{Deserialize, Serialize}; use starknet_api::core::Nonce; @@ -128,4 +128,13 @@ impl MadaraBackend { self.db.put_cf_opt(&nonce_column, bincode::serialize(&nonce)?, /* empty value */ [], &writeopts)?; Ok(()) } + + /// Retrieve the latest L1 messaging [Nonce] if one is available, otherwise + /// returns [None]. + pub fn get_l1_messaging_nonce_latest(&self) -> Result, MadaraStorageError> { + let nonce_column = self.db.get_column(Column::L1MessagingNonce); + let mut iter = self.db.iterator_cf(&nonce_column, IteratorMode::End); + let nonce = iter.next().transpose()?.map(|(bytes, _)| bincode::deserialize(&bytes)).transpose()?; + Ok(nonce) + } } diff --git a/crates/madara/client/db/src/mempool_db.rs b/crates/madara/client/db/src/mempool_db.rs index 721b0f068..7aade92b2 100644 --- a/crates/madara/client/db/src/mempool_db.rs +++ b/crates/madara/client/db/src/mempool_db.rs @@ -3,10 +3,57 @@ use crate::{Column, MadaraBackend, MadaraStorageError}; use mp_class::ConvertedClass; use rocksdb::IteratorMode; use serde::{Deserialize, Serialize}; +use starknet_api::core::Nonce; use starknet_types_core::felt::Felt; type Result = std::result::Result; +/// A nonce is deemed ready when it directly follows the previous nonce in db +/// for a contract address. This guarantees that dependent transactions are not +/// executed out of order by the mempool. +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] +pub enum NonceStatus { + #[default] + Ready, + Pending, +} + +/// Information used to assess the [readiness] of a transaction. 
+/// +/// A transaction is deemed ready when its nonce directly follows the previous +/// nonce stored in db for that contract address. +/// +/// [nonce] and [nonce_next] are precomputed to avoid operating on a [Felt] +/// inside the hot path in the mempool. +/// +/// [readiness]: NonceStatus +/// [nonce]: Self::nonce +/// [nonce_next]: Self::nonce_next +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct NonceInfo { + pub readiness: NonceStatus, + pub nonce: Nonce, + pub nonce_next: Nonce, +} + +impl NonceInfo { + pub fn ready(nonce: Nonce, nonce_next: Nonce) -> Self { + debug_assert_eq!(nonce_next, nonce.try_increment().unwrap()); + Self { readiness: NonceStatus::Ready, nonce, nonce_next } + } + + pub fn pending(nonce: Nonce, nonce_next: Nonce) -> Self { + debug_assert_eq!(nonce_next, nonce.try_increment().unwrap()); + Self { readiness: NonceStatus::Pending, nonce, nonce_next } + } +} + +impl Default for NonceInfo { + fn default() -> Self { + Self::ready(Nonce(Felt::ZERO), Nonce(Felt::ONE)) + } +} + #[derive(Serialize, Deserialize)] pub struct SavedTransaction { pub tx: mp_transactions::Transaction, @@ -17,28 +64,33 @@ pub struct SavedTransaction { } #[derive(Serialize)] -struct TransactionWithConvertedClassRef<'a> { - tx: &'a SavedTransaction, +/// This struct is used as a template to serialize Mempool transactions from the +/// database without any further allocation. +struct DbMempoolTxInfoEncoder<'a> { + saved_tx: &'a SavedTransaction, converted_class: &'a Option, + nonce_info: &'a NonceInfo, } -#[derive(Serialize, Deserialize)] -struct TransactionWithConvertedClass { - tx: SavedTransaction, - converted_class: Option, + +#[derive(Deserialize)] +/// This struct is used as a template to deserialize Mempool transactions from +/// the database.
+pub struct DbMempoolTxInfoDecoder { + pub saved_tx: SavedTransaction, + pub converted_class: Option, + pub nonce_readiness: NonceInfo, } impl MadaraBackend { #[tracing::instrument(skip(self), fields(module = "MempoolDB"))] - pub fn get_mempool_transactions( - &self, - ) -> impl Iterator)>> + '_ { + pub fn get_mempool_transactions(&self) -> impl Iterator> + '_ { let col = self.db.get_column(Column::MempoolTransactions); self.db.iterator_cf(&col, IteratorMode::Start).map(|kv| { let (k, v) = kv?; let hash: Felt = bincode::deserialize(&k)?; - let tx: TransactionWithConvertedClass = bincode::deserialize(&v)?; + let tx_info: DbMempoolTxInfoDecoder = bincode::deserialize(&v)?; - Result::<_>::Ok((hash, tx.tx, tx.converted_class)) + Result::<_>::Ok((hash, tx_info)) }) } @@ -54,18 +106,19 @@ impl MadaraBackend { Ok(()) } - #[tracing::instrument(skip(self, tx), fields(module = "MempoolDB"))] + #[tracing::instrument(skip(self, saved_tx), fields(module = "MempoolDB"))] pub fn save_mempool_transaction( &self, - tx: &SavedTransaction, + saved_tx: &SavedTransaction, tx_hash: Felt, converted_class: &Option, + nonce_info: &NonceInfo, ) -> Result<()> { // Note: WAL is used here // This is because we want it to be saved even if the node crashes before the next flush let col = self.db.get_column(Column::MempoolTransactions); - let tx_with_class = TransactionWithConvertedClassRef { tx, converted_class }; + let tx_with_class = DbMempoolTxInfoEncoder { saved_tx, converted_class, nonce_info }; self.db.put_cf(&col, bincode::serialize(&tx_hash)?, bincode::serialize(&tx_with_class)?)?; tracing::debug!("save_mempool_tx {:?}", tx_hash); Ok(()) diff --git a/crates/madara/client/devnet/src/lib.rs b/crates/madara/client/devnet/src/lib.rs index 36ddf28b0..d80637bb4 100644 --- a/crates/madara/client/devnet/src/lib.rs +++ b/crates/madara/client/devnet/src/lib.rs @@ -219,7 +219,7 @@ mod tests { &self, mut tx: BroadcastedInvokeTxn, contract: &DevnetPredeployedContract, - ) -> Result, 
mc_mempool::Error> { + ) -> Result, mc_mempool::MempoolError> { let (blockifier_tx, _classes) = BroadcastedTxn::Invoke(tx.clone()) .into_blockifier( self.backend.chain_config().chain_id.to_felt(), @@ -238,14 +238,14 @@ mod tests { tracing::debug!("tx: {:?}", tx); - self.mempool.accept_invoke_tx(tx) + self.mempool.tx_accept_invoke(tx) } pub fn sign_and_add_declare_tx( &self, mut tx: BroadcastedDeclareTxn, contract: &DevnetPredeployedContract, - ) -> Result, mc_mempool::Error> { + ) -> Result, mc_mempool::MempoolError> { let (blockifier_tx, _classes) = BroadcastedTxn::Declare(tx.clone()) .into_blockifier( self.backend.chain_config().chain_id.to_felt(), @@ -262,14 +262,14 @@ mod tests { }; *tx_signature = vec![signature.r, signature.s]; - self.mempool.accept_declare_tx(tx) + self.mempool.tx_accept_declare(tx) } pub fn sign_and_add_deploy_account_tx( &self, mut tx: BroadcastedDeployAccountTxn, contract: &DevnetPredeployedContract, - ) -> Result, mc_mempool::Error> { + ) -> Result, mc_mempool::MempoolError> { let (blockifier_tx, _classes) = BroadcastedTxn::DeployAccount(tx.clone()) .into_blockifier( self.backend.chain_config().chain_id.to_felt(), @@ -285,7 +285,7 @@ mod tests { }; *tx_signature = vec![signature.r, signature.s]; - self.mempool.accept_deploy_account_tx(tx) + self.mempool.tx_accept_deploy_account(tx) } /// (STRK in FRI, ETH in WEI) @@ -716,7 +716,7 @@ mod tests { assert_matches!( result, - Err(mc_mempool::Error::InnerMempool(mc_mempool::TxInsersionError::Limit( + Err(mc_mempool::MempoolError::InnerMempool(mc_mempool::TxInsertionError::Limit( mc_mempool::MempoolLimitReached::MaxTransactions { max: 5 } ))) ) diff --git a/crates/madara/client/eth/src/l1_messaging.rs b/crates/madara/client/eth/src/l1_messaging.rs index ee99286f1..52b4af78e 100644 --- a/crates/madara/client/eth/src/l1_messaging.rs +++ b/crates/madara/client/eth/src/l1_messaging.rs @@ -155,7 +155,7 @@ async fn process_l1_message( } }; - let res = mempool.accept_l1_handler_tx(transaction.into(), 
fees)?; + let res = mempool.tx_accept_l1_handler(transaction.into(), fees)?; // TODO: remove unwraps // Ques: shall it panic if no block number of event_index? diff --git a/crates/madara/client/mempool/Cargo.toml b/crates/madara/client/mempool/Cargo.toml index aaad380af..79e4ac2e1 100644 --- a/crates/madara/client/mempool/Cargo.toml +++ b/crates/madara/client/mempool/Cargo.toml @@ -21,6 +21,7 @@ mc-db = { workspace = true, features = ["testing"] } tokio = { workspace = true, features = ["rt-multi-thread"] } proptest.workspace = true proptest-derive.workspace = true +proptest-state-machine.workspace = true bitvec.workspace = true tracing = { workspace = true, features = ["log"] } tracing-test.workspace = true @@ -29,6 +30,7 @@ mockall.workspace = true assert_matches.workspace = true lazy_static.workspace = true serde_json.workspace = true +starknet-types-core = { workspace = true, features = ["arbitrary"] } [features] testing = ["blockifier/testing", "mc-db/testing", "mockall"] diff --git a/crates/madara/client/mempool/proptest-regressions/inner.txt b/crates/madara/client/mempool/proptest-regressions/inner.txt deleted file mode 100644 index 3ab554f44..000000000 --- a/crates/madara/client/mempool/proptest-regressions/inner.txt +++ /dev/null @@ -1,11 +0,0 @@ -# Seeds for failure cases proptest has generated in the past. It is -# automatically read and these particular cases re-run before any -# novel cases are generated. -# -# It is recommended to check this file in to source control so that -# everyone who runs the test benefits from these saved cases. 
-cc 606af7856ddbb4fa59d49993b59a603681411ecf692f02dab0c8ea234bacbf97 # shrinks to pb = MempoolInvariantsProblem([Insert(Insert(ty=Declare,arrived_at=SystemTime { tv_sec: 0, tv_nsec: 207000000 },tx_hash=TransactionHash(0x0),contract_address=ContractAddress(PatriciaKey(0x0)),nonce=Nonce(0x0),force=false)), Insert(Insert(ty=Declare,arrived_at=SystemTime { tv_sec: 0, tv_nsec: 207000000 },tx_hash=TransactionHash(0x0),contract_address=ContractAddress(PatriciaKey(0x0)),nonce=Nonce(0x1),force=false))]) -cc 29f49e0ef0eea98a11ffe4648dd6f160299f0841ab5033098dba809dcc974a40 # shrinks to pb = MempoolInvariantsProblem([Insert(Insert(ty=Declare,arrived_at=SystemTime { tv_sec: 0, tv_nsec: 0 },tx_hash=TransactionHash(0x57b92afb0507883960ce6022d158e86e6f71ee6d5b8e56936872ffe0cd5b6cd),contract_address=ContractAddress(PatriciaKey(0x81)),nonce=Nonce(0x24),force=false)), Insert(Insert(ty=Declare,arrived_at=SystemTime { tv_sec: 0, tv_nsec: 0 },tx_hash=TransactionHash(0x507230d365c6eb2de9b5cc6ef286a337887c9c3d814bab629169b4e2caff6ea),contract_address=ContractAddress(PatriciaKey(0x0)),nonce=Nonce(0x0),force=false)), Insert(Insert(ty=Declare,arrived_at=SystemTime { tv_sec: 0, tv_nsec: 0 },tx_hash=TransactionHash(0x507230d365c6eb2de9b5cc6ef286a337887c9c3d814bab629169b4e2caff6ea),contract_address=ContractAddress(PatriciaKey(0x0)),nonce=Nonce(0x0),force=false)), Insert(Insert(ty=Declare,arrived_at=SystemTime { tv_sec: 0, tv_nsec: 0 },tx_hash=TransactionHash(0x507230d365c6eb2de9b5cc6ef286a337887c9c3d814bab629169b4e2caff6ea),contract_address=ContractAddress(PatriciaKey(0x0)),nonce=Nonce(0x0),force=false))]) -cc 1bcf72bf5e5a4c741ab6e6ed269c40b584ac0fd4ffa785e7826410b0d210d8ca # shrinks to pb = MempoolInvariantsProblem([Insert(Insert(ty=Declare,arrived_at=SystemTime { tv_sec: 0, tv_nsec: 66000000 },tx_hash=TransactionHash(0x1295cbfdf4ed4e4ffdf8b661498fd2682235e3660388c0e1159a01d4b045d55),contract_address=ContractAddress(PatriciaKey(0x53)),nonce=Nonce(0x0),force=false)), 
Insert(Insert(ty=Declare,arrived_at=SystemTime { tv_sec: 0, tv_nsec: 0 },tx_hash=TransactionHash(0x1295cbfdf4ed4e4ffdf8b661498fd2682235e3660388c0e1159a01d4b045d55),contract_address=ContractAddress(PatriciaKey(0x53)),nonce=Nonce(0x0),force=false))]) -cc 523d7eadd1091bd7fa740e368300a1db40f0021620f1ce4c2c171330f5b62857 # shrinks to pb = MempoolInvariantsProblem([Insert(Insert(ty=Declare,arrived_at=SystemTime { tv_sec: 0, tv_nsec: 0 },tx_hash=TransactionHash(0x507230d365c6eb2de9b5cc6ef286a337887c9c3d814bab629169b4e2caff6ea),contract_address=ContractAddress(PatriciaKey(0x0)),nonce=Nonce(0x0),force=false)), Insert(Insert(ty=Declare,arrived_at=SystemTime { tv_sec: 0, tv_nsec: 0 },tx_hash=TransactionHash(0x507230d365c6eb2de9b5cc6ef286a337887c9c3d814bab629169b4e2caff6ea),contract_address=ContractAddress(PatriciaKey(0x0)),nonce=Nonce(0x0),force=false)), Insert(Insert(ty=DeployAccount,arrived_at=SystemTime { tv_sec: 0, tv_nsec: 0 },tx_hash=TransactionHash(0x1fc21bdea99da580a2ae18cd35e73807659ef33b93421601bfac0efeb6be459),contract_address=ContractAddress(PatriciaKey(0x7f)),nonce=Nonce(0x0),force=false)), Insert(Insert(ty=DeployAccount,arrived_at=SystemTime { tv_sec: 0, tv_nsec: 0 },tx_hash=TransactionHash(0x156376e23be4af75f1114fa70a12b2f9ffd93ceea21d0d7f4ae45b72377762e),contract_address=ContractAddress(PatriciaKey(0x7f)),nonce=Nonce(0x1),force=true))]) -cc 8bb2a6bb05105d3c1e1795e5a6f1fd45f11d83e3cf509ba8f82104669617e6f5 # shrinks to pb = MempoolInvariantsProblem([Insert(Insert(ty=DeployAccount,arrived_at=SystemTime { tv_sec: 0, tv_nsec: 0 },tx_hash=TransactionHash(0x3cd9a5152e98cdfa806a24a02d916b80e337c0e31012ff91ce1f69801da7b6e),contract_address=ContractAddress(PatriciaKey(0x0)),nonce=Nonce(0xe4),force=false)), Insert(Insert(ty=DeployAccount,arrived_at=SystemTime { tv_sec: 0, tv_nsec: 0 },tx_hash=TransactionHash(0x3cd9a5152e98cdfa806a24a02d916b80e337c0e31012ff91ce1f69801da7b6e),contract_address=ContractAddress(PatriciaKey(0x0)),nonce=Nonce(0xe4),force=true))]) diff --git 
a/crates/madara/client/mempool/proptest-regressions/inner/proptest.txt b/crates/madara/client/mempool/proptest-regressions/inner/proptest.txt new file mode 100644 index 000000000..db77733c7 --- /dev/null +++ b/crates/madara/client/mempool/proptest-regressions/inner/proptest.txt @@ -0,0 +1,8 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. +cc e0fe24c0917216b42d3ab91ff4bad7efc965970cc8928b3be2fe78ad1b456a41 # shrinks to (initial_state, transitions, seen_counter) = (MempoolInner { nonce_mapping: {}, tx_intent_queue_ready: {}, tx_intent_queue_pending_by_nonce: {}, tx_intent_queue_pending_by_timestamp: {}, deployed_contracts: DeployedContracts({}), limiter: MempoolLimiter { config: MempoolLimits { max_transactions: 18446744073709551615, max_declare_transactions: 18446744073709551615, max_age: Some(3600s) }, current_transactions: 0, current_declare_transactions: 0 } }, [Pop, Push { tx: MempoolTransaction { tx_hash: 0x0, nonce: 0x0, contract_address: 0x2b, tx_type: InvokeFunction, arrived_at: SystemTime { tv_sec: 1736763022, tv_nsec: 105722909 } }, force: false, nonce_info: NonceInfo { readiness: Ready, nonce: Nonce(0x0), nonce_next: Nonce(0x1) } }, Push { tx: MempoolTransaction { tx_hash: 0x0, nonce: 0x0, contract_address: 0x2b, tx_type: InvokeFunction, arrived_at: SystemTime { tv_sec: 1736763786, tv_nsec: 335204314 } }, force: false, nonce_info: NonceInfo { readiness: Pending, nonce: Nonce(0x1), nonce_next: Nonce(0x2) } }, Pop, Push { tx: MempoolTransaction { tx_hash: 0x0, nonce: 0x0, contract_address: 0x2b, tx_type: InvokeFunction, arrived_at: SystemTime { tv_sec: 1736765304, tv_nsec: 335666467 } }, force: true, nonce_info: NonceInfo { readiness: Ready, nonce: Nonce(0x0), nonce_next: Nonce(0x1) } }, Pop, Pop], None) 
+cc 50c3ac55269d0e60677604a18a7c72064a812f7ea5f8e15d8ca6b5d36e7520a6 # shrinks to (initial_state, transitions, seen_counter) = (MempoolInner { nonce_mapping: {}, tx_intent_queue_ready: {}, tx_intent_queue_pending_by_nonce: {}, tx_intent_queue_pending_by_timestamp: {}, deployed_contracts: DeployedContracts({}), limiter: MempoolLimiter { config: MempoolLimits { max_transactions: 18446744073709551615, max_declare_transactions: 18446744073709551615, max_age: Some(3600s) }, current_transactions: 0, current_declare_transactions: 0 }, nonce_by_account: {} }, [Push { tx: MempoolTransaction { tx_hash: 0x0, nonce: 0x0, nonce_next: 0x1, contract_address: 0x38, tx_type: InvokeFunction, arrived_at: SystemTime { tv_sec: 1736763594, tv_nsec: 612309376 } }, force: false, nonce_info: NonceInfo { readiness: Ready, nonce: Nonce(0x0), nonce_next: Nonce(0x1) } }], None) diff --git a/crates/madara/client/mempool/src/inner/deployed_contracts.rs b/crates/madara/client/mempool/src/inner/deployed_contracts.rs index 546d94566..919f40374 100644 --- a/crates/madara/client/mempool/src/inner/deployed_contracts.rs +++ b/crates/madara/client/mempool/src/inner/deployed_contracts.rs @@ -6,6 +6,7 @@ use starknet_api::core::ContractAddress; /// When force inserting transaction, it may happen that we run into a duplicate deploy_account transaction. Keep a count for that purpose. 
#[derive(Debug, Clone, Default)] pub struct DeployedContracts(HashMap); + impl DeployedContracts { pub fn decrement(&mut self, address: ContractAddress) { match self.0.entry(address) { @@ -19,14 +20,17 @@ impl DeployedContracts { hash_map::Entry::Vacant(_) => unreachable!("invariant violated"), } } + pub fn increment(&mut self, address: ContractAddress) { *self.0.entry(address).or_insert(0) += 1 } - #[cfg(test)] - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } + pub fn contains(&self, address: &ContractAddress) -> bool { self.0.contains_key(address) } + + #[cfg(any(test, feature = "testing"))] + pub fn count(&self) -> u64 { + self.0.iter().fold(0, |acc, entry| acc + entry.1) + } } diff --git a/crates/madara/client/mempool/src/inner/intent.rs b/crates/madara/client/mempool/src/inner/intent.rs new file mode 100644 index 000000000..49ad9ed12 --- /dev/null +++ b/crates/madara/client/mempool/src/inner/intent.rs @@ -0,0 +1,233 @@ +//! Structures representing the *intent* of executing a transaction. +//! +//! Intents are structures containing essential information for the +//! identification of a [MempoolTransaction] inside of a [NonceTxMapping]. +//! Transaction intents are received, ordered by [ArrivedAtTimestamp] and +//! resolved (polled) at a later time. +//! +//! # Readiness +//! +//! Intents are categorized by readiness. A transaction intent is marked as +//! [TransactionIntentReady] if its nonce directly follows that of the contract +//! sending the transaction, else it is marked as pending. +//! +//! # Pending intents +//! +//! There are two types of pending intents [TransactionIntentPendingByNonce] and +//! [TransactionIntentPendingByTimestamp]. Each pending intent contains the same +//! data but with slightly different ordering rules. This is because the +//! [MempoolInner] holds two ordered queues for pending intents: +//! +//! - One is ordered by timestamp to facilitate the removal of age-exceeded +//! pending intents. +//! +//!
- The other is ordered by nonces to be able to easily retrieve the +//! transaction with the next nonce for a specific contract. +//! +//! > You can convert from one pending intent type to another with [by_timestamp] +//! > and [by_nonce]. +//! +//! Both pending intents remain pending until the transaction preceding them has +//! been polled from the [Mempool], at which point [TransactionIntentPendingByNonce] +//! is converted to a ready intent with [TransactionIntentPendingByNonce::ready], +//! while any [TransactionIntentPendingByTimestamp] are removed from the queue. +//! +//! [MempoolTransaction]: super::MempoolTransaction +//! [NonceTxMapping]: super::NonceTxMapping +//! [MempoolInner]: super::MempoolInner +//! [Mempool]: super::super::Mempool +//! [by_timestamp]: TransactionIntentPendingByNonce::by_timestamp +//! [by_nonce]: TransactionIntentPendingByTimestamp::by_nonce + +use starknet_api::core::Nonce; +use starknet_types_core::felt::Felt; +use std::{cmp, marker::PhantomData}; + +#[cfg(any(test, feature = "testing"))] +use crate::CheckInvariants; + +use super::ArrivedAtTimestamp; + +#[derive(Debug)] +#[cfg_attr(any(test, feature = "testing"), derive(Clone))] +pub(crate) struct MarkerReady; + +#[derive(Debug)] +#[cfg_attr(any(test, feature = "testing"), derive(Clone))] +pub(crate) struct MarkerPendingByNonce; + +#[derive(Debug)] +#[cfg_attr(any(test, feature = "testing"), derive(Clone))] +pub(crate) struct MarkerPendingByTimestamp; + +/// A [transaction intent] which is ready to be consumed. +/// +/// [transaction intent]: TransactionIntent +pub(crate) type TransactionIntentReady = TransactionIntent; + +impl Ord for TransactionIntentReady { + fn cmp(&self, other: &Self) -> cmp::Ordering { + // Important: Fallback on contract addr here. + // There can be timestamp collisions. 
+ self.timestamp + .cmp(&other.timestamp) + .then_with(|| self.contract_address.cmp(&other.contract_address)) + .then_with(|| self.nonce.cmp(&other.nonce)) + } +} + +impl PartialOrd for TransactionIntentReady { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +/// A [transaction intent] which is waiting for the [Nonce] before it to be +/// consumed by the [Mempool]. It cannot be polled until then. +/// +/// [transaction intent]: TransactionIntent +/// [Mempool]: super::super::Mempool +pub(crate) type TransactionIntentPendingByNonce = TransactionIntent; + +impl TransactionIntentPendingByNonce { + /// Converts this [intent] to a [TransactionIntentReady] to be added to the + /// ready intent queue in the [MempoolInner] + /// + /// [intent]: self + /// [MempoolInner]: super::MempoolInner + pub(crate) fn ready(&self) -> TransactionIntentReady { + TransactionIntentReady { + contract_address: self.contract_address, + timestamp: self.timestamp, + nonce: self.nonce, + nonce_next: self.nonce_next, + phantom: std::marker::PhantomData, + } + } + + /// Converts this [intent] to a [TransactionIntentPendingByTimestamp] to be + /// used to remove aged pending transactions from the [MempoolInner]. 
+ /// + /// [intent]: self + /// [MempoolInner]: super::MempoolInner + pub(crate) fn by_timestamp(&self) -> TransactionIntentPendingByTimestamp { + TransactionIntentPendingByTimestamp { + contract_address: self.contract_address, + timestamp: self.timestamp, + nonce: self.nonce, + nonce_next: self.nonce_next, + phantom: std::marker::PhantomData, + } + } +} + +impl Ord for TransactionIntentPendingByNonce { + fn cmp(&self, other: &Self) -> cmp::Ordering { + // Pending transactions are simply ordered by nonce + self.nonce + .cmp(&other.nonce) + .then_with(|| self.timestamp.cmp(&other.timestamp)) + .then_with(|| self.contract_address.cmp(&other.contract_address)) + } +} + +impl PartialOrd for TransactionIntentPendingByNonce { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +/// A [pending transaction intent] which is ordered by timestamp. This is +/// necessary to be able to remove pending transactions which have grown too old +/// in the [Mempool]. +/// +/// [pending transaction intent]: TransactionIntentPendingByNonce +/// [Mempool]: super::super::Mempool +pub(crate) type TransactionIntentPendingByTimestamp = TransactionIntent; + +impl TransactionIntentPendingByTimestamp { + pub(crate) fn by_nonce(self) -> TransactionIntentPendingByNonce { + TransactionIntentPendingByNonce { + contract_address: self.contract_address, + timestamp: self.timestamp, + nonce: self.nonce, + nonce_next: self.nonce_next, + phantom: PhantomData, + } + } +} + +impl Ord for TransactionIntentPendingByTimestamp { + fn cmp(&self, other: &Self) -> cmp::Ordering { + // Important: Fallback on contract addr here. + // There can be timestamp collisions. 
+ self.timestamp + .cmp(&other.timestamp) + .then_with(|| self.contract_address.cmp(&other.contract_address)) + .then_with(|| self.nonce.cmp(&other.nonce)) + } +} + +impl PartialOrd for TransactionIntentPendingByTimestamp { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +/// An [intent] to be consumed by the [Mempool]. +/// +/// This data struct will check [timestamp], [contract_address] and [nonce] +/// (in that order) for equality. [nonce_next] is not considered as it should +/// directly follow from [nonce] and therefore its equality and order is implied. +/// +/// # Type Safety +/// +/// This struct is statically wrapped by [TransactionIntentReady], +/// [TransactionIntentPendingByNonce] and [TransactionIntentPendingByTimestamp] +/// to provide type safety between intent types while avoiding too much code +/// duplication. +/// +/// # [Invariants] +/// +/// - [nonce_next] must always be equal to [nonce] + [Felt::ONE]. +/// +/// [intent]: self +/// [Mempool]: super::super::Mempool +/// [timestamp]: Self::timestamp +/// [contract_address]: Self::contract_address +/// [nonce]: Self::nonce +/// [nonce_next]: Self::nonce_next +/// [Invariants]: CheckInvariants +#[derive(Debug)] +#[cfg_attr(any(test, feature = "testing"), derive(Clone))] +pub(crate) struct TransactionIntent { + /// The contract responsible for sending the transaction. + pub(crate) contract_address: Felt, + /// Time at which the transaction was received by the mempool. + pub(crate) timestamp: ArrivedAtTimestamp, + /// The [Nonce] of the transaction associated to this intent. We use this + /// for retrieval purposes later on. + pub(crate) nonce: Nonce, + /// This is the [Nonce] of the transaction right after this one. We + /// precompute this to avoid making calculations on a [Felt] in the hot + /// loop, as this can be expensive. 
+ pub(crate) nonce_next: Nonce, + pub(crate) phantom: PhantomData, +} + +#[cfg(any(test, feature = "testing"))] +impl CheckInvariants for TransactionIntent { + fn check_invariants(&self) { + assert_eq!(self.nonce_next, self.nonce.try_increment().unwrap()); + } +} + +impl PartialEq for TransactionIntent { + fn eq(&self, other: &Self) -> bool { + self.timestamp == other.timestamp + && self.contract_address == other.contract_address + && self.nonce == other.nonce + } +} + +impl Eq for TransactionIntent {} diff --git a/crates/madara/client/mempool/src/inner/limits.rs b/crates/madara/client/mempool/src/inner/limits.rs index d21627560..8443e6c81 100644 --- a/crates/madara/client/mempool/src/inner/limits.rs +++ b/crates/madara/client/mempool/src/inner/limits.rs @@ -7,6 +7,7 @@ use mp_chain_config::ChainConfig; use crate::MempoolTransaction; #[derive(Debug)] +#[cfg_attr(any(test, feature = "testing"), derive(Clone))] pub struct MempoolLimits { pub max_transactions: usize, pub max_declare_transactions: usize, @@ -31,6 +32,7 @@ impl MempoolLimits { /// tick has been executed and excess transactions are added back into the mempool. /// This means that the inner mempool may have fewer transactions than what the limits says at a given time. 
#[derive(Debug)] +#[cfg_attr(any(test, feature = "testing"), derive(Clone))] pub(crate) struct MempoolLimiter { pub config: MempoolLimits, current_transactions: usize, @@ -47,6 +49,7 @@ pub enum MempoolLimitReached { Age { max: Duration }, } +#[derive(Debug)] pub(crate) struct TransactionCheckedLimits { check_tx_limit: bool, check_declare_limit: bool, @@ -88,6 +91,10 @@ impl TransactionCheckedLimits { }, } } + + pub fn checks_age(&self) -> bool { + self.check_age + } } impl MempoolLimiter { diff --git a/crates/madara/client/mempool/src/inner/mod.rs b/crates/madara/client/mempool/src/inner/mod.rs index 7beb301c0..d37ab3478 100644 --- a/crates/madara/client/mempool/src/inner/mod.rs +++ b/crates/madara/client/mempool/src/inner/mod.rs @@ -3,62 +3,191 @@ //! Insertion and popping should be O(log n). //! We also really don't want to poison the lock by panicking. -use blockifier::transaction::account_transaction::AccountTransaction; use blockifier::transaction::transaction_execution::Transaction; +use blockifier::transaction::{account_transaction::AccountTransaction, transaction_types::TransactionType}; use deployed_contracts::DeployedContracts; +use mc_db::mempool_db::{NonceInfo, NonceStatus}; +use mc_exec::execution::TxInfo; use mp_convert::ToFelt; -use nonce_chain::{InsertedPosition, NonceChain, NonceChainNewState, ReplacedState}; -use starknet_api::core::ContractAddress; +use starknet_api::core::{ContractAddress, Nonce}; use starknet_types_core::felt::Felt; -use std::{ - cmp, - collections::{hash_map, BTreeSet, HashMap}, -}; +use std::collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap}; mod deployed_contracts; +mod intent; mod limits; -mod nonce_chain; +mod nonce_mapping; mod proptest; mod tx; +pub(crate) use intent::*; pub use limits::*; +pub use nonce_mapping::*; pub use tx::*; -#[derive(Clone, Debug, PartialEq, Eq)] -struct AccountOrderedByTimestamp { - contract_addr: Felt, - timestamp: ArrivedAtTimestamp, -} - -impl Ord for AccountOrderedByTimestamp { 
- fn cmp(&self, other: &Self) -> cmp::Ordering { - // Important: Fallback on contract addr here. - // There can be timestamp collisions. - self.timestamp.cmp(&other.timestamp).then_with(|| self.contract_addr.cmp(&other.contract_addr)) - } -} -impl PartialOrd for AccountOrderedByTimestamp { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - +#[cfg(any(test, feature = "testing"))] +use crate::CheckInvariants; + +#[cfg(any(test, feature = "testing"))] +use starknet_api::transaction::TransactionHash; + +/// A struct responsible for the rapid ordering and disposal of transactions by +/// their [readiness] and time of arrival. +/// +/// # Intent Queues: +/// +/// These do not actually store transactions but the *intent* and the *order* of +/// these transaction being added to the [Mempool]. We *intend* to execute a +/// transaction from a given contract address, based on its readiness and order +/// of arrival. A transaction is deemed ready if its [Nonce] directly follows +/// the previous [Nonce] used by that contract. This is retrieved from the +/// database before the transaction is added to the inner mempool. This means +/// that transactions which are not ready (these are [pending] transactions) +/// will remain waiting for the required transactions to be processed before +/// they are marked as [ready] themselves. +/// +/// ## [Ready] +/// +/// FIFO queue. We use a [BTreeSet] to maintain logarithmic complexity and high +/// performance with low reordering of the memory even in the case of very high +/// transaction throughput. +/// +/// ## [Pending] +/// +/// FIFO queue. The queue has an entry per contract address in the mempool, with +/// each contract address having a [BTreeMap] queue of its transactions mapped +/// to it. We do this to have access to [BTreeMap::entry] which avoids a double +/// lookup in [pop_next] when moving pending intents to the ready queue. Intents +/// in this queue are ordered by their [Nonce]. 
+/// +/// While this is handy to retrieve the tx with the next nonce for a particular +/// contract, it is a performance bottleneck when removing age exceeded pending +/// transaction. For this reason, we keep a [separate ordering] of all pending +/// transactions, sorted by their time of arrival. +/// +/// # Updating Transaction Intent +/// +/// Transaction intents in each queue are updated as follows: +/// +/// - When [pop_next] is called, the next [ready] intent (if any are available) +/// is popped from its queue. +/// +/// - The information contained in that intent is used to retrieve the +/// [MempoolTransaction] associated with it, which is stored inside a +/// [NonceTxMapping], inside [nonce_mapping]. +/// +/// - Once this is done, we retrieve the pending queue for that contract address +/// in [tx_intent_queue_pending_by_nonce] and check if the next [pending] +/// intent has the right nonce. If so, we pop it, convert it to [ready] and +/// add it to [tx_intent_queue_ready]. If this was the last element in that +/// queue, we remove the mapping for that contract address in +/// [tx_intent_queue_pending_by_nonce]. +/// +/// - Finally, we update [tx_intent_queue_pending_by_timestamp] to reflect the +/// changes in [tx_intent_queue_pending_by_nonce] +/// +/// # Emptying the [Mempool] +/// +/// Currently, the mempool is emptied on each call to [insert_tx] based on the +/// age of a transaction. There are several issues with that. +/// +/// 1. First of all, we might want to limit this check to once every few seconds +/// for performance reasons. +/// +/// 2. We currently do not empty the mempool in case of congestion. This is +/// complicated because we would need to be able to differentiate between +/// declare and non-declare transactions in the mempool (declare txs have +/// their own congestion limit). 
This can be done by adding another readiness +/// and pending queue which are both reserved to declare transactions, but I +/// am done with refactoring for the moment and I don't even know if this +/// would be a good idea. FIXME +/// +/// # Invariants +/// +/// The inner mempool adheres to the following invariants: +/// +/// - every [MempoolTransaction] mapping in [nonce_mapping] should have a +/// one-to-one match with an entry in either [tx_intent_queue_ready] or +/// [tx_intent_queue_pending_by_nonce]. +/// +/// - every [Felt] key in [nonce_mapping] should have a one-to-one match with +/// the contract address of an entry in either [tx_intent_queue_ready] or +/// [tx_intent_queue_pending_by_nonce]. +/// +/// - Every [`AccountTransaction::DeployAccount`] transaction should have a one +/// to one match with [deployed_contracts]. +/// +/// - Every intent in [tx_intent_queue_pending_by_nonce] should have a one-to-one +/// mapping with [tx_intent_queue_pending_by_timestamp]. +/// +/// - The invariants of [TransactionIntentReady], [TransactionIntentPendingByNonce] +/// and [TransactionIntentPendingByTimestamp] must be respected. +/// +/// These invariants can be checked by calling [check_invariants] in a test +/// environment. 
+///
+/// [Nonce]: starknet_api::core::Nonce
+/// [BTreeMap]: std::collections::BTreeMap
+/// [BTreeMap::entry]: std::collections::BTreeMap::entry
+/// [readiness]: intent
+/// [Ready]: Self::tx_intent_queue_ready
+/// [Pending]: Self::tx_intent_queue_pending_by_nonce
+/// [Mempool]: super::Mempool
+/// [pending]: TransactionIntentPending
+/// [ready]: TransactionIntentReady
+/// [pop_next]: Self::pop_next
+/// [nonce_mapping]: Self::nonce_mapping
+/// [insert_tx]: Self::insert_tx
+/// [tx_intent_queue_ready]: Self::tx_intent_queue_ready
+/// [tx_intent_queue_pending_by_nonce]: Self::tx_intent_queue_pending_by_nonce
+/// [tx_intent_queue_pending_by_timestamp]: Self::tx_intent_queue_pending_by_timestamp
+/// [deployed_contracts]: Self::deployed_contracts
+/// [check_invariants]: Self::check_invariants
+/// [separate ordering]: Self::tx_intent_queue_pending_by_timestamp
 #[derive(Debug)]
-/// Invariants:
-/// - Every nonce chain in `nonce_chains` should have a one to one match with `tx_queue`.
-/// - Every [`AccountTransaction::DeployAccount`] transaction should have a one to one match with `deployed_contracts`.
-/// - See [`NonceChain`] invariants.
-pub(crate) struct MempoolInner {
-    /// We have one nonce chain per contract address.
-    nonce_chains: HashMap,
-    /// FCFS queue.
-    tx_queue: BTreeSet,
-    deployed_contracts: DeployedContracts,
+#[cfg_attr(any(test, feature = "testing"), derive(Clone))]
+pub struct MempoolInner {
+    /// We have one [Nonce] to [MempoolTransaction] mapping per contract
+    /// address.
+    ///
+    /// [Nonce]: starknet_api::core::Nonce
+    // TODO: this can be replaced with a hashmap with a tuple key
+    pub nonce_mapping: HashMap,
+    /// FIFO queue of all [ready] intents.
+    ///
+    /// [ready]: TransactionIntentReady
+    pub(crate) tx_intent_queue_ready: BTreeSet,
+    /// FIFO queue of all [pending] intents, sorted by their [Nonce].
+ /// + /// [pending]: TransactionIntentPendingByNonce + // TODO: can remove contract_address from TransactionIntentPendingByNonce + pub(crate) tx_intent_queue_pending_by_nonce: HashMap>, + /// FIFO queue of all [pending] intents, sorted by their time of arrival. + /// + /// This is required for the rapid removal of age-exceeded txs in + /// [remove_age_exceeded_txs] and must be kept in sync with + /// [tx_intent_queue_pending_by_nonce]. + /// + /// [pending]: TransactionIntentPendingByTimestamp + /// [remove_age_exceeded_txs]: Self::remove_age_exceeded_txs + /// [tx_intent_queue_pending_by_nonce]: Self::tx_intent_queue_pending_by_nonce + // TODO: can remove nonce_next from TransactionIntentPendingByTimestamp + pub(crate) tx_intent_queue_pending_by_timestamp: BTreeSet, + /// List of all new deployed contracts currently in the mempool. + pub(crate) deployed_contracts: DeployedContracts, + /// Constraints on the number of transactions allowed in the [Mempool] + /// + /// [Mempool]: super::Mempool limiter: MempoolLimiter, + + /// This is just a helper field to use during tests to get the current nonce + /// of a contract as known by the [MempoolInner]. 
+ #[cfg(any(test, feature = "testing"))] + nonce_cache_inner: HashMap, } -#[derive(thiserror::Error, Debug, PartialEq, Eq)] -pub enum TxInsersionError { +#[derive(thiserror::Error, Debug, PartialEq)] +pub enum TxInsertionError { #[error("A transaction with this nonce already exists in the transaction pool")] NonceConflict, #[error("A transaction with this hash already exists in the transaction pool")] @@ -67,52 +196,155 @@ pub enum TxInsersionError { Limit(#[from] MempoolLimitReached), } +#[cfg(any(test, feature = "testing"))] +impl CheckInvariants for MempoolInner { + fn check_invariants(&self) { + // tx_intent_queue_ready can only contain one tx of every contract + let mut tx_counts = HashMap::::default(); + let mut deployed_count = 0; + for intent in self.tx_intent_queue_ready.iter() { + intent.check_invariants(); + + let nonce_mapping = self + .nonce_mapping + .get(&intent.contract_address) + .unwrap_or_else(|| panic!("Missing nonce mapping for contract address {}", &intent.contract_address)); + + let mempool_tx = + nonce_mapping.transactions.get(&intent.nonce).expect("Missing nonce mapping for ready intent"); + + assert_eq!(mempool_tx.nonce, intent.nonce); + assert_eq!(mempool_tx.nonce_next, intent.nonce_next); + assert_eq!(mempool_tx.arrived_at, intent.timestamp); + + if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = &mempool_tx.tx { + let contract_address = tx.contract_address; + assert!( + self.has_deployed_contract(&contract_address), + "Ready deploy account tx from sender {contract_address:x?} is not part of deployed contacts" + ); + deployed_count += 1; + } + + *tx_counts.entry(intent.contract_address).or_insert(0) += 1; + } + + let mut count = 0; + for (contract_address, queue) in self.tx_intent_queue_pending_by_nonce.iter() { + assert!(!queue.is_empty()); + + for intent in queue.keys() { + let intent_pending_by_timestamp = intent.by_timestamp(); + 
self.tx_intent_queue_pending_by_timestamp.get(&intent_pending_by_timestamp).unwrap_or_else(|| { + panic!( + "Missing pending intent by timestamp: {intent_pending_by_timestamp:#?}, available: {:#?}", + self.tx_intent_queue_pending_by_timestamp + ) + }); + count += 1; + + intent.check_invariants(); + assert_eq!(&intent.contract_address, contract_address); + + let nonce_mapping = self.nonce_mapping.get(&intent.contract_address).unwrap_or_else(|| { + panic!("Missing nonce mapping for contract address {}", &intent.contract_address) + }); + + let mempool_tx = nonce_mapping.transactions.get(&intent.nonce).unwrap_or_else(|| { + panic!( + "Missing nonce mapping for pending intent: required {:?}, available {:?}", + intent.nonce, + nonce_mapping.transactions.keys() + ) + }); + + assert_eq!(mempool_tx.nonce, intent.nonce); + assert_eq!(mempool_tx.nonce_next, intent.nonce_next); + assert_eq!(mempool_tx.arrived_at, intent.timestamp); + + if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = &mempool_tx.tx { + let contract_address = tx.contract_address; + assert!( + self.has_deployed_contract(&contract_address), + "Pending deploy account tx from sender {contract_address:x?} is not part of deployed contacts", + ); + deployed_count += 1; + } + + *tx_counts.entry(intent.contract_address).or_insert(0) += 1; + } + } + + assert_eq!( + self.tx_intent_queue_pending_by_timestamp.len(), + count, + "Excess transactions by timetamp, remaining: {:#?}", + self.tx_intent_queue_pending_by_timestamp + ); + + for (contract_address, nonce_mapping) in self.nonce_mapping.iter() { + let count = tx_counts.get(contract_address).unwrap_or_else(|| { + panic!( + "Extra nonce mapping at contract address {contract_address}, remaining nonces are: {:?}", + nonce_mapping.transactions.keys() + ) + }); + + assert_eq!( + &nonce_mapping.transactions.len(), + count, + "Extra transactions in nonce mapping at contract address {contract_address}, remaining nonces are: {:?}", + 
nonce_mapping.transactions.keys() + ); + } + + assert_eq!( + deployed_count, + self.deployed_contracts.count(), + "{} extra deployed contract mappings, remaining contract mappings are: {:?}, counted {}.\nready intents are {:#?}\npending intents are {:#?}", + self.deployed_contracts.count().saturating_sub(deployed_count), + self.deployed_contracts, + deployed_count, + self.tx_intent_queue_ready, + self.tx_intent_queue_pending_by_nonce + ); + } +} + impl MempoolInner { pub fn new(limits_config: MempoolLimits) -> Self { Self { - nonce_chains: Default::default(), - tx_queue: Default::default(), + nonce_mapping: Default::default(), + tx_intent_queue_ready: Default::default(), + tx_intent_queue_pending_by_nonce: Default::default(), + tx_intent_queue_pending_by_timestamp: Default::default(), deployed_contracts: Default::default(), limiter: MempoolLimiter::new(limits_config), + #[cfg(any(test, feature = "testing"))] + nonce_cache_inner: Default::default(), } } - #[cfg(test)] - pub fn check_invariants(&self) { - self.nonce_chains.values().for_each(NonceChain::check_invariants); - let mut tx_queue = self.tx_queue.clone(); - for (k, v) in &self.nonce_chains { - assert!(tx_queue.remove(&AccountOrderedByTimestamp { contract_addr: *k, timestamp: v.front_arrived_at })) - } - assert_eq!(tx_queue, Default::default()); - let mut deployed_contracts = self.deployed_contracts.clone(); - for (contract, _) in self.nonce_chains.values().flat_map(|chain| &chain.transactions) { - if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = &contract.0.tx { - deployed_contracts.decrement(tx.contract_address) - } - } - assert!(deployed_contracts.is_empty(), "remaining deployed_contracts: {deployed_contracts:?}"); - } - /// When `force` is `true`, this function should never return any error. - /// `update_limits` is `false` when the transaction has been removed from the mempool in the past without updating the limits. 
+ /// `update_limits` is `false` when the transaction has been removed from + /// the mempool in the past without updating the limits. pub fn insert_tx( &mut self, mempool_tx: MempoolTransaction, force: bool, update_limits: bool, - ) -> Result<(), TxInsersionError> { + nonce_info: NonceInfo, + ) -> Result<(), TxInsertionError> { // delete age-exceeded txs from the mempool - // todo(perf): this may want to limit this check once every few seconds to avoid it being in the hot path? - self.remove_age_exceeded_txs(); - - // check limits + // todo(perf): this may want to limit this check once every few seconds + // to avoid it being in the hot path? let limits_for_tx = TransactionCheckedLimits::limits_for(&mempool_tx); if !force { + self.remove_age_exceeded_txs(); self.limiter.check_insert_limits(&limits_for_tx)?; } - let contract_addr = mempool_tx.contract_address().to_felt(); + let contract_address = mempool_tx.contract_address().to_felt(); let arrived_at = mempool_tx.arrived_at; let deployed_contract_address = if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = &mempool_tx.tx { @@ -121,53 +353,178 @@ impl MempoolInner { None }; - let is_replaced = match self.nonce_chains.entry(contract_addr) { + // Inserts the transaction into the nonce tx mapping for the current + // contract + match self.nonce_mapping.entry(contract_address) { hash_map::Entry::Occupied(mut entry) => { // Handle nonce collision. 
- let chain: &mut NonceChain = entry.get_mut(); - let (position, is_replaced) = match chain.insert(mempool_tx, force) { - Ok(position) => position, + let nonce_tx_mapping = entry.get_mut(); + let replaced = match nonce_tx_mapping.insert(mempool_tx, nonce_info.nonce, force) { + Ok(replaced) => replaced, Err(nonce_collision_or_duplicate_hash) => { - debug_assert!(!force); // "Force add should never error + debug_assert!(!force); // Force add should never error return Err(nonce_collision_or_duplicate_hash); } }; - match position { - InsertedPosition::Front { former_head_arrived_at } => { - // If we inserted at the front, it has invalidated the tx queue. Update the tx queue. - let removed = self - .tx_queue - .remove(&AccountOrderedByTimestamp { contract_addr, timestamp: former_head_arrived_at }); - debug_assert!(removed); + // Update the tx queues. + match nonce_info.readiness { + NonceStatus::Ready => { + // Remove old value (if collision and force == true) + if let ReplacedState::Replaced { previous } = replaced { + let removed = self.tx_intent_queue_ready.remove(&TransactionIntentReady { + contract_address, + timestamp: previous.arrived_at, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + phantom: std::marker::PhantomData, + }); + debug_assert!(removed); + self.limiter.mark_removed(&TransactionCheckedLimits::limits_for(&previous)); + + // So! This is a pretty nasty edge case. If we + // replace a transaction, and the previous tx was + // a deploy account, we must DECREMENT the count for + // that address. If the NEW transactions is a deploy + // but not the old one, we must INCREMENT the count. + // Otherwise, we have replaced a deploy account with + // another deploy account and we do nothing. 
+ if let Some(contract_address) = &deployed_contract_address { + if previous.tx.tx_type() != TransactionType::DeployAccount { + self.deployed_contracts.increment(*contract_address); + } + } else if previous.tx.tx_type() == TransactionType::DeployAccount { + self.deployed_contracts.decrement(previous.contract_address()); + } + } else if let Some(contract_address) = &deployed_contract_address { + self.deployed_contracts.increment(*contract_address) + } + + // Insert new value + let insert = self.tx_intent_queue_ready.insert(TransactionIntentReady { + contract_address, + timestamp: arrived_at, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + phantom: Default::default(), + }); + debug_assert!(insert); + } + NonceStatus::Pending => { + // The pending queue works a little bit differently as + // it is split into individual sub-queues for each + // contract address + let queue = self.tx_intent_queue_pending_by_nonce.entry(contract_address).or_default(); + + // Remove old value (if collision and force == true) + if let ReplacedState::Replaced { previous } = replaced { + let removed = queue.remove(&TransactionIntentPendingByNonce { + contract_address, + timestamp: previous.arrived_at, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + phantom: std::marker::PhantomData, + }); + debug_assert!(removed.is_some()); + + let removed = self.tx_intent_queue_pending_by_timestamp.remove( + &TransactionIntentPendingByTimestamp { + contract_address, + timestamp: previous.arrived_at, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + phantom: std::marker::PhantomData, + }, + ); + debug_assert!(removed); + + self.limiter.mark_removed(&TransactionCheckedLimits::limits_for(&previous)); + + if let Some(contract_address) = &deployed_contract_address { + if previous.tx.tx_type() != TransactionType::DeployAccount { + self.deployed_contracts.increment(*contract_address); + } + } else if previous.tx.tx_type() == TransactionType::DeployAccount { + 
self.deployed_contracts.decrement(previous.contract_address()) + } + } else if let Some(contract_address) = &deployed_contract_address { + self.deployed_contracts.increment(*contract_address); + } + + // Insert new value + let inserted = queue.insert( + TransactionIntentPendingByNonce { + contract_address, + timestamp: arrived_at, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + phantom: std::marker::PhantomData, + }, + (), + ); + debug_assert!(inserted.is_none()); + let inserted = - self.tx_queue.insert(AccountOrderedByTimestamp { contract_addr, timestamp: arrived_at }); + self.tx_intent_queue_pending_by_timestamp.insert(TransactionIntentPendingByTimestamp { + contract_address, + timestamp: arrived_at, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + phantom: std::marker::PhantomData, + }); debug_assert!(inserted); } - InsertedPosition::Other => { - // No need to update the tx queue. - } - } - is_replaced + }; } hash_map::Entry::Vacant(entry) => { - // Insert the new nonce chain - let nonce_chain = NonceChain::new_with_first_tx(mempool_tx); - entry.insert(nonce_chain); - - // Also update the tx queue. - let inserted = self.tx_queue.insert(AccountOrderedByTimestamp { contract_addr, timestamp: arrived_at }); + // Insert the new nonce tx mapping + let nonce_tx_mapping = NonceTxMapping::new_with_first_tx(mempool_tx, nonce_info.nonce); + entry.insert(nonce_tx_mapping); + + // Update the tx queues. 
+ let inserted = match nonce_info.readiness { + NonceStatus::Ready => self.tx_intent_queue_ready.insert(TransactionIntentReady { + contract_address, + timestamp: arrived_at, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + phantom: std::marker::PhantomData, + }), + NonceStatus::Pending => { + let insert_1 = + self.tx_intent_queue_pending_by_timestamp.insert(TransactionIntentPendingByTimestamp { + contract_address, + timestamp: arrived_at, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + phantom: std::marker::PhantomData, + }); + + let insert_2 = self + .tx_intent_queue_pending_by_nonce + .entry(contract_address) + .or_default() + .insert( + TransactionIntentPendingByNonce { + contract_address, + timestamp: arrived_at, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + phantom: std::marker::PhantomData, + }, + (), + ) + .is_none(); + + insert_1 && insert_2 + } + }; debug_assert!(inserted); - ReplacedState::NotReplaced + if let Some(contract_address) = &deployed_contract_address { + self.deployed_contracts.increment(*contract_address) + } } - }; - - if let ReplacedState::Replaced { previous } = is_replaced { - // Mark the previous transaction as deleted - self.limiter.mark_removed(&TransactionCheckedLimits::limits_for(&previous)); - } else if let Some(contract_address) = &deployed_contract_address { - self.deployed_contracts.increment(*contract_address) } // Update transaction limits @@ -182,73 +539,207 @@ impl MempoolInner { self.deployed_contracts.contains(addr) } - fn pop_tx_queue_account(&mut self, tx_queue_account: &AccountOrderedByTimestamp) -> MempoolTransaction { - // Update nonce chain. - let nonce_chain = - self.nonce_chains.get_mut(&tx_queue_account.contract_addr).expect("Nonce chain does not match tx queue"); - let (mempool_tx, nonce_chain_new_state) = nonce_chain.pop(); - match nonce_chain_new_state { - NonceChainNewState::Empty => { - // Remove the nonce chain. 
- let removed = self.nonce_chains.remove(&tx_queue_account.contract_addr); - debug_assert!(removed.is_some()); - } - NonceChainNewState::NotEmpty => { - // Re-add to tx queue. - let inserted = self.tx_queue.insert(AccountOrderedByTimestamp { - contract_addr: tx_queue_account.contract_addr, - timestamp: nonce_chain.front_arrived_at, - }); - debug_assert!(inserted); + pub fn remove_age_exceeded_txs(&mut self) { + let mut ready_no_age_check = vec![]; + + // We take advantage of the fact that TransactionIntentReady is + // ordered by timestamp, so as soon as we find a transaction which has + // not exceeded its max age (and that transaction supports age limits) + // we know no more transactions can be removed. + while let Some(intent) = self.tx_intent_queue_ready.first() { + let hash_map::Entry::Occupied(mut entry) = self.nonce_mapping.entry(intent.contract_address) else { + unreachable!("Nonce chain does not match tx queue"); + }; + + let nonce_mapping = entry.get_mut(); + let btree_map::Entry::Occupied(nonce_mapping_entry) = nonce_mapping.transactions.entry(intent.nonce) else { + unreachable!("Nonce chain without a tx"); + }; + + let limits = TransactionCheckedLimits::limits_for(nonce_mapping_entry.get()); + if self.limiter.tx_age_exceeded(&limits) { + let mempool_tx = nonce_mapping_entry.remove(); + + // We must remember to update the deploy contract count on + // removal! + if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = mempool_tx.tx { + self.deployed_contracts.decrement(tx.contract_address); + } + + if nonce_mapping.transactions.is_empty() { + entry.remove(); + } + + self.tx_intent_queue_ready.pop_first(); + } else if limits.checks_age() { + break; + } else { + // Some intents are not checked for age. Right now this is only + // the case for l1 handler intents. If we run into one of those, + // we still need to check if the intents after it have timed + // out, so we pop it to get the next. 
We will then add back the + // intents which were popped in a separate loop outside this + // one. + // + // In practice this is ok as l1 handler transactions are few and + // far between. Note that removing this check will result in an + // infinite loop if ever an l1 transaction is encountered. + ready_no_age_check.push(self.tx_intent_queue_ready.pop_first().expect("Already inside loop")); } } - // Update deployed contracts. - if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = &mempool_tx.tx { - self.deployed_contracts.decrement(tx.contract_address); + // Adding back ready transactions with no age check to them + for intent in ready_no_age_check { + self.tx_intent_queue_ready.insert(intent); } - mempool_tx - } + let mut pending_no_age_check = vec![]; - pub fn remove_age_exceeded_txs(&mut self) { - // Pop tx queue. - // too bad there's no first_entry api, we should check if hashbrown has it to avoid the double lookup. - while let Some(tx_queue_account) = self.tx_queue.first() { - let tx_queue_account = tx_queue_account.clone(); // clone is cheap for this struct - let nonce_chain = self - .nonce_chains - .get_mut(&tx_queue_account.contract_addr) - .expect("Nonce chain does not match tx queue"); - let (k, _v) = nonce_chain.transactions.first_key_value().expect("Nonce chain without a tx"); - - if self.limiter.tx_age_exceeded(&TransactionCheckedLimits::limits_for(&k.0)) { - let tx = self.pop_tx_queue_account(&tx_queue_account); - let _res = self.tx_queue.pop_first().expect("Cannot be empty, checked just above"); - self.limiter.mark_removed(&TransactionCheckedLimits::limits_for(&tx)); - } else { + // The code for removing age-exceeded pending transactions is similar, + // but with the added complexity that we need to keep + // tx_intent_queue_pending and tx_intent_queue_pending_by_timestamp in + // sync. This means removals need to take place across both queues. 
+ while let Some(intent) = self.tx_intent_queue_pending_by_timestamp.first() { + // Set 1: look for a pending transaction which is too old + let hash_map::Entry::Occupied(mut entry) = self.nonce_mapping.entry(intent.contract_address) else { + unreachable!("Nonce chain does not match tx queue"); + }; + + let nonce_mapping = entry.get_mut(); + let btree_map::Entry::Occupied(nonce_mapping_entry) = nonce_mapping.transactions.entry(intent.nonce) else { + unreachable!("Nonce chain without a tx"); + }; + + let limits = TransactionCheckedLimits::limits_for(nonce_mapping_entry.get()); + if self.limiter.tx_age_exceeded(&limits) { + // Step 2: we found it! Now we remove the entry in + // tx_intent_queue_pending_by_timestamp + + let mempool_tx = nonce_mapping_entry.remove(); // *- snip -* + if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = mempool_tx.tx { + // Remember to update the deployed contract count along the + // way! + self.deployed_contracts.decrement(tx.contract_address); + } + + if nonce_mapping.transactions.is_empty() { + entry.remove(); // *- snip -* + } + + let intent = self + .tx_intent_queue_pending_by_timestamp + .pop_first() // *- snip -* + .expect("Already in loop, first entry must exist"); + + // Step 3: we remove the TransactionIntentPendingByNonce + // associated to the TransactionIntentPendingByTimestamp we just + // removed. + + let hash_map::Entry::Occupied(mut entry) = + self.tx_intent_queue_pending_by_nonce.entry(intent.contract_address) + else { + unreachable!("Missing pending intent mapping for {:?}", intent.contract_address); + }; + + let queue = entry.get_mut(); + let remove = queue.remove(&intent.by_nonce()); // *- snip -* + debug_assert!(remove.is_some()); + + // Step 4: tx_intent_queue_pending is essentially a tree of + // trees. We cannot leave empty sub-trees or else the memory + // will fill up! So if the queue of pending intents by nonce is + // empty, we remove it as well. 
+ + if queue.is_empty() { + entry.remove(); // *- snip -* + } + } else if limits.checks_age() { break; + } else { + pending_no_age_check + .push(self.tx_intent_queue_pending_by_timestamp.pop_first().expect("Already inside loop")); } } + + for intent in pending_no_age_check { + self.tx_intent_queue_pending_by_timestamp.insert(intent); + } } pub fn pop_next(&mut self) -> Option { // Pop tx queue. - let mempool_tx = loop { - let tx_queue_account = self.tx_queue.pop_first()?; // Bubble up None if the mempool is empty. - let mempool_tx = self.pop_tx_queue_account(&tx_queue_account); + let (tx_mempool, contract_address, nonce_next) = loop { + // Bubble up None if the mempool is empty. + let tx_intent = self.tx_intent_queue_ready.pop_first()?; + let tx_mempool = self.pop_tx_from_intent(&tx_intent); - let limits = TransactionCheckedLimits::limits_for(&mempool_tx); + let limits = TransactionCheckedLimits::limits_for(&tx_mempool); if !self.limiter.tx_age_exceeded(&limits) { - break mempool_tx; + break (tx_mempool, tx_intent.contract_address, tx_intent.nonce_next); } // transaction age exceeded, remove the tx from mempool. self.limiter.mark_removed(&limits); }; + // Looks for the next transaction from the same account in the pending + // queue and marks it as ready if found. + 'pending: { + if let hash_map::Entry::Occupied(mut entry) = self.tx_intent_queue_pending_by_nonce.entry(contract_address) + { + let queue = entry.get_mut(); + + let entry_inner = queue.first_entry().expect("Intent queue cannot be empty"); + let nonce = entry_inner.key().nonce; + + if nonce != nonce_next { + break 'pending; + } + + let intent_pending_by_nonce = entry_inner.remove_entry().0; + + // This works like a NonceMapping: if a pending intent queue is + // empty, we remove the mapping. + if queue.is_empty() { + entry.remove(); + } + + // We need to keep pending intents by timestamp in sync! 
+ let intent_pending_by_timestamp = intent_pending_by_nonce.by_timestamp(); + let removed = self.tx_intent_queue_pending_by_timestamp.remove(&intent_pending_by_timestamp); + debug_assert!(removed); + + let intent_ready = intent_pending_by_nonce.ready(); + self.tx_intent_queue_ready.insert(intent_ready); + } + } + + #[cfg(any(test, feature = "testing"))] + self.nonce_cache_inner.insert(tx_mempool.contract_address(), tx_mempool.nonce_next); + // do not update mempool limits, block prod will update it with re-add txs. - Some(mempool_tx) + Some(tx_mempool) + } + + fn pop_tx_from_intent(&mut self, tx_queue_account: &TransactionIntentReady) -> MempoolTransaction { + let nonce_tx_mapping = self + .nonce_mapping + .get_mut(&tx_queue_account.contract_address) + .expect("Nonce chain does not match tx queue"); + + // Get the next ready transaction from the nonce chain + let (mempool_tx, nonce_tx_mapping_new_state) = nonce_tx_mapping.pop(); + if nonce_tx_mapping_new_state == NonceTxMappingNewState::Empty { + let removed = self.nonce_mapping.remove(&tx_queue_account.contract_address); + debug_assert!(removed.is_some()); + } + + // Update deployed contracts. + if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = &mempool_tx.tx { + self.deployed_contracts.decrement(tx.contract_address); + } + + mempool_tx } pub fn pop_next_chunk(&mut self, dest: &mut impl Extend, n: usize) { @@ -267,25 +758,111 @@ impl MempoolInner { } for tx in txs { let force = true; - self.insert_tx(tx, force, false).expect("Force insert tx should not error"); + // Since this is re-adding a transaction which was already popped + // from the mempool, we can be sure it is ready + let nonce = tx.nonce; + let nonce_next = tx.nonce_next; + self.insert_tx(tx, force, false, NonceInfo::ready(nonce, nonce_next)) + .expect("Force insert tx should not error"); } } - /// This is called by the block production after a batch of transaction is executed. 
- /// Mark the consumed txs as consumed, and re-add the transactions that are not consumed in the mempool. + // This is called by the block production when loading the pending block + // from db pub fn insert_txs( &mut self, txs: impl IntoIterator, force: bool, - ) -> Result<(), TxInsersionError> { + ) -> Result<(), TxInsertionError> { for tx in txs { - self.insert_tx(tx, force, true)?; + // Transactions are marked as ready as they were already included + // into the pending block + let nonce = tx.nonce; + let nonce_next = tx.nonce_next; + self.insert_tx(tx, force, true, NonceInfo::ready(nonce, nonce_next))?; } Ok(()) } + /// Returns true if [MempoolInner] has the transaction at a contract address + /// and [Nonce] in the ready queue. + pub fn nonce_is_ready(&self, sender_address: Felt, nonce: Nonce) -> bool { + let mempool_tx = self.nonce_mapping.get(&sender_address).map(|mapping| mapping.transactions.get(&nonce)); + let Some(Some(mempool_tx)) = mempool_tx else { + return false; + }; + + self.tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: sender_address, + timestamp: mempool_tx.arrived_at, + nonce, + nonce_next: mempool_tx.nonce_next, + phantom: std::marker::PhantomData, + }) + } + + /// Returns true if [MempoolInner] has a transaction from a contract address + /// with a specific [Nonce] in _both_ pending queues. 
+ #[cfg(any(test, feature = "testing"))] + pub fn nonce_is_pending(&self, sender_address: Felt, nonce: Nonce) -> bool { + let mempool_tx = self.nonce_mapping.get(&sender_address).map(|mapping| mapping.transactions.get(&nonce)); + let Some(Some(mempool_tx)) = mempool_tx else { + return false; + }; + + let queue = self + .tx_intent_queue_pending_by_nonce + .get(&sender_address) + .unwrap_or_else(|| panic!("Missing mapping for pending contract address {sender_address:x?}")); + + let contains_by_nonce = || { + queue.contains_key(&TransactionIntentPendingByNonce { + contract_address: sender_address, + timestamp: mempool_tx.arrived_at, + nonce, + nonce_next: mempool_tx.nonce_next, + phantom: std::marker::PhantomData, + }) + }; + + let contains_by_timestamp = || { + self.tx_intent_queue_pending_by_timestamp.contains(&TransactionIntentPendingByTimestamp { + contract_address: sender_address, + timestamp: mempool_tx.arrived_at, + nonce, + nonce_next: mempool_tx.nonce_next, + phantom: std::marker::PhantomData, + }) + }; + + contains_by_nonce() && contains_by_timestamp() + } + + /// Returns true if [MempoolInner] contains a transaction with a specific + /// [Nonce] from a given contract address. + #[cfg(any(test, feature = "testing"))] + pub fn nonce_exists(&self, sender_address: Felt, nonce: Nonce) -> bool { + self.nonce_mapping + .get(&sender_address) + .map(|mapping| mapping.transactions.contains_key(&nonce)) + .unwrap_or(false) + } + + /// Returns true if [MempoolInner] contains a transaction with a specific + /// hash which was emitted by a given contract address. 
+ #[cfg(any(test, feature = "testing"))] + pub fn tx_hash_exists(&self, sender_address: Felt, nonce: Nonce, tx_hash: TransactionHash) -> bool { + let mempool_tx = self.nonce_mapping.get(&sender_address).map(|mapping| mapping.transactions.get(&nonce)); + + let Some(Some(mempool_tx)) = mempool_tx else { + return false; + }; + + mempool_tx.tx_hash() == tx_hash + } + #[cfg(any(test, feature = "testing"))] pub fn is_empty(&self) -> bool { - self.tx_queue.is_empty() + self.tx_intent_queue_ready.is_empty() } } diff --git a/crates/madara/client/mempool/src/inner/nonce_chain.rs b/crates/madara/client/mempool/src/inner/nonce_chain.rs deleted file mode 100644 index 501d4cfbd..000000000 --- a/crates/madara/client/mempool/src/inner/nonce_chain.rs +++ /dev/null @@ -1,149 +0,0 @@ -use super::tx::{ArrivedAtTimestamp, MempoolTransaction}; -use crate::TxInsersionError; -use starknet_api::{core::Nonce, transaction::TransactionHash}; -use std::collections::{btree_map, BTreeMap}; -use std::{cmp, iter}; - -#[derive(Debug, Clone)] -pub struct OrderMempoolTransactionByNonce(pub MempoolTransaction); - -impl PartialEq for OrderMempoolTransactionByNonce { - fn eq(&self, other: &Self) -> bool { - self.cmp(other).is_eq() - } -} -impl Eq for OrderMempoolTransactionByNonce {} -impl Ord for OrderMempoolTransactionByNonce { - fn cmp(&self, other: &Self) -> cmp::Ordering { - self.0.nonce().cmp(&other.0.nonce()) - } -} -impl PartialOrd for OrderMempoolTransactionByNonce { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -/// Invariants: -/// - front_nonce, front_arrived_at and front_tx_hash must match the front transaction timestamp. -/// - No nonce chain should ever be empty in the mempool. -#[derive(Debug)] -pub struct NonceChain { - /// Use a BTreeMap to so that we can use the entry api. 
- // TODO(perf): to avoid some double lookups here, we should remove the `OrderMempoolTransactionByNonce` struct - // and make this a BTreeMap - pub(crate) transactions: BTreeMap, - pub(crate) front_arrived_at: ArrivedAtTimestamp, - pub(crate) front_nonce: Nonce, - pub(crate) front_tx_hash: TransactionHash, -} - -#[derive(Eq, PartialEq, Debug)] -pub enum InsertedPosition { - Front { former_head_arrived_at: ArrivedAtTimestamp }, - Other, -} - -#[derive(Debug)] -pub enum ReplacedState { - Replaced { previous: MempoolTransaction }, - NotReplaced, -} - -#[derive(Eq, PartialEq, Debug)] -pub enum NonceChainNewState { - Empty, - NotEmpty, -} - -impl NonceChain { - pub fn new_with_first_tx(tx: MempoolTransaction) -> Self { - Self { - front_arrived_at: tx.arrived_at, - front_tx_hash: tx.tx_hash(), - front_nonce: tx.nonce(), - transactions: iter::once((OrderMempoolTransactionByNonce(tx), ())).collect(), - } - } - - #[cfg(test)] - pub fn check_invariants(&self) { - assert!(!self.transactions.is_empty()); - let (front, _) = self.transactions.first_key_value().unwrap(); - assert_eq!(front.0.tx_hash(), self.front_tx_hash); - assert_eq!(front.0.nonce(), self.front_nonce); - assert_eq!(front.0.arrived_at, self.front_arrived_at); - } - - /// Returns where in the chain it was inserted. - /// When `force` is `true`, this function should never return any error. - pub fn insert( - &mut self, - mempool_tx: MempoolTransaction, - force: bool, - ) -> Result<(InsertedPosition, ReplacedState), TxInsersionError> { - let mempool_tx_arrived_at = mempool_tx.arrived_at; - let mempool_tx_nonce = mempool_tx.nonce(); - let mempool_tx_hash = mempool_tx.tx_hash(); - - let replaced = if force { - // double lookup here unfortunately.. that's because we're using the keys in a hacky way and can't update the - // entry key using the entry api. 
- let mempool_tx = OrderMempoolTransactionByNonce(mempool_tx); - if let Some((previous, _)) = self.transactions.remove_entry(&mempool_tx) { - let previous = previous.0.clone(); - let inserted = self.transactions.insert(mempool_tx, ()); - debug_assert!(inserted.is_none()); - ReplacedState::Replaced { previous } - } else { - let inserted = self.transactions.insert(mempool_tx, ()); - debug_assert!(inserted.is_none()); - ReplacedState::NotReplaced - } - } else { - match self.transactions.entry(OrderMempoolTransactionByNonce(mempool_tx)) { - btree_map::Entry::Occupied(entry) => { - // duplicate nonce, either it's because the hash is duplicated or nonce conflict with another tx. - if entry.key().0.tx_hash() == mempool_tx_hash { - return Err(TxInsersionError::DuplicateTxn); - } else { - return Err(TxInsersionError::NonceConflict); - } - } - btree_map::Entry::Vacant(entry) => *entry.insert(()), - } - - ReplacedState::NotReplaced - }; - - let position = if self.front_nonce >= mempool_tx_nonce { - // We insrted at the front here - let former_head_arrived_at = core::mem::replace(&mut self.front_arrived_at, mempool_tx_arrived_at); - self.front_nonce = mempool_tx_nonce; - self.front_tx_hash = mempool_tx_hash; - InsertedPosition::Front { former_head_arrived_at } - } else { - InsertedPosition::Other - }; - - debug_assert_eq!( - self.transactions.first_key_value().expect("Getting the first tx").0 .0.tx_hash(), - self.front_tx_hash - ); - - Ok((position, replaced)) - } - - pub fn pop(&mut self) -> (MempoolTransaction, NonceChainNewState) { - // TODO(perf): avoid double lookup - let (tx, _) = self.transactions.pop_first().expect("Nonce chain should not be empty"); - if let Some((new_front, _)) = self.transactions.first_key_value() { - self.front_arrived_at = new_front.0.arrived_at; - self.front_tx_hash = new_front.0.tx_hash(); - self.front_nonce = new_front.0.nonce(); - (tx.0, NonceChainNewState::NotEmpty) - } else { - (tx.0, NonceChainNewState::Empty) - } - } -} diff --git 
a/crates/madara/client/mempool/src/inner/nonce_mapping.rs b/crates/madara/client/mempool/src/inner/nonce_mapping.rs new file mode 100644 index 000000000..74ea81e3c --- /dev/null +++ b/crates/madara/client/mempool/src/inner/nonce_mapping.rs @@ -0,0 +1,82 @@ +use super::tx::MempoolTransaction; +use crate::TxInsertionError; +use starknet_api::core::Nonce; +use std::collections::{btree_map, BTreeMap}; +use std::iter; + +/// A wrapper around a [BTreeMap] which provides a mapping from a [Nonce] to the +/// associated transaction. +#[derive(Debug)] +#[cfg_attr(any(test, feature = "testing"), derive(Clone))] +pub struct NonceTxMapping { + /// An ordered mapping of the transactions to come from an account, accessed + /// by [Nonce]. + pub(crate) transactions: BTreeMap, +} + +#[derive(Debug)] +pub enum ReplacedState { + Replaced { previous: MempoolTransaction }, + NotReplaced, +} + +#[derive(Eq, PartialEq, Debug)] +pub enum NonceTxMappingNewState { + Empty, + NotEmpty, +} + +impl NonceTxMapping { + pub fn new_with_first_tx(tx: MempoolTransaction, nonce: Nonce) -> Self { + Self { transactions: iter::once((nonce, tx)).collect() } + } + + /// Returns where in the chain it was inserted. + /// When `force` is `true`, this function should never return any error. + pub fn insert( + &mut self, + mempool_tx: MempoolTransaction, + nonce: Nonce, + force: bool, + ) -> Result { + let replaced = if force { + match self.transactions.entry(nonce) { + btree_map::Entry::Vacant(entry) => { + entry.insert(mempool_tx); + ReplacedState::NotReplaced + } + btree_map::Entry::Occupied(mut entry) => { + let previous = entry.insert(mempool_tx); + ReplacedState::Replaced { previous } + } + } + } else { + match self.transactions.entry(nonce) { + btree_map::Entry::Occupied(entry) => { + // duplicate nonce, either it's because the hash is + // duplicated or nonce conflict with another tx. 
+ if entry.get().tx_hash() == mempool_tx.tx_hash() { + return Err(TxInsertionError::DuplicateTxn); + } else { + return Err(TxInsertionError::NonceConflict); + } + } + btree_map::Entry::Vacant(entry) => { + entry.insert(mempool_tx); + ReplacedState::NotReplaced + } + } + }; + + Ok(replaced) + } + + pub fn pop(&mut self) -> (MempoolTransaction, NonceTxMappingNewState) { + let (_, tx) = self.transactions.pop_first().expect("Nonce chain should not be empty"); + if self.transactions.is_empty() { + (tx, NonceTxMappingNewState::Empty) + } else { + (tx, NonceTxMappingNewState::NotEmpty) + } + } +} diff --git a/crates/madara/client/mempool/src/inner/proptest.rs b/crates/madara/client/mempool/src/inner/proptest.rs index e448e07a5..a36304b2f 100644 --- a/crates/madara/client/mempool/src/inner/proptest.rs +++ b/crates/madara/client/mempool/src/inner/proptest.rs @@ -1,269 +1,347 @@ +//! A [proptest], or property test, is a hypothesis-based test where a function +//! is repeatedly tested against procedurally generated inputs. This continues +//! until either: +//! +//! - Enough success cases have passed. +//! - A failure case has been found. +//! +//! If a failure case is detected, [proptest] will attempt to regress that case +//! into a minimal reproducible example by simplifying ([shrinking]) the inputs. +//! Failure cases, alongside the seed used to generate these cases, are stored +//! inside `proptest-regressions`. These failure cases will _always be run_ at +//! the start of a new proptest to make sure the code functions against +//! historical bugs. DO NOT MANUALLY EDIT OR DELETE THIS FILE. +//! +//! We use [state machine testing] to validate the execution of the inner +//! mempool against a procedurally run state machine. Shrinking still applies, +//! so if a failure case is found, [proptest_state_machine] will automatically +//! shrink it down to a minimal state transition. +//! +//! > The code in this section is based off the [state machine heap] example +//! 
> in the proptest state machine repository. +//! +//! [shrinking]: https://proptest-rs.github.io/proptest/proptest/tutorial/shrinking-basics.html +//! [state machine testing]: https://proptest-rs.github.io/proptest/proptest/state-machine.html +//! [state machine heap]: https://github.com/proptest-rs/proptest/blob/main/proptest-state-machine/examples/state_machine_heap.rs + #![cfg(test)] use super::*; use ::proptest::prelude::*; -use blockifier::{ - execution::contract_class::ClassInfo, - test_utils::{contracts::FeatureContract, CairoVersion}, - transaction::{transaction_execution::Transaction, transaction_types::TransactionType}, -}; -use mc_exec::execution::TxInfo; -use mp_convert::ToFelt; use proptest_derive::Arbitrary; -use starknet_api::{ - core::{calculate_contract_address, ChainId, Nonce}, - data_availability::DataAvailabilityMode, - transaction::{ - ContractAddressSalt, DeclareTransactionV3, DeployAccountTransactionV3, InvokeTransactionV3, Resource, - ResourceBounds, ResourceBoundsMapping, TransactionHash, TransactionHasher, TransactionVersion, - }, -}; +use proptest_state_machine::{ReferenceStateMachine, StateMachineTest}; use starknet_types_core::felt::Felt; -use blockifier::abi::abi_utils::selector_from_name; -use starknet_api::transaction::Fee; -use std::{ - collections::HashSet, - fmt, - time::{Duration, SystemTime}, -}; +use std::time::Duration; -lazy_static::lazy_static! { - static ref DUMMY_CLASS: ClassInfo = { - let dummy_contract_class = FeatureContract::TestContract(CairoVersion::Cairo1); - ClassInfo::new(&dummy_contract_class.get_class(), 100, 100).unwrap() - }; +proptest_state_machine::prop_state_machine! { + #![proptest_config(proptest::ProptestConfig { + // Enable verbose mode to make the state machine test print the + // transitions for each case. + verbose: 1, + // The number of tests which need to be valid for this to pass. + cases: 64, + // Max duration (in milliseconds) for each generated case. 
+ timeout: 1_000, + ..Default::default() + })] - static ref NOW: SystemTime = SystemTime::now(); + /// Simulates transaction insertion and removal into [MempoolInner]. + /// + /// Each iteration will simulate the insertion of between 1 and 256 + /// [MempoolTransaction]s into the mempool. Note that insertions happen + /// twice as often as popping from the mempool. + #[test] + fn mempool_proptest(sequential 1..256 => MempoolInner); } -struct Insert(MempoolTransaction, /* force */ bool); -impl fmt::Debug for Insert { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "Insert(ty={:?},arrived_at={:?},tx_hash={:?},contract_address={:?},nonce={:?},force={:?})", - self.0.tx.tx_type(), - self.0.arrived_at, - self.0.tx_hash(), - self.0.contract_address(), - self.0.nonce(), - self.1, - ) - } +pub struct MempoolStateMachine; + +/// Transactions to insert into the [MempoolInner] during proptesting. +#[derive(Clone, Debug, Arbitrary)] +pub enum TxTy { + Declare, + DeployAccount, + Invoke, + L1Handler, } -impl Arbitrary for Insert { - type Parameters = (); - type Strategy = BoxedStrategy; - fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { - #[derive(Debug, Arbitrary)] - enum TxTy { - Declare, - DeployAccount, - InvokeFunction, - L1Handler, +impl TxTy { + fn tx(self, contract_address: Felt) -> blockifier::transaction::transaction_execution::Transaction { + match self { + Self::Declare => blockifier::transaction::transaction_execution::Transaction::AccountTransaction( + blockifier::transaction::account_transaction::AccountTransaction::Declare( + blockifier::transaction::transactions::DeclareTransaction::new( + starknet_api::transaction::DeclareTransaction::V0( + starknet_api::transaction::DeclareTransactionV0V1 { + sender_address: ContractAddress::try_from(contract_address).unwrap(), + ..Default::default() + }, + ), + starknet_api::transaction::TransactionHash::default(), + blockifier::execution::contract_class::ClassInfo::new( + 
&blockifier::execution::contract_class::ContractClass::V0( + blockifier::execution::contract_class::ContractClassV0::default(), + ), + 0, + 0, + ) + .unwrap(), + ) + .unwrap(), + ), + ), + Self::DeployAccount => blockifier::transaction::transaction_execution::Transaction::AccountTransaction( + blockifier::transaction::account_transaction::AccountTransaction::DeployAccount( + blockifier::transaction::transactions::DeployAccountTransaction { + tx: starknet_api::transaction::DeployAccountTransaction::V1( + starknet_api::transaction::DeployAccountTransactionV1::default(), + ), + tx_hash: starknet_api::transaction::TransactionHash::default(), + contract_address: ContractAddress::try_from(contract_address).unwrap(), + only_query: false, + }, + ), + ), + Self::Invoke => blockifier::transaction::transaction_execution::Transaction::AccountTransaction( + blockifier::transaction::account_transaction::AccountTransaction::Invoke( + blockifier::transaction::transactions::InvokeTransaction { + tx: starknet_api::transaction::InvokeTransaction::V0( + starknet_api::transaction::InvokeTransactionV0 { + contract_address: ContractAddress::try_from(contract_address).unwrap(), + ..Default::default() + }, + ), + tx_hash: starknet_api::transaction::TransactionHash::default(), + only_query: false, + }, + ), + ), + Self::L1Handler => blockifier::transaction::transaction_execution::Transaction::L1HandlerTransaction( + blockifier::transaction::transactions::L1HandlerTransaction { + tx: starknet_api::transaction::L1HandlerTransaction { + contract_address: ContractAddress::try_from(contract_address).unwrap(), + ..Default::default() + }, + tx_hash: starknet_api::transaction::TransactionHash::default(), + paid_fee_on_l1: starknet_api::transaction::Fee::default(), + }, + ), } + } +} + +prop_compose! { + /// This function defines a strategy: that is to say, a way for [proptest] + /// to generate and _shrink_ values to a minimal reproducible case. 
+ /// + /// You can learn more about [strategies] and [shrinking] in the + /// [proptest book]. + /// + /// [strategies]: https://proptest-rs.github.io/proptest/proptest/tutorial/macro-prop-compose.html + /// [shrinking]: https://proptest-rs.github.io/proptest/proptest/tutorial/shrinking-basics.html + /// [proptest book]: https://proptest-rs.github.io/proptest/proptest/index.html + fn felt_upto(range: u128)(felt in 0..range) -> Felt { + Felt::from(felt) + } +} - <(TxTy, u8, u8, u8, bool)>::arbitrary() - .prop_map(|(ty, arrived_at, contract_address, nonce, force)| { - let arrived_at = *NOW + Duration::from_millis(arrived_at.into()); - let contract_addr = ContractAddress::try_from(Felt::from(contract_address)).unwrap(); - let nonce = Nonce(Felt::from(nonce)); +prop_compose! { + fn nonce_upto(range: u128)(felt in felt_upto(range)) -> Nonce { + Nonce(felt) + } +} - let resource_bounds = ResourceBoundsMapping( - [ - (Resource::L1Gas, ResourceBounds { max_amount: 5, max_price_per_unit: 5 }), - (Resource::L2Gas, ResourceBounds { max_amount: 5, max_price_per_unit: 5 }), - ] - .into(), - ); +prop_compose! { + fn mempool_transaction(contract_address: Felt)( + txty in any::(), + dt in -5400..5400i32, + nonce in nonce_upto(4), + ) -> MempoolTransaction { + // IMPORTANT: we fiddle with the transaction arrival time so it + // is anywhere between 1h30 before now, or 1h30 into the future. Note + // that the transaction age limit for the mempool is set to 1h. 
+ let arrived_at = if dt < 0 { + ArrivedAtTimestamp::now().checked_sub(Duration::from_secs(dt.unsigned_abs() as u64)).unwrap() + } else { + ArrivedAtTimestamp::now().checked_add(Duration::from_secs(dt as u64)).unwrap() + }; + let tx = txty.tx(contract_address); - let tx = match ty { - TxTy::Declare => starknet_api::transaction::Transaction::Declare( - starknet_api::transaction::DeclareTransaction::V3(DeclareTransactionV3 { - resource_bounds, - tip: Default::default(), - signature: Default::default(), - nonce, - class_hash: Default::default(), - compiled_class_hash: Default::default(), - sender_address: contract_addr, - nonce_data_availability_mode: DataAvailabilityMode::L1, - fee_data_availability_mode: DataAvailabilityMode::L1, - paymaster_data: Default::default(), - account_deployment_data: Default::default(), - }), - ), - TxTy::DeployAccount => starknet_api::transaction::Transaction::DeployAccount( - starknet_api::transaction::DeployAccountTransaction::V3(DeployAccountTransactionV3 { - resource_bounds, - tip: Default::default(), - signature: Default::default(), - nonce, - class_hash: Default::default(), - nonce_data_availability_mode: DataAvailabilityMode::L1, - fee_data_availability_mode: DataAvailabilityMode::L1, - paymaster_data: Default::default(), - contract_address_salt: ContractAddressSalt(contract_addr.to_felt()), - constructor_calldata: Default::default(), - }), - ), - TxTy::InvokeFunction => starknet_api::transaction::Transaction::Invoke( - starknet_api::transaction::InvokeTransaction::V3(InvokeTransactionV3 { - resource_bounds, - tip: Default::default(), - signature: Default::default(), - nonce, - sender_address: contract_addr, - calldata: Default::default(), - nonce_data_availability_mode: DataAvailabilityMode::L1, - fee_data_availability_mode: DataAvailabilityMode::L1, - paymaster_data: Default::default(), - account_deployment_data: Default::default(), - }), - ), - // TODO: maybe update the values? 
- TxTy::L1Handler => starknet_api::transaction::Transaction::L1Handler( - starknet_api::transaction::L1HandlerTransaction { - version: TransactionVersion::ZERO, - nonce, - contract_address: contract_addr, - entry_point_selector: selector_from_name("l1_handler_set_value"), - calldata: Default::default(), - }, - ), - }; + // IMPORTANT: the nonce and the address of the contracts sending + // them should be kept low or else we will never be popping + // ready transactions. + let nonce_next = nonce.try_increment().unwrap(); - let deployed = if let starknet_api::transaction::Transaction::DeployAccount(tx) = &tx { - Some( - calculate_contract_address( - tx.contract_address_salt(), - Default::default(), - &Default::default(), - Default::default(), - ) - .unwrap(), - ) - } else { - None - }; + MempoolTransaction { tx, arrived_at, converted_class: None, nonce, nonce_next } + } +} - // providing dummy l1 gas for now - let l1_gas_paid = match &tx { - starknet_api::transaction::Transaction::L1Handler(_) => Some(Fee(1)), - _ => None, - }; +prop_compose! { + /// This is a higher-order strategy, that is to say: a strategy which is + /// generated from another strategy. + /// + /// Since the syntax used to generate this can be pretty confusing, I + /// suggest you check out the section of the [proptest book] on [strategies] + /// and [higher-order strategies]. 
+ /// + /// [proptest book]: https://proptest-rs.github.io/proptest/proptest/index.html + /// [strategies]: https://proptest-rs.github.io/proptest/proptest/tutorial/macro-prop-compose.html + /// [higher-order strategies]: https://proptest-rs.github.io/proptest/proptest/tutorial/higher-order.html + fn mempool_transition_push(mempool: MempoolInner)(contract_address in felt_upto(100))( + mut tx in mempool_transaction(contract_address), + force in any::(), + ) -> MempoolTransition { + let target = mempool.nonce_cache_inner.get(&tx.contract_address()).copied().unwrap_or(Nonce(Felt::ZERO)); - let tx_hash = tx.calculate_transaction_hash(&ChainId::Mainnet, &TransactionVersion::THREE).unwrap(); + // IMPORTANT: we fiddle with the generated nonce to make sure it is + // always greater than any previously popped transactions for that + // account. Normally this would be checked by the outer mempool, so we + // can be sure that transactions with an invalid nonce would not be + // inserted into the inner mempool anyway. + if tx.nonce < target { + tx.nonce = Nonce(*target + *tx.nonce); + tx.nonce_next = tx.nonce.try_increment().unwrap(); + } + let (nonce, nonce_next) = (tx.nonce, tx.nonce_next); - let tx = Transaction::from_api(tx, tx_hash, Some(DUMMY_CLASS.clone()), l1_gas_paid, deployed, false) - .unwrap(); + let nonce_info = if nonce == Nonce(Felt::ZERO) || nonce == target { + NonceInfo::ready(nonce, nonce_next) + } else { + NonceInfo::pending(nonce, nonce_next) + }; - Insert(MempoolTransaction { tx, arrived_at, converted_class: None }, force) - }) - .boxed() + MempoolTransition::Push { tx, force, nonce_info } } } -#[derive(Debug, Arbitrary)] -enum Operation { - Insert(Insert), +/// A valid state transitions for the proptest. 
+/// +/// We can only either [pop] or [push] transactions into [MempoolInner] +/// +/// [pop]: MempoolTransition::Pop +/// [push]: MempoolTransition::Push +#[derive(Clone, Debug)] +pub enum MempoolTransition { + /// Removes the next ready transaction from the mempool, if any. Pop, + /// Tries to add a transaction into the mempool. + Push { tx: MempoolTransaction, force: bool, nonce_info: NonceInfo }, } -#[derive(Debug, Arbitrary)] -struct MempoolInvariantsProblem(Vec); -impl MempoolInvariantsProblem { - fn check(&self) { - tracing::debug!("\n\n\n\n\nCase: {:#?}", self); - let mut mempool = MempoolInner::new(MempoolLimits::for_testing()); - mempool.check_invariants(); +impl ReferenceStateMachine for MempoolStateMachine { + type State = MempoolInner; + type Transition = MempoolTransition; - let mut inserted = HashSet::new(); - let mut inserted_contract_nonce_pairs = HashSet::new(); - let mut new_contracts = HashSet::new(); + fn init_state() -> BoxedStrategy { + Just(MempoolInner::new(MempoolLimits { + // Transactions in the mempool cannot be older than 1h + max_age: Some(Duration::from_secs(3_600)), + ..MempoolLimits::for_testing() + })) + .boxed() + } - let handle_pop = |res: Option, - inserted: &mut HashSet, - inserted_contract_nonce_pairs: &mut HashSet<(Nonce, ContractAddress)>, - new_contracts: &mut HashSet| { - if let Some(res) = &res { - let removed = inserted.remove(&res.tx_hash()); - assert!(removed); - let removed = inserted_contract_nonce_pairs.remove(&(res.nonce(), res.contract_address())); - assert!(removed); + fn transitions(state: &Self::State) -> BoxedStrategy { + // Insertions happen twice as often as removals + prop_oneof![ + 1 => Just(MempoolTransition::Pop), + 2 => mempool_transition_push(state.clone()), + ] + .boxed() + } - if res.tx.tx_type() == TransactionType::DeployAccount { - let _removed = new_contracts.remove(&res.contract_address()); - // there can be multiple deploy_account txs. 
- // assert!(removed) - } - } else { - assert!(inserted.is_empty()) + fn apply(mut state: Self::State, transition: &Self::Transition) -> Self::State { + match transition.to_owned() { + MempoolTransition::Pop => { + state.pop_next(); } - tracing::trace!("Popped {:?}", res.map(|el| Insert(el, false))); - }; + MempoolTransition::Push { tx, force, nonce_info } => { + // We do not check invariants here + let update_limits = true; + let _ = state.insert_tx(tx, force, update_limits, nonce_info); + } + } + + state + } +} - for op in &self.0 { - match op { - Operation::Insert(insert) => { - let force = insert.1; - tracing::trace!("Insert {:?}", insert); - let res = mempool.insert_tx(insert.0.clone(), insert.1, true); +impl StateMachineTest for MempoolInner { + type SystemUnderTest = Self; + type Reference = MempoolStateMachine; - let expected = if !force - && inserted_contract_nonce_pairs.contains(&(insert.0.nonce(), insert.0.contract_address())) - { - if inserted.contains(&insert.0.tx_hash()) { - Err(TxInsersionError::DuplicateTxn) - } else { - Err(TxInsersionError::NonceConflict) - } - } else { - Ok(()) - }; + fn init_test(_ref_state: &::State) -> Self::SystemUnderTest { + // Transactions cannot live longer than 1h + Self::new(MempoolLimits { max_age: Some(Duration::from_secs(3_600)), ..MempoolLimits::for_testing() }) + } + + fn apply( + mut state: Self::SystemUnderTest, + _ref_state: &::State, + transition: ::Transition, + ) -> Self::SystemUnderTest { + match transition { + MempoolTransition::Pop => { + state.pop_next(); + } + MempoolTransition::Push { tx, force, nonce_info } => { + // tx info + let readiness = nonce_info.readiness.clone(); + let nonce = nonce_info.nonce; + let contract_address = **tx.contract_address(); + let tx_hash = tx.tx_hash(); + + // age check + let arrived_at = tx.arrived_at; + let now = ArrivedAtTimestamp::now(); + let too_old = if arrived_at < now { + now.duration_since(arrived_at).unwrap() > 
state.limiter.config.max_age.unwrap_or(Duration::MAX) + } else { + false + }; - assert_eq!(expected, res); + // apply the actual state change + let update_limits = true; + let res = state.insert_tx(tx, force, update_limits, nonce_info); - if expected.is_ok() { - if insert.0.tx.tx_type() == TransactionType::DeployAccount { - new_contracts.insert(insert.0.contract_address()); + // check for invalid state + match res { + Ok(_) => match readiness { + NonceStatus::Ready => assert!( + state.nonce_is_ready(contract_address, nonce), + "tx at {contract_address:x?} and {nonce:?} should be ready" + ), + NonceStatus::Pending => assert!( + state.nonce_is_pending(contract_address, nonce), + "tx at {contract_address:x?} and {nonce:?} should be pending" + ), + }, + Err(err) => { + assert!(!force, "Force-insertions should not error!"); + match err { + TxInsertionError::NonceConflict => assert!( + state.nonce_exists(contract_address, nonce), + "tx at {contract_address:x?} and {nonce:?} should already exist" + ), + TxInsertionError::DuplicateTxn => { + assert!(state.tx_hash_exists(contract_address, nonce, tx_hash)) + } + TxInsertionError::Limit(_) => { + assert!( + too_old, + "Incorrectly marked transaction at {contract_address:x?} and P{nonce:?} as too old" + ) + } } - inserted.insert(insert.0.tx_hash()); - inserted_contract_nonce_pairs.insert((insert.0.nonce(), insert.0.contract_address())); } - - tracing::trace!("Result {:?}", res); - } - Operation::Pop => { - tracing::trace!("Pop"); - let res = mempool.pop_next(); - handle_pop(res, &mut inserted, &mut inserted_contract_nonce_pairs, &mut new_contracts); } } - tracing::trace!("State: {mempool:#?}"); - mempool.check_invariants(); - } - - loop { - tracing::trace!("Pop"); - let Some(res) = mempool.pop_next() else { break }; - handle_pop(Some(res), &mut inserted, &mut inserted_contract_nonce_pairs, &mut new_contracts); - tracing::trace!("State: {mempool:#?}"); - mempool.check_invariants(); } - assert!(inserted.is_empty()); - 
assert!(inserted_contract_nonce_pairs.is_empty()); - assert!(new_contracts.is_empty()); - tracing::trace!("Done :)"); + state } -} -::proptest::proptest! { - #[tracing_test::traced_test] - #[test] - fn proptest_mempool(pb in any::()) { - pb.check(); + fn check_invariants(_state: &Self::SystemUnderTest, ref_state: &::State) { + ref_state.check_invariants(); } } diff --git a/crates/madara/client/mempool/src/inner/tx.rs b/crates/madara/client/mempool/src/inner/tx.rs index 0106a67df..6a51997d6 100644 --- a/crates/madara/client/mempool/src/inner/tx.rs +++ b/crates/madara/client/mempool/src/inner/tx.rs @@ -6,22 +6,42 @@ use mp_convert::FeltHexDisplay; use starknet_api::{ core::{ContractAddress, Nonce}, transaction::TransactionHash, + StarknetApiError, }; use std::{fmt, time::SystemTime}; pub type ArrivedAtTimestamp = SystemTime; +/// Wrapper around a blockifier [Transaction] with some added information needed +/// by the [Mempool] +/// +/// [Mempool]: super::super::Mempool pub struct MempoolTransaction { pub tx: Transaction, + /// Time at which the transaction was inserted into the mempool (+ or -) pub arrived_at: ArrivedAtTimestamp, + /// TODO: What is this? pub converted_class: Option, + /// We need this to be able to retrieve the transaction once from the + /// [NonceTxMapping] once it has been inserted into the [Mempool] + /// + /// [NonceTxMapping]: super::NonceTxMapping + /// [Mempool]: super::super::Mempool + pub nonce: Nonce, + /// We include this in the struct to avoid having to recompute the next + /// nonce of a ready transaction once it is re-added into the [Mempool] by + /// the block production task. 
+ /// + /// [Mempool]: super::super::Mempool + pub nonce_next: Nonce, } impl fmt::Debug for MempoolTransaction { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("MempoolTransaction") .field("tx_hash", &self.tx_hash().hex_display()) - .field("nonce", &self.nonce().hex_display()) + .field("nonce", &self.nonce.hex_display()) + .field("nonce_next", &self.nonce_next.hex_display()) .field("contract_address", &self.contract_address().hex_display()) .field("tx_type", &self.tx.tx_type()) .field("arrived_at", &self.arrived_at) @@ -35,11 +55,24 @@ impl Clone for MempoolTransaction { tx: clone_transaction(&self.tx), arrived_at: self.arrived_at, converted_class: self.converted_class.clone(), + nonce: self.nonce, + nonce_next: self.nonce_next, } } } impl MempoolTransaction { + pub fn new_from_blockifier_tx( + tx: Transaction, + arrived_at: ArrivedAtTimestamp, + converted_class: Option, + ) -> Result { + let nonce = nonce(&tx); + let nonce_next = nonce.try_increment()?; + + Ok(Self { tx, arrived_at, converted_class, nonce, nonce_next }) + } + pub fn clone_tx(&self) -> Transaction { clone_transaction(&self.tx) } diff --git a/crates/madara/client/mempool/src/lib.rs b/crates/madara/client/mempool/src/lib.rs index 9337cf148..91eb6fc27 100644 --- a/crates/madara/client/mempool/src/lib.rs +++ b/crates/madara/client/mempool/src/lib.rs @@ -7,6 +7,7 @@ use blockifier::transaction::transactions::{ }; use header::make_pending_header; use mc_db::db_block_id::DbBlockId; +use mc_db::mempool_db::{DbMempoolTxInfoDecoder, NonceInfo}; use mc_db::{MadaraBackend, MadaraStorageError}; use mc_exec::ExecutionContext; use metrics::MempoolMetrics; @@ -20,11 +21,13 @@ use mp_transactions::L1HandlerTransactionResult; use mp_transactions::ToBlockifierError; use starknet_api::core::{ContractAddress, Nonce}; use starknet_api::transaction::TransactionHash; +use starknet_api::StarknetApiError; use starknet_types_core::felt::Felt; use starknet_types_rpc::{ AddInvokeTransactionResult, 
BroadcastedDeclareTxn, BroadcastedDeployAccountTxn, BroadcastedInvokeTxn, BroadcastedTxn, ClassAndTxnHash, ContractAndTxnHash, }; +use std::collections::{BTreeMap, VecDeque}; use std::sync::{Arc, RwLock}; use std::time::SystemTime; use tx::blockifier_to_saved_tx; @@ -43,50 +46,61 @@ mod tx; pub use inner::*; #[derive(thiserror::Error, Debug)] -pub enum Error { +pub enum MempoolError { #[error("Storage error: {0:#}")] StorageError(#[from] MadaraStorageError), #[error("Validation error: {0:#}")] Validation(#[from] StatefulValidatorError), #[error(transparent)] - InnerMempool(#[from] TxInsersionError), + InnerMempool(#[from] TxInsertionError), #[error(transparent)] Exec(#[from] mc_exec::Error), + #[error(transparent)] + StarknetApi(#[from] StarknetApiError), #[error("Preprocessing transaction: {0:#}")] BroadcastedToBlockifier(#[from] ToBlockifierError), } -impl Error { +impl MempoolError { pub fn is_internal(&self) -> bool { - matches!(self, Error::StorageError(_) | Error::BroadcastedToBlockifier(_)) + matches!(self, MempoolError::StorageError(_) | MempoolError::BroadcastedToBlockifier(_)) } } +#[cfg(any(test, feature = "testing"))] +#[allow(unused)] +pub(crate) trait CheckInvariants { + /// Validates the invariants of this struct. + /// + /// This method should cause a panic if these invariants are not met. 
+ fn check_invariants(&self); +} + #[cfg_attr(test, mockall::automock)] pub trait MempoolProvider: Send + Sync { - fn accept_invoke_tx(&self, tx: BroadcastedInvokeTxn) -> Result, Error>; - fn accept_declare_v0_tx(&self, tx: BroadcastedDeclareTransactionV0) -> Result, Error>; - fn accept_declare_tx(&self, tx: BroadcastedDeclareTxn) -> Result, Error>; - fn accept_deploy_account_tx( + fn tx_accept_invoke( + &self, + tx: BroadcastedInvokeTxn, + ) -> Result, MempoolError>; + fn tx_accept_declare_v0(&self, tx: BroadcastedDeclareTransactionV0) -> Result, MempoolError>; + fn tx_accept_declare(&self, tx: BroadcastedDeclareTxn) -> Result, MempoolError>; + fn tx_accept_deploy_account( &self, tx: BroadcastedDeployAccountTxn, - ) -> Result, Error>; - fn accept_l1_handler_tx( + ) -> Result, MempoolError>; + fn tx_accept_l1_handler( &self, tx: L1HandlerTransaction, paid_fees_on_l1: u128, - ) -> Result; - fn take_txs_chunk + 'static>(&self, dest: &mut I, n: usize) - where - Self: Sized; - fn take_tx(&self) -> Option; - fn re_add_txs + 'static>( + ) -> Result; + fn txs_take_chunk(&self, dest: &mut VecDeque, n: usize); + fn tx_take(&mut self) -> Option; + fn tx_mark_included(&self, contract_address: &Felt); + fn txs_re_add( &self, - txs: I, + txs: VecDeque, consumed_txs: Vec, - ) -> Result<(), Error> - where - Self: Sized; - fn insert_txs_no_validation(&self, txs: Vec, force: bool) -> Result<(), Error> + ) -> Result<(), MempoolError>; + fn txs_insert_no_validation(&self, txs: Vec, force: bool) -> Result<(), MempoolError> where Self: Sized; fn chain_id(&self) -> Felt; @@ -97,6 +111,7 @@ pub struct Mempool { l1_data_provider: Arc, inner: RwLock, metrics: MempoolMetrics, + nonce_cache: RwLock>, } impl Mempool { @@ -106,18 +121,20 @@ impl Mempool { l1_data_provider, inner: RwLock::new(MempoolInner::new(limits)), metrics: MempoolMetrics::register(), + nonce_cache: RwLock::new(BTreeMap::new()), } } pub fn load_txs_from_db(&mut self) -> Result<(), anyhow::Error> { for res in 
self.backend.get_mempool_transactions() { - let (tx_hash, saved_tx, converted_class) = res.context("Getting mempool transactions")?; + let (tx_hash, DbMempoolTxInfoDecoder { saved_tx, converted_class, nonce_readiness }) = + res.context("Getting mempool transactions")?; let (tx, arrived_at) = saved_to_blockifier_tx(saved_tx, tx_hash, &converted_class) .context("Converting saved tx to blockifier")?; - if let Err(err) = self.accept_tx(tx, converted_class, arrived_at) { + if let Err(err) = self.accept_tx(tx, converted_class, arrived_at, nonce_readiness) { match err { - Error::InnerMempool(TxInsersionError::Limit(MempoolLimitReached::Age { .. })) => {} // do nothing + MempoolError::InnerMempool(TxInsertionError::Limit(MempoolLimitReached::Age { .. })) => {} // do nothing err => tracing::warn!("Could not re-add mempool transaction from db: {err:#}"), } } @@ -131,7 +148,8 @@ impl Mempool { tx: Transaction, converted_class: Option, arrived_at: SystemTime, - ) -> Result<(), Error> { + nonce_info: NonceInfo, + ) -> Result<(), MempoolError> { // Get pending block. let pending_block_info = if let Some(block) = self.backend.get_block_info(&DbBlockId::Pending)? { block @@ -177,14 +195,20 @@ impl Mempool { tracing::debug!("Adding to inner mempool tx_hash={:#x}", tx_hash); // Add to db let saved_tx = blockifier_to_saved_tx(&tx, arrived_at); - self.backend.save_mempool_transaction(&saved_tx, tx_hash, &converted_class)?; + + // TODO: should we update this to store only if the mempool accepts + // this transaction? 
+ self.backend.save_mempool_transaction(&saved_tx, tx_hash, &converted_class, &nonce_info)?; // Add it to the inner mempool let force = false; + let nonce = nonce_info.nonce; + let nonce_next = nonce_info.nonce_next; self.inner.write().expect("Poisoned lock").insert_tx( - MempoolTransaction { tx, arrived_at, converted_class }, + MempoolTransaction { tx, arrived_at, converted_class, nonce, nonce_next }, force, true, + nonce_info, )?; self.metrics.accepted_transaction_counter.add(1, &[]); @@ -197,6 +221,67 @@ impl Mempool { pub fn is_empty(&self) -> bool { self.inner.read().expect("Poisoned lock").is_empty() } + + /// Determines the status of a transaction based on the address of the + /// contract sending it and its nonce. + /// + /// Several mechanisms are used to keep the latest nonce for a contract in + /// sync with the state of the node, even if the db is not being updated + /// fast enough. These include keeping a mapping of the latest nonce for + /// contracts to be included in the upcoming block as well as checking the + /// state of the mempool to see if previous transactions are marked as + /// ready. + fn retrieve_nonce_info(&self, sender_address: Felt, nonce: Felt) -> Result { + let nonce = Nonce(nonce); + let nonce_next = nonce.try_increment()?; + + let nonce_prev_check = || { + // We don't need an underflow check here as nonces are incremental + // and non negative, so there is no nonce s.t nonce != nonce_target, + // nonce < nonce_target & nonce = 0 + let nonce_prev = Nonce(nonce.0 - Felt::ONE); + let nonce_prev_ready = self.inner.read().expect("Poisoned lock").nonce_is_ready(sender_address, nonce_prev); + + // If the mempool has the transaction before this one ready, then + // this transaction is ready too. Even if the db has not been + // updated yet, this transaction will always be polled and executed + // afterwards. 
+ if nonce_prev_ready { + Ok::<_, MempoolError>(NonceInfo::ready(nonce, nonce_next)) + } else { + Ok::<_, MempoolError>(NonceInfo::pending(nonce, nonce_next)) + } + }; + + // It is possible for a transaction to be polled, and the + // transaction right after it to be added into the mempoool, before + // the db is updated. For this reason, nonce_cache keeps track of + // the nonces of contracts which have not yet been included in the + // block but are scheduled to. + let nonce_cached = self.nonce_cache.read().expect("Poisoned lock").get(&sender_address).cloned(); + + if let Some(nonce_cached) = nonce_cached { + match nonce.cmp(&nonce_cached) { + std::cmp::Ordering::Less => Err(MempoolError::StorageError(MadaraStorageError::InvalidNonce)), + std::cmp::Ordering::Equal => Ok(NonceInfo::ready(nonce, nonce_next)), + std::cmp::Ordering::Greater => nonce_prev_check(), + } + } else { + // The nonce cache avoids us a db lookup if the previous transaction + // is already scheduled for inclusion in this block. + let nonce_target = self + .backend + .get_contract_nonce_at(&BlockId::Tag(BlockTag::Latest), &sender_address)? 
+ .map(Nonce) + .unwrap_or_default(); // Defaults to Felt::ZERO if no nonce in db + + match nonce.cmp(&nonce_target) { + std::cmp::Ordering::Less => Err(MempoolError::StorageError(MadaraStorageError::InvalidNonce)), + std::cmp::Ordering::Equal => Ok(NonceInfo::ready(nonce, nonce_next)), + std::cmp::Ordering::Greater => nonce_prev_check(), + } + } + } } pub fn transaction_hash(tx: &Transaction) -> Felt { @@ -226,43 +311,83 @@ fn deployed_contract_address(tx: &Transaction) -> Option { impl MempoolProvider for Mempool { #[tracing::instrument(skip(self), fields(module = "Mempool"))] - fn accept_invoke_tx(&self, tx: BroadcastedInvokeTxn) -> Result, Error> { + fn tx_accept_invoke( + &self, + tx: BroadcastedInvokeTxn, + ) -> Result, MempoolError> { + let nonce_info = match &tx { + BroadcastedInvokeTxn::V1(ref tx) => self.retrieve_nonce_info(tx.sender_address, tx.nonce)?, + BroadcastedInvokeTxn::V3(ref tx) => self.retrieve_nonce_info(tx.sender_address, tx.nonce)?, + BroadcastedInvokeTxn::QueryV1(ref tx) => self.retrieve_nonce_info(tx.sender_address, tx.nonce)?, + BroadcastedInvokeTxn::QueryV3(ref tx) => self.retrieve_nonce_info(tx.sender_address, tx.nonce)?, + BroadcastedInvokeTxn::V0(_) | &BroadcastedInvokeTxn::QueryV0(_) => NonceInfo::default(), + }; + let tx = BroadcastedTxn::Invoke(tx); let (btx, class) = tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version)?; let res = AddInvokeTransactionResult { transaction_hash: transaction_hash(&btx) }; - self.accept_tx(btx, class, ArrivedAtTimestamp::now())?; + self.accept_tx(btx, class, ArrivedAtTimestamp::now(), nonce_info)?; Ok(res) } #[tracing::instrument(skip(self), fields(module = "Mempool"))] - fn accept_declare_v0_tx(&self, tx: BroadcastedDeclareTransactionV0) -> Result, Error> { + fn tx_accept_declare_v0(&self, tx: BroadcastedDeclareTransactionV0) -> Result, MempoolError> { let (btx, class) = tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version)?; let 
res = ClassAndTxnHash { transaction_hash: transaction_hash(&btx), class_hash: declare_class_hash(&btx).expect("Created transaction should be declare"), }; - self.accept_tx(btx, class, ArrivedAtTimestamp::now())?; + + self.accept_tx(btx, class, ArrivedAtTimestamp::now(), NonceInfo::default())?; Ok(res) } #[tracing::instrument(skip(self), fields(module = "Mempool"))] - fn accept_l1_handler_tx( + fn tx_accept_l1_handler( &self, tx: L1HandlerTransaction, paid_fees_on_l1: u128, - ) -> Result { + ) -> Result { + let nonce = Nonce(Felt::from(tx.nonce)); let (btx, class) = tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version, paid_fees_on_l1)?; + // L1 Handler nonces represent the ordering of L1 transactions sent by + // the core L1 contract. In principle this is a bit strange, as there + // currently is only 1 core L1 contract, so all transactions should be + // ordered by default. Moreover, these transaction are infrequent, so + // the risk that two transactions are emitted at very short intervals + // seems unlikely. Still, who knows? + // + // INFO: L1 nonce are stored differently in the db because of this, which is + // why we do not use `retrieve_nonce_readiness`. 
+ let nonce_next = nonce.try_increment()?; + let nonce_target = + self.backend.get_l1_messaging_nonce_latest()?.map(|nonce| nonce.try_increment()).unwrap_or(Ok(nonce))?; + let nonce_info = if nonce != nonce_target { + NonceInfo::pending(nonce, nonce_next) + } else { + NonceInfo::ready(nonce, nonce_next) + }; + let res = L1HandlerTransactionResult { transaction_hash: transaction_hash(&btx) }; - self.accept_tx(btx, class, ArrivedAtTimestamp::now())?; + self.accept_tx(btx, class, ArrivedAtTimestamp::now(), nonce_info)?; Ok(res) } #[tracing::instrument(skip(self), fields(module = "Mempool"))] - fn accept_declare_tx(&self, tx: BroadcastedDeclareTxn) -> Result, Error> { + fn tx_accept_declare(&self, tx: BroadcastedDeclareTxn) -> Result, MempoolError> { + let nonce_info = match &tx { + BroadcastedDeclareTxn::V1(ref tx) => self.retrieve_nonce_info(tx.sender_address, tx.nonce)?, + BroadcastedDeclareTxn::V2(ref tx) => self.retrieve_nonce_info(tx.sender_address, tx.nonce)?, + BroadcastedDeclareTxn::V3(ref tx) => self.retrieve_nonce_info(tx.sender_address, tx.nonce)?, + BroadcastedDeclareTxn::QueryV1(ref tx) => self.retrieve_nonce_info(tx.sender_address, tx.nonce)?, + BroadcastedDeclareTxn::QueryV2(ref tx) => self.retrieve_nonce_info(tx.sender_address, tx.nonce)?, + BroadcastedDeclareTxn::QueryV3(ref tx) => self.retrieve_nonce_info(tx.sender_address, tx.nonce)?, + }; + let tx = BroadcastedTxn::Declare(tx); let (btx, class) = tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version)?; @@ -270,15 +395,15 @@ impl MempoolProvider for Mempool { transaction_hash: transaction_hash(&btx), class_hash: declare_class_hash(&btx).expect("Created transaction should be declare"), }; - self.accept_tx(btx, class, ArrivedAtTimestamp::now())?; + self.accept_tx(btx, class, ArrivedAtTimestamp::now(), nonce_info)?; Ok(res) } #[tracing::instrument(skip(self), fields(module = "Mempool"))] - fn accept_deploy_account_tx( + fn tx_accept_deploy_account( &self, tx: 
BroadcastedDeployAccountTxn, - ) -> Result, Error> { + ) -> Result, MempoolError> { let tx = BroadcastedTxn::DeployAccount(tx); let (btx, class) = tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version)?; @@ -286,54 +411,112 @@ impl MempoolProvider for Mempool { transaction_hash: transaction_hash(&btx), contract_address: deployed_contract_address(&btx).expect("Created transaction should be deploy account"), }; - self.accept_tx(btx, class, ArrivedAtTimestamp::now())?; + self.accept_tx(btx, class, ArrivedAtTimestamp::now(), NonceInfo::default())?; Ok(res) } /// Warning: A lock is held while a user-supplied function (extend) is run - Callers should be careful #[tracing::instrument(skip(self, dest, n), fields(module = "Mempool"))] - fn take_txs_chunk + 'static>(&self, dest: &mut I, n: usize) { + fn txs_take_chunk(&self, dest: &mut VecDeque, n: usize) { let mut inner = self.inner.write().expect("Poisoned lock"); - inner.pop_next_chunk(dest, n) + let mut nonce_cache = self.nonce_cache.write().expect("Poisoned lock"); + + let from = dest.len(); + inner.pop_next_chunk(dest, n); + + for mempool_tx in dest.iter().skip(from) { + let contract_address = mempool_tx.contract_address().to_felt(); + let nonce_next = mempool_tx.nonce_next; + nonce_cache.insert(contract_address, nonce_next); + } } #[tracing::instrument(skip(self), fields(module = "Mempool"))] - fn take_tx(&self) -> Option { - let mut inner = self.inner.write().expect("Poisoned lock"); - inner.pop_next() + fn tx_take(&mut self) -> Option { + if let Some(mempool_tx) = self.inner.write().expect("Poisoned lock").pop_next() { + let contract_address = mempool_tx.contract_address().to_felt(); + let nonce_next = mempool_tx.nonce_next; + self.nonce_cache.write().expect("Poisoned lock").insert(contract_address, nonce_next); + + Some(mempool_tx) + } else { + None + } + } + + #[tracing::instrument(skip(self, contract_address), fields(module = "Mempool"))] + fn tx_mark_included(&self, 
contract_address: &Felt) { + let removed = self.nonce_cache.write().expect("Poisoned lock").remove(contract_address); + debug_assert!(removed.is_some()); } - /// Warning: A lock is taken while a user-supplied function (iterator stuff) is run - Callers should be careful - /// This is called by the block production after a batch of transaction is executed. - /// Mark the consumed txs as consumed, and re-add the transactions that are not consumed in the mempool. + /// Warning: A lock is taken while a user-supplied function (iterator stuff) + /// is run - Callers should be careful This is called by the block + /// production after a batch of transaction is executed. Mark the consumed + /// txs as consumed, and re-add the transactions that are not consumed in + /// the mempool. #[tracing::instrument(skip(self, txs, consumed_txs), fields(module = "Mempool"))] - fn re_add_txs>( + fn txs_re_add( &self, - txs: I, + txs: VecDeque, consumed_txs: Vec, - ) -> Result<(), Error> { + ) -> Result<(), MempoolError> { let mut inner = self.inner.write().expect("Poisoned lock"); + let mut nonce_cache = self.nonce_cache.write().expect("Poisoned lock"); + + for tx in txs.iter() { + // Nonce cache is invalidated upon re-insertion into the mempool + // as it is currently possible for these transactions to be + // removed if their age exceeds the limit. In the future, we + // might want to update this if we make it so only pending + // transactions can be removed this way. 
+ let contract_address = **tx.contract_address(); + let removed = nonce_cache.remove(&contract_address); + debug_assert!(removed.is_some()); + } + let hashes = consumed_txs.iter().map(|tx| tx.tx_hash()).collect::>(); inner.re_add_txs(txs, consumed_txs); drop(inner); + for tx_hash in hashes { self.backend.remove_mempool_transaction(&tx_hash.to_felt())?; } + Ok(()) } + /// This is called by the block production task to re-add transaction from + /// the pending block back into the mempool #[tracing::instrument(skip(self, txs), fields(module = "Mempool"))] - fn insert_txs_no_validation(&self, txs: Vec, force: bool) -> Result<(), Error> { + fn txs_insert_no_validation(&self, txs: Vec, force: bool) -> Result<(), MempoolError> { + let mut nonce_cache = self.nonce_cache.write().expect("Poisoned lock"); + for tx in &txs { + // Theoretically we should not have to invalidate the nonce cache + // here as this function should ONLY be called when adding the + // pending block back into the mempool from the db. However I am + // afraid someone will end up using this incorrectly so I am adding + // this here. + nonce_cache.remove(&tx.contract_address()); + + // Save to db. 
Transactions are marked as ready since they were + // already previously included into the pending block + let nonce_info = NonceInfo::ready(tx.nonce, tx.nonce_next); let saved_tx = blockifier_to_saved_tx(&tx.tx, tx.arrived_at); - // save to db - self.backend.save_mempool_transaction(&saved_tx, tx.tx_hash().to_felt(), &tx.converted_class)?; + self.backend.save_mempool_transaction( + &saved_tx, + tx.tx_hash().to_felt(), + &tx.converted_class, + &nonce_info, + )?; } + let mut inner = self.inner.write().expect("Poisoned lock"); inner.insert_txs(txs, force)?; + Ok(()) } - fn chain_id(&self) -> Felt { Felt::from_bytes_be_slice(format!("{}", self.backend.chain_config().chain_id).as_bytes()) } @@ -415,6 +598,12 @@ pub(crate) fn clone_transaction(tx: &Transaction) -> Transaction { #[cfg(test)] mod test { + use std::time::Duration; + + use mc_db::mempool_db::NonceStatus; + use mp_block::{MadaraBlockInfo, MadaraBlockInner, MadaraMaybePendingBlock, MadaraMaybePendingBlockInfo}; + use mp_state_update::{NonceUpdate, StateDiff}; + use super::*; #[rstest::fixture] @@ -437,15 +626,20 @@ mod test { } #[rstest::fixture] - fn tx_account_v0_valid() -> blockifier::transaction::transaction_execution::Transaction { + fn tx_account_v0_valid( + #[default(Felt::ZERO)] contract_address: Felt, + ) -> blockifier::transaction::transaction_execution::Transaction { blockifier::transaction::transaction_execution::Transaction::AccountTransaction( blockifier::transaction::account_transaction::AccountTransaction::Invoke( blockifier::transaction::transactions::InvokeTransaction { tx: starknet_api::transaction::InvokeTransaction::V0( - starknet_api::transaction::InvokeTransactionV0::default(), + starknet_api::transaction::InvokeTransactionV0 { + contract_address: ContractAddress::try_from(contract_address).unwrap(), + ..Default::default() + }, ), - tx_hash: starknet_api::transaction::TransactionHash(Felt::default()), - only_query: true, + tx_hash: starknet_api::transaction::TransactionHash::default(), 
+ only_query: false, }, ), ) @@ -459,32 +653,1315 @@ mod test { tx: starknet_api::transaction::InvokeTransaction::V1( starknet_api::transaction::InvokeTransactionV1::default(), ), - tx_hash: starknet_api::transaction::TransactionHash(Felt::default()), + tx_hash: starknet_api::transaction::TransactionHash::default(), only_query: true, }, ), ) } + #[rstest::fixture] + fn tx_deploy_v1_valid( + #[default(Felt::ZERO)] contract_address: Felt, + ) -> blockifier::transaction::transaction_execution::Transaction { + blockifier::transaction::transaction_execution::Transaction::AccountTransaction( + blockifier::transaction::account_transaction::AccountTransaction::DeployAccount( + blockifier::transaction::transactions::DeployAccountTransaction { + tx: starknet_api::transaction::DeployAccountTransaction::V1( + starknet_api::transaction::DeployAccountTransactionV1::default(), + ), + tx_hash: starknet_api::transaction::TransactionHash::default(), + contract_address: ContractAddress::try_from(contract_address).unwrap(), + only_query: false, + }, + ), + ) + } + + #[rstest::fixture] + fn tx_l1_handler_valid( + #[default(Felt::ZERO)] contract_address: Felt, + ) -> blockifier::transaction::transaction_execution::Transaction { + blockifier::transaction::transaction_execution::Transaction::L1HandlerTransaction( + blockifier::transaction::transactions::L1HandlerTransaction { + tx: starknet_api::transaction::L1HandlerTransaction { + contract_address: ContractAddress::try_from(contract_address).unwrap(), + ..Default::default() + }, + tx_hash: starknet_api::transaction::TransactionHash::default(), + paid_fee_on_l1: starknet_api::transaction::Fee::default(), + }, + ) + } + #[rstest::rstest] + #[timeout(Duration::from_millis(1_000))] fn mempool_accept_tx_pass( backend: Arc, l1_data_provider: Arc, tx_account_v0_valid: blockifier::transaction::transaction_execution::Transaction, ) { let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); - let result = 
mempool.accept_tx(tx_account_v0_valid, None, ArrivedAtTimestamp::now()); + let result = mempool.accept_tx(tx_account_v0_valid, None, ArrivedAtTimestamp::now(), NonceInfo::default()); + assert_matches::assert_matches!(result, Ok(())); + + mempool.inner.read().expect("Poisoned lock").check_invariants(); + } + + /// This test checks if a ready transaction is indeed inserted into the + /// ready queue. + #[rstest::rstest] + #[timeout(Duration::from_millis(1_000))] + fn mempool_accept_tx_ready( + backend: Arc, + l1_data_provider: Arc, + tx_account_v0_valid: blockifier::transaction::transaction_execution::Transaction, + ) { + let sender_address = Felt::ZERO; + let nonce = Nonce(Felt::ZERO); + + let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); + let arrived_at = ArrivedAtTimestamp::now(); + let result = mempool.accept_tx(tx_account_v0_valid, None, arrived_at, NonceInfo::default()); assert_matches::assert_matches!(result, Ok(())); + + let inner = mempool.inner.read().expect("Poisoned lock"); + inner.check_invariants(); + + assert!(inner.tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: Felt::ZERO, + timestamp: arrived_at, + nonce: Nonce(Felt::ZERO), + nonce_next: Nonce(Felt::ONE), + phantom: std::marker::PhantomData, + })); + + assert!(inner.nonce_is_ready(sender_address, nonce)); } #[rstest::rstest] + #[timeout(Duration::from_millis(1_000))] fn mempool_accept_tx_fail_validate( backend: Arc, l1_data_provider: Arc, tx_account_v1_invalid: blockifier::transaction::transaction_execution::Transaction, ) { let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); - let result = mempool.accept_tx(tx_account_v1_invalid, None, ArrivedAtTimestamp::now()); - assert_matches::assert_matches!(result, Err(crate::Error::Validation(_))); + let result = mempool.accept_tx(tx_account_v1_invalid, None, ArrivedAtTimestamp::now(), NonceInfo::default()); + assert_matches::assert_matches!(result, 
Err(crate::MempoolError::Validation(_))); + + mempool.inner.read().expect("Poisoned lock").check_invariants(); + } + + /// This test makes sure that taking a transaction from the mempool works as + /// intended. + #[rstest::rstest] + #[timeout(Duration::from_millis(1_000))] + fn mempool_take_tx_pass( + backend: Arc, + l1_data_provider: Arc, + tx_account_v0_valid: blockifier::transaction::transaction_execution::Transaction, + ) { + let mut mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); + let timestamp = ArrivedAtTimestamp::now(); + let result = mempool.accept_tx(tx_account_v0_valid, None, timestamp, NonceInfo::default()); + assert_matches::assert_matches!(result, Ok(())); + + let mempool_tx = mempool.tx_take().expect("Mempool should contain a transaction"); + assert_eq!(mempool_tx.arrived_at, timestamp); + + assert!(mempool.tx_take().is_none(), "It should not be possible to take a transaction from an empty mempool"); + + mempool.inner.read().expect("Poisoned lock").check_invariants(); + } + + /// This test makes sure that all deploy account transactions inserted into + /// [MempoolInner] are accounted for. Replacements are not taken into + /// account. 
+ #[rstest::rstest] + #[timeout(Duration::from_millis(1_000))] + fn mempool_deploy_count( + backend: Arc, + l1_data_provider: Arc, + tx_deploy_v1_valid: blockifier::transaction::transaction_execution::Transaction, + ) { + let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); + + let nonce_info = NonceInfo::ready(Nonce(Felt::ZERO), Nonce(Felt::ONE)); + let mempool_tx = MempoolTransaction { + tx: tx_deploy_v1_valid, + arrived_at: ArrivedAtTimestamp::now(), + converted_class: None, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + }; + let contract_address = mempool_tx.contract_address(); + + let force = true; + let update_limits = true; + let result = + mempool.inner.write().expect("Poisoned lock").insert_tx(mempool_tx, force, update_limits, nonce_info); + assert_matches::assert_matches!(result, Ok(())); + + let inner = mempool.inner.read().expect("Poisoned lock"); + assert!(inner.deployed_contracts.contains(&contract_address)); + inner.check_invariants(); + } + + /// This test makes sure that all deploy account transactions inserted into + /// [MempoolInner] are accounted for, even after a non-deploy transaction + /// has been replaced. + /// + /// > This bug was originally detected through proptesting. 
+ #[rstest::rstest] + #[timeout(Duration::from_millis(1_000))] + fn mempool_deploy_replace( + backend: Arc, + l1_data_provider: Arc, + tx_account_v0_valid: blockifier::transaction::transaction_execution::Transaction, + tx_deploy_v1_valid: blockifier::transaction::transaction_execution::Transaction, + ) { + let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); + + let nonce_info = NonceInfo::ready(Nonce(Felt::ZERO), Nonce(Felt::ONE)); + let mempool_tx = MempoolTransaction { + tx: tx_account_v0_valid, + arrived_at: ArrivedAtTimestamp::now(), + converted_class: None, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + }; + let contract_address = mempool_tx.contract_address(); + + // We hack our way into the inner mempool to avoid having to generate a + // valid deploy transaction since the outer mempool checks this + let force = false; + let update_limits = true; + let result = + mempool.inner.write().expect("Poisoned lock").insert_tx(mempool_tx, force, update_limits, nonce_info); + assert_matches::assert_matches!(result, Ok(())); + + // We insert a first non-deploy tx. This should not update the count of + // deploy transactions. 
+ let inner = mempool.inner.read().expect("Poisoned lock"); + assert!(!inner.deployed_contracts.contains(&contract_address)); + inner.check_invariants(); + drop(inner); + + let nonce_info = NonceInfo::ready(Nonce(Felt::ZERO), Nonce(Felt::ONE)); + let mempool_tx = MempoolTransaction { + tx: tx_deploy_v1_valid, + arrived_at: ArrivedAtTimestamp::now(), + converted_class: None, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + }; + let contract_address = mempool_tx.contract_address(); + + // Now we replace the previous transaction with a deploy account tx + let force = true; + let update_limits = true; + let result = + mempool.inner.write().expect("Poisoned lock").insert_tx(mempool_tx, force, update_limits, nonce_info); + assert_matches::assert_matches!(result, Ok(())); + + // This should have updated the count of deploy transactions. + let inner = mempool.inner.read().expect("Poisoned lock"); + assert!(inner.deployed_contracts.contains(&contract_address)); + inner.check_invariants(); + } + + /// This test makes sure that replacing a deploy account transaction with a + /// non-deploy account transaction reduces the deploy transaction count. 
+ /// + /// > This bug was originally detected through proptesting + #[rstest::rstest] + #[timeout(Duration::from_millis(1_000))] + fn mempool_deploy_replace2( + backend: Arc, + l1_data_provider: Arc, + tx_deploy_v1_valid: blockifier::transaction::transaction_execution::Transaction, + tx_account_v0_valid: blockifier::transaction::transaction_execution::Transaction, + ) { + let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); + + // First, we insert the deploy account transaction + let nonce_info = NonceInfo::ready(Nonce(Felt::ZERO), Nonce(Felt::ONE)); + let mempool_tx = MempoolTransaction { + tx: tx_deploy_v1_valid, + arrived_at: ArrivedAtTimestamp::now(), + converted_class: None, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + }; + let contract_address = mempool_tx.contract_address(); + + let force = true; + let update_limits = true; + let result = + mempool.inner.write().expect("Poisoned lock").insert_tx(mempool_tx, force, update_limits, nonce_info); + assert_matches::assert_matches!(result, Ok(())); + + let inner = mempool.inner.read().expect("Poisoned lock"); + assert!(inner.deployed_contracts.contains(&contract_address)); + inner.check_invariants(); + drop(inner); + + // Now we replace the previous transaction with a non-deploy account tx + let nonce_info = NonceInfo::ready(Nonce(Felt::ZERO), Nonce(Felt::ONE)); + let mempool_tx = MempoolTransaction { + tx: tx_account_v0_valid, + arrived_at: ArrivedAtTimestamp::now(), + converted_class: None, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + }; + let contract_address = mempool_tx.contract_address(); + + let force = true; + let update_limits = true; + let result = + mempool.inner.write().expect("Poisoned lock").insert_tx(mempool_tx, force, update_limits, nonce_info); + assert_matches::assert_matches!(result, Ok(())); + + // The deploy transaction count at that address should be 0 + let inner = mempool.inner.read().expect("Poisoned lock"); + 
assert!(!inner.deployed_contracts.contains(&contract_address)); + inner.check_invariants(); + } + + /// This tests makes sure that when deploy account transactions are removed + /// because their age exceeds the allowed limit, then the deploy transaction + /// count is also updated. + /// + /// > This bug was originally detected through proptesting + #[rstest::rstest] + #[timeout(Duration::from_millis(1_000))] + fn mempool_deploy_remove_age_exceeded( + backend: Arc, + l1_data_provider: Arc, + tx_deploy_v1_valid: blockifier::transaction::transaction_execution::Transaction, + ) { + let mempool = Mempool::new( + backend, + l1_data_provider, + MempoolLimits { max_age: Some(Duration::from_secs(3_600)), ..MempoolLimits::for_testing() }, + ); + + // First, we insert the deploy account transaction + let nonce_info = NonceInfo::ready(Nonce(Felt::ZERO), Nonce(Felt::ONE)); + let mempool_tx = MempoolTransaction { + tx: tx_deploy_v1_valid, + arrived_at: ArrivedAtTimestamp::UNIX_EPOCH, + converted_class: None, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + }; + let contract_address = mempool_tx.contract_address(); + + let force = true; + let update_limits = true; + let result = + mempool.inner.write().expect("Poisoned lock").insert_tx(mempool_tx, force, update_limits, nonce_info); + assert_matches::assert_matches!(result, Ok(())); + + let inner = mempool.inner.read().expect("Poisoned lock"); + assert!(inner.deployed_contracts.contains(&contract_address)); + inner.check_invariants(); + drop(inner); + + // Next, we manually call `remove_age_exceeded_txs` + mempool.inner.write().expect("Poisoned lock").remove_age_exceeded_txs(); + + // This should have removed the deploy contract transaction and updated + // the count at that address + let inner = mempool.inner.read().expect("Poisoned lock"); + assert!(!inner.deployed_contracts.contains(&contract_address)); + inner.check_invariants(); + } + + /// This test makes sure that old transactions are removed from the + 
/// [mempool], whether they be represented by ready or pending intents. + /// + /// # Setup: + /// + /// - We assume `tx_*_n `are from the same contract but with increasing + /// nonces. + /// + /// - `tx_new` are transactions which _should not_ be removed from the + /// mempool, `tx_old` are transactions which _should_ be removed from the + /// mempool + /// + /// - `tx_new_3`, `tx_old_3` and `tx_new_2_bis` and `tx_old_4` are in the + /// pending queue, all other transactions are in the ready queue. + /// + /// [mempool]: inner::MempoolInner + #[rstest::rstest] + #[timeout(Duration::from_millis(1_000))] + #[allow(clippy::too_many_arguments)] + fn mempool_remove_aged_tx_pass( + backend: Arc, + l1_data_provider: Arc, + #[from(tx_account_v0_valid)] // We are reusing the tx_account_v0_valid fixture... + #[with(Felt::ZERO)] // ...with different arguments + tx_new_1: blockifier::transaction::transaction_execution::Transaction, + #[from(tx_account_v0_valid)] + #[with(Felt::ONE)] + tx_new_2: blockifier::transaction::transaction_execution::Transaction, + #[from(tx_account_v0_valid)] + #[with(Felt::TWO)] + tx_new_3: blockifier::transaction::transaction_execution::Transaction, + #[from(tx_account_v0_valid)] + #[with(Felt::ZERO)] + tx_old_1: blockifier::transaction::transaction_execution::Transaction, + #[from(tx_account_v0_valid)] + #[with(Felt::ONE)] + tx_old_2: blockifier::transaction::transaction_execution::Transaction, + #[from(tx_account_v0_valid)] + #[with(Felt::TWO)] + tx_old_3: blockifier::transaction::transaction_execution::Transaction, + #[from(tx_account_v0_valid)] + #[with(Felt::THREE)] + tx_old_4: blockifier::transaction::transaction_execution::Transaction, + ) { + let mempool = Mempool::new( + backend, + l1_data_provider, + MempoolLimits { max_age: Some(Duration::from_secs(3600)), ..MempoolLimits::for_testing() }, + ); + + // ================================================================== // + // STEP 1 // + // 
================================================================== // + + // First, we begin by inserting all our transactions and making sure + // they are in the ready as well as the pending intent queues. + let arrived_at = ArrivedAtTimestamp::now(); + let tx_new_1_mempool = MempoolTransaction { + tx: tx_new_1, + arrived_at, + converted_class: None, + nonce: Nonce(Felt::ZERO), + nonce_next: Nonce(Felt::ONE), + }; + let res = mempool.inner.write().expect("Poisoned lock").insert_tx( + tx_new_1_mempool.clone(), + true, + false, + NonceInfo::ready(Nonce(Felt::ZERO), Nonce(Felt::ONE)), + ); + assert!(res.is_ok()); + assert!( + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: **tx_new_1_mempool.contract_address(), + timestamp: tx_new_1_mempool.arrived_at, + nonce: tx_new_1_mempool.nonce, + nonce_next: tx_new_1_mempool.nonce_next, + phantom: std::marker::PhantomData + }), + "ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + + let arrived_at = ArrivedAtTimestamp::now(); + let tx_new_2_mempool = MempoolTransaction { + tx: tx_new_2, + arrived_at, + converted_class: None, + nonce: Nonce(Felt::ZERO), + nonce_next: Nonce(Felt::ONE), + }; + let res = mempool.inner.write().expect("Poisoned lock").insert_tx( + tx_new_2_mempool.clone(), + true, + false, + NonceInfo::ready(Nonce(Felt::ZERO), Nonce(Felt::ONE)), + ); + assert!(res.is_ok()); + assert!( + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: **tx_new_2_mempool.contract_address(), + timestamp: tx_new_2_mempool.arrived_at, + nonce: tx_new_2_mempool.nonce, + nonce_next: tx_new_2_mempool.nonce_next, + phantom: std::marker::PhantomData + }), + "ready transaction intents are: {:#?}\npending transaction 
intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + + let arrived_at = ArrivedAtTimestamp::now(); + let tx_new_3_mempool = MempoolTransaction { + tx: tx_new_3, + arrived_at, + converted_class: None, + nonce: Nonce(Felt::ONE), + nonce_next: Nonce(Felt::TWO), + }; + let res = mempool.inner.write().expect("Poisoned lock").insert_tx( + tx_new_3_mempool.clone(), + true, + false, + NonceInfo::pending(Nonce(Felt::ONE), Nonce(Felt::TWO)), + ); + assert!(res.is_ok()); + assert!( + mempool + .inner + .read() + .expect("Poisoned lock") + .tx_intent_queue_pending_by_nonce + .get(&**tx_new_3_mempool.contract_address()) + .expect("Missing nonce mapping for tx_new_3") + .contains_key(&TransactionIntentPendingByNonce { + contract_address: **tx_new_3_mempool.contract_address(), + timestamp: tx_new_3_mempool.arrived_at, + nonce: tx_new_3_mempool.nonce, + nonce_next: tx_new_3_mempool.nonce_next, + phantom: std::marker::PhantomData + }), + "ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + + let arrived_at = ArrivedAtTimestamp::UNIX_EPOCH; + let tx_old_1_mempool = MempoolTransaction { + tx: tx_old_1, + arrived_at, + converted_class: None, + nonce: Nonce(Felt::ONE), + nonce_next: Nonce(Felt::TWO), + }; + let res = mempool.inner.write().expect("Poisoned lock").insert_tx( + tx_old_1_mempool.clone(), + true, + false, + NonceInfo::ready(Nonce(Felt::ONE), Nonce(Felt::TWO)), + ); + assert!(res.is_ok()); + assert!( + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: **tx_old_1_mempool.contract_address(), + timestamp: tx_old_1_mempool.arrived_at, + nonce: tx_old_1_mempool.nonce, + nonce_next: 
tx_old_1_mempool.nonce_next, + phantom: std::marker::PhantomData + }), + "ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + + let arrived_at = ArrivedAtTimestamp::UNIX_EPOCH; + let tx_old_2_mempool = MempoolTransaction { + tx: tx_old_2, + arrived_at, + converted_class: None, + nonce: Nonce(Felt::ONE), + nonce_next: Nonce(Felt::TWO), + }; + let res = mempool.inner.write().expect("Poisoned lock").insert_tx( + tx_old_2_mempool.clone(), + true, + false, + NonceInfo::ready(Nonce(Felt::ONE), Nonce(Felt::TWO)), + ); + assert!(res.is_ok()); + assert!( + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: **tx_old_2_mempool.contract_address(), + timestamp: tx_old_2_mempool.arrived_at, + nonce: tx_old_2_mempool.nonce, + nonce_next: tx_old_2_mempool.nonce_next, + phantom: std::marker::PhantomData + }), + "ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + + let arrived_at = ArrivedAtTimestamp::UNIX_EPOCH; + let tx_old_3_mempool = MempoolTransaction { + tx: tx_old_3, + arrived_at, + converted_class: None, + nonce: Nonce(Felt::TWO), + nonce_next: Nonce(Felt::THREE), + }; + let res = mempool.inner.write().expect("Poisoned lock").insert_tx( + tx_old_3_mempool.clone(), + true, + false, + NonceInfo::pending(Nonce(Felt::TWO), Nonce(Felt::THREE)), + ); + assert!(res.is_ok()); + assert!( + mempool + .inner + .read() + .expect("Poisoned lock") + .tx_intent_queue_pending_by_nonce + .get(&**tx_old_3_mempool.contract_address()) + .expect("Missing nonce mapping for tx_old_3") + .contains_key(&TransactionIntentPendingByNonce { + contract_address: 
**tx_old_3_mempool.contract_address(), + timestamp: tx_old_3_mempool.arrived_at, + nonce: tx_old_3_mempool.nonce, + nonce_next: tx_old_3_mempool.nonce_next, + phantom: std::marker::PhantomData + }), + "ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + + let arrived_at = ArrivedAtTimestamp::UNIX_EPOCH; + let tx_old_4_mempool = MempoolTransaction { + tx: tx_old_4, + arrived_at, + converted_class: None, + nonce: Nonce(Felt::ONE), + nonce_next: Nonce(Felt::TWO), + }; + let res = mempool.inner.write().expect("Poisoned lock").insert_tx( + tx_old_4_mempool.clone(), + true, + false, + NonceInfo::pending(Nonce(Felt::ONE), Nonce(Felt::TWO)), + ); + assert!(res.is_ok()); + assert!( + mempool + .inner + .read() + .expect("Poisoned lock") + .tx_intent_queue_pending_by_nonce + .get(&**tx_old_4_mempool.contract_address()) + .expect("Missing nonce_mapping for tx_old_4") + .contains_key(&TransactionIntentPendingByNonce { + contract_address: **tx_old_4_mempool.contract_address(), + timestamp: tx_old_4_mempool.arrived_at, + nonce: tx_old_4_mempool.nonce, + nonce_next: tx_old_4_mempool.nonce_next, + phantom: std::marker::PhantomData + }), + "ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + + // Make sure we have not entered an invalid state. + mempool.inner.read().expect("Poisoned lock").check_invariants(); + + // ================================================================== // + // STEP 2 // + // ================================================================== // + + // Now we actually remove the transactions. All the old transactions + // should be removed. 
+ mempool.inner.write().expect("Poisoned lock").remove_age_exceeded_txs(); + + // tx_new_1 and tx_new_2 should still be in the mempool + assert!( + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: **tx_new_1_mempool.contract_address(), + timestamp: tx_new_1_mempool.arrived_at, + nonce: tx_new_1_mempool.nonce, + nonce_next: tx_new_1_mempool.nonce_next, + phantom: std::marker::PhantomData + }), + "ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + assert!( + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: **tx_new_2_mempool.contract_address(), + timestamp: tx_new_2_mempool.arrived_at, + nonce: tx_new_2_mempool.nonce, + nonce_next: tx_new_2_mempool.nonce_next, + phantom: std::marker::PhantomData + }), + "ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + + // tx_old_1 and tx_old_2 should no longer be in the ready queue + assert!( + !mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: **tx_old_1_mempool.contract_address(), + timestamp: tx_old_1_mempool.arrived_at, + nonce: tx_old_1_mempool.nonce, + nonce_next: tx_old_1_mempool.nonce_next, + phantom: std::marker::PhantomData + }), + "ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + assert!( + !mempool.inner.read().expect("Poisoned 
lock").tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: **tx_old_2_mempool.contract_address(), + timestamp: tx_old_2_mempool.arrived_at, + nonce: tx_old_2_mempool.nonce, + nonce_next: tx_old_2_mempool.nonce_next, + phantom: std::marker::PhantomData + }), + "ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + + // tx_new_3 should still be in the pending queue but tx_old_3 should not + assert!( + mempool + .inner + .read() + .expect("Poisoned lock") + .tx_intent_queue_pending_by_nonce + .get(&**tx_new_3_mempool.contract_address()) + .expect("Missing nonce mapping for tx_new_3") + .contains_key(&TransactionIntentPendingByNonce { + contract_address: **tx_new_3_mempool.contract_address(), + timestamp: tx_new_3_mempool.arrived_at, + nonce: tx_new_3_mempool.nonce, + nonce_next: tx_new_3_mempool.nonce_next, + phantom: std::marker::PhantomData + }), + "ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + assert!( + !mempool + .inner + .read() + .expect("Poisoned lock") + .tx_intent_queue_pending_by_nonce + .get(&**tx_old_3_mempool.contract_address()) + .expect("Missing nonce mapping for tx_old_3") + .contains_key(&TransactionIntentPendingByNonce { + contract_address: **tx_old_3_mempool.contract_address(), + timestamp: tx_old_3_mempool.arrived_at, + nonce: tx_old_3_mempool.nonce, + nonce_next: tx_old_3_mempool.nonce_next, + phantom: std::marker::PhantomData + }), + "ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned 
lock").tx_intent_queue_pending_by_nonce + ); + + // tx_old_4 should no longer be in the pending queue and since it was + // the only transaction for that contract address, that pending queue + // should have been emptied + assert!( + !mempool + .inner + .read() + .expect("Poisoned lock") + .tx_intent_queue_pending_by_nonce + .contains_key(&**tx_old_4_mempool.contract_address()), + "ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + + // Make sure we have not entered an invalid state. + mempool.inner.read().expect("Poisoned lock").check_invariants(); + } + + /// This test makes sure that adding an l1 handler to [MempoolInner] does + /// not cause an infinite loop when trying to remove age exceeded + /// transactions. + /// + /// This is important as age is not checked on l1 handlers, and so if they + /// are not handled properly they can cause an infinite loop. + /// + /// > This bug was originally detected through proptesting.
+ #[rstest::rstest] + #[timeout(Duration::from_millis(1_000))] + fn mempool_remove_aged_tx_pass_l1_handler( + backend: Arc, + l1_data_provider: Arc, + tx_l1_handler_valid: blockifier::transaction::transaction_execution::Transaction, + ) { + let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); + + let nonce_info = NonceInfo::ready(Nonce(Felt::ZERO), Nonce(Felt::ONE)); + let mempool_tx = MempoolTransaction { + tx: tx_l1_handler_valid, + arrived_at: ArrivedAtTimestamp::now(), + converted_class: None, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + }; + + let force = false; + let update_limits = true; + let result = + mempool.inner.write().expect("Poisoned lock").insert_tx(mempool_tx, force, update_limits, nonce_info); + assert_matches::assert_matches!(result, Ok(())); + + let inner = mempool.inner.read().expect("Poisoned lock"); + assert!(inner.nonce_is_ready(Felt::ZERO, Nonce(Felt::ZERO))); + inner.check_invariants(); + drop(inner); + + // This should not loop! + mempool.inner.write().expect("Poisoned lock").remove_age_exceeded_txs(); + let inner = mempool.inner.read().expect("Poisoned lock"); + assert!(inner.nonce_is_ready(Felt::ZERO, Nonce(Felt::ZERO))); + inner.check_invariants(); + } + + /// This tests makes sure that if a transaction is inserted as [pending], + /// and the transaction before it is polled, then that transaction becomes + /// [ready]. + /// + /// # Setup: + /// + /// - We assume `tx_ready` and `tx_pending` are from the same contract. + /// + /// - `tx_ready` has the correct nonce while `tx_pending` has the nonce + /// right after that. 
+ /// + /// [ready]: inner::TransactionIntentReady; + /// [pending]: inner::TransactionInentPending; + #[rstest::rstest] + #[timeout(Duration::from_millis(1_000))] + fn mempool_readiness_check( + backend: Arc, + l1_data_provider: Arc, + #[from(tx_account_v0_valid)] tx_ready: blockifier::transaction::transaction_execution::Transaction, + #[from(tx_account_v0_valid)] tx_pending: blockifier::transaction::transaction_execution::Transaction, + ) { + let mut mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); + + // Insert pending transaction + + let nonce_info = NonceInfo::pending(Nonce(Felt::ONE), Nonce(Felt::TWO)); + let timestamp_pending = ArrivedAtTimestamp::now(); + let result = mempool.accept_tx(tx_pending, None, timestamp_pending, nonce_info); + assert_matches::assert_matches!(result, Ok(())); + + let inner = mempool.inner.read().expect("Poisoned lock"); + inner.check_invariants(); + inner + .tx_intent_queue_pending_by_nonce + .get(&Felt::ZERO) + .expect("Mempool should have a pending queue for contract address ZERO") + .get(&TransactionIntentPendingByNonce { + contract_address: Felt::ZERO, + timestamp: timestamp_pending, + nonce: Nonce(Felt::ONE), + nonce_next: Nonce(Felt::TWO), + phantom: Default::default(), + }) + .expect("Mempool should contain pending transaction"); + + assert_eq!(inner.tx_intent_queue_pending_by_nonce.len(), 1); + + drop(inner); + + // Insert ready transaction + + let nonce_info = NonceInfo::ready(Nonce(Felt::ZERO), Nonce(Felt::ONE)); + let timestamp_ready = ArrivedAtTimestamp::now(); + let result = mempool.accept_tx(tx_ready, None, timestamp_ready, nonce_info); + assert_matches::assert_matches!(result, Ok(())); + + let inner = mempool.inner.read().expect("Poisoned lock"); + inner.check_invariants(); + inner + .tx_intent_queue_ready + .get(&TransactionIntentReady { + contract_address: Felt::ZERO, + timestamp: timestamp_ready, + nonce: Nonce(Felt::ZERO), + nonce_next: Nonce(Felt::ONE), + phantom: 
Default::default(), + }) + .expect("Mempool should receive ready transaction"); + + assert_eq!(inner.tx_intent_queue_ready.len(), 1); + + drop(inner); + + // Take the next transaction. The pending transaction was received first + // but this should return the ready transaction instead. + + let mempool_tx = mempool.tx_take().expect("Mempool contains a ready transaction!"); + + let inner = mempool.inner.read().expect("Poisoned lock"); + inner.check_invariants(); + inner + .tx_intent_queue_ready + .get(&TransactionIntentReady { + contract_address: Felt::ZERO, + timestamp: timestamp_pending, + nonce: Nonce(Felt::ONE), + nonce_next: Nonce(Felt::TWO), + phantom: Default::default(), + }) + .expect("Mempool should have converted pending transaction to ready"); + + // Taking the ready transaction should mark the pending transaction as + // ready + + assert_eq!(mempool_tx.arrived_at, timestamp_ready); + assert_eq!(inner.tx_intent_queue_ready.len(), 1); + assert!(inner.tx_intent_queue_pending_by_nonce.is_empty()); + + drop(inner); + + // Take the next transaction. The pending transaction has been marked as + // ready and should be returned here + + let mempool_tx = mempool.tx_take().expect("Mempool contains a ready transaction!"); + + let inner = mempool.inner.read().expect("Poisoned lock"); + inner.check_invariants(); + + // No more transactions left + + assert_eq!(mempool_tx.arrived_at, timestamp_pending); + assert!(inner.tx_intent_queue_ready.is_empty()); + assert!(inner.tx_intent_queue_pending_by_nonce.is_empty()); + } + + /// This tests makes sure that if a transaction is inserted into the + /// [mempool], and its nonce does not match what is expected by the db BUT + /// the transaction with the nonce preceding it is already marked as + /// [ready], then it is marked as ready as well. 
+ /// + /// This handles the case where the database has not yet been updated to + /// reflect the state of the mempool, which should happen often since nonces + /// are not updated until transaction execution. This way we limit the + /// number of transactions we have in the [pending] queue. + /// + /// # Setup: + /// + /// - We assume `tx_1` and `tx_2` are from the same contract. + /// + /// - `tx_1` has the correct nonce while `tx_2` has the nonce right after + /// that. + /// + /// [mempool]: inner::MempoolInner + /// [ready]: inner::TransactionIntentReady; + /// [pending]: inner::TransactionInentPending; + #[rstest::rstest] + #[timeout(Duration::from_millis(1_000))] + fn mempool_readiness_check_against_db( + backend: Arc, + l1_data_provider: Arc, + #[from(tx_account_v0_valid)] tx_1: blockifier::transaction::transaction_execution::Transaction, + #[from(tx_account_v0_valid)] tx_2: blockifier::transaction::transaction_execution::Transaction, + ) { + let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); + + // Insert the first transaction + + let nonce_info = mempool.retrieve_nonce_info(Felt::ZERO, Felt::ZERO).expect("Failed to retrieve nonce info"); + let timestamp_1 = ArrivedAtTimestamp::now(); + let result = mempool.accept_tx(tx_1, None, timestamp_1, nonce_info); + assert_matches::assert_matches!(result, Ok(())); + + let inner = mempool.inner.read().expect("Poisoned lock"); + inner.check_invariants(); + let contains = inner.tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: Felt::ZERO, + timestamp: timestamp_1, + nonce: Nonce(Felt::ZERO), + nonce_next: Nonce(Felt::ONE), + phantom: Default::default(), + }); + assert!( + contains, + "Mempool should contain transaction 1, ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + + 
assert_eq!(inner.tx_intent_queue_ready.len(), 1); + + drop(inner); + + // Insert the next transaction. It should be marked as ready even if it + // does not have the correct nonce compared to the db (the test db + // defaults to nonce 0 for all contracts) since the transaction before + // it has already been marked as ready in the mempool. + + let nonce_info = mempool.retrieve_nonce_info(Felt::ZERO, Felt::ONE).expect("Failed to retrieve nonce info"); + let timestamp_2 = ArrivedAtTimestamp::now(); + let result = mempool.accept_tx(tx_2, None, timestamp_2, nonce_info); + assert_matches::assert_matches!(result, Ok(())); + + let inner = mempool.inner.read().expect("Poisoned lock"); + inner.check_invariants(); + let contains = inner.tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: Felt::ZERO, + timestamp: timestamp_2, + nonce: Nonce(Felt::ONE), + nonce_next: Nonce(Felt::TWO), + phantom: Default::default(), + }); + assert!( + contains, + "Mempool should contain transaction 2, ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + + assert_eq!(inner.tx_intent_queue_ready.len(), 2); + } + + /// This tests makes sure that a transaction is marked as [ready] if its + /// [Nonce] is in the nonce cache for that contract address. + /// + /// This is used to check the readiness of transactions before committing + /// the changes to db. + /// + /// # Setup: + /// + /// - We assume `tx_ready` and `tx_pending` are from the same contract. + /// + /// - `tx_ready` has the correct nonce while `tx_pending` has the nonce + /// right after that. 
+ /// + /// [ready]: inner::TransactionIntentReady; + #[rstest::rstest] + #[timeout(Duration::from_millis(1_000))] + fn mempool_readiness_check_agains_nonce_cache( + backend: Arc, + l1_data_provider: Arc, + ) { + let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); + + let contract_address = Felt::ZERO; + let nonce = Nonce(Felt::ONE); + let nonce_next = nonce.try_increment().unwrap(); + mempool.nonce_cache.write().expect("Poisoned lock").insert(contract_address, nonce); + + // Despite the tx nonce being Felt::ONE, it should be marked as ready + // since it is present in the nonce cache + assert_eq!(mempool.retrieve_nonce_info(contract_address, *nonce).unwrap(), NonceInfo::ready(nonce, nonce_next)); + + // We remove the nonce cache at that address + mempool.tx_mark_included(&contract_address); + + // Now the nonce should be marked as pending + assert_eq!( + mempool.retrieve_nonce_info(contract_address, *nonce).unwrap(), + NonceInfo::pending(nonce, nonce_next) + ); + } + + /// This test makes sure that retrieve nonce_info has proper error handling + /// and returns correct values based on the state of the db. + #[rstest::rstest] + #[timeout(Duration::from_millis(1_000))] + fn mempool_retrieve_nonce_info(backend: Arc, l1_data_provider: Arc) { + let mempool = Mempool::new(Arc::clone(&backend), l1_data_provider, MempoolLimits::for_testing()); + + backend + .store_block( + MadaraMaybePendingBlock { + info: MadaraMaybePendingBlockInfo::NotPending(MadaraBlockInfo::default()), + inner: MadaraBlockInner::default(), + }, + StateDiff { + nonces: vec![NonceUpdate { contract_address: Felt::ZERO, nonce: Felt::ZERO }], + ..Default::default() + }, + vec![], + None, + None, + ) + .expect("Failed to store block"); + + // Transactions which meet the nonce in db should be marked as ready... 
+ + assert_eq!( + mempool.retrieve_nonce_info(Felt::ZERO, Felt::ZERO).unwrap(), + NonceInfo { readiness: NonceStatus::Ready, nonce: Nonce(Felt::ZERO), nonce_next: Nonce(Felt::ONE) } + ); + + // ...otherwise they should be marked as pending + + assert_eq!( + mempool.retrieve_nonce_info(Felt::ZERO, Felt::ONE).unwrap(), + NonceInfo { readiness: NonceStatus::Pending, nonce: Nonce(Felt::ONE), nonce_next: Nonce(Felt::TWO) } + ); + + // Contract addresses which do not yet have a nonce stored in db + should not fail during nonce info retrieval + + assert_eq!( + mempool.retrieve_nonce_info(Felt::ONE, Felt::ZERO).unwrap(), + NonceInfo { readiness: NonceStatus::Ready, nonce: Nonce(Felt::ZERO), nonce_next: Nonce(Felt::ONE) } + ); + + // Transactions with a nonce less than that in db should fail during + // nonce info retrieval + + backend + .store_block( + MadaraMaybePendingBlock { + info: MadaraMaybePendingBlockInfo::NotPending(MadaraBlockInfo::default()), + inner: MadaraBlockInner::default(), + }, + StateDiff { + nonces: vec![NonceUpdate { contract_address: Felt::ZERO, nonce: Felt::ONE }], + ..Default::default() + }, + vec![], + None, + None, + ) + .expect("Failed to store block"); + + assert_matches::assert_matches!( + mempool.retrieve_nonce_info(Felt::ZERO, Felt::ZERO), + Err(MempoolError::StorageError(MadaraStorageError::InvalidNonce)) + ); + + // We need to compute the next nonce inside retrieve nonce_info, so + // passing Felt::MAX is not allowed. + assert_matches::assert_matches!( + mempool.retrieve_nonce_info(Felt::ZERO, Felt::MAX), + Err(MempoolError::StarknetApi(StarknetApiError::OutOfRange { .. })) + ); + } + + /// This test checks the replacement logic for the [mempool] in case of force + /// inserts. + /// + /// # Setup: + /// + /// - `tx_1` and `tx_2` are from the same account, `tx_3` is not. + /// + /// - `tx_1` and `tx_2` share the same nonce but a different timestamp.
+ /// + /// [mempool]: inner::MempoolInner + #[rstest::rstest] + #[timeout(Duration::from_millis(1_000))] + fn mempool_replace_pass( + backend: Arc, + l1_data_provider: Arc, + #[from(tx_account_v0_valid)] tx_1: blockifier::transaction::transaction_execution::Transaction, + #[from(tx_account_v0_valid)] tx_2: blockifier::transaction::transaction_execution::Transaction, + #[from(tx_account_v0_valid)] + #[with(Felt::ONE)] + tx_3: blockifier::transaction::transaction_execution::Transaction, + ) { + let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); + + // Insert the first transaction + + let force = true; + let update_tx_limits = true; + + let arrived_at = ArrivedAtTimestamp::now(); + let nonce_info = NonceInfo::ready(Nonce(Felt::ZERO), Nonce(Felt::ONE)); + let tx_1_mempool = MempoolTransaction { + tx: tx_1, + arrived_at, + converted_class: None, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + }; + let result = mempool.inner.write().expect("Poisoned lock").insert_tx( + tx_1_mempool.clone(), + force, + update_tx_limits, + nonce_info, + ); + assert_matches::assert_matches!(result, Ok(())); + + let inner = mempool.inner.read().expect("Poisoned lock"); + let contains = inner.tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: **tx_1_mempool.contract_address(), + timestamp: tx_1_mempool.arrived_at, + nonce: tx_1_mempool.nonce, + nonce_next: tx_1_mempool.nonce_next, + phantom: std::marker::PhantomData, + }); + assert!( + contains, + "Mempool should contain transaction 1, ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + + assert_eq!(inner.tx_intent_queue_ready.len(), 1); + + inner.check_invariants(); + + drop(inner); + + // Inserts the second transaction with the same nonce as the first one.
+ // This should replace the first transaction. + + let force = true; + let update_tx_limits = true; + + let arrived_at = ArrivedAtTimestamp::now(); + let nonce_info = NonceInfo::ready(Nonce(Felt::ZERO), Nonce(Felt::ONE)); + let tx_2_mempool = MempoolTransaction { + tx: tx_2, + arrived_at, + converted_class: None, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + }; + let result = mempool.inner.write().expect("Poisoned lock").insert_tx( + tx_2_mempool.clone(), + force, + update_tx_limits, + nonce_info, + ); + assert_matches::assert_matches!(result, Ok(())); + + let inner = mempool.inner.read().expect("Poisoned lock"); + let contains = inner.tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: **tx_2_mempool.contract_address(), + timestamp: tx_2_mempool.arrived_at, + nonce: tx_2_mempool.nonce, + nonce_next: tx_2_mempool.nonce_next, + phantom: std::marker::PhantomData, + }); + assert!( + contains, + "mempool should contain transaction 2, ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("poisoned lock").tx_intent_queue_pending_by_nonce + ); + let contains = inner.tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: **tx_1_mempool.contract_address(), + timestamp: tx_1_mempool.arrived_at, + nonce: tx_1_mempool.nonce, + nonce_next: tx_1_mempool.nonce_next, + phantom: std::marker::PhantomData, + }); + assert!( + !contains, + "Mempool should not contain transaction 1 after it has been replaced, ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + + assert_eq!(inner.tx_intent_queue_ready.len(), 1); + + inner.check_invariants(); + + drop(inner); + + // Inserts the third transaction. 
This should not trigger a replacement + // since it has a different contract address + + let force = true; + let update_tx_limits = true; + + let arrived_at = ArrivedAtTimestamp::now(); + let nonce_info = NonceInfo::ready(Nonce(Felt::ZERO), Nonce(Felt::ONE)); + let tx_3_mempool = MempoolTransaction { + tx: tx_3, + arrived_at, + converted_class: None, + nonce: nonce_info.nonce, + nonce_next: nonce_info.nonce_next, + }; + let result = mempool.inner.write().expect("Poisoned lock").insert_tx( + tx_3_mempool.clone(), + force, + update_tx_limits, + nonce_info.clone(), + ); + assert_matches::assert_matches!(result, Ok(())); + + let inner = mempool.inner.read().expect("Poisoned lock"); + let contains = inner.tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: **tx_3_mempool.contract_address(), + timestamp: tx_3_mempool.arrived_at, + nonce: tx_3_mempool.nonce, + nonce_next: tx_3_mempool.nonce_next, + phantom: std::marker::PhantomData, + }); + assert!( + contains, + "Mempool should contain transaction 3, ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + let contains = inner.tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: **tx_2_mempool.contract_address(), + timestamp: tx_2_mempool.arrived_at, + nonce: tx_2_mempool.nonce, + nonce_next: tx_2_mempool.nonce_next, + phantom: std::marker::PhantomData, + }); + assert!( + contains, + "mempool should contain transaction 2, ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("poisoned lock").tx_intent_queue_pending_by_nonce + ); + + assert_eq!(inner.tx_intent_queue_ready.len(), 2); + + inner.check_invariants(); + + drop(inner); + + // Inserting a transaction again should 
fail if force if false. This + // should not change the state of the mempool whatsoever! + + let force = false; + let update_tx_limits = true; + + let result = mempool.inner.write().expect("Poisoned lock").insert_tx( + tx_3_mempool.clone(), + force, + update_tx_limits, + nonce_info, + ); + assert_eq!(result, Err(TxInsertionError::DuplicateTxn)); + + let inner = mempool.inner.read().expect("Poisoned lock"); + let contains = inner.tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: **tx_3_mempool.contract_address(), + timestamp: tx_3_mempool.arrived_at, + nonce: tx_3_mempool.nonce, + nonce_next: tx_3_mempool.nonce_next, + phantom: std::marker::PhantomData, + }); + assert!( + contains, + "Mempool should contain transaction 3, ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("Poisoned lock").tx_intent_queue_pending_by_nonce + ); + let contains = inner.tx_intent_queue_ready.contains(&TransactionIntentReady { + contract_address: **tx_2_mempool.contract_address(), + timestamp: tx_2_mempool.arrived_at, + nonce: tx_2_mempool.nonce, + nonce_next: tx_2_mempool.nonce_next, + phantom: std::marker::PhantomData, + }); + assert!( + contains, + "mempool should contain transaction 2, ready transaction intents are: {:#?}\npending transaction intents are: {:#?}", + mempool.inner.read().expect("poisoned lock").tx_intent_queue_ready, + mempool.inner.read().expect("poisoned lock").tx_intent_queue_pending_by_nonce + ); + + assert_eq!(inner.tx_intent_queue_ready.len(), 2); + + inner.check_invariants(); } } diff --git a/crates/madara/client/rpc/src/providers/mempool.rs b/crates/madara/client/rpc/src/providers/mempool.rs index 9ce43a410..c4c24b0a5 100644 --- a/crates/madara/client/rpc/src/providers/mempool.rs +++ b/crates/madara/client/rpc/src/providers/mempool.rs @@ -13,6 +13,8 @@ use std::sync::Arc; /// This [`AddTransactionProvider`] 
adds the received transactions to a mempool. pub struct MempoolAddTxProvider { + // PERF: this can go as we are always wrapping MempoolAddTxProvider inside + // Arc mempool: Arc, } @@ -22,24 +24,24 @@ impl MempoolAddTxProvider { } } -impl From for StarknetRpcApiError { - fn from(value: mc_mempool::Error) -> Self { +impl From for StarknetRpcApiError { + fn from(value: mc_mempool::MempoolError) -> Self { match value { - mc_mempool::Error::InnerMempool(mc_mempool::TxInsersionError::DuplicateTxn) => { + mc_mempool::MempoolError::InnerMempool(mc_mempool::TxInsertionError::DuplicateTxn) => { StarknetRpcApiError::DuplicateTxn } - mc_mempool::Error::InnerMempool(mc_mempool::TxInsersionError::Limit(limit)) => { + mc_mempool::MempoolError::InnerMempool(mc_mempool::TxInsertionError::Limit(limit)) => { StarknetRpcApiError::FailedToReceiveTxn { err: Some(format!("{}", limit).into()) } } - mc_mempool::Error::InnerMempool(mc_mempool::TxInsersionError::NonceConflict) => { + mc_mempool::MempoolError::InnerMempool(mc_mempool::TxInsertionError::NonceConflict) => { StarknetRpcApiError::FailedToReceiveTxn { err: Some("A transaction with this nonce and sender address already exists".into()), } } - mc_mempool::Error::Validation(err) => { + mc_mempool::MempoolError::Validation(err) => { StarknetRpcApiError::ValidationFailure { error: format!("{err:#}").into() } } - mc_mempool::Error::Exec(err) => { + mc_mempool::MempoolError::Exec(err) => { StarknetRpcApiError::TxnExecutionError { tx_index: 0, error: format!("{err:#}") } } err => { @@ -56,24 +58,24 @@ impl AddTransactionProvider for MempoolAddTxProvider { &self, declare_v0_transaction: BroadcastedDeclareTransactionV0, ) -> RpcResult> { - Ok(self.mempool.accept_declare_v0_tx(declare_v0_transaction).map_err(StarknetRpcApiError::from)?) + Ok(self.mempool.tx_accept_declare_v0(declare_v0_transaction).map_err(StarknetRpcApiError::from)?) 
} async fn add_declare_transaction( &self, declare_transaction: BroadcastedDeclareTxn, ) -> RpcResult> { - Ok(self.mempool.accept_declare_tx(declare_transaction).map_err(StarknetRpcApiError::from)?) + Ok(self.mempool.tx_accept_declare(declare_transaction).map_err(StarknetRpcApiError::from)?) } async fn add_deploy_account_transaction( &self, deploy_account_transaction: BroadcastedDeployAccountTxn, ) -> RpcResult> { - Ok(self.mempool.accept_deploy_account_tx(deploy_account_transaction).map_err(StarknetRpcApiError::from)?) + Ok(self.mempool.tx_accept_deploy_account(deploy_account_transaction).map_err(StarknetRpcApiError::from)?) } async fn add_invoke_transaction( &self, invoke_transaction: BroadcastedInvokeTxn, ) -> RpcResult> { - Ok(self.mempool.accept_invoke_tx(invoke_transaction).map_err(StarknetRpcApiError::from)?) + Ok(self.mempool.tx_accept_invoke(invoke_transaction).map_err(StarknetRpcApiError::from)?) } } diff --git a/scripts/js-tests.sh b/scripts/js-tests.sh new file mode 100755 index 000000000..095c138f3 --- /dev/null +++ b/scripts/js-tests.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +cargo build --bin madara --release +./target/release/madara \ + --name madara \ + --base-path ../madara_db \ + --rpc-port 9944 \ + --rpc-cors "*" \ + --rpc-external \ + --devnet \ + --preset devnet \ + --gas-price 0 \ + --blob-gas-price 0 \ + --strk-gas-price 0 \ + --strk-blob-gas-price 0 \ + --no-l1-sync & + +MADARA_PID=$! + +while ! echo exit | nc localhost 9944 + do sleep 1; +done + +cd tests/js_tests +npm install +npm test + +kill $MADARA_PID