diff --git a/bin/host/src/fetcher/hint.rs b/bin/host/src/fetcher/hint.rs index 54990c4a9..401bdc295 100644 --- a/bin/host/src/fetcher/hint.rs +++ b/bin/host/src/fetcher/hint.rs @@ -19,12 +19,17 @@ pub enum HintType { L2BlockHeader, /// A hint that specifies the transactions of a layer 2 block. L2Transactions, - /// A hint that specifies the state node in the L2 state trie. - L2StateNode, /// A hint that specifies the code of a contract on layer 2. L2Code, /// A hint that specifies the output root of a block on layer 2. L2Output, + /// A hint that specifies the state node in the L2 state trie. + L2StateNode, + /// A hint that specifies the proof on the path to an account in the L2 state trie. + L2AccountProof, + /// A hint that specifies the proof on the path to a storage slot in an account within in the + /// L2 state trie. + L2AccountStorageProof, } impl TryFrom<&str> for HintType { @@ -39,9 +44,11 @@ impl TryFrom<&str> for HintType { "l1-precompile" => Ok(HintType::L1Precompile), "l2-block-header" => Ok(HintType::L2BlockHeader), "l2-transactions" => Ok(HintType::L2Transactions), - "l2-state-node" => Ok(HintType::L2StateNode), "l2-code" => Ok(HintType::L2Code), "l2-output" => Ok(HintType::L2Output), + "l2-state-node" => Ok(HintType::L2StateNode), + "l2-account-proof" => Ok(HintType::L2AccountProof), + "l2-account-storage-proof" => Ok(HintType::L2AccountStorageProof), _ => anyhow::bail!("Invalid hint type: {value}"), } } @@ -57,9 +64,11 @@ impl From for &str { HintType::L1Precompile => "l1-precompile", HintType::L2BlockHeader => "l2-block-header", HintType::L2Transactions => "l2-transactions", - HintType::L2StateNode => "l2-state-node", HintType::L2Code => "l2-code", HintType::L2Output => "l2-output", + HintType::L2StateNode => "l2-state-node", + HintType::L2AccountProof => "l2-account-proof", + HintType::L2AccountStorageProof => "l2-account-storage-proof", } } } diff --git a/bin/host/src/fetcher/mod.rs b/bin/host/src/fetcher/mod.rs index 84e3bc09c..89b197d23 100644 --- a/bin/host/src/fetcher/mod.rs +++ b/bin/host/src/fetcher/mod.rs @@ -184,9 +184,72 @@ where } HintType::L2BlockHeader => todo!(), HintType::L2Transactions => todo!(), - HintType::L2StateNode => todo!(), HintType::L2Code => todo!(), HintType::L2Output => todo!(), + HintType::L2StateNode => todo!(), + HintType::L2AccountProof => { + if hint_data.len() != 8 + 20 { + anyhow::bail!("Invalid hint data length: {}", hint_data.len()); + } + + let block_number = u64::from_be_bytes( + hint_data.as_ref()[..8] + .try_into() + .map_err(|e| anyhow!("Error converting hint data to u64: {e}"))?, + ); + let address = Address::from_slice(&hint_data.as_ref()[8..]); + + let proof_response = self + .l2_provider + .get_proof(address, Default::default(), block_number.into()) + .await + .map_err(|e| anyhow!("Failed to fetch account proof: {e}"))?; + + let mut kv_write_lock = self.kv_store.write().await; + + // Write the account proof nodes to the key-value store. 
+ proof_response.account_proof.into_iter().for_each(|node| { + let node_hash = keccak256(node.as_ref()); + let key = PreimageKey::new(*node_hash, PreimageKeyType::Keccak256); + kv_write_lock.set(key.into(), node.into()); + }); + } + HintType::L2AccountStorageProof => { + if hint_data.len() != 8 + 20 + 32 { + anyhow::bail!("Invalid hint data length: {}", hint_data.len()); + } + + let block_number = u64::from_be_bytes( + hint_data.as_ref()[..8] + .try_into() + .map_err(|e| anyhow!("Error converting hint data to u64: {e}"))?, + ); + let address = Address::from_slice(&hint_data.as_ref()[8..]); + let slot = B256::from_slice(&hint_data.as_ref()[28..]); + + let mut proof_response = self + .l2_provider + .get_proof(address, vec![slot], block_number.into()) + .await + .map_err(|e| anyhow!("Failed to fetch account proof: {e}"))?; + + let mut kv_write_lock = self.kv_store.write().await; + + // Write the account proof nodes to the key-value store. + proof_response.account_proof.into_iter().for_each(|node| { + let node_hash = keccak256(node.as_ref()); + let key = PreimageKey::new(*node_hash, PreimageKeyType::Keccak256); + kv_write_lock.set(key.into(), node.into()); + }); + + // Write the storage proof nodes to the key-value store. + let storage_proof = proof_response.storage_proof.remove(0); + storage_proof.proof.into_iter().for_each(|node| { + let node_hash = keccak256(node.as_ref()); + let key = PreimageKey::new(*node_hash, PreimageKeyType::Keccak256); + kv_write_lock.set(key.into(), node.into()); + }); + } } Ok(()) diff --git a/bin/programs/client/src/l2/executor/canyon.rs b/bin/programs/client/src/l2/executor/canyon.rs index 5687b26c9..4880d0fd2 100644 --- a/bin/programs/client/src/l2/executor/canyon.rs +++ b/bin/programs/client/src/l2/executor/canyon.rs @@ -1,8 +1,9 @@ //! Contains logic specific to Canyon hardfork activation. -use alloy_consensus::Header; use alloy_primitives::{address, b256, hex, Address, Bytes, B256}; +use anyhow::Result; use kona_derive::types::RollupConfig; +use kona_mpt::{TrieDB, TrieDBFetcher, TrieDBHinter}; use revm::{ primitives::{Account, Bytecode, HashMap}, DatabaseCommit, State, @@ -21,18 +22,20 @@ const CREATE_2_DEPLOYER_BYTECODE: [u8; 1584] = hex!("608060405260043610610043576 /// The Canyon hardfork issues an irregular state transition that force-deploys the create2 /// deployer contract. This is done by directly setting the code of the create2 deployer account /// prior to executing any transactions on the timestamp activation of the fork. -pub(crate) fn ensure_create2_deployer_canyon( - db: &mut State, +pub(crate) fn ensure_create2_deployer_canyon( + db: &mut State>, config: &RollupConfig, timestamp: u64, - parent_header: &Header, -) -> Result<(), DB::Error> +) -> Result<()> where - DB: revm::Database, + F: TrieDBFetcher, + H: TrieDBHinter, { // If the canyon hardfork is active at the current timestamp, and it was not active at the // previous block timestamp, then we need to force-deploy the create2 deployer contract. - if config.is_canyon_active(timestamp) && !config.is_canyon_active(parent_header.timestamp) { + if config.is_canyon_active(timestamp) && + !config.is_canyon_active(db.database.parent_block_header().timestamp) + { // Load the create2 deployer account from the cache. 
let acc = db.load_cache_account(CREATE_2_DEPLOYER_ADDR)?; diff --git a/bin/programs/client/src/l2/executor/fetcher.rs b/bin/programs/client/src/l2/executor/fetcher.rs index c68ac55c8..7cd10be43 100644 --- a/bin/programs/client/src/l2/executor/fetcher.rs +++ b/bin/programs/client/src/l2/executor/fetcher.rs @@ -2,13 +2,13 @@ //! //! [TrieDB]: kona_mpt::TrieDB -use crate::CachingOracle; +use crate::{CachingOracle, HINT_WRITER}; use alloy_consensus::Header; -use alloy_primitives::{Bytes, B256}; +use alloy_primitives::{hex, Address, Bytes, B256}; use alloy_rlp::Decodable; use anyhow::{anyhow, Result}; -use kona_mpt::TrieDBFetcher; -use kona_preimage::{PreimageKey, PreimageKeyType, PreimageOracleClient}; +use kona_mpt::{TrieDBFetcher, TrieDBHinter}; +use kona_preimage::{HintWriterClient, PreimageKey, PreimageKeyType, PreimageOracleClient}; /// The [TrieDBFetcher] implementation for the block executor's [TrieDB]. /// @@ -56,3 +56,49 @@ impl<'a, const N: usize> TrieDBFetcher for TrieDBProvider<'a, N> { }) } } + +/// The [TrieDBHinter] implementation for the block executor's [TrieDB]. +/// +/// [TrieDB]: kona_mpt::TrieDB +#[derive(Debug)] +pub struct TrieDBHintWriter; + +impl TrieDBHinter for TrieDBHintWriter { + fn hint_trie_node(&self, hash: B256) -> Result<()> { + kona_common::block_on(async move { + HINT_WRITER + .write(&alloc::format!("l2-state-node {}", hex::encode(hash.as_slice()))) + .await + }) + } + + fn hint_account_proof(&self, address: Address, block_number: u64) -> Result<()> { + kona_common::block_on(async move { + HINT_WRITER + .write(&alloc::format!( + "l2-account-proof {}{}", + hex::encode(block_number.to_be_bytes()), + hex::encode(address.as_slice()) + )) + .await + }) + } + + fn hint_storage_proof( + &self, + address: alloy_primitives::Address, + slot: alloy_primitives::U256, + block_number: u64, + ) -> Result<()> { + kona_common::block_on(async move { + HINT_WRITER + .write(&alloc::format!( + "l2-account-storage-proof {}{}{}", + hex::encode(block_number.to_be_bytes()), + hex::encode(address.as_slice()), + hex::encode(slot.to_be_bytes::<32>()) + )) + .await + }) + } +} diff --git a/bin/programs/client/src/l2/executor/mod.rs b/bin/programs/client/src/l2/executor/mod.rs index f4d77c59d..56a0f2a0a 100644 --- a/bin/programs/client/src/l2/executor/mod.rs +++ b/bin/programs/client/src/l2/executor/mod.rs @@ -7,7 +7,7 @@ use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{address, keccak256, Address, Bytes, TxKind, B256, U256}; use anyhow::{anyhow, Result}; use kona_derive::types::{L2PayloadAttributes, RawTransaction, RollupConfig}; -use kona_mpt::{ordered_trie_with_encoder, TrieDB, TrieDBFetcher}; +use kona_mpt::{ordered_trie_with_encoder, TrieDB, TrieDBFetcher, TrieDBHinter}; use op_alloy_consensus::{OpReceipt, OpReceiptEnvelope, OpReceiptWithBloom, OpTxEnvelope}; use revm::{ db::{states::bundle_state::BundleRetention, State}, @@ -19,7 +19,7 @@ use revm::{ }; mod fetcher; -pub use fetcher::TrieDBProvider; +pub use fetcher::{TrieDBHintWriter, TrieDBProvider}; mod eip4788; pub(crate) use eip4788::pre_block_beacon_root_contract_call; @@ -35,34 +35,40 @@ use self::util::{extract_tx_gas_limit, is_system_transaction}; /// The block executor for the L2 client program. Operates off of a [TrieDB] backed [State], /// allowing for stateless block execution of OP Stack blocks. #[derive(Debug)] -pub struct StatelessL2BlockExecutor +pub struct StatelessL2BlockExecutor where F: TrieDBFetcher, + H: TrieDBHinter, { /// The [RollupConfig]. 
     config: Arc<RollupConfig>,
-    /// The parent header
-    parent_header: Sealed<Header>,
     /// The inner state database component.
-    state: State<TrieDB<F>>,
+    state: State<TrieDB<F, H>>,
 }

-impl<F> StatelessL2BlockExecutor<F>
+impl<F, H> StatelessL2BlockExecutor<F, H>
 where
     F: TrieDBFetcher,
+    H: TrieDBHinter,
 {
     /// Constructs a new [StatelessL2BlockExecutor] with the given starting state root, parent hash,
     /// and [TrieDBFetcher].
-    pub fn new(config: Arc<RollupConfig>, parent_header: Sealed<Header>, fetcher: F) -> Self {
-        let trie_db = TrieDB::new(parent_header.state_root, parent_header.seal(), fetcher);
+    pub fn new(
+        config: Arc<RollupConfig>,
+        parent_header: Sealed<Header>
, + fetcher: F, + hinter: H, + ) -> Self { + let trie_db = TrieDB::new(parent_header.state_root, parent_header, fetcher, hinter); let state = StateBuilder::new_with_database(trie_db).with_bundle_update().build(); - Self { config, parent_header, state } + Self { config, state } } } -impl StatelessL2BlockExecutor +impl StatelessL2BlockExecutor where F: TrieDBFetcher, + H: TrieDBHinter, { /// Executes the given block, returning the resulting state root. /// @@ -85,7 +91,7 @@ where let initialized_block_env = Self::prepare_block_env( self.revm_spec_id(payload.timestamp), self.config.as_ref(), - &self.parent_header, + self.state.database.parent_block_header(), &payload, ); let initialized_cfg = self.evm_cfg_env(payload.timestamp); @@ -105,12 +111,7 @@ where )?; // Ensure that the create2 contract is deployed upon transition to the Canyon hardfork. - ensure_create2_deployer_canyon( - &mut self.state, - self.config.as_ref(), - payload.timestamp, - &self.parent_header, - )?; + ensure_create2_deployer_canyon(&mut self.state, self.config.as_ref(), payload.timestamp)?; // Construct the EVM with the given configuration. // TODO(clabby): Accelerate precompiles w/ custom precompile handler. @@ -130,7 +131,6 @@ where for (transaction, raw_transaction) in transactions { // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, // must be no greater than the block’s gasLimit. - // let block_available_gas = gas_limit - cumulative_gas_used; let block_available_gas = (gas_limit - cumulative_gas_used) as u128; if extract_tx_gas_limit(&transaction) > block_available_gas && (is_regolith || !is_system_transaction(&transaction)) @@ -225,11 +225,10 @@ where .config .is_ecotone_active(payload.timestamp) .then(|| { - let excess_blob_gas = if self.config.is_ecotone_active(self.parent_header.timestamp) - { - let parent_excess_blob_gas = - self.parent_header.excess_blob_gas.unwrap_or_default(); - let parent_blob_gas_used = self.parent_header.blob_gas_used.unwrap_or_default(); + let parent_header = self.state.database.parent_block_header(); + let excess_blob_gas = if self.config.is_ecotone_active(parent_header.timestamp) { + let parent_excess_blob_gas = parent_header.excess_blob_gas.unwrap_or_default(); + let parent_blob_gas_used = parent_header.blob_gas_used.unwrap_or_default(); calc_excess_blob_gas(parent_excess_blob_gas as u64, parent_blob_gas_used as u64) } else { // For the first post-fork block, both blob gas fields are evaluated to 0. @@ -242,7 +241,7 @@ where // Construct the new header. let header = Header { - parent_hash: self.parent_header.seal(), + parent_hash: self.state.database.parent_block_header().seal(), ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: payload.fee_recipient, state_root, @@ -267,12 +266,9 @@ where .seal_slow(); // Update the parent block hash in the state database. - self.state.database.set_parent_block_hash(header.seal()); + self.state.database.set_parent_block_header(header); - // Update the parent header in the executor. - self.parent_header = header; - - Ok(&self.parent_header) + Ok(self.state.database.parent_block_header()) } /// Computes the current output root of the executor, based on the parent header and the @@ -308,11 +304,12 @@ where }; // Construct the raw output. 
+ let parent_header = self.state.database.parent_block_header(); let mut raw_output = [0u8; 97]; raw_output[0] = OUTPUT_ROOT_VERSION; - raw_output[1..33].copy_from_slice(self.parent_header.state_root.as_ref()); + raw_output[1..33].copy_from_slice(parent_header.state_root.as_ref()); raw_output[33..65].copy_from_slice(storage_root.as_ref()); - raw_output[65..97].copy_from_slice(self.parent_header.seal().as_ref()); + raw_output[65..97].copy_from_slice(parent_header.seal().as_ref()); // Hash the output and return Ok(keccak256(raw_output)) @@ -584,6 +581,7 @@ mod test { use super::*; use alloy_primitives::{address, b256, hex}; use alloy_rlp::Decodable; + use kona_mpt::NoopTrieDBHinter; use serde::Deserialize; use std::{collections::HashMap, format}; @@ -656,6 +654,7 @@ mod test { Arc::new(rollup_config), header.seal_slow(), TestdataTrieDBFetcher::new("block_120794432_exec"), + NoopTrieDBHinter, ); let raw_tx = hex!("7ef8f8a003b511b9b71520cd62cad3b5fd5b1b8eaebd658447723c31c7f1eba87cfe98c894deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e2000000558000c5fc5000000000000000300000000665a33a70000000001310e960000000000000000000000000000000000000000000000000000000214d2697300000000000000000000000000000000000000000000000000000000000000015346d208a396843018a2e666c8e7832067358433fb87ca421273c6a4e69f78d50000000000000000000000006887246668a3b87f54deb3b94ba47a6f63f32985"); @@ -674,7 +673,10 @@ mod test { let produced_header = l2_block_executor.execute_payload(payload_attrs).unwrap().clone(); assert_eq!(produced_header, expected_header); - assert_eq!(l2_block_executor.parent_header.seal(), expected_header.hash_slow()); + assert_eq!( + l2_block_executor.state.database.parent_block_header().seal(), + expected_header.hash_slow() + ); } #[test] @@ -703,6 +705,7 @@ mod test { Arc::new(rollup_config), parent_header.seal_slow(), TestdataTrieDBFetcher::new("block_121049889_exec"), + NoopTrieDBHinter, ); let raw_txs = alloc::vec![ @@ -725,7 +728,10 @@ mod test { let produced_header = l2_block_executor.execute_payload(payload_attrs).unwrap().clone(); assert_eq!(produced_header, expected_header); - assert_eq!(l2_block_executor.parent_header.seal(), expected_header.hash_slow()); + assert_eq!( + l2_block_executor.state.database.parent_block_header().seal(), + expected_header.hash_slow() + ); } #[test] @@ -754,6 +760,7 @@ mod test { Arc::new(rollup_config), parent_header.seal_slow(), TestdataTrieDBFetcher::new("block_121003241_exec"), + NoopTrieDBHinter, ); let raw_txs = alloc::vec![ @@ -783,7 +790,10 @@ mod test { let produced_header = l2_block_executor.execute_payload(payload_attrs).unwrap().clone(); assert_eq!(produced_header, expected_header); - assert_eq!(l2_block_executor.parent_header.seal(), expected_header.hash_slow()); + assert_eq!( + l2_block_executor.state.database.parent_block_header().seal(), + expected_header.hash_slow() + ); } #[test] @@ -812,6 +822,7 @@ mod test { Arc::new(rollup_config), parent_header.seal_slow(), TestdataTrieDBFetcher::new("block_121057303_exec"), + NoopTrieDBHinter, ); let raw_txs = alloc::vec![ @@ -835,7 +846,10 @@ mod test { let produced_header = l2_block_executor.execute_payload(payload_attrs).unwrap().clone(); assert_eq!(produced_header, expected_header); - assert_eq!(l2_block_executor.parent_header.seal(), expected_header.hash_slow()); + assert_eq!( + l2_block_executor.state.database.parent_block_header().seal(), + expected_header.hash_slow() + ); } #[test] @@ -864,6 +878,7 @@ mod test { Arc::new(rollup_config), 
parent_header.seal_slow(), TestdataTrieDBFetcher::new("block_121065789_exec"), + NoopTrieDBHinter, ); let raw_txs = alloc::vec![ @@ -896,7 +911,10 @@ mod test { let produced_header = l2_block_executor.execute_payload(payload_attrs).unwrap().clone(); assert_eq!(produced_header, expected_header); - assert_eq!(l2_block_executor.parent_header.seal(), expected_header.hash_slow()); + assert_eq!( + l2_block_executor.state.database.parent_block_header().seal(), + expected_header.hash_slow() + ); } #[test] @@ -925,6 +943,7 @@ mod test { Arc::new(rollup_config), parent_header.seal_slow(), TestdataTrieDBFetcher::new("block_121135704_exec"), + NoopTrieDBHinter, ); let raw_txs = alloc::vec![ @@ -962,6 +981,9 @@ mod test { let produced_header = l2_block_executor.execute_payload(payload_attrs).unwrap().clone(); assert_eq!(produced_header, expected_header); - assert_eq!(l2_block_executor.parent_header.seal(), expected_header.hash_slow()); + assert_eq!( + l2_block_executor.state.database.parent_block_header().seal(), + expected_header.hash_slow() + ); } } diff --git a/bin/programs/client/src/l2/mod.rs b/bin/programs/client/src/l2/mod.rs index f4b12dbe5..c0c13b8ba 100644 --- a/bin/programs/client/src/l2/mod.rs +++ b/bin/programs/client/src/l2/mod.rs @@ -2,4 +2,4 @@ //! [StatelessL2BlockExecutor] mod executor; -pub use executor::{StatelessL2BlockExecutor, TrieDBProvider}; +pub use executor::{StatelessL2BlockExecutor, TrieDBHintWriter, TrieDBProvider}; diff --git a/crates/mpt/src/db/mod.rs b/crates/mpt/src/db/mod.rs index 0031c244e..92da669c6 100644 --- a/crates/mpt/src/db/mod.rs +++ b/crates/mpt/src/db/mod.rs @@ -1,9 +1,9 @@ //! This module contains an implementation of an in-memory Trie DB for [revm], that allows for //! incremental updates through fetching node preimages on the fly during execution. -use crate::{TrieDBFetcher, TrieNode}; +use crate::{TrieDBFetcher, TrieDBHinter, TrieNode}; use alloc::vec::Vec; -use alloy_consensus::EMPTY_ROOT_HASH; +use alloy_consensus::{Header, Sealed, EMPTY_ROOT_HASH}; use alloy_primitives::{keccak256, Address, B256, U256}; use alloy_rlp::{Decodable, Encodable}; use alloy_trie::Nibbles; @@ -44,16 +44,21 @@ pub use account::TrieAccount; /// /// **Example Construction**: /// ```rust -/// use alloy_consensus::Header; +/// use alloy_consensus::{Header, Sealable}; /// use alloy_primitives::{Bytes, B256}; /// use anyhow::Result; -/// use kona_mpt::{NoopTrieDBFetcher, TrieDB}; +/// use kona_mpt::{NoopTrieDBFetcher, NoopTrieDBHinter, TrieDB}; /// use revm::{db::states::bundle_state::BundleRetention, EvmBuilder, StateBuilder}; /// /// let mock_starting_root = B256::default(); -/// let mock_parent_block_hash = B256::default(); +/// let mock_parent_block_header = Header::default(); /// -/// let trie_db = TrieDB::new(mock_starting_root, mock_parent_block_hash, NoopTrieDBFetcher); +/// let trie_db = TrieDB::new( +/// mock_starting_root, +/// mock_parent_block_header.seal_slow(), +/// NoopTrieDBFetcher, +/// NoopTrieDBHinter, +/// ); /// let mut state = StateBuilder::new_with_database(trie_db).with_bundle_update().build(); /// let evm = EvmBuilder::default().with_db(&mut state).build(); /// @@ -69,31 +74,36 @@ pub use account::TrieAccount; /// /// [State]: revm::State #[derive(Debug, Clone)] -pub struct TrieDB +pub struct TrieDB where F: TrieDBFetcher, + H: TrieDBHinter, { /// The [TrieNode] representation of the root node. root_node: TrieNode, /// Storage roots of accounts within the trie. storage_roots: HashMap, /// The parent block hash of the current block. 
-    parent_block_hash: B256,
+    parent_block_header: Sealed<Header>,
     /// The [TrieDBFetcher]
     fetcher: F,
+    /// The [TrieDBHinter]
+    hinter: H,
 }

-impl<F> TrieDB<F>
+impl<F, H> TrieDB<F, H>
 where
     F: TrieDBFetcher,
+    H: TrieDBHinter,
 {
     /// Creates a new [TrieDB] with the given root node.
-    pub fn new(root: B256, parent_block_hash: B256, fetcher: F) -> Self {
+    pub fn new(root: B256, parent_block_header: Sealed<Header>, fetcher: F, hinter: H) -> Self {
         Self {
             root_node: TrieNode::new_blinded(root),
             storage_roots: Default::default(),
-            parent_block_hash,
+            parent_block_header,
             fetcher,
+            hinter,
         }
     }

@@ -153,13 +163,18 @@ where
         self.root_node.blinded_commitment().ok_or(anyhow!("State root node is not a blinded node"))
     }

-    /// Sets the parent block hash of the trie DB. Should be called after a block has been executed
-    /// and the Header has been created.
+    /// Returns a reference to the current parent block header of the trie DB.
+    pub fn parent_block_header(&self) -> &Sealed<Header> {
+        &self.parent_block_header
+    }
+
+    /// Sets the parent block header of the trie DB. Should be called after a block has been
+    /// executed and the Header has been created.
     ///
     /// ## Takes
-    /// - `parent_block_hash`: The parent block hash of the current block.
-    pub fn set_parent_block_hash(&mut self, parent_block_hash: B256) {
-        self.parent_block_hash = parent_block_hash;
+    /// - `parent_block_header`: The parent block header of the current block.
+    pub fn set_parent_block_header(&mut self, parent_block_header: Sealed<Header>
) { + self.parent_block_header = parent_block_header; } /// Fetches the [TrieAccount] of an account from the trie DB. @@ -172,6 +187,9 @@ where /// - `Ok(None)`: If the account does not exist in the trie. /// - `Err(_)`: If the account could not be fetched. pub fn get_trie_account(&mut self, address: &Address) -> Result> { + // Send a hint to the host to fetch the account proof. + self.hinter.hint_account_proof(*address, self.parent_block_header.number)?; + // Fetch the account from the trie. let hashed_address_nibbles = Nibbles::unpack(keccak256(address.as_slice())); let Some(trie_account_rlp) = self.root_node.open(&hashed_address_nibbles, &self.fetcher)? @@ -200,7 +218,7 @@ where // If the account was destroyed, delete it from the trie. if bundle_account.was_destroyed() { - self.root_node.delete(&account_path, &self.fetcher)?; + self.root_node.delete(&account_path, &self.fetcher, &self.hinter)?; self.storage_roots.remove(address); continue; } @@ -220,7 +238,13 @@ where .entry(*address) .or_insert_with(|| TrieNode::new_blinded(EMPTY_ROOT_HASH)); bundle_account.storage.iter().try_for_each(|(index, value)| { - Self::change_storage(acc_storage_root, *index, value.present_value, &self.fetcher) + Self::change_storage( + acc_storage_root, + *index, + value.present_value, + &self.fetcher, + &self.hinter, + ) })?; // Recompute the account storage root. @@ -257,6 +281,7 @@ where index: U256, value: U256, fetcher: &F, + hinter: &H, ) -> Result<()> { // RLP encode the storage slot value. let mut rlp_buf = Vec::with_capacity(value.length()); @@ -266,7 +291,7 @@ where let hashed_slot_key = Nibbles::unpack(keccak256(index.to_be_bytes::<32>().as_slice())); if value.is_zero() { // If the storage slot is being set to zero, prune it from the trie. - storage_root.delete(&hashed_slot_key, fetcher)?; + storage_root.delete(&hashed_slot_key, fetcher, hinter)?; } else { // Otherwise, update the storage slot. storage_root.insert(&hashed_slot_key, rlp_buf.into(), fetcher)?; @@ -276,9 +301,10 @@ where } } -impl Database for TrieDB +impl Database for TrieDB where F: TrieDBFetcher, + H: TrieDBHinter, { type Error = anyhow::Error; @@ -310,6 +336,9 @@ where } fn storage(&mut self, address: Address, index: U256) -> Result { + // Send a hint to the host to fetch the storage proof. + self.hinter.hint_storage_proof(address, index, self.parent_block_header.number)?; + // Fetch the account's storage root from the cache. If storage is being accessed, the // account should have been loaded into the cache by the `basic` method. If the account was // non-existing, the storage root will not be present. @@ -341,9 +370,8 @@ where // The block number is guaranteed to be within the range of a u64. let u64_block_number: u64 = block_number.to(); - // Fetch the block header from the preimage fetcher. - let mut block_hash = self.parent_block_hash; - let mut header = self.fetcher.header_by_hash(block_hash)?; + // Copy the current header + let mut header = self.parent_block_header.inner().clone(); // Check if the block number is in range. If not, we can fail early. if u64_block_number > header.number || @@ -354,10 +382,9 @@ where // Walk back the block headers to the desired block number. 
         while header.number > u64_block_number {
-            block_hash = header.parent_hash;
-            header = self.fetcher.header_by_hash(block_hash)?;
+            header = self.fetcher.header_by_hash(header.parent_hash)?;
         }

-        Ok(block_hash)
+        Ok(header.hash_slow())
     }
 }
diff --git a/crates/mpt/src/fetcher.rs b/crates/mpt/src/fetcher.rs
index 645b2c9b1..277cb584c 100644
--- a/crates/mpt/src/fetcher.rs
+++ b/crates/mpt/src/fetcher.rs
@@ -2,7 +2,7 @@
 //! headers.

 use alloy_consensus::Header;
-use alloy_primitives::{Bytes, B256};
+use alloy_primitives::{Address, Bytes, B256, U256};
 use anyhow::Result;

 /// The [TrieDBFetcher] trait defines the synchronous interface for fetching trie node preimages and
@@ -45,6 +45,43 @@ pub trait TrieDBFetcher {
     fn header_by_hash(&self, hash: B256) -> Result<Header>
; } +/// The [TrieDBHinter] trait defines the synchronous interface for hinting the host to fetch trie +/// node preimages. +pub trait TrieDBHinter { + /// Hints the host to fetch the trie node preimage by hash. + /// + /// ## Takes + /// - `hash`: The hash of the trie node to hint. + /// + /// ## Returns + /// - Ok(()): If the hint was successful. + fn hint_trie_node(&self, hash: B256) -> Result<()>; + + /// Hints the host to fetch the trie node preimages on the path to the given address. + /// + /// ## Takes + /// - `address` - The address of the contract whose trie node preimages are to be fetched. + /// - `block_number` - The block number at which the trie node preimages are to be fetched. + /// + /// ## Returns + /// - Ok(()): If the hint was successful. + /// - Err(anyhow::Error): If the hint was unsuccessful. + fn hint_account_proof(&self, address: Address, block_number: u64) -> Result<()>; + + /// Hints the host to fetch the trie node preimages on the path to the storage slot within the + /// given account's storage trie. + /// + /// ## Takes + /// - `address` - The address of the contract whose trie node preimages are to be fetched. + /// - `slot` - The storage slot whose trie node preimages are to be fetched. + /// - `block_number` - The block number at which the trie node preimages are to be fetched. + /// + /// ## Returns + /// - Ok(()): If the hint was successful. + /// - Err(anyhow::Error): If the hint was unsuccessful. + fn hint_storage_proof(&self, address: Address, slot: U256, block_number: u64) -> Result<()>; +} + /// The default, no-op implementation of the [TrieDBFetcher] trait, used for testing. #[derive(Debug, Clone, Copy)] pub struct NoopTrieDBFetcher; @@ -62,3 +99,21 @@ impl TrieDBFetcher for NoopTrieDBFetcher { Ok(Header::default()) } } + +/// The default, no-op implementation of the [TrieDBHinter] trait, used for testing. +#[derive(Debug, Clone, Copy)] +pub struct NoopTrieDBHinter; + +impl TrieDBHinter for NoopTrieDBHinter { + fn hint_trie_node(&self, _hash: B256) -> Result<()> { + Ok(()) + } + + fn hint_account_proof(&self, _address: Address, _block_number: u64) -> Result<()> { + Ok(()) + } + + fn hint_storage_proof(&self, _address: Address, _slot: U256, _block_number: u64) -> Result<()> { + Ok(()) + } +} diff --git a/crates/mpt/src/lib.rs b/crates/mpt/src/lib.rs index dddf2c1ab..b6b4f377e 100644 --- a/crates/mpt/src/lib.rs +++ b/crates/mpt/src/lib.rs @@ -10,7 +10,7 @@ mod db; pub use db::{TrieAccount, TrieDB}; mod fetcher; -pub use fetcher::{NoopTrieDBFetcher, TrieDBFetcher}; +pub use fetcher::{NoopTrieDBFetcher, NoopTrieDBHinter, TrieDBFetcher, TrieDBHinter}; mod node; pub use node::TrieNode; diff --git a/crates/mpt/src/node.rs b/crates/mpt/src/node.rs index 11b4ff894..4207bc1eb 100644 --- a/crates/mpt/src/node.rs +++ b/crates/mpt/src/node.rs @@ -3,7 +3,7 @@ use crate::{ util::{rlp_list_element_length, unpack_path_to_nibbles}, - TrieDBFetcher, + TrieDBFetcher, TrieDBHinter, }; use alloc::{boxed::Box, vec, vec::Vec}; use alloy_primitives::{keccak256, Bytes, B256}; @@ -329,16 +329,22 @@ impl TrieNode { /// ## Returns /// - `Err(_)` - Could not delete the node at the given path in the trie. /// - `Ok(())` - The node was successfully deleted at the given path. 
- pub fn delete(&mut self, path: &Nibbles, fetcher: &F) -> Result<()> { - self.delete_inner(path, 0, fetcher) + pub fn delete( + &mut self, + path: &Nibbles, + fetcher: &F, + hinter: &H, + ) -> Result<()> { + self.delete_inner(path, 0, fetcher, hinter) } /// Inner alias for `delete` that keeps track of the nibble offset. - fn delete_inner( + fn delete_inner( &mut self, path: &Nibbles, nibble_offset: usize, fetcher: &F, + hinter: &H, ) -> Result<()> { let remaining_nibbles = path.slice(nibble_offset..); match self { @@ -362,23 +368,23 @@ impl TrieNode { return Ok(()); } - node.delete_inner(path, nibble_offset + prefix.len(), fetcher)?; + node.delete_inner(path, nibble_offset + prefix.len(), fetcher, hinter)?; // Simplify extension if possible after the deletion - self.collapse_if_possible(fetcher) + self.collapse_if_possible(fetcher, hinter) } TrieNode::Branch { stack } => { let branch_nibble = remaining_nibbles[0] as usize; let nibble_offset = nibble_offset + BRANCH_NODE_NIBBLES; - stack[branch_nibble].delete_inner(path, nibble_offset, fetcher)?; + stack[branch_nibble].delete_inner(path, nibble_offset, fetcher, hinter)?; // Simplify the branch if possible after the deletion - self.collapse_if_possible(fetcher) + self.collapse_if_possible(fetcher, hinter) } TrieNode::Blinded { .. } => { self.unblind(fetcher)?; - self.delete_inner(path, nibble_offset, fetcher) + self.delete_inner(path, nibble_offset, fetcher, hinter) } } } @@ -391,7 +397,11 @@ impl TrieNode { /// ## Returns /// - `Ok(())` - The node was successfully collapsed /// - `Err(_)` - Could not collapse the node - fn collapse_if_possible(&mut self, fetcher: &F) -> Result<()> { + fn collapse_if_possible( + &mut self, + fetcher: &F, + hinter: &H, + ) -> Result<()> { match self { TrieNode::Extension { prefix, node } => match node.as_mut() { TrieNode::Extension { prefix: child_prefix, node: child_node } => { @@ -415,7 +425,7 @@ impl TrieNode { } TrieNode::Blinded { .. } => { node.unblind(fetcher)?; - self.collapse_if_possible(fetcher)?; + self.collapse_if_possible(fetcher, hinter)?; } _ => {} }, @@ -444,9 +454,14 @@ impl TrieNode { ); *self = TrieNode::Extension { prefix: new_prefix, node: node.clone() }; } - TrieNode::Blinded { .. } => { + TrieNode::Blinded { commitment } => { + // In this special case, we need to send a hint to fetch the preimage of + // the blinded node, since it is outside of the paths that have been + // traversed so far. + hinter.hint_trie_node(*commitment)?; + non_empty_node.unblind(fetcher)?; - self.collapse_if_possible(fetcher)?; + self.collapse_if_possible(fetcher, hinter)?; } _ => {} };