diff --git a/Cargo.lock b/Cargo.lock index 85f9d1cbde..b4d7e31614 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3199,6 +3199,7 @@ dependencies = [ "light-heap", "light-indexed-merkle-tree", "light-macros", + "light-merkle-tree-metadata", "light-utils", "light-verifier", "light-zero-copy", diff --git a/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/escrow.rs b/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/escrow.rs index 5e6e46b06d..a16a312be2 100644 --- a/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/escrow.rs +++ b/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/escrow.rs @@ -12,7 +12,7 @@ use light_sdk::{ legacy::create_cpi_inputs_for_new_account, light_system_accounts, verify::verify, LightTraits, }; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::{ address::derive_address_legacy, compressed_account::{CompressedAccount, CompressedAccountData, PackedMerkleContext}, diff --git a/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/sdk.rs b/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/sdk.rs index 54f5c78b6c..49975d5cb9 100644 --- a/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/sdk.rs +++ b/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/sdk.rs @@ -7,7 +7,7 @@ use light_compressed_token::process_transfer::{ TokenTransferOutputData, }; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::{ address::{add_and_get_remaining_account_indices, pack_new_address_params}, compressed_account::{pack_merkle_context, CompressedAccount, MerkleContext}, diff --git a/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/withdrawal.rs b/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/withdrawal.rs 
index d4b4e5e8b2..86b67a7966 100644 --- a/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/withdrawal.rs +++ b/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/withdrawal.rs @@ -7,7 +7,7 @@ use light_compressed_token::process_transfer::{ use light_hasher::{DataHasher, Poseidon}; use light_sdk::verify::verify; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::{ compressed_account::{ CompressedAccount, CompressedAccountData, PackedCompressedAccountWithMerkleContext, diff --git a/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/escrow.rs b/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/escrow.rs index eda63e39cc..732327f240 100644 --- a/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/escrow.rs +++ b/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/escrow.rs @@ -7,7 +7,7 @@ use light_compressed_token::{ program::LightCompressedToken, }; use light_sdk::{light_system_accounts, LightTraits}; -use light_system_program::invoke::processor::CompressedProof; +use light_system_program::processor::processor::CompressedProof; use crate::create_change_output_compressed_token_account; diff --git a/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/sdk.rs b/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/sdk.rs index 695214e496..7ae31ce432 100644 --- a/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/sdk.rs +++ b/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/sdk.rs @@ -10,7 +10,7 @@ use light_compressed_token::process_transfer::{ TokenTransferOutputData, }; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::{ address::add_and_get_remaining_account_indices, compressed_account::{CompressedAccount, MerkleContext}, diff --git 
a/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/withdrawal.rs b/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/withdrawal.rs index a2b4f260fc..39a30cb8d7 100644 --- a/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/withdrawal.rs +++ b/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/withdrawal.rs @@ -3,7 +3,7 @@ use light_compressed_token::process_transfer::{ CompressedTokenInstructionDataTransfer, InputTokenDataWithContext, PackedTokenTransferOutputData, }; -use light_system_program::invoke::processor::CompressedProof; +use light_system_program::processor::processor::CompressedProof; use crate::{ create_change_output_compressed_token_account, EscrowCompressedTokensWithPda, EscrowError, diff --git a/examples/token-escrow/programs/token-escrow/src/lib.rs b/examples/token-escrow/programs/token-escrow/src/lib.rs index 05e7f169b7..104191d9c7 100644 --- a/examples/token-escrow/programs/token-escrow/src/lib.rs +++ b/examples/token-escrow/programs/token-escrow/src/lib.rs @@ -3,7 +3,7 @@ use anchor_lang::{prelude::*, solana_program::pubkey::Pubkey}; use light_compressed_token::process_transfer::{ InputTokenDataWithContext, PackedTokenTransferOutputData, }; -use light_system_program::invoke::processor::CompressedProof; +use light_system_program::processor::processor::CompressedProof; pub mod escrow_with_compressed_pda; pub mod escrow_with_pda; diff --git a/program-libs/batched-merkle-tree/src/batch.rs b/program-libs/batched-merkle-tree/src/batch.rs index 957fd01580..310bceb1c4 100644 --- a/program-libs/batched-merkle-tree/src/batch.rs +++ b/program-libs/batched-merkle-tree/src/batch.rs @@ -895,7 +895,7 @@ mod tests { // to modify private Batch variables for assertions. 
#[test] fn test_get_num_inserted() { - let mut account_data = vec![0u8; 920]; + let mut account_data = vec![0u8; 984]; let mut queue_metadata = QueueMetadata::default(); let associated_merkle_tree = Pubkey::new_unique(); queue_metadata.associated_merkle_tree = associated_merkle_tree; @@ -911,6 +911,7 @@ mod tests { zkp_batch_size, num_iters, bloom_filter_capacity, + Pubkey::new_unique(), ) .unwrap(); // Tree height 4 -> capacity 16 diff --git a/program-libs/batched-merkle-tree/src/batch_metadata.rs b/program-libs/batched-merkle-tree/src/batch_metadata.rs index 4332c38f5d..a146b69f67 100644 --- a/program-libs/batched-merkle-tree/src/batch_metadata.rs +++ b/program-libs/batched-merkle-tree/src/batch_metadata.rs @@ -279,7 +279,7 @@ fn test_batch_size_validation() { #[test] fn test_output_queue_account_size() { let metadata = BatchMetadata::new_output_queue(10, 2).unwrap(); - let queue_size = 472 + (16 + 10 * 32) * 2 + (16 + 5 * 32) * 2; + let queue_size = 472 + (16 + 10 * 32) * 2 + (16 + 5 * 32) * 2 + 64; assert_eq!( metadata .queue_account_size(QueueType::BatchedOutput as u64) diff --git a/program-libs/batched-merkle-tree/src/initialize_address_tree.rs b/program-libs/batched-merkle-tree/src/initialize_address_tree.rs index d07a7e3eb1..48dc186ed4 100644 --- a/program-libs/batched-merkle-tree/src/initialize_address_tree.rs +++ b/program-libs/batched-merkle-tree/src/initialize_address_tree.rs @@ -114,7 +114,13 @@ pub fn init_batched_address_merkle_tree_from_account_info( let mt_data = &mut mt_account_info .try_borrow_mut_data() .map_err(|_| UtilsError::BorrowAccountDataFailed)?; - init_batched_address_merkle_tree_account(owner, params, mt_data, merkle_tree_rent)?; + init_batched_address_merkle_tree_account( + owner, + params, + mt_data, + merkle_tree_rent, + (*mt_account_info.key).into(), + )?; Ok(()) } @@ -123,6 +129,7 @@ pub fn init_batched_address_merkle_tree_account( params: InitAddressTreeAccountsInstructionData, mt_account_data: &mut [u8], merkle_tree_rent: u64, + 
pubkey: Pubkey, ) -> Result<BatchedMerkleTreeAccount<'_>, BatchedMerkleTreeError> { let height = params.height; @@ -151,6 +158,7 @@ pub fn init_batched_address_merkle_tree_account( }; BatchedMerkleTreeAccount::init( mt_account_data, + &pubkey, metadata, params.root_history_capacity, params.input_queue_batch_size, diff --git a/program-libs/batched-merkle-tree/src/initialize_state_tree.rs b/program-libs/batched-merkle-tree/src/initialize_state_tree.rs index c9e851187e..1b52d76d47 100644 --- a/program-libs/batched-merkle-tree/src/initialize_state_tree.rs +++ b/program-libs/batched-merkle-tree/src/initialize_state_tree.rs @@ -6,7 +6,8 @@ use light_merkle_tree_metadata::{ rollover::{check_rollover_fee_sufficient, RolloverMetadata}, }; use light_utils::{ - account::check_account_balance_is_rent_exempt, fee::compute_rollover_fee, pubkey::Pubkey, + account::check_account_balance_is_rent_exempt, fee::compute_rollover_fee, + hashv_to_bn254_field_size_be, pubkey::Pubkey, }; use solana_program::{account_info::AccountInfo, msg}; @@ -204,6 +205,7 @@ pub fn init_batched_state_merkle_tree_accounts<'a>( // Output queues have no bloom filter. 0, 0, + output_queue_pubkey, )?; } let metadata = MerkleTreeMetadata { @@ -228,6 +230,7 @@ pub fn init_batched_state_merkle_tree_accounts<'a>( // to prove inclusion of this state for which we need a root from the tree account.
BatchedMerkleTreeAccount::init( mt_account_data, + &mt_pubkey, metadata, params.root_history_capacity, params.input_queue_batch_size, @@ -379,6 +382,7 @@ pub struct CreateOutputQueueParams { pub additional_bytes: u64, pub rent: u64, pub associated_merkle_tree: Pubkey, + pub queue_pubkey: Pubkey, pub height: u32, pub network_fee: u64, } @@ -389,6 +393,7 @@ impl CreateOutputQueueParams { owner: Pubkey, rent: u64, associated_merkle_tree: Pubkey, + queue_pubkey: Pubkey, ) -> Self { Self { owner, @@ -403,6 +408,7 @@ impl CreateOutputQueueParams { associated_merkle_tree, height: params.height, network_fee: params.network_fee.unwrap_or_default(), + queue_pubkey, } } } @@ -439,5 +445,9 @@ pub fn create_output_queue_account(params: CreateOutputQueueParams) -> BatchedQu metadata, batch_metadata, tree_capacity: 2u64.pow(params.height), + hashed_merkle_tree_pubkey: hashv_to_bn254_field_size_be(&[&params + .associated_merkle_tree + .to_bytes()]), + hashed_queue_pubkey: hashv_to_bn254_field_size_be(&[&params.queue_pubkey.to_bytes()]), } } diff --git a/program-libs/batched-merkle-tree/src/merkle_tree.rs b/program-libs/batched-merkle-tree/src/merkle_tree.rs index b056fc90f1..6c4101c5e1 100644 --- a/program-libs/batched-merkle-tree/src/merkle_tree.rs +++ b/program-libs/batched-merkle-tree/src/merkle_tree.rs @@ -9,6 +9,7 @@ use light_merkle_tree_metadata::{ use light_utils::{ account::{check_account_info, set_discriminator, DISCRIMINATOR_LEN}, hashchain::create_hash_chain_from_array, + hashv_to_bn254_field_size_be, pubkey::Pubkey, }; use light_verifier::{ @@ -213,6 +214,7 @@ impl<'a> BatchedMerkleTreeAccount<'a> { #[allow(clippy::too_many_arguments)] pub fn init( account_data: &'a mut [u8], + pubkey: &Pubkey, metadata: MerkleTreeMetadata, root_history_capacity: u32, input_queue_batch_size: u64, @@ -229,6 +231,7 @@ impl<'a> BatchedMerkleTreeAccount<'a> { let (mut account_metadata, account_data) = Ref::<&'a mut [u8], BatchedMerkleTreeMetadata>::from_prefix(account_data)
.map_err(ZeroCopyError::from)?; + account_metadata.hashed_pubkey = hashv_to_bn254_field_size_be(&[&pubkey.to_bytes()]); account_metadata.metadata = metadata; account_metadata.root_history_capacity = root_history_capacity; account_metadata.height = height; @@ -900,6 +903,7 @@ pub fn get_merkle_tree_account_size( ..Default::default() }, capacity: 2u64.pow(height), + ..Default::default() }; mt_account.get_account_size().unwrap() } @@ -1006,15 +1010,17 @@ mod test { /// 9. Batch 1 is inserted and Batch 0 is full and overlapping roots exist #[test] fn test_zero_out() { - let mut account_data = vec![0u8; 3128]; + let mut account_data = vec![0u8; 3160]; let batch_size = 4; let zkp_batch_size = 1; let num_zkp_updates = batch_size / zkp_batch_size; let root_history_len = 10; let num_iter = 1; let bloom_filter_capacity = 8000; + let pubkey = Pubkey::new_unique(); BatchedMerkleTreeAccount::init( &mut account_data, + &pubkey, MerkleTreeMetadata::default(), root_history_len, batch_size, @@ -1377,7 +1383,7 @@ mod test { #[test] fn test_tree_is_full() { - let mut account_data = vec![0u8; 15672]; + let mut account_data = vec![0u8; 15704]; let batch_size = 200; let zkp_batch_size = 1; let root_history_len = 10; @@ -1387,6 +1393,7 @@ mod test { let tree_capacity = 2u64.pow(height); let account = BatchedMerkleTreeAccount::init( &mut account_data, + &Pubkey::new_unique(), MerkleTreeMetadata::default(), root_history_len, batch_size, @@ -1415,7 +1422,7 @@ mod test { #[test] fn test_check_non_inclusion() { - let mut account_data = vec![0u8; 3192]; + let mut account_data = vec![0u8; 3224]; let batch_size = 5; let zkp_batch_size = 1; let root_history_len = 10; @@ -1424,6 +1431,7 @@ mod test { let height = 4; let mut account = BatchedMerkleTreeAccount::init( &mut account_data, + &Pubkey::new_unique(), MerkleTreeMetadata::default(), root_history_len, batch_size, diff --git a/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs 
b/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs index 6ee089abe0..a90a7d8a5a 100644 --- a/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs +++ b/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs @@ -5,7 +5,7 @@ use light_merkle_tree_metadata::{ queue::QueueType, rollover::RolloverMetadata, }; -use light_utils::{fee::compute_rollover_fee, pubkey::Pubkey}; +use light_utils::{fee::compute_rollover_fee, hashv_to_bn254_field_size_be, pubkey::Pubkey}; use light_zero_copy::cyclic_vec::ZeroCopyCyclicVecU64; use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout}; @@ -41,6 +41,7 @@ pub struct BatchedMerkleTreeMetadata { pub root_history_capacity: u32, pub capacity: u64, pub queue_metadata: BatchMetadata, + pub hashed_pubkey: [u8; 32], } impl Default for BatchedMerkleTreeMetadata { @@ -61,6 +62,7 @@ impl Default for BatchedMerkleTreeMetadata { zkp_batch_size: 10, ..Default::default() }, + hashed_pubkey: [0u8; 32], } } } @@ -120,6 +122,7 @@ impl BatchedMerkleTreeMetadata { root_history_capacity, height, num_iters, + tree_pubkey, } = params; Self { metadata: MerkleTreeMetadata { @@ -153,6 +156,7 @@ impl BatchedMerkleTreeMetadata { ) .unwrap(), capacity: 2u64.pow(height), + hashed_pubkey: hashv_to_bn254_field_size_be(&[&tree_pubkey.to_bytes()]), } } } @@ -171,9 +175,14 @@ pub struct CreateTreeParams { pub root_history_capacity: u32, pub height: u32, pub num_iters: u64, + pub tree_pubkey: Pubkey, } impl CreateTreeParams { - pub fn from_state_ix_params(data: InitStateTreeAccountsInstructionData, owner: Pubkey) -> Self { + pub fn from_state_ix_params( + data: InitStateTreeAccountsInstructionData, + owner: Pubkey, + tree_pubkey: Pubkey, + ) -> Self { CreateTreeParams { owner, program_owner: data.program_owner, @@ -187,12 +196,14 @@ impl CreateTreeParams { root_history_capacity: data.root_history_capacity, height: data.height, num_iters: data.bloom_filter_num_iters, + tree_pubkey, } } pub fn from_address_ix_params( data: 
InitAddressTreeAccountsInstructionData, owner: Pubkey, + tree_pubkey: Pubkey, ) -> Self { CreateTreeParams { owner, @@ -207,6 +218,7 @@ impl CreateTreeParams { root_history_capacity: data.root_history_capacity, height: data.height, num_iters: data.bloom_filter_num_iters, + tree_pubkey, } } } diff --git a/program-libs/batched-merkle-tree/src/queue.rs b/program-libs/batched-merkle-tree/src/queue.rs index 9a04c24953..5cdaea4289 100644 --- a/program-libs/batched-merkle-tree/src/queue.rs +++ b/program-libs/batched-merkle-tree/src/queue.rs @@ -8,6 +8,7 @@ use light_merkle_tree_metadata::{ }; use light_utils::{ account::{check_account_info, set_discriminator, DISCRIMINATOR_LEN}, + hashv_to_bn254_field_size_be, pubkey::Pubkey, }; use light_zero_copy::{errors::ZeroCopyError, vec::ZeroCopyVecU64}; @@ -47,6 +48,8 @@ pub struct BatchedQueueMetadata { /// Maximum number of leaves that can fit in the tree, calculated as 2^height. /// For example, a tree with height 3 can hold up to 8 leaves. pub tree_capacity: u64, + pub hashed_merkle_tree_pubkey: [u8; 32], + pub hashed_queue_pubkey: [u8; 32], } impl BatchedQueueMetadata { @@ -57,6 +60,7 @@ impl BatchedQueueMetadata { zkp_batch_size: u64, bloom_filter_capacity: u64, num_iters: u64, + queue_pubkey: &Pubkey, ) -> Result<(), BatchedMerkleTreeError> { self.metadata = meta_data; self.batch_metadata.init(batch_size, zkp_batch_size)?; @@ -70,6 +74,9 @@ impl BatchedQueueMetadata { batch_size * (i as u64), ); } + self.hashed_merkle_tree_pubkey = + hashv_to_bn254_field_size_be(&[&meta_data.associated_merkle_tree.to_bytes()]); + self.hashed_queue_pubkey = hashv_to_bn254_field_size_be(&[&queue_pubkey.to_bytes()]); Ok(()) } } @@ -118,6 +125,7 @@ impl BatchedQueueMetadata { /// - `prove_inclusion_by_index` #[derive(Debug, PartialEq)] pub struct BatchedQueueAccount<'a> { + pubkey: Pubkey, metadata: Ref<&'a mut [u8], BatchedQueueMetadata>, pub value_vecs: [ZeroCopyVecU64<'a, [u8; 32]>; 2], pub hash_chain_stores: [ZeroCopyVecU64<'a, [u8; 32]>; 
2], @@ -153,7 +161,7 @@ impl<'a> BatchedQueueAccount<'a> { let account_data: &'a mut [u8] = unsafe { std::slice::from_raw_parts_mut(account_data.as_mut_ptr(), account_data.len()) }; - Self::from_bytes::(account_data) + Self::from_bytes::(account_data, (*account_info.key).into()) } /// Deserialize a BatchedQueueAccount from bytes. @@ -166,11 +174,12 @@ impl<'a> BatchedQueueAccount<'a> { light_utils::account::check_discriminator::( &account_data[..DISCRIMINATOR_LEN], )?; - Self::from_bytes::(account_data) + Self::from_bytes::(account_data, Pubkey::default()) } fn from_bytes( account_data: &'a mut [u8], + pubkey: Pubkey, ) -> Result, BatchedMerkleTreeError> { let (_discriminator, account_data) = account_data.split_at_mut(DISCRIMINATOR_LEN); let (metadata, account_data) = @@ -188,6 +197,7 @@ impl<'a> BatchedQueueAccount<'a> { let (hashchain_store2, _account_data) = ZeroCopyVecU64::from_bytes_at(account_data)?; Ok(BatchedQueueAccount { + pubkey, metadata, value_vecs: [value_vec1, value_vec2], hash_chain_stores: [hashchain_store1, hashchain_store2], @@ -201,6 +211,7 @@ impl<'a> BatchedQueueAccount<'a> { output_queue_zkp_batch_size: u64, num_iters: u64, bloom_filter_capacity: u64, + pubkey: Pubkey, ) -> Result, BatchedMerkleTreeError> { let account_data_len = account_data.len(); let (discriminator, account_data) = account_data.split_at_mut(DISCRIMINATOR_LEN); @@ -216,6 +227,7 @@ impl<'a> BatchedQueueAccount<'a> { output_queue_zkp_batch_size, bloom_filter_capacity, num_iters, + &pubkey, )?; if account_data_len @@ -242,6 +254,7 @@ impl<'a> BatchedQueueAccount<'a> { let (vec_1, account_data) = ZeroCopyVecU64::new_at(hash_chain_capacity, account_data)?; let (vec_2, _) = ZeroCopyVecU64::new_at(hash_chain_capacity, account_data)?; Ok(BatchedQueueAccount { + pubkey, metadata: account_metadata, value_vecs: [value_vecs_1, value_vecs_2], hash_chain_stores: [vec_1, vec_2], @@ -376,6 +389,10 @@ impl<'a> BatchedQueueAccount<'a> { } Ok(()) } + + pub fn pubkey(&self) -> &Pubkey { + 
&self.pubkey + } } impl Deref for BatchedQueueAccount<'_> { @@ -585,7 +602,7 @@ pub fn assert_queue_zero_copy_inited(account_data: &mut [u8], ref_account: Batch #[test] fn test_from_bytes_invalid_tree_type() { let mut account_data = vec![0u8; get_output_queue_account_size_default()]; - let account = BatchedQueueAccount::from_bytes::<6>(&mut account_data); + let account = BatchedQueueAccount::from_bytes::<6>(&mut account_data, Pubkey::default()); assert_eq!( account.unwrap_err(), MerkleTreeMetadataError::InvalidQueueType.into() @@ -595,11 +612,14 @@ fn test_from_bytes_invalid_tree_type() { #[test] fn test_batched_queue_metadata_init() { let mut metadata = BatchedQueueMetadata::default(); - let queue_metadata = QueueMetadata::default(); + let mt_pubkey = Pubkey::new_unique(); + let mut queue_metadata = QueueMetadata::default(); + queue_metadata.associated_merkle_tree = mt_pubkey; let batch_size = 4; let zkp_batch_size = 2; let bloom_filter_capacity = 10; let num_iters = 5; + let queue_pubkey = Pubkey::new_unique(); let result = metadata.init( queue_metadata, @@ -607,6 +627,7 @@ fn test_batched_queue_metadata_init() { zkp_batch_size, bloom_filter_capacity, num_iters, + &queue_pubkey, ); assert!(result.is_ok()); @@ -622,11 +643,18 @@ fn test_batched_queue_metadata_init() { assert_eq!(batch.zkp_batch_size, zkp_batch_size); assert_eq!(batch.start_index, batch_size * (i as u64)); } + let hashed_merkle_tree_pubkey = hashv_to_bn254_field_size_be(&[&mt_pubkey.to_bytes()]); + let hashed_queue_pubkey = hashv_to_bn254_field_size_be(&[&queue_pubkey.to_bytes()]); + assert_eq!( + metadata.hashed_merkle_tree_pubkey, + hashed_merkle_tree_pubkey + ); + assert_eq!(metadata.hashed_queue_pubkey, hashed_queue_pubkey); } #[test] fn test_check_is_associated() { - let mut account_data = vec![0u8; 920]; + let mut account_data = vec![0u8; 984]; let mut queue_metadata = QueueMetadata::default(); let associated_merkle_tree = Pubkey::new_unique(); queue_metadata.associated_merkle_tree = 
associated_merkle_tree; @@ -642,6 +670,7 @@ fn test_check_is_associated() { zkp_batch_size, num_iters, bloom_filter_capacity, + Pubkey::new_unique(), ) .unwrap(); // 1. Functional diff --git a/program-libs/batched-merkle-tree/src/rollover_address_tree.rs b/program-libs/batched-merkle-tree/src/rollover_address_tree.rs index 37433d5225..7972adaeb8 100644 --- a/program-libs/batched-merkle-tree/src/rollover_address_tree.rs +++ b/program-libs/batched-merkle-tree/src/rollover_address_tree.rs @@ -63,7 +63,7 @@ pub fn rollover_batched_address_tree<'a>( // 3. Initialize the new address merkle tree. let params = create_batched_address_tree_init_params(old_merkle_tree, network_fee); let owner = old_merkle_tree.metadata.access_metadata.owner; - init_batched_address_merkle_tree_account(owner, params, new_mt_data, new_mt_rent) + init_batched_address_merkle_tree_account(owner, params, new_mt_data, new_mt_rent, new_mt_pubkey) } fn create_batched_address_tree_init_params( diff --git a/program-libs/batched-merkle-tree/tests/initialize_address_tree.rs b/program-libs/batched-merkle-tree/tests/initialize_address_tree.rs index 16aa6a472e..88b7ef61bd 100644 --- a/program-libs/batched-merkle-tree/tests/initialize_address_tree.rs +++ b/program-libs/batched-merkle-tree/tests/initialize_address_tree.rs @@ -14,16 +14,23 @@ use rand::{rngs::StdRng, Rng}; #[test] fn test_account_init() { let owner = Pubkey::new_unique(); + let tree_pubkey = Pubkey::new_unique(); let mt_account_size = get_merkle_tree_account_size_default(); let mut mt_account_data = vec![0; mt_account_size]; let merkle_tree_rent = 1_000_000_000; let params = InitAddressTreeAccountsInstructionData::test_default(); - let mt_params = CreateTreeParams::from_address_ix_params(params, owner); + let mt_params = CreateTreeParams::from_address_ix_params(params, owner, tree_pubkey); let ref_mt_account = BatchedMerkleTreeMetadata::new_address_tree(mt_params, merkle_tree_rent); - init_batched_address_merkle_tree_account(owner, params, &mut 
mt_account_data, merkle_tree_rent) - .unwrap(); + init_batched_address_merkle_tree_account( + owner, + params, + &mut mt_account_data, + merkle_tree_rent, + tree_pubkey, + ) + .unwrap(); assert_address_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); } @@ -35,6 +42,7 @@ fn test_rnd_account_init() { for _ in 0..10000 { println!("next iter ------------------------------------"); let owner = Pubkey::new_unique(); + let tree_pubkey = Pubkey::new_unique(); let program_owner = if rng.gen_bool(0.5) { Some(Pubkey::new_unique()) @@ -98,9 +106,10 @@ fn test_rnd_account_init() { params, &mut mt_account_data, merkle_tree_rent, + tree_pubkey, ) .unwrap(); - let mt_params = CreateTreeParams::from_address_ix_params(params, owner); + let mt_params = CreateTreeParams::from_address_ix_params(params, owner, tree_pubkey); let ref_mt_account = BatchedMerkleTreeMetadata::new_address_tree(mt_params, merkle_tree_rent); assert_address_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); diff --git a/program-libs/batched-merkle-tree/tests/initialize_state_tree.rs b/program-libs/batched-merkle-tree/tests/initialize_state_tree.rs index 6660ddab02..a5f22d35cb 100644 --- a/program-libs/batched-merkle-tree/tests/initialize_state_tree.rs +++ b/program-libs/batched-merkle-tree/tests/initialize_state_tree.rs @@ -40,6 +40,7 @@ fn test_different_parameters() { ); let mut mt_account_data = vec![0; mt_account_size]; let mt_pubkey = Pubkey::new_unique(); + let queue_pubkey = Pubkey::new_unique(); let merkle_tree_rent = 1_000_000_000; let queue_rent = 1_000_000_000; @@ -61,13 +62,14 @@ fn test_different_parameters() { owner, merkle_tree_rent + queue_rent + additional_bytes_rent, mt_pubkey, + queue_pubkey, ); let ref_output_queue_account = create_output_queue_account(queue_account_params); assert_queue_zero_copy_inited( output_queue_account_data.as_mut_slice(), ref_output_queue_account, ); - let mt_params = CreateTreeParams::from_state_ix_params(params, owner); + let mt_params = 
CreateTreeParams::from_state_ix_params(params, owner, mt_pubkey); let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(mt_params, output_queue_pubkey); assert_state_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); @@ -109,13 +111,14 @@ fn test_account_init() { owner, merkle_tree_rent + queue_rent + additional_bytes_rent, mt_pubkey, + output_queue_pubkey, ); let ref_output_queue_account = create_output_queue_account(queue_account_params); assert_queue_zero_copy_inited( output_queue_account_data.as_mut_slice(), ref_output_queue_account, ); - let mt_params = CreateTreeParams::from_state_ix_params(params, owner); + let mt_params = CreateTreeParams::from_state_ix_params(params, owner, mt_pubkey); let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(mt_params, output_queue_pubkey); assert_state_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); } @@ -239,13 +242,14 @@ fn test_rnd_account_init() { owner, merkle_tree_rent + queue_rent + additional_bytes_rent, mt_pubkey, + output_queue_pubkey, ); let ref_output_queue_account = create_output_queue_account(queue_account_params); assert_queue_zero_copy_inited( output_queue_account_data.as_mut_slice(), ref_output_queue_account, ); - let mt_params = CreateTreeParams::from_state_ix_params(params, owner); + let mt_params = CreateTreeParams::from_state_ix_params(params, owner, mt_pubkey); let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(mt_params, output_queue_pubkey); diff --git a/program-libs/batched-merkle-tree/tests/merkle_tree.rs b/program-libs/batched-merkle-tree/tests/merkle_tree.rs index abd031196a..245e13f4e8 100644 --- a/program-libs/batched-merkle-tree/tests/merkle_tree.rs +++ b/program-libs/batched-merkle-tree/tests/merkle_tree.rs @@ -1844,6 +1844,7 @@ async fn test_fill_address_tree_completely() { params, &mut mt_account_data, merkle_tree_rent, + mt_pubkey, ) .unwrap(); use rand::SeedableRng; diff --git a/program-libs/batched-merkle-tree/tests/queue.rs 
b/program-libs/batched-merkle-tree/tests/queue.rs index 4b24ebdced..1ebe5d8ddb 100644 --- a/program-libs/batched-merkle-tree/tests/queue.rs +++ b/program-libs/batched-merkle-tree/tests/queue.rs @@ -58,6 +58,7 @@ fn test_output_queue_account() { // 1 batch in progress, 1 batch ready to be processed let bloom_filter_capacity = 0; let bloom_filter_num_iters = 0; + let queue_pubkey = Pubkey::new_unique(); { let queue_type = QueueType::BatchedOutput; let (ref_account, mut account_data) = @@ -69,6 +70,7 @@ fn test_output_queue_account() { 10, bloom_filter_num_iters, bloom_filter_capacity, + queue_pubkey, ) .unwrap(); @@ -86,8 +88,17 @@ fn test_output_queue_account() { fn test_value_exists_in_value_vec_present() { let (account, mut account_data) = get_test_account_and_account_data(100, QueueType::BatchedOutput, 0); - let mut account = - BatchedQueueAccount::init(&mut account_data, account.metadata, 100, 10, 0, 0).unwrap(); + let queue_pubkey = Pubkey::new_unique(); + let mut account = BatchedQueueAccount::init( + &mut account_data, + account.metadata, + 100, + 10, + 0, + 0, + queue_pubkey, + ) + .unwrap(); let value = [1u8; 32]; let value2 = [2u8; 32]; diff --git a/program-libs/batched-merkle-tree/tests/rollover_address_tree.rs b/program-libs/batched-merkle-tree/tests/rollover_address_tree.rs index 4eae50f34f..fcad7f9e6d 100644 --- a/program-libs/batched-merkle-tree/tests/rollover_address_tree.rs +++ b/program-libs/batched-merkle-tree/tests/rollover_address_tree.rs @@ -30,6 +30,7 @@ use rand::thread_rng; #[test] fn test_rollover() { let owner = Pubkey::new_unique(); + let mt_pubkey = Pubkey::new_unique(); let mt_account_size = get_merkle_tree_account_size_default(); let mut mt_account_data = vec![0; mt_account_size]; @@ -38,10 +39,16 @@ fn test_rollover() { let merkle_tree_rent = 1_000_000_000; // create first merkle tree - init_batched_address_merkle_tree_account(owner, params, &mut mt_account_data, merkle_tree_rent) - .unwrap(); + 
init_batched_address_merkle_tree_account( + owner, + params, + &mut mt_account_data, + merkle_tree_rent, + mt_pubkey, + ) + .unwrap(); - let create_tree_params = CreateTreeParams::from_address_ix_params(params, owner); + let create_tree_params = CreateTreeParams::from_address_ix_params(params, owner, mt_pubkey); let ref_mt_account = BatchedMerkleTreeMetadata::new_address_tree(create_tree_params, merkle_tree_rent); @@ -199,7 +206,7 @@ fn test_rnd_rollover() { assert_eq!(mt_account_size, ref_account_size); } let mut mt_account_data = vec![0; mt_account_size]; - + let mt_pubkey = Pubkey::new_unique(); let merkle_tree_rent = rng.gen_range(0..10000000); init_batched_address_merkle_tree_account( @@ -207,9 +214,10 @@ fn test_rnd_rollover() { params, &mut mt_account_data, merkle_tree_rent, + mt_pubkey, ) .unwrap(); - let create_tree_params = CreateTreeParams::from_address_ix_params(params, owner); + let create_tree_params = CreateTreeParams::from_address_ix_params(params, owner, mt_pubkey); let ref_mt_account = BatchedMerkleTreeMetadata::new_address_tree(create_tree_params, merkle_tree_rent); @@ -266,9 +274,11 @@ fn test_batched_tree_is_ready_for_rollover() { }, ..Default::default() }; + let mt_pubkey = Pubkey::new_unique(); let mut account = BatchedMerkleTreeAccount::init( &mut account_data, + &mt_pubkey, metadata, root_history_len, batch_size, diff --git a/program-libs/batched-merkle-tree/tests/rollover_state_tree.rs b/program-libs/batched-merkle-tree/tests/rollover_state_tree.rs index d0c30a4123..f0fc57f771 100644 --- a/program-libs/batched-merkle-tree/tests/rollover_state_tree.rs +++ b/program-libs/batched-merkle-tree/tests/rollover_state_tree.rs @@ -69,13 +69,13 @@ fn test_rollover() { ) .unwrap(); - let create_tree_params = CreateTreeParams::from_state_ix_params(params, owner); + let create_tree_params = CreateTreeParams::from_state_ix_params(params, owner, mt_pubkey); let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(create_tree_params, queue_pubkey); 
assert_state_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); let total_rent = merkle_tree_rent + additional_bytes_rent + queue_rent; let output_queue_params = - CreateOutputQueueParams::from(params, owner, total_rent, mt_pubkey); + CreateOutputQueueParams::from(params, owner, total_rent, mt_pubkey, queue_pubkey); let ref_output_queue_account = create_output_queue_account(output_queue_params); assert_queue_zero_copy_inited(queue_account_data.as_mut_slice(), ref_output_queue_account); let mut new_mt_account_data = vec![0; mt_account_size]; @@ -373,14 +373,14 @@ fn test_rollover() { } let mut new_mt_account_data = vec![0; mt_account_size]; let mut new_queue_account_data = vec![0; queue_account_size]; - let create_tree_params = CreateTreeParams::from_state_ix_params(params, owner); + let create_tree_params = CreateTreeParams::from_state_ix_params(params, owner, mt_pubkey); let mut ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(create_tree_params, queue_pubkey); ref_mt_account.metadata.access_metadata.forester = forester; let total_rent = merkle_tree_rent + additional_bytes_rent + queue_rent; let output_queue_params = - CreateOutputQueueParams::from(params, owner, total_rent, mt_pubkey); + CreateOutputQueueParams::from(params, owner, total_rent, mt_pubkey, queue_pubkey); let mut ref_output_queue_account = create_output_queue_account(output_queue_params); ref_output_queue_account .metadata @@ -513,6 +513,8 @@ fn test_rnd_rollover() { let mut mt_account_data = vec![0; mt_account_size]; let mt_pubkey = Pubkey::new_unique(); + println!("mt_pubkey {:?}", mt_pubkey); + println!("queue_pubkey {:?}", output_queue_pubkey); let merkle_tree_rent = rng.gen_range(0..10000000); let queue_rent = rng.gen_range(0..10000000); @@ -535,13 +537,14 @@ fn test_rnd_rollover() { owner, merkle_tree_rent + queue_rent + additional_bytes_rent, mt_pubkey, + output_queue_pubkey, ); let ref_output_queue_account = create_output_queue_account(queue_account_params); 
assert_queue_zero_copy_inited( output_queue_account_data.as_mut_slice(), ref_output_queue_account, ); - let create_tree_params = CreateTreeParams::from_state_ix_params(params, owner); + let create_tree_params = CreateTreeParams::from_state_ix_params(params, owner, mt_pubkey); let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(create_tree_params, output_queue_pubkey); diff --git a/program-libs/utils/src/hashchain.rs b/program-libs/utils/src/hashchain.rs index c2e2bc4659..999ff5da7a 100644 --- a/program-libs/utils/src/hashchain.rs +++ b/program-libs/utils/src/hashchain.rs @@ -101,8 +101,6 @@ pub fn create_tx_hash_from_hash_chains( let version = [0u8; 32]; let mut current_slot_bytes = [0u8; 32]; current_slot_bytes[24..].copy_from_slice(¤t_slot.to_be_bytes()); - // let inputs_hash_chain = create_hash_chain_from_slice(input_compressed_account_hashes)?; - // let outputs_hash_chain = create_hash_chain_from_slice(output_compressed_account_hashes)?; let hash_chain = create_hash_chain_from_slice(&[ version, *inputs_hash_chain, diff --git a/program-libs/zero-copy/src/borsh.rs b/program-libs/zero-copy/src/borsh.rs index c4661c4065..2b370a107d 100644 --- a/program-libs/zero-copy/src/borsh.rs +++ b/program-libs/zero-copy/src/borsh.rs @@ -16,7 +16,6 @@ where fn deserialize_at(bytes: &'a [u8]) -> Result<(Self::Output, &'a [u8]), ZeroCopyError>; } -// Macro to implement Deserialize for [u8; N] with Ref for N >= 32 macro_rules! impl_deserialize_for_u8_array_ref { ( $( $N:expr ),+ ) => { $( @@ -37,7 +36,7 @@ macro_rules! 
impl_deserialize_for_u8_array_ref { )+ }; } -// 1 -32 + impl_deserialize_for_u8_array_ref!(1); impl_deserialize_for_u8_array_ref!(2); impl_deserialize_for_u8_array_ref!(3); @@ -76,7 +75,6 @@ impl_deserialize_for_u8_array_ref!(256); impl_deserialize_for_u8_array_ref!(512); impl_deserialize_for_u8_array_ref!(1024); -// TODO: add deserialize at mut impl<'a, T: KnownLayout + Immutable + FromBytes> Deserialize<'a> for Ref<&'a [u8], T> { type Output = Ref<&'a [u8], T>; diff --git a/program-libs/zero-copy/src/slice.rs b/program-libs/zero-copy/src/slice.rs index 0913329328..4d5a2d22f8 100644 --- a/program-libs/zero-copy/src/slice.rs +++ b/program-libs/zero-copy/src/slice.rs @@ -12,6 +12,7 @@ pub type ZeroCopySliceU16<'a, T> = ZeroCopySlice<'a, u16, T>; pub type ZeroCopySliceU8<'a, T> = ZeroCopySlice<'a, u8, T>; pub type ZeroCopySliceBorsh<'a, T> = ZeroCopySlice<'a, U32, T, false>; +#[derive(Clone)] pub struct ZeroCopySlice<'a, L, T, const PAD: bool = true> where L: ZeroCopyTraits, diff --git a/program-tests/compressed-token-test/tests/test.rs b/program-tests/compressed-token-test/tests/test.rs index 2839dcc26e..7bc10e2197 100644 --- a/program-tests/compressed-token-test/tests/test.rs +++ b/program-tests/compressed-token-test/tests/test.rs @@ -34,7 +34,7 @@ use light_program_test::{ use light_prover_client::gnark::helpers::{kill_prover, spawn_prover, ProofType, ProverConfig}; use light_sdk::token::{AccountState, TokenDataWithMerkleContext}; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::compressed_account::{CompressedAccountWithMerkleContext, MerkleContext}, }; use light_test_utils::{ @@ -5303,7 +5303,6 @@ async fn perform_transfer_failing_test( rpc.process_transaction(transaction).await } -#[serial] #[serial] #[tokio::test] async fn mint_with_batched_tree() { @@ -5322,7 +5321,7 @@ async fn mint_with_batched_tree() { .unwrap(); let mint = create_mint_helper(&mut rpc, &payer).await; let amount = 10000u64; - let 
num_recipients = 25; + let num_recipients = 33; mint_tokens_helper( &mut rpc, &mut test_indexer, diff --git a/program-tests/create-address-test-program/src/create_pda.rs b/program-tests/create-address-test-program/src/create_pda.rs index 12a194ddf4..2dfed939f5 100644 --- a/program-tests/create-address-test-program/src/create_pda.rs +++ b/program-tests/create-address-test-program/src/create_pda.rs @@ -2,7 +2,7 @@ use account_compression::{program::AccountCompression, utils::constants::CPI_AUT use anchor_lang::prelude::*; use light_hasher::{errors::HasherError, DataHasher, Poseidon}; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, program::LightSystemProgram, sdk::{ address::derive_address, diff --git a/program-tests/create-address-test-program/src/lib.rs b/program-tests/create-address-test-program/src/lib.rs index efa7f29c58..09f4bf4218 100644 --- a/program-tests/create-address-test-program/src/lib.rs +++ b/program-tests/create-address-test-program/src/lib.rs @@ -6,7 +6,9 @@ use anchor_lang::{ solana_program::{instruction::Instruction, pubkey::Pubkey}, InstructionData, }; -use light_system_program::{invoke::processor::CompressedProof, utils::get_registered_program_pda}; +use light_system_program::{ + processor::processor::CompressedProof, utils::get_registered_program_pda, +}; pub mod create_pda; pub use create_pda::*; use light_system_program::NewAddressParamsPacked; diff --git a/program-tests/system-cpi-test/src/create_pda.rs b/program-tests/system-cpi-test/src/create_pda.rs index bfbcacca39..a21eaa1df7 100644 --- a/program-tests/system-cpi-test/src/create_pda.rs +++ b/program-tests/system-cpi-test/src/create_pda.rs @@ -7,7 +7,7 @@ use light_hasher::{ errors::HasherError, DataHasher, Discriminator as LightDiscriminator, Poseidon, }; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, program::LightSystemProgram, sdk::{ address::{derive_address, 
derive_address_legacy}, diff --git a/program-tests/system-cpi-test/src/invalidate_not_owned_account.rs b/program-tests/system-cpi-test/src/invalidate_not_owned_account.rs index f85c9632c5..845cb0848c 100644 --- a/program-tests/system-cpi-test/src/invalidate_not_owned_account.rs +++ b/program-tests/system-cpi-test/src/invalidate_not_owned_account.rs @@ -10,7 +10,7 @@ use light_compressed_token::{ CompressedTokenInstructionDataBurn, }; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, program::LightSystemProgram, sdk::{ compressed_account::{CompressedAccount, PackedCompressedAccountWithMerkleContext}, diff --git a/program-tests/system-cpi-test/src/lib.rs b/program-tests/system-cpi-test/src/lib.rs index 62f3e609cb..044862b7c2 100644 --- a/program-tests/system-cpi-test/src/lib.rs +++ b/program-tests/system-cpi-test/src/lib.rs @@ -1,7 +1,7 @@ #![allow(clippy::too_many_arguments)] use account_compression::{program::AccountCompression, utils::constants::CPI_AUTHORITY_PDA_SEED}; use anchor_lang::{prelude::*, solana_program::pubkey::Pubkey}; -use light_system_program::invoke::processor::CompressedProof; +use light_system_program::processor::processor::CompressedProof; pub mod create_pda; pub use create_pda::*; pub mod sdk; diff --git a/program-tests/system-cpi-test/src/sdk.rs b/program-tests/system-cpi-test/src/sdk.rs index 1534673d07..e1fba6e1d1 100644 --- a/program-tests/system-cpi-test/src/sdk.rs +++ b/program-tests/system-cpi-test/src/sdk.rs @@ -11,7 +11,7 @@ use light_compressed_token::{ get_token_pool_pda, process_transfer::transfer_sdk::to_account_metas, }; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::{ address::{ pack_new_address_params, pack_read_only_accounts, pack_read_only_address_params, diff --git a/program-tests/system-test/tests/test.rs b/program-tests/system-test/tests/test.rs index 5a69de7817..ab23c0ec96 100644 --- 
a/program-tests/system-test/tests/test.rs +++ b/program-tests/system-test/tests/test.rs @@ -23,7 +23,7 @@ use light_prover_client::gnark::helpers::{spawn_prover, ProofType, ProverConfig, use light_registry::protocol_config::state::ProtocolConfig; use light_system_program::{ errors::SystemProgramError, - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::{ address::{derive_address, derive_address_legacy}, compressed_account::{ diff --git a/programs/account-compression/src/sdk.rs b/program-tests/utils/src/acp_sdk.rs similarity index 64% rename from programs/account-compression/src/sdk.rs rename to program-tests/utils/src/acp_sdk.rs index f888744767..a1e2684a3b 100644 --- a/programs/account-compression/src/sdk.rs +++ b/program-tests/utils/src/acp_sdk.rs @@ -1,7 +1,6 @@ #![cfg(not(target_os = "solana"))] - -use anchor_lang::prelude::borsh::BorshSerialize; -use anchor_lang::{system_program, InstructionData, ToAccountMetas}; +// TODO: move file to light-test-utils +use anchor_lang::InstructionData; use solana_sdk::{ instruction::{AccountMeta, Instruction}, pubkey::Pubkey, @@ -11,8 +10,7 @@ use crate::{ instruction::{ InitializeAddressMerkleTreeAndQueue, InitializeStateMerkleTreeAndNullifierQueue, }, - AddressMerkleTreeConfig, AddressQueueConfig, AppendLeavesInput, NullifierQueueConfig, - StateMerkleTreeConfig, + AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, StateMerkleTreeConfig, }; pub fn create_initialize_merkle_tree_instruction( @@ -50,47 +48,47 @@ pub fn create_initialize_merkle_tree_instruction( } } -pub fn create_insert_leaves_instruction( - leaves: Vec<(u8, [u8; 32])>, - fee_payer: Pubkey, - authority: Pubkey, - merkle_tree_pubkeys: Vec, -) -> Instruction { - let data = leaves - .iter() - .into_iter() - .map(|x| AppendLeavesInput { - index: x.0, - leaf: x.1, - }) - .collect::>(); +// pub fn create_insert_leaves_instruction( +// leaves: Vec<(u8, [u8; 32])>, +// fee_payer: Pubkey, +// authority: Pubkey, +// 
merkle_tree_pubkeys: Vec, +// ) -> Instruction { +// let data = leaves +// .iter() +// .into_iter() +// .map(|x| AppendLeavesInput { +// index: x.0, +// leaf: x.1, +// }) +// .collect::>(); - let mut bytes = Vec::new(); - data.serialize(&mut bytes).unwrap(); +// let mut bytes = Vec::new(); +// data.serialize(&mut bytes).unwrap(); - let instruction_data = crate::instruction::AppendLeavesToMerkleTrees { bytes }; +// let instruction_data = crate::instruction::AppendLeavesToMerkleTrees { bytes }; - let accounts = crate::accounts::AppendLeaves { - fee_payer, - authority, - registered_program_pda: None, - system_program: system_program::ID, - }; - let merkle_tree_account_metas = merkle_tree_pubkeys - .iter() - .map(|pubkey| AccountMeta::new(*pubkey, false)) - .collect::>(); +// let accounts = crate::accounts::AppendLeaves { +// fee_payer, +// authority, +// registered_program_pda: None, +// system_program: system_program::ID, +// }; +// let merkle_tree_account_metas = merkle_tree_pubkeys +// .iter() +// .map(|pubkey| AccountMeta::new(*pubkey, false)) +// .collect::>(); - Instruction { - program_id: crate::ID, - accounts: [ - accounts.to_account_metas(Some(true)), - merkle_tree_account_metas, - ] - .concat(), - data: instruction_data.data(), - } -} +// Instruction { +// program_id: crate::ID, +// accounts: [ +// accounts.to_account_metas(Some(true)), +// merkle_tree_account_metas, +// ] +// .concat(), +// data: instruction_data.data(), +// } +// } pub fn create_initialize_address_merkle_tree_and_queue_instruction( index: u64, diff --git a/program-tests/utils/src/address.rs b/program-tests/utils/src/address.rs index 86c0e3abe0..4277fdbda4 100644 --- a/program-tests/utils/src/address.rs +++ b/program-tests/utils/src/address.rs @@ -1,4 +1,3 @@ -use account_compression::instruction::InsertAddresses; use anchor_lang::{prelude::AccountMeta, system_program, InstructionData, ToAccountMetas}; use light_client::rpc::{RpcConnection, RpcError}; use solana_sdk::{ @@ -8,44 +7,44 @@ use 
solana_sdk::{ transaction::Transaction, }; -pub async fn insert_addresses( - context: &mut R, - address_queue_pubkey: Pubkey, - address_merkle_tree_pubkey: Pubkey, - addresses: Vec<[u8; 32]>, -) -> Result { - let num_addresses = addresses.len(); - let instruction_data = InsertAddresses { addresses }; - let accounts = account_compression::accounts::InsertIntoQueues { - fee_payer: context.get_payer().pubkey(), - authority: context.get_payer().pubkey(), - registered_program_pda: None, - system_program: system_program::ID, - }; - let insert_ix = Instruction { - program_id: account_compression::ID, - accounts: [ - accounts.to_account_metas(Some(true)), - vec![ - vec![ - AccountMeta::new(address_queue_pubkey, false), - AccountMeta::new(address_merkle_tree_pubkey, false) - ]; - num_addresses - ] - .iter() - .flat_map(|x| x.to_vec()) - .collect::>(), - ] - .concat(), - data: instruction_data.data(), - }; - let latest_blockhash = context.get_latest_blockhash().await.unwrap(); - let transaction = Transaction::new_signed_with_payer( - &[insert_ix], - Some(&context.get_payer().pubkey()), - &[&context.get_payer()], - latest_blockhash, - ); - context.process_transaction(transaction).await -} +// pub async fn insert_addresses( +// context: &mut R, +// address_queue_pubkey: Pubkey, +// address_merkle_tree_pubkey: Pubkey, +// addresses: Vec<[u8; 32]>, +// ) -> Result { +// let num_addresses = addresses.len(); +// let instruction_data = InsertAddresses { addresses }; +// let accounts = account_compression::accounts::InsertIntoQueues { +// fee_payer: context.get_payer().pubkey(), +// authority: context.get_payer().pubkey(), +// registered_program_pda: None, +// system_program: system_program::ID, +// }; +// let insert_ix = Instruction { +// program_id: account_compression::ID, +// accounts: [ +// accounts.to_account_metas(Some(true)), +// vec![ +// vec![ +// AccountMeta::new(address_queue_pubkey, false), +// AccountMeta::new(address_merkle_tree_pubkey, false) +// ]; +// num_addresses 
+// ] +// .iter() +// .flat_map(|x| x.to_vec()) +// .collect::>(), +// ] +// .concat(), +// data: instruction_data.data(), +// }; +// let latest_blockhash = context.get_latest_blockhash().await.unwrap(); +// let transaction = Transaction::new_signed_with_payer( +// &[insert_ix], +// Some(&context.get_payer().pubkey()), +// &[&context.get_payer()], +// latest_blockhash, +// ); +// context.process_transaction(transaction).await +// } diff --git a/program-tests/utils/src/conversions.rs b/program-tests/utils/src/conversions.rs index 162826896f..ddcdac2155 100644 --- a/program-tests/utils/src/conversions.rs +++ b/program-tests/utils/src/conversions.rs @@ -3,10 +3,8 @@ use light_compressed_token::{ }; use light_sdk::{self as sdk, proof::CompressedProof}; use light_system_program::{ - invoke::{ - processor::CompressedProof as ProgramCompressedProof, - OutputCompressedAccountWithPackedContext as ProgramOutputCompressedAccountWithPackedContext, - }, + invoke::instruction::OutputCompressedAccountWithPackedContext as ProgramOutputCompressedAccountWithPackedContext, + processor::processor::CompressedProof as ProgramCompressedProof, sdk::{ compressed_account::{ CompressedAccount as ProgramCompressedAccount, diff --git a/program-tests/utils/src/create_address_test_program_sdk.rs b/program-tests/utils/src/create_address_test_program_sdk.rs index e69159e650..a7aa9cd6e4 100644 --- a/program-tests/utils/src/create_address_test_program_sdk.rs +++ b/program-tests/utils/src/create_address_test_program_sdk.rs @@ -9,7 +9,7 @@ use light_client::{ use light_compressed_token::process_transfer::transfer_sdk::to_account_metas; use light_program_test::{indexer::TestIndexerExtensions, test_env::EnvAccounts}; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::address::{derive_address, pack_new_address_params}, NewAddressParams, }; diff --git a/program-tests/utils/src/spl.rs b/program-tests/utils/src/spl.rs index d78abfdcdc..830aecb563 
100644 --- a/program-tests/utils/src/spl.rs +++ b/program-tests/utils/src/spl.rs @@ -27,7 +27,7 @@ use light_hasher::Poseidon; use light_program_test::indexer::TestIndexerExtensions; use light_sdk::token::TokenDataWithMerkleContext; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::{compressed_account::MerkleContext, event::PublicTransactionEvent}, }; use solana_program_test::BanksClientError; diff --git a/program-tests/utils/src/test_forester.rs b/program-tests/utils/src/test_forester.rs index 900dee4824..d9123de532 100644 --- a/program-tests/utils/src/test_forester.rs +++ b/program-tests/utils/src/test_forester.rs @@ -1,5 +1,5 @@ use account_compression::{ - instruction::{InsertAddresses, UpdateAddressMerkleTree}, + instruction::UpdateAddressMerkleTree, state::QueueAccount, utils::constants::{ADDRESS_MERKLE_TREE_HEIGHT, ADDRESS_MERKLE_TREE_ROOTS}, AddressMerkleTreeAccount, StateMerkleTreeAccount, ID, SAFETY_MARGIN, diff --git a/programs/account-compression/Cargo.toml b/programs/account-compression/Cargo.toml index cadd6edd54..1f99980e14 100644 --- a/programs/account-compression/Cargo.toml +++ b/programs/account-compression/Cargo.toml @@ -17,9 +17,9 @@ no-log-ix-name = [] cpi = ["no-entrypoint"] custom-heap = ["light-heap"] mem-profiling = [] -default = ["custom-heap", "test"] +default = [ "test"] test-sbf = [] -bench-sbf = [] +bench-sbf = ["custom-heap"] test = [] diff --git a/programs/account-compression/src/context.rs b/programs/account-compression/src/context.rs index be687ea51a..4ad7f96348 100644 --- a/programs/account-compression/src/context.rs +++ b/programs/account-compression/src/context.rs @@ -16,12 +16,24 @@ use crate::{ errors::AccountCompressionErrorCode, state_merkle_tree_from_bytes_zero_copy_mut, utils::{ - check_signer_is_registered_or_authority::manual_check_signer_is_registered_or_authority, + check_signer_is_registered_or_authority::{ + manual_check_signer_is_registered_or_authority, 
GroupAccess, + }, constants::CPI_AUTHORITY_PDA_SEED, }, AddressMerkleTreeAccount, QueueAccount, StateMerkleTreeAccount, }; +impl GroupAccess for BatchedQueueAccount<'_> { + fn get_owner(&self) -> Pubkey { + self.metadata.access_metadata.owner.into() + } + + fn get_program_owner(&self) -> Pubkey { + self.metadata.access_metadata.program_owner.into() + } +} + use super::RegisteredProgram; pub struct LightContext<'a, 'info> { pub accounts: Vec>, @@ -59,40 +71,21 @@ impl<'a, 'info> LightContext<'a, 'info> { } } - // pub fn registered_program_pda(&self) -> Option<&(Pubkey, Pubkey)> { - // match &self.accounts[REGISTERED_PROGRAM_PDA_INDEX] { - // AcpAccount::RegisteredProgramPda(registered_program_pda) => { - // Some(registered_program_pda) - // } - // _ => None, - // } - // } - - pub fn system_program(&self) -> &AccountInfo<'info> { - let offset = if self.invoked_by_program { 2 } else { 1 }; - match self.accounts[offset] { - AcpAccount::SystemProgram(account) => account, - _ => panic!("Invalid fee payer account"), - } - } - #[inline(always)] pub fn remaining_accounts_mut(&mut self) -> &mut [AcpAccount<'a, 'info>] { - let offset = if self.invoked_by_program { 3 } else { 2 }; + let offset = if self.invoked_by_program { 2 } else { 1 }; &mut self.accounts[offset..] } #[inline(always)] pub fn remaining_accounts(&self) -> &[AcpAccount<'a, 'info>] { - let offset = if self.invoked_by_program { 3 } else { 2 }; + let offset = if self.invoked_by_program { 2 } else { 1 }; &self.accounts[offset..] } } const FEE_PAYER_INDEX: usize = 0; const AUTHORITY_INDEX: usize = 1; -const REGISTERED_PROGRAM_PDA_INDEX: usize = 2; -const SYSTEM_PROGRAM_INDEX: usize = 3; #[derive(Debug)] pub enum AcpAccount<'a, 'info> { @@ -118,7 +111,6 @@ impl<'a, 'info> AcpAccount<'a, 'info> { /// 1. Fee payer /// 2. Authority /// 3. Option - /// 4. System program ( todo remove we don't need it anymore don't do any transfers) /// ... 
other accounts #[inline(always)] pub fn from_account_infos( @@ -127,7 +119,7 @@ impl<'a, 'info> AcpAccount<'a, 'info> { invoked_by_program: bool, bump: u8, ) -> std::result::Result>, AccountCompressionErrorCode> { - // TODO: remove + 1 and passed in fee_payer once we removed anchor. + // TODO: remove + 1 and pass in fee_payer once we removed anchor. let mut vec = Vec::with_capacity(account_infos.len() + 1); vec.push(AcpAccount::FeePayer(&fee_payer)); vec.push(AcpAccount::Authority(&account_infos[0])); @@ -152,17 +144,7 @@ impl<'a, 'info> AcpAccount<'a, 'info> { } false => None, }; - { - let system_program_account = &account_infos[skip as usize]; - if system_program_account.key() != Pubkey::default() { - msg!("system_program_account {:?}", system_program_account.key()); - panic!("Invalid system program account"); - // return Err(AccountCompressionErrorCode::InvalidAuthority); - } - vec.push(AcpAccount::SystemProgram(&system_program_account)); - } - skip += 1; account_infos.iter().skip(skip).for_each(|account_info| { let account = AcpAccount::try_from_account_info(account_info, &vec[1], &derived_address).unwrap(); @@ -172,7 +154,7 @@ impl<'a, 'info> AcpAccount<'a, 'info> { } #[inline(always)] - pub fn try_from_account_info( + pub(crate) fn try_from_account_info( account_info: &'info AccountInfo<'info>, authority: &AcpAccount<'a, 'info>, registered_program_pda: &Option<(Pubkey, Pubkey)>, @@ -181,7 +163,7 @@ impl<'a, 'info> AcpAccount<'a, 'info> { msg!("Invalid owner {:?}", account_info.owner); return Err(AccountCompressionErrorCode::InputDeserializationFailed); } - let mut discriminator = account_info + let discriminator = account_info .try_borrow_data() .map_err(|_| AccountCompressionErrorCode::InputDeserializationFailed)?[..8] .try_into() @@ -210,6 +192,7 @@ impl<'a, 'info> AcpAccount<'a, 'info> { &tree, ) .unwrap(); + Ok(AcpAccount::BatchedStateTree(tree)) } _ => Err(AccountCompressionErrorCode::InputDeserializationFailed), @@ -224,6 +207,7 @@ impl<'a, 'info> 
AcpAccount<'a, 'info> { &queue, ) .unwrap(); + Ok(AcpAccount::OutputQueue(queue)) } StateMerkleTreeAccount::DISCRIMINATOR => { diff --git a/programs/account-compression/src/instructions/append_nullify_create_address.rs b/programs/account-compression/src/instructions/append_nullify_create_address.rs deleted file mode 100644 index 14cc3f62db..0000000000 --- a/programs/account-compression/src/instructions/append_nullify_create_address.rs +++ /dev/null @@ -1,581 +0,0 @@ -use anchor_lang::prelude::*; -use light_batched_merkle_tree::{ - merkle_tree::BatchedMerkleTreeAccount, queue::BatchedQueueAccount, -}; -use light_zero_copy::{errors::ZeroCopyError, slice_mut::ZeroCopySliceMut}; -use num_bigint::BigUint; -use std::mem::size_of; -use std::ops::{Deref, DerefMut}; -use zerocopy::{little_endian::U32, FromBytes, Immutable, IntoBytes, KnownLayout, Ref, Unaligned}; - -use crate::{ - context::AcpAccount, errors::AccountCompressionErrorCode, queue_from_bytes_zero_copy_mut, - QueueAccount, -}; - -use super::AppendLeavesInput; - -#[repr(C)] -#[derive( - FromBytes, IntoBytes, KnownLayout, Immutable, Copy, Clone, PartialEq, Debug, Unaligned, -)] -pub struct AppendNullifyCreateAddressInputsMeta { - is_invoked_by_program: u8, - pub bump: u8, - pub num_queues: u8, - pub num_unique_appends: u8, - pub num_address_appends: u8, - pub tx_hash: [u8; 32], -} - -#[derive(Debug)] -pub struct AppendNullifyCreateAddressInputs<'a> { - meta: Ref<&'a mut [u8], AppendNullifyCreateAddressInputsMeta>, - pub leaves: ZeroCopySliceMut<'a, u8, AppendLeavesInput, false>, - pub nullifiers: ZeroCopySliceMut<'a, u8, InsertNullifierInput, false>, - pub addresses: ZeroCopySliceMut<'a, u8, InsertAddressInput, false>, - // Don't add sequence numbers we don't want to deserialize these here. 
-} - -impl<'a> AppendNullifyCreateAddressInputs<'a> { - pub fn is_invoked_by_program(&self) -> bool { - self.meta.is_invoked_by_program == 1 - } - - pub fn set_invoked_by_program(&mut self, value: bool) { - self.meta.is_invoked_by_program = value as u8; - } - - pub fn required_size_for_capacity( - leaves_capacity: u8, - nullifiers_capacity: u8, - addresses_capacity: u8, - ) -> usize { - size_of::() - + ZeroCopySliceMut::::required_size_for_capacity( - leaves_capacity, - ) - + ZeroCopySliceMut::::required_size_for_capacity( - nullifiers_capacity, - ) - + ZeroCopySliceMut::::required_size_for_capacity( - addresses_capacity, - ) - } - - pub fn new( - bytes: &'a mut [u8], - leaves_capacity: u8, - nullifiers_capacity: u8, - addresses_capacity: u8, - ) -> std::result::Result { - let (meta, bytes) = bytes.split_at_mut(size_of::()); - let meta = Ref::<&mut [u8], AppendNullifyCreateAddressInputsMeta>::from_bytes(meta)?; - let (leaves, bytes) = - ZeroCopySliceMut::::new_at(leaves_capacity, bytes)?; - let (nullifiers, bytes) = ZeroCopySliceMut::::new_at( - nullifiers_capacity, - bytes, - )?; - let addresses = - ZeroCopySliceMut::::new(addresses_capacity, bytes)?; - Ok(AppendNullifyCreateAddressInputs { - meta, - leaves, - nullifiers, - addresses, - }) - } -} - -impl Deref for AppendNullifyCreateAddressInputs<'_> { - type Target = AppendNullifyCreateAddressInputsMeta; - - fn deref(&self) -> &Self::Target { - &self.meta - } -} - -impl DerefMut for AppendNullifyCreateAddressInputs<'_> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.meta - } -} - -pub fn deserialize_nullify_append_create_address_inputs<'a>( - bytes: &'a mut [u8], -) -> std::result::Result, ZeroCopyError> { - let (metadata, bytes) = bytes.split_at_mut(size_of::()); - let meta = Ref::<&mut [u8], AppendNullifyCreateAddressInputsMeta>::from_bytes(metadata)?; - - let (leaves, bytes) = ZeroCopySliceMut::::from_bytes_at(bytes)?; - - let (nullifiers, bytes) = - ZeroCopySliceMut::::from_bytes_at(bytes)?; - 
let (addresses, _bytes) = - ZeroCopySliceMut::::from_bytes_at(bytes)?; - Ok(AppendNullifyCreateAddressInputs { - meta, - leaves, - nullifiers, - addresses, - }) -} - -#[repr(C)] -#[derive( - KnownLayout, IntoBytes, Immutable, Copy, Clone, FromBytes, PartialEq, Debug, Unaligned, -)] -pub struct InsertNullifierInput { - pub account_hash: [u8; 32], - pub leaf_index: U32, - pub prove_by_index: u8, - pub tree_index: u8, - pub queue_index: u8, -} - -#[inline(always)] -pub fn insert_nullifiers<'a, 'info>( - num_queues: u8, - tx_hash: [u8; 32], - nullifiers: &[InsertNullifierInput], - accounts: &mut [AcpAccount<'a, 'info>], - // (tree_index, rollover_fee) no rollover fee for the input queue all rollover fees are paid in the output queue. - // rollover_fee_vec: Vec<(u8, u64)>, -) -> Result<()> { - if nullifiers.is_empty() { - return Ok(()); - } - let mut inserted_nullifiers = 0; - let mut current_tree_index = nullifiers[0].tree_index; - let mut current_queue_index = nullifiers[0].queue_index; - let mut dedup_vec = Vec::with_capacity(num_queues as usize); - // let mut queue_account_info_index = start_account_index; - for _ in 0..num_queues { - // TODO: extract into function and test it. 
- // let (queue, accounts) = accounts.split_at_mut((current_queue_index + 1) as usize); - // let (queue_account, merkle_tree_account) = if current_tree_index > current_queue_index { - // let merkle_tree = - // &mut accounts[(current_tree_index - (current_queue_index + 1)) as usize]; - // let queue_account = &mut queue[current_queue_index as usize]; - // (queue_account, merkle_tree) - // } else { - // let (tree, queue) = queue.split_at_mut((current_tree_index + 1) as usize); - // let merkle_tree = &mut tree[current_tree_index as usize]; - // let queue_account = - // &mut queue[(current_queue_index - (current_tree_index + 1)) as usize]; - // (queue_account, merkle_tree) - // }; - let (queue_account, merkle_tree_account) = get_queue_and_tree_accounts( - accounts, - current_queue_index as usize, - current_tree_index as usize, - ) - .unwrap(); - - match queue_account { - AcpAccount::OutputQueue(queue) => { - inserted_nullifiers += refactored_process_nullifier_v2( - merkle_tree_account, - queue, - &tx_hash, - nullifiers, - current_queue_index, - )?; - } - AcpAccount::V1Queue(queue_account_info) => { - inserted_nullifiers += refactored_process_nullifier_v1( - merkle_tree_account, - queue_account_info, - nullifiers, - current_queue_index, - )?; - } - AcpAccount::BatchedStateTree(_) => { - msg!("BatchedStateTree"); - unimplemented!(); - } - AcpAccount::StateTree(_) => { - msg!("StateTree"); - unimplemented!(); - } - AcpAccount::BatchedAddressTree(_) => { - msg!("BatchedAddressTree"); - unimplemented!(); - } - _ => { - unimplemented!() - } - } - - dedup_vec.push(current_queue_index); - if dedup_vec.len() == num_queues as usize { - break; - } - // find next tree index which doesn't exist in dedup vec yet - let input = nullifiers - .iter() - .find(|x| { - !dedup_vec - .iter() - .any(|&queue_index| queue_index == x.queue_index) - }) - .unwrap(); - current_tree_index = input.tree_index; - current_queue_index = input.queue_index; - } - if inserted_nullifiers != nullifiers.len() { 
- msg!("inserted_nullifiers {:?}", inserted_nullifiers); - msg!("nullifiers.len() {:?}", nullifiers.len()); - return err!(AccountCompressionErrorCode::NotAllLeavesProcessed); - } - Ok(()) -} - -#[inline(always)] -fn refactored_process_nullifier_v2<'a, 'info>( - merkle_tree: &mut AcpAccount<'a, 'info>, - output_queue: &mut BatchedQueueAccount<'info>, - tx_hash: &[u8; 32], - nullifiers: &[InsertNullifierInput], - current_queue_index: u8, -) -> Result { - let nullifiers = nullifiers - .iter() - .filter(|x| x.queue_index == current_queue_index); - let merkle_tree = if let AcpAccount::BatchedStateTree(tree) = merkle_tree { - tree - } else { - panic!("Invalid account"); - }; - // 3. Check queue and Merkle tree are associated. - output_queue - .check_is_associated(merkle_tree.pubkey()) - .map_err(ProgramError::from)?; - - let mut num_elements = 0; - - for nullifier in nullifiers { - num_elements += 1; - light_heap::bench_sbf_start!("acp_insert_nf_into_queue_v2"); - // 4. check for every account whether the value is still in the queue and zero it out. - // If checked fail if the value is not in the queue. - let proof_index = if nullifier.prove_by_index == 1 { - true - } else if nullifier.prove_by_index == 0 { - false - } else { - panic!("invalid value"); - }; - let leaf_index = nullifier.leaf_index.into(); - output_queue - .prove_inclusion_by_index_and_zero_out_leaf( - leaf_index, - &nullifier.account_hash, - proof_index, - ) - .map_err(ProgramError::from)?; - - // 5. Insert the nullifiers into the current input queue batch. 
- merkle_tree - .insert_nullifier_into_current_batch(&nullifier.account_hash, leaf_index, &tx_hash) - .map_err(ProgramError::from)?; - light_heap::bench_sbf_end!("acp_insert_nf_into_queue_v2"); - } - msg!("v2 num_elements {:?}", num_elements); - Ok(num_elements) -} - -fn refactored_process_nullifier_v1<'a, 'info>( - merkle_tree: &mut AcpAccount<'a, 'info>, - nullifier_queue: &mut AccountInfo<'info>, - nullifiers: &[InsertNullifierInput], - current_queue_index: u8, -) -> Result { - let nullifiers = nullifiers - .iter() - .filter(|x| x.queue_index == current_queue_index); - let (merkle_pubkey, merkle_tree) = if let AcpAccount::StateTree(tree) = merkle_tree { - tree - } else { - panic!("Invalid account"); - }; - msg!("refactored_process_nullifier_v1"); - { - let queue_data = nullifier_queue - .try_borrow_data() - .map_err(ProgramError::from)?; - msg!("data len {:?}", queue_data.len()); - msg!("discriminator {:?}", queue_data[0..32].to_vec()); - msg!("queue pubkey {:?}", nullifier_queue.key()); - let queue = bytemuck::from_bytes::(&queue_data[8..QueueAccount::LEN]); - // 3. Check queue and Merkle tree are associated. - if queue.metadata.associated_merkle_tree != (*merkle_pubkey).into() { - msg!( - "Queue account {:?} is not associated with Merkle tree {:?}", - nullifier_queue.key(), - *merkle_pubkey - ); - return err!(AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated); - } - } - let mut num_elements = 0; - // 5. Insert the nullifiers into the queues hash set. 
- msg!("refactored_process_nullifier_v1 2"); - - let sequence_number = { - // let merkle_tree = merkle_tree.try_borrow_data()?; - // let merkle_tree = state_merkle_tree_from_bytes_zero_copy(&merkle_tree)?; - merkle_tree.sequence_number() - }; - let mut queue = nullifier_queue.try_borrow_mut_data()?; - let mut queue = unsafe { queue_from_bytes_zero_copy_mut(&mut queue).unwrap() }; - light_heap::bench_sbf_end!("acp_prep_insertion"); - light_heap::bench_sbf_start!("acp_insert_nf_into_queue"); - for nullifier in nullifiers { - if nullifier.prove_by_index == 1 { - return Err(AccountCompressionErrorCode::V1AccountMarkedAsProofByIndex.into()); - } - num_elements += 1; - let element = BigUint::from_bytes_be(nullifier.account_hash.as_slice()); - queue - .insert(&element, sequence_number) - .map_err(ProgramError::from)?; - } - light_heap::bench_sbf_end!("acp_insert_nf_into_queue"); - msg!("v1 num_elements {:?}", num_elements); - - Ok(num_elements) -} - -fn get_queue_and_tree_accounts<'a, 'b, 'info>( - accounts: &'b mut [AcpAccount<'a, 'info>], - queue_index: usize, - tree_index: usize, -) -> std::result::Result< - (&'b mut AcpAccount<'a, 'info>, &'b mut AcpAccount<'a, 'info>), - AccountCompressionErrorCode, -> { - // if queue_index == tree_index { - // return Err(AccountCompressionErrorCode::SameIndex); - // } - let (smaller, bigger) = if queue_index < tree_index { - (queue_index, tree_index) - } else { - (tree_index, queue_index) - }; - // if bigger >= accounts.len() { - // return Err(AccountCompressionErrorCode::OutOfBounds); - // } - let (left, right) = accounts.split_at_mut(bigger); - let smaller_ref = &mut left[smaller]; - let bigger_ref = &mut right[0]; - Ok(if queue_index < tree_index { - (smaller_ref, bigger_ref) - } else { - (bigger_ref, smaller_ref) - }) -} - -#[repr(C)] -#[derive( - KnownLayout, - IntoBytes, - Immutable, - Copy, - Clone, - FromBytes, - AnchorSerialize, - AnchorDeserialize, - PartialEq, - Debug, - Unaligned, -)] -pub struct InsertAddressInput { - 
pub address: [u8; 32], - pub tree_index: u8, - pub queue_index: u8, -} - -pub trait AccountCompressionProgramAccount { - fn append(&mut self, batch_size: usize, leaves: &[AppendLeavesInput]) -> Result; - fn insert(&mut self) -> Result<()>; -} - -impl<'a> AccountCompressionProgramAccount for BatchedQueueAccount<'a> { - fn append(&mut self, batch_size: usize, leaves: &[AppendLeavesInput]) -> Result { - for leaf in leaves { - self.insert_into_current_batch(&leaf.leaf) - .map_err(ProgramError::from)?; - } - - let rollover_fee = self.metadata.rollover_metadata.rollover_fee * batch_size as u64; - Ok(rollover_fee) - } - - fn insert(&mut self) -> Result<()> { - unimplemented!("Batched queue accounts only append.") - } -} - -#[inline(always)] -pub fn insert_addresses<'a, 'info>( - num_queues: u8, - addresses: &[InsertAddressInput], - accounts: &mut [AcpAccount<'a, 'info>], - // (tree_index, rollover_fee) no rollover fee for the input queue all rollover fees are paid in the output queue. - // rollover_fee_vec: Vec<(u8, u64)>, -) -> Result<()> { - if addresses.is_empty() { - return Ok(()); - } - msg!("num address queues {:?}", num_queues); - - let mut inserted_nullifiers = 0; - let mut current_tree_index = addresses[0].tree_index; - let mut current_queue_index = addresses[0].queue_index; - let mut dedup_vec = Vec::with_capacity(num_queues as usize); - // let mut queue_account_info_index = start_account_index; - for _ in 0..num_queues { - // TODO: extract into function and test it. 
- // let (queue, accounts) = accounts.split_at_mut((current_queue_index + 1) as usize); - // let (queue_account, merkle_tree_account) = if current_tree_index > current_queue_index { - // let merkle_tree = - // &mut accounts[(current_tree_index - (current_queue_index + 1)) as usize]; - // let queue_account = &mut queue[current_queue_index as usize]; - // (queue_account, merkle_tree) - // } else { - // let (tree, queue) = queue.split_at_mut((current_tree_index + 1) as usize); - // let merkle_tree = &mut tree[current_tree_index as usize]; - // let queue_account = - // &mut queue[(current_queue_index - (current_tree_index + 1)) as usize]; - // (queue_account, merkle_tree) - // }; - let (queue_account, merkle_tree_account) = get_queue_and_tree_accounts( - accounts, - current_queue_index as usize, - current_tree_index as usize, - ) - .unwrap(); - - match queue_account { - AcpAccount::BatchedAddressTree(address_tree) => { - inserted_nullifiers += - process_address_v2(address_tree, addresses, current_queue_index)?; - } - AcpAccount::V1Queue(queue_account_info) => { - inserted_nullifiers += refactored_process_address_v1( - merkle_tree_account, - queue_account_info, - addresses, - current_queue_index, - )?; - } - _ => unimplemented!(), - } - - dedup_vec.push(current_queue_index); - if dedup_vec.len() == num_queues as usize { - break; - } - // find next tree index which doesn't exist in dedup vec yet - let input = addresses - .iter() - .find(|x| { - !dedup_vec - .iter() - .any(|&queue_index| queue_index == x.queue_index) - }) - .unwrap(); - current_tree_index = input.tree_index; - current_queue_index = input.queue_index; - } - if inserted_nullifiers != addresses.len() { - msg!("inserted_nullifiers {:?}", inserted_nullifiers); - msg!("nullifiers.len() {:?}", addresses.len()); - return err!(AccountCompressionErrorCode::NotAllLeavesProcessed); - } - Ok(()) -} - -/// Insert a batch of addresses into the address queue. -/// 1. Check discriminator and account ownership. -/// 2. 
Check that the signer is the authority or registered program. -/// 3. Insert the addresses into the current batch. -/// 4. Return rollover fee. -fn process_address_v2<'info>( - addresse_tree: &mut BatchedMerkleTreeAccount<'info>, - addresses: &[InsertAddressInput], - current_queue_index: u8, -) -> Result { - let addresses = addresses - .iter() - .filter(|x| x.queue_index == current_queue_index); - // 3. Insert the addresses into the current batch. - // for element in queue_bundle.elements.iter() { - light_heap::bench_sbf_start!("acp_insert_nf_into_queue_v2"); - let mut num_elements = 0; - for address in addresses { - num_elements += 1; - addresse_tree - .insert_address_into_current_batch(&address.address) - .map_err(ProgramError::from)?; - light_heap::bench_sbf_end!("acp_insert_nf_into_queue_v2"); - } - Ok(num_elements) -} - -fn refactored_process_address_v1<'a, 'info>( - merkle_tree: &mut AcpAccount<'a, 'info>, - nullifier_queue: &mut AccountInfo<'info>, - addresses: &[InsertAddressInput], - current_queue_index: u8, -) -> Result { - let addresses = addresses - .iter() - .filter(|x| x.queue_index == current_queue_index); - let (merkle_pubkey, merkle_tree) = if let AcpAccount::AddressTree(tree) = merkle_tree { - tree - } else { - panic!("Invalid account"); - }; - msg!("refactored_process_nullifier_v1"); - { - let queue_data = nullifier_queue - .try_borrow_data() - .map_err(ProgramError::from)?; - msg!("data len {:?}", queue_data.len()); - msg!("discriminator {:?}", queue_data[0..32].to_vec()); - msg!("queue pubkey {:?}", nullifier_queue.key()); - let queue = bytemuck::from_bytes::(&queue_data[8..QueueAccount::LEN]); - // 3. Check queue and Merkle tree are associated. 
- if queue.metadata.associated_merkle_tree != (*merkle_pubkey).into() { - msg!( - "Queue account {:?} is not associated with Merkle tree {:?}", - nullifier_queue.key(), - *merkle_pubkey - ); - return err!(AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated); - } - } - let mut num_elements = 0; - // 5. Insert the addresses into the queues hash set. - msg!("refactored_process_nullifier_v1 2"); - - let sequence_number = merkle_tree.sequence_number(); - let mut queue = nullifier_queue.try_borrow_mut_data()?; - let mut queue = unsafe { queue_from_bytes_zero_copy_mut(&mut queue).unwrap() }; - light_heap::bench_sbf_end!("acp_prep_insertion"); - light_heap::bench_sbf_start!("acp_insert_nf_into_queue"); - for address in addresses { - num_elements += 1; - let element = BigUint::from_bytes_be(address.address.as_slice()); - queue - .insert(&element, sequence_number) - .map_err(ProgramError::from)?; - } - light_heap::bench_sbf_end!("acp_insert_nf_into_queue"); - msg!("v1 num_elements {:?}", num_elements); - - Ok(num_elements) -} diff --git a/programs/account-compression/src/instructions/initialize_address_merkle_tree_and_queue.rs b/programs/account-compression/src/instructions/initialize_address_merkle_tree_and_queue.rs index cac521340e..4d0d1fc8c2 100644 --- a/programs/account-compression/src/instructions/initialize_address_merkle_tree_and_queue.rs +++ b/programs/account-compression/src/instructions/initialize_address_merkle_tree_and_queue.rs @@ -3,8 +3,10 @@ use light_utils::account::check_account_balance_is_rent_exempt; use crate::{ errors::AccountCompressionErrorCode, - initialize_address_merkle_tree::process_initialize_address_merkle_tree, - initialize_address_queue::process_initialize_address_queue, + processor::{ + initialize_address_merkle_tree::process_initialize_address_merkle_tree, + initialize_address_queue::process_initialize_address_queue, + }, state::QueueAccount, utils::{ check_signer_is_registered_or_authority::{ diff --git 
a/programs/account-compression/src/instructions/initialize_state_merkle_tree_and_nullifier_queue.rs b/programs/account-compression/src/instructions/initialize_state_merkle_tree_and_nullifier_queue.rs index b9e33f2d75..93c9e17e02 100644 --- a/programs/account-compression/src/instructions/initialize_state_merkle_tree_and_nullifier_queue.rs +++ b/programs/account-compression/src/instructions/initialize_state_merkle_tree_and_nullifier_queue.rs @@ -5,8 +5,10 @@ use light_utils::account::check_account_balance_is_rent_exempt; use crate::{ errors::AccountCompressionErrorCode, - initialize_concurrent_merkle_tree::process_initialize_state_merkle_tree, - initialize_nullifier_queue::process_initialize_nullifier_queue, + processor::{ + initialize_concurrent_merkle_tree::process_initialize_state_merkle_tree, + initialize_nullifier_queue::process_initialize_nullifier_queue, + }, state::{QueueAccount, StateMerkleTreeAccount}, utils::{ check_signer_is_registered_or_authority::{ diff --git a/programs/account-compression/src/instructions/insert_into_queues.rs b/programs/account-compression/src/instructions/insert_into_queues.rs index 9f0a233d55..6d5b0ff8ff 100644 --- a/programs/account-compression/src/instructions/insert_into_queues.rs +++ b/programs/account-compression/src/instructions/insert_into_queues.rs @@ -1,444 +1,139 @@ -use std::collections::HashMap; - -use anchor_lang::{ - prelude::*, - solana_program::{log::sol_log_compute_units, pubkey::Pubkey}, - Discriminator, -}; -use light_batched_merkle_tree::{ - merkle_tree::BatchedMerkleTreeAccount, queue::BatchedQueueAccount, -}; -use light_hasher::Discriminator as LightDiscriminator; -use light_merkle_tree_metadata::queue::{check_queue_type, QueueType}; -use num_bigint::BigUint; - -use crate::{ - errors::AccountCompressionErrorCode, - state::queue::{queue_from_bytes_zero_copy_mut, QueueAccount}, - state_merkle_tree_from_bytes_zero_copy, - utils::{ - check_signer_is_registered_or_authority::check_signer_is_registered_or_authority, 
- queue::{QueueBundle, QueueMap}, - transfer_lamports::transfer_lamports_cpi, - }, - RegisteredProgram, -}; - -#[derive(Accounts)] -pub struct InsertIntoQueues<'info> { - /// Fee payer pays rollover fee. - #[account(mut)] - pub fee_payer: Signer<'info>, - /// CHECK: should only be accessed by a registered program or owner. - pub authority: Signer<'info>, - pub registered_program_pda: Option>, - pub system_program: Program<'info, System>, +use light_zero_copy::{errors::ZeroCopyError, slice_mut::ZeroCopySliceMut}; +use std::mem::size_of; +use std::ops::{Deref, DerefMut}; +use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout, Ref, Unaligned}; + +use crate::processor::insert_addresses::InsertAddressInput; +use crate::processor::insert_leaves::AppendLeavesInput; +use crate::processor::insert_nullifiers::InsertNullifierInput; +use crate::{context::AcpAccount, errors::AccountCompressionErrorCode}; + +#[repr(C)] +#[derive( + FromBytes, IntoBytes, KnownLayout, Immutable, Copy, Clone, PartialEq, Debug, Unaligned, +)] +pub struct AppendNullifyCreateAddressInputsMeta { + is_invoked_by_program: u8, + pub bump: u8, + pub num_queues: u8, + pub num_unique_appends: u8, + pub num_address_appends: u8, + pub tx_hash: [u8; 32], } -/// Insert elements into the queues. -/// 1. Deduplicate tree and queue pairs. -/// 1.1 select logic to create queue element -/// based on the account discriminator. -/// 2. Check that all leaves are processed. -/// 3. For each queue bundle: -/// 3.1 Process bundle -/// 3.1.1 Check account discriminators and account ownership. -/// 3.1.2 Check accounts are associated. -/// 3.1.3 Check that the signer is the authority or registered program. -/// 3.1.4 Insert elements into the queue. -/// 3.2 Transfer rollover fee. 
-pub fn process_insert_into_queues<'a, 'b, 'c: 'info, 'info>( - ctx: Context<'a, 'b, 'c, 'info, InsertIntoQueues<'info>>, - elements: &[[u8; 32]], - indices: Vec, - queue_type: QueueType, - prove_by_index: Option>, - tx_hash: Option<[u8; 32]>, -) -> Result<()> { - if elements.is_empty() { - return err!(AccountCompressionErrorCode::InputElementsEmpty); - } - - light_heap::bench_sbf_start!("acp_create_queue_map"); - - let mut queue_map = QueueMap::new(); - // 1. Deduplicate tree and queue pairs. - // So that we iterate over every pair only once, - // and pay rollover fees only once. - let mut current_index = 0; - for (index, element) in elements.iter().enumerate() { - let current_account_discriminator = ctx - .remaining_accounts - .get(current_index) - .unwrap() - .try_borrow_data()?[0..8] - .try_into() - .unwrap(); - // 1.1 select logic to create queue element - // based on the account discriminator. - match current_account_discriminator { - // V1 nullifier or address queue. - QueueAccount::DISCRIMINATOR => { - if queue_type == QueueType::NullifierQueue - && prove_by_index.as_ref().unwrap()[index] - { - return err!(AccountCompressionErrorCode::V1AccountMarkedAsProofByIndex); - } - - add_queue_bundle_v1( - &mut current_index, - queue_type, - &mut queue_map, - element, - ctx.remaining_accounts, - )? - } - // V2 nullifier (input state) queue - BatchedQueueAccount::DISCRIMINATOR => add_nullifier_queue_bundle_v2( - &mut current_index, - queue_type, - &mut queue_map, - element, - indices[index], - prove_by_index.as_ref().unwrap()[index], - ctx.remaining_accounts, - )?, - // V2 Address queue is part of the address Merkle tree account. 
- BatchedMerkleTreeAccount::DISCRIMINATOR => add_address_queue_bundle_v2( - &mut current_index, - queue_type, - &mut queue_map, - element, - ctx.remaining_accounts, - )?, - _ => { - msg!( - "Invalid account discriminator {:?}", - current_account_discriminator - ); - return err!(anchor_lang::error::ErrorCode::AccountDiscriminatorMismatch); - } - } - } - - // 2. Check that all leaves are processed. - if current_index != ctx.remaining_accounts.len() { - msg!( - "Number of remaining accounts does not match, expected {}, got {}", - current_index, - ctx.remaining_accounts.len() - ); - return err!(crate::errors::AccountCompressionErrorCode::NumberOfLeavesMismatch); - } - - light_heap::bench_sbf_end!("acp_create_queue_map"); - - for queue_bundle in queue_map.values() { - // 3.1 Process bundle - let rollover_fee = match queue_bundle.queue_type { - QueueType::NullifierQueue => process_queue_bundle_v1(&ctx, queue_bundle), - QueueType::AddressQueue => process_queue_bundle_v1(&ctx, queue_bundle), - QueueType::BatchedInput => { - process_nullifier_queue_bundle_v2(&ctx, queue_bundle, &tx_hash) - } - QueueType::BatchedAddress => process_address_queue_bundle_v2(&ctx, queue_bundle), - _ => { - msg!("Queue type {:?} is not supported", queue_bundle.queue_type); - return err!(AccountCompressionErrorCode::InvalidQueueType); - } - }?; - - // 3.2 Transfer rollover fee. - if rollover_fee > 0 { - transfer_lamports_cpi( - &ctx.accounts.fee_payer, - &queue_bundle.accounts[0].to_account_info(), - rollover_fee, - )?; - } - } - - Ok(()) +#[derive(Debug)] +pub struct AppendNullifyCreateAddressInputs<'a> { + meta: Ref<&'a mut [u8], AppendNullifyCreateAddressInputsMeta>, + pub leaves: ZeroCopySliceMut<'a, u8, AppendLeavesInput, false>, + pub nullifiers: ZeroCopySliceMut<'a, u8, InsertNullifierInput, false>, + pub addresses: ZeroCopySliceMut<'a, u8, InsertAddressInput, false>, + // Don't add sequence numbers we don't want to deserialize these here. 
} -/// Process a v1 nullifier or address queue bundle. -/// 1. Check queue discriminator and account ownership -/// (AccountLoader). -/// 2. Check that queue has expected queue type. -/// 3. Check queue and Merkle tree are associated. -/// 4. Check that the signer is the authority or registered program. -/// 5. Insert the nullifiers into the queues hash set. -/// 6. Return rollover fee. -fn process_queue_bundle_v1<'info>( - ctx: &Context<'_, '_, '_, 'info, InsertIntoQueues<'info>>, - queue_bundle: &QueueBundle<'_, '_>, -) -> Result { - // 1. Check discriminator and account ownership - let queue = AccountLoader::::try_from(queue_bundle.accounts[0])?; - let merkle_tree = queue_bundle.accounts[1]; - let associated_merkle_tree = { - let queue = queue.load()?; - // 2. Check that queue has expected queue type. - check_queue_type(&queue.metadata.queue_type, &queue_bundle.queue_type) - .map_err(ProgramError::from)?; - queue.metadata.associated_merkle_tree - }; - // 3. Check queue and Merkle tree are associated. - if merkle_tree.key() != associated_merkle_tree.into() { - msg!( - "Queue account {:?} is not associated with Merkle tree {:?}", - queue.key(), - merkle_tree.key() - ); - return err!(AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated); +impl<'a> AppendNullifyCreateAddressInputs<'a> { + pub fn is_invoked_by_program(&self) -> bool { + self.meta.is_invoked_by_program == 1 } - light_heap::bench_sbf_start!("acp_prep_insertion"); - let rollover_fee = { - let queue = queue.load()?; - // 4. Check that the signer is the authority or registered program. - check_signer_is_registered_or_authority::(ctx, &queue)?; - queue.metadata.rollover_metadata.rollover_fee * queue_bundle.elements.len() as u64 - }; - // 5. Insert the nullifiers into the queues hash set. 
- { - let sequence_number = { - let merkle_tree = merkle_tree.try_borrow_data()?; - let merkle_tree = state_merkle_tree_from_bytes_zero_copy(&merkle_tree)?; - merkle_tree.sequence_number() - }; - let queue = queue.to_account_info(); - let mut queue = queue.try_borrow_mut_data()?; - let mut queue = unsafe { queue_from_bytes_zero_copy_mut(&mut queue).unwrap() }; - light_heap::bench_sbf_end!("acp_prep_insertion"); - light_heap::bench_sbf_start!("acp_insert_nf_into_queue"); - for element in queue_bundle.elements.iter() { - let element = BigUint::from_bytes_be(element.as_slice()); - queue - .insert(&element, sequence_number) - .map_err(ProgramError::from)?; - } - light_heap::bench_sbf_end!("acp_insert_nf_into_queue"); + pub fn set_invoked_by_program(&mut self, value: bool) { + self.meta.is_invoked_by_program = value as u8; } - // 6. Return rollover fee. - Ok(rollover_fee) -} - -/// Insert nullifiers into the batched nullifier queue. -/// 1. Check discriminator & account ownership -/// (state_from_account_info). -/// 2. Check discriminator & account ownership -/// (output_from_account_info). -/// 3. Check queue and Merkle tree are associated. -/// 3. Check that the signer is the authority or registered program. -/// 4. prove inclusion by index and zero out the leaf. -/// Note that this check doesn't fail if -/// the leaf index of the element is out of range for the queue. -/// This check needs to be relaxed since we want to use -/// compressed account which are in the Merkle tree -/// not only in the queue. -/// 5. Insert the nullifiers into the current input queue batch. -/// 6. Return rollover fee. -fn process_nullifier_queue_bundle_v2<'info>( - ctx: &Context<'_, '_, '_, 'info, InsertIntoQueues<'info>>, - queue_bundle: &QueueBundle<'_, '_>, - tx_hash: &Option<[u8; 32]>, -) -> Result { - // 1. Check discriminator & account ownership of Merkle tree. 
- let merkle_tree = - &mut BatchedMerkleTreeAccount::state_from_account_info(queue_bundle.accounts[0]) - .map_err(ProgramError::from)?; - - // 2. Check discriminator & account ownership of output queue. - let output_queue = &mut BatchedQueueAccount::output_from_account_info(queue_bundle.accounts[1]) - .map_err(ProgramError::from)?; - - // 3. Check queue and Merkle tree are associated. - output_queue - .check_is_associated(&queue_bundle.accounts[0].key().into()) - .map_err(ProgramError::from)?; - - // 4. Check that the signer is the authority or registered program. - check_signer_is_registered_or_authority::( - ctx, - merkle_tree, - )?; - for ((element, leaf_index), prove_by_index) in queue_bundle - .elements - .iter() - .zip(queue_bundle.indices.iter()) - .zip(queue_bundle.prove_by_index.iter()) - { - let tx_hash = tx_hash.ok_or(AccountCompressionErrorCode::TxHashUndefined)?; - light_heap::bench_sbf_start!("acp_insert_nf_into_queue_v2"); - // 4. check for every account whether the value is still in the queue and zero it out. - // If checked fail if the value is not in the queue. - output_queue - .prove_inclusion_by_index_and_zero_out_leaf( - *leaf_index as u64, - element, - *prove_by_index, + pub fn required_size_for_capacity( + leaves_capacity: u8, + nullifiers_capacity: u8, + addresses_capacity: u8, + ) -> usize { + size_of::() + + ZeroCopySliceMut::::required_size_for_capacity( + leaves_capacity, + ) + + ZeroCopySliceMut::::required_size_for_capacity( + nullifiers_capacity, + ) + + ZeroCopySliceMut::::required_size_for_capacity( + addresses_capacity, ) - .map_err(ProgramError::from)?; - - // 5. Insert the nullifiers into the current input queue batch. - merkle_tree - .insert_nullifier_into_current_batch(element, *leaf_index as u64, &tx_hash) - .map_err(ProgramError::from)?; - light_heap::bench_sbf_end!("acp_insert_nf_into_queue_v2"); } - // 6. Return rollover fee. 
- let rollover_fee = - merkle_tree.metadata.rollover_metadata.rollover_fee * queue_bundle.elements.len() as u64; - Ok(rollover_fee) -} -/// Insert a batch of addresses into the address queue. -/// 1. Check discriminator and account ownership. -/// 2. Check that the signer is the authority or registered program. -/// 3. Insert the addresses into the current batch. -/// 4. Return rollover fee. -fn process_address_queue_bundle_v2<'info>( - ctx: &Context<'_, '_, '_, 'info, InsertIntoQueues<'info>>, - queue_bundle: &QueueBundle<'_, '_>, -) -> Result { - // 1. Check discriminator and account ownership. - let merkle_tree = - &mut BatchedMerkleTreeAccount::address_from_account_info(queue_bundle.accounts[0]) - .map_err(ProgramError::from)?; - // 2. Check that the signer is the authority or registered program. - check_signer_is_registered_or_authority::( - ctx, - merkle_tree, - )?; - // 3. Insert the addresses into the current batch. - for element in queue_bundle.elements.iter() { - light_heap::bench_sbf_start!("acp_insert_nf_into_queue_v2"); - merkle_tree - .insert_address_into_current_batch(element) - .map_err(ProgramError::from)?; - light_heap::bench_sbf_end!("acp_insert_nf_into_queue_v2"); + pub fn new( + bytes: &'a mut [u8], + leaves_capacity: u8, + nullifiers_capacity: u8, + addresses_capacity: u8, + ) -> std::result::Result { + let (meta, bytes) = bytes.split_at_mut(size_of::()); + let meta = Ref::<&mut [u8], AppendNullifyCreateAddressInputsMeta>::from_bytes(meta)?; + let (leaves, bytes) = + ZeroCopySliceMut::::new_at(leaves_capacity, bytes)?; + let (nullifiers, bytes) = ZeroCopySliceMut::::new_at( + nullifiers_capacity, + bytes, + )?; + let addresses = + ZeroCopySliceMut::::new(addresses_capacity, bytes)?; + Ok(AppendNullifyCreateAddressInputs { + meta, + leaves, + nullifiers, + addresses, + }) } - // 4. Return rollover fee. 
- let rollover_fee = - merkle_tree.metadata.rollover_metadata.rollover_fee * queue_bundle.elements.len() as u64; - Ok(rollover_fee) } -/// Add to/create new queue bundle for v1 nullifier or address queue. -fn add_queue_bundle_v1<'a, 'info>( - remaining_accounts_index: &mut usize, - queue_type: QueueType, - queue_map: &mut HashMap>, - element: &'a [u8; 32], - remaining_accounts: &'info [AccountInfo<'info>], -) -> Result<()> { - let queue = remaining_accounts.get(*remaining_accounts_index).unwrap(); - let merkle_tree = remaining_accounts - .get(*remaining_accounts_index + 1) - .unwrap(); - queue_map - .entry(queue.key()) - .or_insert_with(|| QueueBundle::new(queue_type, vec![queue, merkle_tree])) - .elements - .push(element); - *remaining_accounts_index += 2; - Ok(()) -} +impl Deref for AppendNullifyCreateAddressInputs<'_> { + type Target = AppendNullifyCreateAddressInputsMeta; -/// Add to/create a new state queue bundle. -/// 1. Check that the queue type is a nullifier queue. -/// 2. Get or create a queue bundle. -/// 3. Add the element to the queue bundle. -/// 4. Add the index to the queue bundle. -fn add_nullifier_queue_bundle_v2<'a, 'info>( - remaining_accounts_index: &mut usize, - queue_type: QueueType, - queue_map: &mut HashMap>, - element: &'a [u8; 32], - index: u32, - prove_by_index: bool, - remaining_accounts: &'info [AccountInfo<'info>], -) -> Result<()> { - // 1. Check that the queue type is a nullifier queue. - // Queue type is v1 nullifier queue type since we are using the same - // instruction with both tree types via cpi from the system program. - // (sanity check) - if queue_type != QueueType::NullifierQueue { - return err!(AccountCompressionErrorCode::InvalidQueueType); + fn deref(&self) -> &Self::Target { + &self.meta } - let output_queue = remaining_accounts.get(*remaining_accounts_index).unwrap(); - let merkle_tree = remaining_accounts - .get(*remaining_accounts_index + 1) - .unwrap(); - msg!("hashsetinsert"); - sol_log_compute_units(); - // 2. 
Get or create a queue bundle. - // 3. Add the element to the queue bundle. - queue_map - .entry(merkle_tree.key()) - .or_insert_with(|| { - QueueBundle::new(QueueType::BatchedInput, vec![merkle_tree, output_queue]) - }) - .elements - .push(element); - sol_log_compute_units(); - // 4. Add the index and proof by index to the queue bundle. - queue_map.entry(merkle_tree.key()).and_modify(|x| { - x.indices.push(index); - x.prove_by_index.push(prove_by_index); - }); - sol_log_compute_units(); - *remaining_accounts_index += 2; - - Ok(()) } -/// Add to/create a new address queue bundle. -/// 1. Check that the queue type is an address queue. -/// 2. Check that the Merkle tree is passed twice. -/// 3. Add the address to or create new queue bundle. -fn add_address_queue_bundle_v2<'a, 'info>( - remaining_accounts_index: &mut usize, - queue_type: QueueType, - queue_map: &mut HashMap>, - address: &'a [u8; 32], - remaining_accounts: &'info [AccountInfo<'info>], -) -> Result<()> { - // 1. Check that the queue type is an address queue. - // (sanity check) - if queue_type != QueueType::AddressQueue { - return err!(AccountCompressionErrorCode::InvalidQueueType); +impl DerefMut for AppendNullifyCreateAddressInputs<'_> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.meta } - let merkle_tree = remaining_accounts.get(*remaining_accounts_index).unwrap(); +} - // 2. Check that the Merkle tree is passed twice. - // We pass the same pubkey twice for consistency with the - // nullification and address v1 instructions. - if merkle_tree.key() - != remaining_accounts - .get(*remaining_accounts_index + 1) - .unwrap() - .key() - { - msg!( - "Merkle tree accounts {:?} inconsistent {:?}", - merkle_tree.key(), - remaining_accounts - .get(*remaining_accounts_index + 1) - .unwrap() - .key() - ); - return err!(AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated); - } - // 3. Add the address to or create new queue bundle. 
- queue_map - .entry(merkle_tree.key()) - .or_insert_with(|| QueueBundle::new(QueueType::BatchedAddress, vec![merkle_tree])) - .elements - .push(address); - *remaining_accounts_index += 2; +pub fn deserialize_nullify_append_create_address_inputs<'a>( + bytes: &'a mut [u8], +) -> std::result::Result, ZeroCopyError> { + let (metadata, bytes) = bytes.split_at_mut(size_of::()); + let meta = Ref::<&mut [u8], AppendNullifyCreateAddressInputsMeta>::from_bytes(metadata)?; + + let (leaves, bytes) = ZeroCopySliceMut::::from_bytes_at(bytes)?; + + let (nullifiers, bytes) = + ZeroCopySliceMut::::from_bytes_at(bytes)?; + let (addresses, _bytes) = + ZeroCopySliceMut::::from_bytes_at(bytes)?; + Ok(AppendNullifyCreateAddressInputs { + meta, + leaves, + nullifiers, + addresses, + }) +} - Ok(()) +pub fn get_queue_and_tree_accounts<'a, 'b, 'info>( + accounts: &'b mut [AcpAccount<'a, 'info>], + queue_index: usize, + tree_index: usize, +) -> std::result::Result< + (&'b mut AcpAccount<'a, 'info>, &'b mut AcpAccount<'a, 'info>), + AccountCompressionErrorCode, +> { + let (smaller, bigger) = if queue_index < tree_index { + (queue_index, tree_index) + } else { + (tree_index, queue_index) + }; + let (left, right) = accounts.split_at_mut(bigger); + let smaller_ref = &mut left[smaller]; + let bigger_ref = &mut right[0]; + Ok(if queue_index < tree_index { + (smaller_ref, bigger_ref) + } else { + (bigger_ref, smaller_ref) + }) } diff --git a/programs/account-compression/src/instructions/migrate_state.rs b/programs/account-compression/src/instructions/migrate_state.rs index 7c12b43f61..d344e8bf26 100644 --- a/programs/account-compression/src/instructions/migrate_state.rs +++ b/programs/account-compression/src/instructions/migrate_state.rs @@ -184,7 +184,10 @@ mod migrate_state_test { ], }, tree_capacity: 2u64.pow(32), + hashed_merkle_tree_pubkey: [0u8; 32], + hashed_queue_pubkey: [0u8; 32], }; + let queue_pubkey = Pubkey::new_unique(); let account_data: Vec = vec![ 0; account @@ -209,6 +212,7 @@ 
mod migrate_state_test { account.batch_metadata.zkp_batch_size, 3, account.batch_metadata.bloom_filter_capacity, + queue_pubkey.into(), ) .unwrap(); mock_account.account = Some(output_queue); diff --git a/programs/account-compression/src/instructions/mod.rs b/programs/account-compression/src/instructions/mod.rs index 1b82464841..1896ee4e11 100644 --- a/programs/account-compression/src/instructions/mod.rs +++ b/programs/account-compression/src/instructions/mod.rs @@ -4,15 +4,9 @@ pub use initialize_address_merkle_tree_and_queue::*; pub mod update_address_merkle_tree; pub use update_address_merkle_tree::*; -pub mod insert_into_queues; -pub use insert_into_queues::*; - pub mod initialize_state_merkle_tree_and_nullifier_queue; pub use initialize_state_merkle_tree_and_nullifier_queue::*; -pub mod append_leaves; -pub use append_leaves::*; - pub mod nullify_leaves; pub use nullify_leaves::*; @@ -58,8 +52,7 @@ pub use rollover_batched_address_merkle_tree::*; pub mod migrate_state; pub use migrate_state::*; -pub mod append_nullify_create_address; -// pub use append_nullify_create_address::*; +pub mod insert_into_queues; pub mod generic; pub use generic::*; diff --git a/programs/account-compression/src/instructions/nullify_leaves.rs b/programs/account-compression/src/instructions/nullify_leaves.rs index 5b44bdb13e..bbdb20a6ef 100644 --- a/programs/account-compression/src/instructions/nullify_leaves.rs +++ b/programs/account-compression/src/instructions/nullify_leaves.rs @@ -12,7 +12,7 @@ use crate::{ }, state_merkle_tree_from_bytes_zero_copy_mut, utils::check_signer_is_registered_or_authority::{ - check_signer_is_registered_or_authority, GroupAccounts, + check_signer_is_registered_or_authority, GroupAccess, GroupAccounts, }, RegisteredProgram, }; @@ -30,6 +30,16 @@ pub struct NullifyLeaves<'info> { pub nullifier_queue: AccountLoader<'info, QueueAccount>, } +impl GroupAccess for StateMerkleTreeAccount { + fn get_owner(&self) -> Pubkey { + 
self.metadata.access_metadata.owner.into() + } + + fn get_program_owner(&self) -> Pubkey { + self.metadata.access_metadata.program_owner.into() + } +} + impl<'info> GroupAccounts<'info> for NullifyLeaves<'info> { fn get_authority(&self) -> &Signer<'info> { &self.authority diff --git a/programs/account-compression/src/instructions/rollover_address_merkle_tree_and_queue.rs b/programs/account-compression/src/instructions/rollover_address_merkle_tree_and_queue.rs index 43c5a8658e..a6bf919d0b 100644 --- a/programs/account-compression/src/instructions/rollover_address_merkle_tree_and_queue.rs +++ b/programs/account-compression/src/instructions/rollover_address_merkle_tree_and_queue.rs @@ -3,8 +3,10 @@ use light_utils::account::check_account_balance_is_rent_exempt; use crate::{ address_merkle_tree_from_bytes_zero_copy, - initialize_address_merkle_tree::process_initialize_address_merkle_tree, - initialize_address_queue::process_initialize_address_queue, + processor::{ + initialize_address_merkle_tree::process_initialize_address_merkle_tree, + initialize_address_queue::process_initialize_address_queue, + }, state::{queue_from_bytes_zero_copy_mut, QueueAccount}, utils::{ check_signer_is_registered_or_authority::{ diff --git a/programs/account-compression/src/lib.rs b/programs/account-compression/src/lib.rs index 93eda87e3c..d0292a0d00 100644 --- a/programs/account-compression/src/lib.rs +++ b/programs/account-compression/src/lib.rs @@ -5,11 +5,9 @@ pub mod instructions; pub use instructions::*; pub mod state; pub use state::*; +pub mod context; pub mod processor; pub mod utils; -pub use processor::*; -mod context; -pub mod sdk; use anchor_lang::prelude::*; use errors::AccountCompressionErrorCode; use light_batched_merkle_tree::{ @@ -28,22 +26,12 @@ solana_security_txt::security_txt! 
{ policy: "https://github.com/Lightprotocol/light-protocol/blob/main/SECURITY.md", source_code: "https://github.com/Lightprotocol/light-protocol" } + #[program] pub mod account_compression { - use core::panic; - - use light_merkle_tree_metadata::queue::QueueType; - use light_zero_copy::slice_mut::ZeroCopySliceMutBorsh; - - use crate::{ - append_nullify_create_address::{ - deserialize_nullify_append_create_address_inputs, insert_nullifiers, - }, - context::LightContext, - }; + use crate::processor::insert_into_queues::process_insert_into_queues; - use self::insert_into_queues::{process_insert_into_queues, InsertIntoQueues}; use super::*; pub fn initialize_address_merkle_tree_and_queue<'info>( @@ -64,20 +52,6 @@ pub mod account_compression { ) } - pub fn insert_addresses<'a, 'b, 'c: 'info, 'info>( - ctx: Context<'a, 'b, 'c, 'info, InsertIntoQueues<'info>>, - addresses: Vec<[u8; 32]>, - ) -> Result<()> { - process_insert_into_queues( - ctx, - addresses.as_slice(), - Vec::new(), - QueueType::AddressQueue, - None, - None, - ) - } - /// Updates the address Merkle tree with a new address. 
pub fn update_address_merkle_tree<'info>( ctx: Context<'_, '_, '_, 'info, UpdateAddressMerkleTree<'info>>, @@ -178,49 +152,7 @@ pub mod account_compression { ctx: Context<'a, 'b, 'c, 'info, GenericInstruction<'info>>, bytes: Vec, ) -> Result<()> { - let fee_payer = ctx.accounts.fee_payer.to_account_info(); - let mut bytes = bytes; - let inputs = - deserialize_nullify_append_create_address_inputs(bytes.as_mut_slice()).unwrap(); - let mut context = LightContext::new( - ctx.remaining_accounts, - &fee_payer, - inputs.is_invoked_by_program(), - inputs.bump, - ); - // process_append_leaves_to_merkle_trees(&ctx, inputs.leaves.as_slice())?; - insert_nullifiers( - inputs.num_queues, - inputs.tx_hash, - inputs.nullifiers.as_slice(), - context.remaining_accounts_mut(), - )?; - - process_append_leaves_to_merkle_trees( - inputs.leaves.as_slice(), - inputs.num_unique_appends, - context.remaining_accounts_mut(), - )?; - - crate::append_nullify_create_address::insert_addresses( - inputs.num_address_appends, - inputs.addresses.as_slice(), - context.remaining_accounts_mut(), - )?; - // return (Pubkey, rollover_fee) and transfer in system program to - // reduce cpi call depth by 1 - Ok(()) - } - - pub fn append_leaves_to_merkle_trees<'a, 'b, 'c: 'info, 'info>( - ctx: Context<'a, 'b, 'c, 'info, AppendLeaves<'info>>, - bytes: Vec, - ) -> Result<()> { - let mut bytes = bytes; - let leaves = - ZeroCopySliceMutBorsh::::from_bytes(bytes.as_mut_slice()).unwrap(); - // process_append_leaves_to_merkle_trees(&ctx, leaves.as_slice()) - panic!("process_append_leaves_to_merkle_trees not implemented") + process_insert_into_queues(&ctx, bytes) } pub fn nullify_leaves<'a, 'b, 'c: 'info, 'info>( @@ -239,23 +171,6 @@ pub mod account_compression { ) } - pub fn insert_into_nullifier_queues<'a, 'b, 'c: 'info, 'info>( - ctx: Context<'a, 'b, 'c, 'info, InsertIntoQueues<'info>>, - nullifiers: Vec<[u8; 32]>, - leaf_indices: Vec, - prove_by_index: Vec, - tx_hash: [u8; 32], - ) -> Result<()> { - 
process_insert_into_queues( - ctx, - &nullifiers, - leaf_indices, - QueueType::NullifierQueue, - Some(prove_by_index), - Some(tx_hash), - ) - } - pub fn rollover_state_merkle_tree_and_nullifier_queue<'a, 'b, 'c: 'info, 'info>( ctx: Context<'a, 'b, 'c, 'info, RolloverStateMerkleTreeAndNullifierQueue<'info>>, ) -> Result<()> { diff --git a/programs/account-compression/src/processor/insert_addresses.rs b/programs/account-compression/src/processor/insert_addresses.rs new file mode 100644 index 0000000000..dd6bf1ef56 --- /dev/null +++ b/programs/account-compression/src/processor/insert_addresses.rs @@ -0,0 +1,165 @@ +use anchor_lang::prelude::*; +use light_batched_merkle_tree::merkle_tree::BatchedMerkleTreeAccount; +use num_bigint::BigUint; +use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout, Unaligned}; + +use crate::{ + context::AcpAccount, errors::AccountCompressionErrorCode, + insert_into_queues::get_queue_and_tree_accounts, queue_from_bytes_zero_copy_mut, QueueAccount, +}; + +#[repr(C)] +#[derive( + KnownLayout, + IntoBytes, + Immutable, + Copy, + Clone, + FromBytes, + AnchorSerialize, + AnchorDeserialize, + PartialEq, + Debug, + Unaligned, +)] +pub struct InsertAddressInput { + pub address: [u8; 32], + pub tree_index: u8, + pub queue_index: u8, +} + +#[inline(always)] +pub fn insert_addresses<'a, 'info>( + num_queues: u8, + addresses: &[InsertAddressInput], + accounts: &mut [AcpAccount<'a, 'info>], +) -> Result<()> { + if addresses.is_empty() { + return Ok(()); + } + + let mut inserted_nullifiers = 0; + let mut current_tree_index = addresses[0].tree_index; + let mut current_queue_index = addresses[0].queue_index; + let mut dedup_vec = Vec::with_capacity(num_queues as usize); + for _ in 0..num_queues { + let (queue_account, merkle_tree_account) = get_queue_and_tree_accounts( + accounts, + current_queue_index as usize, + current_tree_index as usize, + ) + .unwrap(); + + match queue_account { + AcpAccount::BatchedAddressTree(address_tree) => { + 
inserted_nullifiers += + process_address_v2(address_tree, addresses, current_queue_index)?; + } + AcpAccount::V1Queue(queue_account_info) => { + inserted_nullifiers += refactored_process_address_v1( + merkle_tree_account, + queue_account_info, + addresses, + current_queue_index, + )?; + } + _ => unimplemented!(), + } + + dedup_vec.push(current_queue_index); + if dedup_vec.len() == num_queues as usize { + break; + } + // find next tree index which doesn't exist in dedup vec yet + let input = addresses + .iter() + .find(|x| { + !dedup_vec + .iter() + .any(|&queue_index| queue_index == x.queue_index) + }) + .unwrap(); + current_tree_index = input.tree_index; + current_queue_index = input.queue_index; + } + if inserted_nullifiers != addresses.len() { + msg!("inserted_nullifiers {:?}", inserted_nullifiers); + msg!("nullifiers.len() {:?}", addresses.len()); + return err!(AccountCompressionErrorCode::NotAllLeavesProcessed); + } + Ok(()) +} + +/// Insert a batch of addresses into the address queue. 
+fn process_address_v2<'info>( + addresse_tree: &mut BatchedMerkleTreeAccount<'info>, + addresses: &[InsertAddressInput], + current_queue_index: u8, +) -> Result { + let addresses = addresses + .iter() + .filter(|x| x.queue_index == current_queue_index); + #[cfg(feature = "bench-sbf")] + light_heap::bench_sbf_start!("acp_insert_nf_into_queue_v2"); + let mut num_elements = 0; + for address in addresses { + num_elements += 1; + addresse_tree + .insert_address_into_current_batch(&address.address) + .map_err(ProgramError::from)?; + #[cfg(feature = "bench-sbf")] + light_heap::bench_sbf_end!("acp_insert_nf_into_queue_v2"); + } + Ok(num_elements) +} + +fn refactored_process_address_v1<'a, 'info>( + merkle_tree: &mut AcpAccount<'a, 'info>, + nullifier_queue: &mut AccountInfo<'info>, + addresses: &[InsertAddressInput], + current_queue_index: u8, +) -> Result { + let addresses = addresses + .iter() + .filter(|x| x.queue_index == current_queue_index); + let (merkle_pubkey, merkle_tree) = if let AcpAccount::AddressTree(tree) = merkle_tree { + tree + } else { + panic!("Invalid account"); + }; + { + let queue_data = nullifier_queue + .try_borrow_data() + .map_err(ProgramError::from)?; + let queue = bytemuck::from_bytes::(&queue_data[8..QueueAccount::LEN]); + // 1. Check queue and Merkle tree are associated. + if queue.metadata.associated_merkle_tree != (*merkle_pubkey).into() { + msg!( + "Queue account {:?} is not associated with Merkle tree {:?}", + nullifier_queue.key(), + *merkle_pubkey + ); + return err!(AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated); + } + } + let mut num_elements = 0; + // 2. Insert the addresses into the queues hash set. 
+ + let sequence_number = merkle_tree.sequence_number(); + let mut queue = nullifier_queue.try_borrow_mut_data()?; + let mut queue = unsafe { queue_from_bytes_zero_copy_mut(&mut queue).unwrap() }; + #[cfg(feature = "bench-sbf")] + light_heap::bench_sbf_start!("acp_insert_nf_into_queue"); + for address in addresses { + num_elements += 1; + let element = BigUint::from_bytes_be(address.address.as_slice()); + queue + .insert(&element, sequence_number) + .map_err(ProgramError::from)?; + } + #[cfg(feature = "bench-sbf")] + light_heap::bench_sbf_end!("acp_insert_nf_into_queue"); + msg!("v1 num_elements {:?}", num_elements); + + Ok(num_elements) +} diff --git a/programs/account-compression/src/processor/insert_into_queues.rs b/programs/account-compression/src/processor/insert_into_queues.rs new file mode 100644 index 0000000000..fe76d69669 --- /dev/null +++ b/programs/account-compression/src/processor/insert_into_queues.rs @@ -0,0 +1,44 @@ +use anchor_lang::prelude::*; + +use super::{ + insert_addresses::insert_addresses, insert_leaves::process_append_leaves_to_merkle_trees, + insert_nullifiers::insert_nullifiers, +}; +use crate::context::LightContext; +use crate::insert_into_queues::deserialize_nullify_append_create_address_inputs; + +use crate::GenericInstruction; + +pub fn process_insert_into_queues<'a, 'b, 'c: 'info, 'info>( + ctx: &Context<'a, 'b, 'c, 'info, GenericInstruction<'info>>, + bytes: Vec, +) -> Result<()> { + let fee_payer = ctx.accounts.fee_payer.to_account_info(); + let mut bytes = bytes; + let inputs = deserialize_nullify_append_create_address_inputs(bytes.as_mut_slice()).unwrap(); + let mut context = LightContext::new( + ctx.remaining_accounts, + &fee_payer, + inputs.is_invoked_by_program(), + inputs.bump, + ); + + insert_nullifiers( + inputs.num_queues, + inputs.tx_hash, + inputs.nullifiers.as_slice(), + context.remaining_accounts_mut(), + )?; + + process_append_leaves_to_merkle_trees( + inputs.leaves.as_slice(), + inputs.num_unique_appends, + 
context.remaining_accounts_mut(), + )?; + + insert_addresses( + inputs.num_address_appends, + inputs.addresses.as_slice(), + context.remaining_accounts_mut(), + ) +} diff --git a/programs/account-compression/src/instructions/append_leaves.rs b/programs/account-compression/src/processor/insert_leaves.rs similarity index 51% rename from programs/account-compression/src/instructions/append_leaves.rs rename to programs/account-compression/src/processor/insert_leaves.rs index ed56acbd81..ddb2a64d87 100644 --- a/programs/account-compression/src/instructions/append_leaves.rs +++ b/programs/account-compression/src/processor/insert_leaves.rs @@ -1,65 +1,8 @@ -use anchor_lang::{prelude::*, solana_program::pubkey::Pubkey}; -use light_batched_merkle_tree::queue::BatchedQueueAccount; - -use crate::{ - context::AcpAccount, - errors::AccountCompressionErrorCode, - state::StateMerkleTreeAccount, - utils::check_signer_is_registered_or_authority::{GroupAccess, GroupAccounts}, - RegisteredProgram, -}; - -#[derive(Accounts)] -pub struct AppendLeaves<'info> { - #[account(mut)] - /// Fee payer pays rollover fee. - pub fee_payer: Signer<'info>, - /// Checked whether instruction is accessed by a registered program or owner = authority. - pub authority: Signer<'info>, - /// Some assumes that the Merkle trees are accessed by a registered program. - /// None assumes that the Merkle trees are accessed by its owner. 
- pub registered_program_pda: Option>, - pub system_program: Program<'info, System>, -} - -impl GroupAccess for StateMerkleTreeAccount { - fn get_owner(&self) -> Pubkey { - self.metadata.access_metadata.owner.into() - } - - fn get_program_owner(&self) -> Pubkey { - self.metadata.access_metadata.program_owner.into() - } -} - -impl<'a> GroupAccess for BatchedQueueAccount<'a> { - fn get_owner(&self) -> Pubkey { - self.metadata.access_metadata.owner.into() - } - - fn get_program_owner(&self) -> Pubkey { - self.metadata.access_metadata.program_owner.into() - } -} - -impl<'info> GroupAccounts<'info> for AppendLeaves<'info> { - fn get_authority(&self) -> &Signer<'info> { - &self.authority - } - fn get_registered_program_pda(&self) -> &Option> { - &self.registered_program_pda - } -} - -#[derive(AnchorSerialize, AnchorDeserialize)] -pub struct ZeroOutLeafIndex { - pub tree_index: u8, - pub batch_index: u8, - pub leaf_index: u16, -} +use anchor_lang::prelude::*; use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout, Unaligned}; +use crate::{context::AcpAccount, errors::AccountCompressionErrorCode}; #[repr(C)] #[derive( KnownLayout, @@ -150,38 +93,3 @@ pub fn process_append_leaves_to_merkle_trees<'a, 'b, 'c: 'info, 'info>( Ok(()) } } - -// /// Append a batch of leaves to a concurrent Merkle tree. -// /// 1. Check StateMerkleTreeAccount discriminator and ownership (AccountLoader) -// /// 2. Check signer is registered or authority -// /// 3. Append leaves to Merkle tree -// /// 4. 
Return rollover fee -// fn append_to_concurrent_merkle_tree<'a, 'b, 'c: 'info, 'info>( -// merkle_tree_acc_info: ConcurrentMerkleTree26<'info>, -// batch_size: usize, -// leaves: &[&[u8; 32]], -// ) -> Result<()> { -// // let rollover_fee = { -// // let merkle_tree_account = -// // AccountLoader::::try_from(merkle_tree_acc_info) -// // .map_err(ProgramError::from)?; - -// // { -// // let merkle_tree_account = merkle_tree_account.load()?; -// // let rollover_fee = -// // merkle_tree_account.metadata.rollover_metadata.rollover_fee * batch_size as u64; - -// // check_signer_is_registered_or_authority::( -// // ctx, -// // &merkle_tree_account, -// // )?; - -// // rollover_fee -// // } -// // }; - -// merkle_tree -// .append_batch(leaves) -// .map_err(ProgramError::from)?; -// Ok() -// } diff --git a/programs/account-compression/src/processor/insert_nullifiers.rs b/programs/account-compression/src/processor/insert_nullifiers.rs new file mode 100644 index 0000000000..1d82075925 --- /dev/null +++ b/programs/account-compression/src/processor/insert_nullifiers.rs @@ -0,0 +1,224 @@ +use anchor_lang::prelude::*; +use light_batched_merkle_tree::queue::BatchedQueueAccount; +use num_bigint::BigUint; + +use zerocopy::{little_endian::U32, FromBytes, Immutable, IntoBytes, KnownLayout, Unaligned}; + +use crate::{ + context::AcpAccount, errors::AccountCompressionErrorCode, + insert_into_queues::get_queue_and_tree_accounts, queue_from_bytes_zero_copy_mut, QueueAccount, +}; + +#[repr(C)] +#[derive( + KnownLayout, IntoBytes, Immutable, Copy, Clone, FromBytes, PartialEq, Debug, Unaligned, +)] +pub struct InsertNullifierInput { + pub account_hash: [u8; 32], + pub leaf_index: U32, + pub prove_by_index: u8, + pub tree_index: u8, + pub queue_index: u8, +} + +#[inline(always)] +pub fn insert_nullifiers<'a, 'info>( + num_queues: u8, + tx_hash: [u8; 32], + nullifiers: &[InsertNullifierInput], + accounts: &mut [AcpAccount<'a, 'info>], +) -> Result<()> { + if nullifiers.is_empty() { + return 
Ok(()); + } + let mut inserted_nullifiers = 0; + let mut current_tree_index = nullifiers[0].tree_index; + let mut current_queue_index = nullifiers[0].queue_index; + let mut dedup_vec = Vec::with_capacity(num_queues as usize); + for _ in 0..num_queues { + let (queue_account, merkle_tree_account) = get_queue_and_tree_accounts( + accounts, + current_queue_index as usize, + current_tree_index as usize, + ) + .unwrap(); + + match queue_account { + AcpAccount::OutputQueue(queue) => { + inserted_nullifiers += refactored_process_nullifier_v2( + merkle_tree_account, + queue, + &tx_hash, + nullifiers, + current_queue_index, + )?; + } + AcpAccount::V1Queue(queue_account_info) => { + inserted_nullifiers += refactored_process_nullifier_v1( + merkle_tree_account, + queue_account_info, + nullifiers, + current_queue_index, + )?; + } + AcpAccount::BatchedStateTree(_) => { + msg!("BatchedStateTree"); + unimplemented!(); + } + AcpAccount::StateTree(_) => { + msg!("StateTree"); + unimplemented!(); + } + AcpAccount::BatchedAddressTree(_) => { + msg!("BatchedAddressTree"); + unimplemented!(); + } + _ => { + unimplemented!() + } + } + + dedup_vec.push(current_queue_index); + if dedup_vec.len() == num_queues as usize { + break; + } + // find next tree index which doesn't exist in dedup vec yet + let input = nullifiers + .iter() + .find(|x| { + !dedup_vec + .iter() + .any(|&queue_index| queue_index == x.queue_index) + }) + .unwrap(); + current_tree_index = input.tree_index; + current_queue_index = input.queue_index; + } + if inserted_nullifiers != nullifiers.len() { + msg!("inserted_nullifiers {:?}", inserted_nullifiers); + msg!("nullifiers.len() {:?}", nullifiers.len()); + return err!(AccountCompressionErrorCode::NotAllLeavesProcessed); + } + Ok(()) +} + +#[inline(always)] +fn refactored_process_nullifier_v2<'a, 'info>( + merkle_tree: &mut AcpAccount<'a, 'info>, + output_queue: &mut BatchedQueueAccount<'info>, + tx_hash: &[u8; 32], + nullifiers: &[InsertNullifierInput], + 
current_queue_index: u8, +) -> Result { + let nullifiers = nullifiers + .iter() + .filter(|x| x.queue_index == current_queue_index); + let merkle_tree = if let AcpAccount::BatchedStateTree(tree) = merkle_tree { + tree + } else { + panic!("Invalid account"); + }; + // 3. Check queue and Merkle tree are associated. + output_queue + .check_is_associated(merkle_tree.pubkey()) + .map_err(ProgramError::from)?; + + let mut num_elements = 0; + + for nullifier in nullifiers { + num_elements += 1; + #[cfg(feature = "bench-sbf")] + light_heap::bench_sbf_start!("acp_insert_nf_into_queue_v2"); + // 4. check for every account whether the value is still in the queue and zero it out. + // If checked fail if the value is not in the queue. + let proof_index = if nullifier.prove_by_index == 1 { + true + } else if nullifier.prove_by_index == 0 { + false + } else { + panic!("invalid value"); + }; + let leaf_index = nullifier.leaf_index.into(); + output_queue + .prove_inclusion_by_index_and_zero_out_leaf( + leaf_index, + &nullifier.account_hash, + proof_index, + ) + .map_err(ProgramError::from)?; + + // 5. Insert the nullifiers into the current input queue batch. 
+ merkle_tree + .insert_nullifier_into_current_batch(&nullifier.account_hash, leaf_index, &tx_hash) + .map_err(ProgramError::from)?; + #[cfg(feature = "bench-sbf")] + light_heap::bench_sbf_end!("acp_insert_nf_into_queue_v2"); + } + msg!("v2 num_elements {:?}", num_elements); + Ok(num_elements) +} + +fn refactored_process_nullifier_v1<'a, 'info>( + merkle_tree: &mut AcpAccount<'a, 'info>, + nullifier_queue: &mut AccountInfo<'info>, + nullifiers: &[InsertNullifierInput], + current_queue_index: u8, +) -> Result { + let nullifiers = nullifiers + .iter() + .filter(|x| x.queue_index == current_queue_index); + let (merkle_pubkey, merkle_tree) = if let AcpAccount::StateTree(tree) = merkle_tree { + tree + } else { + panic!("Invalid account"); + }; + msg!("refactored_process_nullifier_v1"); + { + let queue_data = nullifier_queue + .try_borrow_data() + .map_err(ProgramError::from)?; + msg!("data len {:?}", queue_data.len()); + msg!("discriminator {:?}", queue_data[0..32].to_vec()); + msg!("queue pubkey {:?}", nullifier_queue.key()); + let queue = bytemuck::from_bytes::(&queue_data[8..QueueAccount::LEN]); + // 3. Check queue and Merkle tree are associated. + if queue.metadata.associated_merkle_tree != (*merkle_pubkey).into() { + msg!( + "Queue account {:?} is not associated with Merkle tree {:?}", + nullifier_queue.key(), + *merkle_pubkey + ); + return err!(AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated); + } + } + let mut num_elements = 0; + // 5. Insert the nullifiers into the queues hash set. 
+ msg!("refactored_process_nullifier_v1 2"); + + let sequence_number = { + // let merkle_tree = merkle_tree.try_borrow_data()?; + // let merkle_tree = state_merkle_tree_from_bytes_zero_copy(&merkle_tree)?; + merkle_tree.sequence_number() + }; + let mut queue = nullifier_queue.try_borrow_mut_data()?; + let mut queue = unsafe { queue_from_bytes_zero_copy_mut(&mut queue).unwrap() }; + #[cfg(feature = "bench-sbf")] + light_heap::bench_sbf_end!("acp_prep_insertion"); + #[cfg(feature = "bench-sbf")] + light_heap::bench_sbf_start!("acp_insert_nf_into_queue"); + for nullifier in nullifiers { + if nullifier.prove_by_index == 1 { + return Err(AccountCompressionErrorCode::V1AccountMarkedAsProofByIndex.into()); + } + num_elements += 1; + let element = BigUint::from_bytes_be(nullifier.account_hash.as_slice()); + queue + .insert(&element, sequence_number) + .map_err(ProgramError::from)?; + } + #[cfg(feature = "bench-sbf")] + light_heap::bench_sbf_end!("acp_insert_nf_into_queue"); + msg!("v1 num_elements {:?}", num_elements); + + Ok(num_elements) +} diff --git a/programs/account-compression/src/processor/mod.rs b/programs/account-compression/src/processor/mod.rs index 4322b3adb5..c2df951537 100644 --- a/programs/account-compression/src/processor/mod.rs +++ b/programs/account-compression/src/processor/mod.rs @@ -2,3 +2,7 @@ pub mod initialize_address_merkle_tree; pub mod initialize_address_queue; pub mod initialize_concurrent_merkle_tree; pub mod initialize_nullifier_queue; +pub mod insert_addresses; +pub mod insert_into_queues; +pub mod insert_leaves; +pub mod insert_nullifiers; diff --git a/programs/account-compression/src/state/queue.rs b/programs/account-compression/src/state/queue.rs index b4412aeac2..993bf115d8 100644 --- a/programs/account-compression/src/state/queue.rs +++ b/programs/account-compression/src/state/queue.rs @@ -9,10 +9,7 @@ use light_merkle_tree_metadata::{ rollover::RolloverMetadata, }; -use crate::{ - 
utils::check_signer_is_registered_or_authority::{GroupAccess, GroupAccounts}, - InsertIntoQueues, RegisteredProgram, -}; +use crate::utils::check_signer_is_registered_or_authority::GroupAccess; #[account(zero_copy)] #[derive(AnchorDeserialize, Debug, PartialEq)] @@ -48,15 +45,6 @@ impl GroupAccess for QueueAccount { } } -impl<'info> GroupAccounts<'info> for InsertIntoQueues<'info> { - fn get_authority(&self) -> &Signer<'info> { - &self.authority - } - fn get_registered_program_pda(&self) -> &Option> { - &self.registered_program_pda - } -} - impl QueueAccount { pub fn size(capacity: usize) -> Result { Ok(8 + mem::size_of::() + HashSet::size_in_account(capacity)) diff --git a/programs/compressed-token/src/burn.rs b/programs/compressed-token/src/burn.rs index 763f5960c8..331cf44a5c 100644 --- a/programs/compressed-token/src/burn.rs +++ b/programs/compressed-token/src/burn.rs @@ -1,7 +1,7 @@ use anchor_lang::prelude::*; use anchor_spl::token::TokenAccount; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::{compressed_account::PackedCompressedAccountWithMerkleContext, CompressedCpiContext}, OutputCompressedAccountWithPackedContext, }; @@ -190,7 +190,7 @@ pub mod sdk { use anchor_lang::{AnchorSerialize, InstructionData, ToAccountMetas}; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::compressed_account::{CompressedAccount, MerkleContext}, }; use solana_sdk::{instruction::Instruction, pubkey::Pubkey}; diff --git a/programs/compressed-token/src/delegation.rs b/programs/compressed-token/src/delegation.rs index 57541c7e67..9acaad2f66 100644 --- a/programs/compressed-token/src/delegation.rs +++ b/programs/compressed-token/src/delegation.rs @@ -1,6 +1,6 @@ use anchor_lang::prelude::*; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, 
sdk::{compressed_account::PackedCompressedAccountWithMerkleContext, CompressedCpiContext}, OutputCompressedAccountWithPackedContext, }; @@ -263,7 +263,7 @@ pub mod sdk { use anchor_lang::{AnchorSerialize, InstructionData, ToAccountMetas}; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::compressed_account::{CompressedAccount, MerkleContext}, }; use solana_sdk::{instruction::Instruction, pubkey::Pubkey}; diff --git a/programs/compressed-token/src/freeze.rs b/programs/compressed-token/src/freeze.rs index 572e90beaa..5ce17b81f0 100644 --- a/programs/compressed-token/src/freeze.rs +++ b/programs/compressed-token/src/freeze.rs @@ -1,7 +1,7 @@ use anchor_lang::prelude::*; use light_hasher::{DataHasher, Poseidon}; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::{ compressed_account::{ CompressedAccount, CompressedAccountData, PackedCompressedAccountWithMerkleContext, @@ -200,7 +200,7 @@ pub mod sdk { use anchor_lang::{AnchorSerialize, InstructionData, ToAccountMetas}; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::compressed_account::{CompressedAccount, MerkleContext}, }; use solana_sdk::{instruction::Instruction, pubkey::Pubkey}; diff --git a/programs/compressed-token/src/process_transfer.rs b/programs/compressed-token/src/process_transfer.rs index 82c16ed136..94871b65bd 100644 --- a/programs/compressed-token/src/process_transfer.rs +++ b/programs/compressed-token/src/process_transfer.rs @@ -3,7 +3,7 @@ use anchor_lang::{prelude::*, solana_program::program_error::ProgramError, Ancho use light_hasher::Poseidon; use light_heap::{bench_sbf_end, bench_sbf_start}; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::{ accounts::{InvokeAccounts, SignerAccounts}, compressed_account::{ @@ -595,7 +595,7 @@ pub mod transfer_sdk { use 
anchor_lang::{error_code, AnchorSerialize, Id, InstructionData, ToAccountMetas}; use anchor_spl::{token::Token, token_2022::Token2022}; use light_system_program::{ - invoke::processor::CompressedProof, + processor::processor::CompressedProof, sdk::compressed_account::{CompressedAccount, MerkleContext, PackedMerkleContext}, }; use solana_sdk::{ diff --git a/programs/system/Cargo.toml b/programs/system/Cargo.toml index 838a2af9d0..152bc63f92 100644 --- a/programs/system/Cargo.toml +++ b/programs/system/Cargo.toml @@ -15,10 +15,11 @@ no-log-ix-name = [] cpi = ["no-entrypoint"] custom-heap = ["light-heap"] mem-profiling = [] -default = ["custom-heap", "idl-build"] +default = ["debug","idl-build"] test-sbf = [] -bench-sbf = [] +bench-sbf = ["custom-heap"] idl-build = ["anchor-lang/idl-build"] +debug = [] [dependencies] @@ -36,6 +37,7 @@ solana-security-txt = "1.1.0" light-batched-merkle-tree = {workspace = true, features = ["solana"] } light-zero-copy = {workspace = true, features = ["solana"] } zerocopy = {workspace = true} +light-merkle-tree-metadata = {workspace = true} [target.'cfg(not(target_os = "solana"))'.dependencies] solana-sdk = { workspace = true } diff --git a/programs/system/src/check_accounts.rs b/programs/system/src/check_accounts.rs new file mode 100644 index 0000000000..d10713c7f3 --- /dev/null +++ b/programs/system/src/check_accounts.rs @@ -0,0 +1,167 @@ +use account_compression::{ + address_merkle_tree_from_bytes_zero_copy_mut, context::AcpAccount, + state_merkle_tree_from_bytes_zero_copy_mut, AddressMerkleTreeAccount, QueueAccount, + StateMerkleTreeAccount, +}; +use anchor_lang::{ + prelude::{AccountInfo, AccountLoader}, + solana_program::msg, + Discriminator as AnchorDiscriminator, Key, ToAccountInfo, +}; +use light_batched_merkle_tree::{ + merkle_tree::BatchedMerkleTreeAccount, queue::BatchedQueueAccount, +}; +use light_hasher::Discriminator; +use light_merkle_tree_metadata::merkle_tree::TreeType; +use light_utils::{hash_to_bn254_field_size_be, 
pubkey::Pubkey}; + +use crate::{ + context::{MerkleTreeContext, SystemContext}, + errors::SystemProgramError, +}; + +pub(crate) fn try_from_account_infos<'a, 'info>( + account_infos: &'info [AccountInfo<'info>], + context: &mut SystemContext<'info>, +) -> std::result::Result>, SystemProgramError> { + let mut accounts = Vec::with_capacity(account_infos.len()); + for (index, account_info) in account_infos.iter().enumerate() { + let account = try_from_account_info(account_info, context, index as u8)?; + accounts.push(account); + } + Ok(accounts) +} + +#[inline(always)] +pub(crate) fn try_from_account_info<'a, 'info>( + account_info: &'info AccountInfo<'info>, + context: &mut SystemContext<'info>, + index: u8, +) -> std::result::Result, SystemProgramError> { + if account_compression::ID != *account_info.owner { + msg!("Invalid owner {:?}", account_info.owner); + return Err(SystemProgramError::InvalidAccount); + } + let discriminator = account_info + .try_borrow_data() + .map_err(|_| SystemProgramError::InvalidAccount)?[..8] + .try_into() + .unwrap(); + + let (account, program_owner) = match discriminator { + BatchedMerkleTreeAccount::DISCRIMINATOR => { + let mut tree_type = [0u8; 8]; + tree_type.copy_from_slice( + &account_info + .try_borrow_data() + .map_err(|_| SystemProgramError::InvalidAccount)?[8..16], + ); + let tree_type = TreeType::from(u64::from_le_bytes(tree_type)); + match tree_type { + TreeType::BatchedAddress => { + let tree = + BatchedMerkleTreeAccount::address_from_account_info(account_info).unwrap(); + let program_owner = tree.metadata.access_metadata.program_owner; + context.set_address_fee(tree.metadata.rollover_metadata.network_fee, index); + + Ok((AcpAccount::BatchedAddressTree(tree), program_owner)) + } + TreeType::BatchedState => { + let tree = + BatchedMerkleTreeAccount::state_from_account_info(account_info).unwrap(); + let program_owner = tree.metadata.access_metadata.program_owner; + 
context.set_network_fee(tree.metadata.rollover_metadata.network_fee, index); + Ok((AcpAccount::BatchedStateTree(tree), program_owner)) + } + _ => Err(SystemProgramError::InvalidAccount), + } + } + BatchedQueueAccount::DISCRIMINATOR => { + let queue = BatchedQueueAccount::output_from_account_info(account_info).unwrap(); + let program_owner = queue.metadata.access_metadata.program_owner; + context.set_network_fee(queue.metadata.rollover_metadata.network_fee, index); + Ok((AcpAccount::OutputQueue(queue), program_owner)) + } + StateMerkleTreeAccount::DISCRIMINATOR => { + let program_owner = { + let merkle_tree = + AccountLoader::::try_from(&account_info).unwrap(); + let merkle_tree = merkle_tree.load().unwrap(); + context.set_network_fee(merkle_tree.metadata.rollover_metadata.network_fee, index); + context.legacy_merkle_context.push(( + index, + (MerkleTreeContext { + rollover_fee: merkle_tree.metadata.rollover_metadata.rollover_fee, + hashed_pubkey: hash_to_bn254_field_size_be(&account_info.key().to_bytes()) + .unwrap() + .0, + }), + )); + + merkle_tree.metadata.access_metadata.program_owner + }; + let mut merkle_tree = account_info + .try_borrow_mut_data() + .map_err(|_| SystemProgramError::InvalidAccount)?; + let data_slice: &'info mut [u8] = unsafe { + std::slice::from_raw_parts_mut(merkle_tree.as_mut_ptr(), merkle_tree.len()) + }; + Ok(( + AcpAccount::StateTree(( + account_info.key(), + state_merkle_tree_from_bytes_zero_copy_mut(data_slice).unwrap(), + )), + program_owner, + )) + } + AddressMerkleTreeAccount::DISCRIMINATOR => { + let program_owner = { + let merkle_tree = + AccountLoader::::try_from(&account_info).unwrap(); + let merkle_tree = merkle_tree.load().unwrap(); + context.set_address_fee(merkle_tree.metadata.rollover_metadata.network_fee, index); + context.legacy_merkle_context.push(( + index, + MerkleTreeContext { + rollover_fee: merkle_tree.metadata.rollover_metadata.rollover_fee, + hashed_pubkey: [0u8; 32], // not used for address trees + }, + )); + 
merkle_tree.metadata.access_metadata.program_owner + }; + let mut merkle_tree = account_info + .try_borrow_mut_data() + .map_err(|_| SystemProgramError::InvalidAccount)?; + let data_slice: &'info mut [u8] = unsafe { + std::slice::from_raw_parts_mut(merkle_tree.as_mut_ptr(), merkle_tree.len()) + }; + Ok(( + AcpAccount::AddressTree(( + account_info.key(), + address_merkle_tree_from_bytes_zero_copy_mut(data_slice).unwrap(), + )), + program_owner, + )) + } + QueueAccount::DISCRIMINATOR => Ok(( + AcpAccount::V1Queue(account_info.to_account_info()), + Pubkey::default(), + )), + _ => panic!("invalid account"), + }?; + + if program_owner != Pubkey::default().into() { + if let Some(invoking_program) = context.invoking_program_id { + if invoking_program != program_owner.into() { + msg!( + "invoking_program.key() {:?} == merkle_tree_unpacked.program_owner {:?}", + invoking_program, + program_owner + ); + return Err(SystemProgramError::InvalidMerkleTreeOwner); + } + } + } + + Ok(account) +} diff --git a/programs/system/src/compressed_account.rs b/programs/system/src/compressed_account.rs new file mode 100644 index 0000000000..472883e56d --- /dev/null +++ b/programs/system/src/compressed_account.rs @@ -0,0 +1,281 @@ +use anchor_lang::prelude::*; +use light_hasher::Hasher; +use light_utils::hash_to_bn254_field_size_be; + +use crate::instruction_data::ZCompressedAccount; + +/// Hashing scheme: +/// H(owner || leaf_index || merkle_tree_pubkey || lamports || address || data.discriminator || data.data_hash) +impl ZCompressedAccount<'_> { + pub fn hash_with_hashed_values( + &self, + &owner_hashed: &[u8; 32], + &merkle_tree_hashed: &[u8; 32], + leaf_index: &u32, + ) -> Result<[u8; 32]> { + let capacity = 3 + + std::cmp::min(u64::from(self.lamports), 1) as usize + + self.address.is_some() as usize + + self.data.is_some() as usize * 2; + let mut vec: Vec<&[u8]> = Vec::with_capacity(capacity); + vec.push(owner_hashed.as_slice()); + + // leaf index and merkle tree pubkey are used to make 
every compressed account hash unique + let leaf_index = leaf_index.to_le_bytes(); + vec.push(leaf_index.as_slice()); + + vec.push(merkle_tree_hashed.as_slice()); + + // Lamports are only hashed if non-zero to safe CU + // For safety we prefix the lamports with 1 in 1 byte. + // Thus even if the discriminator has the same value as the lamports, the hash will be different. + let mut lamports_bytes = [1, 0, 0, 0, 0, 0, 0, 0, 0]; + if self.lamports != 0 { + lamports_bytes[1..].copy_from_slice(&(u64::from(self.lamports)).to_le_bytes()); + vec.push(lamports_bytes.as_slice()); + } + + if self.address.is_some() { + vec.push(self.address.as_ref().unwrap().as_slice()); + } + + let mut discriminator_bytes = [2, 0, 0, 0, 0, 0, 0, 0, 0]; + if let Some(data) = &self.data { + discriminator_bytes[1..].copy_from_slice(data.discriminator.as_slice()); + vec.push(&discriminator_bytes); + vec.push(data.data_hash.as_slice()); + } + let hash = H::hashv(&vec).map_err(ProgramError::from)?; + Ok(hash) + } + + pub fn hash( + &self, + &merkle_tree_pubkey: &Pubkey, + leaf_index: &u32, + ) -> Result<[u8; 32]> { + self.hash_with_hashed_values::( + &hash_to_bn254_field_size_be(&self.owner.to_bytes()) + .unwrap() + .0, + &hash_to_bn254_field_size_be(&merkle_tree_pubkey.to_bytes()) + .unwrap() + .0, + leaf_index, + ) + } +} + +#[cfg(test)] +mod tests { + use light_hasher::Poseidon; + use solana_sdk::signature::{Keypair, Signer}; + + // TODO: remove sdk + // TODO: replace with imports from actual sdk. + use crate::sdk::compressed_account::{CompressedAccount, CompressedAccountData}; + + use super::*; + /// Tests: + /// 1. functional with all inputs set + /// 2. no data + /// 3. no address + /// 4. no address and no lamports + /// 5. no address and no data + /// 6. 
no address, no data, no lamports + #[test] + fn test_compressed_account_hash() { + let owner = Keypair::new().pubkey(); + let address = [1u8; 32]; + let data = CompressedAccountData { + discriminator: [1u8; 8], + data: vec![2u8; 32], + data_hash: [3u8; 32], + }; + let lamports = 100; + let compressed_account = CompressedAccount { + owner, + lamports, + address: Some(address), + data: Some(data.clone()), + }; + let merkle_tree_pubkey = Keypair::new().pubkey(); + let leaf_index = 1; + let hash = compressed_account + .hash::(&merkle_tree_pubkey, &leaf_index) + .unwrap(); + let hash_manual = Poseidon::hashv(&[ + hash_to_bn254_field_size_be(&owner.to_bytes()) + .unwrap() + .0 + .as_slice(), + leaf_index.to_le_bytes().as_slice(), + hash_to_bn254_field_size_be(&merkle_tree_pubkey.to_bytes()) + .unwrap() + .0 + .as_slice(), + [&[1u8], lamports.to_le_bytes().as_slice()] + .concat() + .as_slice(), + address.as_slice(), + [&[2u8], data.discriminator.as_slice()].concat().as_slice(), + &data.data_hash, + ]) + .unwrap(); + assert_eq!(hash, hash_manual); + assert_eq!(hash.len(), 32); + + // no data + let compressed_account = CompressedAccount { + owner, + lamports, + address: Some(address), + data: None, + }; + let no_data_hash = compressed_account + .hash::(&merkle_tree_pubkey, &leaf_index) + .unwrap(); + + let hash_manual = Poseidon::hashv(&[ + hash_to_bn254_field_size_be(&owner.to_bytes()) + .unwrap() + .0 + .as_slice(), + leaf_index.to_le_bytes().as_slice(), + hash_to_bn254_field_size_be(&merkle_tree_pubkey.to_bytes()) + .unwrap() + .0 + .as_slice(), + [&[1u8], lamports.to_le_bytes().as_slice()] + .concat() + .as_slice(), + address.as_slice(), + ]) + .unwrap(); + assert_eq!(no_data_hash, hash_manual); + assert_ne!(hash, no_data_hash); + + // no address + let compressed_account = CompressedAccount { + owner, + lamports, + address: None, + data: Some(data.clone()), + }; + let no_address_hash = compressed_account + .hash::(&merkle_tree_pubkey, &leaf_index) + .unwrap(); + let 
hash_manual = Poseidon::hashv(&[ + hash_to_bn254_field_size_be(&owner.to_bytes()) + .unwrap() + .0 + .as_slice(), + leaf_index.to_le_bytes().as_slice(), + hash_to_bn254_field_size_be(&merkle_tree_pubkey.to_bytes()) + .unwrap() + .0 + .as_slice(), + [&[1u8], lamports.to_le_bytes().as_slice()] + .concat() + .as_slice(), + [&[2u8], data.discriminator.as_slice()].concat().as_slice(), + &data.data_hash, + ]) + .unwrap(); + assert_eq!(no_address_hash, hash_manual); + assert_ne!(hash, no_address_hash); + assert_ne!(no_data_hash, no_address_hash); + + // no address no lamports + let compressed_account = CompressedAccount { + owner, + lamports: 0, + address: None, + data: Some(data.clone()), + }; + let no_address_no_lamports_hash = compressed_account + .hash::(&merkle_tree_pubkey, &leaf_index) + .unwrap(); + let hash_manual = Poseidon::hashv(&[ + hash_to_bn254_field_size_be(&owner.to_bytes()) + .unwrap() + .0 + .as_slice(), + leaf_index.to_le_bytes().as_slice(), + hash_to_bn254_field_size_be(&merkle_tree_pubkey.to_bytes()) + .unwrap() + .0 + .as_slice(), + [&[2u8], data.discriminator.as_slice()].concat().as_slice(), + &data.data_hash, + ]) + .unwrap(); + assert_eq!(no_address_no_lamports_hash, hash_manual); + assert_ne!(hash, no_address_no_lamports_hash); + assert_ne!(no_data_hash, no_address_no_lamports_hash); + assert_ne!(no_address_hash, no_address_no_lamports_hash); + + // no address and no data + let compressed_account = CompressedAccount { + owner, + lamports, + address: None, + data: None, + }; + let no_address_no_data_hash = compressed_account + .hash::(&merkle_tree_pubkey, &leaf_index) + .unwrap(); + let hash_manual = Poseidon::hashv(&[ + hash_to_bn254_field_size_be(&owner.to_bytes()) + .unwrap() + .0 + .as_slice(), + leaf_index.to_le_bytes().as_slice(), + hash_to_bn254_field_size_be(&merkle_tree_pubkey.to_bytes()) + .unwrap() + .0 + .as_slice(), + [&[1u8], lamports.to_le_bytes().as_slice()] + .concat() + .as_slice(), + ]) + .unwrap(); + 
assert_eq!(no_address_no_data_hash, hash_manual); + assert_ne!(hash, no_address_no_data_hash); + assert_ne!(no_data_hash, no_address_no_data_hash); + assert_ne!(no_address_hash, no_address_no_data_hash); + assert_ne!(no_address_no_lamports_hash, no_address_no_data_hash); + + // no address, no data, no lamports + let compressed_account = CompressedAccount { + owner, + lamports: 0, + address: None, + data: None, + }; + let no_address_no_data_no_lamports_hash = compressed_account + .hash::(&merkle_tree_pubkey, &leaf_index) + .unwrap(); + let hash_manual = Poseidon::hashv(&[ + hash_to_bn254_field_size_be(&owner.to_bytes()) + .unwrap() + .0 + .as_slice(), + leaf_index.to_le_bytes().as_slice(), + hash_to_bn254_field_size_be(&merkle_tree_pubkey.to_bytes()) + .unwrap() + .0 + .as_slice(), + ]) + .unwrap(); + assert_eq!(no_address_no_data_no_lamports_hash, hash_manual); + assert_ne!(no_address_no_data_hash, no_address_no_data_no_lamports_hash); + assert_ne!(hash, no_address_no_data_no_lamports_hash); + assert_ne!(no_data_hash, no_address_no_data_no_lamports_hash); + assert_ne!(no_address_hash, no_address_no_data_no_lamports_hash); + assert_ne!( + no_address_no_lamports_hash, + no_address_no_data_no_lamports_hash + ); + } +} diff --git a/programs/system/src/context.rs b/programs/system/src/context.rs new file mode 100644 index 0000000000..e5ab6db3c9 --- /dev/null +++ b/programs/system/src/context.rs @@ -0,0 +1,125 @@ +use account_compression::utils::transfer_lamports::transfer_lamports_cpi; +use anchor_lang::prelude::*; +use anchor_lang::Result; +use light_utils::hash_to_bn254_field_size_be; + +// TODO: +// 1. only one iteration per inputs, addresses, read-only, and outputs. +// -> do all the checks in one place and collect data in bytes for cpi. +pub struct SystemContext<'info> { + pub account_indices: Vec, + pub accounts: Vec, + // Would be better to store references. + pub account_infos: Vec>, + // TODO: switch to store account indices once we have new context. 
+ // TODO: switch to (u8, [u8; 32]) + pub hashed_pubkeys: Vec<(Pubkey, [u8; 32])>, + // Addresses for deduplication. + // Try to find a way without storing the addresses. + pub addresses: Vec>, + // Index of account and fee to be paid. + pub rollover_fee_payments: Vec<(u8, u64)>, + pub address_fee_is_set: bool, + pub network_fee_is_set: bool, + pub legacy_merkle_context: Vec<(u8, MerkleTreeContext)>, + pub invoking_program_id: Option, +} + +/// Helper for legacy trees. +pub struct MerkleTreeContext { + pub rollover_fee: u64, + pub hashed_pubkey: [u8; 32], +} + +impl SystemContext<'_> { + pub fn set_address_fee(&mut self, fee: u64, index: u8) { + if !self.address_fee_is_set { + self.address_fee_is_set = true; + self.rollover_fee_payments.push((index, fee)); + } + } + + pub fn set_network_fee(&mut self, fee: u64, index: u8) { + if !self.network_fee_is_set { + self.network_fee_is_set = true; + self.rollover_fee_payments.push((index, fee)); + } + } + + pub fn get_or_hash_pubkey(&mut self, pubkey: Pubkey) -> [u8; 32] { + let hashed_pubkey = self + .hashed_pubkeys + .iter() + .find(|a| a.0 == pubkey) + .map(|a| a.1); + match hashed_pubkey { + Some(hashed_pubkey) => hashed_pubkey, + None => { + let hashed_pubkey = hash_to_bn254_field_size_be(&pubkey.to_bytes()).unwrap().0; + self.hashed_pubkeys.push((pubkey, hashed_pubkey)); + hashed_pubkey + } + } + } +} + +impl<'info> SystemContext<'info> { + pub fn get_index_or_insert( + &mut self, + ix_data_index: u8, + remaining_accounts: &[AccountInfo<'info>], + ) -> u8 { + let queue_index = self + .account_indices + .iter() + .position(|a| *a == ix_data_index); + let queue_index = match queue_index { + Some(index) => index as u8, + None => { + self.account_indices.push(ix_data_index); + let account_info = &remaining_accounts[ix_data_index as usize]; + self.accounts.push(AccountMeta { + pubkey: account_info.key(), + is_signer: false, + is_writable: true, + }); + self.account_infos.push(account_info.clone()); + 
self.account_indices.len() as u8 - 1 + } + }; + queue_index + } + + pub fn set_rollover_fee(&mut self, ix_data_index: u8, fee: u64) { + let payment = self + .rollover_fee_payments + .iter_mut() + .find(|a| a.0 == ix_data_index); + match payment { + Some(payment) => payment.1 += fee, + None => self.rollover_fee_payments.push((ix_data_index, fee)), + }; + } + + /// Network fee distribution: + /// - if any account is created or modified -> transfer network fee (5000 lamports) + /// (Previously we didn't charge for appends now we have to since values go into a queue.) + /// - if an address is created -> transfer an additional network fee (5000 lamports) + /// + /// Examples: + /// 1. create account with address network fee 10,000 lamports + /// 2. token transfer network fee 5,000 lamports + /// 3. mint token network fee 5,000 lamports + /// Transfers rollover and network fees. + pub fn transfer_fees( + &self, + accounts: &[AccountInfo<'info>], + fee_payer: &AccountInfo<'info>, + ) -> Result<()> { + // TODO: if len is 1 don't do a cpi mutate lamports. 
+ for (i, fee) in self.rollover_fee_payments.iter() { + transfer_lamports_cpi(fee_payer, &accounts[*i as usize], *fee)?; + } + Ok(()) + } +} diff --git a/programs/system/src/errors.rs b/programs/system/src/errors.rs index abbaaa0035..45658da80d 100644 --- a/programs/system/src/errors.rs +++ b/programs/system/src/errors.rs @@ -74,4 +74,5 @@ pub enum SystemProgramError { InvalidAddressTreeHeight, InvalidStateTreeHeight, InvalidArgument, + InvalidAccount, } diff --git a/programs/system/src/instruction_data.rs b/programs/system/src/instruction_data.rs index 3f992b6c32..0e6b376c0d 100644 --- a/programs/system/src/instruction_data.rs +++ b/programs/system/src/instruction_data.rs @@ -1,4 +1,3 @@ -use anchor_lang::solana_program::log::sol_log_compute_units; use light_utils::pubkey::Pubkey; use light_verifier::CompressedProof; use light_zero_copy::{borsh::Deserialize, errors::ZeroCopyError, slice::ZeroCopySliceBorsh}; @@ -10,7 +9,10 @@ use zerocopy::{ use crate::{ sdk::{ - compressed_account::{CompressedAccount, CompressedAccountData}, + compressed_account::{ + CompressedAccount, CompressedAccountData, PackedCompressedAccountWithMerkleContext, + PackedMerkleContext, + }, CompressedCpiContext, }, OutputCompressedAccountWithPackedContext, @@ -74,24 +76,24 @@ impl ZPackedMerkleContext { impl<'a> Deserialize<'a> for ZPackedMerkleContext { type Output = Ref<&'a [u8], Self>; - fn deserialize_at(bytes: &'a [u8]) -> Result<(Self::Output, &[u8]), ZeroCopyError> { + fn deserialize_at(bytes: &'a [u8]) -> Result<(Self::Output, &'a [u8]), ZeroCopyError> { Ok(Ref::<&[u8], Self>::from_prefix(bytes)?) 
} } #[repr(C)] -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub struct ZOutputCompressedAccountWithPackedContext<'a> { pub compressed_account: ZCompressedAccount<'a>, pub merkle_tree_index: u8, } -impl<'a> From> +impl<'a> From<&ZOutputCompressedAccountWithPackedContext<'a>> for OutputCompressedAccountWithPackedContext { - fn from(output_compressed_account: ZOutputCompressedAccountWithPackedContext<'a>) -> Self { + fn from(output_compressed_account: &ZOutputCompressedAccountWithPackedContext<'a>) -> Self { OutputCompressedAccountWithPackedContext { - compressed_account: output_compressed_account.compressed_account.into(), + compressed_account: (&output_compressed_account.compressed_account).into(), merkle_tree_index: output_compressed_account.merkle_tree_index, } } @@ -114,7 +116,7 @@ impl<'a> Deserialize<'a> for ZOutputCompressedAccountWithPackedContext<'a> { } } -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub struct ZCompressedAccountData<'a> { pub discriminator: Ref<&'a [u8], [u8; 8]>, pub data: &'a [u8], @@ -163,7 +165,7 @@ pub struct AccountDesMeta { address_option: u8, } -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub struct ZCompressedAccount<'a> { meta: Ref<&'a [u8], AccountDesMeta>, pub address: Option>, @@ -178,8 +180,8 @@ impl Deref for ZCompressedAccount<'_> { } } -impl<'a> From> for CompressedAccount { - fn from(compressed_account: ZCompressedAccount) -> Self { +impl<'a> From<&ZCompressedAccount<'a>> for CompressedAccount { + fn from(compressed_account: &ZCompressedAccount) -> Self { let data: Option = if let Some(data) = compressed_account.data.as_ref() { Some(CompressedAccountData { @@ -199,7 +201,6 @@ impl<'a> From> for CompressedAccount { } } -use anchor_lang::solana_program::msg; impl<'a> Deserialize<'a> for ZCompressedAccount<'a> { type Output = Self; @@ -236,14 +237,34 @@ pub struct ZPackedCompressedAccountWithMerkleContextMeta { read_only: u8, } -#[derive(Debug, PartialEq)] +impl From 
for PackedMerkleContext { + fn from(merkle_context: ZPackedMerkleContext) -> Self { + PackedMerkleContext { + merkle_tree_pubkey_index: merkle_context.merkle_tree_pubkey_index, + nullifier_queue_pubkey_index: merkle_context.nullifier_queue_pubkey_index, + leaf_index: merkle_context.leaf_index.into(), + prove_by_index: merkle_context.prove_by_index == 1, + } + } +} + +#[derive(Debug, PartialEq, Clone)] pub struct ZPackedCompressedAccountWithMerkleContext<'a> { pub compressed_account: ZCompressedAccount<'a>, - meta: Ref<&'a [u8], ZPackedCompressedAccountWithMerkleContextMeta>, // pub merkle_context: Ref<&'a [u8], ZPackedMerkleContext>, - // /// Index of root used in inclusion validity proof. - // pub root_index: Ref<&'a [u8], U16>, - // /// Placeholder to mark accounts read-only unimplemented set to false. - // pub read_only: bool, + meta: Ref<&'a [u8], ZPackedCompressedAccountWithMerkleContextMeta>, +} + +impl From<&ZPackedCompressedAccountWithMerkleContext<'_>> + for PackedCompressedAccountWithMerkleContext +{ + fn from(packed_compressed_account: &ZPackedCompressedAccountWithMerkleContext<'_>) -> Self { + PackedCompressedAccountWithMerkleContext { + compressed_account: (&packed_compressed_account.compressed_account).into(), + merkle_context: packed_compressed_account.merkle_context.into(), + root_index: packed_compressed_account.root_index.into(), + read_only: packed_compressed_account.read_only == 1, + } + } } impl Deref for ZPackedCompressedAccountWithMerkleContext<'_> { @@ -258,13 +279,10 @@ impl<'a> Deserialize<'a> for ZPackedCompressedAccountWithMerkleContext<'a> { type Output = Self; fn deserialize_at(bytes: &'a [u8]) -> Result<(Self, &'a [u8]), ZeroCopyError> { let (compressed_account, bytes) = ZCompressedAccount::deserialize_at(bytes)?; - // let (merkle_context, bytes) = ZPackedMerkleContext::deserialize_at(bytes)?; - // let (root_index, bytes) = Ref::<&[u8], U16>::from_prefix(bytes)?; - // let (read_only, bytes) = u8::deserialize_at(bytes)?; let (meta, 
bytes) = Ref::<&[u8], ZPackedCompressedAccountWithMerkleContextMeta>::from_prefix(bytes)?; if meta.read_only == 1 { - unimplemented!("Read only accounts not implemented"); + unimplemented!("Read only accounts are implemented as a separate instruction."); } Ok(( @@ -379,30 +397,16 @@ impl<'a> From> for ZInstructionDataInvoke<'a> { impl<'a> Deserialize<'a> for ZInstructionDataInvokeCpi<'a> { type Output = Self; fn deserialize_at(bytes: &'a [u8]) -> Result<(Self, &'a [u8]), ZeroCopyError> { - // msg!("proof"); - // sol_log_compute_units(); let (proof, bytes) = Option::::deserialize_at(bytes)?; - // sol_log_compute_units(); - // msg!("address"); - // sol_log_compute_units(); let (new_address_params, bytes) = ZeroCopySliceBorsh::from_bytes_at(bytes)?; - // msg!("inputs"); - // sol_log_compute_units(); let (input_compressed_accounts_with_merkle_context, bytes) = Vec::::deserialize_at(bytes)?; - // msg!("outputs"); - // sol_log_compute_units(); let (output_compressed_accounts, bytes) = Vec::::deserialize_at(bytes)?; - // sol_log_compute_units(); - // msg!("relay fee"); - // sol_log_compute_units(); let (option_relay_fee, bytes) = bytes.split_at(1); if option_relay_fee[0] == 1 { unimplemented!(" Relay fee is unimplemented"); } - // sol_log_compute_units(); - let (compress_or_decompress_lamports, bytes) = Option::>::deserialize_at(bytes)?; let (is_compress, bytes) = u8::deserialize_at(bytes)?; @@ -456,19 +460,7 @@ pub struct ZPackedReadOnlyCompressedAccount { impl<'a> Deserialize<'a> for ZPackedReadOnlyCompressedAccount { type Output = Self; fn deserialize_at(_bytes: &'a [u8]) -> Result<(Self, &'a [u8]), ZeroCopyError> { - unimplemented!(""); - - // let (account_hash, bytes) = bytes.split_at(size_of::<[u8; 32]>()); - // // let (merkle_context, bytes) = ZPackedMerkleContext::deserialize_at(bytes)?; - // let (root_index, bytes) = U16::ref_from_prefix(bytes)?; - // Ok(( - // ZPackedReadOnlyCompressedAccount { - // account_hash: account_hash.try_into().unwrap(), - // 
merkle_context: ZPackedMerkleContext::default(), - // root_index: (*root_index).into(), - // }, - // bytes, - // )) + unimplemented!("Place holder to satisfy trait bounds."); } } @@ -512,7 +504,7 @@ mod test { InstructionDataInvokeCpi, OutputCompressedAccountWithPackedContext, }; use crate::{ - invoke::processor::CompressedProof, InstructionDataInvoke, NewAddressParamsPacked, + processor::processor::CompressedProof, InstructionDataInvoke, NewAddressParamsPacked, }; use anchor_lang::AnchorSerialize; diff --git a/programs/system/src/invoke/address.rs b/programs/system/src/invoke/address.rs deleted file mode 100644 index 778f027ca8..0000000000 --- a/programs/system/src/invoke/address.rs +++ /dev/null @@ -1,186 +0,0 @@ -use account_compression::{ - append_nullify_create_address::AppendNullifyCreateAddressInputs, - errors::AccountCompressionErrorCode, utils::constants::CPI_AUTHORITY_PDA_SEED, - AddressMerkleTreeAccount, -}; -use anchor_lang::{prelude::*, Discriminator}; -use light_batched_merkle_tree::merkle_tree::BatchedMerkleTreeAccount; -use light_hasher::Discriminator as LightDiscriminator; - -use crate::{ - constants::CPI_AUTHORITY_PDA_BUMP, - errors::SystemProgramError, - instruction_data::ZNewAddressParamsPacked, - invoke_cpi::verify_signer::check_program_owner_address_merkle_tree, - sdk::address::{derive_address, derive_address_legacy}, -}; - -use super::cpi_acp::CpiData; - -pub fn derive_new_addresses<'info>( - invoking_program_id: &Option, - new_address_params: &[ZNewAddressParamsPacked], - num_input_compressed_accounts: usize, - remaining_accounts: &'info [AccountInfo<'info>], - invoking_program: &Option, - cpi_data: &mut CpiData<'info>, - cpi_ix_data: &mut AppendNullifyCreateAddressInputs<'_>, -) -> Result> { - let init_len = cpi_data.account_indices.len(); - let invoking_program_id_bytes = invoking_program_id - .as_ref() - .map(|invoking_program_id| invoking_program_id.to_bytes()); - let mut network_fee_bundle = None; - new_address_params - .iter() - 
.enumerate() - .try_for_each(|(i, new_address_params)| { - let mut discriminator_bytes = [0u8; 8]; - discriminator_bytes.copy_from_slice( - &remaining_accounts[new_address_params.address_merkle_tree_account_index as usize] - .try_borrow_data()?[0..8], - ); - let address = match discriminator_bytes { - AddressMerkleTreeAccount::DISCRIMINATOR => derive_address_legacy( - &remaining_accounts - [new_address_params.address_merkle_tree_account_index as usize] - .key(), - &new_address_params.seed, - ) - .map_err(ProgramError::from)?, - BatchedMerkleTreeAccount::DISCRIMINATOR => { - let invoking_program_id_bytes = - if let Some(bytes) = invoking_program_id_bytes.as_ref() { - Ok(bytes) - } else { - err!(SystemProgramError::DeriveAddressError) - }?; - derive_address( - &new_address_params.seed, - &remaining_accounts - [new_address_params.address_merkle_tree_account_index as usize] - .key() - .to_bytes(), - invoking_program_id_bytes, - ) - } - _ => { - return err!( - AccountCompressionErrorCode::AddressMerkleTreeAccountDiscriminatorMismatch - ) - } - }; - // If at least one new address is created an address network fee is - // paid.The network fee is paid once per transaction, defined in the - // state Merkle tree and transferred to the nullifier queue because the - // nullifier queue is mutable. The network fee field in the queue is not - // used. - let (network_fee, rollover_fee) = check_program_owner_address_merkle_tree( - &remaining_accounts[new_address_params.address_merkle_tree_account_index as usize], - invoking_program, - )?; - // We select the first network fee we find. All Merkle trees are - // initialized with the same network fee. - if network_fee_bundle.is_none() && network_fee.is_some() { - network_fee_bundle = Some(( - new_address_params.address_queue_account_index, - network_fee.unwrap(), - )); - } - // We are inserting addresses into two vectors to avoid unwrapping - // the option in following functions. 
- cpi_data.addresses[i + num_input_compressed_accounts] = Some(address); - cpi_ix_data.addresses[i].address = address; - // TODO: skip for v2 trees - cpi_ix_data.addresses[i].queue_index = cpi_data.get_index_or_insert( - new_address_params.address_queue_account_index, - &remaining_accounts, - ); - cpi_ix_data.addresses[i].tree_index = cpi_data.get_index_or_insert( - new_address_params.address_merkle_tree_account_index, - &remaining_accounts, - ); - cpi_data.set_rollover_fee(new_address_params.address_queue_account_index, rollover_fee); - Ok(()) - })?; - cpi_ix_data.num_address_appends = (cpi_data.account_indices.len() - init_len) as u8; - - Ok(network_fee_bundle) -} - -// pub fn insert_addresses_into_address_merkle_tree_queue< -// 'a, -// 'b, -// 'c: 'info, -// 'info, -// A: InvokeAccounts<'info> + SignerAccounts<'info> + Bumps, -// >( -// ctx: &'a Context<'a, 'b, 'c, 'info, A>, -// addresses: &'a [[u8; 32]], -// new_address_params: &'a [NewAddressParamsPacked], -// invoking_program: &Option, -// ) -> anchor_lang::Result> { -// let mut remaining_accounts = Vec::::with_capacity(new_address_params.len() * 2); -// let mut network_fee_bundle = None; - -// new_address_params.iter().try_for_each(|params| { -// remaining_accounts -// .push(ctx.remaining_accounts[params.address_queue_account_index as usize].clone()); - -// remaining_accounts.push( -// ctx.remaining_accounts[params.address_merkle_tree_account_index as usize].clone(), -// ); -// // If at least one new address is created an address network fee is -// // paid.The network fee is paid once per transaction, defined in the -// // state Merkle tree and transferred to the nullifier queue because the -// // nullifier queue is mutable. The network fee field in the queue is not -// // used. -// let network_fee = check_program_owner_address_merkle_tree( -// &ctx.remaining_accounts[params.address_merkle_tree_account_index as usize], -// invoking_program, -// )?; -// // We select the first network fee we find. 
All Merkle trees are -// // initialized with the same network fee. -// if network_fee_bundle.is_none() && network_fee.is_some() { -// network_fee_bundle = Some((params.address_queue_account_index, network_fee.unwrap())); -// } -// anchor_lang::Result::Ok(()) -// })?; - -// insert_addresses_cpi( -// ctx.accounts.get_account_compression_program(), -// &ctx.accounts.get_fee_payer().to_account_info(), -// ctx.accounts.get_account_compression_authority(), -// &ctx.accounts.get_registered_program_pda().to_account_info(), -// &ctx.accounts.get_system_program().to_account_info(), -// remaining_accounts, -// addresses.to_vec(), -// )?; -// Ok(network_fee_bundle) -// } - -#[allow(clippy::too_many_arguments)] -pub fn insert_addresses_cpi<'a, 'b>( - account_compression_program_id: &'b AccountInfo<'a>, - fee_payer: &'b AccountInfo<'a>, - authority: &'b AccountInfo<'a>, - registered_program_pda: &'b AccountInfo<'a>, - system_program: &'b AccountInfo<'a>, - remaining_accounts: Vec>, - addresses: Vec<[u8; 32]>, -) -> Result<()> { - let bump = &[CPI_AUTHORITY_PDA_BUMP]; - let seeds = &[&[CPI_AUTHORITY_PDA_SEED, bump][..]]; - let accounts = account_compression::cpi::accounts::InsertIntoQueues { - fee_payer: fee_payer.to_account_info(), - authority: authority.to_account_info(), - registered_program_pda: Some(registered_program_pda.to_account_info()), - system_program: system_program.to_account_info(), - }; - - let mut cpi_ctx = - CpiContext::new_with_signer(account_compression_program_id.clone(), accounts, seeds); - cpi_ctx.remaining_accounts.extend(remaining_accounts); - - account_compression::cpi::insert_addresses(cpi_ctx, addresses) -} diff --git a/programs/system/src/invoke/append_state.rs b/programs/system/src/invoke/append_state.rs deleted file mode 100644 index 1b2ead83fc..0000000000 --- a/programs/system/src/invoke/append_state.rs +++ /dev/null @@ -1,308 +0,0 @@ -use crate::{ - errors::SystemProgramError, instruction_data::ZOutputCompressedAccountWithPackedContext, - 
invoke_cpi::verify_signer::check_program_owner_state_merkle_tree, - sdk::event::MerkleTreeSequenceNumber, -}; -use account_compression::append_nullify_create_address::AppendNullifyCreateAddressInputs; -use anchor_lang::{prelude::*, solana_program::pubkey::Pubkey}; -use light_hasher::{Hasher, Poseidon}; -use light_utils::hash_to_bn254_field_size_be; - -use super::cpi_acp::CpiData; - -// #[allow(clippy::too_many_arguments)] -// #[heap_neutral] -// pub fn insert_output_compressed_accounts_into_state_merkle_tree< -// 'a, -// 'b, -// 'c: 'info, -// 'info, -// A: InvokeAccounts<'info> + SignerAccounts<'info> + Bumps, -// >( -// output_compressed_accounts: &[OutputCompressedAccountWithPackedContext], -// ctx: &'a Context<'a, 'b, 'c, 'info, A>, -// output_compressed_account_indices: &'a mut [u32], -// // output_compressed_account_hashes: &'a mut [[u8; 32]], -// // compressed_account_addresses: &'a mut Vec>, -// invoking_program: &Option, -// // hashed_pubkeys: &'a mut Vec<(Pubkey, [u8; 32])>, -// // sequence_numbers: &'a mut Vec, -// cpi_data: &'a mut CpiData<'info>, -// cpi_ix_data: &'a mut AppendNullifyCreateAddressInputs<'a>, -// ) -> Result> { -// bench_sbf_start!("cpda_append_data_init"); -// // let mut account_infos = vec![ -// // ctx.accounts.get_fee_payer().to_account_info(), // fee payer -// // ctx.accounts -// // .get_account_compression_authority() // authority -// // .to_account_info(), -// // ctx.accounts.get_registered_program_pda().to_account_info(), -// // ctx.accounts.get_system_program().to_account_info(), -// // ]; -// // let mut accounts = vec![ -// // AccountMeta { -// // pubkey: account_infos[0].key(), -// // is_signer: true, -// // is_writable: true, -// // }, -// // AccountMeta { -// // pubkey: account_infos[1].key(), -// // is_signer: true, -// // is_writable: false, -// // }, -// // AccountMeta::new_readonly(account_infos[2].key(), false), -// // AccountMeta::new_readonly(account_infos[3].key(), false), -// // ]; - -// let (instruction_data, 
network_fee_bundle) = create_cpi_accounts_and_instruction_data( -// output_compressed_accounts, -// output_compressed_account_indices, -// // output_compressed_account_hashes, -// // compressed_account_addresses, -// invoking_program, -// // hashed_pubkeys, -// // sequence_numbers, -// ctx.remaining_accounts, -// // &mut account_infos, -// // &mut accounts, -// )?; - -// // let bump = &[CPI_AUTHORITY_PDA_BUMP]; -// // let seeds = &[&[CPI_AUTHORITY_PDA_SEED, bump][..]]; -// // let instruction = anchor_lang::solana_program::instruction::Instruction { -// // program_id: account_compression::ID, -// // accounts, -// // data: instruction_data, -// // }; -// // invoke_signed(&instruction, account_infos.as_slice(), seeds)?; -// bench_sbf_end!("cpda_append_rest"); - -// Ok(network_fee_bundle) -// } - -/// Creates CPI accounts, instruction data, and performs checks. -/// - Merkle tree indices must be in order. -/// - Hashes output accounts for insertion and event. -/// - Collects sequence numbers for event. -/// -/// Checks: -/// 1. Checks whether a Merkle tree is program owned, if so checks write -/// eligibility. -/// 2. Checks ordering of Merkle tree indices. -/// 3. Checks that addresses in output compressed accounts have been created or -/// exist in input compressed accounts. An address may not be used in an -/// output compressed accounts. This will close the account. 
-#[allow(clippy::too_many_arguments)] -#[allow(clippy::type_complexity)] -pub fn create_cpi_accounts_and_instruction_data<'a, 'info>( - output_compressed_accounts: &[ZOutputCompressedAccountWithPackedContext<'a>], - output_compressed_account_indices: &mut [u32], - // output_compressed_account_hashes: &mut [[u8; 32]], - // compressed_account_addresses: &mut Vec>, - invoking_program: &Option, - // hashed_pubkeys: &mut Vec<(Pubkey, [u8; 32])>, - sequence_numbers: &mut Vec, - remaining_accounts: &'info [AccountInfo<'info>], - // account_infos: &mut Vec>, - // accounts: &mut Vec, - cpi_data: &mut CpiData<'info>, - cpi_ix_data: &mut AppendNullifyCreateAddressInputs<'a>, -) -> Result<(Option<(u8, u64)>, [u8; 32])> { - let mut current_index: i16 = -1; - let mut num_leaves_in_tree: u32 = 0; - let mut mt_next_index = 0; - let mut network_fee_bundle = None; - // let mut instruction_data = Vec::::with_capacity(16 + 33 * num_leaves); - let mut hashed_merkle_tree = [0u8; 32]; - let mut index_merkle_tree_account = 0; - let number_of_merkle_trees = - output_compressed_accounts.last().unwrap().merkle_tree_index as usize + 1; - let mut merkle_tree_pubkeys = Vec::::with_capacity(number_of_merkle_trees); - let mut hash_chain = [0u8; 32]; - let mut rollover_fee = 0; - - // Anchor instruction signature. - // instruction_data.extend_from_slice(&[199, 144, 10, 82, 247, 142, 143, 7]); - // // Bytes Vec length. - // // instruction_data.extend_from_slice(&(instruction_data.capacity() as u32 - 8).to_le_bytes()); - // instruction_data.extend_from_slice( - // &((num_leaves * size_of::() + 4) as u32).to_le_bytes(), - // ); - // // leaves vector length (for borsh compat) - // instruction_data.extend_from_slice(&(num_leaves as u32).to_le_bytes()); - - for (j, account) in output_compressed_accounts.iter().enumerate() { - // if mt index == current index Merkle tree account info has already been added. - // if mt index != current index, Merkle tree account info is new, add it. 
- #[allow(clippy::comparison_chain)] - if account.merkle_tree_index as i16 == current_index { - // Do nothing, but it is the most common case. - } else if account.merkle_tree_index as i16 > current_index { - current_index = account.merkle_tree_index.into(); - let seq; - let merkle_tree_pubkey; - let network_fee; - let int_rollover_fee; - // Check 1. - ( - mt_next_index, - network_fee, - seq, - merkle_tree_pubkey, - int_rollover_fee, - ) = check_program_owner_state_merkle_tree::( - &remaining_accounts[account.merkle_tree_index as usize], - invoking_program, - )?; - rollover_fee = int_rollover_fee; - if network_fee_bundle.is_none() && network_fee.is_some() { - network_fee_bundle = Some((account.merkle_tree_index, network_fee.unwrap())); - } - let account_info = - remaining_accounts[account.merkle_tree_index as usize].to_account_info(); - sequence_numbers.push(MerkleTreeSequenceNumber { - pubkey: account_info.key(), - seq, - }); - - hashed_merkle_tree = match cpi_data - .hashed_pubkeys - .iter() - .find(|x| x.0 == merkle_tree_pubkey) - { - Some(hashed_merkle_tree) => hashed_merkle_tree.1, - None => { - hash_to_bn254_field_size_be(&merkle_tree_pubkey.to_bytes()) - .unwrap() - .0 - } - }; - // check Merkle tree uniqueness - if merkle_tree_pubkeys.contains(&account_info.key()) { - return err!(SystemProgramError::OutputMerkleTreeNotUnique); - } else { - merkle_tree_pubkeys.push(account_info.key()); - } - // cpi_data.accounts.push(AccountMeta { - // pubkey: account_info.key(), - // is_signer: false, - // is_writable: true, - // }); - // cpi_data.account_infos.push(account_info); - // cpi_data.account_indices.push(index_merkle_tree_account); - cpi_data.get_index_or_insert(account.merkle_tree_index, &remaining_accounts); - num_leaves_in_tree = 0; - index_merkle_tree_account += 1; - } else { - // Check 2. - // Output Merkle tree indices must be in order since we use the - // number of leaves in a Merkle tree to determine the correct leaf - // index. 
Since the leaf index is part of the hash this is security - // critical. - return err!(SystemProgramError::OutputMerkleTreeIndicesNotInOrder); - } - - // Check 3. - if let Some(address) = account.compressed_account.address { - if let Some(position) = cpi_data - .addresses - .iter() - .filter(|x| x.is_some()) - .position(|&x| x.unwrap() == *address) - { - cpi_data.addresses.remove(position); - } else { - msg!("Address {:?}, is no new address and does not exist in input compressed accounts.", address); - msg!( - "Remaining compressed_account_addresses: {:?}", - cpi_data.addresses - ); - return Err(SystemProgramError::InvalidAddress.into()); - } - } - - output_compressed_account_indices[j] = mt_next_index + num_leaves_in_tree; - num_leaves_in_tree += 1; - if account.compressed_account.data.is_some() && invoking_program.is_none() { - msg!("Invoking program is not provided."); - msg!("Only program owned compressed accounts can have data."); - return err!(SystemProgramError::InvokingProgramNotProvided); - } - let hashed_owner = match cpi_data - .hashed_pubkeys - .iter() - .find(|x| x.0 == account.compressed_account.owner.into()) - { - Some(hashed_owner) => hashed_owner.1, - None => { - let hashed_owner = - hash_to_bn254_field_size_be(&account.compressed_account.owner.to_bytes()) - .unwrap() - .0; - cpi_data - .hashed_pubkeys - .push((account.compressed_account.owner.into(), hashed_owner)); - hashed_owner - } - }; - // Compute output compressed account hash. - cpi_ix_data.leaves[j].leaf = account - .compressed_account - .hash_with_hashed_values::( - &hashed_owner, - &hashed_merkle_tree, - &output_compressed_account_indices[j], - )?; - cpi_ix_data.leaves[j].index = index_merkle_tree_account - 1; - // cpi_data.get_index_or_insert(current_index as u8, remaining_accounts); - // - 1 since we want the index of the next account index. 
- // instruction_data.extend_from_slice(&[index_merkle_tree_account - 1]); - // instruction_data.extend_from_slice(&output_compressed_account_hashes[j]); - if !cpi_ix_data.nullifiers.is_empty() { - if j == 0 { - hash_chain = cpi_ix_data.leaves[j].leaf; - } else { - hash_chain = Poseidon::hashv(&[&hash_chain, &cpi_ix_data.leaves[j].leaf]) - .map_err(ProgramError::from)?; - } - } - cpi_data.set_rollover_fee(index_merkle_tree_account - 1, rollover_fee); - } - - cpi_ix_data.num_unique_appends = cpi_data.account_indices.len() as u8; - Ok((network_fee_bundle, hash_chain)) -} - -#[test] -fn test_instruction_data_borsh_compat() { - use account_compression::AppendLeavesInput; - use light_zero_copy::slice_mut::ZeroCopySliceMutU32; - let mut vec = Vec::::new(); - vec.extend_from_slice(&((2 * size_of::() + 4) as u32).to_le_bytes()); - vec.extend_from_slice(&2u32.to_le_bytes()); - vec.push(1); - vec.extend_from_slice(&[2u8; 32]); - vec.push(3); - vec.extend_from_slice(&[4u8; 32]); - - let refe = vec![ - AppendLeavesInput { - index: 1, - leaf: [2u8; 32], - }, - AppendLeavesInput { - index: 3, - leaf: [4u8; 32], - }, - ]; - let mut bytes = Vec::new(); - refe.serialize(&mut bytes).unwrap(); - use anchor_lang::InstructionData; - let instruction_data = account_compression::instruction::AppendLeavesToMerkleTrees { bytes }; - println!("discriminator {:?}", instruction_data.data()[0..8].to_vec()); - let serialized = instruction_data.data()[8..].to_vec(); - assert_eq!(serialized, vec); - let res = ZeroCopySliceMutU32::::from_bytes(&mut vec[4..]).unwrap(); - - assert_eq!(res.as_slice(), refe.as_slice()); -} diff --git a/programs/system/src/invoke/cpi_acp.rs b/programs/system/src/invoke/cpi_acp.rs deleted file mode 100644 index ac2de4d8a0..0000000000 --- a/programs/system/src/invoke/cpi_acp.rs +++ /dev/null @@ -1,162 +0,0 @@ -use account_compression::{ - append_nullify_create_address::AppendNullifyCreateAddressInputs, - utils::{constants::CPI_AUTHORITY_PDA_SEED, 
transfer_lamports::transfer_lamports_cpi}, -}; -use anchor_lang::{prelude::*, Bumps}; - -use crate::{ - constants::CPI_AUTHORITY_PDA_BUMP, - sdk::accounts::{InvokeAccounts, SignerAccounts}, -}; - -// TODO: -// 1. only one iteration per inputs, addresses, read-only, and outputs. -// -> do all the checks in one place and collect data in bytes for cpi. -pub struct CpiData<'info> { - pub account_indices: Vec, - pub accounts: Vec, - // Would be better to store references. - pub account_infos: Vec>, - // TODO: switch to store account indices once we have new context. - pub hashed_pubkeys: Vec<(Pubkey, [u8; 32])>, - // Addresses for deduplication. - // Try to find a way without storing the addresses. - pub addresses: Vec>, - // Index of account and fee to be paid. - pub rollover_fee_payments: Vec<(u8, u64)>, -} - -// TODO: remove event to expose all output account data we need to copy the -// entire accounts including vectors. -// we can probably just put this data at the end of the vec so that -// we can easily skip it the account compression program. -// Maybe I can even just get the pointer to the data -// and put that into a separate cpi. 
-pub fn create_cpi_data< - 'a, - 'b, - 'c: 'info, - 'info, - A: InvokeAccounts<'info> + SignerAccounts<'info> + Bumps, ->( - ctx: &'a Context<'a, 'b, 'c, 'info, A>, - num_leaves: u8, - num_nullifiers: u8, - num_new_addresses: u8, - hashed_pubkeys_capacity: usize, -) -> Result<(CpiData<'info>, Vec)> { - let account_infos = vec![ - ctx.accounts.get_fee_payer().to_account_info(), - ctx.accounts - .get_account_compression_authority() - .to_account_info(), - ctx.accounts.get_registered_program_pda().to_account_info(), - ctx.accounts.get_system_program().to_account_info(), - ]; - let accounts = vec![ - AccountMeta { - pubkey: account_infos[0].key(), - is_signer: true, - is_writable: true, - }, - AccountMeta::new_readonly(account_infos[1].key(), true), - AccountMeta::new_readonly(account_infos[2].key(), false), - AccountMeta::new_readonly(account_infos[3].key(), false), - ]; - let account_indices = - Vec::::with_capacity((num_nullifiers + num_leaves + num_new_addresses) as usize); - let bytes_size = AppendNullifyCreateAddressInputs::required_size_for_capacity( - num_leaves, - num_nullifiers, - num_new_addresses, - ); - let bytes = vec![0u8; bytes_size]; - Ok(( - CpiData { - account_indices, - accounts, - account_infos, - hashed_pubkeys: Vec::with_capacity(hashed_pubkeys_capacity), - // TODO: init with capacity. 
- addresses: Vec::new(), - rollover_fee_payments: Vec::new(), - }, - bytes, - )) -} -impl<'info> CpiData<'info> { - pub fn get_index_or_insert( - &mut self, - ix_data_index: u8, - remaining_accounts: &[AccountInfo<'info>], - ) -> u8 { - let queue_index = self - .account_indices - .iter() - .position(|a| *a == ix_data_index); - let queue_index = match queue_index { - Some(index) => index as u8, - None => { - self.account_indices.push(ix_data_index); - let account_info = &remaining_accounts[ix_data_index as usize]; - self.accounts.push(AccountMeta { - pubkey: account_info.key(), - is_signer: false, - is_writable: true, - }); - self.account_infos.push(account_info.clone()); - self.account_indices.len() as u8 - 1 - } - }; - queue_index - } - - pub fn set_rollover_fee(&mut self, ix_data_index: u8, fee: u64) { - let payment = self - .rollover_fee_payments - .iter_mut() - .find(|a| a.0 == ix_data_index); - match payment { - Some(payment) => payment.1 += fee, - None => self.rollover_fee_payments.push((ix_data_index, fee)), - }; - } - - pub fn transfer_rollover_fees( - &self, - accounts: &[AccountInfo<'info>], - fee_payer: &AccountInfo<'info>, - ) -> Result<()> { - // TODO: if len is 1 don't do a cpi mutate lamports. - for (i, fee) in self.rollover_fee_payments.iter() { - transfer_lamports_cpi(fee_payer, &accounts[*i as usize], *fee)?; - } - Ok(()) - } -} -use anchor_lang::{InstructionData, Result}; - -pub fn cpi_account_compression_program(cpi_context: CpiData, bytes: Vec) -> Result<()> { - let CpiData { - accounts, - account_infos, - .. 
- } = cpi_context; - let instruction_data = account_compression::instruction::NullifyAppendCreateAddress { bytes }; - - let data = instruction_data.data(); - light_heap::bench_sbf_end!("cpda_instruction_data"); - let bump = &[CPI_AUTHORITY_PDA_BUMP]; - let seeds = &[&[CPI_AUTHORITY_PDA_SEED, bump][..]]; - let instruction = anchor_lang::solana_program::instruction::Instruction { - program_id: account_compression::ID, - accounts, - data, - }; - anchor_lang::solana_program::program::invoke_signed( - &instruction, - account_infos.as_slice(), - seeds, - )?; - Ok(()) -} diff --git a/programs/system/src/invoke/emit_event.rs b/programs/system/src/invoke/emit_event.rs deleted file mode 100644 index 865f9d90ee..0000000000 --- a/programs/system/src/invoke/emit_event.rs +++ /dev/null @@ -1,77 +0,0 @@ -use account_compression::emit_indexer_event; -use anchor_lang::{prelude::*, Bumps}; - -use crate::{ - errors::SystemProgramError, - instruction_data::ZInstructionDataInvoke, - sdk::{ - accounts::InvokeAccounts, - compressed_account::{CompressedAccount, CompressedAccountData}, - event::{MerkleTreeSequenceNumber, PublicTransactionEvent}, - }, -}; - -pub fn emit_state_transition_event<'a, 'b, 'c: 'info, 'info, A: InvokeAccounts<'info> + Bumps>( - inputs: ZInstructionDataInvoke<'a>, - ctx: &'a Context<'a, 'b, 'c, 'info, A>, - input_compressed_account_hashes: Vec<[u8; 32]>, - output_compressed_account_hashes: Vec<[u8; 32]>, - output_leaf_indices: Vec, - sequence_numbers: Vec, -) -> Result<()> { - // Note: message is unimplemented - // (if we compute the tx hash in indexer we don't need to modify the event.) 
- let event = PublicTransactionEvent { - input_compressed_account_hashes, - output_compressed_account_hashes, - output_compressed_accounts: inputs - .output_compressed_accounts - .iter() - .map(|x| { - let data = if let Some(data) = x.compressed_account.data.as_ref() { - Some(CompressedAccountData { - discriminator: *data.discriminator, - data: data.data.to_vec(), - data_hash: *data.data_hash, - }) - } else { - None - }; - super::OutputCompressedAccountWithPackedContext { - compressed_account: CompressedAccount { - owner: x.compressed_account.owner.into(), - lamports: u64::from(x.compressed_account.lamports), - address: x.compressed_account.address.map(|x| *x), - data, - }, - merkle_tree_index: x.merkle_tree_index, - } - }) - .collect(), - output_leaf_indices, - sequence_numbers, - relay_fee: inputs.relay_fee.map(|x| (*x).into()), - pubkey_array: ctx.remaining_accounts.iter().map(|x| x.key()).collect(), - compress_or_decompress_lamports: inputs - .compress_or_decompress_lamports - .map(|x| (*x).into()), - message: None, - is_compress: inputs.is_compress, - }; - - // 10240 = 10 * 1024 the max instruction data of a cpi. - let data_capacity = 10240; - let mut data = Vec::with_capacity(data_capacity); - event.man_serialize(&mut data)?; - - if data_capacity != data.capacity() { - msg!( - "Event serialization exceeded capacity. 
Used {}, allocated {}.", - data.capacity(), - data_capacity - ); - return err!(SystemProgramError::InvalidCapacity); - } - - emit_indexer_event(data, ctx.accounts.get_noop_program()) -} diff --git a/programs/system/src/invoke/instruction.rs b/programs/system/src/invoke/instruction.rs index 513ffe9d86..6dbde9e93b 100644 --- a/programs/system/src/invoke/instruction.rs +++ b/programs/system/src/invoke/instruction.rs @@ -1,9 +1,8 @@ -use account_compression::{program::AccountCompression, utils::constants::CPI_AUTHORITY_PDA_SEED}; +use account_compression::program::AccountCompression; use anchor_lang::prelude::*; -use super::processor::CompressedProof; use crate::{ - invoke::sol_compression::SOL_POOL_PDA_SEED, + processor::{processor::CompressedProof, sol_compression::SOL_POOL_PDA_SEED}, sdk::{ accounts::{InvokeAccounts, SignerAccounts}, compressed_account::{CompressedAccount, PackedCompressedAccountWithMerkleContext}, @@ -20,15 +19,15 @@ pub struct InvokeInstruction<'info> { pub fee_payer: Signer<'info>, pub authority: Signer<'info>, /// CHECK: this account - #[account( - seeds = [&crate::ID.to_bytes()], bump, seeds::program = &account_compression::ID, - )] + // #[account( + // seeds = [&crate::ID.to_bytes()], bump, seeds::program = &account_compression::ID, + // )] pub registered_program_pda: AccountInfo<'info>, /// CHECK: is checked when emitting the event. pub noop_program: UncheckedAccount<'info>, /// CHECK: this account in account compression program. /// This pda is used to invoke the account compression program. - #[account(seeds = [CPI_AUTHORITY_PDA_SEED], bump)] + // #[account(seeds = [CPI_AUTHORITY_PDA_SEED], bump)] pub account_compression_authority: UncheckedAccount<'info>, /// CHECK: Account compression program is used to update state and address /// Merkle trees. 
diff --git a/programs/system/src/invoke/mod.rs b/programs/system/src/invoke/mod.rs index 04587325a7..7328f88737 100644 --- a/programs/system/src/invoke/mod.rs +++ b/programs/system/src/invoke/mod.rs @@ -1,12 +1,3 @@ pub mod instruction; pub use instruction::*; -pub mod address; -pub mod append_state; -pub mod cpi_acp; -pub mod emit_event; -pub mod nullify_state; -pub mod processor; -pub mod sol_compression; -pub mod sum_check; -pub mod verify_proof; pub mod verify_signer; diff --git a/programs/system/src/invoke/nullify_state.rs b/programs/system/src/invoke/nullify_state.rs deleted file mode 100644 index d8031e971b..0000000000 --- a/programs/system/src/invoke/nullify_state.rs +++ /dev/null @@ -1,110 +0,0 @@ -// use account_compression::append_nullify_create_address::AppendNullifyCreateAddressInputs; -// use anchor_lang::{prelude::*, solana_program::pubkey::Pubkey, Bumps}; -// use light_macros::heap_neutral; - -// use crate::{ -// constants::CPI_AUTHORITY_PDA_BUMP, -// invoke::cpi_acp::{create_cpi_data, get_index_or_insert, CpiData}, -// invoke_cpi::verify_signer::check_program_owner_state_merkle_tree, -// sdk::{ -// accounts::{InvokeAccounts, SignerAccounts}, -// compressed_account::PackedCompressedAccountWithMerkleContext, -// }, -// }; - -// /// 1. Checks that if nullifier queue has program_owner it invoking_program is -// /// program_owner. -// /// 2. Inserts nullifiers into the queue. 
-// #[heap_neutral] -// pub fn insert_nullifiers< -// 'a, -// 'b, -// 'c: 'info, -// 'info, -// A: InvokeAccounts<'info> + SignerAccounts<'info> + Bumps, -// >( -// input_compressed_accounts_with_merkle_context: &'a [PackedCompressedAccountWithMerkleContext], -// ctx: &'a Context<'a, 'b, 'c, 'info, A>, -// nullifiers: &'a [[u8; 32]], -// invoking_program: &Option, -// // tx_hash: [u8; 32], -// ) -> Result> { -// light_heap::bench_sbf_start!("cpda_insert_nullifiers_prep_accs"); -// msg!( -// "get_account_compression_authority {:?}", -// ctx.accounts.get_account_compression_authority().key() -// ); -// let num_leaves = 0; -// let num_nullifiers = nullifiers.len() as u8; -// let num_new_addresses = 0; -// let CpiData { -// mut bytes, -// mut account_indices, -// mut accounts, -// mut account_infos, -// .. -// } = create_cpi_data(ctx, num_nullifiers, num_nullifiers, num_new_addresses)?; - -// let mut append_nullify_create_address_inputs = AppendNullifyCreateAddressInputs::new( -// &mut bytes, -// num_leaves, -// num_nullifiers, -// num_new_addresses, -// ) -// .map_err(ProgramError::from)?; -// append_nullify_create_address_inputs.set_invoked_by_program(true); -// // append_nullify_create_address_inputs.tx_hash = tx_hash; -// append_nullify_create_address_inputs.bump = CPI_AUTHORITY_PDA_BUMP; -// // let mut leaf_indices = Vec::with_capacity(input_compressed_accounts_with_merkle_context.len()); -// // let mut prove_by_index = -// // Vec::with_capacity(input_compressed_accounts_with_merkle_context.len()); -// // If the transaction contains at least one input compressed account a -// // network fee is paid. This network fee is paid in addition to the address -// // network fee. The network fee is paid once per transaction, defined in the -// // state Merkle tree and transferred to the nullifier queue because the -// // nullifier queue is mutable. The network fee field in the queue is not -// // used. 
-// let mut network_fee_bundle = None; -// for (i, account) in input_compressed_accounts_with_merkle_context -// .iter() -// .enumerate() -// { -// append_nullify_create_address_inputs.nullifiers[i].account_hash = nullifiers[i]; -// append_nullify_create_address_inputs.nullifiers[i].leaf_index = -// account.merkle_context.leaf_index.into(); -// append_nullify_create_address_inputs.nullifiers[i].prove_by_index = -// account.merkle_context.prove_by_index as u8; -// // let queue_index = get_index_or_insert( -// // account.merkle_context.nullifier_queue_pubkey_index, -// // &mut account_indices, -// // &mut account_infos, -// // &mut accounts, -// // ctx.remaining_accounts, -// // ); -// // append_nullify_create_address_inputs.nullifiers[i].queue_index = queue_index; -// // let tree_index = get_index_or_insert( -// // account.merkle_context.merkle_tree_pubkey_index, -// // &mut account_indices, -// // &mut account_infos, -// // &mut accounts, -// // ctx.remaining_accounts, -// // ); -// // append_nullify_create_address_inputs.nullifiers[i].tree_index = tree_index; - -// // 1. Check invoking signer is eligible to write to the nullifier queue. 
-// let (_, network_fee, _, _) = check_program_owner_state_merkle_tree::( -// &ctx.remaining_accounts[account.merkle_context.merkle_tree_pubkey_index as usize], -// invoking_program, -// )?; -// if network_fee_bundle.is_none() && network_fee.is_some() { -// network_fee_bundle = Some(( -// account.merkle_context.nullifier_queue_pubkey_index, -// network_fee.unwrap(), -// )); -// } -// } -// append_nullify_create_address_inputs.num_queues = account_indices.len() as u8 / 2; -// light_heap::bench_sbf_end!("cpda_insert_nullifiers_prep_accs"); -// light_heap::bench_sbf_start!("cpda_instruction_data"); -// Ok(network_fee_bundle) -// } diff --git a/programs/system/src/invoke/verify_proof.rs b/programs/system/src/invoke/verify_proof.rs deleted file mode 100644 index c71c3fa584..0000000000 --- a/programs/system/src/invoke/verify_proof.rs +++ /dev/null @@ -1,546 +0,0 @@ -use std::mem; - -use account_compression::{ - append_nullify_create_address::{AppendNullifyCreateAddressInputs, InsertNullifierInput}, - errors::AccountCompressionErrorCode, - AddressMerkleTreeAccount, StateMerkleTreeAccount, -}; -use anchor_lang::{prelude::*, Discriminator}; -use light_batched_merkle_tree::{ - constants::{DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_STATE_TREE_HEIGHT}, - merkle_tree::BatchedMerkleTreeAccount, - queue::BatchedQueueAccount, -}; -use light_concurrent_merkle_tree::zero_copy::ConcurrentMerkleTreeZeroCopy; -use light_hasher::{Discriminator as LightDiscriminator, Hasher, Poseidon}; -use light_indexed_merkle_tree::zero_copy::IndexedMerkleTreeZeroCopy; -use light_macros::heap_neutral; -use light_utils::{ - hash_to_bn254_field_size_be, - hashchain::{create_hash_chain_from_slice, create_two_inputs_hash_chain}, -}; -use light_verifier::{ - select_verifying_key, verify_create_addresses_and_inclusion_proof, - verify_create_addresses_proof, verify_inclusion_proof, CompressedProof, -}; - -use super::cpi_acp::CpiData; -use crate::{ - errors::SystemProgramError, - instruction_data::{ - 
ZNewAddressParamsPacked, ZPackedCompressedAccountWithMerkleContext, ZPackedReadOnlyAddress, - ZPackedReadOnlyCompressedAccount, - }, - invoke_cpi::verify_signer::check_program_owner_state_merkle_tree, -}; - -const IS_READ_ONLY: bool = true; -const IS_NOT_READ_ONLY: bool = false; -const IS_STATE: bool = true; -const IS_NOT_STATE: bool = false; - -#[inline(never)] -#[heap_neutral] -pub fn read_input_state_roots<'a>( - remaining_accounts: &'a [AccountInfo<'_>], - input_compressed_accounts_with_merkle_context: &'a [ZPackedCompressedAccountWithMerkleContext<'a>], - read_only_accounts: &'a [ZPackedReadOnlyCompressedAccount], - input_roots: &'a mut Vec<[u8; 32]>, -) -> Result { - let mut state_tree_height = 0; - for input_compressed_account_with_context in - input_compressed_accounts_with_merkle_context.iter() - { - if input_compressed_account_with_context - .merkle_context - .prove_by_index() - { - continue; - } - let internal_height = read_root::( - &remaining_accounts[input_compressed_account_with_context - .merkle_context - .merkle_tree_pubkey_index as usize], - u16::from(input_compressed_account_with_context.root_index), - input_roots, - )?; - if state_tree_height == 0 { - state_tree_height = internal_height; - } else if state_tree_height != internal_height { - msg!( - "tree height {} != internal height {}", - state_tree_height, - internal_height - ); - return err!(SystemProgramError::InvalidStateTreeHeight); - } - } - for readonly_input_account in read_only_accounts.iter() { - if readonly_input_account.merkle_context.prove_by_index() { - continue; - } - let internal_height = read_root::( - &remaining_accounts[readonly_input_account - .merkle_context - .merkle_tree_pubkey_index as usize], - readonly_input_account.root_index.into(), - input_roots, - )?; - if state_tree_height == 0 { - state_tree_height = internal_height; - } else if state_tree_height != internal_height { - msg!( - "tree height {} != internal height {}", - state_tree_height, - internal_height - ); - 
return err!(SystemProgramError::InvalidStateTreeHeight); - } - } - Ok(state_tree_height) -} - -#[inline(never)] -#[heap_neutral] -pub fn read_address_roots<'a>( - remaining_accounts: &'a [AccountInfo<'_>], - new_address_params: &'a [ZNewAddressParamsPacked], - read_only_addresses: &'a [ZPackedReadOnlyAddress], - address_roots: &'a mut Vec<[u8; 32]>, -) -> Result { - let mut address_tree_height = 0; - for new_address_param in new_address_params.iter() { - let internal_height = read_root::( - &remaining_accounts[new_address_param.address_merkle_tree_account_index as usize], - new_address_param.address_merkle_tree_root_index.into(), - address_roots, - )?; - if address_tree_height == 0 { - address_tree_height = internal_height; - } else if address_tree_height != internal_height { - msg!( - "tree height {} != internal height {}", - address_tree_height, - internal_height - ); - return err!(SystemProgramError::InvalidAddressTreeHeight); - } - } - for read_only_address in read_only_addresses.iter() { - let internal_height = read_root::( - &remaining_accounts[read_only_address.address_merkle_tree_account_index as usize], - read_only_address.address_merkle_tree_root_index.into(), - address_roots, - )?; - if address_tree_height == 0 { - address_tree_height = internal_height; - } else if address_tree_height != internal_height { - msg!( - "tree height {} != internal height {}", - address_tree_height, - internal_height - ); - return err!(SystemProgramError::InvalidAddressTreeHeight); - } - } - - Ok(address_tree_height) -} - -fn read_root( - merkle_tree_account_info: &AccountInfo<'_>, - root_index: u16, - roots: &mut Vec<[u8; 32]>, -) -> Result { - let height; - let mut discriminator_bytes = [0u8; 8]; - discriminator_bytes.copy_from_slice(&merkle_tree_account_info.try_borrow_data()?[0..8]); - match discriminator_bytes { - AddressMerkleTreeAccount::DISCRIMINATOR => { - if IS_READ_ONLY { - msg!("Read only addresses are only supported for batched address trees."); - return err!( - 
AccountCompressionErrorCode::AddressMerkleTreeAccountDiscriminatorMismatch - ); - } - let merkle_tree = merkle_tree_account_info.try_borrow_data()?; - let merkle_tree = - IndexedMerkleTreeZeroCopy::::from_bytes_zero_copy( - &merkle_tree[8 + mem::size_of::()..], - ) - .map_err(ProgramError::from)?; - height = merkle_tree.height as u8; - (*roots).push(merkle_tree.roots[root_index as usize]); - } - BatchedMerkleTreeAccount::DISCRIMINATOR => { - if IS_STATE { - let merkle_tree = - BatchedMerkleTreeAccount::state_from_account_info(merkle_tree_account_info) - .map_err(ProgramError::from)?; - (*roots).push(merkle_tree.root_history[root_index as usize]); - height = merkle_tree.height as u8; - } else { - let merkle_tree = - BatchedMerkleTreeAccount::address_from_account_info(merkle_tree_account_info) - .map_err(ProgramError::from)?; - height = merkle_tree.height as u8; - (*roots).push(merkle_tree.root_history[root_index as usize]); - } - } - StateMerkleTreeAccount::DISCRIMINATOR => { - if IS_READ_ONLY { - msg!("Read only addresses are only supported for batched address trees."); - return err!( - AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch - ); - } - - let merkle_tree = &mut merkle_tree_account_info.try_borrow_mut_data()?; - let merkle_tree = ConcurrentMerkleTreeZeroCopy::::from_bytes_zero_copy( - &merkle_tree[8 + mem::size_of::()..], - ) - .map_err(ProgramError::from)?; - - let fetched_roots = &merkle_tree.roots; - - (*roots).push(fetched_roots[root_index as usize]); - height = merkle_tree.height as u8; - } - _ => { - if IS_STATE { - return err!( - AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch - ); - } else { - return err!( - AccountCompressionErrorCode::AddressMerkleTreeAccountDiscriminatorMismatch - ); - } - } - } - Ok(height) -} - -/// For each read-only account -/// 1. prove inclusion by index in the output queue if leaf index should exist in the output queue. -/// 1.1. if inclusion was proven by index, return Ok. 
-/// 2. prove non-inclusion in the bloom filters -/// 2.1. skip cleared batches. -/// 2.2. prove non-inclusion in the bloom filters for each batch. -#[inline(always)] -pub fn verify_read_only_account_inclusion_by_index<'a>( - remaining_accounts: &'a [AccountInfo<'_>], - read_only_accounts: &'a [ZPackedReadOnlyCompressedAccount], -) -> Result { - let mut num_prove_read_only_accounts_prove_by_index = 0; - for read_only_account in read_only_accounts.iter() { - let output_queue_account_info = &remaining_accounts[read_only_account - .merkle_context - .nullifier_queue_pubkey_index - as usize]; - let output_queue = - &mut BatchedQueueAccount::output_from_account_info(output_queue_account_info) - .map_err(ProgramError::from)?; - let merkle_tree_account_info = - &remaining_accounts[read_only_account.merkle_context.merkle_tree_pubkey_index as usize]; - output_queue - .check_is_associated(&(*merkle_tree_account_info.key).into()) - .map_err(ProgramError::from)?; - - // Checks inclusion by index in the output queue if leaf index should exist in the output queue. - // Else does nothing. - let proved_inclusion = output_queue - .prove_inclusion_by_index( - read_only_account.merkle_context.leaf_index.into(), - &read_only_account.account_hash, - ) - .map_err(|_| SystemProgramError::ReadOnlyAccountDoesNotExist)?; - if read_only_account.merkle_context.prove_by_index() { - num_prove_read_only_accounts_prove_by_index += 1; - } - // If a read-only account is marked as proven by index - // inclusion proof by index has to be successful - // -> proved_inclusion == true. - if !proved_inclusion && read_only_account.merkle_context.prove_by_index() { - msg!("Expected read-only account in the output queue but account does not exist."); - return err!(SystemProgramError::ReadOnlyAccountDoesNotExist); - } - // If we prove inclusion by index we do not need to check non-inclusion in bloom filters. 
- // Since proving inclusion by index of non-read - // only accounts overwrites the leaf in the output queue. - if !proved_inclusion { - let merkle_tree = - &mut BatchedMerkleTreeAccount::state_from_account_info(merkle_tree_account_info) - .map_err(ProgramError::from)?; - merkle_tree - .check_input_queue_non_inclusion(&read_only_account.account_hash) - .map_err(|_| SystemProgramError::ReadOnlyAccountDoesNotExist)?; - } - } - Ok(num_prove_read_only_accounts_prove_by_index) -} - -#[inline(always)] -pub fn verify_read_only_address_queue_non_inclusion<'a>( - remaining_accounts: &'a [AccountInfo<'_>], - read_only_addresses: &'a [ZPackedReadOnlyAddress], -) -> Result<()> { - if read_only_addresses.is_empty() { - return Ok(()); - } - let mut index = read_only_addresses[0].address_merkle_tree_account_index; - let mut merkle_tree = - BatchedMerkleTreeAccount::address_from_account_info(&remaining_accounts[index as usize]) - .map_err(ProgramError::from)?; - for read_only_address in read_only_addresses.iter() { - if index != read_only_address.address_merkle_tree_account_index { - index = read_only_address.address_merkle_tree_account_index; - merkle_tree = BatchedMerkleTreeAccount::address_from_account_info( - &remaining_accounts[index as usize], - ) - .map_err(ProgramError::from)?; - } - merkle_tree - .check_input_queue_non_inclusion(&read_only_address.address) - .map_err(|_| SystemProgramError::ReadOnlyAddressAlreadyExists)?; - } - Ok(()) -} - -/// Hashes the input compressed accounts and stores the results in the leaves array. -/// Merkle tree pubkeys are hashed and stored in the hashed_pubkeys array. -/// Merkle tree pubkeys should be ordered for efficiency. 
-#[inline(always)] -// #[heap_neutral] -#[allow(unused_mut)] -pub fn hash_input_compressed_accounts<'a, 'b, 'c: 'info, 'info>( - remaining_accounts: &'info [AccountInfo<'info>], - input_compressed_accounts_with_merkle_context: &'a [ZPackedCompressedAccountWithMerkleContext<'a>], - invoking_program: &Option, - cpi_data: &mut CpiData<'info>, - cpi_ix_data: &mut AppendNullifyCreateAddressInputs<'a>, -) -> Result<(Option<(u8, u64)>, [u8; 32])> { - let mut network_fee_bundle = None; - let mut owner_pubkey = input_compressed_accounts_with_merkle_context[0] - .compressed_account - .owner; - let mut hashed_owner = hash_to_bn254_field_size_be(&owner_pubkey.to_bytes()) - .unwrap() - .0; - cpi_data - .hashed_pubkeys - .push((owner_pubkey.into(), hashed_owner)); - #[allow(unused)] - let mut current_hashed_mt = [0u8; 32]; - let mut hash_chain = [0u8; 32]; - - let mut current_mt_index: i16 = -1; - for (j, input_compressed_account_with_context) in input_compressed_accounts_with_merkle_context - .iter() - .enumerate() - { - // For heap neutrality we cannot allocate new heap memory in this function. 
- if let Some(address) = &input_compressed_account_with_context - .compressed_account - .address - { - cpi_data.addresses[j] = Some(**address); - } - - #[allow(clippy::comparison_chain)] - if current_mt_index - != input_compressed_account_with_context - .merkle_context - .merkle_tree_pubkey_index as i16 - { - current_mt_index = input_compressed_account_with_context - .merkle_context - .merkle_tree_pubkey_index as i16; - let merkle_tree_pubkey = remaining_accounts[input_compressed_account_with_context - .merkle_context - .merkle_tree_pubkey_index - as usize] - .key(); - current_hashed_mt = match cpi_data - .hashed_pubkeys - .iter() - .find(|x| x.0 == merkle_tree_pubkey) - { - Some(hashed_merkle_tree_pubkey) => hashed_merkle_tree_pubkey.1, - None => { - let hashed_merkle_tree_pubkey = - hash_to_bn254_field_size_be(&merkle_tree_pubkey.to_bytes()) - .unwrap() - .0; - cpi_data - .hashed_pubkeys - .push((merkle_tree_pubkey, hashed_merkle_tree_pubkey)); - hashed_merkle_tree_pubkey - } - }; - } - // Without cpi context all input compressed accounts have the same owner. - // With cpi context the owners will be different. 
- if owner_pubkey - != input_compressed_account_with_context - .compressed_account - .owner - { - owner_pubkey = input_compressed_account_with_context - .compressed_account - .owner; - hashed_owner = match cpi_data.hashed_pubkeys.iter().find(|x| { - x.0 == input_compressed_account_with_context - .compressed_account - .owner - .into() - }) { - Some(hashed_owner) => hashed_owner.1, - None => { - let hashed_owner = hash_to_bn254_field_size_be( - &input_compressed_account_with_context - .compressed_account - .owner - .to_bytes(), - ) - .unwrap() - .0; - cpi_data.hashed_pubkeys.push(( - input_compressed_account_with_context - .compressed_account - .owner - .into(), - hashed_owner, - )); - hashed_owner - } - }; - } - let queue_index = cpi_data.get_index_or_insert( - input_compressed_account_with_context - .merkle_context - .nullifier_queue_pubkey_index, - remaining_accounts, - ); - let tree_index = cpi_data.get_index_or_insert( - input_compressed_account_with_context - .merkle_context - .merkle_tree_pubkey_index, - remaining_accounts, - ); - cpi_ix_data.nullifiers[j] = InsertNullifierInput { - account_hash: input_compressed_account_with_context - .compressed_account - .hash_with_hashed_values::( - &hashed_owner, - ¤t_hashed_mt, - &input_compressed_account_with_context - .merkle_context - .leaf_index - .into(), - )?, - leaf_index: input_compressed_account_with_context - .merkle_context - .leaf_index - .into(), - prove_by_index: input_compressed_account_with_context - .merkle_context - .prove_by_index() as u8, - queue_index, - tree_index, - }; - if j == 0 { - hash_chain = cpi_ix_data.nullifiers[j].account_hash; - } else { - hash_chain = Poseidon::hashv(&[&hash_chain, &cpi_ix_data.nullifiers[j].account_hash]) - .map_err(ProgramError::from)?; - } - // TODO: transfer network fee with set_rollover_fee once we switch to context - let (_, network_fee, _, _, _) = check_program_owner_state_merkle_tree::( - &remaining_accounts[input_compressed_account_with_context - .merkle_context 
- .merkle_tree_pubkey_index as usize], - invoking_program, - )?; - if network_fee_bundle.is_none() && network_fee.is_some() { - network_fee_bundle = Some(( - input_compressed_account_with_context - .merkle_context - .nullifier_queue_pubkey_index, - network_fee.unwrap(), - )); - } - } - cpi_ix_data.num_queues = input_compressed_accounts_with_merkle_context - .iter() - .enumerate() - .filter(|(i, x)| { - let candidate = x.merkle_context.nullifier_queue_pubkey_index; - !input_compressed_accounts_with_merkle_context[..*i] - .iter() - .any(|y| y.merkle_context.nullifier_queue_pubkey_index == candidate) - }) - .count() as u8; - - Ok((network_fee_bundle, hash_chain)) -} - -#[allow(clippy::too_many_arguments)] -#[heap_neutral] -pub fn verify_proof( - roots: &[[u8; 32]], - leaves: &[[u8; 32]], - address_roots: &[[u8; 32]], - addresses: &[[u8; 32]], - compressed_proof: &CompressedProof, - address_tree_height: u8, - state_tree_height: u8, -) -> anchor_lang::Result<()> { - if state_tree_height as u32 == DEFAULT_BATCH_STATE_TREE_HEIGHT - || address_tree_height as u32 == DEFAULT_BATCH_ADDRESS_TREE_HEIGHT - { - let public_input_hash = if !leaves.is_empty() && !addresses.is_empty() { - // combined inclusion & non-inclusion proof - let inclusion_hash = - create_two_inputs_hash_chain(roots, leaves).map_err(ProgramError::from)?; - let non_inclusion_hash = create_two_inputs_hash_chain(address_roots, addresses) - .map_err(ProgramError::from)?; - create_hash_chain_from_slice(&[inclusion_hash, non_inclusion_hash]) - .map_err(ProgramError::from)? - } else if !leaves.is_empty() { - // inclusion proof - create_two_inputs_hash_chain(roots, leaves).map_err(ProgramError::from)? - } else { - // TODO: compute with addresses - // non-inclusion proof - create_two_inputs_hash_chain(address_roots, addresses).map_err(ProgramError::from)? 
- }; - - let vk = select_verifying_key(leaves.len(), addresses.len()).map_err(ProgramError::from)?; - light_verifier::verify(&[public_input_hash], compressed_proof, vk) - .map_err(ProgramError::from)?; - } else if state_tree_height == 26 && address_tree_height == 26 { - // legacy combined inclusion & non-inclusion proof - verify_create_addresses_and_inclusion_proof( - roots, - leaves, - address_roots, - addresses, - compressed_proof, - ) - .map_err(ProgramError::from)?; - } else if state_tree_height == 26 { - // legacy inclusion proof - verify_inclusion_proof(roots, leaves, compressed_proof).map_err(ProgramError::from)?; - } else if address_tree_height == 26 { - // legacy non-inclusion proof - verify_create_addresses_proof(address_roots, addresses, compressed_proof) - .map_err(ProgramError::from)?; - } else { - msg!("state tree height: {}", state_tree_height); - msg!("address tree height: {}", address_tree_height); - return err!(SystemProgramError::InvalidAddressTreeHeight); - } - - Ok(()) -} diff --git a/programs/system/src/invoke_cpi/account.rs b/programs/system/src/invoke_cpi/account.rs index 5e8724aa16..e32b5d9286 100644 --- a/programs/system/src/invoke_cpi/account.rs +++ b/programs/system/src/invoke_cpi/account.rs @@ -1,7 +1,11 @@ +use std::slice; + use aligned_sized::aligned_sized; use anchor_lang::prelude::*; +use light_zero_copy::{borsh::Deserialize, errors::ZeroCopyError}; +use zerocopy::{little_endian::U32, Ref}; -use crate::InstructionDataInvokeCpi; +use crate::{instruction_data::ZInstructionDataInvokeCpi, InstructionDataInvokeCpi}; /// Collects instruction data without executing a compressed transaction. /// Signer checks are performed on instruction data. 
@@ -12,9 +16,11 @@ use crate::InstructionDataInvokeCpi; #[aligned_sized(anchor)] #[derive(Debug, PartialEq, Default)] #[account] +#[repr(C)] pub struct CpiContextAccount { pub fee_payer: Pubkey, pub associated_merkle_tree: Pubkey, + // Offset 72 pub context: Vec, } @@ -24,3 +30,34 @@ impl CpiContextAccount { self.context = Vec::new(); } } + +pub struct ZCpiContextAccount<'a> { + pub fee_payer: Ref<&'a mut [u8], light_utils::pubkey::Pubkey>, + pub associated_merkle_tree: Ref<&'a mut [u8], light_utils::pubkey::Pubkey>, + pub context: Vec>, +} + +// TODO: test +pub fn deserialize_cpi_context_account<'info, 'a>( + account_info: &AccountInfo<'info>, +) -> std::result::Result, ZeroCopyError> { + let mut account_data = account_info.try_borrow_mut_data().unwrap(); + let data = unsafe { slice::from_raw_parts_mut(account_data.as_mut_ptr(), account_data.len()) }; + let (fee_payer, data) = Ref::<&'a mut [u8], light_utils::pubkey::Pubkey>::from_prefix(data)?; + let (associated_merkle_tree, data) = + Ref::<&'a mut [u8], light_utils::pubkey::Pubkey>::from_prefix(data)?; + let (len, data) = Ref::<&'a mut [u8], U32>::from_prefix(data)?; + let mut data = &*data; + let mut context = Vec::new(); + + for _ in 0..(u64::from(*len)) as usize { + let (context_item, new_data) = ZInstructionDataInvokeCpi::deserialize_at(data)?; + context.push(context_item); + data = new_data; + } + Ok(ZCpiContextAccount { + fee_payer, + associated_merkle_tree, + context, + }) +} diff --git a/programs/system/src/invoke_cpi/instruction.rs b/programs/system/src/invoke_cpi/instruction.rs index 7dae64f1bf..32bb614761 100644 --- a/programs/system/src/invoke_cpi/instruction.rs +++ b/programs/system/src/invoke_cpi/instruction.rs @@ -1,4 +1,4 @@ -use account_compression::{program::AccountCompression, utils::constants::CPI_AUTHORITY_PDA_SEED}; +use account_compression::program::AccountCompression; use anchor_lang::{ prelude::*, solana_program::pubkey::Pubkey, system_program::System, AnchorDeserialize, AnchorSerialize, 
@@ -6,7 +6,8 @@ use anchor_lang::{ use super::account::CpiContextAccount; use crate::{ - invoke::{processor::CompressedProof, sol_compression::SOL_POOL_PDA_SEED}, + instruction_data::ZInstructionDataInvokeCpi, + processor::{processor::CompressedProof, sol_compression::SOL_POOL_PDA_SEED}, sdk::{ accounts::{InvokeAccounts, SignerAccounts}, compressed_account::{ @@ -23,15 +24,11 @@ pub struct InvokeCpiInstruction<'info> { #[account(mut)] pub fee_payer: Signer<'info>, pub authority: Signer<'info>, - /// CHECK: - #[account( - seeds = [&crate::ID.to_bytes()], bump, seeds::program = &account_compression::ID, - )] + /// CHECK: in account compression program pub registered_program_pda: AccountInfo<'info>, /// CHECK: checked in emit_event.rs. pub noop_program: UncheckedAccount<'info>, - /// CHECK: - #[account(seeds = [CPI_AUTHORITY_PDA_SEED], bump)] + /// CHECK: used to invoke account compression program cpi sign will fail if invalid account is provided seeds = [CPI_AUTHORITY_PDA_SEED]. pub account_compression_authority: UncheckedAccount<'info>, /// CHECK: pub account_compression_program: Program<'info, AccountCompression>, @@ -102,15 +99,19 @@ pub struct InstructionDataInvokeCpi { pub cpi_context: Option, } -impl InstructionDataInvokeCpi { - pub fn combine(&mut self, other: &[InstructionDataInvokeCpi]) { +impl<'a, 'info: 'a> ZInstructionDataInvokeCpi<'a> { + pub fn combine(&mut self, other: Vec>) { for other in other { - self.new_address_params - .extend_from_slice(&other.new_address_params); - self.input_compressed_accounts_with_merkle_context - .extend_from_slice(&other.input_compressed_accounts_with_merkle_context); - self.output_compressed_accounts - .extend_from_slice(&other.output_compressed_accounts); + // TODO: support address creation with cpi context + // self.new_address_params + // .extend_from_slice(&other.new_address_params); + for i in other.input_compressed_accounts_with_merkle_context.iter() { + self.input_compressed_accounts_with_merkle_context + 
.push((*i).clone()); + } + for i in other.output_compressed_accounts.iter() { + self.output_compressed_accounts.push((*i).clone()); + } } } } @@ -126,8 +127,11 @@ pub struct InstructionDataInvokeCpiWithReadOnly { mod tests { use std::vec; + use anchor_lang::AnchorSerialize; + use light_zero_copy::borsh::Deserialize; + use crate::{ - invoke::processor::CompressedProof, + instruction_data::ZInstructionDataInvokeCpi, processor::processor::CompressedProof, sdk::compressed_account::PackedCompressedAccountWithMerkleContext, InstructionDataInvokeCpi, NewAddressParamsPacked, OutputCompressedAccountWithPackedContext, }; @@ -135,7 +139,7 @@ mod tests { // test combine instruction data transfer #[test] fn test_combine_instruction_data_transfer() { - let mut instruction_data_transfer = InstructionDataInvokeCpi { + let instruction_data_transfer = InstructionDataInvokeCpi { proof: Some(CompressedProof { a: [0; 32], b: [0; 64], @@ -167,7 +171,14 @@ mod tests { new_address_params: vec![NewAddressParamsPacked::default()], cpi_context: None, }; - instruction_data_transfer.combine(&[other]); + let mut vec = Vec::new(); + instruction_data_transfer.serialize(&mut vec).unwrap(); + let mut other_vec = Vec::new(); + other.serialize(&mut other_vec).unwrap(); + let (mut instruction_data_transfer, _) = + ZInstructionDataInvokeCpi::deserialize_at(&vec).unwrap(); + let (other, _) = ZInstructionDataInvokeCpi::deserialize_at(&other_vec).unwrap(); + instruction_data_transfer.combine(vec![other]); assert_eq!(instruction_data_transfer.new_address_params.len(), 2); assert_eq!( instruction_data_transfer diff --git a/programs/system/src/invoke_cpi/process_cpi_context.rs b/programs/system/src/invoke_cpi/process_cpi_context.rs index dd579b101c..81569cea14 100644 --- a/programs/system/src/invoke_cpi/process_cpi_context.rs +++ b/programs/system/src/invoke_cpi/process_cpi_context.rs @@ -1,7 +1,13 @@ use anchor_lang::prelude::*; -use super::{account::CpiContextAccount, InstructionDataInvokeCpi}; -use 
crate::{errors::SystemProgramError, instruction_data::ZInstructionDataInvokeCpi}; +use super::{ + account::{deserialize_cpi_context_account, CpiContextAccount}, + InstructionDataInvokeCpi, +}; +use crate::{ + errors::SystemProgramError, instruction_data::ZInstructionDataInvokeCpi, + sdk::compressed_account::PackedCompressedAccountWithMerkleContext, +}; /// Cpi context enables the use of input compressed accounts owned by different /// programs. @@ -26,7 +32,7 @@ use crate::{errors::SystemProgramError, instruction_data::ZInstructionDataInvoke /// with verified inputs from the cpi context. The proof is verified and /// other state transition is executed with the combined inputs. pub fn process_cpi_context<'a, 'info>( - inputs: ZInstructionDataInvokeCpi<'a>, + mut inputs: ZInstructionDataInvokeCpi<'a>, cpi_context_account: &mut Option>, fee_payer: Pubkey, remaining_accounts: &[AccountInfo<'info>], @@ -38,7 +44,6 @@ pub fn process_cpi_context<'a, 'info>( } if let Some(cpi_context) = cpi_context { - unimplemented!("cpda_process_cpi_context"); let cpi_context_account = match cpi_context_account { Some(cpi_context_account) => cpi_context_account, None => return err!(SystemProgramError::CpiContextAccountUndefined), @@ -65,7 +70,7 @@ pub fn process_cpi_context<'a, 'info>( return err!(SystemProgramError::CpiContextAssociatedMerkleTreeMismatch); } if cpi_context.set_context() { - // set_cpi_context(fee_payer, cpi_context_account, inputs)?; + set_cpi_context(fee_payer, cpi_context_account, inputs)?; return Ok(None); } else { if cpi_context_account.context.is_empty() { @@ -80,7 +85,11 @@ pub fn process_cpi_context<'a, 'info>( msg!("cpi context : {:?}", cpi_context); return err!(SystemProgramError::CpiContextFeePayerMismatch); } - // inputs.combine(&cpi_context_account.context); + + let z_cpi_context_account = + deserialize_cpi_context_account(&cpi_context_account.to_account_info()) + .map_err(ProgramError::from)?; + inputs.combine(z_cpi_context_account.context); // Reset cpi 
context account cpi_context_account.context = Vec::new(); cpi_context_account.fee_payer = Pubkey::default(); @@ -89,44 +98,70 @@ pub fn process_cpi_context<'a, 'info>( Ok(Some(inputs)) } -// pub fn set_cpi_context( -// fee_payer: Pubkey, -// cpi_context_account: &mut CpiContextAccount, -// inputs: ZInstructionDataInvokeCpi, -// ) -> Result<()> { -// // SAFETY Assumptions: -// // - previous data in cpi_context_account -// // -> we require the account to be cleared in the beginning of a -// // transaction -// // - leaf over data: There cannot be any leftover data in the -// // account since if the transaction fails the account doesn't change. - -// // Expected usage: -// // 1. The first invocation is marked with -// // No need to store the proof (except in first invokation), -// // cpi context, compress_or_decompress_lamports, -// // relay_fee -// // 2. Subsequent invocations check the proof and fee payer -// if inputs.cpi_context.unwrap().first_set_context { -// clean_input_data(&mut inputs); -// cpi_context_account.context = vec![inputs]; -// cpi_context_account.fee_payer = fee_payer; -// } else if fee_payer == cpi_context_account.fee_payer && !cpi_context_account.context.is_empty() -// { -// clean_input_data(&mut inputs); -// cpi_context_account.context.push(inputs); -// } else { -// msg!(" {} != {}", fee_payer, cpi_context_account.fee_payer); -// return err!(SystemProgramError::CpiContextFeePayerMismatch); -// } -// Ok(()) -// } +pub fn set_cpi_context( + fee_payer: Pubkey, + cpi_context_account: &mut CpiContextAccount, + inputs: ZInstructionDataInvokeCpi, +) -> Result<()> { + // SAFETY Assumptions: + // - previous data in cpi_context_account + // -> we require the account to be cleared in the beginning of a + // transaction + // - leaf over data: There cannot be any leftover data in the + // account since if the transaction fails the account doesn't change. + + // Expected usage: + // 1. 
The first invocation is marked with + // No need to store the proof (except in first invokation), + // cpi context, compress_or_decompress_lamports, + // relay_fee + // 2. Subsequent invocations check the proof and fee payer + if inputs.cpi_context.unwrap().first_set_context() { + if !inputs.new_address_params.is_empty() { + unimplemented!("new addresses are not supported with cpi context"); + } + cpi_context_account.context = vec![(&inputs).into()]; + cpi_context_account.fee_payer = fee_payer; + } else if fee_payer == cpi_context_account.fee_payer && !cpi_context_account.context.is_empty() + { + if !inputs.new_address_params.is_empty() { + unimplemented!("new addresses are not supported with cpi context"); + } + cpi_context_account.context.push((&inputs).into()); + } else { + msg!(" {} != {}", fee_payer, cpi_context_account.fee_payer); + return err!(SystemProgramError::CpiContextFeePayerMismatch); + } + Ok(()) +} -fn clean_input_data(inputs: &mut InstructionDataInvokeCpi) { - inputs.cpi_context = None; - inputs.compress_or_decompress_lamports = None; - inputs.relay_fee = None; - inputs.proof = None; +/// fn clean_input_data(inputs: &mut InstructionDataInvokeCpi) { +/// inputs.cpi_context = None; +/// inputs.compress_or_decompress_lamports = None; +/// inputs.relay_fee = None; +/// inputs.proof = None; +/// } +impl From<&ZInstructionDataInvokeCpi<'_>> for InstructionDataInvokeCpi { + fn from(data: &ZInstructionDataInvokeCpi<'_>) -> Self { + Self { + proof: None, + new_address_params: vec![], + input_compressed_accounts_with_merkle_context: data + .input_compressed_accounts_with_merkle_context + .iter() + .map(|x| PackedCompressedAccountWithMerkleContext::from(x)) + .collect::>(), + output_compressed_accounts: data + .output_compressed_accounts + .iter() + .map(|x| crate::OutputCompressedAccountWithPackedContext::from(x)) + .collect::>(), + relay_fee: None, + compress_or_decompress_lamports: None, + is_compress: data.is_compress, + cpi_context: None, + } + } } // 
/// Set cpi context tests: diff --git a/programs/system/src/invoke_cpi/processor.rs b/programs/system/src/invoke_cpi/processor.rs index 4c4858d7e0..97d0faec9a 100644 --- a/programs/system/src/invoke_cpi/processor.rs +++ b/programs/system/src/invoke_cpi/processor.rs @@ -1,4 +1,5 @@ pub use anchor_lang::prelude::*; +#[cfg(feature = "bench-sbf")] use light_heap::{bench_sbf_end, bench_sbf_start}; use light_zero_copy::slice::ZeroCopySliceBorsh; @@ -7,8 +8,8 @@ use crate::{ instruction_data::{ ZInstructionDataInvokeCpi, ZPackedReadOnlyAddress, ZPackedReadOnlyCompressedAccount, }, - invoke::processor::process, invoke_cpi::instruction::InvokeCpiInstruction, + processor::processor::process, sdk::accounts::SignerAccounts, }; @@ -23,6 +24,7 @@ pub fn process_invoke_cpi<'a, 'b, 'c: 'info + 'b, 'info>( read_only_addresses: Option>, read_only_accounts: Option>, ) -> Result<()> { + #[cfg(feature = "bench-sbf")] bench_sbf_start!("cpda_cpi_signer_checks"); cpi_signer_checks( &ctx.accounts.invoking_program.key(), @@ -30,7 +32,9 @@ pub fn process_invoke_cpi<'a, 'b, 'c: 'info + 'b, 'info>( &inputs.input_compressed_accounts_with_merkle_context, &inputs.output_compressed_accounts, )?; + #[cfg(feature = "bench-sbf")] bench_sbf_end!("cpda_cpi_signer_checks"); + #[cfg(feature = "bench-sbf")] bench_sbf_start!("cpda_process_cpi_context"); #[allow(unused)] let mut cpi_context_inputs_len = if let Some(value) = ctx.accounts.cpi_context_account.as_ref() @@ -49,6 +53,7 @@ pub fn process_invoke_cpi<'a, 'b, 'c: 'info + 'b, 'info>( Ok(None) => return Ok(()), Err(err) => return Err(err), }; + #[cfg(feature = "bench-sbf")] bench_sbf_end!("cpda_process_cpi_context"); process( diff --git a/programs/system/src/invoke_cpi/verify_signer.rs b/programs/system/src/invoke_cpi/verify_signer.rs index 8c4b90de50..4954ff5e41 100644 --- a/programs/system/src/invoke_cpi/verify_signer.rs +++ b/programs/system/src/invoke_cpi/verify_signer.rs @@ -1,17 +1,8 @@ -use std::mem; +use 
account_compression::utils::constants::CPI_AUTHORITY_PDA_SEED; +use anchor_lang::prelude::*; -use account_compression::{ - errors::AccountCompressionErrorCode, utils::constants::CPI_AUTHORITY_PDA_SEED, - AddressMerkleTreeAccount, StateMerkleTreeAccount, -}; -use anchor_lang::{prelude::*, Discriminator}; -use light_batched_merkle_tree::{ - merkle_tree::BatchedMerkleTreeAccount, queue::BatchedQueueAccount, -}; -use light_concurrent_merkle_tree::zero_copy::ConcurrentMerkleTreeZeroCopy; -use light_hasher::{Discriminator as LightDiscriminator, Poseidon}; +#[cfg(feature = "bench-sbf")] use light_heap::{bench_sbf_end, bench_sbf_start}; -use light_macros::heap_neutral; use crate::{ errors::SystemProgramError, @@ -32,24 +23,30 @@ pub fn cpi_signer_checks( input_compressed_accounts_with_merkle_context: &[ZPackedCompressedAccountWithMerkleContext], output_compressed_accounts: &[ZOutputCompressedAccountWithPackedContext], ) -> Result<()> { + #[cfg(feature = "bench-sbf")] bench_sbf_start!("cpda_cpi_signer_checks"); cpi_signer_check(invoking_programid, authority)?; + #[cfg(feature = "bench-sbf")] bench_sbf_end!("cpda_cpi_signer_checks"); + #[cfg(feature = "bench-sbf")] bench_sbf_start!("cpd_input_checks"); input_compressed_accounts_signer_check( input_compressed_accounts_with_merkle_context, invoking_programid, )?; + #[cfg(feature = "bench-sbf")] bench_sbf_end!("cpd_input_checks"); + #[cfg(feature = "bench-sbf")] bench_sbf_start!("cpda_cpi_write_checks"); output_compressed_accounts_write_access_check(output_compressed_accounts, invoking_programid)?; + #[cfg(feature = "bench-sbf")] bench_sbf_end!("cpda_cpi_write_checks"); Ok(()) } /// Cpi signer check, validates that the provided invoking program /// is the actual invoking program. 
-#[heap_neutral] + pub fn cpi_signer_check(invoking_program: &Pubkey, authority: &Pubkey) -> Result<()> { let seeds = [CPI_AUTHORITY_PDA_SEED]; let derived_signer = Pubkey::try_find_program_address(&seeds, invoking_program) @@ -123,179 +120,6 @@ pub fn output_compressed_accounts_write_access_check( Ok(()) } -pub fn check_program_owner_state_merkle_tree<'a, 'info, const IS_NULLIFY: bool>( - merkle_tree_acc_info: &'info AccountInfo<'info>, - invoking_program: &Option, -) -> Result<(u32, Option, u64, Pubkey, u64)> { - let (seq, next_index, network_fee, program_owner, merkle_tree_pubkey, rollover_fee) = { - let mut discriminator_bytes = [0u8; 8]; - discriminator_bytes.copy_from_slice(&merkle_tree_acc_info.try_borrow_data()?[0..8]); - match discriminator_bytes { - StateMerkleTreeAccount::DISCRIMINATOR => { - let (seq, next_index) = { - let merkle_tree = merkle_tree_acc_info.try_borrow_mut_data()?; - let merkle_tree = - ConcurrentMerkleTreeZeroCopy::::from_bytes_zero_copy( - &merkle_tree[8 + mem::size_of::()..], - ) - .map_err(ProgramError::from)?; - - let seq = merkle_tree.sequence_number() as u64 + 1; - let next_index: u32 = merkle_tree.next_index().try_into().unwrap(); - (seq, next_index) - }; - let merkle_tree = - AccountLoader::::try_from(merkle_tree_acc_info) - .unwrap(); - let merkle_tree_unpacked = merkle_tree.load()?; - ( - seq, - next_index, - merkle_tree_unpacked.metadata.rollover_metadata.network_fee, - merkle_tree_unpacked.metadata.access_metadata.program_owner, - merkle_tree_acc_info.key(), - merkle_tree_unpacked.metadata.rollover_metadata.rollover_fee, - ) - } - BatchedMerkleTreeAccount::DISCRIMINATOR => { - if !IS_NULLIFY { - return err!( - AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch - ); - } - let merkle_tree = - BatchedMerkleTreeAccount::state_from_account_info(merkle_tree_acc_info) - .map_err(ProgramError::from)?; - let seq = merkle_tree.sequence_number + 1; - let next_index: u32 = 
merkle_tree.next_index.try_into().unwrap(); - - ( - seq, - next_index, - merkle_tree.metadata.rollover_metadata.network_fee, - merkle_tree.metadata.access_metadata.program_owner, - merkle_tree_acc_info.key(), - merkle_tree.metadata.rollover_metadata.rollover_fee, - ) - } - BatchedQueueAccount::DISCRIMINATOR => { - if IS_NULLIFY { - return err!( - AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch - ); - } - let merkle_tree = - BatchedQueueAccount::output_from_account_info(merkle_tree_acc_info) - .map_err(ProgramError::from)?; - let seq = u64::MAX; - let next_index: u32 = merkle_tree.batch_metadata.next_index.try_into().unwrap(); - ( - seq, - next_index, - merkle_tree.metadata.rollover_metadata.network_fee, - merkle_tree.metadata.access_metadata.program_owner, - merkle_tree.metadata.associated_merkle_tree.into(), - merkle_tree.metadata.rollover_metadata.rollover_fee, - ) - } - _ => { - return err!( - AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch - ); - } - } - }; - - let network_fee = if network_fee != 0 { - Some(network_fee) - } else { - None - }; - if program_owner != Pubkey::default().into() { - if let Some(invoking_program) = invoking_program { - if *invoking_program == program_owner.into() { - return Ok(( - next_index, - network_fee, - seq, - merkle_tree_pubkey, - rollover_fee, - )); - } - } - msg!( - "invoking_program.key() {:?} == merkle_tree_unpacked.program_owner {:?}", - invoking_program, - program_owner - ); - return err!(SystemProgramError::InvalidMerkleTreeOwner); - } - Ok(( - next_index, - network_fee, - seq, - merkle_tree_pubkey, - rollover_fee, - )) -} - -// TODO: extend to match batched trees -pub fn check_program_owner_address_merkle_tree<'a, 'b: 'a>( - merkle_tree_acc_info: &'b AccountInfo<'a>, - invoking_program: &Option, -) -> Result<(Option, u64)> { - let discriminator_bytes = merkle_tree_acc_info.try_borrow_data()?[0..8] - .try_into() - .unwrap(); - - let metadata = match discriminator_bytes { - 
AddressMerkleTreeAccount::DISCRIMINATOR => { - let merkle_tree = - AccountLoader::::try_from(merkle_tree_acc_info).unwrap(); - let merkle_tree_unpacked = merkle_tree.load()?; - merkle_tree_unpacked.metadata - } - BatchedMerkleTreeAccount::DISCRIMINATOR => { - let merkle_tree = - BatchedMerkleTreeAccount::address_from_account_info(merkle_tree_acc_info) - .map_err(ProgramError::from)?; - merkle_tree.metadata - } - _ => { - return err!( - AccountCompressionErrorCode::AddressMerkleTreeAccountDiscriminatorMismatch - ); - } - }; - - let network_fee = if metadata.rollover_metadata.network_fee != 0 { - Some(metadata.rollover_metadata.network_fee) - } else { - None - }; - - if metadata.access_metadata.program_owner != Pubkey::default().into() { - if let Some(invoking_program) = invoking_program { - if *invoking_program == metadata.access_metadata.program_owner.into() { - msg!( - "invoking_program.key() {:?} == merkle_tree_unpacked.program_owner {:?}", - invoking_program, - metadata.access_metadata.program_owner - ); - return Ok((network_fee, metadata.rollover_metadata.rollover_fee)); - } - } - msg!( - "invoking_program.key() {:?} == merkle_tree_unpacked.program_owner {:?}", - invoking_program, - metadata.access_metadata.program_owner - ); - err!(SystemProgramError::InvalidMerkleTreeOwner) - } else { - Ok((network_fee, metadata.rollover_metadata.rollover_fee)) - } -} - #[cfg(test)] mod test { use super::*; diff --git a/programs/system/src/lib.rs b/programs/system/src/lib.rs index 18246910d0..743473f64c 100644 --- a/programs/system/src/lib.rs +++ b/programs/system/src/lib.rs @@ -1,15 +1,20 @@ use anchor_lang::{prelude::*, solana_program::pubkey::Pubkey}; use light_hasher::Discriminator as LightDiscriminator; +mod check_accounts; pub mod instruction_data; -pub mod invoke; -pub use invoke::instruction::*; pub mod invoke_cpi; +pub mod processor; +pub use invoke::instruction::*; pub use invoke_cpi::{initialize::*, instruction::*}; +pub mod compressed_account; pub mod constants; 
+pub mod context; pub mod errors; +pub mod invoke; pub mod sdk; pub mod utils; + use errors::SystemProgramError; use light_zero_copy::borsh::Deserialize; use sdk::event::PublicTransactionEvent; @@ -32,16 +37,18 @@ pub mod light_system_program { use account_compression::{errors::AccountCompressionErrorCode, StateMerkleTreeAccount}; use anchor_lang::solana_program::log::sol_log_compute_units; use light_batched_merkle_tree::merkle_tree::BatchedMerkleTreeAccount; + #[cfg(feature = "bench-sbf")] use light_heap::{bench_sbf_end, bench_sbf_start}; - use crate::instruction_data::{ - ZInstructionDataInvoke, ZInstructionDataInvokeCpi, ZInstructionDataInvokeCpiWithReadOnly, + use crate::{ + instruction_data::{ + ZInstructionDataInvoke, ZInstructionDataInvokeCpi, + ZInstructionDataInvokeCpiWithReadOnly, + }, + invoke::verify_signer::input_compressed_accounts_signer_check, }; - use self::{ - invoke::{processor::process, verify_signer::input_compressed_accounts_signer_check}, - invoke_cpi::processor::process_invoke_cpi, - }; + use self::{invoke_cpi::processor::process_invoke_cpi, processor::processor::process}; use super::*; pub fn init_cpi_context_account(ctx: Context) -> Result<()> { @@ -67,37 +74,52 @@ pub mod light_system_program { ctx: Context<'a, 'b, 'c, 'info, InvokeInstruction<'info>>, inputs: Vec, ) -> Result<()> { + sol_log_compute_units(); + + #[cfg(feature = "bench-sbf")] bench_sbf_start!("invoke_deserialize"); msg!("Invoke instruction"); - sol_log_compute_units(); let (inputs, _) = ZInstructionDataInvoke::deserialize_at(inputs.as_slice()).unwrap(); sol_log_compute_units(); + #[cfg(feature = "bench-sbf")] bench_sbf_end!("invoke_deserialize"); input_compressed_accounts_signer_check( &inputs.input_compressed_accounts_with_merkle_context, &ctx.accounts.authority.key(), )?; - process(inputs, None, ctx, 0, None, None) + process(inputs, None, ctx, 0, None, None)?; + sol_log_compute_units(); + Ok(()) } pub fn invoke_cpi<'a, 'b, 'c: 'info, 'info>( ctx: Context<'a, 'b, 'c, 
'info, InvokeCpiInstruction<'info>>, inputs: Vec, ) -> Result<()> { + sol_log_compute_units(); + #[cfg(feature = "bench-sbf")] bench_sbf_start!("cpda_deserialize"); let (inputs, _) = ZInstructionDataInvokeCpi::deserialize_at(inputs.as_slice()).unwrap(); + #[cfg(feature = "bench-sbf")] bench_sbf_end!("cpda_deserialize"); - process_invoke_cpi(ctx, inputs, None, None) + process_invoke_cpi(ctx, inputs, None, None)?; + sol_log_compute_units(); + // 22,903 bytes heap with 33 outputs + #[cfg(feature = "bench-sbf")] + light_heap::bench_sbf_end!("total_usage"); + Ok(()) } pub fn invoke_cpi_with_read_only<'a, 'b, 'c: 'info, 'info>( ctx: Context<'a, 'b, 'c, 'info, InvokeCpiInstruction<'info>>, inputs: Vec, ) -> Result<()> { + #[cfg(feature = "bench-sbf")] bench_sbf_start!("cpda_deserialize"); let (inputs, _) = ZInstructionDataInvokeCpiWithReadOnly::deserialize_at(inputs.as_slice()).unwrap(); + #[cfg(feature = "bench-sbf")] bench_sbf_end!("cpda_deserialize"); // disable set cpi context because cpi context account uses InvokeCpiInstruction if let Some(cpi_context) = inputs.invoke_cpi.cpi_context { diff --git a/programs/system/src/processor/cpi.rs b/programs/system/src/processor/cpi.rs new file mode 100644 index 0000000000..cfd4dba19d --- /dev/null +++ b/programs/system/src/processor/cpi.rs @@ -0,0 +1,95 @@ +use account_compression::{ + insert_into_queues::AppendNullifyCreateAddressInputs, utils::constants::CPI_AUTHORITY_PDA_SEED, +}; + +use anchor_lang::{ + prelude::Pubkey, + prelude::{AccountMeta, Context}, + Bumps, InstructionData, Key, Result, ToAccountInfo, +}; + +use crate::{ + constants::CPI_AUTHORITY_PDA_BUMP, + context::SystemContext, + sdk::accounts::{InvokeAccounts, SignerAccounts}, +}; + +pub fn create_cpi_data_and_context< + 'a, + 'b, + 'c: 'info, + 'info, + A: InvokeAccounts<'info> + SignerAccounts<'info> + Bumps, +>( + ctx: &'a Context<'a, 'b, 'c, 'info, A>, + num_leaves: u8, + num_nullifiers: u8, + num_new_addresses: u8, + hashed_pubkeys_capacity: usize, + 
invoking_program_id: Option, +) -> Result<(SystemContext<'info>, Vec)> { + let account_infos = vec![ + ctx.accounts.get_fee_payer().to_account_info(), + ctx.accounts + .get_account_compression_authority() + .to_account_info(), + ctx.accounts.get_registered_program_pda().to_account_info(), + ]; + let accounts = vec![ + AccountMeta { + pubkey: account_infos[0].key(), + is_signer: true, + is_writable: true, + }, + AccountMeta::new_readonly(account_infos[1].key(), true), + AccountMeta::new_readonly(account_infos[2].key(), false), + ]; + let account_indices = + Vec::::with_capacity((num_nullifiers + num_leaves + num_new_addresses) as usize); + let bytes_size = AppendNullifyCreateAddressInputs::required_size_for_capacity( + num_leaves, + num_nullifiers, + num_new_addresses, + ); + let bytes = vec![0u8; bytes_size]; + Ok(( + SystemContext { + account_indices, + accounts, + account_infos, + hashed_pubkeys: Vec::with_capacity(hashed_pubkeys_capacity), + // TODO: init with capacity. + addresses: Vec::new(), + rollover_fee_payments: Vec::new(), + address_fee_is_set: false, + network_fee_is_set: false, + legacy_merkle_context: Vec::new(), + invoking_program_id, + }, + bytes, + )) +} + +pub fn cpi_account_compression_program(cpi_context: SystemContext, bytes: Vec) -> Result<()> { + let SystemContext { + accounts, + account_infos, + .. 
+ } = cpi_context; + let instruction_data = account_compression::instruction::NullifyAppendCreateAddress { bytes }; + + let data = instruction_data.data(); + let bump = &[CPI_AUTHORITY_PDA_BUMP]; + let seeds = &[&[CPI_AUTHORITY_PDA_SEED, bump][..]]; + let instruction = anchor_lang::solana_program::instruction::Instruction { + program_id: account_compression::ID, + accounts, + data, + }; + anchor_lang::solana_program::program::invoke_signed( + &instruction, + account_infos.as_slice(), + seeds, + )?; + Ok(()) +} diff --git a/programs/system/src/processor/create_address_cpi_data.rs b/programs/system/src/processor/create_address_cpi_data.rs new file mode 100644 index 0000000000..709bfe2389 --- /dev/null +++ b/programs/system/src/processor/create_address_cpi_data.rs @@ -0,0 +1,93 @@ +use crate::{ + context::SystemContext, + errors::SystemProgramError, + instruction_data::ZNewAddressParamsPacked, + sdk::address::{derive_address, derive_address_legacy}, +}; +use account_compression::{ + context::AcpAccount, errors::AccountCompressionErrorCode, + insert_into_queues::AppendNullifyCreateAddressInputs, +}; +use anchor_lang::prelude::*; + +pub fn derive_new_addresses<'a, 'info>( + new_address_params: &[ZNewAddressParamsPacked], + num_input_compressed_accounts: usize, + remaining_accounts: &'info [AccountInfo<'info>], + context: &mut SystemContext<'info>, + cpi_ix_data: &mut AppendNullifyCreateAddressInputs<'_>, + accounts: &[AcpAccount<'a, 'info>], +) -> Result<()> { + let init_len = context.account_indices.len(); + let invoking_program_id_bytes = context + .invoking_program_id + .as_ref() + .map(|invoking_program_id| invoking_program_id.to_bytes()); + new_address_params + .iter() + .enumerate() + .try_for_each(|(i, new_address_params)| { + let (address, rollover_fee) = match &accounts + [new_address_params.address_merkle_tree_account_index as usize] + { + AcpAccount::AddressTree((pubkey, _)) => { + cpi_ix_data.addresses[i].queue_index = context.get_index_or_insert( + 
new_address_params.address_queue_account_index, + &remaining_accounts, + ); + cpi_ix_data.addresses[i].tree_index = context.get_index_or_insert( + new_address_params.address_merkle_tree_account_index, + &remaining_accounts, + ); + ( + derive_address_legacy(&pubkey, &new_address_params.seed) + .map_err(ProgramError::from)?, + context + .legacy_merkle_context + .iter() + .find(|x| x.0 == new_address_params.address_merkle_tree_account_index) + .unwrap() + .1 + .rollover_fee, + ) + } + AcpAccount::BatchedAddressTree(tree) => { + let invoking_program_id_bytes = + if let Some(bytes) = invoking_program_id_bytes.as_ref() { + Ok(bytes) + } else { + err!(SystemProgramError::DeriveAddressError) + }?; + cpi_ix_data.addresses[i].tree_index = context.get_index_or_insert( + new_address_params.address_merkle_tree_account_index, + &remaining_accounts, + ); + + ( + derive_address( + &new_address_params.seed, + &tree.pubkey().to_bytes(), + invoking_program_id_bytes, + ), + tree.metadata.rollover_metadata.network_fee, + ) + } + _ => { + return err!( + AccountCompressionErrorCode::AddressMerkleTreeAccountDiscriminatorMismatch + ) + } + }; + + // We are inserting addresses into two vectors to avoid unwrapping + // the option in following functions. 
+ context.addresses[i + num_input_compressed_accounts] = Some(address); + cpi_ix_data.addresses[i].address = address; + + context.set_rollover_fee(new_address_params.address_queue_account_index, rollover_fee); + Ok(()) + })?; + cpi_ix_data.num_address_appends = (context.account_indices.len() - init_len) as u8; + + Ok(()) +} diff --git a/programs/system/src/processor/create_inputs_cpi_data.rs b/programs/system/src/processor/create_inputs_cpi_data.rs new file mode 100644 index 0000000000..4e91bc4ca9 --- /dev/null +++ b/programs/system/src/processor/create_inputs_cpi_data.rs @@ -0,0 +1,139 @@ +use account_compression::{ + context::AcpAccount, insert_into_queues::AppendNullifyCreateAddressInputs, + processor::insert_nullifiers::InsertNullifierInput, +}; +use anchor_lang::prelude::*; + +use light_hasher::{Hasher, Poseidon}; +use light_utils::hash_to_bn254_field_size_be; + +use crate::{ + context::SystemContext, errors::SystemProgramError, + instruction_data::ZPackedCompressedAccountWithMerkleContext, +}; + +/// Hashes the input compressed accounts and stores the results in the leaves array. +/// Merkle tree pubkeys are hashed and stored in the hashed_pubkeys array. +/// Merkle tree pubkeys should be ordered for efficiency. 
+#[inline(always)] +pub fn create_inputs_cpi_data<'a, 'b, 'c: 'info, 'info>( + remaining_accounts: &'info [AccountInfo<'info>], + input_compressed_accounts_with_merkle_context: &'a [ZPackedCompressedAccountWithMerkleContext<'a>], + context: &mut SystemContext<'info>, + cpi_ix_data: &mut AppendNullifyCreateAddressInputs<'a>, + accounts: &[AcpAccount<'a, 'info>], +) -> Result<[u8; 32]> { + if input_compressed_accounts_with_merkle_context.is_empty() { + return Ok([0u8; 32]); + } + let mut owner_pubkey = input_compressed_accounts_with_merkle_context[0] + .compressed_account + .owner; + let mut hashed_owner = hash_to_bn254_field_size_be(&owner_pubkey.to_bytes()) + .unwrap() + .0; + context + .hashed_pubkeys + .push((owner_pubkey.into(), hashed_owner)); + let mut current_hashed_mt = [0u8; 32]; + let mut hash_chain = [0u8; 32]; + + let mut current_mt_index: i16 = -1; + for (j, input_compressed_account_with_context) in input_compressed_accounts_with_merkle_context + .iter() + .enumerate() + { + // For heap neutrality we cannot allocate new heap memory in this function. + if let Some(address) = &input_compressed_account_with_context + .compressed_account + .address + { + context.addresses[j] = Some(**address); + } + + #[allow(clippy::comparison_chain)] + if current_mt_index + != input_compressed_account_with_context + .merkle_context + .merkle_tree_pubkey_index as i16 + { + current_mt_index = input_compressed_account_with_context + .merkle_context + .merkle_tree_pubkey_index as i16; + current_hashed_mt = match &accounts[current_mt_index as usize] { + AcpAccount::OutputQueue(queue) => queue.hashed_merkle_tree_pubkey, + AcpAccount::StateTree(_) => { + context.legacy_merkle_context[current_mt_index as usize] + .1 + .hashed_pubkey + } + _ => { + return Err(SystemProgramError::InvalidAccount.into()); + } + }; + } + // Without cpi context all input compressed accounts have the same owner. + // With cpi context the owners will be different. 
+ if owner_pubkey + != input_compressed_account_with_context + .compressed_account + .owner + { + owner_pubkey = input_compressed_account_with_context + .compressed_account + .owner; + hashed_owner = context.get_or_hash_pubkey(owner_pubkey.into()); + } + let queue_index = context.get_index_or_insert( + input_compressed_account_with_context + .merkle_context + .nullifier_queue_pubkey_index, + remaining_accounts, + ); + let tree_index = context.get_index_or_insert( + input_compressed_account_with_context + .merkle_context + .merkle_tree_pubkey_index, + remaining_accounts, + ); + cpi_ix_data.nullifiers[j] = InsertNullifierInput { + account_hash: input_compressed_account_with_context + .compressed_account + .hash_with_hashed_values::( + &hashed_owner, + ¤t_hashed_mt, + &input_compressed_account_with_context + .merkle_context + .leaf_index + .into(), + )?, + leaf_index: input_compressed_account_with_context + .merkle_context + .leaf_index + .into(), + prove_by_index: input_compressed_account_with_context + .merkle_context + .prove_by_index() as u8, + queue_index, + tree_index, + }; + if j == 0 { + hash_chain = cpi_ix_data.nullifiers[j].account_hash; + } else { + hash_chain = Poseidon::hashv(&[&hash_chain, &cpi_ix_data.nullifiers[j].account_hash]) + .map_err(ProgramError::from)?; + } + } + cpi_ix_data.num_queues = input_compressed_accounts_with_merkle_context + .iter() + .enumerate() + .filter(|(i, x)| { + let candidate = x.merkle_context.nullifier_queue_pubkey_index; + !input_compressed_accounts_with_merkle_context[..*i] + .iter() + .any(|y| y.merkle_context.nullifier_queue_pubkey_index == candidate) + }) + .count() as u8; + + Ok(hash_chain) +} diff --git a/programs/system/src/processor/create_outputs_cpi_data.rs b/programs/system/src/processor/create_outputs_cpi_data.rs new file mode 100644 index 0000000000..3b019d88b4 --- /dev/null +++ b/programs/system/src/processor/create_outputs_cpi_data.rs @@ -0,0 +1,170 @@ +use crate::{ + context::SystemContext, 
errors::SystemProgramError, + instruction_data::ZOutputCompressedAccountWithPackedContext, + sdk::event::MerkleTreeSequenceNumber, +}; +use account_compression::{ + context::AcpAccount, insert_into_queues::AppendNullifyCreateAddressInputs, +}; +use anchor_lang::prelude::*; +use light_hasher::{Hasher, Poseidon}; +use light_utils::hash_to_bn254_field_size_be; + +/// Creates CPI accounts, instruction data, and performs checks. +/// - Merkle tree indices must be in order. +/// - Hashes output accounts for insertion and event. +/// - Collects sequence numbers for event. +/// +/// Checks: +/// 1. Checks whether a Merkle tree is program owned, if so checks write +/// eligibility. +/// 2. Checks ordering of Merkle tree indices. +/// 3. Checks that addresses in output compressed accounts have been created or +/// exist in input compressed accounts. An address may not be used in an +/// output compressed accounts. This will close the account. +#[allow(clippy::too_many_arguments)] +#[allow(clippy::type_complexity)] +pub fn create_outputs_cpi_data<'a, 'info>( + output_compressed_accounts: &[ZOutputCompressedAccountWithPackedContext<'a>], + output_compressed_account_indices: &mut [u32], + sequence_numbers: &mut Vec, + remaining_accounts: &'info [AccountInfo<'info>], + context: &mut SystemContext<'info>, + cpi_ix_data: &mut AppendNullifyCreateAddressInputs<'a>, + accounts: &[AcpAccount<'a, 'info>], +) -> Result<[u8; 32]> { + if output_compressed_accounts.is_empty() { + return Ok([0u8; 32]); + } + let mut current_index: i16 = -1; + let mut num_leaves_in_tree: u32 = 0; + let mut mt_next_index: u32 = 0; + let mut hashed_merkle_tree = [0u8; 32]; + let mut index_merkle_tree_account = 0; + let number_of_merkle_trees = + output_compressed_accounts.last().unwrap().merkle_tree_index as usize + 1; + let mut merkle_tree_pubkeys = + Vec::::with_capacity(number_of_merkle_trees); + let mut hash_chain = [0u8; 32]; + let mut rollover_fee = 0; + + for (j, account) in 
output_compressed_accounts.iter().enumerate() { + // if mt index == current index Merkle tree account info has already been added. + // if mt index != current index, Merkle tree account info is new, add it. + #[allow(clippy::comparison_chain)] + if account.merkle_tree_index as i16 == current_index { + // Do nothing, but it is the most common case. + } else if account.merkle_tree_index as i16 > current_index { + current_index = account.merkle_tree_index.into(); + + let pubkey = match &accounts[current_index as usize] { + AcpAccount::OutputQueue(output_queue) => { + hashed_merkle_tree = output_queue.hashed_merkle_tree_pubkey; + rollover_fee = output_queue.metadata.rollover_metadata.rollover_fee; + mt_next_index = output_queue.batch_metadata.next_index as u32; + *output_queue.pubkey() + } + AcpAccount::StateTree((pubkey, tree)) => { + sequence_numbers.push(MerkleTreeSequenceNumber { + pubkey: *pubkey, + seq: tree.sequence_number() as u64, + }); + hashed_merkle_tree = context.legacy_merkle_context[current_index as usize] + .1 + .hashed_pubkey; + rollover_fee = context.legacy_merkle_context[current_index as usize] + .1 + .rollover_fee; + mt_next_index = tree.next_index() as u32; + (*pubkey).into() + } + _ => { + return err!(SystemProgramError::InvalidAccount); + } + }; + // check Merkle tree uniqueness + if merkle_tree_pubkeys.contains(&pubkey) { + return err!(SystemProgramError::OutputMerkleTreeNotUnique); + } else { + merkle_tree_pubkeys.push(pubkey); + } + + context.get_index_or_insert(account.merkle_tree_index, &remaining_accounts); + num_leaves_in_tree = 0; + index_merkle_tree_account += 1; + } else { + // Check 2. + // Output Merkle tree indices must be in order since we use the + // number of leaves in a Merkle tree to determine the correct leaf + // index. Since the leaf index is part of the hash this is security + // critical. + return err!(SystemProgramError::OutputMerkleTreeIndicesNotInOrder); + } + + // Check 3. 
+ if let Some(address) = account.compressed_account.address { + if let Some(position) = context + .addresses + .iter() + .filter(|x| x.is_some()) + .position(|&x| x.unwrap() == *address) + { + context.addresses.remove(position); + } else { + msg!("Address {:?}, is no new address and does not exist in input compressed accounts.", address); + msg!( + "Remaining compressed_account_addresses: {:?}", + context.addresses + ); + return Err(SystemProgramError::InvalidAddress.into()); + } + } + + output_compressed_account_indices[j] = mt_next_index + num_leaves_in_tree; + num_leaves_in_tree += 1; + if account.compressed_account.data.is_some() && context.invoking_program_id.is_none() { + msg!("Invoking program is not provided."); + msg!("Only program owned compressed accounts can have data."); + return err!(SystemProgramError::InvokingProgramNotProvided); + } + let hashed_owner = match context + .hashed_pubkeys + .iter() + .find(|x| x.0 == account.compressed_account.owner.into()) + { + Some(hashed_owner) => hashed_owner.1, + None => { + let hashed_owner = + hash_to_bn254_field_size_be(&account.compressed_account.owner.to_bytes()) + .unwrap() + .0; + context + .hashed_pubkeys + .push((account.compressed_account.owner.into(), hashed_owner)); + hashed_owner + } + }; + // Compute output compressed account hash. 
+ cpi_ix_data.leaves[j].leaf = account + .compressed_account + .hash_with_hashed_values::( + &hashed_owner, + &hashed_merkle_tree, + &output_compressed_account_indices[j], + )?; + cpi_ix_data.leaves[j].index = index_merkle_tree_account - 1; + + if !cpi_ix_data.nullifiers.is_empty() { + if j == 0 { + hash_chain = cpi_ix_data.leaves[j].leaf; + } else { + hash_chain = Poseidon::hashv(&[&hash_chain, &cpi_ix_data.leaves[j].leaf]) + .map_err(ProgramError::from)?; + } + } + context.set_rollover_fee(index_merkle_tree_account - 1, rollover_fee); + } + + cpi_ix_data.num_unique_appends = context.account_indices.len() as u8; + Ok(hash_chain) +} diff --git a/programs/system/src/processor/mod.rs b/programs/system/src/processor/mod.rs new file mode 100644 index 0000000000..9f81434dc3 --- /dev/null +++ b/programs/system/src/processor/mod.rs @@ -0,0 +1,10 @@ +pub mod cpi; +pub mod create_address_cpi_data; +pub mod create_inputs_cpi_data; +pub mod create_outputs_cpi_data; +pub mod processor; +pub mod read_only_account; +pub mod read_only_address; +pub mod sol_compression; +pub mod sum_check; +pub mod verify_proof; diff --git a/programs/system/src/invoke/processor.rs b/programs/system/src/processor/processor.rs similarity index 50% rename from programs/system/src/invoke/processor.rs rename to programs/system/src/processor/processor.rs index 17f8499451..b9113c055b 100644 --- a/programs/system/src/invoke/processor.rs +++ b/programs/system/src/processor/processor.rs @@ -1,37 +1,35 @@ -use account_compression::{ - append_nullify_create_address::{AppendNullifyCreateAddressInputs, InsertNullifierInput}, - utils::transfer_lamports::transfer_lamports_cpi, -}; -use anchor_lang::{prelude::*, Bumps}; -use light_heap::{bench_sbf_end, bench_sbf_start}; -use light_utils::hashchain::create_tx_hash_from_hash_chains; -use light_verifier::CompressedProof as CompressedVerifierProof; -use light_zero_copy::{slice::ZeroCopySliceBorsh, slice_mut::ZeroCopySliceMut}; - -use super::PackedReadOnlyAddress; use 
crate::{ + check_accounts::try_from_account_infos, constants::CPI_AUTHORITY_PDA_BUMP, errors::SystemProgramError, instruction_data::{ ZInstructionDataInvoke, ZPackedReadOnlyAddress, ZPackedReadOnlyCompressedAccount, }, - invoke::{ - address::derive_new_addresses, - append_state::create_cpi_accounts_and_instruction_data, - cpi_acp::{cpi_account_compression_program, create_cpi_data}, - emit_event::emit_state_transition_event, + processor::{ + cpi::{cpi_account_compression_program, create_cpi_data_and_context}, + create_address_cpi_data::derive_new_addresses, + create_inputs_cpi_data::create_inputs_cpi_data, + create_outputs_cpi_data::create_outputs_cpi_data, + read_only_account::verify_read_only_account_inclusion_by_index, + read_only_address::verify_read_only_address_queue_non_inclusion, sol_compression::compress_or_decompress_lamports, sum_check::sum_check, - verify_proof::{ - hash_input_compressed_accounts, read_address_roots, read_input_state_roots, - verify_proof, verify_read_only_account_inclusion_by_index, - verify_read_only_address_queue_non_inclusion, - }, + verify_proof::{read_address_roots, read_input_state_roots, verify_proof}, }, sdk::accounts::{InvokeAccounts, SignerAccounts}, }; +use account_compression::{ + insert_into_queues::AppendNullifyCreateAddressInputs, + processor::insert_nullifiers::InsertNullifierInput, +}; +use anchor_lang::{prelude::*, Bumps}; +#[cfg(feature = "bench-sbf")] +use light_heap::{bench_sbf_end, bench_sbf_start}; +use light_utils::hashchain::create_tx_hash_from_hash_chains; +use light_verifier::CompressedProof as CompressedVerifierProof; +use light_zero_copy::{slice::ZeroCopySliceBorsh, slice_mut::ZeroCopySliceMut}; -// TODO: remove once upgraded to anchor 0.30.0 (right now it's required for idl generation) +// TODO: remove once anchor is removed #[derive(Debug, Clone, PartialEq, Eq, AnchorSerialize, AnchorDeserialize)] pub struct CompressedProof { pub a: [u8; 32], @@ -64,13 +62,13 @@ impl Default for CompressedProof { /// 
1.4.2. Validate Tree is writable by signer /// 1.4.3. Check that only existing addresses are used. /// 1.4.4. Enforce that Merkle tree indices are in order -/// 1.4.5. Cpi account compression program to insert into output queue or v1 state tree /// 1.5. Insert nullifiers /// 1.5.1. Validate Tree is writable by signer. -/// 1.5.2. Cpi account compression program to insert into nullifier queue. /// 1.6. Verify inclusion /// 1.5.1 by index /// 1.5.2 by zkp +/// 1.7. Cpi account compression program to insert new addresses, +/// nullify input and append output state. /// 2. Read-only compressed accounts /// `read_only_accounts` /// - is already hashed we only verify inclusion @@ -86,41 +84,6 @@ impl Default for CompressedProof { /// `read_only_addresses` /// 4.1. Verify non-inclusion in queue /// 4.2. Verify inclusion by zkp -/// -/// Steps: -/// 1. Sum check -/// 1.1. Count num_prove_by_index_input_accounts -/// 2. Compression lamports -/// 3. Allocate heap memory -/// 4. Hash input compressed accounts -/// 4.1. Collect addresses that exist in input accounts -/// 5. Create new & verify read-only addresses -/// 5.1. Verify read only address non-inclusion in bloom filters -/// 5.2. Derive new addresses from seed and invoking program -/// 5.3. cpi ACP to Insert new addresses into address merkle tree queue -/// 6. Verify read-only account inclusion by index -/// 7. Insert leaves (output compressed account hashes) -/// 8.1. Validate Tree is writable by signer -/// 8.2. Check that only existing addresses are used. -/// 8.3. Enforce that Merkle tree indices are in order -/// 8.4. Compute output compressed hashes -/// 8.5. cpi ACP to insert output compressed accounts -/// into state Merkle tree v1 or output queue -/// 8. Insert nullifiers (input compressed account hashes) -/// 8.1. Create a tx hash -/// 8.2. check_program_owner_state_merkle_tree (in sub fn) -/// 8.3. Cpi ACP to insert nullifiers -/// 9. Transfer network fee. -/// 10. 
Read Address and State tree roots -/// - For state roots get roots prior to modifying the tree (for v1 trees). -/// - For v2 and address trees (v1 & v2) the tree isn't modified -/// -> it doesn't matter when we fetch the roots. -/// 10.1 Read address roots from accounts -/// 10.2 Read state roots from accounts -/// 11. Verify Inclusion & Non-inclusion Proof -/// 11.1. Add read only addresses to new addresses vec -/// 11.2. filter_for_accounts_not_proven_by_index -/// 12. Emit state transition event pub fn process< 'a, 'b, @@ -135,63 +98,43 @@ pub fn process< read_only_addresses: Option>, read_only_accounts: Option>, ) -> Result<()> { - if inputs.relay_fee.is_some() { - unimplemented!("Relay fee is not implemented yet."); - } - // 1. Sum check --------------------------------------------------- - bench_sbf_start!("cpda_sum_check"); - let num_prove_by_index_input_accounts = sum_check( - &inputs.input_compressed_accounts_with_merkle_context, - &inputs.output_compressed_accounts, - &inputs.relay_fee.map(|x| (*x).into()), - &inputs.compress_or_decompress_lamports.map(|x| (*x).into()), - &inputs.is_compress, - )?; - bench_sbf_end!("cpda_sum_check"); - // 2. 
Compress or decompress lamports --------------------------------------------------- - bench_sbf_start!("cpda_process_compression"); - if inputs.compress_or_decompress_lamports.is_some() { - if inputs.is_compress && ctx.accounts.get_decompression_recipient().is_some() { - return err!(SystemProgramError::DecompressionRecipientDefined); - } - compress_or_decompress_lamports(&inputs, &ctx)?; - } else if ctx.accounts.get_decompression_recipient().is_some() { - return err!(SystemProgramError::DecompressionRecipientDefined); - } else if ctx.accounts.get_sol_pool_pda().is_some() { - return err!(SystemProgramError::SolPoolPdaDefined); - } + #[cfg(feature = "bench-sbf")] bench_sbf_end!("cpda_process_compression"); - let read_only_accounts = read_only_accounts.unwrap_or_else(|| { - ZeroCopySliceBorsh::::from_bytes(&[0u8, 0u8, 0u8, 0u8]) - .unwrap() - }); - - // 3. Allocate heap memory here so that we can free memory after function invocations. let num_input_compressed_accounts = inputs.input_compressed_accounts_with_merkle_context.len(); - let num_read_only_accounts = read_only_accounts.len(); let num_new_addresses = inputs.new_address_params.len(); let num_output_compressed_accounts = inputs.output_compressed_accounts.len(); - // let mut input_compressed_account_hashes = Vec::with_capacity(num_input_compressed_accounts); - // let mut compressed_account_addresses: Vec> = - // vec![None; num_input_compressed_accounts + num_new_addresses]; - let mut output_compressed_account_indices = vec![0u32; num_output_compressed_accounts]; // hashed_pubkeys_capacity is the maximum of hashed pubkey the tx could have. // 1 owner pubkey inputs + every remaining account pubkey can be a tree + every output can be owned by a different pubkey // + number of times cpi context account was filled. 
let hashed_pubkeys_capacity = 1 + ctx.remaining_accounts.len() + num_output_compressed_accounts + cpi_context_inputs; - // let mut hashed_pubkeys = Vec::<(Pubkey, [u8; 32])>::with_capacity(hashed_pubkeys_capacity); - let (mut cpi_data, mut cpi_ix_bytes) = create_cpi_data( + // 1. Allocate cpi data and initialize context + let (mut context, mut cpi_ix_bytes) = create_cpi_data_and_context( &ctx, num_output_compressed_accounts as u8, num_input_compressed_accounts as u8, num_new_addresses as u8, hashed_pubkeys_capacity, + invoking_program, )?; + // Collect all addresses to check that every address in the output compressed accounts + // is an input or a new address. + inputs + .input_compressed_accounts_with_merkle_context + .iter() + .for_each(|account| { + if let Some(address) = account.compressed_account.address { + context.addresses.push(Some(*address)); + } + }); + // 2. Deserialize and check all Merkle tree and queue accounts. + let mut accounts = try_from_account_infos(ctx.remaining_accounts, &mut context)?; + + // 3. Deserialize cpi instruction data as zero copy to fill it. let mut cpi_ix_data = AppendNullifyCreateAddressInputs::new( &mut cpi_ix_bytes, num_output_compressed_accounts as u8, @@ -202,215 +145,188 @@ pub fn process< cpi_ix_data.set_invoked_by_program(true); cpi_ix_data.bump = CPI_AUTHORITY_PDA_BUMP; - // 5. Create new & verify read-only addresses --------------------------------------------------- + // 4. Create new & verify read-only addresses --------------------------------------------------- let read_only_addresses = read_only_addresses.unwrap_or(ZeroCopySliceBorsh::from_bytes(&[0, 0, 0, 0]).unwrap()); let num_of_read_only_addresses = read_only_addresses.len(); let num_non_inclusion_proof_inputs = num_new_addresses + num_of_read_only_addresses; - let mut new_addresses = Vec::with_capacity(num_non_inclusion_proof_inputs); - // 5.1. Verify read only address non-inclusion in bloom filters - // Execute prior to inserting new addresses. 
- verify_read_only_address_queue_non_inclusion( - ctx.remaining_accounts, + let mut new_address_roots = Vec::with_capacity(num_non_inclusion_proof_inputs); + // 5 Read address roots --------------------------------------------------- + let address_tree_height = read_address_roots( + &accounts, + inputs.new_address_params.as_slice(), read_only_addresses.as_slice(), + &mut new_address_roots, )?; - // TODO: compute non inclusion hash chain inside - let address_network_fee_bundle = if num_new_addresses != 0 { - // 5.2. Derive new addresses from seed and invoking program + // 6. Derive new addresses from seed and invoking program + if num_new_addresses != 0 { derive_new_addresses( - &invoking_program, inputs.new_address_params.as_slice(), num_input_compressed_accounts, - ctx.remaining_accounts, - &invoking_program, - &mut cpi_data, + &ctx.remaining_accounts, + &mut context, &mut cpi_ix_data, + &accounts, )? - // // 5.3. Insert new addresses into address merkle tree queue --------------------------------------------------- - // insert_addresses_into_address_merkle_tree_queue( - // &ctx, - // &new_addresses, - // &inputs.new_address_params, - // &invoking_program, - // )? - } else { - None - }; + } - // 6. Verify read-only account inclusion by index --------------------------------------------------- - // Verify prior to creating new state in output queues so that - // reading an account is successful even when it is modified in the same transaction. - let num_prove_read_only_accounts_prove_by_index = verify_read_only_account_inclusion_by_index( - ctx.remaining_accounts, - read_only_accounts.as_slice(), - )?; + // 7. 
Verify read only address non-inclusion in bloom filters + verify_read_only_address_queue_non_inclusion(&mut accounts, read_only_addresses.as_slice())?; - let num_read_only_accounts_proof = - num_read_only_accounts - num_prove_read_only_accounts_prove_by_index; - let num_writable_accounts_proof = - num_input_compressed_accounts - num_prove_by_index_input_accounts; - let num_inclusion_proof_inputs = num_writable_accounts_proof + num_read_only_accounts_proof; + let mut output_compressed_account_indices = vec![0u32; num_output_compressed_accounts]; - // Allocate space for sequence numbers with remaining account length as a - // proxy. We cannot allocate heap memory in - // insert_output_compressed_accounts_into_state_merkle_tree because it is - // heap neutral. let mut sequence_numbers = Vec::with_capacity(ctx.remaining_accounts.len()); - // 7. Insert leaves (output compressed account hashes) --------------------------------------------------- - let (output_network_fee_bundle, output_compressed_account_hashes) = - if !inputs.output_compressed_accounts.is_empty() { - bench_sbf_start!("cpda_append"); - let (network_fee_bundle, output_compressed_account_hashes) = - create_cpi_accounts_and_instruction_data( - inputs.output_compressed_accounts.as_slice(), - &mut output_compressed_account_indices, - &invoking_program, - &mut sequence_numbers, - &ctx.remaining_accounts, - &mut cpi_data, - &mut cpi_ix_data, - )?; - // # Safety this is a safeguard for memory safety. - // This error should never be triggered. - check_vec_capacity( - hashed_pubkeys_capacity, - &cpi_data.hashed_pubkeys, - "hashed_pubkeys", - )?; - bench_sbf_end!("cpda_append"); - (network_fee_bundle, output_compressed_account_hashes) - } else { - (None, [0u8; 32]) - }; + // 8. Insert leaves (output compressed account hashes) --------------------------------------------------- + #[cfg(feature = "bench-sbf")] + bench_sbf_start!("cpda_append"); + // 9. Create cpi data for outputs + // 9.1. 
Compute output compressed hashes + // 9.2. Collect accounts + // 9.3. Validate order of output queue/ tree accounts + let output_compressed_account_hashes = create_outputs_cpi_data( + inputs.output_compressed_accounts.as_slice(), + &mut output_compressed_account_indices, + &mut sequence_numbers, + &ctx.remaining_accounts, + &mut context, + &mut cpi_ix_data, + &accounts, + )?; + #[cfg(feature = "debug")] + check_vec_capacity( + hashed_pubkeys_capacity, + &context.hashed_pubkeys, + "hashed_pubkeys", + )?; + + #[cfg(feature = "bench-sbf")] bench_sbf_start!("emit_state_transition_event"); + // TODO: add sequence numbers to the cpi to record them. // Reduce the capacity of the sequence numbers vector. sequence_numbers.shrink_to_fit(); - // must be post output accounts since the order of account infos matters - // for the outputs. - // 4. hash input compressed accounts --------------------------------------------------- - // 4.1. collects addresses that exist in input accounts - bench_sbf_start!("cpda_hash_input_compressed_accounts"); - let (input_network_fee_bundle, input_compressed_account_hashes) = if !inputs + // 10. hash input compressed accounts --------------------------------------------------- + #[cfg(feature = "bench-sbf")] + bench_sbf_start!("cpda_nullifiers"); + if !inputs .input_compressed_accounts_with_merkle_context .is_empty() { - let (network_fee, input_compressed_account_hashes) = hash_input_compressed_accounts( + // must be post output accounts since the order of account infos matters + // for the outputs. + let input_compressed_account_hashes = create_inputs_cpi_data( ctx.remaining_accounts, inputs .input_compressed_accounts_with_merkle_context .as_slice(), - &invoking_program, - &mut cpi_data, + &mut context, &mut cpi_ix_data, + &accounts, )?; - // # Safety this is a safeguard for memory safety. - // This error should never be triggered. 
- // TODO: move to the end of the function - // check_vec_capacity(hashed_pubkeys_capacity, &hashed_pubkeys, "hashed_pubkeys")?; - (network_fee, input_compressed_account_hashes) - } else { - (None, [0u8; 32]) - }; - bench_sbf_end!("cpda_hash_input_compressed_accounts"); - // 8. insert nullifiers (input compressed account hashes)--------------------------------------------------- - // Note: It would make sense to nullify prior to appending new state. - // Since output compressed account hashes are inputs - // for the tx hash on which the nullifier depends - // and the logic to compute output hashes is higly optimized - // and entangled with the cpi we leave it as is for now. - bench_sbf_start!("cpda_nullifiers"); - if !inputs - .input_compressed_accounts_with_merkle_context - .is_empty() - { - let current_slot = Clock::get()?.slot; + #[cfg(feature = "debug")] + check_vec_capacity( + hashed_pubkeys_capacity, + &context.hashed_pubkeys, + "hashed_pubkeys", + )?; // 8.1. Create a tx hash - // let tx_hash = create_tx_hash( - // &input_compressed_account_hashes, - // &output_compressed_account_hashes, - // current_slot, - // ) - // .map_err(ProgramError::from)?; - + let current_slot = Clock::get()?.slot; cpi_ix_data.tx_hash = create_tx_hash_from_hash_chains( &input_compressed_account_hashes, &output_compressed_account_hashes, current_slot, ) .map_err(ProgramError::from)?; - // 8.2. Insert nullifiers for compressed input account hashes into nullifier - // queue. - // insert_nullifiers( - // &inputs.input_compressed_accounts_with_merkle_context, - // &ctx, - // &input_compressed_account_hashes, - // &invoking_program, - // tx_hash, - // )? } + #[cfg(feature = "bench-sbf")] bench_sbf_end!("cpda_nullifiers"); - // 9. Transfer network fee - transfer_network_fee( - &ctx, - input_network_fee_bundle, - address_network_fee_bundle, - output_network_fee_bundle, + // 11. 
Sum check --------------------------------------------------- + #[cfg(feature = "bench-sbf")] + bench_sbf_start!("cpda_sum_check"); + let num_prove_by_index_input_accounts = sum_check( + &inputs.input_compressed_accounts_with_merkle_context, + &inputs.output_compressed_accounts, + &inputs.relay_fee.map(|x| (*x).into()), + &inputs.compress_or_decompress_lamports.map(|x| (*x).into()), + &inputs.is_compress, )?; + #[cfg(feature = "bench-sbf")] + bench_sbf_end!("cpda_sum_check"); + // 12. Compress or decompress lamports --------------------------------------------------- + #[cfg(feature = "bench-sbf")] + bench_sbf_start!("cpda_process_compression"); + if inputs.compress_or_decompress_lamports.is_some() { + if inputs.is_compress && ctx.accounts.get_decompression_recipient().is_some() { + return err!(SystemProgramError::DecompressionRecipientDefined); + } + compress_or_decompress_lamports(&inputs, &ctx)?; + } else if ctx.accounts.get_decompression_recipient().is_some() { + return err!(SystemProgramError::DecompressionRecipientDefined); + } else if ctx.accounts.get_sol_pool_pda().is_some() { + return err!(SystemProgramError::SolPoolPdaDefined); + } + // 13. Verify read-only account inclusion by index --------------------------------------------------- + let read_only_accounts = read_only_accounts.unwrap_or_else(|| { + ZeroCopySliceBorsh::::from_bytes(&[0u8, 0u8, 0u8, 0u8]) + .unwrap() + }); + let num_prove_read_only_accounts_prove_by_index = + verify_read_only_account_inclusion_by_index(&mut accounts, read_only_accounts.as_slice())?; - // TODO: move roots all the way up so that we can compute two input hash chains in one go - // 10. 
Read Address and State tree roots --------------------------------------------------- - let mut new_address_roots = Vec::with_capacity(num_non_inclusion_proof_inputs); - // 10.1 Read address roots --------------------------------------------------- - let address_tree_height = read_address_roots( - ctx.remaining_accounts, - inputs.new_address_params.as_slice(), - read_only_addresses.as_slice(), - &mut new_address_roots, - )?; - // # Safety this is a safeguard for memory safety. - // This error should never be triggered. + let num_read_only_accounts = read_only_accounts.len(); + let num_read_only_accounts_proof = + num_read_only_accounts - num_prove_read_only_accounts_prove_by_index; + let num_writable_accounts_proof = + num_input_compressed_accounts - num_prove_by_index_input_accounts; + let num_inclusion_proof_inputs = num_writable_accounts_proof + num_read_only_accounts_proof; + + #[cfg(feature = "debug")] check_vec_capacity( num_non_inclusion_proof_inputs, &new_address_roots, "new_address_roots", )?; - // 10.2. Read state roots --------------------------------------------------- + // 14. Read state roots --------------------------------------------------- let mut input_compressed_account_roots = Vec::with_capacity(num_inclusion_proof_inputs); let state_tree_height = read_input_state_roots( - ctx.remaining_accounts, + &accounts, inputs .input_compressed_accounts_with_merkle_context .as_slice(), read_only_accounts.as_slice(), &mut input_compressed_account_roots, )?; - // # Safety this is a safeguard for memory safety. - // This error should never be triggered. + + #[cfg(feature = "debug")] check_vec_capacity( num_inclusion_proof_inputs, &input_compressed_account_roots, "input_compressed_account_roots", )?; - // TODO: optimize in separate step - // 11. Verify Inclusion & Non-inclusion Proof --------------------------------------------------- + // 15. 
Verify Inclusion & Non-inclusion Proof --------------------------------------------------- if num_inclusion_proof_inputs != 0 || num_non_inclusion_proof_inputs != 0 { if let Some(proof) = inputs.proof.as_ref() { + #[cfg(feature = "bench-sbf")] bench_sbf_start!("cpda_verify_state_proof"); - - // 11.1. Add read only addresses to new addresses vec before proof verification. + let mut new_addresses = Vec::with_capacity(num_non_inclusion_proof_inputs); + // 15.1. Copy the new addresses to new addresses vec + // (Remove and compute hash chain directly once concurrent trees are phased out.) + for new_address in cpi_ix_data.addresses.iter() { + new_addresses.push(new_address.address); + } + // 15.1. Add read only addresses to new addresses vec before proof verification. // We don't add read only addresses before since // read-only addresses must not be used in output compressed accounts. for read_only_address in read_only_addresses.iter() { new_addresses.push(read_only_address.address); } - // 11.2. Select accounts account hashes for ZKP. + // 15.2. Select accounts account hashes for ZKP. // We need to filter out accounts that are proven by index. let mut proof_input_compressed_account_hashes = Vec::with_capacity(num_inclusion_proof_inputs); @@ -419,6 +335,7 @@ pub fn process< &cpi_ix_data.nullifiers, &mut proof_input_compressed_account_hashes, ); + #[cfg(feature = "debug")] check_vec_capacity( num_inclusion_proof_inputs, &proof_input_compressed_account_hashes, @@ -430,7 +347,7 @@ pub fn process< b: proof.b, c: proof.c, }; - // 11.3. Verify proof + // 15.3. Verify proof // Proof inputs order: // 1. input compressed accounts // 2. read-only compressed accounts @@ -464,6 +381,7 @@ pub fn process< Err(e) } }?; + #[cfg(feature = "bench-sbf")] bench_sbf_end!("cpda_verify_state_proof"); } else { return err!(SystemProgramError::ProofIsNone); @@ -481,29 +399,12 @@ pub fn process< return err!(SystemProgramError::EmptyInputs); } - // 12. 
Emit state transition event --------------------------------------------------- - bench_sbf_start!("emit_state_transition_event"); - // let input_compressed_account_hashes = cpi_ix_data - // .nullifiers - // .iter() - // .map(|x| x.account_hash) - // .collect(); - // let output_compressed_account_hashes = cpi_ix_data.leaves.iter().map(|x| x.leaf).collect(); - cpi_data.transfer_rollover_fees(&ctx.remaining_accounts, &ctx.accounts.get_fee_payer())?; - - cpi_account_compression_program(cpi_data, cpi_ix_bytes)?; - - // emit_state_transition_event( - // inputs, - // &ctx, - // input_compressed_account_hashes, - // output_compressed_account_hashes, - // output_compressed_account_indices, - // sequence_numbers, - // )?; - bench_sbf_end!("emit_state_transition_event"); - - Ok(()) + // 16. Transfer network, address, and rollover fees --------------------------------------------------- + // Note: we transfer rollover fees from the system program instead + // of the account compression program to reduce cpi depth. + context.transfer_fees(&ctx.remaining_accounts, &ctx.accounts.get_fee_payer())?; + // 17. CPI account compression program --------------------------------------------------- + cpi_account_compression_program(context, cpi_ix_bytes) } #[inline(always)] @@ -538,64 +439,3 @@ fn filter_for_accounts_not_proven_by_index( } } } - -/// Network fee distribution: -/// - if any account is created or modified -> transfer network fee (5000 lamports) -/// (Previously we didn't charge for appends now we have to since values go into a queue.) -/// - if an address is created -> transfer an additional network fee (5000 lamports) -/// -/// Examples: -/// 1. create account with address network fee 10,000 lamports -/// 2. token transfer network fee 5,000 lamports -/// 3. 
mint token network fee 5,000 lamports -#[inline(always)] -fn transfer_network_fee< - 'a, - 'b, - 'c: 'info, - 'info, - A: InvokeAccounts<'info> + SignerAccounts<'info> + Bumps, ->( - ctx: &Context<'a, 'b, 'c, 'info, A>, - input_network_fee_bundle: Option<(u8, u64)>, - address_network_fee_bundle: Option<(u8, u64)>, - output_network_fee_bundle: Option<(u8, u64)>, -) -> Result<()> { - if let Some(network_fee_bundle) = input_network_fee_bundle { - let address_fee = if let Some(network_fee_bundle) = address_network_fee_bundle { - let (_, network_fee) = network_fee_bundle; - network_fee - } else { - 0 - }; - let (remaining_account_index, mut network_fee) = network_fee_bundle; - network_fee += address_fee; - transfer_lamports_cpi( - ctx.accounts.get_fee_payer(), - &ctx.remaining_accounts[remaining_account_index as usize], - network_fee, - )?; - } else if let Some(network_fee_bundle) = output_network_fee_bundle { - let address_fee = if let Some(network_fee_bundle) = address_network_fee_bundle { - let (_, network_fee) = network_fee_bundle; - network_fee - } else { - 0 - }; - let (remaining_account_index, mut network_fee) = network_fee_bundle; - network_fee += address_fee; - transfer_lamports_cpi( - ctx.accounts.get_fee_payer(), - &ctx.remaining_accounts[remaining_account_index as usize], - network_fee, - )?; - } else if let Some(network_fee_bundle) = address_network_fee_bundle { - let (remaining_account_index, network_fee) = network_fee_bundle; - transfer_lamports_cpi( - ctx.accounts.get_fee_payer(), - &ctx.remaining_accounts[remaining_account_index as usize], - network_fee, - )?; - } - Ok(()) -} diff --git a/programs/system/src/processor/read_only_account.rs b/programs/system/src/processor/read_only_account.rs new file mode 100644 index 0000000000..e660064a91 --- /dev/null +++ b/programs/system/src/processor/read_only_account.rs @@ -0,0 +1,68 @@ +use account_compression::{context::AcpAccount, insert_into_queues::get_queue_and_tree_accounts}; +use anchor_lang::prelude::*; + 
+use crate::{errors::SystemProgramError, instruction_data::ZPackedReadOnlyCompressedAccount}; + +/// For each read-only account +/// 1. prove inclusion by index in the output queue if leaf index should exist in the output queue. +/// 1.1. if inclusion was proven by index, return Ok. +/// 2. prove non-inclusion in the bloom filters +/// 2.1. skip cleared batches. +/// 2.2. prove non-inclusion in the bloom filters for each batch. +#[inline(always)] +pub fn verify_read_only_account_inclusion_by_index<'a>( + accounts: &mut [AcpAccount<'a, '_>], + read_only_accounts: &'a [ZPackedReadOnlyCompressedAccount], +) -> Result { + let mut num_prove_read_only_accounts_prove_by_index = 0; + for read_only_account in read_only_accounts.iter() { + let queue_index = read_only_account + .merkle_context + .nullifier_queue_pubkey_index; + let tree_index = read_only_account.merkle_context.merkle_tree_pubkey_index; + let (output_queue_account_info, merkle_tree_account_info) = + get_queue_and_tree_accounts(accounts, queue_index as usize, tree_index as usize)?; + + let output_queue = if let AcpAccount::OutputQueue(queue) = output_queue_account_info { + queue + } else { + return err!(SystemProgramError::InvalidAccount); + }; + let merkle_tree = if let AcpAccount::BatchedStateTree(tree) = merkle_tree_account_info { + tree + } else { + return err!(SystemProgramError::InvalidAccount); + }; + output_queue + .check_is_associated(&merkle_tree.pubkey()) + .map_err(ProgramError::from)?; + + // Checks inclusion by index in the output queue if leaf index should exist in the output queue. + // Else does nothing. 
+ let proved_inclusion = output_queue + .prove_inclusion_by_index( + read_only_account.merkle_context.leaf_index.into(), + &read_only_account.account_hash, + ) + .map_err(|_| SystemProgramError::ReadOnlyAccountDoesNotExist)?; + if read_only_account.merkle_context.prove_by_index() { + num_prove_read_only_accounts_prove_by_index += 1; + } + // If a read-only account is marked as proven by index + // inclusion proof by index has to be successful + // -> proved_inclusion == true. + if !proved_inclusion && read_only_account.merkle_context.prove_by_index() { + msg!("Expected read-only account in the output queue but account does not exist."); + return err!(SystemProgramError::ReadOnlyAccountDoesNotExist); + } + // If we prove inclusion by index we do not need to check non-inclusion in bloom filters. + // Since proving inclusion by index of non-read + // only accounts overwrites the leaf in the output queue. + if !proved_inclusion { + merkle_tree + .check_input_queue_non_inclusion(&read_only_account.account_hash) + .map_err(|_| SystemProgramError::ReadOnlyAccountDoesNotExist)?; + } + } + Ok(num_prove_read_only_accounts_prove_by_index) +} diff --git a/programs/system/src/processor/read_only_address.rs b/programs/system/src/processor/read_only_address.rs new file mode 100644 index 0000000000..f0162b5872 --- /dev/null +++ b/programs/system/src/processor/read_only_address.rs @@ -0,0 +1,27 @@ +use account_compression::context::AcpAccount; +use anchor_lang::prelude::*; + +use crate::{errors::SystemProgramError, instruction_data::ZPackedReadOnlyAddress}; + +#[inline(always)] +pub fn verify_read_only_address_queue_non_inclusion<'a, 'info>( + remaining_accounts: &mut [AcpAccount<'a, 'info>], + read_only_addresses: &'a [ZPackedReadOnlyAddress], +) -> Result<()> { + if read_only_addresses.is_empty() { + return Ok(()); + } + for read_only_address in read_only_addresses.iter() { + let merkle_tree = if let AcpAccount::BatchedStateTree(tree) = + &mut 
remaining_accounts[read_only_address.address_merkle_tree_account_index as usize] + { + tree + } else { + return err!(SystemProgramError::InvalidAccount); + }; + merkle_tree + .check_input_queue_non_inclusion(&read_only_address.address) + .map_err(|_| SystemProgramError::ReadOnlyAddressAlreadyExists)?; + } + Ok(()) +} diff --git a/programs/system/src/invoke/sol_compression.rs b/programs/system/src/processor/sol_compression.rs similarity index 100% rename from programs/system/src/invoke/sol_compression.rs rename to programs/system/src/processor/sol_compression.rs diff --git a/programs/system/src/invoke/sum_check.rs b/programs/system/src/processor/sum_check.rs similarity index 99% rename from programs/system/src/invoke/sum_check.rs rename to programs/system/src/processor/sum_check.rs index 337ae81232..6a85c6e20a 100644 --- a/programs/system/src/invoke/sum_check.rs +++ b/programs/system/src/processor/sum_check.rs @@ -1,5 +1,4 @@ use anchor_lang::{solana_program::program_error::ProgramError, Result}; -use light_macros::heap_neutral; use crate::{ errors::SystemProgramError, @@ -8,8 +7,7 @@ use crate::{ }, }; -#[inline(never)] -#[heap_neutral] +#[inline(always)] pub fn sum_check( input_compressed_accounts_with_merkle_context: &[ZPackedCompressedAccountWithMerkleContext< '_, diff --git a/programs/system/src/processor/verify_proof.rs b/programs/system/src/processor/verify_proof.rs new file mode 100644 index 0000000000..9143460fb3 --- /dev/null +++ b/programs/system/src/processor/verify_proof.rs @@ -0,0 +1,258 @@ +use account_compression::{context::AcpAccount, errors::AccountCompressionErrorCode}; +use anchor_lang::prelude::*; +use light_batched_merkle_tree::constants::{ + DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_STATE_TREE_HEIGHT, +}; + +use light_utils::hashchain::{create_hash_chain_from_slice, create_two_inputs_hash_chain}; +use light_verifier::{ + select_verifying_key, verify_create_addresses_and_inclusion_proof, + verify_create_addresses_proof, 
verify_inclusion_proof, CompressedProof, +}; + +use crate::{ + errors::SystemProgramError, + instruction_data::{ + ZNewAddressParamsPacked, ZPackedCompressedAccountWithMerkleContext, ZPackedReadOnlyAddress, + ZPackedReadOnlyCompressedAccount, + }, +}; + +const IS_READ_ONLY: bool = true; +const IS_NOT_READ_ONLY: bool = false; +const IS_STATE: bool = true; +const IS_NOT_STATE: bool = false; + +#[inline(always)] +pub fn read_input_state_roots<'a>( + remaining_accounts: &'a [AcpAccount<'a, '_>], + input_compressed_accounts_with_merkle_context: &'a [ZPackedCompressedAccountWithMerkleContext<'a>], + read_only_accounts: &'a [ZPackedReadOnlyCompressedAccount], + input_roots: &'a mut Vec<[u8; 32]>, +) -> Result { + let mut state_tree_height = 0; + for input_compressed_account_with_context in + input_compressed_accounts_with_merkle_context.iter() + { + if input_compressed_account_with_context + .merkle_context + .prove_by_index() + { + continue; + } + let internal_height = read_root::( + &remaining_accounts[input_compressed_account_with_context + .merkle_context + .merkle_tree_pubkey_index as usize], + u16::from(input_compressed_account_with_context.root_index), + input_roots, + )?; + if state_tree_height == 0 { + state_tree_height = internal_height; + } else if state_tree_height != internal_height { + msg!( + "tree height {} != internal height {}", + state_tree_height, + internal_height + ); + return err!(SystemProgramError::InvalidStateTreeHeight); + } + } + for readonly_input_account in read_only_accounts.iter() { + if readonly_input_account.merkle_context.prove_by_index() { + continue; + } + let internal_height = read_root::( + &remaining_accounts[readonly_input_account + .merkle_context + .merkle_tree_pubkey_index as usize], + readonly_input_account.root_index.into(), + input_roots, + )?; + if state_tree_height == 0 { + state_tree_height = internal_height; + } else if state_tree_height != internal_height { + msg!( + "tree height {} != internal height {}", + 
state_tree_height, + internal_height + ); + return err!(SystemProgramError::InvalidStateTreeHeight); + } + } + Ok(state_tree_height) +} + +#[inline(always)] +pub fn read_address_roots<'a>( + remaining_accounts: &'a [AcpAccount<'a, '_>], + new_address_params: &'a [ZNewAddressParamsPacked], + read_only_addresses: &'a [ZPackedReadOnlyAddress], + address_roots: &'a mut Vec<[u8; 32]>, +) -> Result { + let mut address_tree_height = 0; + for new_address_param in new_address_params.iter() { + let internal_height = read_root::( + &remaining_accounts[new_address_param.address_merkle_tree_account_index as usize], + new_address_param.address_merkle_tree_root_index.into(), + address_roots, + )?; + if address_tree_height == 0 { + address_tree_height = internal_height; + } else if address_tree_height != internal_height { + msg!( + "tree height {} != internal height {}", + address_tree_height, + internal_height + ); + return err!(SystemProgramError::InvalidAddressTreeHeight); + } + } + for read_only_address in read_only_addresses.iter() { + let internal_height = read_root::( + &remaining_accounts[read_only_address.address_merkle_tree_account_index as usize], + read_only_address.address_merkle_tree_root_index.into(), + address_roots, + )?; + if address_tree_height == 0 { + address_tree_height = internal_height; + } else if address_tree_height != internal_height { + msg!( + "tree height {} != internal height {}", + address_tree_height, + internal_height + ); + return err!(SystemProgramError::InvalidAddressTreeHeight); + } + } + + Ok(address_tree_height) +} + +#[inline(always)] +fn read_root( + merkle_tree_account: &AcpAccount<'_, '_>, + root_index: u16, + roots: &mut Vec<[u8; 32]>, +) -> Result { + let height; + // let mut discriminator_bytes = [0u8; 8]; + // discriminator_bytes.copy_from_slice(&merkle_tree_account_info.try_borrow_data()?[0..8]); + match merkle_tree_account { + AcpAccount::AddressTree((_, merkle_tree)) => { + // AddressMerkleTreeAccount::DISCRIMINATOR => { + if 
IS_READ_ONLY { + msg!("Read only addresses are only supported for batched address trees."); + return err!( + AccountCompressionErrorCode::AddressMerkleTreeAccountDiscriminatorMismatch + ); + } + // let merkle_tree = merkle_tree_account_info.try_borrow_data()?; + // let merkle_tree = + // IndexedMerkleTreeZeroCopy::::from_bytes_zero_copy( + // &merkle_tree[8 + mem::size_of::()..], + // ) + // .map_err(ProgramError::from)?; + height = merkle_tree.height as u8; + (*roots).push(merkle_tree.roots[root_index as usize]); + } + // BatchedMerkleTreeAccount::DISCRIMINATOR => { + AcpAccount::BatchedStateTree(merkle_tree) => { + (*roots).push(merkle_tree.root_history[root_index as usize]); + height = merkle_tree.height as u8; + } + AcpAccount::BatchedAddressTree(merkle_tree) => { + if IS_READ_ONLY { + msg!("Read only addresses are only supported for batched address trees."); + return err!( + AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch + ); + } + height = merkle_tree.height as u8; + (*roots).push(merkle_tree.root_history[root_index as usize]); + } + AcpAccount::StateTree((_, merkle_tree)) => { + if IS_READ_ONLY { + msg!("Read only addresses are only supported for batched address trees."); + return err!( + AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch + ); + } + let fetched_roots = &merkle_tree.roots; + + (*roots).push(fetched_roots[root_index as usize]); + height = merkle_tree.height as u8; + } + _ => { + if IS_STATE { + return err!( + AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch + ); + } else { + return err!( + AccountCompressionErrorCode::AddressMerkleTreeAccountDiscriminatorMismatch + ); + } + } + } + Ok(height) +} + +#[allow(clippy::too_many_arguments)] +pub fn verify_proof( + roots: &[[u8; 32]], + leaves: &[[u8; 32]], + address_roots: &[[u8; 32]], + addresses: &[[u8; 32]], + compressed_proof: &CompressedProof, + address_tree_height: u8, + state_tree_height: u8, +) -> anchor_lang::Result<()> 
{ + if state_tree_height as u32 == DEFAULT_BATCH_STATE_TREE_HEIGHT + || address_tree_height as u32 == DEFAULT_BATCH_ADDRESS_TREE_HEIGHT + { + let public_input_hash = if !leaves.is_empty() && !addresses.is_empty() { + // combined inclusion & non-inclusion proof + let inclusion_hash = + create_two_inputs_hash_chain(roots, leaves).map_err(ProgramError::from)?; + let non_inclusion_hash = create_two_inputs_hash_chain(address_roots, addresses) + .map_err(ProgramError::from)?; + create_hash_chain_from_slice(&[inclusion_hash, non_inclusion_hash]) + .map_err(ProgramError::from)? + } else if !leaves.is_empty() { + // inclusion proof + create_two_inputs_hash_chain(roots, leaves).map_err(ProgramError::from)? + } else { + // TODO: compute with addresses + // non-inclusion proof + create_two_inputs_hash_chain(address_roots, addresses).map_err(ProgramError::from)? + }; + + let vk = select_verifying_key(leaves.len(), addresses.len()).map_err(ProgramError::from)?; + light_verifier::verify(&[public_input_hash], compressed_proof, vk) + .map_err(ProgramError::from)?; + } else if state_tree_height == 26 && address_tree_height == 26 { + // legacy combined inclusion & non-inclusion proof + verify_create_addresses_and_inclusion_proof( + roots, + leaves, + address_roots, + addresses, + compressed_proof, + ) + .map_err(ProgramError::from)?; + } else if state_tree_height == 26 { + // legacy inclusion proof + verify_inclusion_proof(roots, leaves, compressed_proof).map_err(ProgramError::from)?; + } else if address_tree_height == 26 { + // legacy non-inclusion proof + verify_create_addresses_proof(address_roots, addresses, compressed_proof) + .map_err(ProgramError::from)?; + } else { + msg!("state tree height: {}", state_tree_height); + msg!("address tree height: {}", address_tree_height); + return err!(SystemProgramError::InvalidAddressTreeHeight); + } + + Ok(()) +} diff --git a/programs/system/src/sdk/compressed_account.rs b/programs/system/src/sdk/compressed_account.rs index 
57a7d3fb21..f343e0fe63 100644 --- a/programs/system/src/sdk/compressed_account.rs +++ b/programs/system/src/sdk/compressed_account.rs @@ -5,7 +5,7 @@ use light_hasher::{Hasher, Poseidon}; use light_utils::hash_to_bn254_field_size_be; use super::address::pack_account; -use crate::{instruction_data::ZCompressedAccount, OutputCompressedAccountWithPackedContext}; +use crate::OutputCompressedAccountWithPackedContext; #[derive(Debug, PartialEq, Default, Clone, AnchorSerialize, AnchorDeserialize)] pub struct PackedCompressedAccountWithMerkleContext { @@ -243,66 +243,6 @@ impl CompressedAccount { } } -impl ZCompressedAccount<'_> { - pub fn hash_with_hashed_values( - &self, - &owner_hashed: &[u8; 32], - &merkle_tree_hashed: &[u8; 32], - leaf_index: &u32, - ) -> Result<[u8; 32]> { - let capacity = 3 - + std::cmp::min(u64::from(self.lamports), 1) as usize - + self.address.is_some() as usize - + self.data.is_some() as usize * 2; - let mut vec: Vec<&[u8]> = Vec::with_capacity(capacity); - vec.push(owner_hashed.as_slice()); - - // leaf index and merkle tree pubkey are used to make every compressed account hash unique - let leaf_index = leaf_index.to_le_bytes(); - vec.push(leaf_index.as_slice()); - - vec.push(merkle_tree_hashed.as_slice()); - - // Lamports are only hashed if non-zero to safe CU - // For safety we prefix the lamports with 1 in 1 byte. - // Thus even if the discriminator has the same value as the lamports, the hash will be different. 
- let mut lamports_bytes = [1, 0, 0, 0, 0, 0, 0, 0, 0]; - if self.lamports != 0 { - lamports_bytes[1..].copy_from_slice(&(u64::from(self.lamports)).to_le_bytes()); - vec.push(lamports_bytes.as_slice()); - } - - if self.address.is_some() { - vec.push(self.address.as_ref().unwrap().as_slice()); - } - - let mut discriminator_bytes = [2, 0, 0, 0, 0, 0, 0, 0, 0]; - if let Some(data) = &self.data { - discriminator_bytes[1..].copy_from_slice(data.discriminator.as_slice()); - vec.push(&discriminator_bytes); - vec.push(data.data_hash.as_slice()); - } - let hash = H::hashv(&vec).map_err(ProgramError::from)?; - Ok(hash) - } - - pub fn hash( - &self, - &merkle_tree_pubkey: &Pubkey, - leaf_index: &u32, - ) -> Result<[u8; 32]> { - self.hash_with_hashed_values::( - &hash_to_bn254_field_size_be(&self.owner.to_bytes()) - .unwrap() - .0, - &hash_to_bn254_field_size_be(&merkle_tree_pubkey.to_bytes()) - .unwrap() - .0, - leaf_index, - ) - } -} - #[cfg(test)] mod tests { use light_hasher::Poseidon; diff --git a/programs/system/src/sdk/event.rs b/programs/system/src/sdk/event.rs index 42c8bfe12a..091cb0715a 100644 --- a/programs/system/src/sdk/event.rs +++ b/programs/system/src/sdk/event.rs @@ -1,5 +1,3 @@ -use std::io::Write; - use anchor_lang::{ prelude::borsh, solana_program::pubkey::Pubkey, AnchorDeserialize, AnchorSerialize, }; @@ -25,191 +23,3 @@ pub struct PublicTransactionEvent { pub pubkey_array: Vec, pub message: Option>, } - -impl PublicTransactionEvent { - pub fn man_serialize(&self, writer: &mut W) -> std::io::Result<()> { - writer.write_all(&(self.input_compressed_account_hashes.len() as u32).to_le_bytes())?; - for hash in self.input_compressed_account_hashes.iter() { - writer.write_all(hash)?; - } - - writer.write_all(&(self.output_compressed_account_hashes.len() as u32).to_le_bytes())?; - for hash in self.output_compressed_account_hashes.iter() { - writer.write_all(hash)?; - } - - #[cfg(target_os = "solana")] - let pos = light_heap::GLOBAL_ALLOCATOR.get_heap_pos(); - 
writer.write_all(&(self.output_compressed_accounts.len() as u32).to_le_bytes())?; - for i in 0..self.output_compressed_accounts.len() { - let account = self.output_compressed_accounts[i].clone(); - account.serialize(writer)?; - } - #[cfg(target_os = "solana")] - light_heap::GLOBAL_ALLOCATOR.free_heap(pos).unwrap(); - - writer.write_all(&(self.output_leaf_indices.len() as u32).to_le_bytes())?; - for index in self.output_leaf_indices.iter() { - writer.write_all(&index.to_le_bytes())?; - } - - writer.write_all(&(self.sequence_numbers.len() as u32).to_le_bytes())?; - for element in self.sequence_numbers.iter() { - writer.write_all(&element.pubkey.to_bytes())?; - writer.write_all(&element.seq.to_le_bytes())?; - } - match self.relay_fee { - Some(relay_fee) => { - writer.write_all(&[1])?; - writer.write_all(&relay_fee.to_le_bytes()) - } - None => writer.write_all(&[0]), - }?; - - writer.write_all(&[self.is_compress as u8])?; - - match self.compress_or_decompress_lamports { - Some(compress_or_decompress_lamports) => { - writer.write_all(&[1])?; - writer.write_all(&compress_or_decompress_lamports.to_le_bytes()) - } - None => writer.write_all(&[0]), - }?; - - writer.write_all(&(self.pubkey_array.len() as u32).to_le_bytes())?; - for pubkey in self.pubkey_array.iter() { - writer.write_all(&pubkey.to_bytes())?; - } - - match &self.message { - Some(message) => { - writer.write_all(&[1])?; - writer.write_all(&(message.len() as u32).to_le_bytes())?; - writer.write_all(message) - } - None => writer.write_all(&[0]), - }?; - - Ok(()) - } -} - -#[cfg(test)] -pub mod test { - use rand::Rng; - use solana_sdk::{signature::Keypair, signer::Signer}; - - use super::*; - use crate::sdk::compressed_account::{CompressedAccount, CompressedAccountData}; - - #[test] - fn test_manual_vs_borsh_serialization() { - // Create a sample `PublicTransactionEvent` instance - let event = PublicTransactionEvent { - input_compressed_account_hashes: vec![[0u8; 32], [1u8; 32]], - 
output_compressed_account_hashes: vec![[2u8; 32], [3u8; 32]], - output_compressed_accounts: vec![OutputCompressedAccountWithPackedContext { - compressed_account: CompressedAccount { - owner: Keypair::new().pubkey(), - lamports: 100, - address: Some([5u8; 32]), - data: Some(CompressedAccountData { - discriminator: [6u8; 8], - data: vec![7u8; 32], - data_hash: [8u8; 32], - }), - }, - merkle_tree_index: 1, - }], - sequence_numbers: vec![ - MerkleTreeSequenceNumber { - pubkey: Keypair::new().pubkey(), - seq: 10, - }, - MerkleTreeSequenceNumber { - pubkey: Keypair::new().pubkey(), - seq: 2, - }, - ], - output_leaf_indices: vec![4, 5, 6], - relay_fee: Some(1000), - is_compress: true, - compress_or_decompress_lamports: Some(5000), - pubkey_array: vec![Keypair::new().pubkey(), Keypair::new().pubkey()], - message: Some(vec![8, 9, 10]), - }; - - // Serialize using Borsh - let borsh_serialized = event.try_to_vec().unwrap(); - - // Serialize manually - let mut manual_serialized = Vec::new(); - event.man_serialize(&mut manual_serialized).unwrap(); - - // Compare the two byte arrays - assert_eq!( - borsh_serialized, manual_serialized, - "Borsh and manual serialization results should match" - ); - } - - #[test] - fn test_serialization_consistency() { - let mut rng = rand::thread_rng(); - - for _ in 0..10_000 { - let input_hashes: Vec<[u8; 32]> = - (0..rng.gen_range(1..10)).map(|_| rng.gen()).collect(); - let output_hashes: Vec<[u8; 32]> = - (0..rng.gen_range(1..10)).map(|_| rng.gen()).collect(); - let output_accounts: Vec = (0..rng - .gen_range(1..10)) - .map(|_| OutputCompressedAccountWithPackedContext { - compressed_account: CompressedAccount { - owner: Keypair::new().pubkey(), - lamports: rng.gen(), - address: Some(rng.gen()), - data: None, - }, - merkle_tree_index: rng.gen(), - }) - .collect(); - let leaf_indices: Vec = (0..rng.gen_range(1..10)).map(|_| rng.gen()).collect(); - let pubkeys: Vec = (0..rng.gen_range(1..10)) - .map(|_| Keypair::new().pubkey()) - .collect(); - let 
message: Option> = if rng.gen() { - Some((0..rng.gen_range(1..100)).map(|_| rng.gen()).collect()) - } else { - None - }; - - let event = PublicTransactionEvent { - input_compressed_account_hashes: input_hashes, - output_compressed_account_hashes: output_hashes, - output_compressed_accounts: output_accounts, - output_leaf_indices: leaf_indices, - sequence_numbers: (0..rng.gen_range(1..10)) - .map(|_| MerkleTreeSequenceNumber { - pubkey: Keypair::new().pubkey(), - seq: rng.gen(), - }) - .collect(), - relay_fee: if rng.gen() { Some(rng.gen()) } else { None }, - is_compress: rng.gen(), - compress_or_decompress_lamports: if rng.gen() { Some(rng.gen()) } else { None }, - pubkey_array: pubkeys, - message, - }; - - let borsh_serialized = event.try_to_vec().unwrap(); - let mut manual_serialized = Vec::new(); - event.man_serialize(&mut manual_serialized).unwrap(); - - assert_eq!( - borsh_serialized, manual_serialized, - "Borsh and manual serialization results should match" - ); - } - } -} diff --git a/programs/system/src/sdk/invoke.rs b/programs/system/src/sdk/invoke.rs index b009ba81a2..e6303f7d55 100644 --- a/programs/system/src/sdk/invoke.rs +++ b/programs/system/src/sdk/invoke.rs @@ -11,7 +11,7 @@ use super::compressed_account::{ CompressedAccount, MerkleContext, PackedCompressedAccountWithMerkleContext, PackedMerkleContext, }; use crate::{ - invoke::{processor::CompressedProof, sol_compression::SOL_POOL_PDA_SEED}, + processor::{processor::CompressedProof, sol_compression::SOL_POOL_PDA_SEED}, utils::{get_cpi_authority_pda, get_registered_program_pda}, InstructionDataInvoke, NewAddressParams, NewAddressParamsPacked, OutputCompressedAccountWithPackedContext, @@ -20,7 +20,7 @@ use crate::{ pub fn get_sol_pool_pda() -> Pubkey { Pubkey::find_program_address(&[SOL_POOL_PDA_SEED], &crate::ID).0 } - +// TODO: move to light-test-utils #[allow(clippy::too_many_arguments)] pub fn create_invoke_instruction( fee_payer: &Pubkey, diff --git a/programs/system/src/utils.rs 
b/programs/system/src/utils.rs index 86239aecbc..c5df94f011 100644 --- a/programs/system/src/utils.rs +++ b/programs/system/src/utils.rs @@ -1,6 +1,6 @@ use account_compression::utils::constants::CPI_AUTHORITY_PDA_SEED; use anchor_lang::solana_program::pubkey::Pubkey; - +// TODO: move file to sdk or test-utils pub fn get_registered_program_pda(program_id: &Pubkey) -> Pubkey { Pubkey::find_program_address( &[program_id.to_bytes().as_slice()], diff --git a/sdk-libs/program-test/src/test_batch_forester.rs b/sdk-libs/program-test/src/test_batch_forester.rs index 3d6014815a..1ca2e8baf8 100644 --- a/sdk-libs/program-test/src/test_batch_forester.rs +++ b/sdk-libs/program-test/src/test_batch_forester.rs @@ -493,7 +493,11 @@ pub async fn assert_registry_created_batched_state_merkle_tree AccountZeroCopy::::new(rpc, merkle_tree_pubkey).await; let mut queue = AccountZeroCopy::::new(rpc, output_queue_pubkey).await; - let mt_params = CreateTreeParams::from_state_ix_params(params, payer_pubkey.into()); + let mt_params = CreateTreeParams::from_state_ix_params( + params, + payer_pubkey.into(), + merkle_tree_pubkey.into(), + ); let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(mt_params, output_queue_pubkey.into()); @@ -528,6 +532,7 @@ pub async fn assert_registry_created_batched_state_merkle_tree payer_pubkey.into(), total_rent, merkle_tree_pubkey.into(), + output_queue_pubkey.into(), ); let ref_output_queue_account = create_output_queue_account(queue_params); @@ -660,7 +665,11 @@ pub async fn assert_perform_state_mt_roll_over( .await .unwrap() .unwrap(); - let create_tree_params = CreateTreeParams::from_state_ix_params(params, owner.into()); + let create_tree_params = CreateTreeParams::from_state_ix_params( + params, + owner.into(), + old_state_merkle_tree_pubkey.into(), + ); let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(create_tree_params, old_queue_pubkey.into()); @@ -677,9 +686,17 @@ pub async fn assert_perform_state_mt_roll_over( owner.into(), 
new_queue_account.lamports + new_state_merkle_tree.lamports + additional_bytes_rent, old_state_merkle_tree_pubkey.into(), + old_queue_pubkey.into(), ); let ref_queue_account = create_output_queue_account(queue_params); - let mut new_ref_queue_account = ref_queue_account; + let queue_params = CreateOutputQueueParams::from( + params, + owner.into(), + new_queue_account.lamports + new_state_merkle_tree.lamports + additional_bytes_rent, + old_state_merkle_tree_pubkey.into(), + new_queue_pubkey.into(), + ); + let mut new_ref_queue_account = create_output_queue_account(queue_params); new_ref_queue_account.metadata.associated_merkle_tree = new_state_merkle_tree_pubkey.into(); let mut new_ref_mt_account = ref_mt_account; new_ref_mt_account.metadata.associated_queue = new_queue_pubkey.into(); @@ -760,7 +777,11 @@ pub async fn assert_registry_created_batched_address_merkle_tree