From b0475cef87bcbaa8158fd12184a186b7d534e675 Mon Sep 17 00:00:00 2001 From: Michal Rostecki Date: Tue, 26 Mar 2024 10:48:23 +0100 Subject: [PATCH] refactor: Remove array fields from Merkle tree accounts One more step towards making Merkle trees dynamic! * Stop hard coding the array sizes in accounts. This way, we don't have to re-compile the program to change the array sizes. * Don't store the pointers in the account. Instead, just store primitive fields we are interested in and manually restore vectors. --- merkle-tree/concurrent/src/errors.rs | 18 +- merkle-tree/concurrent/src/lib.rs | 631 +++++++++--------- merkle-tree/concurrent/tests/tests.rs | 4 +- merkle-tree/indexed/src/lib.rs | 98 ++- pnpm-lock.yaml | 4 - .../src/instructions/append_leaves.rs | 6 +- .../initialize_address_merkle_tree.rs | 29 +- .../initialize_concurrent_merkle_tree.rs | 31 +- .../src/instructions/nullify_leaves.rs | 10 +- .../update_address_merkle_tree.rs | 7 +- .../account-compression/src/state/address.rs | 118 ++-- .../src/state/public_state_merkle_tree.rs | 144 ++-- xtask/src/type_sizes.rs | 115 ++-- 13 files changed, 542 insertions(+), 673 deletions(-) diff --git a/merkle-tree/concurrent/src/errors.rs b/merkle-tree/concurrent/src/errors.rs index cbb1a1e188..65588fcd0b 100644 --- a/merkle-tree/concurrent/src/errors.rs +++ b/merkle-tree/concurrent/src/errors.rs @@ -36,16 +36,8 @@ pub enum ConcurrentMerkleTreeError { "Found an empty node in the Merkle path buffer, where we expected all nodes to be filled" )] MerklePathsEmptyNode, - #[error("Invalid struct buffer size, expected {0}, got {1}")] - StructBufferSize(usize, usize), - #[error("Invalid filled subtrees buffer size, expected {0}, got {1}")] - FilledSubtreesBufferSize(usize, usize), - #[error("Invalid changelog buffer size, expected {0}, got {1}")] - ChangelogBufferSize(usize, usize), - #[error("Invalid root buffer size, expected {0}, got {1}")] - RootBufferSize(usize, usize), - #[error("Invalid canopy buffer size, expected {0}, got {1}")] - 
CanopyBufferSize(usize, usize), + #[error("Invalid buffer size, expected {0}, got {1}")] + BufferSize(usize, usize), #[error("Hasher error: {0}")] Hasher(#[from] HasherError), #[error("Bounded vector error: {0}")] @@ -73,11 +65,7 @@ impl From for u32 { ConcurrentMerkleTreeError::EmptyLeaves => 2013, ConcurrentMerkleTreeError::EmptyChangelogEntries => 2014, ConcurrentMerkleTreeError::MerklePathsEmptyNode => 2015, - ConcurrentMerkleTreeError::StructBufferSize(_, _) => 2016, - ConcurrentMerkleTreeError::FilledSubtreesBufferSize(_, _) => 2017, - ConcurrentMerkleTreeError::ChangelogBufferSize(_, _) => 2018, - ConcurrentMerkleTreeError::RootBufferSize(_, _) => 2019, - ConcurrentMerkleTreeError::CanopyBufferSize(_, _) => 2020, + ConcurrentMerkleTreeError::BufferSize(_, _) => 2016, ConcurrentMerkleTreeError::Hasher(e) => e.into(), ConcurrentMerkleTreeError::BoundedVec(e) => e.into(), } diff --git a/merkle-tree/concurrent/src/lib.rs b/merkle-tree/concurrent/src/lib.rs index 17f7a8e855..dfc5c908da 100644 --- a/merkle-tree/concurrent/src/lib.rs +++ b/merkle-tree/concurrent/src/lib.rs @@ -90,6 +90,52 @@ where (1 << (canopy_depth + 1)) - 2 } + /// Size of the struct **without** dynamically sized fields (`BoundedVec`, + /// `CyclicBoundedVec`). + pub fn non_dyn_fields_size() -> usize { + // height + 8 + // changelog_capacity + + 8 + // changelog_length + + 8 + // current_changelog_index + + 8 + // roots_capacity + + 8 + // roots_length + + 8 + // current_root_index + + 8 + // canopy_depth + + 8 + // next_index + + 8 + // sequence_number + + 8 + // rightmost_leaf + + 32 + } + + // TODO(vadorovsky): Make a macro for that. 
+ pub fn size( + height: usize, + changelog_size: usize, + roots_size: usize, + canopy_depth: usize, + ) -> usize { + // non-dynamic fields + Self::non_dyn_fields_size() + // filled_subtrees + + mem::size_of::<[u8; 32]>() * height + // changelog + + mem::size_of::>() * changelog_size + // roots + + mem::size_of::<[u8; 32]>() * roots_size + // canopy + + mem::size_of::<[u8; 32]>() * Self::canopy_size(canopy_depth) + } + pub fn new( height: usize, changelog_size: usize, @@ -147,88 +193,53 @@ where /// Merkle tree is the caller's responsibility. /// /// It can be used correctly in async Rust. - pub unsafe fn copy_from_bytes( - bytes_struct: &[u8], - bytes_filled_subtrees: &[u8], - bytes_changelog: &[u8], - bytes_roots: &[u8], - ) -> Result { - let expected_bytes_struct_size = mem::size_of::(); - if bytes_struct.len() != expected_bytes_struct_size { - return Err(ConcurrentMerkleTreeError::StructBufferSize( - expected_bytes_struct_size, - bytes_struct.len(), + pub unsafe fn from_bytes_copy(bytes: &[u8]) -> Result { + if bytes.len() < Self::non_dyn_fields_size() { + return Err(ConcurrentMerkleTreeError::BufferSize( + Self::non_dyn_fields_size(), + bytes.len(), )); } - let struct_ref: *mut Self = bytes_struct.as_ptr() as _; - - let mut merkle_tree = unsafe { - Self { - height: (*struct_ref).height, - - changelog_capacity: (*struct_ref).changelog_capacity, - changelog_length: (*struct_ref).changelog_length, - current_changelog_index: (*struct_ref).current_changelog_index, - roots_capacity: (*struct_ref).roots_capacity, - roots_length: (*struct_ref).roots_length, - current_root_index: (*struct_ref).current_root_index, + let mut merkle_tree = Self::struct_from_bytes(&bytes[..Self::non_dyn_fields_size()])?; - canopy_depth: (*struct_ref).canopy_depth, - - next_index: (*struct_ref).next_index, - sequence_number: (*struct_ref).sequence_number, - rightmost_leaf: (*struct_ref).rightmost_leaf, - - filled_subtrees: BoundedVec::with_capacity((*struct_ref).height), - changelog: 
CyclicBoundedVec::with_capacity((*struct_ref).changelog_capacity), - roots: CyclicBoundedVec::with_capacity((*struct_ref).roots_capacity), - canopy: BoundedVec::with_capacity(Self::canopy_size((*struct_ref).canopy_depth)), - - _hasher: PhantomData, - } - }; - - let expected_bytes_filled_subtrees_size = mem::size_of::<[u8; 32]>() * (*struct_ref).height; - if bytes_filled_subtrees.len() != expected_bytes_filled_subtrees_size { - return Err(ConcurrentMerkleTreeError::FilledSubtreesBufferSize( - expected_bytes_filled_subtrees_size, - bytes_filled_subtrees.len(), + let expected_size = Self::size( + merkle_tree.height, + merkle_tree.changelog_capacity, + merkle_tree.roots_capacity, + merkle_tree.canopy_depth, + ); + if bytes.len() != expected_size { + return Err(ConcurrentMerkleTreeError::BufferSize( + expected_size, + bytes.len(), )); } - let filled_subtrees: &[[u8; 32]] = slice::from_raw_parts( - bytes_filled_subtrees.as_ptr() as *const _, - (*struct_ref).height, - ); + + let offset = Self::non_dyn_fields_size(); + let filled_subtrees_size = mem::size_of::<[u8; 32]>() * merkle_tree.height; + let filled_subtrees: &[[u8; 32]] = + slice::from_raw_parts(bytes.as_ptr().add(offset) as *const _, merkle_tree.height); for subtree in filled_subtrees.iter() { merkle_tree.filled_subtrees.push(*subtree)?; } - let expected_bytes_changelog_size = - mem::size_of::>() * (*struct_ref).changelog_capacity; - if bytes_changelog.len() != expected_bytes_changelog_size { - return Err(ConcurrentMerkleTreeError::ChangelogBufferSize( - expected_bytes_changelog_size, - bytes_changelog.len(), - )); - } + let offset = offset + filled_subtrees_size; + let changelog_size = + mem::size_of::>() * merkle_tree.changelog_capacity; let changelog: &[ChangelogEntry] = slice::from_raw_parts( - bytes_changelog.as_ptr() as *const _, - (*struct_ref).changelog_length, + bytes.as_ptr().add(offset) as *const _, + merkle_tree.changelog_length, ); for changelog_entry in changelog.iter() { 
merkle_tree.changelog.push(changelog_entry.clone())?; } - let expected_bytes_roots_size = mem::size_of::<[u8; 32]>() * (*struct_ref).roots_capacity; - if bytes_roots.len() != expected_bytes_roots_size { - return Err(ConcurrentMerkleTreeError::RootBufferSize( - expected_bytes_roots_size, - bytes_roots.len(), - )); - } - let roots: &[[u8; 32]] = - slice::from_raw_parts(bytes_roots.as_ptr() as *const _, (*struct_ref).roots_length); + let offset = offset + changelog_size; + let roots: &[[u8; 32]] = slice::from_raw_parts( + bytes.as_ptr().add(offset) as *const _, + merkle_tree.roots_length, + ); for root in roots.iter() { merkle_tree.roots.push(*root)?; } @@ -236,81 +247,103 @@ where Ok(merkle_tree) } - /// Casts a byte slice into `ConcurrentMerkleTree`. + /// Instantiates a `ConcurrentMerkleTree` from the given slice of bytes. /// - /// # Safety - /// - /// This is highly unsafe. Ensuring the size and alignment of the byte - /// slice is the caller's responsibility. - pub unsafe fn struct_from_bytes( - bytes_struct: &'a [u8], - ) -> Result<&'a Self, ConcurrentMerkleTreeError> { - let expected_bytes_struct_size = mem::size_of::(); - if bytes_struct.len() != expected_bytes_struct_size { - return Err(ConcurrentMerkleTreeError::StructBufferSize( - expected_bytes_struct_size, - bytes_struct.len(), - )); - } - let tree: *const Self = bytes_struct.as_ptr() as _; - Ok(&*tree) - } - - /// Casts a byte slice into `ConcurrentMerkleTree`. + /// This method handles only primitive fields of `ConcurrentMerkleTree`. + /// Dynamic fields (of type `BoundedVec` and `CyclicBoundedVec`) need to + /// be handled separately. /// /// # Safety /// /// This is highly unsafe. Ensuring the size and alignment of the byte /// slice is the caller's responsibility. 
- pub unsafe fn struct_from_bytes_mut( - bytes_struct: &[u8], - ) -> Result<&mut Self, ConcurrentMerkleTreeError> { - let expected_bytes_struct_size = mem::size_of::(); - if bytes_struct.len() != expected_bytes_struct_size { - return Err(ConcurrentMerkleTreeError::StructBufferSize( - expected_bytes_struct_size, - bytes_struct.len(), - )); - } - let tree: *mut Self = bytes_struct.as_ptr() as _; - Ok(&mut *tree) - } + unsafe fn struct_from_bytes(bytes_struct: &[u8]) -> Result { + // let expected_bytes_struct_size = Self::non_dyn_fields_size(); + // if bytes_struct.len() != expected_bytes_struct_size { + // return Err(ConcurrentMerkleTreeError::BufferSize( + // expected_bytes_struct_size, + // bytes_struct.len(), + // )); + // } + + let height = usize::from_ne_bytes( + bytes_struct[memoffset::span_of!(Self, height)] + .try_into() + .unwrap(), + ); + let changelog_capacity = usize::from_ne_bytes( + bytes_struct[memoffset::span_of!(Self, changelog_capacity)] + .try_into() + .unwrap(), + ); + let changelog_length = usize::from_ne_bytes( + bytes_struct[memoffset::span_of!(Self, changelog_length)] + .try_into() + .unwrap(), + ); + let current_changelog_index = usize::from_ne_bytes( + bytes_struct[memoffset::span_of!(Self, current_changelog_index)] + .try_into() + .unwrap(), + ); + let roots_capacity = usize::from_ne_bytes( + bytes_struct[memoffset::span_of!(Self, roots_capacity)] + .try_into() + .unwrap(), + ); + let roots_length = usize::from_ne_bytes( + bytes_struct[memoffset::span_of!(Self, roots_length)] + .try_into() + .unwrap(), + ); + let current_root_index = usize::from_ne_bytes( + bytes_struct[memoffset::span_of!(Self, current_root_index)] + .try_into() + .unwrap(), + ); + let canopy_depth = usize::from_ne_bytes( + bytes_struct[memoffset::span_of!(Self, canopy_depth)] + .try_into() + .unwrap(), + ); + let next_index = usize::from_ne_bytes( + bytes_struct[memoffset::span_of!(Self, next_index)] + .try_into() + .unwrap(), + ); + let sequence_number = 
usize::from_ne_bytes( + bytes_struct[memoffset::span_of!(Self, sequence_number)] + .try_into() + .unwrap(), + ); + let rightmost_leaf = bytes_struct[memoffset::span_of!(Self, rightmost_leaf)] + .try_into() + .unwrap(); + + Ok(Self { + height, + + changelog_capacity, + changelog_length, + current_changelog_index, + + roots_capacity, + roots_length, + current_root_index, + + canopy_depth, - /// Casts a byte slice into a `CyclicBoundedVec` containing MErkle tree - /// roots. - /// - /// # Purpose - /// - /// This method is meant to be used mostly in Solana programs, where memory - /// constraints are tight and we want to make sure no data is copied. - /// - /// # Safety - /// - /// This is highly unsafe. This method validates only sizes of slices. - /// Ensuring the alignment and that the slices provide actual data of the - /// Merkle tree is the caller's responsibility. - /// - /// Calling it in async context (or anywhere where the underlying data can - /// be moved in the memory) is certainly going to cause undefined behavior. - pub unsafe fn roots_from_bytes( - bytes_roots: &[u8], - next_index: usize, - length: usize, - capacity: usize, - ) -> Result, ConcurrentMerkleTreeError> { - let expected_bytes_roots_size = mem::size_of::<[u8; 32]>() * capacity; - if bytes_roots.len() != expected_bytes_roots_size { - return Err(ConcurrentMerkleTreeError::RootBufferSize( - expected_bytes_roots_size, - bytes_roots.len(), - )); - } - Ok(CyclicBoundedVec::from_raw_parts( - bytes_roots.as_ptr() as _, next_index, - length, - capacity, - )) + sequence_number, + rightmost_leaf, + + filled_subtrees: BoundedVec::with_capacity(height), + changelog: CyclicBoundedVec::with_capacity(changelog_capacity), + roots: CyclicBoundedVec::with_capacity(roots_capacity), + canopy: BoundedVec::with_capacity(canopy_depth), + + _hasher: PhantomData, + }) } /// Casts byte slices into `ConcurrentMerkleTree`. 
@@ -337,79 +370,92 @@ where /// /// Calling it in async context (or anywhere where the underlying data can /// be moved in the memory) is certainly going to cause undefined behavior. - pub unsafe fn from_bytes( - bytes_struct: &'a [u8], - bytes_filled_subtrees: &'a [u8], - bytes_changelog: &'a [u8], - bytes_roots: &'a [u8], - bytes_canopy: &'a [u8], - ) -> Result<&'a Self, ConcurrentMerkleTreeError> { - let expected_bytes_struct_size = mem::size_of::(); - if bytes_struct.len() != expected_bytes_struct_size { - return Err(ConcurrentMerkleTreeError::StructBufferSize( - expected_bytes_struct_size, - bytes_struct.len(), + pub unsafe fn from_bytes_zero_copy(bytes: &'a [u8]) -> Result { + if bytes.len() < Self::non_dyn_fields_size() { + return Err(ConcurrentMerkleTreeError::BufferSize( + Self::non_dyn_fields_size(), + bytes.len(), )); } - let tree = Self::struct_from_bytes_mut(bytes_struct)?; - // Restore the vectors correctly, by pointing them to the appropriate - // byte slices as underlying data. The most unsafe part of this code. - // Here be dragons! 
- let expected_bytes_filled_subtrees_size = mem::size_of::<[u8; 32]>() * tree.height; - if bytes_filled_subtrees.len() != expected_bytes_filled_subtrees_size { - return Err(ConcurrentMerkleTreeError::FilledSubtreesBufferSize( - expected_bytes_filled_subtrees_size, - bytes_filled_subtrees.len(), + let mut tree = Self::struct_from_bytes(bytes)?; + tree.fill_vectors(bytes)?; + + Ok(tree) + } + + pub unsafe fn from_bytes_zero_copy_mut( + bytes: &'a mut [u8], + ) -> Result { + if bytes.len() < Self::non_dyn_fields_size() { + return Err(ConcurrentMerkleTreeError::BufferSize( + Self::non_dyn_fields_size(), + bytes.len(), )); } - tree.filled_subtrees = BoundedVec::from_raw_parts( - bytes_filled_subtrees.as_ptr() as _, - tree.height, - tree.height, - ); - let expected_bytes_changelog_size = - mem::size_of::>() * tree.changelog_capacity; - if bytes_changelog.len() != expected_bytes_changelog_size { - return Err(ConcurrentMerkleTreeError::ChangelogBufferSize( - expected_bytes_changelog_size, - bytes_changelog.len(), + let mut tree = Self::struct_from_bytes(&bytes)?; + tree.fill_vectors_mut(bytes)?; + + Ok(tree) + } + + /// Assigns byte slices into vectors belonging to `ConcurrentMerkleTree`. + /// + /// # Safety + /// + /// This is highly unsafe. Ensuring the size and alignment of the byte + /// slices is the caller's responsibility. 
+ #[allow(clippy::too_many_arguments)] + unsafe fn fill_vectors<'b>( + &'b mut self, + bytes: &'b [u8], + ) -> Result<(), ConcurrentMerkleTreeError> { + let expected_size = Self::size( + self.height, + self.changelog_capacity, + self.roots_capacity, + self.canopy_depth, + ); + if bytes.len() != expected_size { + return Err(ConcurrentMerkleTreeError::BufferSize( + expected_size, + bytes.len(), )); } - tree.changelog = CyclicBoundedVec::from_raw_parts( - bytes_changelog.as_ptr() as _, - tree.current_changelog_index + 1, - tree.changelog_length, - tree.changelog_capacity, + + // Restore the vectors correctly, by pointing them to the appropriate + // byte slices as underlying data. The most unsafe part of this code. + // Here be dragons! + let offset = Self::non_dyn_fields_size(); + let filled_subtrees_size = mem::size_of::<[u8; 32]>() * self.height; + self.filled_subtrees = + BoundedVec::from_raw_parts(bytes.as_ptr().add(offset) as _, self.height, self.height); + + let offset = offset + filled_subtrees_size; + let changelog_size = mem::size_of::>() * self.changelog_capacity; + self.changelog = CyclicBoundedVec::from_raw_parts( + bytes.as_ptr().add(offset) as _, + self.current_changelog_index + 1, + self.changelog_length, + self.changelog_capacity, ); - let expected_bytes_roots_size = mem::size_of::<[u8; 32]>() * tree.roots_capacity; - if bytes_roots.len() != expected_bytes_roots_size { - return Err(ConcurrentMerkleTreeError::RootBufferSize( - expected_bytes_roots_size, - bytes_roots.len(), - )); - } - tree.roots = Self::roots_from_bytes( - bytes_roots, - tree.current_root_index + 1, - tree.roots_length, - tree.roots_capacity, - )?; - - let canopy_size = Self::canopy_size(tree.canopy_depth); - let expected_canopy_size = mem::size_of::<[u8; 32]>() * canopy_size; - if bytes_canopy.len() != expected_canopy_size { - return Err(ConcurrentMerkleTreeError::CanopyBufferSize( - expected_canopy_size, - bytes_canopy.len(), - )); - } - tree.canopy = - 
BoundedVec::from_raw_parts(bytes_canopy.as_ptr() as _, canopy_size, canopy_size); + let offset = offset + changelog_size; + let roots_size = mem::size_of::<[u8; 32]>() * self.roots_capacity; + self.roots = CyclicBoundedVec::from_raw_parts( + bytes.as_ptr().add(offset) as _, + self.current_root_index + 1, + self.roots_length, + self.roots_capacity, + ); - Ok(tree) + let offset = offset + roots_size; + let canopy_size = Self::canopy_size(self.canopy_depth); + self.canopy = + BoundedVec::from_raw_parts(bytes.as_ptr().add(offset) as _, canopy_size, canopy_size); + + Ok(()) } /// Assigns byte slices into vectors belonging to `ConcurrentMerkleTree`. @@ -421,72 +467,57 @@ where #[allow(clippy::too_many_arguments)] unsafe fn fill_vectors_mut<'b>( &'b mut self, - bytes_filled_subtrees: &'b mut [u8], - bytes_changelog: &'b mut [u8], - bytes_roots: &'b mut [u8], - bytes_canopy: &'b mut [u8], - subtrees_length: usize, - changelog_next_index: usize, - changelog_length: usize, - roots_next_index: usize, - roots_length: usize, - canopy_length: usize, + bytes: &'b mut [u8], ) -> Result<(), ConcurrentMerkleTreeError> { + let expected_size = Self::size( + self.height, + self.changelog_capacity, + self.roots_capacity, + self.canopy_depth, + ); + if bytes.len() != expected_size { + return Err(ConcurrentMerkleTreeError::BufferSize( + expected_size, + bytes.len(), + )); + } + // Restore the vectors correctly, by pointing them to the appropriate // byte slices as underlying data. The most unsafe part of this code. // Here be dragons! 
- let expected_bytes_filled_subtrees_size = mem::size_of::<[u8; 32]>() * self.height; - if bytes_filled_subtrees.len() != expected_bytes_filled_subtrees_size { - return Err(ConcurrentMerkleTreeError::FilledSubtreesBufferSize( - expected_bytes_filled_subtrees_size, - bytes_filled_subtrees.len(), - )); - } + let offset = Self::non_dyn_fields_size(); + let filled_subtrees_size = mem::size_of::<[u8; 32]>() * self.height; self.filled_subtrees = BoundedVec::from_raw_parts( - bytes_filled_subtrees.as_mut_ptr() as _, - subtrees_length, + bytes.as_mut_ptr().add(offset) as _, + self.height, self.height, ); - let expected_bytes_changelog_size = - mem::size_of::>() * self.changelog_capacity; - if bytes_changelog.len() != expected_bytes_changelog_size { - return Err(ConcurrentMerkleTreeError::ChangelogBufferSize( - expected_bytes_changelog_size, - bytes_changelog.len(), - )); - } + let offset = offset + filled_subtrees_size; + let changelog_size = mem::size_of::>() * self.changelog_capacity; self.changelog = CyclicBoundedVec::from_raw_parts( - bytes_changelog.as_mut_ptr() as _, - changelog_next_index, - changelog_length, + bytes.as_mut_ptr().add(offset) as _, + self.current_changelog_index + 1, + self.changelog_length, self.changelog_capacity, ); - let expected_bytes_roots_size = mem::size_of::<[u8; 32]>() * self.roots_capacity; - if bytes_roots.len() != expected_bytes_roots_size { - return Err(ConcurrentMerkleTreeError::RootBufferSize( - expected_bytes_roots_size, - bytes_roots.len(), - )); - } + let offset = offset + changelog_size; + let roots_size = mem::size_of::<[u8; 32]>() * self.roots_capacity; self.roots = CyclicBoundedVec::from_raw_parts( - bytes_roots.as_mut_ptr() as _, - roots_next_index, - roots_length, + bytes.as_mut_ptr().add(offset) as _, + self.current_root_index + 1, + self.roots_length, self.roots_capacity, ); + let offset = offset + roots_size; let canopy_size = Self::canopy_size(self.canopy_depth); - let expected_canopy_size = mem::size_of::<[u8; 32]>() * 
canopy_size; - if bytes_canopy.len() != expected_canopy_size { - return Err(ConcurrentMerkleTreeError::CanopyBufferSize( - expected_canopy_size, - bytes_canopy.len(), - )); - } - self.canopy = - BoundedVec::from_raw_parts(bytes_canopy.as_mut_ptr() as _, canopy_length, canopy_size); + self.canopy = BoundedVec::from_raw_parts( + bytes.as_mut_ptr().add(offset) as _, + canopy_size, + canopy_size, + ); Ok(()) } @@ -516,90 +547,44 @@ where /// Calling it in async context (or anywhere where the underlying data can /// be moved in the memory) is certainly going to cause undefined behavior. #[allow(clippy::too_many_arguments)] - pub unsafe fn from_bytes_init( - bytes_struct: &'a mut [u8], - bytes_filled_subtrees: &'a mut [u8], - bytes_changelog: &'a mut [u8], - bytes_roots: &'a mut [u8], - bytes_canopy: &'a mut [u8], + pub unsafe fn from_bytes_zero_copy_init( + bytes: &'a mut [u8], height: usize, changelog_size: usize, roots_size: usize, canopy_depth: usize, - ) -> Result<&'a mut Self, ConcurrentMerkleTreeError> { - let tree = ConcurrentMerkleTree::struct_from_bytes_mut(bytes_struct)?; - - tree.height = height; - - tree.changelog_capacity = changelog_size; - tree.changelog_length = 0; - tree.current_changelog_index = 0; - - tree.roots_capacity = roots_size; - tree.roots_length = 0; - tree.current_root_index = 0; - - tree.canopy_depth = canopy_depth; - - tree.fill_vectors_mut( - bytes_filled_subtrees, - bytes_changelog, - bytes_roots, - bytes_canopy, - 0, - 0, - 0, - 0, - 0, - 0, - )?; - Ok(tree) - } + ) -> Result { + if bytes.len() < Self::non_dyn_fields_size() { + return Err(ConcurrentMerkleTreeError::BufferSize( + Self::non_dyn_fields_size(), + bytes.len(), + )); + } - /// Casts byte slices into `ConcurrentMerkleTree`. - /// - /// * `bytes_struct` is casted directly into a reference of - /// `ConcurrentMerkleTree`. - /// * `bytes_filled_subtrees` is used to create a `BoundedVec` directly. - /// That `BoundedVec` is assigned to the struct. 
- /// * `bytes_changelog` is used to create a `CyclicBoundedVec` directly. - /// That `CyclicBoundedVec` is assigned to the struct. - /// * `bytes_roots` is used to create a `CyclicBoundedVec` directly. That - /// `CyclicBoundedVec` is assigned to the struct. - /// - /// # Purpose - /// - /// This method is meant to be used mostly in Solana programs, where memory - /// constraints are tight and we want to make sure no data is copied. - /// - /// # Safety - /// - /// This is highly unsafe. This method validates only sizes of slices. - /// Ensuring the alignment and that the slices provide actual data of the - /// Merkle tree is the caller's responsibility. - /// - /// Calling it in async context (or anywhere where the underlying data can - /// be moved in the memory) is certainly going to cause undefined behavior. - pub unsafe fn from_bytes_mut<'b>( - bytes_struct: &'b mut [u8], - bytes_filled_subtrees: &'b mut [u8], - bytes_changelog: &'b mut [u8], - bytes_roots: &'b mut [u8], - bytes_canopy: &'b mut [u8], - ) -> Result<&'b mut Self, ConcurrentMerkleTreeError> { - let tree = ConcurrentMerkleTree::struct_from_bytes_mut(bytes_struct)?; - tree.fill_vectors_mut( - bytes_filled_subtrees, - bytes_changelog, - bytes_roots, - bytes_canopy, - tree.height, - tree.current_changelog_index + 1, - tree.changelog_length, - tree.current_root_index + 1, - tree.roots_length, - Self::canopy_size(tree.canopy_depth), - )?; + bytes[memoffset::span_of!(Self, height)].copy_from_slice(height.to_ne_bytes().as_slice()); + bytes[memoffset::span_of!(Self, changelog_capacity)] + .copy_from_slice(changelog_size.to_ne_bytes().as_slice()); + bytes[memoffset::span_of!(Self, changelog_length)] + .copy_from_slice(0_usize.to_ne_bytes().as_slice()); + bytes[memoffset::span_of!(Self, current_changelog_index)] + .copy_from_slice(0_usize.to_ne_bytes().as_slice()); + bytes[memoffset::span_of!(Self, roots_capacity)] + .copy_from_slice(roots_size.to_ne_bytes().as_slice()); + bytes[memoffset::span_of!(Self, 
roots_length)] + .copy_from_slice(0_usize.to_ne_bytes().as_slice()); + bytes[memoffset::span_of!(Self, current_root_index)] + .copy_from_slice(0_usize.to_ne_bytes().as_slice()); + bytes[memoffset::span_of!(Self, canopy_depth)] + .copy_from_slice(canopy_depth.to_ne_bytes().as_slice()); + bytes[memoffset::span_of!(Self, next_index)] + .copy_from_slice(0_usize.to_ne_bytes().as_slice()); + bytes[memoffset::span_of!(Self, sequence_number)] + .copy_from_slice(0_usize.to_ne_bytes().as_slice()); + bytes[memoffset::span_of!(Self, rightmost_leaf)].copy_from_slice(&[0u8; 32]); + + let mut tree = ConcurrentMerkleTree::struct_from_bytes(bytes)?; + + tree.fill_vectors(bytes)?; Ok(tree) } diff --git a/merkle-tree/concurrent/tests/tests.rs b/merkle-tree/concurrent/tests/tests.rs index ca07b99d7b..645d74b7fc 100644 --- a/merkle-tree/concurrent/tests/tests.rs +++ b/merkle-tree/concurrent/tests/tests.rs @@ -1009,7 +1009,7 @@ where ]; let merkle_tree = unsafe { - ConcurrentMerkleTree::::from_bytes_init( + ConcurrentMerkleTree::::from_bytes_zero_copy_init( bytes_struct.as_mut_slice(), bytes_filled_subtrees.as_mut_slice(), bytes_changelog.as_mut_slice(), @@ -1042,7 +1042,7 @@ where } let merkle_tree = unsafe { - ConcurrentMerkleTree::::from_bytes( + ConcurrentMerkleTree::::from_bytes_zero_copy( bytes_struct.as_slice(), bytes_filled_subtrees.as_slice(), bytes_changelog.as_slice(), diff --git a/merkle-tree/indexed/src/lib.rs b/merkle-tree/indexed/src/lib.rs index d9f0918056..2ee3e2cb51 100644 --- a/merkle-tree/indexed/src/lib.rs +++ b/merkle-tree/indexed/src/lib.rs @@ -58,28 +58,30 @@ where } } + pub unsafe fn from_bytes_copy(bytes: &[u8]) -> Result { + let merkle_tree = ConcurrentMerkleTree::::from_bytes_copy(bytes)?; + + Ok(Self { + merkle_tree, + _index: PhantomData, + _bigint: PhantomData, + }) + } + /// Casts byte slices into `ConcurrentMerkleTree`. /// /// # Safety /// /// This is highly unsafe. Ensuring the size and alignment of the byte /// slices is the caller's responsibility. 
- pub unsafe fn from_bytes<'b>( - bytes_struct: &'b [u8], - bytes_filled_subtrees: &'b [u8], - bytes_changelog: &'b [u8], - bytes_roots: &'b [u8], - bytes_canopy: &'b [u8], - ) -> Result<&'b Self, ConcurrentMerkleTreeError> { - let merkle_tree = ConcurrentMerkleTree::::from_bytes( - bytes_struct, - bytes_filled_subtrees, - bytes_changelog, - bytes_roots, - bytes_canopy, - )?; + pub unsafe fn from_bytes_zero_copy(bytes: &'a [u8]) -> Result { + let merkle_tree = ConcurrentMerkleTree::::from_bytes_zero_copy(bytes)?; - Ok(&*(merkle_tree as *const ConcurrentMerkleTree as *const Self)) + Ok(Self { + merkle_tree, + _index: PhantomData, + _bigint: PhantomData, + }) } /// Casts byte slices into `ConcurrentMerkleTree`. @@ -88,31 +90,16 @@ where /// /// This is highly unsafe. Ensuring the size and alignment of the byte /// slices is the caller's responsibility. - #[allow(clippy::too_many_arguments)] - pub unsafe fn from_bytes_init( - bytes_struct: &'a mut [u8], - bytes_filled_subtrees: &'a mut [u8], - bytes_changelog: &'a mut [u8], - bytes_roots: &'a mut [u8], - bytes_canopy: &'a mut [u8], - height: usize, - changelog_size: usize, - roots_size: usize, - canopy_depth: usize, - ) -> Result<&'a mut Self, ConcurrentMerkleTreeError> { - let merkle_tree = ConcurrentMerkleTree::::from_bytes_init( - bytes_struct, - bytes_filled_subtrees, - bytes_changelog, - bytes_roots, - bytes_canopy, - height, - changelog_size, - roots_size, - canopy_depth, - )?; + pub unsafe fn from_bytes_zero_copy_mut( + bytes: &'a mut [u8], + ) -> Result { + let merkle_tree = ConcurrentMerkleTree::::from_bytes_zero_copy_mut(bytes)?; - Ok(&mut *(merkle_tree as *mut ConcurrentMerkleTree as *mut Self)) + Ok(Self { + merkle_tree, + _index: PhantomData, + _bigint: PhantomData, + }) } /// Casts byte slices into `ConcurrentMerkleTree`. @@ -121,22 +108,27 @@ where /// /// This is highly unsafe. Ensuring the size and alignment of the byte /// slices is the caller's responsibility. 
- pub unsafe fn from_bytes_mut<'b>( - bytes_struct: &'b mut [u8], - bytes_filled_subtrees: &'b mut [u8], - bytes_changelog: &'b mut [u8], - bytes_roots: &'b mut [u8], - bytes_canopy: &'b mut [u8], - ) -> Result<&'b mut Self, ConcurrentMerkleTreeError> { - let merkle_tree = ConcurrentMerkleTree::::from_bytes_mut( - bytes_struct, - bytes_filled_subtrees, - bytes_changelog, - bytes_roots, - bytes_canopy, + #[allow(clippy::too_many_arguments)] + pub unsafe fn from_bytes_zero_copy_init( + bytes: &'a mut [u8], + height: usize, + changelog_size: usize, + roots_size: usize, + canopy_depth: usize, + ) -> Result { + let merkle_tree = ConcurrentMerkleTree::::from_bytes_zero_copy_init( + bytes, + height, + changelog_size, + roots_size, + canopy_depth, )?; - Ok(&mut *(merkle_tree as *mut ConcurrentMerkleTree as *mut Self)) + Ok(Self { + merkle_tree, + _index: PhantomData, + _bigint: PhantomData, + }) } pub fn init(&mut self) -> Result<(), IndexedMerkleTreeError> { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 8350d07e10..01a53bbd85 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -380,10 +380,6 @@ importers: specifier: ^0.34.6 version: 0.34.6(@vitest/browser@0.34.6)(playwright@1.40.1) - hasher.rs/src/main/wasm: {} - - hasher.rs/src/main/wasm-simd: {} - js/compressed-token: dependencies: '@coral-xyz/anchor': diff --git a/programs/account-compression/src/instructions/append_leaves.rs b/programs/account-compression/src/instructions/append_leaves.rs index 0a34fdfe48..d12cf4ef24 100644 --- a/programs/account-compression/src/instructions/append_leaves.rs +++ b/programs/account-compression/src/instructions/append_leaves.rs @@ -6,6 +6,7 @@ use crate::{ emit_indexer_event, errors::AccountCompressionErrorCode, state::StateMerkleTreeAccount, + state_mt_from_bytes_zero_copy_mut, utils::check_registered_or_signer::{check_registered_or_signer, GroupAccess, GroupAccounts}, ChangelogEvent, ChangelogEventV1, Changelogs, RegisteredProgram, }; @@ -69,16 +70,15 @@ pub fn 
process_append_leaves_to_merkle_trees<'a, 'b, 'c: 'info, 'info>( let mut changelog_events = Vec::new(); for (mt, leaves) in merkle_tree_map.values() { let merkle_tree_account = AccountLoader::::try_from(mt).unwrap(); - let mut merkle_tree = merkle_tree_account.load_mut()?; + let mut merkle_tree = state_mt_from_bytes_zero_copy_mut(merkle_tree)?; check_registered_or_signer::(&ctx, &merkle_tree)?; msg!("inserting leaves: {:?}", leaves); let changelog_entries = merkle_tree - .load_merkle_tree_mut()? .append_batch(&leaves[..]) .map_err(ProgramError::from)?; - let sequence_number = u64::try_from(merkle_tree.load_merkle_tree()?.sequence_number) + let sequence_number = u64::try_from(merkle_tree.sequence_number) .map_err(|_| AccountCompressionErrorCode::IntegerOverflow)?; changelog_events.push(ChangelogEvent::V1(ChangelogEventV1::new( mt.key(), diff --git a/programs/account-compression/src/instructions/initialize_address_merkle_tree.rs b/programs/account-compression/src/instructions/initialize_address_merkle_tree.rs index dbc2896c68..09463ab9ce 100644 --- a/programs/account-compression/src/instructions/initialize_address_merkle_tree.rs +++ b/programs/account-compression/src/instructions/initialize_address_merkle_tree.rs @@ -1,6 +1,9 @@ pub use anchor_lang::prelude::*; -use crate::{errors::AccountCompressionErrorCode, state::AddressMerkleTreeAccount}; +use crate::{ + address_mt_from_bytes_zero_copy_init, errors::AccountCompressionErrorCode, + state::AddressMerkleTreeAccount, +}; #[derive(Accounts)] pub struct InitializeAddressMerkleTree<'info> { @@ -26,22 +29,14 @@ pub fn process_initialize_address_merkle_tree<'info>( address_merkle_tree.owner = owner; address_merkle_tree.delegate = delegate.unwrap_or(owner); - address_merkle_tree - .load_merkle_tree_init( - height - .try_into() - .map_err(|_| AccountCompressionErrorCode::IntegerOverflow)?, - changelog_size - .try_into() - .map_err(|_| AccountCompressionErrorCode::IntegerOverflow)?, - roots_size - .try_into() - .map_err(|_| 
AccountCompressionErrorCode::IntegerOverflow)?, - canopy_depth - .try_into() - .map_err(|_| AccountCompressionErrorCode::IntegerOverflow)?, - ) - .map_err(ProgramError::from)?; + address_mt_from_bytes_zero_copy_init( + ctx.accounts.merkle_tree, + height as usize, + changelog_size as usize, + roots_size as usize, + canopy_depth as usize, + ) + .map_err(ProgramError::from)?; Ok(()) } diff --git a/programs/account-compression/src/instructions/initialize_concurrent_merkle_tree.rs b/programs/account-compression/src/instructions/initialize_concurrent_merkle_tree.rs index 9bc8e752f5..47ad7d749e 100644 --- a/programs/account-compression/src/instructions/initialize_concurrent_merkle_tree.rs +++ b/programs/account-compression/src/instructions/initialize_concurrent_merkle_tree.rs @@ -1,6 +1,11 @@ +use std::borrow::BorrowMut; + use anchor_lang::prelude::*; -use crate::{errors::AccountCompressionErrorCode, state::StateMerkleTreeAccount}; +use crate::{ + errors::AccountCompressionErrorCode, state::StateMerkleTreeAccount, + state_mt_from_bytes_zero_copy_init, +}; #[derive(Accounts)] pub struct InitializeStateMerkleTree<'info> { @@ -23,6 +28,7 @@ pub fn process_initialize_state_merkle_tree( canopy_depth: u64, associated_queue: Option, ) -> Result<()> { + let foo = ctx.accounts.merkle_tree.to_account_info(); // Initialize new Merkle trees. 
let mut merkle_tree = ctx.accounts.merkle_tree.load_init()?; @@ -35,22 +41,13 @@ pub fn process_initialize_state_merkle_tree( // we could create a group which has ownership over a set of Merkle trees same registration process as for pool program // this needs to be the delegate and or owner // if part of a group we can apply the same registration model as for the pool program - merkle_tree - .load_merkle_tree_init( - height - .try_into() - .map_err(|_| AccountCompressionErrorCode::IntegerOverflow)?, - changelog_size - .try_into() - .map_err(|_| AccountCompressionErrorCode::IntegerOverflow)?, - roots_size - .try_into() - .map_err(|_| AccountCompressionErrorCode::IntegerOverflow)?, - canopy_depth - .try_into() - .map_err(|_| AccountCompressionErrorCode::IntegerOverflow)?, - ) - .map_err(ProgramError::from)?; + state_mt_from_bytes_zero_copy_init( + ctx.accounts.merkle_tree, + height as usize, + changelog_size as usize, + roots_size as usize, + canopy_depth as usize, + )?; Ok(()) } diff --git a/programs/account-compression/src/instructions/nullify_leaves.rs b/programs/account-compression/src/instructions/nullify_leaves.rs index caba761a31..8819a645c7 100644 --- a/programs/account-compression/src/instructions/nullify_leaves.rs +++ b/programs/account-compression/src/instructions/nullify_leaves.rs @@ -3,8 +3,9 @@ use light_bounded_vec::BoundedVec; use light_hasher::zero_bytes::poseidon::ZERO_BYTES; use crate::{ - emit_indexer_event, errors::AccountCompressionErrorCode, state::StateMerkleTreeAccount, - ChangelogEvent, ChangelogEventV1, Changelogs, IndexedArrayAccount, RegisteredProgram, + address_mt_from_bytes_zero_copy_mut, emit_indexer_event, errors::AccountCompressionErrorCode, + state::StateMerkleTreeAccount, state_mt_from_bytes_zero_copy_mut, ChangelogEvent, + ChangelogEventV1, Changelogs, IndexedArrayAccount, RegisteredProgram, }; #[derive(Accounts)] @@ -96,7 +97,8 @@ fn insert_nullifier( ); return Err(AccountCompressionErrorCode::InvalidIndexedArray.into()); } - let 
loaded_merkle_tree = merkle_tree_account.load_merkle_tree_mut()?; + + let mut loaded_merkle_tree = state_mt_from_bytes_zero_copy_mut(ctx.accounts.merkle_tree)?; let allowed_proof_size = loaded_merkle_tree.height - loaded_merkle_tree.canopy_depth; if proofs[0].len() != allowed_proof_size { msg!( @@ -120,7 +122,7 @@ fn insert_nullifier( &mut bounded_vec, ) .map_err(ProgramError::from)?; - let sequence_number = u64::try_from(loaded_merkle_tree.sequence_number) + let sequence_number = u64::try_from(merkle_tree.sequence_number) .map_err(|_| AccountCompressionErrorCode::IntegerOverflow)?; // TODO: replace with root history sequence number array_account.indexed_array[leaves_queue_indices[0] as usize] diff --git a/programs/account-compression/src/instructions/update_address_merkle_tree.rs b/programs/account-compression/src/instructions/update_address_merkle_tree.rs index dc8c22d7bf..f32394c5e3 100644 --- a/programs/account-compression/src/instructions/update_address_merkle_tree.rs +++ b/programs/account-compression/src/instructions/update_address_merkle_tree.rs @@ -6,6 +6,7 @@ use light_indexed_merkle_tree::array::{IndexingElement, RawIndexingElement}; use light_utils::bigint::be_bytes_to_bigint; use crate::{ + address_mt_from_bytes_zero_copy_mut, errors::AccountCompressionErrorCode, state::address::{AddressMerkleTreeAccount, AddressQueueAccount}, }; @@ -43,7 +44,8 @@ pub fn process_update_address_merkle_tree<'info>( ) -> Result<()> { let mut address_queue = ctx.accounts.queue.load_mut()?; let address_queue = address_queue_from_bytes_mut(&mut address_queue.queue); - let mut merkle_tree = ctx.accounts.merkle_tree.load_mut()?; + // let mut merkle_tree = ctx.accounts.merkle_tree.load_mut()?; + let mut merkle_tree = address_mt_from_bytes_zero_copy_mut(ctx.accounts.merkle_tree)?; // Remove the address from the queue. 
let address = address_queue @@ -53,7 +55,7 @@ pub fn process_update_address_merkle_tree<'info>( // Update the address with ranges adjusted to the Merkle tree state. let address: IndexingElement = IndexingElement { - index: merkle_tree.load_merkle_tree()?.merkle_tree.next_index, + index: merkle_tree.merkle_tree.next_index, value: address.value, next_index: address_next_index, }; @@ -69,7 +71,6 @@ pub fn process_update_address_merkle_tree<'info>( // Update the Merkle tree. merkle_tree - .load_merkle_tree_mut()? .update( usize::from(changelog_index), address, diff --git a/programs/account-compression/src/state/address.rs b/programs/account-compression/src/state/address.rs index 830a33cd7e..0fea73f077 100644 --- a/programs/account-compression/src/state/address.rs +++ b/programs/account-compression/src/state/address.rs @@ -1,3 +1,5 @@ +use std::{cell::RefMut, mem}; + use aligned_sized::aligned_sized; use anchor_lang::prelude::*; use ark_ff::BigInteger256; @@ -14,6 +16,8 @@ pub struct AddressQueueAccount { pub queue: [u8; 112008], } +pub type AddressMerkleTree<'a> = IndexedMerkleTree22<'a, Poseidon, usize, BigInteger256>; + #[account(zero_copy)] #[aligned_sized(anchor)] #[derive(BorshDeserialize, BorshSerialize, Debug)] @@ -26,81 +30,51 @@ pub struct AddressMerkleTreeAccount { pub owner: Pubkey, /// Delegate of the Merkle tree. This will be used for program owned Merkle trees. pub delegate: Pubkey, - - pub merkle_tree_struct: [u8; 224], - pub merkle_tree_filled_subtrees: [u8; 704], - pub merkle_tree_changelog: [u8; 2083200], - pub merkle_tree_roots: [u8; 89600], - pub merkle_tree_canopy: [u8; 0], } -impl AddressMerkleTreeAccount { - pub fn load_merkle_tree(&self) -> Result<&IndexedMerkleTree22> { - let tree = unsafe { - IndexedMerkleTree22::from_bytes( - &self.merkle_tree_struct, - &self.merkle_tree_filled_subtrees, - &self.merkle_tree_changelog, - &self.merkle_tree_roots, - &self.merkle_tree_canopy, - ) - .map_err(ProgramError::from)? 
- }; - Ok(tree) - } +pub unsafe fn address_mt_from_bytes_copy<'a>( + data: RefMut<'_, &'a mut [u8]>, +) -> Result> { + let data = &data[8 + mem::size_of::()..]; + let tree = AddressMerkleTree::from_bytes_copy(data).map_err(ProgramError::from)?; + Ok(tree) +} - pub fn load_merkle_tree_init( - &mut self, - height: usize, - changelog_size: usize, - roots_size: usize, - canopy_depth: usize, - ) -> Result<&mut IndexedMerkleTree22> { - let tree = unsafe { - IndexedMerkleTree22::::from_bytes_init( - &mut self.merkle_tree_struct, - &mut self.merkle_tree_filled_subtrees, - &mut self.merkle_tree_changelog, - &mut self.merkle_tree_roots, - &mut self.merkle_tree_canopy, - height, - changelog_size, - roots_size, - canopy_depth, - ) - .map_err(ProgramError::from)? - }; - tree.init().map_err(ProgramError::from)?; - Ok(tree) - } +pub fn address_mt_from_bytes_zero_copy<'a>( + data: RefMut<'_, &'a mut [u8]>, +) -> Result> { + let data = &data[8 + mem::size_of::()..]; + let tree = + unsafe { AddressMerkleTree::from_bytes_zero_copy(data).map_err(ProgramError::from)? }; + Ok(tree) +} - pub fn load_merkle_tree_mut( - &mut self, - ) -> Result<&mut IndexedMerkleTree22> { - let tree = unsafe { - IndexedMerkleTree22::from_bytes_mut( - &mut self.merkle_tree_struct, - &mut self.merkle_tree_filled_subtrees, - &mut self.merkle_tree_changelog, - &mut self.merkle_tree_roots, - &mut self.merkle_tree_canopy, - ) - .map_err(ProgramError::from)? - }; - Ok(tree) - } +pub fn address_mt_from_bytes_zero_copy_mut<'a>( + data: RefMut<'_, &'a mut [u8]>, +) -> Result> { + let data = &data[8 + mem::size_of::()..]; + let tree = + unsafe { AddressMerkleTree::from_bytes_zero_copy_mut(data).map_err(ProgramError::from)? 
}; + Ok(tree) +} - pub fn load_roots(&self) -> Result> { - let tree = self.load_merkle_tree()?; - let roots = unsafe { - ConcurrentMerkleTree22::::roots_from_bytes( - &self.merkle_tree_roots, - tree.merkle_tree.current_root_index + 1, - tree.merkle_tree.roots_length, - tree.merkle_tree.roots_capacity, - ) - .map_err(ProgramError::from)? - }; - Ok(roots) - } +pub fn address_mt_from_bytes_zero_copy_init<'info>( + data: RefMut<'_, &'a mut [u8]>, + height: usize, + changelog_size: usize, + roots_size: usize, + canopy_depth: usize, +) -> Result> { + let data = &data[8 + mem::size_of::()..]; + let tree = unsafe { + AddressMerkleTree::from_bytes_zero_copy_init( + data, + height, + changelog_size, + roots_size, + canopy_depth, + ) + .map_err(ProgramError::from)? + }; + Ok(tree) } diff --git a/programs/account-compression/src/state/public_state_merkle_tree.rs b/programs/account-compression/src/state/public_state_merkle_tree.rs index 5cee6581b9..2504339566 100644 --- a/programs/account-compression/src/state/public_state_merkle_tree.rs +++ b/programs/account-compression/src/state/public_state_merkle_tree.rs @@ -1,3 +1,5 @@ +use std::{borrow::BorrowMut, mem}; + use aligned_sized::aligned_sized; use anchor_lang::prelude::*; use light_bounded_vec::CyclicBoundedVec; @@ -20,106 +22,53 @@ pub struct StateMerkleTreeAccount { /// Delegate of the Merkle tree. This will be used for program owned Merkle trees. pub delegate: Pubkey, pub associated_queue: Pubkey, - - /// Merkle tree for the transaction state. 
- pub state_merkle_tree_struct: [u8; 256], - pub state_merkle_tree_filled_subtrees: [u8; 832], - pub state_merkle_tree_changelog: [u8; 1220800], - pub state_merkle_tree_roots: [u8; 76800], - pub state_merkle_tree_canopy: [u8; 65472], } -impl StateMerkleTreeAccount { - pub fn copy_merkle_tree(&self) -> Result> { - let tree = unsafe { - ConcurrentMerkleTree26::copy_from_bytes( - &self.state_merkle_tree_struct, - &self.state_merkle_tree_filled_subtrees, - &self.state_merkle_tree_changelog, - &self.state_merkle_tree_roots, - ) - .map_err(ProgramError::from)? - }; - Ok(tree) - } - - pub fn load_merkle_tree(&self) -> Result<&ConcurrentMerkleTree26> { - let tree = unsafe { - ConcurrentMerkleTree26::::from_bytes( - &self.state_merkle_tree_struct, - &self.state_merkle_tree_filled_subtrees, - &self.state_merkle_tree_changelog, - &self.state_merkle_tree_roots, - &self.state_merkle_tree_canopy, - ) - .map_err(ProgramError::from)? - }; - Ok(tree) - } - - pub fn load_merkle_tree_init( - &mut self, - height: usize, - changelog_size: usize, - roots_size: usize, - canopy_depth: usize, - ) -> Result<&mut ConcurrentMerkleTree26> { - let tree = unsafe { - ConcurrentMerkleTree26::::from_bytes_init( - &mut self.state_merkle_tree_struct, - &mut self.state_merkle_tree_filled_subtrees, - &mut self.state_merkle_tree_changelog, - &mut self.state_merkle_tree_roots, - &mut self.state_merkle_tree_canopy, - height, - changelog_size, - roots_size, - canopy_depth, - ) - .map_err(ProgramError::from)? 
- }; - tree.init().map_err(ProgramError::from)?; - Ok(tree) - } +pub unsafe fn state_mt_from_bytes_copy(account: AccountInfo) -> Result { + let data = &account.try_borrow_mut_data()?[8 + mem::size_of::()..]; + let tree = StateMerkleTree::from_bytes_copy(data).map_err(ProgramError::from)?; + Ok(tree) +} - pub fn load_merkle_tree_mut(&mut self) -> Result<&mut ConcurrentMerkleTree26> { - let tree = unsafe { - ConcurrentMerkleTree26::::from_bytes_mut( - &mut self.state_merkle_tree_struct, - &mut self.state_merkle_tree_filled_subtrees, - &mut self.state_merkle_tree_changelog, - &mut self.state_merkle_tree_roots, - &mut self.state_merkle_tree_canopy, - ) - .map_err(ProgramError::from)? - }; - Ok(tree) - } +pub fn state_mt_from_bytes_zero_copy<'info>( + account: AccountLoader<'info, StateMerkleTreeAccount>, +) -> Result { + let data = &account.to_account_info().try_borrow_mut_data()? + [8 + mem::size_of::()..]; + let tree = unsafe { StateMerkleTree::from_bytes_zero_copy(data).map_err(ProgramError::from)? }; + Ok(tree) +} - pub fn load_next_index(&self) -> Result { - let tree = unsafe { - ConcurrentMerkleTree26::::struct_from_bytes(&self.state_merkle_tree_struct) - .map_err(ProgramError::from)? - }; - Ok(tree.next_index) - } +pub fn state_mt_from_bytes_zero_copy_mut<'info>( + account: AccountLoader<'info, StateMerkleTreeAccount>, +) -> Result { + let data = &mut account.to_account_info().try_borrow_mut_data()? + [8 + mem::size_of::()..]; + let tree = + unsafe { StateMerkleTree::from_bytes_zero_copy_mut(data).map_err(ProgramError::from)? }; + Ok(tree) +} - pub fn load_roots(&self) -> Result> { - let tree = unsafe { - ConcurrentMerkleTree26::::struct_from_bytes(&self.state_merkle_tree_struct) - .map_err(ProgramError::from)? - }; - let roots = unsafe { - ConcurrentMerkleTree26::::roots_from_bytes( - &self.state_merkle_tree_roots, - tree.current_root_index + 1, - tree.roots_length, - tree.roots_capacity, - ) - .map_err(ProgramError::from)? 
- }; - Ok(roots) - } +pub fn state_mt_from_bytes_zero_copy_init<'info>( + account: AccountLoader<'info, StateMerkleTreeAccount>, + height: usize, + changelog_size: usize, + roots_size: usize, + canopy_depth: usize, +) -> Result { + let data = &mut account.to_account_info().try_borrow_mut_data()? + [8 + mem::size_of::()..]; + let tree = unsafe { + StateMerkleTree::from_bytes_zero_copy_init( + data, + height, + changelog_size, + roots_size, + canopy_depth, + ) + .map_err(ProgramError::from)? + }; + Ok(tree) } #[cfg(test)] @@ -138,11 +87,6 @@ mod test { owner: Pubkey::new_from_array([2u8; 32]), delegate: Pubkey::new_from_array([3u8; 32]), associated_queue: Pubkey::new_from_array([4u8; 32]), - state_merkle_tree_struct: [0u8; 256], - state_merkle_tree_filled_subtrees: [0u8; 832], - state_merkle_tree_changelog: [0u8; 1220800], - state_merkle_tree_roots: [0u8; 76800], - state_merkle_tree_canopy: [0u8; 65472], }; let merkle_tree = account diff --git a/xtask/src/type_sizes.rs b/xtask/src/type_sizes.rs index e1c319d754..dd612b89a9 100644 --- a/xtask/src/type_sizes.rs +++ b/xtask/src/type_sizes.rs @@ -27,66 +27,61 @@ struct Type { pub fn type_sizes() -> anyhow::Result<()> { let accounts = vec![ - Type { - name: "StateMerkleTreeAccount (with discriminator)".to_owned(), - space: mem::size_of::() + 8, - }, - Type { - name: "StateMerkleTree".to_owned(), - space: mem::size_of::(), - }, - Type { - name: "StateMerkleTree->filled_subtrees".to_owned(), - space: mem::size_of::<[u8; 32]>() * STATE_MERKLE_TREE_HEIGHT, - }, - Type { - name: "StateMerkleTree->changelog".to_owned(), - space: mem::size_of::() * STATE_MERKLE_TREE_CHANGELOG, - }, - Type { - name: "StateMerkleTree->roots".to_owned(), - space: mem::size_of::<[u8; 32]>() * STATE_MERKLE_TREE_ROOTS, - }, - Type { - name: "StateMerkleTree->canopy".to_owned(), - space: mem::size_of::<[u8; 32]>() - * ConcurrentMerkleTree26::::canopy_size(STATE_MERKLE_TREE_CANOPY_DEPTH), - }, - Type { - name: "IndexedArray".to_owned(), - space: 
mem::size_of::< - IndexingArray, - >(), - }, - Type { - name: "AddressQueue".to_owned(), - space: mem::size_of::(), - }, - Type { - name: "AddressMerkleTreeAccount (with discriminator)".to_owned(), - space: mem::size_of::() + 8, - }, - Type { - name: "AddressMerkleTree".to_owned(), - space: mem::size_of::(), - }, - Type { - name: "AddressMerkleTree->filled_subtrees".to_owned(), - space: mem::size_of::<[u8; 32]>() * ADDRESS_MERKLE_TREE_HEIGHT, - }, - Type { - name: "AddressMerkleTree->changelog".to_owned(), - space: mem::size_of::() * ADDRESS_MERKLE_TREE_CHANGELOG, - }, - Type { - name: "AddressMerkleTree->roots".to_owned(), - space: mem::size_of::<[u8; 32]>() * ADDRESS_MERKLE_TREE_ROOTS, - }, - Type { - name: "AddressMerkleTree->canopy".to_owned(), - space: mem::size_of::<[u8; 32]>() * ADDRESS_MERKLE_TREE_CANOPY_DEPTH, - }, - ]; + Type { + name: "StateMerkleTreeAccount (with discriminator)".to_owned(), + space: mem::size_of::() + 8, + }, + Type { + name: "StateMerkleTree".to_owned(), + space: mem::size_of::(), + }, + Type { + name: "StateMerkleTree->filled_subtrees".to_owned(), + space: mem::size_of::<[u8; 32]>() * STATE_MERKLE_TREE_HEIGHT, + }, + Type { + name: "StateMerkleTree->changelog".to_owned(), + space: mem::size_of::() * STATE_MERKLE_TREE_CHANGELOG, + }, + Type { + name: "StateMerkleTree->roots".to_owned(), + space: mem::size_of::<[u8; 32]>() * STATE_MERKLE_TREE_ROOTS, + }, + Type { + name: "StateMerkleTree->canopy".to_owned(), + space: mem::size_of::<[u8; 32]>() + * ConcurrentMerkleTree26::::canopy_size(STATE_MERKLE_TREE_CANOPY_DEPTH), + }, + Type { + name: "IndexedArray".to_owned(), + space: mem::size_of::< + IndexingArray, + >(), + }, + Type { + name: "AddressQueue".to_owned(), + space: mem::size_of::(), + }, + Type { + name: "AddressMerkleTreeAccount (with discriminator and tree)".to_owned(), + space: mem::size_of::() + 8 + + + mem::size_of::() + + mem::size_of::<[u8; 32]>() * ADDRESS_MERKLE_TREE_HEIGHT + + mem::size_of::() * 
ADDRESS_MERKLE_TREE_CHANGELOG + + mem::size_of::<[u8; 32]>() * ADDRESS_MERKLE_TREE_ROOTS, + + mem::size_of::<[u8; 32]>() * ADDRESS_MERKLE_TREE_CANOPY_DEPTH, + , + }, + Type { + name: "AddressMerkleTree".to_owned(), + space: mem::size_of::() + + mem::size_of::<[u8; 32]>() * ADDRESS_MERKLE_TREE_HEIGHT + + mem::size_of::() * ADDRESS_MERKLE_TREE_CHANGELOG + + mem::size_of::<[u8; 32]>() * ADDRESS_MERKLE_TREE_ROOTS, + + mem::size_of::<[u8; 32]>() * ADDRESS_MERKLE_TREE_CANOPY_DEPTH, + }, + ]; let table = Table::new(accounts); println!("{table}");