Skip to content

Commit

Permalink
chore: batched handoff cleanup 2 (#1480)
Browse files Browse the repository at this point in the history
* chore: update docs append_leaves

* chore: update comments on insert_into_queues, QueueType::Input -> BatchedInput, QueueType::Output -> BatchedOutput, QueueType::Address -> BatchedAddress

* chore: remove usage of get_metadata and get_metadata_mut

* chore: add comments batch_append

* chore: add comments batch_nullify

* chore: add comments for batch_update_address_tree

* chore: rename rollover batch tree -> batched tree, add comments to rollover_batched_address_merkle_tree

* chore: add comments to process_rollover_batched_state_merkle_tree

* chore: add comments to migrate state, add comments to init batched address Merkle tree

* chore: registry program cleanup

* chore: rename add_state_queue_bundle_v2 -> add_nullifier_queue_bundle_v2
  • Loading branch information
ananas-block authored Jan 14, 2025
1 parent 63b0da7 commit 55a5220
Show file tree
Hide file tree
Showing 36 changed files with 736 additions and 585 deletions.
6 changes: 3 additions & 3 deletions forester-utils/src/address_merkle_tree_config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -174,9 +174,9 @@ pub async fn state_tree_ready_for_rollover<R: RpcConnection>(
.unwrap();

(
tree_meta_data.get_metadata().next_index as usize,
tree_meta_data.get_metadata().metadata,
tree_meta_data.get_metadata().height,
tree_meta_data.next_index as usize,
tree_meta_data.metadata,
tree_meta_data.height,
)
}
_ => panic!("Invalid discriminator"),
Expand Down
19 changes: 7 additions & 12 deletions forester-utils/src/instructions.rs
Original file line number Diff line number Diff line change
Expand Up @@ -65,15 +65,12 @@ where
.unwrap();

let old_root_index = merkle_tree.root_history.last_index();
let full_batch_index = merkle_tree
.get_metadata()
.queue_metadata
.next_full_batch_index;
let full_batch_index = merkle_tree.queue_metadata.next_full_batch_index;
let batch = &merkle_tree.batches[full_batch_index as usize];
let zkp_batch_index = batch.get_num_inserted_zkps();
let leaves_hashchain =
merkle_tree.hashchain_store[full_batch_index as usize][zkp_batch_index as usize];
let start_index = merkle_tree.get_metadata().next_index;
let start_index = merkle_tree.next_index;
let current_root = *merkle_tree.root_history.last().unwrap();
let batch_size = batch.zkp_batch_size as usize;

Expand Down Expand Up @@ -227,7 +224,7 @@ pub async fn create_append_batch_ix_data<R: RpcConnection, I: Indexer<R>>(
)
.unwrap();
(
merkle_tree.get_metadata().next_index,
merkle_tree.next_index,
*merkle_tree.root_history.last().unwrap(),
)
};
Expand All @@ -239,9 +236,8 @@ pub async fn create_append_batch_ix_data<R: RpcConnection, I: Indexer<R>>(
)
.unwrap();

let queue_metadata = output_queue.get_metadata();
let full_batch_index = queue_metadata.batch_metadata.next_full_batch_index;
let zkp_batch_size = queue_metadata.batch_metadata.zkp_batch_size;
let full_batch_index = output_queue.batch_metadata.next_full_batch_index;
let zkp_batch_size = output_queue.batch_metadata.zkp_batch_size;

let num_inserted_zkps =
output_queue.batches[full_batch_index as usize].get_num_inserted_zkps();
Expand Down Expand Up @@ -352,9 +348,8 @@ pub async fn create_nullify_batch_ix_data<R: RpcConnection, I: Indexer<R>>(
let merkle_tree =
BatchedMerkleTreeAccount::state_tree_from_bytes_mut(account.data.as_mut_slice())
.unwrap();
let metadata = merkle_tree.get_metadata();
let batch_idx = metadata.queue_metadata.next_full_batch_index as usize;
let zkp_size = metadata.queue_metadata.zkp_batch_size;
let batch_idx = merkle_tree.queue_metadata.next_full_batch_index as usize;
let zkp_size = merkle_tree.queue_metadata.zkp_batch_size;
let batch = &merkle_tree.batches[batch_idx];
let zkp_idx = batch.get_num_inserted_zkps();
let hashchain = merkle_tree.hashchain_store[batch_idx][zkp_idx as usize];
Expand Down
15 changes: 6 additions & 9 deletions forester/src/batch_processor/common.rs
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ impl<R: RpcConnection, I: Indexer<R> + IndexerType<R>> BatchProcessor<R, I> {
Err(_) => return 0.0,
};

let batch_index = tree.get_metadata().queue_metadata.next_full_batch_index;
let batch_index = tree.queue_metadata.next_full_batch_index;
match tree.batches.get(batch_index as usize) {
Some(batch) => Self::calculate_completion(batch),
None => 0.0,
Expand All @@ -138,7 +138,7 @@ impl<R: RpcConnection, I: Indexer<R> + IndexerType<R>> BatchProcessor<R, I> {
Err(_) => return 0.0,
};

let batch_index = queue.get_metadata().batch_metadata.next_full_batch_index;
let batch_index = queue.batch_metadata.next_full_batch_index;
match queue.batches.get(batch_index as usize) {
Some(batch) => Self::calculate_completion(batch),
None => 0.0,
Expand Down Expand Up @@ -178,11 +178,8 @@ impl<R: RpcConnection, I: Indexer<R> + IndexerType<R>> BatchProcessor<R, I> {
)
.map_err(|e| BatchProcessError::QueueParsing(e.to_string()))?;

let batch_index = output_queue
.get_metadata()
.batch_metadata
.next_full_batch_index;
let zkp_batch_size = output_queue.get_metadata().batch_metadata.zkp_batch_size;
let batch_index = output_queue.batch_metadata.next_full_batch_index;
let zkp_batch_size = output_queue.batch_metadata.zkp_batch_size;

(
output_queue.batches[batch_index as usize].get_num_inserted_zkps(),
Expand All @@ -209,7 +206,7 @@ impl<R: RpcConnection, I: Indexer<R> + IndexerType<R>> BatchProcessor<R, I> {
};

if let Ok(tree) = merkle_tree {
let batch_index = tree.get_metadata().queue_metadata.next_full_batch_index;
let batch_index = tree.queue_metadata.next_full_batch_index;
let full_batch = tree.batches.get(batch_index as usize).unwrap();

full_batch.get_state() != BatchState::Inserted
Expand All @@ -233,7 +230,7 @@ impl<R: RpcConnection, I: Indexer<R> + IndexerType<R>> BatchProcessor<R, I> {
};

if let Ok(queue) = output_queue {
let batch_index = queue.get_metadata().batch_metadata.next_full_batch_index;
let batch_index = queue.batch_metadata.next_full_batch_index;
let full_batch = queue.batches.get(batch_index as usize).unwrap();

full_batch.get_state() != BatchState::Inserted
Expand Down
5 changes: 1 addition & 4 deletions forester/src/batch_processor/state.rs
Original file line number Diff line number Diff line change
Expand Up @@ -111,8 +111,5 @@ async fn get_batch_index<R: RpcConnection, I: Indexer<R>>(
account.data.as_mut_slice(),
).map_err(|e| BatchProcessError::MerkleTreeParsing(e.to_string()))?;

Ok(merkle_tree
.get_metadata()
.queue_metadata
.next_full_batch_index as usize)
Ok(merkle_tree.queue_metadata.next_full_batch_index as usize)
}
60 changes: 18 additions & 42 deletions forester/src/rollover/operations.rs
Original file line number Diff line number Diff line change
Expand Up @@ -109,49 +109,37 @@ pub async fn get_tree_fullness<R: RpcConnection>(
BatchedMerkleTreeAccount::state_tree_from_bytes_mut(&mut account.data).unwrap();
println!(
"merkle_tree.get_account().queue.batch_size: {:?}",
merkle_tree.get_metadata().queue_metadata.batch_size
merkle_tree.queue_metadata.batch_size
);

println!(
"queue currently_processing_batch_index: {:?}",
merkle_tree
.get_metadata()
.queue_metadata
.currently_processing_batch_index as usize
merkle_tree.queue_metadata.currently_processing_batch_index as usize
);

println!(
"queue batch_size: {:?}",
merkle_tree.get_metadata().queue_metadata.batch_size
merkle_tree.queue_metadata.batch_size
);
println!(
"queue zkp_batch_size: {:?}",
merkle_tree.get_metadata().queue_metadata.zkp_batch_size
merkle_tree.queue_metadata.zkp_batch_size
);
println!(
"queue next_full_batch_index: {:?}",
merkle_tree
.get_metadata()
.queue_metadata
.next_full_batch_index
merkle_tree.queue_metadata.next_full_batch_index
);
println!(
"queue bloom_filter_capacity: {:?}",
merkle_tree
.get_metadata()
.queue_metadata
.bloom_filter_capacity
merkle_tree.queue_metadata.bloom_filter_capacity
);
println!(
"queue num_batches: {:?}",
merkle_tree.get_metadata().queue_metadata.num_batches
merkle_tree.queue_metadata.num_batches
);

println!(
"tree next_index: {:?}",
merkle_tree.get_metadata().next_index
);
println!("tree height: {:?}", merkle_tree.get_metadata().height);
println!("tree next_index: {:?}", merkle_tree.next_index);
println!("tree height: {:?}", merkle_tree.height);

// TODO: implement
let threshold = 0;
Expand All @@ -171,49 +159,37 @@ pub async fn get_tree_fullness<R: RpcConnection>(
BatchedMerkleTreeAccount::state_tree_from_bytes_mut(&mut account.data).unwrap();
println!(
"merkle_tree.get_account().queue.batch_size: {:?}",
merkle_tree.get_metadata().queue_metadata.batch_size
merkle_tree.queue_metadata.batch_size
);

println!(
"queue currently_processing_batch_index: {:?}",
merkle_tree
.get_metadata()
.queue_metadata
.currently_processing_batch_index as usize
merkle_tree.queue_metadata.currently_processing_batch_index as usize
);

println!(
"queue batch_size: {:?}",
merkle_tree.get_metadata().queue_metadata.batch_size
merkle_tree.queue_metadata.batch_size
);
println!(
"queue zkp_batch_size: {:?}",
merkle_tree.get_metadata().queue_metadata.zkp_batch_size
merkle_tree.queue_metadata.zkp_batch_size
);
println!(
"queue next_full_batch_index: {:?}",
merkle_tree
.get_metadata()
.queue_metadata
.next_full_batch_index
merkle_tree.queue_metadata.next_full_batch_index
);
println!(
"queue bloom_filter_capacity: {:?}",
merkle_tree
.get_metadata()
.queue_metadata
.bloom_filter_capacity
merkle_tree.queue_metadata.bloom_filter_capacity
);
println!(
"queue num_batches: {:?}",
merkle_tree.get_metadata().queue_metadata.num_batches
merkle_tree.queue_metadata.num_batches
);

println!(
"tree next_index: {:?}",
merkle_tree.get_metadata().next_index
);
println!("tree height: {:?}", merkle_tree.get_metadata().height);
println!("tree next_index: {:?}", merkle_tree.next_index);
println!("tree height: {:?}", merkle_tree.height);

// TODO: implement
let threshold = 0;
Expand Down
4 changes: 2 additions & 2 deletions forester/src/tree_data_sync.rs
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ fn process_batch_state_account(account: &mut Account, pubkey: Pubkey) -> Result<
})?;
Ok(create_tree_accounts(
pubkey,
&tree_account.get_metadata().metadata,
&tree_account.metadata,
TreeType::BatchedState,
))
}
Expand All @@ -70,7 +70,7 @@ fn process_batch_address_account(account: &mut Account, pubkey: Pubkey) -> Resul
})?;
Ok(create_tree_accounts(
pubkey,
&tree_account.get_metadata().metadata,
&tree_account.metadata,
TreeType::BatchedAddress,
))
}
Expand Down
14 changes: 7 additions & 7 deletions program-libs/batched-merkle-tree/src/initialize_state_tree.rs
Original file line number Diff line number Diff line change
Expand Up @@ -204,7 +204,7 @@ pub fn init_batched_state_merkle_tree_accounts<'a>(
params.close_threshold,
Some(params.additional_bytes),
),
queue_type: QueueType::Output as u64,
queue_type: QueueType::BatchedOutput as u64,
associated_merkle_tree: mt_pubkey,
};

Expand Down Expand Up @@ -345,11 +345,11 @@ fn _assert_mt_zero_copy_inited<const TREE_TYPE: u64>(
) {
use light_hasher::Hasher;

let queue = account.get_metadata().queue_metadata;
let queue = account.queue_metadata;
let ref_queue = ref_account.queue_metadata;
let num_batches = ref_queue.num_batches as usize;
let mut next_index = account.get_metadata().next_index;
assert_eq!(*account.get_metadata(), ref_account, "metadata mismatch");
let mut next_index = account.next_index;
assert_eq!(*account, ref_account, "metadata mismatch");

assert_eq!(
account.root_history.capacity(),
Expand Down Expand Up @@ -381,9 +381,9 @@ fn _assert_mt_zero_copy_inited<const TREE_TYPE: u64>(
}

let queue_type = if tree_type == TreeType::BatchedState as u64 {
QueueType::Input as u64
QueueType::BatchedInput as u64
} else {
QueueType::Address as u64
QueueType::BatchedAddress as u64
};
crate::queue::assert_queue_inited(
queue,
Expand Down Expand Up @@ -464,7 +464,7 @@ pub fn create_output_queue_account(params: CreateOutputQueueParams) -> BatchedQu
network_fee: params.network_fee,
additional_bytes: params.additional_bytes,
},
queue_type: QueueType::Output as u64,
queue_type: QueueType::BatchedOutput as u64,
associated_merkle_tree: params.associated_merkle_tree,
};
let batch_metadata = BatchMetadata::new_output_queue(
Expand Down
Loading

0 comments on commit 55a5220

Please sign in to comment.